language
stringclasses
2 values
func_code_string
stringlengths
63
466k
python
def main(self):
    """
    Drive the LiPD -> NOAA conversion: sort the loaded LiPD data into NOAA
    sections and write out the NOAA text file(s).

    Working directory at this point: dir_tmp/dir_bag/data/

    :return none:
    """
    logger_lpd_noaa.info("enter main")
    # MISC SETUP FUNCTIONS
    # Timestamp the conversion of the file.
    self.noaa_data_sorted["File_Last_Modified_Date"]["Modified_Date"] = generate_timestamp()
    self.__get_table_count()
    # Get measurement tables from metadata, and sort them into self.
    self.__put_tables_in_self(["paleo", "paleoData", "measurementTable"])
    self.__put_tables_in_self(["chron", "chronData", "measurementTable"])
    # How many measurement tables exist? This tells us how many NOAA files to create.
    self.__get_table_pairs()
    # Reorganize data into NOAA sections (converts LiPD keys to NOAA keys).
    self.__reorganize()
    # Special case: earliest_year, most_recent_year, and time unit
    # (currently disabled — kept for reference):
    # self.__check_time_values()
    # self.__check_time_unit()
    self.__get_overall_data(self.lipd_data)
    self.__reorganize_sensor()
    self.__lists_to_str()
    self.__generate_study_name()
    # END MISC SETUP FUNCTIONS
    # Use the sorted data to write the output file(s).
    # self.noaa_data_sorted = self.__key_conversion(self.noaa_data_sorted)
    self.__create_file()
    logger_lpd_noaa.info("exit main")
    return
python
def getAsKmlGridAnimation(self, tableName, timeStampedRasters=None, rasterIdFieldName='id',
                          rasterFieldName='raster', documentName='default', alpha=1.0,
                          noDataValue=0, discreet=False):
    """
    Return a sequence of rasters with timestamps as a kml with time markers for animation.

    :param tableName: Name of the table to extract rasters from
    :param timeStampedRasters: List of dictionaries with keys: rasterId, dateTime
        rasterId = a unique integer identifier used to locate the raster
        (usually value of primary key column)
        dateTime = a datetime object representing the time the raster occurs
        e.g.::

            timeStampedRasters = [{'rasterId': 1, 'dateTime': datetime(1970, 1, 1)},
                                  {'rasterId': 2, 'dateTime': datetime(1970, 1, 2)},
                                  {'rasterId': 3, 'dateTime': datetime(1970, 1, 3)}]

    :param rasterIdFieldName: Name of the id field for rasters (usually the primary key field)
    :param rasterFieldName: Name of the field where rasters are stored (of type raster)
    :param documentName: The name to give to the KML document (will be listed in legend under this name)
    :param alpha: The transparency to apply to each raster cell (0.0-1.0)
    :param noDataValue: The value to be used as the no data value (default is 0)
    :param discreet: When True, embed a discreet (categorical) SLD color map instead of a continuous one
    :rtype: string
    """
    # BUGFIX: the original used a mutable default argument (timeStampedRasters=[]),
    # which is shared across calls and can accumulate state.
    if timeStampedRasters is None:
        timeStampedRasters = []

    # Validate alpha
    if not (alpha >= 0 and alpha <= 1.0):
        raise ValueError("RASTER CONVERSION ERROR: alpha must be between 0.0 and 1.0.")

    rasterIds = []
    for timeStampedRaster in timeStampedRasters:
        # Validate dictionary
        if 'rasterId' not in timeStampedRaster:
            raise ValueError('RASTER CONVERSION ERROR: rasterId must be provided for each raster.')
        elif 'dateTime' not in timeStampedRaster:
            raise ValueError('RASTER CONVERSION ERROR: dateTime must be provided for each raster.')
        rasterIds.append(str(timeStampedRaster['rasterId']))

    # One color ramp to rule them all:
    # get a single color ramp that is based on the range of values in all the rasters.
    minValue, maxValue = self.getMinMaxOfRasters(session=self._session,
                                                 table=tableName,
                                                 rasterIds=rasterIds,
                                                 rasterIdField=rasterIdFieldName,
                                                 rasterField=rasterFieldName,
                                                 noDataValue=noDataValue)

    mappedColorRamp = ColorRampGenerator.mapColorRampToValues(colorRamp=self._colorRamp,
                                                              minValue=minValue,
                                                              maxValue=maxValue,
                                                              alpha=alpha)

    # Default time delta to None; only computed when two or more rasters exist.
    deltaTime = None
    time1 = timeStampedRasters[0]['dateTime']
    if len(timeStampedRasters) >= 2:
        time2 = timeStampedRasters[1]['dateTime']
        deltaTime = time2 - time1

    # Initialize KML Document
    kml = ET.Element('kml', xmlns='http://www.opengis.net/kml/2.2')
    document = ET.SubElement(kml, 'Document')
    docName = ET.SubElement(document, 'name')
    docName.text = documentName

    if not discreet:
        # Embed the color ramp in SLD format
        document.append(ET.fromstring(mappedColorRamp.getColorMapAsContinuousSLD()))
    else:
        values = []
        document.append(ET.fromstring(mappedColorRamp.getColorMapAsDiscreetSLD(values)))

    # Apply special style to hide legend items
    style = ET.SubElement(document, 'Style', id='check-hide-children')
    listStyle = ET.SubElement(style, 'ListStyle')
    listItemType = ET.SubElement(listStyle, 'listItemType')
    listItemType.text = 'checkHideChildren'
    styleUrl = ET.SubElement(document, 'styleUrl')
    styleUrl.text = '#check-hide-children'

    # Collect unique values (used for the discreet SLD at the end)
    uniqueValues = []

    # Retrieve the rasters and styles
    for timeStampedRaster in timeStampedRasters:
        # Extract variables
        rasterId = timeStampedRaster['rasterId']
        if deltaTime:
            dateTime = timeStampedRaster['dateTime']
            prevDateTime = dateTime - deltaTime

        # Get polygons for each cell in kml format
        statement = '''
                    SELECT x, y, val, ST_AsKML(geom) AS polygon
                    FROM (
                    SELECT (ST_PixelAsPolygons({0})).*
                    FROM {1} WHERE {2}={3}
                    ) AS foo
                    ORDER BY val;
                    '''.format(rasterFieldName, tableName, rasterIdFieldName, rasterId)

        result = self._session.execute(statement)

        # Set initial group value (sentinel that never matches a real cell value)
        groupValue = -9999999.0

        # Add polygons to the kml file with styling
        for row in result:
            # Value will be None if it is a no data value
            if row.val:
                value = float(row.val)
            else:
                value = None

            polygonString = row.polygon
            i = int(row.x)
            j = int(row.y)

            # Only create placemarks for values that are not no data values
            if value:
                if value not in uniqueValues:
                    uniqueValues.append(value)

                # Create a new placemark for each group of values
                if value != groupValue:
                    placemark = ET.SubElement(document, 'Placemark')
                    placemarkName = ET.SubElement(placemark, 'name')
                    placemarkName.text = str(value)

                    # Create style tag and setup styles
                    style = ET.SubElement(placemark, 'Style')

                    # Set polygon line style
                    lineStyle = ET.SubElement(style, 'LineStyle')

                    # Set polygon line color and width
                    lineColor = ET.SubElement(lineStyle, 'color')
                    lineColor.text = self.LINE_COLOR
                    lineWidth = ET.SubElement(lineStyle, 'width')
                    lineWidth.text = str(self.LINE_WIDTH)

                    # Set polygon fill color
                    polyStyle = ET.SubElement(style, 'PolyStyle')
                    polyColor = ET.SubElement(polyStyle, 'color')

                    # Convert alpha from 0.0-1.0 decimal to 00-FF string
                    integerAlpha = mappedColorRamp.getAlphaAsInteger()

                    # Get RGB color from color ramp and convert to KML hex ABGR string with alpha
                    integerRGB = mappedColorRamp.getColorForValue(value)
                    hexABGR = '%02X%02X%02X%02X' % (integerAlpha,
                                                    integerRGB[mappedColorRamp.B],
                                                    integerRGB[mappedColorRamp.G],
                                                    integerRGB[mappedColorRamp.R])

                    # Set the polygon fill alpha and color
                    polyColor.text = hexABGR

                    if deltaTime:
                        # Create TimeSpan tag with begin and end tags
                        timeSpan = ET.SubElement(placemark, 'TimeSpan')
                        begin = ET.SubElement(timeSpan, 'begin')
                        begin.text = prevDateTime.strftime('%Y-%m-%dT%H:%M:%S')
                        end = ET.SubElement(timeSpan, 'end')
                        end.text = dateTime.strftime('%Y-%m-%dT%H:%M:%S')

                    # Create multigeometry tag
                    multigeometry = ET.SubElement(placemark, 'MultiGeometry')

                    # Create the data tag
                    extendedData = ET.SubElement(placemark, 'ExtendedData')

                    # Add value to data
                    valueData = ET.SubElement(extendedData, 'Data', name='value')
                    valueValue = ET.SubElement(valueData, 'value')
                    valueValue.text = str(value)

                    iData = ET.SubElement(extendedData, 'Data', name='i')
                    valueI = ET.SubElement(iData, 'value')
                    valueI.text = str(i)

                    jData = ET.SubElement(extendedData, 'Data', name='j')
                    valueJ = ET.SubElement(jData, 'value')
                    valueJ.text = str(j)

                    if deltaTime:
                        tData = ET.SubElement(extendedData, 'Data', name='t')
                        valueT = ET.SubElement(tData, 'value')
                        valueT.text = dateTime.strftime('%Y-%m-%dT%H:%M:%S')

                    groupValue = value

                # Get polygon object from kml string and append to the current multigeometry group
                polygon = ET.fromstring(polygonString)
                multigeometry.append(polygon)

    if not discreet:
        # Embed the color ramp in SLD format
        document.append(ET.fromstring(mappedColorRamp.getColorMapAsContinuousSLD()))
    else:
        # Sort the unique values before building the discreet color map
        uniqueValues.sort()
        document.append(ET.fromstring(mappedColorRamp.getColorMapAsDiscreetSLD(uniqueValues)))

    return ET.tostring(kml)
python
def create_dialog(self):
    """Create the slow-wave detection dialog: info box, parameter box,
    options box, and the button row, laid out side by side."""
    # --- Info box: event name plus channel/cycle/stage selectors -----------
    box0 = QGroupBox('Info')
    self.name = FormStr()
    self.name.setText('sw')
    # Changing the channel group refreshes the channel list.
    self.idx_group.activated.connect(self.update_channels)
    form = QFormLayout(box0)
    form.addRow('Event name', self.name)
    form.addRow('Channel group', self.idx_group)
    form.addRow('Channel(s)', self.idx_chan)
    form.addRow('Cycle(s)', self.idx_cycle)
    form.addRow('Stage(s)', self.idx_stage)
    # --- Parameters box: detection method and its numeric thresholds -------
    box1 = QGroupBox('Parameters')
    mbox = QComboBox()
    method_list = SLOW_WAVE_METHODS
    for method in method_list:
        mbox.addItem(method)
    self.idx_method = mbox
    self.method = mbox.currentText()
    # Switching methods repopulates the default parameter values.
    mbox.currentIndexChanged.connect(self.update_values)
    self.index['f1'] = FormFloat()
    self.index['f2'] = FormFloat()
    self.index['min_trough_dur'] = FormFloat()
    self.index['max_trough_dur'] = FormFloat()
    self.index['max_trough_amp'] = FormFloat()
    self.index['min_ptp'] = FormFloat()
    self.index['min_dur'] = FormFloat()
    self.index['max_dur'] = FormFloat()
    form = QFormLayout(box1)
    form.addRow('Method', mbox)
    form.addRow('Lowcut (Hz)', self.index['f1'])
    form.addRow('Highcut (Hz)', self.index['f2'])
    form.addRow('Min. trough duration (sec)', self.index['min_trough_dur'])
    form.addRow(' Max. trough duration (sec)', self.index['max_trough_dur'])
    form.addRow(' Max. trough amplitude (uV)', self.index['max_trough_amp'])
    form.addRow('Min. peak-to-peak amplitude (uV)', self.index['min_ptp'])
    form.addRow('Min. duration (sec)', self.index['min_dur'])
    form.addRow(' Max. duration (sec)', self.index['max_dur'])
    # --- Options box: exclusion rules and signal pre-processing flags ------
    box3 = QGroupBox('Options')
    self.index['detrend'] = FormBool('Detrend (linear)')
    self.index['invert'] = FormBool('Invert detection (down-then-up)')
    self.index['excl_epoch'] = FormBool('Exclude Poor signal epochs')
    self.index['excl_event'] = FormMenu(['none', 'channel-specific', 'from any channel'])
    self.index['min_seg_dur'] = FormFloat(5)
    # Defaults: exclude poor-signal epochs and detrend the signal.
    self.index['excl_epoch'].set_value(True)
    self.index['detrend'].set_value(True)
    form = QFormLayout(box3)
    form.addRow(self.index['excl_epoch'])
    form.addRow('Exclude Artefact events', self.index['excl_event'])
    form.addRow('Minimum subsegment duration', self.index['min_seg_dur'])
    form.addRow(self.index['detrend'])
    form.addRow(self.index['invert'])
    # --- Button row and overall layout -------------------------------------
    self.bbox.clicked.connect(self.button_clicked)
    btnlayout = QHBoxLayout()
    btnlayout.addStretch(1)
    btnlayout.addWidget(self.bbox)
    vlayout = QVBoxLayout()
    vlayout.addWidget(box1)
    vlayout.addWidget(box3)
    vlayout.addStretch(1)
    vlayout.addLayout(btnlayout)
    hlayout = QHBoxLayout()
    hlayout.addWidget(box0)
    hlayout.addLayout(vlayout)
    # Populate parameter defaults for the initially selected method.
    self.update_values()
    self.setLayout(hlayout)
python
def setup_package():
    """Setup procedure.

    Reads the package metadata from ``setup.json`` and the long description
    from ``README.md``, then invokes ``setuptools.setup`` with both.
    """
    import json
    from setuptools import setup, find_packages

    filename_setup_json = 'setup.json'
    filename_description = 'README.md'

    # BUGFIX: read both files with an explicit UTF-8 encoding; the implicit
    # default encoding is platform-dependent and can break on non-ASCII
    # README or metadata content.
    with open(filename_setup_json, 'r', encoding='utf-8') as handle:
        setup_json = json.load(handle)

    with open(filename_description, 'r', encoding='utf-8') as handle:
        description = handle.read()

    setup(
        include_package_data=True,
        packages=find_packages(),
        setup_requires=['reentry'],
        reentry_register=True,
        long_description=description,
        long_description_content_type='text/markdown',
        **setup_json)
python
def destroy(self, stream=False):
    """
    Execute ``terraform destroy`` against the current directory.

    :param stream: whether or not to stream TF output in realtime
    :type stream: bool
    """
    self._setup_tf(stream=stream)
    cmd_args = ['-refresh=true', '-force', '.']
    logger.warning('Running terraform destroy: %s', ' '.join(cmd_args))
    output = self._run_tf('destroy', cmd_args=cmd_args, stream=stream)
    # When streaming, the output was already shown live; otherwise log it now.
    if not stream:
        logger.warning("Terraform destroy finished successfully:\n%s", output)
    else:
        logger.warning('Terraform destroy finished successfully.')
java
public void checkPermissions(String... permissions) throws AuthorizationException {
    // Guard clause: nothing to do when every requested permission is held.
    if (isPermittedAll(permissions)) {
        return;
    }
    throw new AuthorizationException("'{}' does not have the permissions {}", toString(),
            Arrays.toString(permissions));
}
java
protected void renderLayers (Graphics2D g, Component pcomp, Rectangle bounds, boolean[] clipped, Rectangle dirty) { JLayeredPane lpane = JLayeredPane.getLayeredPaneAbove(pcomp); if (lpane != null) { renderLayer(g, bounds, lpane, clipped, JLayeredPane.PALETTE_LAYER); renderLayer(g, bounds, lpane, clipped, JLayeredPane.MODAL_LAYER); renderLayer(g, bounds, lpane, clipped, JLayeredPane.POPUP_LAYER); renderLayer(g, bounds, lpane, clipped, JLayeredPane.DRAG_LAYER); } // if we have a MediaOverlay, let it know that any sprites in this region need to be // repainted as the components beneath them have just been redrawn if (_overlay != null) { _overlay.addDirtyRegion(dirty); } }
python
def patch_stdout_context(self, raw=False, patch_stdout=True, patch_stderr=True):
    """
    Build a context manager that swaps ``sys.stdout`` (and optionally
    ``sys.stderr``) for a proxy.  The proxy ensures printed text appears
    above the prompt without destroying the renderer's output.

    :param raw: Passed through to the stdout proxy.
    :param patch_stdout: Replace `sys.stdout`.
    :param patch_stderr: Replace `sys.stderr`.
    """
    proxy = self.stdout_proxy(raw=raw)
    return _PatchStdoutContext(
        proxy,
        patch_stdout=patch_stdout,
        patch_stderr=patch_stderr)
java
@Override public double getCopyProcessingRate(long currentTime) { @SuppressWarnings("deprecation") long bytesCopied = super.getCounters().findCounter (Task.Counter.REDUCE_SHUFFLE_BYTES).getCounter(); long timeSpentCopying = 0; long startTime = getStartTime(); if(getPhase() == Phase.SHUFFLE) { if (currentTime <= startTime) { LOG.error("current time is " + currentTime + ", which is <= start " + "time " + startTime + " in " + this.getTaskID()); } timeSpentCopying = currentTime - startTime; } else { //shuffle phase is done long shuffleFinishTime = getShuffleFinishTime(); if (shuffleFinishTime <= startTime) { LOG.error("Shuffle finish time is " + shuffleFinishTime + ", which is <= start time " + startTime + " in " + this.getTaskID()); return 0; } timeSpentCopying = shuffleFinishTime - startTime; } copyProcessingRate = bytesCopied/timeSpentCopying; return copyProcessingRate; }
java
protected I_CmsSearchConfigurationSortOption parseSortOption(JSONObject json) {

    try {
        String solrValue = json.getString(JSON_KEY_SORTOPTION_SOLRVALUE);
        // Optional keys fall back in a chain: paramValue defaults to the Solr
        // value, and the label defaults to the param value.
        String paramValue = parseOptionalStringValue(json, JSON_KEY_SORTOPTION_PARAMVALUE);
        if (paramValue == null) {
            paramValue = solrValue;
        }
        String label = parseOptionalStringValue(json, JSON_KEY_SORTOPTION_LABEL);
        if (label == null) {
            label = paramValue;
        }
        return new CmsSearchConfigurationSortOption(label, paramValue, solrValue);
    } catch (JSONException e) {
        LOG.error(
            Messages.get().getBundle().key(Messages.ERR_SORT_OPTION_NOT_PARSABLE_1, JSON_KEY_SORTOPTION_SOLRVALUE),
            e);
        return null;
    }
}
python
def get_response(self, deflate=True):
    """
    Return the Logout Response, base64-encoded and optionally deflated.

    :param deflate: Apply DEFLATE compression before base64-encoding.
    :type: bool
    :return: Logout Response maybe deflated and base64 encoded
    :rtype: string
    """
    if not deflate:
        return b64encode(self.__logout_response)
    return OneLogin_Saml2_Utils.deflate_and_base64_encode(self.__logout_response)
java
@EventThread
public void endSession ()
{
    // Let the client manager know the session is about to go away, before any
    // teardown work happens.
    _clmgr.clientSessionWillEnd(this);

    // queue up a request for our connection to be closed (if we have a connection, that is)
    Connection conn = getConnection();
    if (conn != null) {
        // go ahead and clear out our connection now to prevent funniness
        setConnection(null);
        // have the connection manager close our connection when it is next convenient
        _conmgr.closeConnection(conn);
    }

    // if we don't have a client object, we failed to resolve in the first place, in which case
    // we have to cope as best we can
    if (_clobj != null) {
        // and clean up after ourselves
        try {
            sessionDidEnd();
        } catch (Exception e) {
            log.warning("Choked in sessionDidEnd " + this + ".", e);
        }

        // release (and destroy) our client object
        _clmgr.releaseClientObject(_clobj.username);

        // we only report that our session started if we managed to resolve our client object,
        // so we only report that it ended in the same circumstance
        _clmgr.clientSessionDidEnd(this);
    }

    // we always want to clear ourselves out of the client manager
    _clmgr.clearSession(this);

    // clear out the client object so that we know the session is over
    _clobj = null;
}
java
/**
 * Returns the element at the given position.
 *
 * @param index position of the requested element, 0 &lt;= index &lt; size
 * @return the stored element
 * @throws IllegalArgumentException if the index is out of range
 */
public T get( int index ) {
    // BUGFIX: also reject negative indices explicitly; previously a negative
    // index fell through to the raw array access.
    if( index < 0 || index >= size )
        throw new IllegalArgumentException("Index out of bounds: index "+index+" size "+size);
    return data[index];
}
java
/**
 * Creates or updates an authorization in the specified express route circuit.
 * Delegates to the ServiceResponse-returning variant and unwraps the body.
 *
 * @param resourceGroupName name of the resource group
 * @param circuitName name of the express route circuit
 * @param authorizationName name of the authorization
 * @param authorizationParameters parameters supplied to the create/update operation
 * @return an Observable emitting the resulting authorization
 */
public Observable<ExpressRouteCircuitAuthorizationInner> beginCreateOrUpdateAsync(String resourceGroupName, String circuitName, String authorizationName, ExpressRouteCircuitAuthorizationInner authorizationParameters) {
    return beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, circuitName, authorizationName, authorizationParameters).map(new Func1<ServiceResponse<ExpressRouteCircuitAuthorizationInner>, ExpressRouteCircuitAuthorizationInner>() {
        @Override
        public ExpressRouteCircuitAuthorizationInner call(ServiceResponse<ExpressRouteCircuitAuthorizationInner> response) {
            // Strip the service-response envelope; callers only want the payload.
            return response.body();
        }
    });
}
java
/**
 * Returns the EClass for IfcRelConnectsPorts, resolving it lazily from the
 * registered Ifc2x3tc1 EMF package and caching the result.
 */
public EClass getIfcRelConnectsPorts() {
    if (ifcRelConnectsPortsEClass == null) {
        // Index 460 is the fixed classifier position in the generated package's
        // classifier list (generated code pattern).
        // NOTE(review): the index value cannot be verified from this file alone.
        ifcRelConnectsPortsEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc2x3tc1Package.eNS_URI)
                .getEClassifiers().get(460);
    }
    return ifcRelConnectsPortsEClass;
}
python
def version(syslog_ng_sbin_dir=None):
    '''
    Returns the version of the installed syslog-ng.

    If syslog_ng_sbin_dir is specified, it is added to the PATH during
    the execution of the command syslog-ng.

    CLI Example:

    .. code-block:: bash

        salt '*' syslog_ng.version
        salt '*' syslog_ng.version /home/user/install/syslog-ng/sbin
    '''
    try:
        result = _run_command_in_extended_path(syslog_ng_sbin_dir, 'syslog-ng', ('-V',))
    except CommandExecutionError as err:
        return _format_return_data(retcode=-1, stderr=six.text_type(err))

    if result['retcode'] != 0:
        return _format_return_data(result['retcode'],
                                   stderr=result['stderr'],
                                   stdout=result['stdout'])

    # The first output line looks like: "syslog-ng 3.6.0alpha0";
    # keep only the version column.
    first_line = result['stdout'].split('\n')[0]
    version_string = first_line.split()[1]
    return _format_return_data(0, stdout=version_string)
python
def _record_hyper_configs(self, hyper_configs):
    """Book-keep one freshly generated round of hyperparameter configs.

    Stores the configs, creates an empty dict to collect their performance
    while they run, resets the finished-config counter for this round, and
    advances the round index.

    Parameters
    ----------
    hyper_configs : list
        the generated hyperconfigs
    """
    self.hyper_configs.append(hyper_configs)
    self.configs_perf.append({})
    self.num_finished_configs.append(0)
    self.num_configs_to_run.append(len(hyper_configs))
    self.increase_i()
java
public String linkRel() { if (this.linkRel!=null) return this.linkRel; if (node.hasAttribute("rel")) return node.getAttribute("rel"); //else return node.getName(); }
java
@Override
@Transactional
public void addContentItem(Snapshot snapshot, String contentId, Map<String, String> props)
    throws SnapshotException {
    // The content-id hash doubles as the duplicate-detection key.
    String hash = createChecksumGenerator().generateChecksum(contentId);

    try {
        // Silently skip when the snapshot already contains this content item.
        SnapshotContentItem existing =
            this.snapshotContentItemRepo.findBySnapshotAndContentIdHash(snapshot, hash);
        if (existing != null) {
            return;
        }

        SnapshotContentItem contentItem = new SnapshotContentItem();
        contentItem.setContentId(contentId);
        contentItem.setSnapshot(snapshot);
        contentItem.setContentIdHash(hash);
        contentItem.setMetadata(PropertiesSerializer.serialize(props));
        this.snapshotContentItemRepo.save(contentItem);
    } catch (Exception ex) {
        throw new SnapshotException("failed to add content item: " + ex.getMessage(), ex);
    }
}
python
def to_dict(self, nested=False):
    """Serialize the model into a plain dict.

    :param nested: flag to return nested relationships' data if true
    :type: bool
    :return: dict
    """
    # Plain column values first.
    result = {column: getattr(self, column) for column in self.columns}

    if nested:
        for relation in self.relations:
            related = getattr(self, relation)
            if isinstance(related, SerializeMixin):
                # Single related object.
                result[relation] = related.to_dict()
            elif isinstance(related, Iterable):
                # Collection of related objects.
                result[relation] = [item.to_dict() for item in related]

    return result
java
/**
 * Delegates a database schema upgrade to the configured lifecycle handler,
 * if one was supplied in the data source options.
 */
public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
    if (AbstractDataSource.this.options.databaseLifecycleHandler != null) {
        // NOTE(review): the trailing boolean presumably flags "upgrade" (vs.
        // downgrade) — confirm against the handler's onUpdate signature.
        AbstractDataSource.this.options.databaseLifecycleHandler.onUpdate(db, oldVersion, newVersion, true);
        // Remember that a schema version change was handled.
        versionChanged = true;
    }
}
python
def angsep(lon1, lat1, lon2, lat2):
    """
    Angular separation (deg) between two sky coordinates.
    Borrowed from astropy (www.astropy.org)

    Notes
    -----
    Uses the Vincenty formula [1], slightly more expensive than some
    alternatives but numerically stable everywhere, including the poles
    and antipodes.

    [1] http://en.wikipedia.org/wiki/Great-circle_distance
    """
    lon1, lat1 = np.radians([lon1, lat1])
    lon2, lat2 = np.radians([lon2, lat2])

    dlon = lon2 - lon1
    sin_dlon = np.sin(dlon)
    cos_dlon = np.cos(dlon)
    sin_lat1 = np.sin(lat1)
    sin_lat2 = np.sin(lat2)
    cos_lat1 = np.cos(lat1)
    cos_lat2 = np.cos(lat2)

    # Numerator terms of the Vincenty formula.
    num1 = cos_lat2 * sin_dlon
    num2 = cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon
    denominator = sin_lat1 * sin_lat2 + cos_lat1 * cos_lat2 * cos_dlon

    return np.degrees(np.arctan2(np.hypot(num1, num2), denominator))
java
public void removeHoursFromDay(ProjectCalendarHours hours)
{
    // Only hours owned by this calendar may be removed from it.
    if (hours.getParentCalendar() != this)
    {
        throw new IllegalArgumentException();
    }
    // Day values are 1-based; the backing array is 0-based.
    int dayIndex = hours.getDay().getValue() - 1;
    m_hours[dayIndex] = null;
}
java
/**
 * Collects all types that should be available to the reflection pool: every
 * type in the scanned packages, any explicitly requested types from the
 * "gdx.reflect.include" configuration property, and a baseline of common
 * JRE types.
 *
 * @throws UnableToCompleteException if the configuration property is invalid
 */
private Set<JType> findReflectedClasses(final GeneratorContext context, final TypeOracle typeOracle,
        final TreeLogger logger) throws UnableToCompleteException {
    final Set<JType> types = new HashSet<JType>();
    final JPackage[] packages = typeOracle.getPackages();

    // gather all types from wanted packages
    for (final JPackage jPackage : packages) {
        for (final JClassType jType : jPackage.getTypes()) {
            gatherTypes(jType.getErasedType(), types, context, logger);
        }
    }

    // gather all types from explicitely requested packages
    try {
        final ConfigurationProperty reflectionProperties = context.getPropertyOracle()
                .getConfigurationProperty("gdx.reflect.include");
        for (final String property : reflectionProperties.getValues()) {
            final JClassType type = typeOracle.findType(property);
            if (type != null) {
                gatherTypes(type.getErasedType(), types, context, logger);
            }
        }
    } catch (final BadPropertyValueException exception) {
        logger.log(Type.ERROR, "Unknown property: " + "gdx.reflect.include", exception);
        throw new UnableToCompleteException();
    }

    // Always include a baseline of common JRE types. A data-driven loop
    // replaces the original list of fifteen copy-pasted gatherTypes calls
    // (order preserved).
    final String[] defaultTypeNames = {
        "java.util.List", "java.util.ArrayList", "java.util.HashMap", "java.util.Map",
        "java.lang.String", "java.lang.Boolean", "java.lang.Byte", "java.lang.Long",
        "java.lang.Character", "java.lang.Short", "java.lang.Integer", "java.lang.Float",
        "java.lang.CharSequence", "java.lang.Double", "java.lang.Object" };
    for (final String defaultTypeName : defaultTypeNames) {
        gatherTypes(typeOracle.findType(defaultTypeName).getErasedType(), types, context, logger);
    }

    return types;
}
python
def add_block(self):
    """Place one randomly positioned, randomly oriented block of blocked
    terrain on the map (long-and-thin, either horizontal or vertical)."""
    # Pick a start row, keeping a margin at the bottom of the grid.
    max_row = self.grd.get_grid_height() - 15
    if max_row < 2:
        max_row = 2
    start_row = randint(0, max_row)

    # Pick a start column, keeping a margin at the right of the grid.
    max_col = self.grd.get_grid_width() - 10
    if max_col < 2:
        max_col = 2
    start_col = randint(0, max_col)

    # Random orientation: positive -> tall block, otherwise -> wide block.
    direction = randint(1, 19) - 10
    if direction > 0:
        y_len = 10 * (math.floor(self.grd.get_grid_height() / 120) + 1)
        x_len = 1 * (math.floor(self.grd.get_grid_width() / 200) + 1)
    else:
        y_len = 1 * (math.floor(self.grd.get_grid_height() / 200) + 1)
        x_len = 10 * (math.floor(self.grd.get_grid_width() / 120) + 1)

    print("Adding block to ", start_row, start_col, direction)
    for r in range(start_row, start_row + y_len):
        for c in range(start_col, start_col + x_len):
            self.grd.set_tile(r, c, TERRAIN_BLOCKED)
java
public static boolean isUnicodeIdentifierStart(int ch) { /*int cat = getType(ch);*/ // if props == 0, it will just fall through and return false return ((1 << getType(ch)) & ((1 << UCharacterCategory.UPPERCASE_LETTER) | (1 << UCharacterCategory.LOWERCASE_LETTER) | (1 << UCharacterCategory.TITLECASE_LETTER) | (1 << UCharacterCategory.MODIFIER_LETTER) | (1 << UCharacterCategory.OTHER_LETTER) | (1 << UCharacterCategory.LETTER_NUMBER))) != 0; }
python
def get_interface_addresses():
    """
    Get addresses of available network interfaces.
    See netifaces on pypi for details.

    Returns a list of dicts with keys ``name``, ``family`` and ``ip``.
    """
    addresses = []
    ifaces = netifaces.interfaces()
    for iface in ifaces:
        addrs = netifaces.ifaddresses(iface)
        # BUGFIX: dict.keys() returns a view in Python 3, which has no
        # .remove(); materialize a list so the reordering below works.
        families = list(addrs.keys())

        # put IPv4 to the end so it lists as the main iface address
        if netifaces.AF_INET in families:
            families.remove(netifaces.AF_INET)
            families.append(netifaces.AF_INET)

        for family in families:
            for addr in addrs[family]:
                address = {
                    'name': iface,
                    'family': family,
                    'ip': addr['addr'],
                }
                addresses.append(address)
    return addresses
python
def startInventory(self, proto=None, force_regen_rospec=False):
    """Add a ROSpec to the reader and enable it.

    :param proto: unused here; kept for interface compatibility.
    :param force_regen_rospec: when True, regenerate the ROSpec instead of
        reusing a cached one.
    """
    if self.state == LLRPClient.STATE_INVENTORYING:
        # FIX: logger.warn is a deprecated alias of logger.warning.
        logger.warning('ignoring startInventory() while already inventorying')
        return None

    rospec = self.getROSpec(force_new=force_regen_rospec)['ROSpec']
    logger.info('starting inventory')

    # Upside-down chain of callbacks: ADD_ROSPEC -> ENABLE_ROSPEC; once
    # enabled, the transition to STATE_INVENTORYING happens in the
    # enabled_rospec callback. (A previously commented-out START_ROSPEC
    # stage was removed as dead code.)
    enabled_rospec = defer.Deferred()
    enabled_rospec.addCallback(self._setState_wrapper,
                               LLRPClient.STATE_INVENTORYING)
    enabled_rospec.addErrback(self.panic, 'ENABLE_ROSPEC failed')
    logger.debug('made enabled_rospec')

    added_rospec = defer.Deferred()
    added_rospec.addCallback(self.send_ENABLE_ROSPEC, rospec,
                             onCompletion=enabled_rospec)
    added_rospec.addErrback(self.panic, 'ADD_ROSPEC failed')
    logger.debug('made added_rospec')

    self.send_ADD_ROSPEC(rospec, onCompletion=added_rospec)
java
/**
 * Converts the whole byte array to its hexadecimal string representation.
 */
public static String binToHex(byte[] bin)
{
    // Delegate to the range-based overload covering the entire array.
    StringBuffer buffer = new StringBuffer();
    binToHex(bin, 0, bin.length, buffer);
    return buffer.toString();
}
java
/**
 * Converts the supplied varargs to their string representations.
 *
 * @param aVarargs values to convert; null elements become the string "null"
 * @return an array of the same length with each element stringified
 */
public static String[] toStrings(final Object... aVarargs) {
    final String[] strings = new String[aVarargs.length];
    for (int index = 0; index < strings.length; index++) {
        // ROBUSTNESS: String.valueOf is null-safe; the original called
        // .toString() directly and threw NullPointerException on null
        // elements.
        strings[index] = String.valueOf(aVarargs[index]);
    }
    return strings;
}
python
def _lookup_by_mapping():
    """Return the init system based on a constant mapping of
    distribution+version to init system.

    See constants.py for the mapping.
    A failover of the version is proposed for when no version is supplied.
    For instance, Arch Linux's version will most probably be "rolling" at
    any given time, which means that the init system cannot be identified
    by the version of the distro.

    On top of trying to identify by the distro's ID, if /etc/os-release
    contains an "ID_LIKE" field, it will be tried. That, again, is true
    for Arch where the distro's ID changes (Manjaro, Antergos, etc...)
    but the "ID_LIKE" field is always (?) `arch`.

    :return: a one-element list with the init system, or an empty list.
    """
    like = distro.like().lower()
    distribution_id = distro.id().lower()
    version = distro.major_version()
    # Rolling-release Arch derivatives have no meaningful version.
    if 'arch' in (distribution_id, like):
        version = 'any'
    init_sys = constants.DIST_TO_INITSYS.get(
        distribution_id, constants.DIST_TO_INITSYS.get(like))
    if init_sys:
        system = init_sys.get(version)
        return [system] if system else []
    # BUGFIX: the original implicitly returned None when no mapping was
    # found; callers expect a list.
    return []
java
/**
 * Updates a device and returns the response payload.
 *
 * @param deviceId id of the device to update
 * @param device new device data
 * @return the device envelope from the service response
 * @throws ApiException if the API call fails
 */
public DeviceEnvelope updateDevice(String deviceId, Device device) throws ApiException {
    // Delegate to the HTTP-info variant and unwrap its payload.
    ApiResponse<DeviceEnvelope> response = updateDeviceWithHttpInfo(deviceId, device);
    return response.getData();
}
python
def _build_xpath_expr(attrs): """Build an xpath expression to simulate bs4's ability to pass in kwargs to search for attributes when using the lxml parser. Parameters ---------- attrs : dict A dict of HTML attributes. These are NOT checked for validity. Returns ------- expr : unicode An XPath expression that checks for the given HTML attributes. """ # give class attribute as class_ because class is a python keyword if 'class_' in attrs: attrs['class'] = attrs.pop('class_') s = ["@{key}={val!r}".format(key=k, val=v) for k, v in attrs.items()] return '[{expr}]'.format(expr=' and '.join(s))
java
/**
 * Convenience alias for {@link Pair#of} kept for subclass readability.
 */
protected final Pair<?, ?> entry(Object key, Object value)
{
    return Pair.of(key, value);
}
python
def follow_log(self):
    """Reads a logfile continuously (local or via SFTP) and updates the
    internal run graph whenever a new pipeline step is found; pushes updates
    to the frontend as it goes."""
    # Server needs to be up and running before starting sending POST requests
    time.sleep(5)
    try:
        if self.remote:
            logger.debug('Logfile in remote host!')
            cl = client.SSHClient()
            # Try to load system keys
            cl.load_system_host_keys()
            cl.connect(self.remote['host'], port=self.remote.get('port', 22),
                       username=self.remote.get('username', None),
                       password=self.remote.get('password', None))
            sftp = cl.open_sftp()
            f = sftp.open(self.logfile, 'r')
            f.settimeout(5)  # Set 5 seconds timeout for read operations
        else:
            f = open(self.logfile, 'r')
    except IOError:
        raise RuntimeError("Provided logfile does not exist or its not readable")

    self.analysis_finished = False
    last_line_read = False
    while not self.analysis_finished:
        try:
            line = f.readline()
        except timeout:
            # Remote read timed out: try to reconnect and resume from the
            # position we had reached before the connection dropped.
            logger.error("Connection with the server lost, trying to reconnect and continue reading")
            current_pos = f.tell()
            try:
                cl.connect(self.remote['host'], port=self.remote.get('port', 22),
                           username=self.remote.get('username', None),
                           password=self.remote.get('password', None),
                           timeout=300, banner_timeout=300)
            except error:
                logger.error("Couldn't connect to the server after 5 minutes, aborting.")
                os._exit(0)
            else:
                logger.info("Connection restablished!! Will continue reading the logfile")
                sftp = cl.open_sftp()
                f = sftp.open(self.logfile, 'r')
                f.seek(current_pos)
        else:
            if not line:
                # Reached (current) end of file; notify the frontend once,
                # then poll for new lines every second.
                self.finished_reading = True
                if not last_line_read:
                    self.update_frontend({'finished_reading': True})
                    if self.update:
                        self.update_frontend(self._last_message)
                last_line_read = True
                time.sleep(1)
                continue
            parsed_line = ps.parse_log_line(line)
            self._last_message = parsed_line
            self.analysis_finished = parsed_line['step'] == 'finished'
            # If this is a new step, update internal data
            parsed_line['new_run'] = False
            if parsed_line['step'] and not parsed_line['step'] == 'error':
                # Seeing the first step again means a new run started.
                if self.FIRST_STEP is None:
                    self.FIRST_STEP = parsed_line['step']
                elif parsed_line['step'] == self.FIRST_STEP:
                    parsed_line['new_run'] = True
                    self.new_run()
                # Build a graph node id like "run-1_step_name".
                node_id = 'run-{}_'.format(self.current_run + 1) + '_'.join(parsed_line['step'].lower().split())
                parsed_line['step_id'] = node_id
                self.runs[self.current_run].steps.append(parsed_line)
                self.runs[self.current_run].node(node_id, parsed_line['step'])
                self.runs[self.current_run]._nodes.append(node_id)
                n_nodes = len(self.runs[self.current_run]._nodes)
                # Link each new step to the previous one with an edge.
                if n_nodes > 1:
                    self.runs[self.current_run].edge(self.runs[self.current_run]._nodes[n_nodes - 2],
                                                     self.runs[self.current_run]._nodes[n_nodes - 1])
                parsed_line['graph_source'] = self.runs[self.current_run].source
            elif parsed_line['step'] == 'error':
                self.runs[self.current_run].errored = True
            # Update frontend only if its a new step _or_ the update flag is set to true and we are
            # not loading the log for the first time
            if (last_line_read and self.update) or parsed_line['step']:
                self.update_frontend(parsed_line)
    f.close()
java
/**
 * Looks up a {@code Namespace} entity by its primary key.
 *
 * @param id the primary key; must be a positive, non-zero value
 * @return the matching namespace, or {@code null} if none exists
 */
@Override
@Transactional
public Namespace findNamespaceByPrimaryKey(BigInteger id) {
    requireNotDisposed();
    requireArgument(id != null && id.compareTo(ZERO) > 0,
            "ID must be a positive non-zero value.");
    Namespace namespace = findEntity(emf.get(), id, Namespace.class);
    _logger.debug("Query for namespace having id {} resulted in : {}", id, namespace);
    return namespace;
}
python
def eeg_complexity(eeg, sampling_rate, times=None, index=None, include="all", exclude=None, hemisphere="both", central=True, verbose=True, shannon=True, sampen=True, multiscale=True, spectral=True, svd=True, correlation=True, higushi=True, petrosian=True, fisher=True, hurst=True, dfa=True, lyap_r=False, lyap_e=False, names="Complexity"):
    """
    Compute complexity indices of epochs or raw object.

    For each requested time window, each epoch and each channel, computes the
    enabled complexity features via ``complexity()``, then averages every
    feature over channels within an epoch.  Results for all windows are
    concatenated column-wise, with column names prefixed by ``names`` (and by
    the window bounds when more than one window is given).

    :param eeg: MNE Raw or Epochs object (whatever ``eeg_to_df`` accepts).
    :param sampling_rate: sampling rate in Hz, forwarded to ``complexity()``.
    :param times: a ``[start, stop]`` pair, or a list/tuple of such pairs;
        defaults to the whole signal (``[0, None]``).
    :param names: string prefix for output columns, or a list of prefixes
        (one per time window).
    :param shannon, sampen, ...: toggles for the individual complexity
        features, forwarded to ``complexity()``.
    :return: a pandas DataFrame indexed by epoch, one column per feature
        per window.
    """
    data = eeg_to_df(eeg, index=index, include=include, exclude=exclude, hemisphere=hemisphere, central=central)

    # if data was Raw, make as if it was an Epoch so the following routine is only written once
    if isinstance(data, dict) is False:
        data = {0: data}

    # Create time windows: normalise to a list of [start, stop] pairs.
    if isinstance(times, tuple):
        times = list(times)
    if isinstance(times, list):
        if isinstance(times[0], list) is False:
            times = [times]
    else:
        times = [[0, None]]

    # Deal with names: one column prefix per window, window bounds appended
    # when several windows are requested so columns stay distinguishable.
    if isinstance(names, str):
        prefix = [names] * len(times)
        if len(times) > 1:
            for time_index, time_window in enumerate(times):
                prefix[time_index] = prefix[time_index] + "_%.2f_%.2f" %(time_window[0], time_window[1])
    else:
        prefix = names

    # Iterate over the time windows.
    complexity_all = pd.DataFrame()
    for time_index, time_window in enumerate(times):
        if len(times) > 1 and verbose is True:
            # NOTE(review): prints the window bounds against the window COUNT
            # (not the window index) -- confirm the intended progress format.
            print("Computing complexity features... window " + str(time_window) + "/" + str(len(times)))

        complexity_features = {}

        # Compute complexity for each channel for each epoch
        index = 0
        for epoch_index, epoch in data.items():
            if len(times) == 1 and verbose is True:
                print("Computing complexity features... " + str(round(index/len(data.items())*100, 2)) + "%")
            index +=1

            # Slice the epoch to the current window.
            # NOTE(review): the slice uses label-based bounds on the epoch
            # DataFrame -- confirm the index is time in the expected unit.
            df = epoch[time_window[0]:time_window[1]].copy()

            complexity_features[epoch_index] = {}
            for channel in df:
                signal = df[channel].values
                features = complexity(signal, sampling_rate=sampling_rate, shannon=shannon, sampen=sampen, multiscale=multiscale, spectral=spectral, svd=svd, correlation=correlation, higushi=higushi, petrosian=petrosian, fisher=fisher, hurst=hurst, dfa=dfa, lyap_r=lyap_r, lyap_e=lyap_e)
                # Accumulate per-channel values so they can be averaged below.
                for key, feature in features.items():
                    if key in complexity_features[epoch_index].keys():
                        complexity_features[epoch_index][key].append(feature)
                    else:
                        complexity_features[epoch_index][key] = [feature]

        # Average each feature across channels, per epoch.
        for epoch_index, epoch in complexity_features.items():
            for feature in epoch:
                complexity_features[epoch_index][feature] = pd.Series(complexity_features[epoch_index][feature]).mean()

        # Convert to dataframe
        complexity_features = pd.DataFrame.from_dict(complexity_features, orient="index")
        complexity_features.columns = [prefix[time_index] + "_" + s for s in complexity_features.columns]

        complexity_all = pd.concat([complexity_all, complexity_features], axis=1)

    return(complexity_all)
java
/**
 * Closes the given config if it is a WebSphereConfig; other implementations
 * are left untouched.
 *
 * @param config the config instance to close
 * @throws ConfigException if closing the underlying resource fails
 */
private void closeConfig(Config config) {
    if (!(config instanceof WebSphereConfig)) {
        return;
    }
    try {
        ((WebSphereConfig) config).close();
    } catch (IOException e) {
        throw new ConfigException(Tr.formatMessage(tc, "could.not.close.CWMCG0004E", e));
    }
}
java
/**
 * Revokes the rights described by {@code right} from this right object.
 * A full revoke clears everything; otherwise each action (SELECT, INSERT,
 * UPDATE, REFERENCES, TRIGGER) is narrowed independently, with column-level
 * revokes subtracting from the granted column set.
 *
 * @param object the schema object (a Table when column sets are involved)
 * @param right the rights to remove
 */
public void remove(SchemaObject object, Right right) {
    // Revoking a full grant wipes all state at once.
    if (right.isFull) {
        clear();
        return;
    }

    // Expand an all-actions grant into per-action flags so individual
    // actions can be revoked below.
    // NOTE(review): isFullTrigger is NOT set here although the TRIGGER
    // section below handles it -- confirm whether that omission is intended.
    if (isFull) {
        isFull = false;
        isFullSelect = isFullInsert = isFullUpdate = isFullReferences = isFullDelete = true;
    }

    // DELETE has no column granularity: a full-delete revoke clears the flag.
    if (right.isFullDelete) {
        isFullDelete = false;
    }

    // SELECT
    if (!isFullSelect && selectColumnSet == null) {}   // nothing granted, nothing to remove
    else if (right.isFullSelect) {
        isFullSelect = false;
        selectColumnSet = null;
    } else if (right.selectColumnSet != null) {
        if (isFullSelect) {
            // Materialise the full grant as an explicit column set first.
            isFullSelect = false;
            selectColumnSet = ((Table) object).getColumnNameSet();
        }
        selectColumnSet.removeAll(right.selectColumnSet);
        if (selectColumnSet.isEmpty()) {
            selectColumnSet = null;
        }
    }

    // INSERT
    if (!isFullInsert && insertColumnSet == null) {}
    else if (right.isFullInsert) {
        isFullInsert = false;
        insertColumnSet = null;
    } else if (right.insertColumnSet != null) {
        if (isFullInsert) {
            isFullInsert = false;
            insertColumnSet = ((Table) object).getColumnNameSet();
        }
        insertColumnSet.removeAll(right.insertColumnSet);
        if (insertColumnSet.isEmpty()) {
            insertColumnSet = null;
        }
    }

    // UPDATE
    if (!isFullUpdate && updateColumnSet == null) {}
    else if (right.isFullUpdate) {
        isFullUpdate = false;
        updateColumnSet = null;
    } else if (right.updateColumnSet != null) {
        if (isFullUpdate) {
            isFullUpdate = false;
            updateColumnSet = ((Table) object).getColumnNameSet();
        }
        updateColumnSet.removeAll(right.updateColumnSet);
        if (updateColumnSet.isEmpty()) {
            updateColumnSet = null;
        }
    }

    // REFERENCES
    if (!isFullReferences && referencesColumnSet == null) {}
    else if (right.isFullReferences) {
        isFullReferences = false;
        referencesColumnSet = null;
    } else if (right.referencesColumnSet != null) {
        if (isFullReferences) {
            isFullReferences = false;
            referencesColumnSet = ((Table) object).getColumnNameSet();
        }
        referencesColumnSet.removeAll(right.referencesColumnSet);
        if (referencesColumnSet.isEmpty()) {
            referencesColumnSet = null;
        }
    }

    // TRIGGER
    if (!isFullTrigger && triggerColumnSet == null) {}
    else if (right.isFullTrigger) {
        isFullTrigger = false;
        triggerColumnSet = null;
    } else if (right.triggerColumnSet != null) {
        if (isFullTrigger) {
            isFullTrigger = false;
            triggerColumnSet = ((Table) object).getColumnNameSet();
        }
        triggerColumnSet.removeAll(right.triggerColumnSet);
        if (triggerColumnSet.isEmpty()) {
            triggerColumnSet = null;
        }
    }
}
python
def _handleInvertAxesSelected(self, evt): """Called when the invert all menu item is selected""" if len(self._axisId) == 0: return for i in range(len(self._axisId)): if self._menu.IsChecked(self._axisId[i]): self._menu.Check(self._axisId[i], False) else: self._menu.Check(self._axisId[i], True) self._toolbar.set_active(self.getActiveAxes()) evt.Skip()
java
private void runEDBPostHooks(EDBCommit commit) { for (EDBPostCommitHook hook : postCommitHooks) { try { hook.onPostCommit(commit); } catch (ServiceUnavailableException e) { // Ignore } catch (Exception e) { logger.error("Error while performing EDBPostCommitHook", e); } } }
java
/**
 * Retrieves the usage plan keys for the given request, running the standard
 * pre-execution pipeline first.
 *
 * @param request the usage plan keys request
 * @return the service result
 */
@Override
public GetUsagePlanKeysResult getUsagePlanKeys(GetUsagePlanKeysRequest request) {
    GetUsagePlanKeysRequest preparedRequest = beforeClientExecution(request);
    return executeGetUsagePlanKeys(preparedRequest);
}
python
def library_path():
    '''
    library_path() yields the absolute path of the neuropythy ``lib``
    directory, located next to the package directory containing this module.
    '''
    package_root = os.path.dirname(os.path.dirname(__file__))
    return os.path.abspath(os.path.join(package_root, 'lib'))
python
def _process_file_continue_ftp_response(self, response: FTPResponse):
    '''Process a restarted content response.

    Appends to the existing file only when both the request and the
    response confirm a restart offset; otherwise raises the
    cannot-continue error.
    '''
    if not (response.request.restart_value and response.restart_value):
        self._raise_cannot_continue_error()
        return
    self.open_file(self._filename, response, mode='ab+')
python
def hash_stream(fileobj, hasher=None, blocksize=65536):
    """Consume a file-like stream in chunks and return its hash object.

    Args:
        fileobj: File-like object with read().
        hasher: Hash object such as hashlib.sha1(). Defaults to sha1.
        blocksize: Number of bytes read from fileobj per iteration.

    Returns:
        The (updated) hash object; call .hexdigest() on it for the digest.
    """
    digest = hasher or hashlib.sha1()
    while True:
        chunk = fileobj.read(blocksize)
        if not chunk:
            break
        digest.update(chunk)
    return digest
java
/**
 * Checks whether any stored version id matches the given one.
 *
 * @param m the version id to test against the stored entries
 * @return true if at least one stored entry matches, false otherwise
 */
public boolean contains( VersionID m ) {
    for ( Object candidate : _versionIds ) {
        VersionID versionId = (VersionID) candidate;
        if ( versionId.match( m ) ) {
            return true;
        }
    }
    return false;
}
java
/**
 * Submits a market order to CEX.IO and returns the placed order.
 *
 * @param marketOrder the order to place; BID maps to a buy, otherwise sell
 * @return the order as reported by the exchange
 * @throws ExchangeException if the exchange reports an error message
 * @throws IOException on transport failure
 */
public CexIOOrder placeCexIOMarketOrder(MarketOrder marketOrder) throws IOException {
    CexIOOrder.Type orderType =
        marketOrder.getType() == BID ? CexIOOrder.Type.buy : CexIOOrder.Type.sell;
    // Price is null for market orders; the "market" order type carries the intent.
    CexioPlaceOrderRequest requestBody =
        new CexioPlaceOrderRequest(orderType, null, marketOrder.getOriginalAmount(), "market");
    CexIOOrder placedOrder =
        cexIOAuthenticated.placeOrder(
            signatureCreator,
            marketOrder.getCurrencyPair().base.getCurrencyCode(),
            marketOrder.getCurrencyPair().counter.getCurrencyCode(),
            requestBody);
    if (placedOrder.getErrorMessage() != null) {
        throw new ExchangeException(placedOrder.getErrorMessage());
    }
    return placedOrder;
}
python
def ekopn(fname, ifname, ncomch):
    """
    Open a new E-kernel file and prepare the file for writing.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopn_c.html

    :param fname: Name of EK file.
    :type fname: str
    :param ifname: Internal file name.
    :type ifname: str
    :param ncomch: The number of characters to reserve for comments.
    :type ncomch: int
    :return: Handle attached to new EK file.
    :rtype: int
    """
    # Marshal the Python arguments into the C types the CSPICE call expects.
    fname_p = stypes.stringToCharP(fname)
    ifname_p = stypes.stringToCharP(ifname)
    ncomch_c = ctypes.c_int(ncomch)
    handle_out = ctypes.c_int()
    libspice.ekopn_c(fname_p, ifname_p, ncomch_c, ctypes.byref(handle_out))
    return handle_out.value
java
/**
 * Applies a histogram-based median filter of the given radius to a grayscale
 * image.  For each row, a 256-bin histogram of the first window is built and
 * then updated incrementally while sliding right, so each pixel costs O(w)
 * rather than O(w^2).  Border pixels (within {@code radius} of the edge) are
 * not written.
 *
 * @param input input image; must be at least (2*radius+1) on each side
 * @param output output image receiving the median values
 * @param radius half-width of the square filter window
 * @param work optional reusable work arrays; allocated when null
 */
public static void process(GrayU8 input, GrayU8 output , int radius, @Nullable IWorkArrays work ) {
    if( work == null )
        work = new IWorkArrays();
    work.reset(256);
    final IWorkArrays _work = work;
    // Window width: the filter covers w x w pixels.
    int w = 2*radius+1;

    // sanity check to make sure the image isn't too small to be processed by this algorithm
    if( input.width < w || input.height < w )
        return;

    // defines what the median is. technically this is an approximation because if even it's the ave
    // of the two elements in the middle. I'm not aware of libraries which actually do this.
    int threshold = (w*w)/2+1;

    // The CONCURRENT comments below are codegen markers for BoofCV's
    // auto-generated concurrent variant; keep them intact.
    //CONCURRENT_BELOW BoofConcurrency.loopBlocks(radius, output.height-radius, w,(y0,y1)->{
    final int y0 = radius, y1 = input.height-radius;
    int[] histogram = _work.pop();
    for( int y = y0; y < y1; y++ ) {
        int seed = input.startIndex + (y-radius)*input.stride;
        Arrays.fill(histogram,0);
        // compute the median value for the first x component and initialize the system
        for( int i = 0; i < w; i++ ) {
            int idx = seed + i*input.stride;
            int end = idx + w;
            while( idx < end ) {
                histogram[(input.data[idx++]&0xFF)]++;
            }
        }

        // Compute the median value: walk bins until half the window is covered.
        int count = 0, median = 0;
        while( true ) {
            count += histogram[median];
            if( count >= threshold )
                break;
            median++;
        }
        output.data[ output.startIndex+y*output.stride+radius] = (byte)median;

        // remove the left most pixel from the histogram
        count += removeSide(input.data,input.stride, w, histogram, seed, median);

        for( int x = radius+1; x < input.width-radius; x++ ) {
            seed = input.startIndex + (y - radius) * input.stride + (x - radius);

            // add the right most pixels to the histogram
            count += addSide(input.data, input.stride, w, histogram, seed + w - 1, median);

            // find the median, using the previous solution as a starting point
            // (count tracks how many window pixels are <= median)
            if (count >= threshold) {
                while (count >= threshold) {
                    count -= histogram[median--];
                }
                median += 1;
                count += histogram[median];
            } else {
                while (count < threshold) {
                    median += 1;
                    count += histogram[median];
                }
            }
            output.data[output.startIndex + y * output.stride + x] = (byte) median;

            // remove the left most pixels from the histogram
            count += removeSide(input.data, input.stride, w, histogram, seed, median);
        }
    }
    //CONCURRENT_ABOVE }});
}
python
def dict2obj(d):
    """Convert a dict to an object or namespace

    >>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
    >>> obj = dict2obj(d)
    >>> obj.b.c
    2
    >>> obj.d
    ['hi', {'foo': 'bar'}]
    >>> d = {'a': 1, 'b': {'c': 2}, 'd': [("hi", {'foo': "bar"})]}
    >>> obj = dict2obj(d)
    >>> obj.d.hi.foo
    'bar'
    """
    # Anything that cannot possibly become a dict is returned unchanged.
    if not isinstance(d, (Mapping, list, tuple)):
        return d
    # Lists/tuples of pairs convert via dict(); anything else falls through.
    try:
        mapping = dict(d)
    except (ValueError, TypeError):
        return d
    namespace = Object()
    for key, value in viewitems(mapping):
        namespace.__dict__[key] = dict2obj(value)
    return namespace
java
/**
 * Creates a JVM type model for the given binary class, using the ASM-based
 * pathway when enabled, otherwise falling back to plain reflection.
 *
 * @param binaryClass the class to model
 * @return the created JVM type
 * @throws RuntimeException wrapping any failure from either pathway
 */
@Override
public JvmDeclaredType createType(BinaryClass binaryClass) {
    if (!useASM) {
        // Reflection-based fallback: load the class (without initializing it)
        // and build the type model from java.lang.reflect metadata.
        try {
            ReflectURIHelper reflectUriHelper = new ReflectURIHelper();
            ReflectionTypeFactory reflectionFactory = new ReflectionTypeFactory(reflectUriHelper);
            Class<?> loadedClass = Class.forName(binaryClass.getName(), false, classLoader);
            return reflectionFactory.createType(loadedClass);
        } catch (ClassNotFoundException e) {
            throw new RuntimeException(e);
        }
    }
    // ASM pathway, timed via createTypeTask.
    try {
        createTypeTask.start();
        return doCreateType(binaryClass);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        createTypeTask.stop();
    }
}
python
def singleChoiceParam(parameters, name, type_converter=str):
    """Return the chosen value of a SingleChoiceParam element.

    Looks up the ``SingleChoiceParam`` with the given ``Name`` attribute in
    the parameters tree, reads its chosen index from ``Value`` and resolves
    it against the ``Values`` children.  A negative index means nothing was
    chosen and is returned as-is.

    :param parameters: the parameters tree.
    :param name: the name of the parameter.
    :param type_converter: function used to convert the chosen value
        (e.g. str, float, int). default = 'str'
    """
    xpath = ".//SingleChoiceParam[@Name='{name}']".format(name=name)
    param = parameters.find(xpath)
    chosen_index = int(param.find('Value').text)
    choices = param.find('Values')
    if chosen_index < 0:
        return chosen_index
    return type_converter(choices[chosen_index].text)
python
def sum_abs_distance(labels, preds):
    """
    Compute the sum of abs distances.

    :param labels: A float tensor of shape [batch_size, ..., X] representing the labels.
    :param preds: A float tensor of shape [batch_size, ..., X] representing the predictions.
    :return: A float tensor of shape [batch_size, ...] representing the summed absolute distance.
    """
    with tf.variable_scope("sum_abs_distance"):
        absolute_errors = tf.abs(preds - labels)
        return tf.reduce_sum(absolute_errors, axis=-1)
java
/**
 * Deletes the specified node group.
 *
 * @param nodeGroup the resource name of the node group to delete; may be null
 * @return the operation tracking the deletion
 */
@BetaApi
public final Operation deleteNodeGroup(ProjectZoneNodeGroupName nodeGroup) {
    String formattedNodeGroup = (nodeGroup == null) ? null : nodeGroup.toString();
    DeleteNodeGroupHttpRequest request =
        DeleteNodeGroupHttpRequest.newBuilder().setNodeGroup(formattedNodeGroup).build();
    return deleteNodeGroup(request);
}
python
def imap_tr(imap, *args, **kwargs):
    '''
    imap_tr(m, ...) yields a copy of the immutable map m in which the keywords
      have been translated according to the given arguments. Arguments may be
      any number of dictionaries followed by any number of keyword arguments,
      all of which are merged left-to-right then used as the translation.
    '''
    if is_imap(imap):
        return imap.tr(*args, **kwargs)
    raise TypeError('IMap object required of imap_tr')
python
def generate(self, output_dir, work, matches_filename):
    """Generates HTML reports showing the text of each witness to `work`
    with its matches highlighted.

    One report is produced per siglum of the work; matches of a witness
    against itself are excluded before highlighting.

    :param output_dir: directory to write report to
    :type output_dir: `str`
    :param work: name of work to highlight
    :type work: `str`
    :param matches_filename: file containing matches to highlight
    :type matches_filename: `str`
    """
    template = self._get_template()
    all_matches = pd.read_csv(matches_filename)
    for siglum in self._corpus.get_sigla(work):
        # Keep only matches that are not this exact witness.
        relevant = all_matches[
            (all_matches[constants.WORK_FIELDNAME] != work) |
            (all_matches[constants.SIGLUM_FIELDNAME] != siglum)]
        content = self._generate_base(work, siglum)
        content = self._highlight(content, relevant)
        content = self._format_content(content)
        text_list = self._generate_text_list(relevant)
        report_name = '{}-{}.html'.format(work, siglum)
        self._write(work, siglum, content, output_dir, report_name,
                    template, True, text_list=text_list)
python
def Parse(self, statentry, file_object, knowledge_base):
    """Parse the Plist file.

    Reads a launchd job plist from ``file_object`` and yields a single
    LaunchdPlist RDF value.  Scalar keys are copied directly, string-or-array
    keys are normalised to lists, and complex structures (KeepAlive,
    EnvironmentVariables, StartCalendarInterval) are converted into their
    dedicated RDF sub-types.  A plist that fails to parse yields an entry
    whose Label records the error.
    """
    _ = knowledge_base

    kwargs = {}
    try:
        kwargs["aff4path"] = file_object.urn
    except AttributeError:
        # Not every file_object carries an AFF4 URN.
        pass

    # Keys whose values are copied verbatim into the result.
    direct_copy_items = [
        "Label", "Disabled", "UserName", "GroupName", "Program",
        "StandardInPath", "StandardOutPath", "StandardErrorPath",
        "LimitLoadToSessionType", "EnableGlobbing", "EnableTransactions",
        "OnDemand", "RunAtLoad", "RootDirectory", "WorkingDirectory", "Umask",
        "TimeOut", "ExitTimeOut", "ThrottleInterval", "InitGroups",
        "StartOnMount", "StartInterval", "Debug", "WaitForDebugger", "Nice",
        "ProcessType", "AbandonProcessGroup", "LowPriorityIO", "LaunchOnlyOnce"
    ]

    # Keys that launchd accepts as either a single string or an array.
    string_array_items = [
        "LimitLoadToHosts", "LimitLoadFromHosts", "LimitLoadToSessionType",
        "ProgramArguments", "WatchPaths", "QueueDirectories"
    ]

    # Complex structures we only record the presence of.
    flag_only_items = ["SoftResourceLimits", "HardResourceLimits", "Sockets"]

    plist = {}

    try:
        plist = biplist.readPlist(file_object)
    except (biplist.InvalidPlistException, ValueError, IOError) as e:
        # Surface the parse failure through the Label field rather than
        # aborting, so the artifact still records that the file existed.
        plist["Label"] = "Could not parse plist: %s" % e

    # These are items that can be directly copied
    for key in direct_copy_items:
        kwargs[key] = plist.get(key)

    # These could be a string, they could be an array, we don't know and neither
    # does Apple so we check.
    for key in string_array_items:
        elements = plist.get(key)
        if isinstance(elements, string_types):
            kwargs[key] = [elements]
        else:
            kwargs[key] = elements

    # These are complex items that can appear in multiple data structures
    # so we only flag on their existence
    for key in flag_only_items:
        if plist.get(key):
            kwargs[key] = True

    # NOTE: "inetdCompatability" (sic) is the key as written by launchd
    # plists; do not correct the spelling.
    if plist.get("inetdCompatability") is not None:
        kwargs["inetdCompatabilityWait"] = plist.get("inetdCompatability").get(
            "Wait")

    # KeepAlive is either a plain bool or a dict of sub-conditions.
    keepalive = plist.get("KeepAlive")
    if isinstance(keepalive, bool) or keepalive is None:
        kwargs["KeepAlive"] = keepalive
    else:
        keepalivedict = {}
        keepalivedict["SuccessfulExit"] = keepalive.get("SuccessfulExit")
        keepalivedict["NetworkState"] = keepalive.get("NetworkState")

        pathstates = keepalive.get("PathState")
        if pathstates is not None:
            keepalivedict["PathState"] = []
            for pathstate in pathstates:
                keepalivedict["PathState"].append(
                    rdf_plist.PlistBoolDictEntry(
                        name=pathstate, value=pathstates[pathstate]))

        otherjobs = keepalive.get("OtherJobEnabled")
        if otherjobs is not None:
            keepalivedict["OtherJobEnabled"] = []
            for otherjob in otherjobs:
                keepalivedict["OtherJobEnabled"].append(
                    rdf_plist.PlistBoolDictEntry(
                        name=otherjob, value=otherjobs[otherjob]))
        kwargs["KeepAliveDict"] = rdf_plist.LaunchdKeepAlive(**keepalivedict)

    envvars = plist.get("EnvironmentVariables")
    if envvars is not None:
        kwargs["EnvironmentVariables"] = []
        for envvar in envvars:
            kwargs["EnvironmentVariables"].append(
                rdf_plist.PlistStringDictEntry(name=envvar, value=envvars[envvar]))

    # StartCalendarInterval is a single dict or a list of dicts.
    startcalendarinterval = plist.get("StartCalendarInterval")
    if startcalendarinterval is not None:
        if isinstance(startcalendarinterval, dict):
            kwargs["StartCalendarInterval"] = [
                rdf_plist.LaunchdStartCalendarIntervalEntry(
                    Minute=startcalendarinterval.get("Minute"),
                    Hour=startcalendarinterval.get("Hour"),
                    Day=startcalendarinterval.get("Day"),
                    Weekday=startcalendarinterval.get("Weekday"),
                    Month=startcalendarinterval.get("Month"))
            ]
        else:
            kwargs["StartCalendarInterval"] = []
            for entry in startcalendarinterval:
                kwargs["StartCalendarInterval"].append(
                    rdf_plist.LaunchdStartCalendarIntervalEntry(
                        Minute=entry.get("Minute"),
                        Hour=entry.get("Hour"),
                        Day=entry.get("Day"),
                        Weekday=entry.get("Weekday"),
                        Month=entry.get("Month")))

    yield rdf_plist.LaunchdPlist(**kwargs)
java
/**
 * Starts the given stopped instance, supplying the customer-provided
 * encryption keys needed to decrypt its attached disks.
 *
 * @param instance the name of the instance to start
 * @param instancesStartWithEncryptionKeyRequestResource the encryption key material
 * @return the operation tracking the start request
 */
@BetaApi
public final Operation startWithEncryptionKeyInstance(
    String instance,
    InstancesStartWithEncryptionKeyRequest instancesStartWithEncryptionKeyRequestResource) {
  StartWithEncryptionKeyInstanceHttpRequest request =
      StartWithEncryptionKeyInstanceHttpRequest.newBuilder()
          .setInstance(instance)
          .setInstancesStartWithEncryptionKeyRequestResource(
              instancesStartWithEncryptionKeyRequestResource)
          .build();
  // Delegate to the request-object overload, which performs the RPC.
  return startWithEncryptionKeyInstance(request);
}
java
/**
 * Command-line entry point: builds a crawler from the arguments and runs it.
 * Argument or parse errors are reported on stderr and terminate normally.
 *
 * @param args command-line arguments forwarded to CrawlToFile
 */
public static void main(String[] args) {
    try {
        final CrawlToFile crawl = new CrawlToFile(args);
        crawl.crawl();
    } catch (ParseException | IllegalArgumentException e) {
        // Fix: the ParseException branch used System.err.print (no trailing
        // newline) while the IllegalArgumentException branch used println;
        // both error paths now emit a complete line.
        System.err.println(e.getMessage());
    }
}
java
/**
 * Computes the 32-bit hash of the given text.
 *
 * @param text the string to hash
 * @return the 32-bit hash of the string's UTF-8 bytes
 */
public static int hash32(final String text) {
    // Fix: getBytes() without a charset uses the platform default encoding,
    // so the same string could hash to different values on different
    // machines.  UTF-8 makes the hash deterministic everywhere.
    // NOTE: this changes hash values on platforms whose default charset is
    // not UTF-8; audit any persisted hashes before deploying.
    final byte[] bytes = text.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    return hash32(bytes, bytes.length);
}
python
def number_of_extents(self):
    """int: number of extents, parsing the underlying data on first access."""
    if self._is_parsed:
        return len(self._extents)
    # Lazily parse exactly once; subsequent calls hit the cached result.
    self._Parse()
    self._is_parsed = True
    return len(self._extents)
python
def preparedir(target_dir, remove_content=True):
    """Prepare a folder for analysis.

    Creates the folder when it does not exist; when it already exists and
    ``remove_content`` is truthy, empties it via ``nukedir``.

    Returns True on success, False when the folder could not be created.
    """
    if not os.path.isdir(target_dir):
        try:
            os.makedirs(target_dir)
        except Exception as exc:
            print("Failed to create folder: %s\n%s" % (target_dir, exc))
            return False
        return True
    if remove_content:
        nukedir(target_dir, False)
    return True
python
def victims(self, filters=None, params=None):
    """
    Yields each victim associated with this tag.

    :param filters: optional filters forwarded to the API request.
    :param params: optional query parameters forwarded to the API request.
    """
    victim_interface = self._tcex.ti.victim(None)
    results = self.tc_requests.victims_from_tag(
        victim_interface, self.name, filters=filters, params=params
    )
    for victim in results:
        yield victim
python
def cmd_create(self, name, auto=False):
    """Create a new migration.

    :param name: name of the migration to create.
    :param auto: when truthy, auto-generate the migration contents from the
        registered models (``self.models``) instead of creating an empty one.
    """
    # Make migration logging visible and standalone for this command.
    LOGGER.setLevel('INFO')
    LOGGER.propagate = 0
    router = Router(self.database,
                    migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
                    migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
    if auto:
        # Router.create expects the model list (not a bool) for auto mode.
        auto = self.models
    router.create(name, auto=auto)
java
/**
 * Parses the credential process output and validates its shape.
 *
 * @param processOutput raw stdout from the credential process
 * @return the parsed JSON object
 * @throws IllegalStateException if the output is not a JSON object or its
 *         "Version" field is absent or not the supported integer 1
 */
private JsonNode parseProcessOutput(String processOutput) {
    JsonNode credentialsJson = Jackson.jsonNodeOf(processOutput);

    if (!credentialsJson.isObject()) {
        throw new IllegalStateException("Process did not return a JSON object.");
    }

    JsonNode version = credentialsJson.get("Version");
    boolean isSupportedVersion = version != null && version.isInt() && version.asInt() == 1;
    if (!isSupportedVersion) {
        throw new IllegalStateException("Unsupported credential version: " + version);
    }
    return credentialsJson;
}
python
def on_channel_open(self, channel):
    """Called by pika when the channel has been opened.

    The channel object is passed in so we can make use of it. Since the
    channel is now open, we'll start consuming.

    :param pika.channel.Channel channel: The channel object
    """
    logger.debug('Channel opened')
    self._channel = channel
    # Register the close callback before any further channel operations so
    # failures during QoS setup are observed.
    self.add_on_channel_close_callback()
    self.setup_qos()
java
/**
 * Writes the computed content back into the source matrix in place and
 * returns the source.  Requires the selection to have the same size as the
 * source; otherwise an in-place write would change the matrix size, which
 * is not supported here.
 *
 * @return the source matrix, after the write-back and change notification
 * @throws RuntimeException if the sizes differ
 */
public Matrix calcOrig() {
    if (!Coordinates.equals(getSource().getSize(), getSize())) {
        throw new RuntimeException(
                "Cannot change Matrix size. Use calc(Ret.NEW) or calc(Ret.LINK) instead.");
    }
    long[] newCoordinates = new long[position.length];
    // For every coordinate of the new content, shift by `position` and copy
    // the corresponding value into the source matrix.
    // NOTE(review): the value written is getObject(newCoordinates), i.e. read
    // at the SHIFTED coordinate rather than at `c` -- confirm this matches
    // getObject's expected coordinate space.
    for (long[] c : newContent.allCoordinates()) {
        Coordinates.plus(newCoordinates, position, c);
        getSource().setAsObject(getObject(newCoordinates), newCoordinates);
    }
    // Notify listeners once after the bulk write instead of per element.
    getSource().fireValueChanged();
    return getSource();
}
java
/**
 * Returns a readable name for the given reflective type.  Classes yield
 * their fully-qualified name, arrays are rendered recursively with "[]"
 * suffixes, and any other Type falls back to its toString().
 *
 * @param type the reflective type to name
 * @return the human-readable type name
 */
public static String getTypeName(Type type) {
    if (!(type instanceof Class)) {
        return type.toString();
    }
    Class<?> clazz = (Class<?>) type;
    if (clazz.isArray()) {
        return getTypeName(clazz.getComponentType()) + "[]";
    }
    return clazz.getName();
}
java
/**
 * Performs a GET through the underlying store with the caller-supplied
 * routing timeout, retrying after a metadata re-bootstrap when the server
 * reports stale metadata.
 *
 * @param requestWrapper composite request carrying key, timeouts and origin time
 * @return the versioned values for the key
 * @throws VoldemortException when all metadata refresh attempts are exhausted
 */
public List<Versioned<V>> getWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper) {
    validateTimeout(requestWrapper.getRoutingTimeoutInMs());
    // Retry loop: each InvalidMetadataException triggers a re-bootstrap and
    // another attempt, up to metadataRefreshAttempts times.
    for(int attempts = 0; attempts < this.metadataRefreshAttempts; attempts++) {
        try {
            long startTimeInMs = System.currentTimeMillis();
            String keyHexString = "";
            if(logger.isDebugEnabled()) {
                ByteArray key = (ByteArray) requestWrapper.getKey();
                keyHexString = RestUtils.getKeyHexString(key);
                debugLogStart("GET",
                              requestWrapper.getRequestOriginTimeInMs(),
                              startTimeInMs,
                              keyHexString);
            }
            List<Versioned<V>> items = store.get(requestWrapper);
            if(logger.isDebugEnabled()) {
                // Total number of vector-clock entries, logged for diagnostics.
                int vcEntrySize = 0;
                for(Versioned<V> vc: items) {
                    vcEntrySize += ((VectorClock) vc.getVersion()).getVersionMap().size();
                }
                debugLogEnd("GET",
                            requestWrapper.getRequestOriginTimeInMs(),
                            startTimeInMs,
                            System.currentTimeMillis(),
                            keyHexString,
                            vcEntrySize);
            }
            return items;
        } catch(InvalidMetadataException e) {
            // Stale cluster metadata: re-bootstrap and retry.
            logger.info("Received invalid metadata exception during get [  " + e.getMessage()
                        + " ] on store '" + storeName + "'. Rebootstrapping");
            bootStrap();
        }
    }
    throw new VoldemortException(this.metadataRefreshAttempts + " metadata refresh attempts failed.");
}
python
def longest_bar_prefix_value(self):
    """
    Calculates the longest progress bar prefix in order to keep all
    progress bars left-aligned.

    :return: Length of the longest task prefix in character unit; 0 when
        there are no tasks.
    """
    # Only the task objects are needed, so iterate values() rather than
    # items(); max() with default replaces the manual running-maximum loop.
    return max((len(task.prefix) for task in self.tasks.values()), default=0)
java
/**
 * Determines whether the given state is excluded by any configured rule.
 *
 * @param state the node state to test
 * @return true as soon as one excluding rule applies, false otherwise
 */
public boolean isExcluded(NodeData state) {
    for (ExcludingRule candidate : excludingRules) {
        if (candidate.suiteFor(state)) {
            return true;
        }
    }
    return false;
}
java
/**
 * Creates a {@code Data} stream backed by a cursor, executing the loader on
 * the default executor.  Convenience overload that delegates to the
 * three-argument variant.
 *
 * @param loader supplies the cursor to read from
 * @param rowMapper maps each cursor row to a value of type {@code T}
 * @return the cursor-backed data stream
 */
@NonNull
public static <T> Data<T> fromCursor(@NonNull Callable<Cursor> loader, @NonNull RowMapper<T> rowMapper) {
    return fromCursor(loader, rowMapper, DataExecutors.defaultExecutor());
}
java
/**
 * Creates a local port forwarding from {@code localPort} to
 * {@code remoteHost:remotePort} over the given SSH session.
 *
 * @param session the SSH session; must be non-null and connected
 * @param remoteHost the remote host to forward to
 * @param remotePort the remote port to forward to
 * @param localPort the local port to listen on
 * @return true when the forwarding was established, false when the session
 *         is null or not connected
 * @throws JschRuntimeException when JSch fails to set up the forwarding
 */
public static boolean bindPort(Session session, String remoteHost, int remotePort, int localPort) throws JschRuntimeException {
    if (session == null || !session.isConnected()) {
        return false;
    }
    try {
        session.setPortForwardingL(localPort, remoteHost, remotePort);
    } catch (JSchException e) {
        throw new JschRuntimeException(e, "From [{}] mapping to [{}] error!", remoteHost, localPort);
    }
    return true;
}
python
def add_stream_logger(level=logging.DEBUG, name=None):
    """
    Add a stream logger. This can be used for printing all SDK calls to stdout
    while working in an interactive session. Note this is a logger for the
    entire module, which will apply to all environments started in the same
    session. If you need a specific logger pass a ``logfile`` to
    :func:`~sdk.init`

    Args:
        level(int): :mod:`logging` log level
        name(str): logger name, will default to the root logger.

    Returns:
        None
    """
    # Build and configure the handler first, then attach it to the logger.
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level)
    stream_handler.setFormatter(get_default_log_formatter())
    target_logger = logging.getLogger(name)
    target_logger.setLevel(level)
    target_logger.addHandler(stream_handler)
java
/**
 * Adds the given value as a wildcard file pattern to the shared accumulator
 * and returns that accumulator.
 *
 * NOTE(review): the {@code _caseSensitive} and {@code target} parameters are
 * ignored here -- confirm whether case handling is applied later inside
 * WildFiles.  Because the shared {@code wildFile} field is returned, every
 * call contributes to the same collection.
 *
 * @param valueStr the wildcard pattern to add
 * @param _caseSensitive unused
 * @param target unused
 * @return the shared WildFiles accumulator
 */
@Override
public WildFiles convert(final String valueStr, final boolean _caseSensitive, final Object target) throws ParseException {
    wildFile.add(valueStr);
    return wildFile;
}
python
def load_clients_file(filename, configuration_class=ClientConfiguration):
    """
    Loads client configurations from a YAML file.

    :param filename: YAML file name.
    :type filename: unicode | str
    :param configuration_class: Class of the configuration object to create.
    :type configuration_class: class
    :return: A dictionary of client configuration objects.
    :rtype: dict[unicode | str, dockermap.map.config.client.ClientConfiguration]
    """
    with open(filename, 'r') as client_file:
        return load_clients(client_file, configuration_class=configuration_class)
java
/**
 * Builds the header for a reply to this message by mapping fields from the
 * incoming header/info maps into a new header: source and destination are
 * swapped, the incoming transaction id becomes the original-transaction
 * reference, and the message identifiers are mirrored into their
 * response-side equivalents.
 *
 * @return a new TrxMessageHeader preconfigured for the reply
 */
public TrxMessageHeader createReplyHeader() {
    Map<String,Object> mapInHeader = this.getMessageHeaderMap();
    Map<String,Object> mapInInfo = this.getMessageInfoMap();
    Map<String,Object> mapReplyHeader = new HashMap<String,Object>();
    Map<String,Object> mapReplyInfo = new HashMap<String,Object>();
    // Swap source and destination so the reply is routed back to the sender.
    this.moveMapInfo(mapReplyHeader, mapInHeader, TrxMessageHeader.DESTINATION_PARAM, TrxMessageHeader.SOURCE_PARAM);
    this.moveMapInfo(mapReplyHeader, mapInHeader, TrxMessageHeader.SOURCE_PARAM, TrxMessageHeader.DESTINATION_PARAM);
    // The incoming transaction id becomes the reply's original-transaction reference.
    this.moveMapInfo(mapReplyHeader, mapInHeader, TrxMessageHeader.ORIG_LOG_TRX_ID, TrxMessageHeader.LOG_TRX_ID);
    this.moveMapInfo(mapReplyHeader, mapInHeader, TrxMessageHeader.REGISTRY_ID, TrxMessageHeader.REGISTRY_ID);
    // Mirror the message identifiers into their response-side keys.
    // NOTE(review): MESSAGE_CODE is copied three times (to MESSAGE_RESPONSE_ID,
    // and to MESSAGE_RESPONSE_CODE from both the info map and the header map)
    // -- confirm whether the header-map copy is an intentional fallback.
    this.moveMapInfo(mapReplyInfo, mapInInfo, TrxMessageHeader.MESSAGE_CODE, TrxMessageHeader.MESSAGE_RESPONSE_ID);
    this.moveMapInfo(mapReplyInfo, mapInInfo, TrxMessageHeader.MESSAGE_CODE, TrxMessageHeader.MESSAGE_RESPONSE_CODE);
    this.moveMapInfo(mapReplyInfo, mapInHeader, TrxMessageHeader.MESSAGE_CODE, TrxMessageHeader.MESSAGE_RESPONSE_CODE);
    this.moveMapInfo(mapReplyInfo, mapInInfo, TrxMessageHeader.EXTERNAL_MESSAGE_CLASS, TrxMessageHeader.MESSAGE_RESPONSE_CLASS);
    this.moveMapInfo(mapReplyInfo, mapInInfo, TrxMessageHeader.MESSAGE_MARSHALLER_CLASS, TrxMessageHeader.MESSAGE_RESPONSE_MARSHALLER_CLASS);
    // Version and schema information carries over unchanged.
    this.moveMapInfo(mapReplyInfo, mapInInfo, TrxMessageHeader.MESSAGE_VERSION, TrxMessageHeader.MESSAGE_VERSION);
    this.moveMapInfo(mapReplyInfo, mapInInfo, TrxMessageHeader.MESSAGE_VERSION_ID, TrxMessageHeader.MESSAGE_VERSION_ID);
    this.moveMapInfo(mapReplyInfo, mapInInfo, TrxMessageHeader.SCHEMA_LOCATION, TrxMessageHeader.SCHEMA_LOCATION);
    TrxMessageHeader trxMessageHeader = new TrxMessageHeader(null, mapReplyHeader);
    trxMessageHeader.setMessageInfoMap(mapReplyInfo);
    return trxMessageHeader;
}
python
def start(self):
    """Start the patch.

    Activates a mock.patch over ``self.target`` and wires the mocked
    client's ``model`` attribute to delegate to ``self.model``.
    """
    patcher = mock.patch(target=self.target)
    self._patcher = patcher
    mock_client = patcher.start()
    mock_instance = mock_client.return_value
    mock_instance.model.side_effect = mock.Mock(side_effect=self.model)
python
def get_bool(self, name, default=None):
    """Retrieves an environment variable value as ``bool``.

    Integer values are converted as expected: zero evaluates to ``False``,
    and non-zero to ``True``. String values of ``'true'`` and ``'false'``
    are evaluated case insensitive.

    Args:
        name (str): The case-insensitive, unprefixed variable name.
        default: If provided, a default value will be returned instead of
            throwing ``EnvironmentError``.

    Returns:
        bool: The environment variable's value as a ``bool``.

    Raises:
        EnvironmentError: If the environment variable does not exist, and
            ``default`` was not provided.
        ValueError: If the environment variable value could not be
            interpreted as a ``bool``.
    """
    if name in self:
        # Delegate parsing to get_int, then collapse to a bool.
        return bool(self.get_int(name))
    if default is not None:
        return default
    raise EnvironmentError.not_found(self._prefix, name)
python
def _get_apphook_field_names(model):
    """
    Return all foreign key field names for a AppHookConfig based model.

    :param model: Django model class to inspect.
    :return: list of field name strings.
    """
    from .models import AppHookConfig  # avoid circular dependencies

    # Single pass: collect the names directly instead of building an
    # intermediate list of field objects and re-iterating it.
    return [
        field.name
        for field in model._meta.fields
        if isinstance(field, ForeignKey)
        and issubclass(field.remote_field.model, AppHookConfig)
    ]
java
/**
 * Asynchronously applies the given fragment to a lab and notifies the
 * callback when the service call completes.
 *
 * @param resourceGroupName the name of the resource group
 * @param labAccountName the name of the lab account
 * @param labName the name of the lab
 * @param lab the partial lab definition (fragment) to apply
 * @param serviceCallback the callback invoked on success or failure
 * @return a ServiceFuture tracking the asynchronous operation
 */
public ServiceFuture<LabInner> updateAsync(String resourceGroupName, String labAccountName, String labName, LabFragment lab, final ServiceCallback<LabInner> serviceCallback) {
    return ServiceFuture.fromResponse(updateWithServiceResponseAsync(resourceGroupName, labAccountName, labName, lab), serviceCallback);
}
python
def get_console_output(self, instance_id):
    """
    Retrieves the console output for the specified instance.

    :type instance_id: string
    :param instance_id: The instance ID of a running instance on the cloud.

    :rtype: :class:`boto.ec2.instance.ConsoleOutput`
    :return: The console output as a ConsoleOutput object
    """
    request_params = {}
    self.build_list_params(request_params, [instance_id], 'InstanceId')
    return self.get_object('GetConsoleOutput', request_params,
                           ConsoleOutput, verb='POST')
java
/**
 * Performs a validation "compaction": scans the sstables covering the
 * validator's range and feeds every row into the validator so it can build
 * a Merkle tree for repair. Reads from a session snapshot when one exists,
 * otherwise from live (canonical or unrepaired) sstables.
 *
 * @param cfs the column family store to validate
 * @param validator the repair validator describing session, range and gcBefore
 * @throws IOException on I/O failure during validation
 */
private void doValidationCompaction(ColumnFamilyStore cfs, Validator validator) throws IOException
{
    // This isn't meant to be race-proof, because it's not -- it won't cause bugs for a CFS to be dropped
    // mid-validation, or to attempt to validate a dropped CFS. This is just a best effort to avoid useless work,
    // particularly in the scenario where a validation is submitted before the drop, and there are compactions
    // started prior to the drop keeping some sstables alive. Since validationCompaction can run
    // concurrently with other compactions, it would otherwise go ahead and scan those again.
    if (!cfs.isValid())
        return;

    Refs<SSTableReader> sstables = null;
    try
    {
        String snapshotName = validator.desc.sessionId.toString();
        int gcBefore;
        boolean isSnapshotValidation = cfs.snapshotExists(snapshotName);
        if (isSnapshotValidation)
        {
            // If there is a snapshot created for the session then read from there.
            sstables = cfs.getSnapshotSSTableReader(snapshotName);

            // Computing gcBefore based on the current time wouldn't be very good because we know each replica
            // will execute this at a different time (that's the whole purpose of repair with snapshot). So
            // instead we take the creation time of the snapshot, which should give us roughly the same time on
            // each replica (roughly being in that case 'as good as in the non-snapshot' case).
            gcBefore = cfs.gcBefore(cfs.getSnapshotCreationTime(snapshotName));
        }
        else
        {
            // Flush first so everyone is validating data that is as similar as possible.
            StorageService.instance.forceKeyspaceFlush(cfs.keyspace.getName(), cfs.name);

            // We don't mark validating sstables as compacting in DataTracker, so we have to mark them
            // referenced instead so they won't be cleaned up if they do get compacted during the validation.
            if (validator.desc.parentSessionId == null || ActiveRepairService.instance.getParentRepairSession(validator.desc.parentSessionId) == null)
                sstables = cfs.selectAndReference(ColumnFamilyStore.CANONICAL_SSTABLES).refs;
            else
            {
                // Incremental repair: validate only unrepaired sstables, and refuse to start if any of them
                // is already part of another running repair session.
                ColumnFamilyStore.RefViewFragment refView = cfs.selectAndReference(ColumnFamilyStore.UNREPAIRED_SSTABLES);
                sstables = refView.refs;
                Set<SSTableReader> currentlyRepairing = ActiveRepairService.instance.currentlyRepairing(cfs.metadata.cfId, validator.desc.parentSessionId);
                if (!Sets.intersection(currentlyRepairing, Sets.newHashSet(refView.sstables)).isEmpty())
                {
                    logger.error("Cannot start multiple repair sessions over the same sstables");
                    throw new RuntimeException("Cannot start multiple repair sessions over the same sstables");
                }
                ActiveRepairService.instance.getParentRepairSession(validator.desc.parentSessionId).addSSTables(cfs.metadata.cfId, refView.sstables);
            }

            // Prefer the gcBefore supplied by the validator; fall back to the CFS default otherwise.
            if (validator.gcBefore > 0)
                gcBefore = validator.gcBefore;
            else
                gcBefore = getDefaultGcBefore(cfs);
        }

        // Create Merkle tree suitable to hold estimated partitions for given range.
        // We blindly assume that partitions are evenly distributed on all sstables for now.
        long numPartitions = 0;
        for (SSTableReader sstable : sstables)
        {
            numPartitions += sstable.estimatedKeysForRanges(Collections.singleton(validator.desc.range));
        }
        // Determine tree depth from number of partitions, but cap at 20 to prevent a large tree.
        int depth = numPartitions > 0 ? (int) Math.min(Math.floor(Math.log(numPartitions)), 20) : 0;
        MerkleTree tree = new MerkleTree(cfs.partitioner, validator.desc.range, MerkleTree.RECOMMENDED_DEPTH, (int) Math.pow(2, depth));

        long start = System.nanoTime();
        try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategy().getScanners(sstables, validator.desc.range))
        {
            CompactionIterable ci = new ValidationCompactionIterable(cfs, scanners.scanners, gcBefore);
            Iterator<AbstractCompactedRow> iter = ci.iterator();
            metrics.beginCompaction(ci);
            try
            {
                // Validate the CF as we iterate over it.
                validator.prepare(cfs, tree);
                while (iter.hasNext())
                {
                    if (ci.isStopRequested())
                        throw new CompactionInterruptedException(ci.getCompactionInfo());
                    AbstractCompactedRow row = iter.next();
                    validator.add(row);
                }
                validator.complete();
            }
            finally
            {
                // A session snapshot is single-use: drop it once validated.
                if (isSnapshotValidation)
                {
                    cfs.clearSnapshot(snapshotName);
                }
                metrics.finishCompaction(ci);
            }
        }

        if (logger.isDebugEnabled())
        {
            // MT serialize may take time.
            long duration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
            logger.debug("Validation finished in {} msec, depth {} for {} keys, serialized size {} bytes for {}",
                         duration, depth, numPartitions, MerkleTree.serializer.serializedSize(tree, 0), validator.desc);
        }
    }
    finally
    {
        // Always release the sstable references taken above.
        if (sstables != null)
            sstables.release();
    }
}
python
def register_graphql_handlers(
    app: "Application",
    engine_sdl: str = None,
    engine_schema_name: str = "default",
    executor_context: dict = None,
    executor_http_endpoint: str = "/graphql",
    executor_http_methods: List[str] = None,
    engine: Engine = None,
    subscription_ws_endpoint: Optional[str] = None,
    graphiql_enabled: bool = False,
    graphiql_options: Optional[Dict[str, Any]] = None,
) -> "Application":
    """Register a Tartiflette Engine to an app.

    Pass either a SDL or an already initialized Engine — exactly one of the
    two, never both and never neither.

    Keyword Arguments:
        app {aiohttp.web.Application} -- The application to register to.
        engine_sdl {str} -- The SDL defining your API (default: {None})
        engine_schema_name {str} -- The name of your sdl (default: {"default"})
        executor_context {dict} -- Context dict passed to the resolvers (default: {None})
        executor_http_endpoint {str} -- URL path the graphql endpoint listens on (default: {"/graphql"})
        executor_http_methods {list[str]} -- Allowed HTTP methods; only GET and POST are supported (default: {None})
        engine {Engine} -- An already initialized Engine (default: {None})
        subscription_ws_endpoint {Optional[str]} -- URL path of the WebSocket subscription endpoint (default: {None})
        graphiql_enabled {bool} -- Whether to serve a GraphiQL endpoint (default: {False})
        graphiql_options {dict} -- Customization options for the GraphiQL instance (default: {None})

    Raises:
        Exception -- On bad sdl/engine parameter combination.
        Exception -- On unsupported HTTP method.

    Return:
        The app object.
    """
    # pylint: disable=too-many-arguments,too-many-locals
    # Exactly one of engine / engine_sdl must be provided.
    if bool(engine_sdl) == bool(engine):
        raise Exception(
            "an engine OR an engine_sdl should be passed here, not both, not none"
        )

    executor_context = executor_context or {}
    executor_context["app"] = app
    executor_http_methods = executor_http_methods or ["GET", "POST"]

    if not engine:
        engine = Engine(engine_sdl, engine_schema_name)

    app["ttftt_engine"] = engine

    for method in executor_http_methods:
        try:
            # Each HTTP method is served by a matching Handlers.handle_<method>.
            handler = getattr(Handlers, "handle_%s" % method.lower())
            app.router.add_route(
                method,
                executor_http_endpoint,
                partial(handler, executor_context),
            )
        except AttributeError:
            raise Exception("Unsupported < %s > http method" % method)

    _set_subscription_ws_handler(app, subscription_ws_endpoint, engine)
    _set_graphiql_handler(
        app,
        graphiql_enabled,
        graphiql_options,
        executor_http_endpoint,
        executor_http_methods,
        subscription_ws_endpoint,
    )

    return app
python
def from_aid(cls, aid):
    """Retrieve the Assay record for the specified AID.

    :param int aid: The PubChem Assay Identifier (AID).
    """
    raw = request(aid, 'aid', 'assay', 'description').read().decode()
    # The payload wraps the assay description in a one-element container.
    record = json.loads(raw)['PC_AssayContainer'][0]
    return cls(record)
python
def conf_int(self, alpha=0.05, coefs=None, return_df=False):
    """Compute the (1-alpha)% confidence intervals for the estimated parameters.

    Used when creating the statsmodels summary.

    Parameters
    ----------
    alpha : float, optional.
        Should be between 0.0 and 1.0. Determines the (1-alpha)%
        confidence interval that will be reported. Default == 0.05.
    coefs : array-like, optional.
        Strings naming the coefficients to keep. Default == None, which
        returns the interval for every parameter.
    return_df : bool, optional.
        If True return a pandas DataFrame, else a numpy array.
        Default = False.

    Returns
    -------
    pandas dataframe or ndarray.
        Column 0 holds the lower bounds, column 1 the upper bounds.
    """
    # Two-sided critical value from the standard normal distribution.
    crit = scipy.stats.norm.ppf(1.0 - alpha / 2.0, loc=0, scale=1)

    # Symmetric margin around the point estimates.
    margin = crit * self.standard_errors
    bounds = pd.concat(
        ((self.params - margin).rename("lower"),
         (self.params + margin).rename("upper")),
        axis=1,
    )

    # Optionally restrict to the requested coefficients.
    if coefs is not None:
        bounds = bounds.loc[coefs, :]

    return bounds if return_df else bounds.values
java
/**
 * Decides whether a table caption sits above or below the table's data area.
 *
 * The decision combines positional heuristics (captions very close to the
 * page bottom are assumed to be below-captions) with a word-level text
 * density comparison of the regions above and below the caption, and a
 * short-circuit for a "Figure"/"FIGURE" caption found in the area above.
 * The result is also stored on the candidate via {@code tc.setTopCaption}.
 *
 * @param linesOfAPage all text lines of the page
 * @param tc the table candidate whose caption is being placed
 * @param distinctY sorted distinct Y coordinates of the page's lines
 * @param wordsOfAPage all word-level text pieces of the page
 * @return true if the caption is judged to be above the table data area
 */
private boolean judgeCaptionLocation(ArrayList<TextPiece> linesOfAPage,
        TableCandidate tc, Vector distinctY, ArrayList<TextPiece> wordsOfAPage) {
    /*
     * By default, the caption position is above the table data area.
     */
    boolean aboveCaption = true;
    Config config = new Config(); // NOTE(review): unused in this method — confirm before removing
    float captionY = linesOfAPage.get(tc.getCaptionStartLine()).getY();
    float captionEndY = linesOfAPage.get(tc.getCaptionEndLine()).getEndY();

    // Locate the caption's Y coordinate in the distinct-Y list; if there is no
    // exact match, find the slot whose neighbors bracket it.
    int yId_distinctY = distinctY.indexOf(captionY);
    if (yId_distinctY < 0) {
        int i = 1;
        while (i < distinctY.size()) {
            if (captionY > Float.valueOf(
                    distinctY.elementAt(i - 1).toString().trim())
                    .floatValue()
                    && captionY < Float.valueOf(
                            distinctY.elementAt(i).toString().trim())
                            .floatValue()) {
                yId_distinctY = i;
                break;
            }
            i++;
        }
    }
    int endYId_distinctY = yId_distinctY + tc.captionLineCount() - 1;

    // Fewer than 4 distinct lines remain below the caption, or the caption ends
    // near the page bottom (720.0 — presumably PDF points; TODO confirm):
    // assume the caption is below the table.
    if ((distinctY.size() - endYId_distinctY < 4) || (captionEndY > 720.0))
        aboveCaption = false;

    /*
     * Method 1: compare the text density in both directions; each test area
     * is defined as six lines.
     */
    if ((distinctY.size() - endYId_distinctY > 6) && (yId_distinctY > 6)
            && (aboveCaption == true)) {
        float captionX = tc.getCaptionX();
        float captionEndX = tc.getCaptonEndX();
        float testAreaX = captionX;
        float testAreaEndX = captionEndX;

        /*
         * Manually keep the area wide enough, for those short table captions.
         */
        if (testAreaEndX - captionX < 40.0f)
            testAreaEndX = testAreaX + 60.0f;

        int txtDensity_Top = 0;
        int txtDensity_Below = 0;
        int lineNumThisPage = linesOfAPage.size(); // NOTE(review): unused — a line-level density pass was removed

        // Compare the text density in both directions at the WORD level.
        int wordNumThisPage = wordsOfAPage.size();
        String txtInTopArea = "";
        String txtInBelowArea = "";
        boolean foundFigureCaption = false;

        /*
         * Judge whether there is a figure caption above; if yes, we do not
         * have to check the text density.
         */
        for (int i = 0; i < linesOfAPage.size(); i++) {
            TextPiece thisLine = linesOfAPage.get(i);
            if ((thisLine.getY() >= Float.valueOf(
                    distinctY.get(yId_distinctY - 5).toString().trim())
                    .floatValue())
                    && (thisLine.getEndY() < Float.valueOf(
                            distinctY.get(yId_distinctY).toString().trim())
                            .floatValue())
                    && (thisLine.getText().replaceAll(" ", "")
                            .startsWith("Figure") == true || thisLine
                            .getText().replaceAll(" ", "")
                            .startsWith("FIGURE") == true)) {
                aboveCaption = true;
                foundFigureCaption = true;
            }
        }

        /*
         * If no figure caption was found above, compare the density in both
         * directions.
         */
        if (foundFigureCaption == false) {
            for (int i = 0; i < wordNumThisPage; i++) {
                TextPiece thisWord = wordsOfAPage.get(i);
                /*
                 * Accumulate the density in the top area (5 lines above).
                 */
                if ((thisWord.getY() >= Float.valueOf(
                        distinctY.get(yId_distinctY - 5).toString().trim())
                        .floatValue())
                        && (thisWord.getEndY() < Float.valueOf(
                                distinctY.get(yId_distinctY).toString()
                                        .trim()).floatValue())
                        && (thisWord.getX() >= testAreaX)
                        && (thisWord.getEndX() <= testAreaEndX)) {
                    txtDensity_Top = txtDensity_Top
                            + thisWord.getText().trim().length();
                    txtInTopArea = txtInTopArea + thisWord.getText();
                }
                /*
                 * Accumulate the density in the below area (6 lines below).
                 */
                else {
                    if ((thisWord.getEndY() <= Float.valueOf(
                            distinctY.get(endYId_distinctY + 6).toString()
                                    .trim()).floatValue())
                            && (thisWord.getY() > Float.valueOf(
                                    distinctY.get(endYId_distinctY)
                                            .toString().trim())
                                    .floatValue())
                            && (thisWord.getX() >= captionX)
                            && (thisWord.getEndX() <= captionEndX)) {
                        txtDensity_Below = txtDensity_Below
                                + thisWord.getText().trim().length();
                        txtInBelowArea = txtInBelowArea + thisWord.getText();
                    }
                }
            }

            // Denser text below the caption means the table body is below,
            // i.e. the caption is NOT above the data area.
            if (txtDensity_Top < txtDensity_Below)
                aboveCaption = false;
        }
    }

    /*
     * TODO: method 2 — compare text density in areas of fixed height
     * (Height = 55.0) in both directions.
     */

    tc.setTopCaption(aboveCaption);
    return aboveCaption;
}
java
private void setIndicesAndTypes() { DeleteByQueryRequest innerRequest = request.request(); innerRequest.indices(query.getIndexArr()); String[] typeArr = query.getTypeArr(); if (typeArr!=null){ innerRequest.getSearchRequest().types(typeArr); } // String[] typeArr = query.getTypeArr(); // if (typeArr != null) { // request.set(typeArr); // } }
python
def _get_framed(self, buf, offset, insert_payload):
    """Serialize the message into ``buf`` at ``offset``; update ``self.crc``.

    The payload is written first so its final length is known before the
    header is packed, then the CRC is computed over header-minus-preamble
    plus payload and appended after the payload.

    Returns the total number of bytes framed (preamble + header + payload + CRC).
    """
    PREAMBLE_BYTES = 1
    payload_start = offset + self._header_len
    # Let the caller write the payload; it reports the payload length.
    self.length = insert_payload(buf, payload_start, self.payload)
    struct.pack_into(self._header_fmt, buf, offset,
                     self.preamble, self.msg_type, self.sender, self.length)
    # CRC covers the header (minus the preamble byte) plus the payload.
    crc_span = self._header_len + self.length - PREAMBLE_BYTES
    self.crc = crc16jit(buf, offset + 1, 0, crc_span)
    struct.pack_into(self._crc_fmt, buf, payload_start + self.length, self.crc)
    return PREAMBLE_BYTES + crc_span + self._crc_len
python
def remote_run(cmd, instance_name, detach=False, retries=1):
    """Run a command on a GCS instance over SSH, optionally detached in screen."""
    if detach:
        cmd = SCREEN.format(command=cmd)
    ssh_args = SSH.format(instance_name=instance_name).split()
    ssh_args.append(cmd)
    for attempt in range(retries + 1):
        try:
            if attempt > 0:
                tf.logging.info("Retry %d for %s", attempt, ssh_args)
            return sp.check_call(ssh_args)
        except sp.CalledProcessError as e:
            # Re-raise only once the retry budget is exhausted.
            if attempt == retries:
                raise e
java
/**
 * Builds a JMX {@link TabularData} view of the service references bound to
 * the given resource type. Each row exposes the service PID, the binding
 * priority and the resource types the service binds to; String[] properties
 * are flattened to a single display string.
 *
 * @param resourceType the resource type to look up references for
 * @return a tabular view keyed by service PID
 * @throws OpenDataException if the JMX open-type metadata cannot be built
 */
@Override
@Description("Gets all of the service references for the specified resource type")
public TabularData getReferences(@Name("ResourceType") String resourceType)
        throws OpenDataException {
    List<ServiceReference> references = ((ComponentBindingsProviderFactoryImpl) componentBindingsProviderFactory)
            .getReferences(resourceType);
    // Column metadata for the tabular type: one row per service reference.
    String[] itemNames = { Constants.SERVICE_PID,
            ComponentBindingsProvider.PRIORITY,
            ComponentBindingsProvider.RESOURCE_TYPE_PROP };
    String[] itemDescriptions = { "The Service ID",
            "The Priority on which the binding service will be called",
            "The resource types this service will bind to" };
    OpenType<?>[] itemTypes = { SimpleType.STRING, SimpleType.STRING,
            SimpleType.STRING };
    CompositeType snapshotType = new CompositeType("references", "References",
            itemNames, itemDescriptions, itemTypes);
    // Rows are indexed by the service PID.
    TabularType quoteTableType = new TabularType("references", "References",
            snapshotType, new String[] { Constants.SERVICE_PID });
    TabularData td = new TabularDataSupport(quoteTableType);
    for (ServiceReference reference : references) {
        Map<String, Object> data = new HashMap<String, Object>();
        for (String itemName : itemNames) {
            // JMX SimpleType.STRING cells cannot hold String[]; render arrays as text.
            if (reference.getProperty(itemName) instanceof String[]) {
                data.put(itemName, Arrays.toString((String[]) reference
                        .getProperty(itemName)));
            } else {
                data.put(itemName, reference.getProperty(itemName));
            }
        }
        td.put(new CompositeDataSupport(snapshotType, data));
    }
    return td;
}
python
def get_msms_annotations(self, representative_only=True, force_rerun=False):
    """Run MSMS on structures and store calculations.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.letter_annotations['*-msms']``

    Args:
        representative_only (bool): If analysis should only be run on the
            representative structure
        force_rerun (bool): If calculations should be rerun even if an
            output file exists
    """
    if representative_only:
        if not self.representative_structure:
            log.warning('{}: no representative structure set, cannot run MSMS'.format(self.id))
            return
        try:
            self.representative_structure.get_msms_annotations(outdir=self.structure_dir,
                                                               force_rerun=force_rerun)
        except TypeError:
            log.error('{}: MSMS SeqRecord length mismatch with {}'.format(self.id,
                                                                          self.representative_structure))
        except Exception as e:
            # "except Exception" (not bare "except:") so KeyboardInterrupt /
            # SystemExit still propagate; include the error in the log.
            log.error('{}: unknown MSMS error with {}: {}'.format(self.id,
                                                                  self.representative_structure, e))
    else:
        for s in self.structures:
            try:
                # Bug fix: force_rerun was previously dropped when running
                # on all structures, silently ignoring the caller's request.
                s.get_msms_annotations(outdir=self.structure_dir, force_rerun=force_rerun)
            except TypeError:
                log.error('{}: MSMS SeqRecord length mismatch with {}'.format(self.id, s.id))
            except Exception as e:
                # Log the error itself instead of printing it to stdout.
                log.error('{}: unknown MSMS error with {}: {}'.format(self.id, s.id, e))
python
def log_estimator_evaluation_result(self, eval_results):
    """Log the evaluation result for an estimator.

    The evaluate result is a dictionary that contains metrics defined in
    model_fn. It also contains an entry for global_step which holds the value
    of the global step when evaluation was performed.

    Args:
        eval_results: dict, the result of evaluate() from an estimator.
    """
    if not isinstance(eval_results, dict):
        # Fix: the message previously said "directory" instead of "dictionary".
        tf.logging.warning(
            "eval_results should be a dictionary for logging. Got %s",
            type(eval_results))
        return
    global_step = eval_results[tf.GraphKeys.GLOBAL_STEP]
    for key in sorted(eval_results):
        # global_step is metadata, not a metric — skip it.
        if key != tf.GraphKeys.GLOBAL_STEP:
            self.log_metric(key, eval_results[key], global_step=global_step)
java
/**
 * Returns a snapshot of all live {@link Progress} instances, pruning any
 * weak references whose referents have been garbage collected.
 *
 * @return a list of the currently live progresses (never contains null)
 */
public synchronized Collection<Progress> getProgresses() {
    List<Progress> list = new ArrayList<>(progresses.size());
    Iterator<WeakReference<Progress>> iter = progresses.iterator();
    while (iter.hasNext()) {
        // Bug fix: dereference the weak reference exactly once. Calling
        // get() twice could observe non-null on the first call and null on
        // the second if the referent is collected in between, which
        // previously allowed a null entry into the returned list.
        Progress progress = iter.next().get();
        if (progress == null) {
            iter.remove();
        } else {
            list.add(progress);
        }
    }
    return list;
}
python
def _prompt_wrapper(message, default=None, validator=None):
    """ Handle references piped from file """

    # Minimal stand-in for prompt_toolkit's Document, so the validator can be
    # applied to text read from stdin instead of an interactive prompt.
    class MockDocument:
        def __init__(self, text):
            self.text = text

    if HAS_INPUT:
        # Interactive session: prompt() performs validation itself.
        ret = prompt(message, default=default, validator=validator)
    else:
        # Input is piped: read one line, echo it, and validate manually.
        ret = sys.stdin.readline().strip()
        print(message, ret)
        if validator:
            validator.validate(MockDocument(ret))
    # NOTE(review): substring check — any answer *containing* "q"
    # (e.g. "query") triggers an exit, not just a literal "q".
    # Confirm this is intended before tightening to `ret == "q"`.
    if "q" in ret:
        if not HAS_OUTPUT:
            print("User exit")
        sys.exit("User exit")
    return ret
python
def pop(self, key, default=None):
    """Remove specified key and return the corresponding value.

    If key is not found, default is returned if given, otherwise KeyError
    is raised.

    Note:
        ``None`` doubles as the "no default" sentinel, so ``pop(key, None)``
        on a missing key raises ``KeyError`` rather than returning ``None``.
    """
    if key not in self:
        if default is not None:
            return default
        raise KeyError(key)
    # The key lives in exactly one of the typed protobuf maps; find it and
    # remove it there. ("m" rather than "map" to avoid shadowing the builtin,
    # and membership is tested directly instead of via the redundant .keys().)
    for m in (self._pb.IntMap, self._pb.FloatMap,
              self._pb.StringMap, self._pb.BoolMap):
        if key in m:
            return m.pop(key)
java
/**
 * Installs the default {@link SubFileFilter} listener on the main record,
 * keyed by the header record. If the main key field is not nullable, the
 * filter is additionally told to filter on null so no detail rows show for
 * a new (unsaved) header record.
 */
public void addSubFileFilter() {
    // Override this if it is not correct.
    SubFileFilter listener = null;
    this.getMainRecord().addListener(listener = new SubFileFilter(this.getHeaderRecord()));
    if (!this.getMainRecord().getKeyArea().getField(DBConstants.MAIN_KEY_FIELD).isNullable())
        listener.setFilterIfNull(true); // If the header record's key can't be null, don't display any detail if new record
}
python
def exists(self, value=None):
    """Return True if the given pk value exists for the given class.

    When no value is supplied, fall back to the value of the current
    field, i.e. the "_pk" attribute of its instance.
    """
    if not value:
        try:
            value = self.get()
        except (AttributeError, DoesNotExist):
            # A deleted instance loses its _pk attribute, so an
            # AttributeError here means this pk field no longer exists;
            # report the pk as absent in that specific case.
            return False
    return self.connection.sismember(self.collection_key, value)
java
/**
 * Parses a package statement:
 *
 *   packageStatement := PACKAGE qualifiedIdentifier SEMICOLON?
 *
 * @param pkg the package descriptor builder being populated
 * @return the parsed package name, or null if parsing did not get that far
 * @throws RecognitionException on a recognition error (also reported internally)
 */
public String packageStatement( PackageDescrBuilder pkg ) throws RecognitionException {
    String pkgName = null;

    try {
        helper.start( pkg, PackageDescrBuilder.class, null );

        // "package" soft keyword
        match( input, DRL5Lexer.ID, DroolsSoftKeywords.PACKAGE, null, DroolsEditorType.KEYWORD );
        if ( state.failed ) return pkgName;

        pkgName = qualifiedIdentifier();
        if ( state.failed ) return pkgName;

        // Record the package name for editor paraphrasing, but only when
        // not speculatively backtracking.
        if ( state.backtracking == 0 ) {
            helper.setParaphrasesValue( DroolsParaphraseTypes.PACKAGE, pkgName );
        }

        // The trailing semicolon is optional.
        if ( input.LA( 1 ) == DRL5Lexer.SEMICOLON ) {
            match( input, DRL5Lexer.SEMICOLON, null, null, DroolsEditorType.SYMBOL );
            if ( state.failed ) return pkgName;
        }

    } catch ( RecognitionException re ) {
        reportError( re );
    } finally {
        helper.end( PackageDescrBuilder.class, pkg );
    }

    return pkgName;
}
java
/**
 * Lists the instances belonging to the given instance group of a cluster.
 * Convenience overload that builds the request object and delegates.
 *
 * @param clusterId the identifier of the cluster
 * @param instanceGroupId the identifier of the instance group
 * @return the response containing the matching instances
 */
public ListInstancesResponse listInstances(String clusterId, String instanceGroupId) {
    return listInstances(new ListInstancesRequest().withClusterId(clusterId).withInstanceGroupId(instanceGroupId));
}