repo_name
stringlengths
4
116
path
stringlengths
4
379
size
stringlengths
1
7
content
stringlengths
3
1.05M
license
stringclasses
15 values
GoogleContainerTools/jib
jib-cli/src/main/java/com/google/cloud/tools/jib/cli/jar/JarFiles.java
3975
/* * Copyright 2020 Google LLC. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package com.google.cloud.tools.jib.cli.jar; import com.google.cloud.tools.jib.api.InvalidImageReferenceException; import com.google.cloud.tools.jib.api.JibContainerBuilder; import com.google.cloud.tools.jib.api.buildplan.FileEntriesLayer; import com.google.cloud.tools.jib.cli.ArtifactProcessor; import com.google.cloud.tools.jib.cli.CommonCliOptions; import com.google.cloud.tools.jib.cli.CommonContainerConfigCliOptions; import com.google.cloud.tools.jib.cli.ContainerBuilders; import com.google.cloud.tools.jib.cli.Jar; import com.google.cloud.tools.jib.plugins.common.logging.ConsoleLogger; import java.io.IOException; import java.util.Collections; import java.util.List; /** Class to build a container representation from the contents of a jar file. */ public class JarFiles { private JarFiles() {} /** * Generates a {@link JibContainerBuilder} from contents of a jar file. 
* * @param processor jar processor * @param jarOptions jar cli options * @param commonCliOptions common cli options * @param commonContainerConfigCliOptions common command line options shared between jar and war * command * @param logger console logger * @return JibContainerBuilder * @throws IOException if I/O error occurs when opening the jar file or if temporary directory * provided doesn't exist * @throws InvalidImageReferenceException if the base image reference is invalid */ public static JibContainerBuilder toJibContainerBuilder( ArtifactProcessor processor, Jar jarOptions, CommonCliOptions commonCliOptions, CommonContainerConfigCliOptions commonContainerConfigCliOptions, ConsoleLogger logger) throws IOException, InvalidImageReferenceException { String imageReference = commonContainerConfigCliOptions.getFrom().orElseGet(() -> getDefaultBaseImage(processor)); JibContainerBuilder containerBuilder = ContainerBuilders.create(imageReference, Collections.emptySet(), commonCliOptions, logger); List<FileEntriesLayer> layers = processor.createLayers(); List<String> customEntrypoint = commonContainerConfigCliOptions.getEntrypoint(); List<String> entrypoint = customEntrypoint.isEmpty() ? 
processor.computeEntrypoint(jarOptions.getJvmFlags()) : customEntrypoint; containerBuilder .setEntrypoint(entrypoint) .setFileEntriesLayers(layers) .setExposedPorts(commonContainerConfigCliOptions.getExposedPorts()) .setVolumes(commonContainerConfigCliOptions.getVolumes()) .setEnvironment(commonContainerConfigCliOptions.getEnvironment()) .setLabels(commonContainerConfigCliOptions.getLabels()) .setProgramArguments(commonContainerConfigCliOptions.getProgramArguments()); commonContainerConfigCliOptions.getUser().ifPresent(containerBuilder::setUser); commonContainerConfigCliOptions.getFormat().ifPresent(containerBuilder::setFormat); commonContainerConfigCliOptions.getCreationTime().ifPresent(containerBuilder::setCreationTime); return containerBuilder; } private static String getDefaultBaseImage(ArtifactProcessor processor) { if (processor.getJavaVersion() <= 8) { return "eclipse-temurin:8-jre"; } if (processor.getJavaVersion() <= 11) { return "eclipse-temurin:11-jre"; } return "eclipse-temurin:17-jre"; } }
apache-2.0
apache/tapestry4
framework/src/test/org/apache/tapestry/services/impl/EngineFactoryTest.java
4424
// Copyright 2004, 2005 The Apache Software Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package org.apache.tapestry.services.impl; import java.util.Locale; import org.apache.hivemind.ApplicationRuntimeException; import org.apache.hivemind.ErrorLog; import org.apache.hivemind.impl.DefaultClassResolver; import org.apache.hivemind.test.HiveMindTestCase; import org.apache.tapestry.IEngine; import org.apache.tapestry.engine.AbstractEngine; import org.apache.tapestry.engine.BaseEngine; import org.apache.tapestry.spec.IApplicationSpecification; /** * Tests for {@link org.apache.tapestry.services.impl.EngineFactoryImpl}. 
* * @author Howard Lewis Ship * @since 4.0 */ public class EngineFactoryTest extends HiveMindTestCase { public void testUseDefault() { IApplicationSpecification spec = newSpec(); // Training trainGetEngineClassName(spec, null); EngineFactoryImpl f = new EngineFactoryImpl(); f.setApplicationSpecification(spec); f.setClassResolver(new DefaultClassResolver()); f.setDefaultEngineClassName(BaseEngine.class.getName()); replayControls(); f.initializeService(); IEngine result = f.constructNewEngineInstance(Locale.CANADA_FRENCH); assertTrue(result instanceof BaseEngine); assertEquals(Locale.CANADA_FRENCH, result.getLocale()); verifyControls(); } private void trainGetEngineClassName(IApplicationSpecification spec, String engineClassName) { spec.getEngineClassName(); setReturnValue(spec, engineClassName); } private IApplicationSpecification newSpec() { return (IApplicationSpecification) newMock(IApplicationSpecification.class); } public void testDefinedInSpec() { IApplicationSpecification spec = newSpec(); trainGetEngineClassName(spec, EngineFixture.class.getName()); EngineFactoryImpl f = new EngineFactoryImpl(); f.setApplicationSpecification(spec); f.setClassResolver(new DefaultClassResolver()); replayControls(); f.initializeService(); IEngine result = f.constructNewEngineInstance(Locale.CHINESE); assertTrue(result instanceof EngineFixture); assertEquals(Locale.CHINESE, result.getLocale()); verifyControls(); } public void testUnableToInstantiate() { IApplicationSpecification spec = newSpec(); // Training trainGetEngineClassName(spec, AbstractEngine.class.getName()); EngineFactoryImpl f = new EngineFactoryImpl(); f.setApplicationSpecification(spec); f.setClassResolver(new DefaultClassResolver()); replayControls(); f.initializeService(); try { f.constructNewEngineInstance(Locale.CHINESE); unreachable(); } catch (ApplicationRuntimeException ex) { assertExceptionSubstring( ex, "Unable to instantiate engine as instance of class org.apache.tapestry.engine.AbstractEngine"); } 
verifyControls(); } public void testInvalidClass() { IApplicationSpecification spec = newSpec(); trainGetEngineClassName(spec, "foo.XyzzYx"); ErrorLog log = (ErrorLog) newMock(ErrorLog.class); log.error("Engine class 'foo.XyzzYx' not found.", null, null); EngineFactoryImpl f = new EngineFactoryImpl(); f.setApplicationSpecification(spec); f.setClassResolver(new DefaultClassResolver()); f.setErrorLog(log); f.setDefaultEngineClassName(BaseEngine.class.getName()); replayControls(); f.initializeService(); IEngine result = f.constructNewEngineInstance(Locale.CANADA_FRENCH); assertTrue(result instanceof BaseEngine); verifyControls(); } }
apache-2.0
nus-ncl/services-in-one
common/src/main/java/sg/ncl/common/authentication/AuthenticationProperties.java
836
package sg.ncl.common.authentication; import lombok.Getter; import org.springframework.boot.context.properties.ConfigurationProperties; import java.util.HashMap; import java.util.Map; /** * Base class for configuring authentication. * * @author Christopher Zhong * @version 1.0 */ @ConfigurationProperties(prefix = AuthenticationProperties.PREFIX) @Getter public class AuthenticationProperties { static final String PREFIX = "ncl.authentication"; /** * By default, all URIs (a.k.a. endpoints) require authentication. * However, the map highlights the URIs that does not require authentication. * <p/> * For example, "/point"="get,post" means that the GET and POST methods for the URI "/point" does not require authentication. */ private final Map<String, String> uri = new HashMap<>(); }
apache-2.0
viant/toolbox
storage/scp/service.go
8543
package scp import ( "bytes" "errors" "fmt" "github.com/lunixbochs/vtclean" "github.com/viant/toolbox" "github.com/viant/toolbox/cred" "github.com/viant/toolbox/ssh" "github.com/viant/toolbox/storage" "io" "io/ioutil" "net/url" "os" "path" "strings" "sync" ) const ( defaultSSHPort = 22 verificationSizeThreshold = 1024 * 1024 ) //NoSuchFileOrDirectoryError represents no such file or directory error var NoSuchFileOrDirectoryError = errors.New("No such file or directory") const unrecognizedOption = "unrecognized option" type service struct { fileService storage.Service config *cred.Config services map[string]ssh.Service multiSessions map[string]ssh.MultiCommandSession mutex *sync.Mutex } func (s *service) runCommand(session ssh.MultiCommandSession, URL string, command string) (string, error) { s.mutex.Lock() defer s.mutex.Unlock() output, _ := session.Run(command, nil, 5000) var stdout = s.stdout(output) return stdout, nil } func (s *service) stdout(output string) string { var result = make([]string, 0) lines := strings.Split(output, "\n") for _, line := range lines { result = append(result, vtclean.Clean(line, false)) } return strings.Join(result, "\n") } func (s *service) getMultiSession(parsedURL *url.URL) ssh.MultiCommandSession { s.mutex.Lock() defer s.mutex.Unlock() return s.multiSessions[parsedURL.Host] } func (s *service) getService(parsedURL *url.URL) (ssh.Service, error) { port := toolbox.AsInt(parsedURL.Port()) if port == 0 { port = 22 } key := parsedURL.Host s.mutex.Lock() defer s.mutex.Unlock() if service, ok := s.services[key]; ok { return service, nil } service, err := ssh.NewService(parsedURL.Hostname(), toolbox.AsInt(port), s.config) if err != nil { return nil, err } s.services[key] = service s.multiSessions[key], err = service.OpenMultiCommandSession(nil) if err != nil { return nil, err } return service, nil } //List returns a list of object for supplied URL func (s *service) List(URL string) ([]storage.Object, error) { parsedURL, err := 
url.Parse(URL) if err != nil { return nil, err } if parsedURL.Host == "127.0.0.1" || parsedURL.Host == "127.0.0.1:22" { var fileURL = toolbox.FileSchema + parsedURL.Path return s.fileService.List(fileURL) } _, err = s.getService(parsedURL) if err != nil { return nil, err } commandSession := s.getMultiSession(parsedURL) canListWithTimeStyle := commandSession.System() != "darwin" var parser = &Parser{IsoTimeStyle: canListWithTimeStyle} var URLPath = parsedURL.Path var result = make([]storage.Object, 0) var lsCommand = "" if canListWithTimeStyle { lsCommand += "ls -dltr --time-style=full-iso " + URLPath } else { lsCommand += "ls -dltrT " + URLPath } output, _ := s.runCommand(commandSession, URL, lsCommand) var stdout = vtclean.Clean(string(output), false) if strings.Contains(stdout, "unrecognized option") { if canListWithTimeStyle { lsCommand = "ls -dltr --full-time " + URLPath output, _ = s.runCommand(commandSession, URL, lsCommand) stdout = vtclean.Clean(string(output), false) } } if strings.Contains(stdout, unrecognizedOption) { return nil, fmt.Errorf("unable to list files with: %v, %v", lsCommand, stdout) } if strings.Contains(stdout, "No such file or directory") { return result, NoSuchFileOrDirectoryError } objects, err := parser.Parse(parsedURL, stdout, false) if err != nil { return nil, err } if len(objects) == 1 && objects[0].FileInfo().IsDir() { output, _ = s.runCommand(commandSession, URL, lsCommand+" "+path.Join(URLPath, "*")) stdout = vtclean.Clean(string(output), false) directoryObjects, err := parser.Parse(parsedURL, stdout, true) if err != nil { return nil, err } if len(directoryObjects) > 0 { objects = append(objects, directoryObjects...) 
} } return objects, nil } func (s *service) Exists(URL string) (bool, error) { parsedURL, err := url.Parse(URL) if err != nil { return false, err } if parsedURL.Host == "127.0.0.1" || parsedURL.Host == "127.0.0.1:22" { var fileURL = toolbox.FileSchema + parsedURL.Path return s.fileService.Exists(fileURL) } _, err = s.getService(parsedURL) if err != nil { return false, err } commandSession := s.getMultiSession(parsedURL) output, _ := s.runCommand(commandSession, URL, "ls -dltr "+parsedURL.Path) if strings.Contains(string(output), "No such file or directory") { return false, nil } return true, nil } func (s *service) StorageObject(URL string) (storage.Object, error) { objects, err := s.List(URL) if err != nil { return nil, err } if len(objects) == 0 { return nil, NoSuchFileOrDirectoryError } return objects[0], nil } //Download returns reader for downloaded storage object func (s *service) Download(object storage.Object) (io.ReadCloser, error) { if object == nil { return nil, fmt.Errorf("object was nil") } parsedURL, err := url.Parse(object.URL()) if err != nil { return nil, err } if parsedURL.Host == "127.0.0.1" || parsedURL.Host == "127.0.0.1:22" { var fileURL = toolbox.FileSchema + parsedURL.Path storageObject, err := s.fileService.StorageObject(fileURL) if err != nil { return nil, err } return s.fileService.Download(storageObject) } port := toolbox.AsInt(parsedURL.Port()) if port == 0 { port = defaultSSHPort } service, err := s.getService(parsedURL) if err != nil { return nil, err } content, err := service.Download(parsedURL.Path) if err != nil { return nil, err } return ioutil.NopCloser(bytes.NewReader(content)), nil } //Upload uploads provided reader content for supplied URL. func (s *service) Upload(URL string, reader io.Reader) error { return s.UploadWithMode(URL, storage.DefaultFileMode, reader) } //Upload uploads provided reader content for supplied URL. 
func (s *service) UploadWithMode(URL string, mode os.FileMode, reader io.Reader) error { if mode == 0 { mode = storage.DefaultFileMode } parsedURL, err := url.Parse(URL) if err != nil { return err } if parsedURL.Host == "127.0.0.1" || parsedURL.Host == "127.0.0.1:22" { var fileURL = toolbox.FileSchema + parsedURL.Path return s.fileService.UploadWithMode(fileURL, mode, reader) } port := toolbox.AsInt(parsedURL.Port()) if port == 0 { port = defaultSSHPort } //service, err := ssh.NewService(parsedURL.Hostname(), toolbox.AsInt(port), s.config) service, err := s.getService(parsedURL) if err != nil { return err } //defer service.Close() content, err := ioutil.ReadAll(reader) if err != nil { return fmt.Errorf("failed to upload - unable read: %v", err) } err = service.Upload(parsedURL.Path, mode, content) if err != nil { return fmt.Errorf("failed to upload: %v %v", URL, err) } return err } func (s *service) Register(schema string, service storage.Service) error { return errors.New("unsupported") } func (s *service) Close() error { for _, service := range s.services { service.Close() } for _, session := range s.multiSessions { session.Close() } return nil } //Delete removes passed in storage object func (s *service) Delete(object storage.Object) error { parsedURL, err := url.Parse(object.URL()) if err != nil { return err } if parsedURL.Host == "127.0.0.1" || parsedURL.Host == "127.0.0.1:22" { var fileURL = toolbox.FileSchema + parsedURL.Path storageObject, err := s.fileService.StorageObject(fileURL) if err != nil { return err } return s.fileService.Delete(storageObject) } port := toolbox.AsInt(parsedURL.Port()) if port == 0 { port = defaultSSHPort } service, err := ssh.NewService(parsedURL.Hostname(), toolbox.AsInt(port), s.config) if err != nil { return err } //defer service.Close() session, err := service.NewSession() if err != nil { return err } defer session.Close() if parsedURL.Path == "/" { return fmt.Errorf("invalid removal path: %v", parsedURL.Path) } _, err = 
session.Output("rm -rf " + parsedURL.Path) return err } //DownloadWithURL downloads content for passed in object URL func (s *service) DownloadWithURL(URL string) (io.ReadCloser, error) { object, err := s.StorageObject(URL) if err != nil { return nil, err } return s.Download(object) } //NewService create a new gc storage service func NewService(config *cred.Config) *service { return &service{ services: make(map[string]ssh.Service), config: config, multiSessions: make(map[string]ssh.MultiCommandSession), mutex: &sync.Mutex{}, fileService: storage.NewFileStorage(), } }
apache-2.0
drusellers/MassTransit
src/Persistence/MassTransit.MongoDbIntegration.Tests/Saga/MongoDbSagaRepositoryTestsForSendQuery.cs
3874
// Copyright 2007-2016 Chris Patterson, Dru Sellers, Travis Smith, et. al. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. namespace MassTransit.MongoDbIntegration.Tests.Saga { using System; using System.Threading.Tasks; using MassTransit.Saga; using MongoDbIntegration.Saga; using MongoDbIntegration.Saga.Context; using MongoDbIntegration.Saga.Pipeline; using MongoDB.Driver; using Moq; using NUnit.Framework; using Pipeline; [TestFixture] public class MongoDbSagaRepositoryTestsForSendQuery { [Test] public void ThenMissingPipeNotCalled() { _sagaPolicy.Verify(x => x.Missing(_sagaQueryConsumeContext.Object, It.IsAny<MissingPipe<SimpleSaga, InitiateSimpleSaga>>()), Times.Never); } [Test] public void ThenSagaSentToInstance() { _sagaPolicy.Verify(x => x.Existing(_sagaConsumeContext.Object, _nextPipe.Object)); } [Test] public async Task ThenVersionIncremeted() { var saga = await SagaRepository.GetSaga(_correlationId); Assert.That(saga.Version, Is.EqualTo(1)); } Guid _correlationId; Mock<ISagaPolicy<SimpleSaga, InitiateSimpleSaga>> _sagaPolicy; Mock<SagaQueryConsumeContext<SimpleSaga, InitiateSimpleSaga>> _sagaQueryConsumeContext; Mock<IPipe<SagaConsumeContext<SimpleSaga, InitiateSimpleSaga>>> _nextPipe; Mock<SagaConsumeContext<SimpleSaga, InitiateSimpleSaga>> _sagaConsumeContext; Mock<IMongoDbSagaConsumeContextFactory> _sagaConsumeContextFactory; [OneTimeSetUp] public async Task GivenAMongoDbSagaRepository_WhenSendingQuery() { _correlationId = Guid.NewGuid(); var saga = new 
SimpleSaga {CorrelationId = _correlationId}; await SagaRepository.InsertSaga(saga); _sagaQueryConsumeContext = new Mock<SagaQueryConsumeContext<SimpleSaga, InitiateSimpleSaga>>(); _sagaQueryConsumeContext.Setup(x => x.Query.FilterExpression).Returns(x => x.CorrelationId == _correlationId); _sagaPolicy = new Mock<ISagaPolicy<SimpleSaga, InitiateSimpleSaga>>(); _nextPipe = new Mock<IPipe<SagaConsumeContext<SimpleSaga, InitiateSimpleSaga>>>(); _sagaConsumeContext = new Mock<SagaConsumeContext<SimpleSaga, InitiateSimpleSaga>>(); _sagaConsumeContext.Setup(x => x.CorrelationId).Returns(_correlationId); _sagaConsumeContextFactory = new Mock<IMongoDbSagaConsumeContextFactory>(); _sagaConsumeContextFactory.Setup( m => m.Create(It.IsAny<IMongoCollection<SimpleSaga>>(), _sagaQueryConsumeContext.Object, It.Is<SimpleSaga>(x => x.CorrelationId == _correlationId), true)).Returns(_sagaConsumeContext.Object); var repository = new MongoDbSagaRepository<SimpleSaga>(SagaRepository.Instance, _sagaConsumeContextFactory.Object); await repository.SendQuery(_sagaQueryConsumeContext.Object, _sagaPolicy.Object, _nextPipe.Object); } [OneTimeTearDown] public async Task Kill() { await SagaRepository.DeleteSaga(_correlationId); } } }
apache-2.0
KalinduDN/kalindudn.github.io
react/features/large-video/components/LargeVideo.web.js
2858
/* @flow */ import React, { Component } from 'react'; import { Watermarks } from '../../base/react'; import { VideoStatusLabel } from '../../video-status-label'; /** * Implements a React {@link Component} which represents the large video (a.k.a. * the conference participant who is on the local stage) on Web/React. * * @extends Component */ export default class LargeVideo extends Component { /** * Implements React's {@link Component#render()}. * * @inheritdoc * @returns {ReactElement} */ render() { return ( <div className = 'videocontainer' id = 'largeVideoContainer'> <div id = 'sharedVideo'> <div id = 'sharedVideoIFrame' /> </div> <div id = 'etherpad' /> <Watermarks /> <div id = 'dominantSpeaker'> <div className = 'dynamic-shadow' /> <img id = 'dominantSpeakerAvatar' src = '' /> </div> <span id = 'remoteConnectionMessage' /> <div> <div className = 'video_blurred_container'> <video autoPlay = { true } id = 'largeVideoBackground' muted = 'true' /> </div> { /** * FIXME: the architecture of elements related to the * large video and the naming. The background is not * part of largeVideoWrapper because we are controlling * the size of the video through largeVideoWrapper. * That's why we need another container for the the * background and the largeVideoWrapper in order to * hide/show them. */ } <div id = 'largeVideoWrapper'> <video autoPlay = { true } id = 'largeVideo' muted = { true } /> </div> </div> <span id = 'localConnectionMessage' /> <VideoStatusLabel /> <span className = 'video-state-indicator centeredVideoLabel' id = 'recordingLabel'> <span id = 'recordingLabelText' /> <img className = 'recordingSpinner' id = 'recordingSpinner' src = 'images/spin.svg' /> </span> </div> ); } }
apache-2.0
fredska/MapGenerator
MapGenerator/src/com/fska/map/screens/MapGenScreen.java
347
package com.fska.map.screens; import com.badlogic.gdx.Screen; public enum MapGenScreen { VORONOI { @Override protected Screen getScreenInstance(){ return new VoronoiScreen(); } }, PERLIN{ @Override protected Screen getScreenInstance(){ return new PerlinNoiseScreen(); } }; protected abstract Screen getScreenInstance(); }
apache-2.0
worldline/TrainingJEEAngular
trainingapp/src/components/slides/ecosystem/typescript1/typescript1.ts
555
import {Component, Inject, ElementRef} from '@angular/core'; import {SlideCommon} from '../../slideCommon/slideCommon'; import {HOST_SLIDE_CONTAINER_CLASS} from '../../../../services/constants'; @Component({ selector:'Typescript1', templateUrl:'src/components/slides/ecosystem/typescript1/typescript1.html', styleUrls: ['src/components/slides/ecosystem/typescript1/typescript1.css'] }) export class Typescript1 extends SlideCommon{ constructor(elt: ElementRef, @Inject(HOST_SLIDE_CONTAINER_CLASS) hostClass: string) { super(elt, hostClass); } }
apache-2.0
tochti/docMa-handler
docs/search.go
3977
package docs import ( "bytes" "fmt" "strings" "time" "github.com/tochti/docMa-handler/labels" "gopkg.in/gorp.v1" ) type ( Interval struct { From time.Time `json:"from"` To time.Time `json:"to"` } SearchForm struct { Labels string `json:"labels"` DocNumbers string `json:"doc_numbers"` DateOfScan Interval `json:"date_of_scan"` } ) func SearchDocs(db *gorp.DbMap, searchForm SearchForm) ([]Doc, error) { // If no search param set return if len(searchForm.Labels) == 0 && len(searchForm.DocNumbers) == 0 && searchForm.DateOfScan.From.IsZero() && searchForm.DateOfScan.To.IsZero() { return []Doc{}, nil } selParam := []interface{}{} filters := []*bytes.Buffer{} froms := []string{} // Create labels filter if len(searchForm.Labels) > 0 { froms = append(froms, fmt.Sprintf("labels as %v", labels.LabelsTable)) froms = append(froms, fmt.Sprintf("docs_labels as %v", DocsLabelsTable)) l := parseQueryString(searchForm.Labels) filter := bytes.NewBufferString("labels.name = ?") selParam = append(selParam, l[0]) for _, v := range l[1:] { selParam = append(selParam, v) filter.WriteString(" OR labels.name = ?") } filters = append(filters, bytes.NewBufferString( fmt.Sprintf(`( (%v) AND docs_labels.label_id = labels.id AND docs.id = docs_labels.doc_id )`, filter.String()), ), ) } // Create doc number filter if len(searchForm.DocNumbers) > 0 { froms = append(froms, fmt.Sprintf("doc_numbers as %v", DocNumbersTable)) l := parseQueryString(searchForm.DocNumbers) filter := bytes.NewBufferString("doc_numbers.number = ?") selParam = append(selParam, l[0]) for _, v := range l[1:] { selParam = append(selParam, v) filter.WriteString(" OR doc_numbers.number = ?") } filters = append(filters, bytes.NewBufferString( fmt.Sprintf(`( (%v) AND docs.id = doc_numbers.doc_id )`, filter.String()), ), ) } // Create date of scan filter if !searchForm.DateOfScan.From.IsZero() || !searchForm.DateOfScan.To.IsZero() { if !searchForm.DateOfScan.From.IsZero() && !searchForm.DateOfScan.To.IsZero() { // Filter from x to y 
filters = append(filters, bytes.NewBufferString("(docs.date_of_scan BETWEEN ? AND ?)"), ) selParam = append(selParam, searchForm.DateOfScan.From, searchForm.DateOfScan.To, ) } else if !searchForm.DateOfScan.From.IsZero() { // Filter from X to infinity filters = append(filters, bytes.NewBufferString("(docs.date_of_scan >= ?)"), ) selParam = append(selParam, searchForm.DateOfScan.From) } else if !searchForm.DateOfScan.To.IsZero() { // Filter -infinity to X filters = append(filters, bytes.NewBufferString("(docs.date_of_scan <= ?)"), ) selParam = append(selParam, searchForm.DateOfScan.To) } } sel := bytes.NewBufferString(` SELECT docs.id, docs.name, docs.barcode, docs.date_of_scan, docs.date_of_receipt`) sel.WriteString(fmt.Sprintf(` FROM docs as %v `, DocsTable)) if len(froms) == 1 { sel.WriteString(fmt.Sprintf(",%v\n", froms[0])) } else if len(froms) > 1 { for _, v := range froms[:len(froms)-1] { sel.WriteString(fmt.Sprintf(",\n%v\n", v)) } sel.WriteString(fmt.Sprintf(",%v\n", froms[len(froms)-1])) } sel.WriteString("WHERE\n") sel.WriteString(filters[0].String()) for _, v := range filters[1:] { sel.WriteString(fmt.Sprintf("\t\nAND %v", v.String())) } sel.WriteString("\nGROUP BY docs.name") //fmt.Println(sel) //fmt.Println(selParam) r := []Doc{} _, err := db.Select(&r, sel.String(), selParam...) if err != nil { return []Doc{}, err } return r, nil } func dateSQLFormat(t time.Time) string { return fmt.Sprintf("%v-%v-%v 00:00:00", t.Year(), t.Day(), t.Day(), ) } func parseQueryString(str string) []string { l := strings.Split(str, ",") for i, v := range l { l[i] = strings.TrimSpace(v) if l[i] == "" { l = append(l[0:i], l[i+1:]...) } } return l }
apache-2.0
code4wt/nutch-learning
src/java/org/apache/nutch/indexer/IndexingFilters.java
2222
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.nutch.indexer; // Commons Logging imports import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.nutch.plugin.PluginRepository; import org.apache.nutch.parse.Parse; import org.apache.hadoop.conf.Configuration; import org.apache.nutch.crawl.CrawlDatum; import org.apache.nutch.crawl.Inlinks; import org.apache.hadoop.io.Text; import java.lang.invoke.MethodHandles; /** Creates and caches {@link IndexingFilter} implementing plugins. */ public class IndexingFilters { public static final String INDEXINGFILTER_ORDER = "indexingfilter.order"; private static final Logger LOG = LoggerFactory .getLogger(MethodHandles.lookup().lookupClass()); private IndexingFilter[] indexingFilters; public IndexingFilters(Configuration conf) { indexingFilters = (IndexingFilter[]) PluginRepository.get(conf) .getOrderedPlugins(IndexingFilter.class, IndexingFilter.X_POINT_ID, INDEXINGFILTER_ORDER); } /** Run all defined filters. 
*/ public NutchDocument filter(NutchDocument doc, Parse parse, Text url, CrawlDatum datum, Inlinks inlinks) throws IndexingException { for (int i = 0; i < this.indexingFilters.length; i++) { doc = this.indexingFilters[i].filter(doc, parse, url, datum, inlinks); // break the loop if an indexing filter discards the doc if (doc == null) return null; } return doc; } }
apache-2.0
opencb/bionetdb
bionetdb-server/src/main/java/org/opencb/bionetdb/server/exception/VersionException.java
789
/* * Copyright 2015 OpenCB * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.opencb.bionetdb.server.exception; @SuppressWarnings("serial") public class VersionException extends Exception { public VersionException(String msg) { super(msg); } }
apache-2.0
Sloy/gallego
src/main/java/com/sloydev/gallego/Supplier.java
2233
/* * Copyright (C) 2007 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package com.sloydev.gallego; /** * A class that can supply objects of a single type; a pre-Java-8 version of {@code * java.util.function.Supplier}. Semantically, this could be a factory, generator, builder, * closure, or something else entirely. No guarantees are implied by this interface. * <p> * <p>See the Guava User Guide article on <a href= * "https://github.com/google/guava/wiki/FunctionalExplained">the use of functional types</a>. * <p> * <h3>For Java 8+ users</h3> * <p> * <p>This interface is now a legacy type. Use {@code java.util.function.Supplier} (or the * appropriate primitive specialization such as {@code IntSupplier}) instead whenever possible. * Otherwise, at least reduce <i>explicit</i> dependencies on this type by using lambda expressions * or method references instead of classes, leaving your code easier to migrate in the future. * <p> * <p>To use an existing supplier instance (say, named {@code supplier}) in a context where the * <i>other type</i> of supplier is expected, use the method reference {@code supplier::get}. A * future version of {@code com.google.common.base.Supplier} will be made to <i>extend</i> {@code * java.util.function.Supplier}, making conversion code necessary only in one direction. At that * time, this interface will be officially discouraged. 
* * @author Harry Heymann * @since 2.0 */ public interface Supplier<T> { /** * Retrieves an instance of the appropriate type. The returned object may or may not be a new * instance, depending on the implementation. * * @return an instance of the appropriate type */ T get(); }
apache-2.0
nipunas/blogplatform
WebApp-B2C-DotNet/Controllers/HomeController.cs
1430
using BlogOperations.Operations; using System; using System.Collections.Generic; using System.Linq; using System.Security.Claims; using System.Web; using System.Web.Mvc; namespace BlogPlatform.Controllers { public class HomeController : Controller { public ActionResult Index() { BlogOperator oper = new BlogOperator(); Claim oidClaim = ClaimsPrincipal.Current.Claims.FirstOrDefault(c => c.Type == "http://schemas.microsoft.com/identity/claims/objectidentifier"); bool hasBlog = oidClaim != null ? oper.UserHasBlog(oidClaim.Value) : false; if (hasBlog) { ViewBag.HasBlog = true; } else { ViewBag.HasBlog = false; } return View(); } // You can use the PolicyAuthorize decorator to execute a certain policy if the user is not already signed into the app. [Authorize] public ActionResult Claims() { Claim displayName = ClaimsPrincipal.Current.FindFirst(ClaimsPrincipal.Current.Identities.First().NameClaimType); ViewBag.DisplayName = displayName != null ? displayName.Value : string.Empty; return View(); } public ActionResult Error(string message) { ViewBag.Message = message; return View("Error"); } } }
apache-2.0
ern/elasticsearch
server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java
13997
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.search.aggregations.bucket.range;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.logging.DeprecationCategory;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.CardinalityUpperBound;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;

import java.io.IOException;
import java.time.ZonedDateTime;
import java.util.List;
import java.util.Map;

/**
 * Builder for the {@code date_range} aggregation. Buckets documents into user-supplied date
 * ranges, each with an inclusive lower bound and an exclusive upper bound. Bounds may be given
 * as date strings, as epoch-millis doubles, or as {@link ZonedDateTime} instances.
 */
public class DateRangeAggregationBuilder extends AbstractRangeBuilder<DateRangeAggregationBuilder, RangeAggregator.Range> {
    public static final String NAME = "date_range";
    public static final ValuesSourceRegistry.RegistryKey<RangeAggregatorSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(
        NAME,
        RangeAggregatorSupplier.class
    );

    // Parses the aggregation's XContent (the "keyed" flag and the "ranges" array) into a builder.
    public static final ObjectParser<DateRangeAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(
        NAME,
        DateRangeAggregationBuilder::new
    );
    static {
        ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, true);
        PARSER.declareBoolean(DateRangeAggregationBuilder::keyed, RangeAggregator.KEYED_FIELD);
        PARSER.declareObjectArray((agg, ranges) -> {
            for (RangeAggregator.Range range : ranges) {
                agg.addRange(range);
            }
        }, (p, c) -> RangeAggregator.Range.PARSER.parse(p, null), RangeAggregator.RANGES_FIELD);
    }

    private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(DateRangeAggregationBuilder.class);

    /**
     * Registers the aggregator implementations with the values-source registry: the regular
     * implementation for numeric and date fields, plus a deprecated-but-working path for
     * boolean fields that logs a deprecation warning before delegating.
     */
    public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
        builder.register(REGISTRY_KEY, List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE), RangeAggregator::build, true);

        builder.register(
            REGISTRY_KEY,
            CoreValuesSourceType.BOOLEAN,
            (
                String name,
                AggregatorFactories factories,
                ValuesSourceConfig valuesSourceConfig,
                InternalRange.Factory<?, ?> rangeFactory,
                RangeAggregator.Range[] ranges,
                boolean keyed,
                AggregationContext context,
                Aggregator parent,
                CardinalityUpperBound cardinality,
                Map<String, Object> metadata) -> {
                DEPRECATION_LOGGER.deprecate(
                    DeprecationCategory.AGGREGATIONS,
                    "Range-boolean",
                    "Running Range or DateRange aggregations on [boolean] fields is deprecated"
                );
                return RangeAggregator.build(
                    name,
                    factories,
                    valuesSourceConfig,
                    rangeFactory,
                    ranges,
                    keyed,
                    context,
                    parent,
                    cardinality,
                    metadata
                );
            },
            true
        );
    }

    public DateRangeAggregationBuilder(String name) {
        super(name, InternalDateRange.FACTORY);
    }

    protected DateRangeAggregationBuilder(
        DateRangeAggregationBuilder clone,
        AggregatorFactories.Builder factoriesBuilder,
        Map<String, Object> metadata
    ) {
        super(clone, factoriesBuilder, metadata);
    }

    @Override
    protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
        return new DateRangeAggregationBuilder(this, factoriesBuilder, metadata);
    }

    /**
     * Read from a stream.
     */
    public DateRangeAggregationBuilder(StreamInput in) throws IOException {
        super(in, InternalDateRange.FACTORY, RangeAggregator.Range::new);
    }

    @Override
    public String getType() {
        return NAME;
    }

    @Override
    protected ValuesSourceRegistry.RegistryKey<?> getRegistryKey() {
        return REGISTRY_KEY;
    }

    @Override
    protected ValuesSourceType defaultValueSourceType() {
        return CoreValuesSourceType.DATE;
    }

    /**
     * Add a new range to this aggregation.
     *
     * @param key
     *            the key to use for this range in the response
     * @param from
     *            the lower bound on the dates, inclusive
     * @param to
     *            the upper bound on the dates, exclusive
     */
    public DateRangeAggregationBuilder addRange(String key, String from, String to) {
        addRange(new RangeAggregator.Range(key, from, to));
        return this;
    }

    /**
     * Same as {@link #addRange(String, String, String)} but the key will be
     * automatically generated based on <code>from</code> and <code>to</code>.
     */
    public DateRangeAggregationBuilder addRange(String from, String to) {
        return addRange(null, from, to);
    }

    /**
     * Add a new range with no lower bound.
     *
     * @param key
     *            the key to use for this range in the response
     * @param to
     *            the upper bound on the dates, exclusive
     */
    public DateRangeAggregationBuilder addUnboundedTo(String key, String to) {
        addRange(new RangeAggregator.Range(key, null, to));
        return this;
    }

    /**
     * Same as {@link #addUnboundedTo(String, String)} but the key will be
     * computed automatically.
     */
    public DateRangeAggregationBuilder addUnboundedTo(String to) {
        return addUnboundedTo(null, to);
    }

    /**
     * Add a new range with no upper bound.
     *
     * @param key
     *            the key to use for this range in the response
     * @param from
     *            the lower bound on the dates, inclusive
     */
    public DateRangeAggregationBuilder addUnboundedFrom(String key, String from) {
        addRange(new RangeAggregator.Range(key, from, null));
        return this;
    }

    /**
     * Same as {@link #addUnboundedFrom(String, String)} but the key will be
     * computed automatically.
     */
    public DateRangeAggregationBuilder addUnboundedFrom(String from) {
        return addUnboundedFrom(null, from);
    }

    /**
     * Add a new range to this aggregation.
     *
     * @param key
     *            the key to use for this range in the response
     * @param from
     *            the lower bound on the dates, inclusive
     * @param to
     *            the upper bound on the dates, exclusive
     */
    public DateRangeAggregationBuilder addRange(String key, double from, double to) {
        addRange(new RangeAggregator.Range(key, from, to));
        return this;
    }

    /**
     * Same as {@link #addRange(String, double, double)} but the key will be
     * automatically generated based on <code>from</code> and <code>to</code>.
     */
    public DateRangeAggregationBuilder addRange(double from, double to) {
        return addRange(null, from, to);
    }

    /**
     * Add a new range with no lower bound.
     *
     * @param key
     *            the key to use for this range in the response
     * @param to
     *            the upper bound on the dates, exclusive
     */
    public DateRangeAggregationBuilder addUnboundedTo(String key, double to) {
        addRange(new RangeAggregator.Range(key, null, to));
        return this;
    }

    /**
     * Same as {@link #addUnboundedTo(String, double)} but the key will be
     * computed automatically.
     */
    public DateRangeAggregationBuilder addUnboundedTo(double to) {
        return addUnboundedTo(null, to);
    }

    /**
     * Add a new range with no upper bound.
     *
     * @param key
     *            the key to use for this range in the response
     * @param from
     *            the lower bound on the dates, inclusive
     */
    public DateRangeAggregationBuilder addUnboundedFrom(String key, double from) {
        addRange(new RangeAggregator.Range(key, from, null));
        return this;
    }

    /**
     * Same as {@link #addUnboundedFrom(String, double)} but the key will be
     * computed automatically.
     */
    public DateRangeAggregationBuilder addUnboundedFrom(double from) {
        return addUnboundedFrom(null, from);
    }

    /**
     * Add a new range to this aggregation.
     *
     * @param key
     *            the key to use for this range in the response
     * @param from
     *            the lower bound on the dates, inclusive
     * @param to
     *            the upper bound on the dates, exclusive
     */
    public DateRangeAggregationBuilder addRange(String key, ZonedDateTime from, ZonedDateTime to) {
        addRange(new RangeAggregator.Range(key, convertDateTime(from), convertDateTime(to)));
        return this;
    }

    // Converts a date-time bound to the epoch-millis double form the range machinery works with;
    // a null bound (open-ended range) stays null.
    private static Double convertDateTime(ZonedDateTime dateTime) {
        if (dateTime == null) {
            return null;
        } else {
            return (double) dateTime.toInstant().toEpochMilli();
        }
    }

    /**
     * Same as {@link #addRange(String, ZonedDateTime, ZonedDateTime)} but the key will be
     * automatically generated based on <code>from</code> and <code>to</code>.
     */
    public DateRangeAggregationBuilder addRange(ZonedDateTime from, ZonedDateTime to) {
        return addRange(null, from, to);
    }

    /**
     * Add a new range with no lower bound.
     *
     * @param key
     *            the key to use for this range in the response
     * @param to
     *            the upper bound on the dates, exclusive
     */
    public DateRangeAggregationBuilder addUnboundedTo(String key, ZonedDateTime to) {
        addRange(new RangeAggregator.Range(key, null, convertDateTime(to)));
        return this;
    }

    /**
     * Same as {@link #addUnboundedTo(String, ZonedDateTime)} but the key will be
     * computed automatically.
     */
    public DateRangeAggregationBuilder addUnboundedTo(ZonedDateTime to) {
        return addUnboundedTo(null, to);
    }

    /**
     * Add a new range with no upper bound.
     *
     * @param key
     *            the key to use for this range in the response
     * @param from
     *            the lower bound on the dates, inclusive
     */
    public DateRangeAggregationBuilder addUnboundedFrom(String key, ZonedDateTime from) {
        addRange(new RangeAggregator.Range(key, convertDateTime(from), null));
        return this;
    }

    /**
     * Same as {@link #addUnboundedFrom(String, ZonedDateTime)} but the key will be
     * computed automatically.
     */
    public DateRangeAggregationBuilder addUnboundedFrom(ZonedDateTime from) {
        return addUnboundedFrom(null, from);
    }

    @Override
    protected DateRangeAggregatorFactory innerBuild(
        AggregationContext context,
        ValuesSourceConfig config,
        AggregatorFactory parent,
        AggregatorFactories.Builder subFactoriesBuilder
    ) throws IOException {
        RangeAggregatorSupplier aggregatorSupplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config);
        // We need to call processRanges here so they are parsed and we know whether `now` has been used before we make
        // the decision of whether to cache the request
        RangeAggregator.Range[] ranges = processRanges(range -> {
            DocValueFormat parser = config.format();
            assert parser != null;
            double from = range.getFrom();
            double to = range.getTo();
            String fromAsString = range.getFromAsString();
            String toAsString = range.getToAsString();
            // String bounds take precedence over the numeric ones when both are present.
            if (fromAsString != null) {
                from = parser.parseDouble(fromAsString, false, context::nowInMillis);
            } else if (Double.isFinite(from)) {
                // from/to provided as double should be converted to string and parsed regardless to support
                // different formats like `epoch_millis` vs. `epoch_second` with numeric input
                from = parser.parseDouble(Long.toString((long) from), false, context::nowInMillis);
            }
            if (toAsString != null) {
                to = parser.parseDouble(toAsString, false, context::nowInMillis);
            } else if (Double.isFinite(to)) {
                to = parser.parseDouble(Long.toString((long) to), false, context::nowInMillis);
            }
            return new RangeAggregator.Range(range.getKey(), from, fromAsString, to, toAsString);
        });
        if (ranges.length == 0) {
            throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation");
        }
        return new DateRangeAggregatorFactory(
            name,
            config,
            ranges,
            keyed,
            rangeFactory,
            context,
            parent,
            subFactoriesBuilder,
            metadata,
            aggregatorSupplier
        );
    }
}
apache-2.0
JavaSaBr/jME3-SpaceShift-Editor
src/main/java/com/ss/editor/ui/component/creator/impl/material/MaterialFileCreator.java
4309
package com.ss.editor.ui.component.creator.impl.material;

import static com.ss.editor.FileExtensions.JME_MATERIAL;
import static com.ss.rlib.common.util.ObjectUtils.notNull;
import static java.nio.file.StandardOpenOption.*;
import com.jme3.material.Material;
import com.ss.editor.FileExtensions;
import com.ss.editor.Messages;
import com.ss.editor.annotation.BackgroundThread;
import com.ss.editor.annotation.FromAnyThread;
import com.ss.editor.annotation.FxThread;
import com.ss.editor.extension.property.EditablePropertyType;
import com.ss.editor.manager.ResourceManager;
import com.ss.editor.plugin.api.file.creator.GenericFileCreator;
import com.ss.editor.plugin.api.property.PropertyDefinition;
import com.ss.editor.ui.component.creator.FileCreatorDescription;
import com.ss.editor.util.EditorUtil;
import com.ss.editor.util.MaterialSerializer;
import com.ss.rlib.common.util.StringUtils;
import com.ss.rlib.common.util.VarTable;
import com.ss.rlib.common.util.array.Array;
import com.ss.rlib.common.util.array.ArrayFactory;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.io.IOException;
import java.io.PrintWriter;
import java.nio.file.Files;
import java.nio.file.Path;

/**
 * The creator to create a new material. Lets the user pick a material definition (.j3md) from
 * the available resources and writes a serialized default {@link Material} built from it.
 *
 * @author JavaSaBr
 */
public class MaterialFileCreator extends GenericFileCreator {

    public static final FileCreatorDescription DESCRIPTION = new FileCreatorDescription();

    private static final ResourceManager RESOURCE_MANAGER = ResourceManager.getInstance();

    // Preferred definitions, tried in this order before falling back to the first available one.
    private static final String PBR_MAT_DEF = "Common/MatDefs/Light/PBRLighting.j3md";
    private static final String LIGHTING_MAT_DEF = "Common/MatDefs/Light/Lighting.j3md";

    // Key under which the chosen material definition is stored in the creator's VarTable.
    private static final String PROP_MAT_DEF = "matDef";

    static {
        DESCRIPTION.setFileDescription(Messages.MATERIAL_FILE_CREATOR_FILE_DESCRIPTION);
        DESCRIPTION.setConstructor(MaterialFileCreator::new);
    }

    /**
     * The list of available definitions.
     * Populated lazily in {@link #getPropertyDefinitions()}; null until then.
     */
    @Nullable
    private Array<String> definitions;

    @Override
    @FromAnyThread
    protected @NotNull String getTitleText() {
        return Messages.MATERIAL_FILE_CREATOR_TITLE;
    }

    @Override
    @FromAnyThread
    protected @NotNull String getFileExtension() {
        return JME_MATERIAL;
    }

    /**
     * Builds the single creator property: a string-from-list selector over all available
     * material definitions, defaulting to PBR, then Phong lighting, then the first found.
     */
    @Override
    @FromAnyThread
    protected @NotNull Array<PropertyDefinition> getPropertyDefinitions() {

        definitions = RESOURCE_MANAGER.getAvailableResources(FileExtensions.JME_MATERIAL_DEFINITION);

        String def;

        if (definitions.contains(PBR_MAT_DEF)) {
            def = PBR_MAT_DEF;
        } else if (definitions.contains(LIGHTING_MAT_DEF)) {
            def = LIGHTING_MAT_DEF;
        } else {
            def = definitions.first();
        }

        var result = ArrayFactory.<PropertyDefinition>newArray(PropertyDefinition.class);
        result.add(new PropertyDefinition(EditablePropertyType.STRING_FROM_LIST,
                Messages.MATERIAL_FILE_CREATOR_MATERIAL_TYPE_LABEL, PROP_MAT_DEF, def, definitions));

        return result;
    }

    /**
     * Get the list of available definitions.
     *
     * @return the list of available definitions.
     */
    @FromAnyThread
    private @NotNull Array<String> getDefinitions() {
        return notNull(definitions);
    }

    // Rejects a selection that is empty or not among the discovered definitions before
    // delegating to the generic validation.
    @Override
    @FxThread
    protected boolean validate(@NotNull VarTable vars) {

        var matDef = vars.get(PROP_MAT_DEF, String.class, StringUtils.EMPTY);

        if (matDef.isEmpty() || !getDefinitions().contains(matDef)) {
            return false;
        }

        return super.validate(vars);
    }

    /**
     * Creates a default material from the selected definition, serializes it to text and
     * writes it to the result file (creating or truncating it).
     *
     * @throws IOException if writing the file fails.
     */
    @Override
    @BackgroundThread
    protected void writeData(@NotNull VarTable vars, @NotNull Path resultFile) throws IOException {
        super.writeData(vars, resultFile);

        var assetManager = EditorUtil.getAssetManager();
        var matDef = vars.get(PROP_MAT_DEF, String.class);

        var material = new Material(assetManager, matDef);
        // NOTE(review): presumably this initializes the additional render state so it is
        // included in the serialized output — confirm against MaterialSerializer.
        material.getAdditionalRenderState();

        var materialContent = MaterialSerializer.serializeToString(material);

        try (var out = new PrintWriter(Files.newOutputStream(resultFile, WRITE, TRUNCATE_EXISTING, CREATE))) {
            out.print(materialContent);
        }
    }
}
apache-2.0
drankye/directory-server
core-api/src/main/java/org/apache/directory/server/core/api/EntryToResponseCursor.java
8409
/*
 *   Licensed to the Apache Software Foundation (ASF) under one
 *   or more contributor license agreements.  See the NOTICE file
 *   distributed with this work for additional information
 *   regarding copyright ownership.  The ASF licenses this file
 *   to you under the Apache License, Version 2.0 (the
 *   "License"); you may not use this file except in compliance
 *   with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing,
 *   software distributed under the License is distributed on an
 *   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *   KIND, either express or implied.  See the License for the
 *   specific language governing permissions and limitations
 *   under the License.
 *
 */
package org.apache.directory.server.core.api;


import java.util.Iterator;
import java.util.Map;

import org.apache.directory.api.ldap.model.constants.Loggers;
import org.apache.directory.api.ldap.model.cursor.AbstractCursor;
import org.apache.directory.api.ldap.model.cursor.ClosureMonitor;
import org.apache.directory.api.ldap.model.cursor.Cursor;
import org.apache.directory.api.ldap.model.cursor.CursorException;
import org.apache.directory.api.ldap.model.cursor.SearchCursor;
import org.apache.directory.api.ldap.model.entry.Entry;
import org.apache.directory.api.ldap.model.exception.LdapException;
import org.apache.directory.api.ldap.model.message.Control;
import org.apache.directory.api.ldap.model.message.IntermediateResponse;
import org.apache.directory.api.ldap.model.message.LdapResult;
import org.apache.directory.api.ldap.model.message.Referral;
import org.apache.directory.api.ldap.model.message.Response;
import org.apache.directory.api.ldap.model.message.ResultCodeEnum;
import org.apache.directory.api.ldap.model.message.ResultResponse;
import org.apache.directory.api.ldap.model.message.SearchRequest;
import org.apache.directory.api.ldap.model.message.SearchResultDone;
import org.apache.directory.api.ldap.model.message.SearchResultDoneImpl;
import org.apache.directory.api.ldap.model.message.SearchResultEntry;
import org.apache.directory.api.ldap.model.message.SearchResultEntryImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


/**
 * A cursor to get SearchResponses after setting the underlying cursor's
 * ServerEntry object in SearchResultEnty object.
 *
 * Most operations simply delegate to the wrapped {@code Cursor<Entry>}; {@link #get()} wraps
 * each entry in a {@link SearchResultEntry}, and {@link #next()} additionally builds the
 * {@link SearchResultDone} message once the wrapped cursor is exhausted.
 *
 * @author <a href="mailto:dev@directory.apache.org">Apache Directory Project</a>
 */
public class EntryToResponseCursor extends AbstractCursor<Response> implements SearchCursor
{
    /** A dedicated log for cursors */
    private static final Logger LOG_CURSOR = LoggerFactory.getLogger( Loggers.CURSOR_LOG.getName() );

    /** Speedup for logs */
    private static final boolean IS_DEBUG = LOG_CURSOR.isDebugEnabled();

    /** the underlying cursor */
    private Cursor<Entry> wrapped;

    /** a reference to hold the SearchResultDone response */
    private SearchResultDone searchDoneResp;

    /** The done flag — set once {@link #next()} has exhausted the wrapped cursor. */
    private boolean done;

    /** The messsage ID, copied into every generated response. */
    private int messageId;

    /** The search request whose processed result supplies the final result code and controls. */
    private SearchRequest searchRequest;


    public EntryToResponseCursor( SearchRequest searchRequest, int messageId, Cursor<Entry> wrapped )
    {
        if ( IS_DEBUG )
        {
            LOG_CURSOR.debug( "Creating EntryToResponseCursor {}", this );
        }

        this.searchRequest = searchRequest;
        this.wrapped = wrapped;
        this.messageId = messageId;
    }


    // Iteration must go through next()/get(); an Iterator view is not supported.
    public Iterator<Response> iterator()
    {
        throw new UnsupportedOperationException();
    }


    /**
     * {@inheritDoc}
     *
     * Not supported: responses cannot be used as positioning keys.
     */
    public void after( Response resp ) throws LdapException, CursorException
    {
        throw new UnsupportedOperationException();
    }


    /**
     * {@inheritDoc}
     */
    public void afterLast() throws LdapException, CursorException
    {
        wrapped.afterLast();
    }


    /**
     * {@inheritDoc}
     */
    public boolean available()
    {
        return wrapped.available();
    }


    /**
     * {@inheritDoc}
     *
     * Not supported: responses cannot be used as positioning keys.
     */
    public void before( Response resp ) throws LdapException, CursorException
    {
        throw new UnsupportedOperationException();
    }


    /**
     * {@inheritDoc}
     */
    public void beforeFirst() throws LdapException, CursorException
    {
        wrapped.beforeFirst();
    }


    /**
     * {@inheritDoc}
     */
    public void close()
    {
        if ( IS_DEBUG )
        {
            LOG_CURSOR.debug( "Closing EntryToResponseCursor {}", this );
        }

        wrapped.close();
    }


    /**
     * {@inheritDoc}
     */
    public void close( Exception e )
    {
        if ( IS_DEBUG )
        {
            LOG_CURSOR.debug( "Closing EntryToResponseCursor {}", this );
        }

        wrapped.close( e );
    }


    /**
     * {@inheritDoc}
     */
    public boolean first() throws LdapException, CursorException
    {
        return wrapped.first();
    }


    /**
     * {@inheritDoc}
     *
     * Wraps the current entry of the underlying cursor in a {@link SearchResultEntry}
     * carrying this cursor's message ID.
     */
    public Response get() throws CursorException
    {
        Entry entry = wrapped.get();
        SearchResultEntry se = new SearchResultEntryImpl( messageId );
        se.setEntry( entry );

        return se;
    }


    /**
     * gives the SearchResultDone message received at the end of search results
     *
     * @return the SearchResultDone message, null if the search operation fails for any reason
     */
    public SearchResultDone getSearchResultDone()
    {
        return searchDoneResp;
    }


    /**
     * {@inheritDoc}
     */
    public boolean isClosed()
    {
        return wrapped.isClosed();
    }


    /**
     * {@inheritDoc}
     */
    public boolean last() throws LdapException, CursorException
    {
        return wrapped.last();
    }


    /**
     * {@inheritDoc}
     *
     * When the wrapped cursor is exhausted, builds the {@link SearchResultDone} response,
     * copying the result code and any controls from the processed search request's response,
     * and marks this cursor as done.
     */
    public boolean next() throws LdapException, CursorException
    {
        boolean next = wrapped.next();

        if ( !next )
        {
            searchDoneResp = new SearchResultDoneImpl( messageId );

            ResultCodeEnum re = ResultCodeEnum.SUCCESS;
            ResultResponse processedResp = searchRequest.getResultResponse();
            LdapResult filledResult = processedResp.getLdapResult();

            // Prefer whatever result code the processing of the request produced.
            if ( filledResult.getResultCode() != re )
            {
                re = filledResult.getResultCode();
            }

            searchDoneResp.getLdapResult().setResultCode( re );

            Map<String, Control> ctrls = processedResp.getControls();

            if ( ctrls != null )
            {
                Iterator<Control> itr = ctrls.values().iterator();

                while ( itr.hasNext() )
                {
                    searchDoneResp.addControl( itr.next() );
                }
            }

            done = true;
        }

        return next;
    }


    /**
     * {@inheritDoc}
     */
    public boolean previous() throws LdapException, CursorException
    {
        return wrapped.previous();
    }


    /**
     * {@inheritDoc}
     */
    public void setClosureMonitor( ClosureMonitor monitor )
    {
        wrapped.setClosureMonitor( monitor );
    }


    /**
     * {@inheritDoc}
     */
    public boolean isDone()
    {
        return done;
    }


    /**
     * {@inheritDoc}
     *
     * This cursor never produces referrals.
     */
    public boolean isReferral()
    {
        return false;
    }


    /**
     * {@inheritDoc}
     */
    public Referral getReferral() throws LdapException
    {
        throw new LdapException();
    }


    /**
     * {@inheritDoc}
     *
     * Every element produced by this cursor is an entry.
     */
    public boolean isEntry()
    {
        return true;
    }


    /**
     * {@inheritDoc}
     *
     * Returns the current underlying entry; throws if the cursor is done or no
     * entry is available.
     */
    public Entry getEntry() throws LdapException
    {
        if ( !done && wrapped.available() )
        {
            try
            {
                return wrapped.get();
            }
            catch ( Exception e )
            {
                throw new LdapException( e );
            }
        }

        throw new LdapException();
    }


    /**
     * {@inheritDoc}
     */
    public boolean isIntermediate()
    {
        return false;
    }


    /**
     * {@inheritDoc}
     */
    public IntermediateResponse getIntermediate() throws LdapException
    {
        throw new LdapException();
    }
}
apache-2.0
avadev/AvaTaxClientLibrary
generator/ClientApiGenerator/Filters/Filter.cs
222
using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace ClientApiGenerator.Filters { // base filter class public class Filter { } }
apache-2.0
aduprat/james-mailbox
store/src/main/java/org/apache/james/mailbox/store/quota/ListeningQuotaManager.java
9153
/****************************************************************
 * Licensed to the Apache Software Foundation (ASF) under one   *
 * or more contributor license agreements.  See the NOTICE file *
 * distributed with this work for additional information        *
 * regarding copyright ownership.  The ASF licenses this file   *
 * to you under the Apache License, Version 2.0 (the            *
 * "License"); you may not use this file except in compliance   *
 * with the License.  You may obtain a copy of the License at   *
 *                                                              *
 *   http://www.apache.org/licenses/LICENSE-2.0                 *
 *                                                              *
 * Unless required by applicable law or agreed to in writing,   *
 * software distributed under the License is distributed on an  *
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY       *
 * KIND, either express or implied.  See the License for the    *
 * specific language governing permissions and limitations      *
 * under the License.                                           *
 ****************************************************************/
package org.apache.james.mailbox.store.quota;

import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.james.mailbox.MailboxListener;
import org.apache.james.mailbox.MailboxSession;
import org.apache.james.mailbox.QuotaManager;
import org.apache.james.mailbox.exception.MailboxException;
import org.apache.james.mailbox.model.MailboxPath;
import org.apache.james.mailbox.model.MessageRange;
import org.apache.james.mailbox.model.Quota;
import org.apache.james.mailbox.store.MailboxSessionMapperFactory;
import org.apache.james.mailbox.store.StoreMailboxManager;
import org.apache.james.mailbox.store.mail.model.Mailbox;
import org.apache.james.mailbox.store.mail.model.Message;
import org.apache.james.mailbox.store.mail.MessageMapper;
import org.apache.james.mailbox.store.mail.MessageMapper.FetchType;

/**
 * {@link QuotaManager} which keeps track of quota usage by listening for
 * {@link org.apache.james.mailbox.MailboxListener.Event}'s.
 *
 * The whole quota is kept in memory after being lazily fetched on the first access.
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public abstract class ListeningQuotaManager implements QuotaManager, MailboxListener {

    private MailboxSessionMapperFactory factory;

    /** Per-user message counts, lazily initialized on first quota lookup. */
    private ConcurrentHashMap<String, AtomicLong> counts = new ConcurrentHashMap<String, AtomicLong>();

    /** Per-user total message sizes (in bytes), lazily initialized on first quota lookup. */
    private ConcurrentHashMap<String, AtomicLong> sizes = new ConcurrentHashMap<String, AtomicLong>();

    /** Whether usage is still computed when the maximum is {@link Quota#UNLIMITED}. */
    private boolean calculateWhenUnlimited = false;

    public ListeningQuotaManager(StoreMailboxManager<?> manager) throws MailboxException {
        this.factory = manager.getMapperFactory();
        manager.addGlobalListener(this, null);
    }

    protected MailboxSessionMapperFactory<?> getFactory() {
        return factory;
    }

    public void setCalculateUsedWhenUnlimited(boolean calculateWhenUnlimited) {
        this.calculateWhenUnlimited = calculateWhenUnlimited;
    }

    @Override
    public Quota getMessageQuota(MailboxSession session) throws MailboxException {
        long max = getMaxMessage(session);
        if (max == Quota.UNLIMITED && !calculateWhenUnlimited) {
            return QuotaImpl.unlimited();
        }
        String id = session.getUser().getUserName();
        AtomicLong count = counts.get(id);
        if (count == null) {
            // Lazily count all messages across the user's personal mailboxes.
            long mc = 0;
            List<Mailbox> mailboxes = factory.getMailboxMapper(session)
                    .findMailboxWithPathLike(new MailboxPath(session.getPersonalSpace(), id, "%"));
            for (int i = 0; i < mailboxes.size(); i++) {
                mc += factory.getMessageMapper(session).countMessagesInMailbox(mailboxes.get(i));
            }
            AtomicLong fresh = new AtomicLong(mc);
            AtomicLong previous = counts.putIfAbsent(id, fresh);
            // BUGFIX: previously, when this thread won the putIfAbsent race, the freshly
            // computed count was discarded and the usage was reported as 0.
            count = previous != null ? previous : fresh;
        }
        return QuotaImpl.quota(max, count.get());
    }

    @Override
    public Quota getStorageQuota(MailboxSession session) throws MailboxException {
        long max = getMaxStorage(session);
        if (max == Quota.UNLIMITED && !calculateWhenUnlimited) {
            return QuotaImpl.unlimited();
        }
        MessageMapper mapper = factory.getMessageMapper(session);
        String id = session.getUser().getUserName();
        AtomicLong size = sizes.get(id);
        if (size == null) {
            // Lazily sum the full content octets of every message the user owns.
            AtomicLong mSizes = new AtomicLong(0);
            List<Mailbox> mailboxes = factory.getMailboxMapper(session)
                    .findMailboxWithPathLike(new MailboxPath(session.getPersonalSpace(), id, "%"));
            for (int i = 0; i < mailboxes.size(); i++) {
                long messageSizes = 0;
                Iterator<Message> messages = mapper.findInMailbox(mailboxes.get(i), MessageRange.all(), FetchType.Metadata, -1);
                while (messages.hasNext()) {
                    messageSizes += messages.next().getFullContentOctets();
                }
                mSizes.addAndGet(messageSizes);
            }
            AtomicLong previous = sizes.putIfAbsent(id, mSizes);
            size = previous != null ? previous : mSizes;
        }
        return QuotaImpl.quota(max, size.get());
    }

    /**
     * Return the maximum storage which is allowed for the given {@link MailboxSession} (in fact the user which the session is bound to)
     *
     * The returned valued must be in <strong>bytes</strong>
     *
     * @param session
     * @return maxBytes
     * @throws MailboxException
     */
    protected abstract long getMaxStorage(MailboxSession session) throws MailboxException;

    /**
     * Return the maximum message count which is allowed for the given {@link MailboxSession} (in fact the user which the session is bound to)
     *
     * @param session
     * @return maximum of allowed message count
     * @throws MailboxException
     */
    protected abstract long getMaxMessage(MailboxSession session) throws MailboxException;

    @Override
    public void event(Event event) {
        String id = event.getSession().getUser().getUserName();
        if (event instanceof Added) {
            Added added = (Added) event;
            long addedSize = 0;
            long addedCount = 0;
            for (long uid : added.getUids()) {
                addedSize += added.getMetaData(uid).getSize();
                addedCount++;
            }
            adjust(sizes.get(id), addedSize);
            adjust(counts.get(id), addedCount);
        } else if (event instanceof Expunged) {
            Expunged expunged = (Expunged) event;
            long removedSize = 0;
            long removedCount = 0;
            for (long uid : expunged.getUids()) {
                removedSize += expunged.getMetaData(uid).getSize();
                removedCount++;
            }
            adjust(sizes.get(id), -removedSize);
            adjust(counts.get(id), -removedCount);
        } else if (event instanceof MailboxAdded) {
            counts.putIfAbsent(id, new AtomicLong(0));
            sizes.putIfAbsent(id, new AtomicLong(0));
        }
    }

    /**
     * Atomically applies the delta to the counter if it has already been initialized;
     * a null counter (usage never computed for this user) is ignored, matching the
     * original lazy-initialization behavior.
     */
    private static void adjust(AtomicLong counter, long delta) {
        if (counter != null) {
            counter.addAndGet(delta);
        }
    }

    /**
     * Get never closed
     *
     * @return false
     */
    public boolean isClosed() {
        return false;
    }
}
apache-2.0
wilkmaia/wm-representacoes-2
prototype/cities/ma.js
6626
module.exports = [{ name: 'Açailândia' }, { name: 'Afonso Cunha' }, { name: 'Água Doce do Maranhão' }, { name: 'Alcântara' }, { name: 'Aldeias Altas' }, { name: 'Altamira do Maranhão' }, { name: 'Alto Alegre do Maranhão' }, { name: 'Alto Alegre do Pindaré' }, { name: 'Alto Parnaíba' }, { name: 'Amapá do Maranhão' }, { name: 'Amarante do Maranhão' }, { name: 'Anajatuba' }, { name: 'Anapurus' }, { name: 'Apicum-Açu' }, { name: 'Araguanã' }, { name: 'Araioses' }, { name: 'Arame' }, { name: 'Arari' }, { name: 'Axixá' }, { name: 'Bacabal' }, { name: 'Bacabeira' }, { name: 'Bacuri' }, { name: 'Bacurituba' }, { name: 'Balsas' }, { name: 'Barão de Grajaú' }, { name: 'Barra do Corda' }, { name: 'Barreirinhas' }, { name: 'Bela Vista do Maranhão' }, { name: 'Belágua' }, { name: 'Benedito Leite' }, { name: 'Bequimão' }, { name: 'Bernardo do Mearim' }, { name: 'Boa Vista do Gurupi' }, { name: 'Bom Jardim' }, { name: 'Bom Jesus das Selvas' }, { name: 'Bom Lugar' }, { name: 'Brejo' }, { name: 'Brejo de Areia' }, { name: 'Buriti' }, { name: 'Buriti Bravo' }, { name: 'Buriticupu' }, { name: 'Buritirana' }, { name: 'Cachoeira Grande' }, { name: 'Cajapió' }, { name: 'Cajari' }, { name: 'Campestre do Maranhão' }, { name: 'Cândido Mendes' }, { name: 'Cantanhede' }, { name: 'Capinzal do Norte' }, { name: 'Carolina' }, { name: 'Carutapera' }, { name: 'Caxias' }, { name: 'Cedral' }, { name: 'Central do Maranhão' }, { name: 'Centro do Guilherme' }, { name: 'Centro Novo do Maranhão' }, { name: 'Chapadinha' }, { name: 'Cidelândia' }, { name: 'Codó' }, { name: 'Coelho Neto' }, { name: 'Colinas' }, { name: 'Conceição do Lago-Açu' }, { name: 'Coroatá' }, { name: 'Cururupu' }, { name: 'Davinópolis' }, { name: 'Dom Pedro' }, { name: 'Duque Bacelar' }, { name: 'Esperantinópolis' }, { name: 'Estreito' }, { name: 'Feira Nova do Maranhão' }, { name: 'Fernando Falcão' }, { name: 'Formosa da Serra Negra' }, { name: 'Fortaleza dos Nogueiras' }, { name: 'Fortuna' }, { name: 'Godofredo Viana' }, { name: 
'Gonçalves Dias' }, { name: 'Governador Archer' }, { name: 'Governador Edison Lobão' }, { name: 'Governador Eugênio Barros' }, { name: 'Governador Luiz Rocha' }, { name: 'Governador Newton Bello' }, { name: 'Governador Nunes Freire' }, { name: 'Graça Aranha' }, { name: 'Grajaú' }, { name: 'Guimarães' }, { name: 'Humberto de Campos' }, { name: 'Icatu' }, { name: 'Igarapé do Meio' }, { name: 'Igarapé Grande' }, { name: 'Imperatriz' }, { name: 'Itaipava do Grajaú' }, { name: 'Itapecuru Mirim' }, { name: 'Itinga do Maranhão' }, { name: 'Jatobá' }, { name: 'Jenipapo dos Vieiras' }, { name: 'João Lisboa' }, { name: 'Joselândia' }, { name: 'Junco do Maranhão' }, { name: 'Lago da Pedra' }, { name: 'Lago do Junco' }, { name: 'Lago dos Rodrigues' }, { name: 'Lago Verde' }, { name: 'Lagoa do Mato' }, { name: 'Lagoa Grande do Maranhão' }, { name: 'Lajeado Novo' }, { name: 'Lima Campos' }, { name: 'Loreto' }, { name: 'Luís Domingues' }, { name: 'Magalhães de Almeida' }, { name: 'Maracaçumé' }, { name: 'Marajá do Sena' }, { name: 'Maranhãozinho' }, { name: 'Mata Roma' }, { name: 'Matinha' }, { name: 'Matões' }, { name: 'Matões do Norte' }, { name: 'Milagres do Maranhão' }, { name: 'Mirador' }, { name: 'Miranda do Norte' }, { name: 'Mirinzal' }, { name: 'Monção' }, { name: 'Montes Altos' }, { name: 'Morros' }, { name: 'Nina Rodrigues' }, { name: 'Nova Colinas' }, { name: 'Nova Iorque' }, { name: 'Nova Olinda do Maranhão' }, { name: 'Olho d`Água das Cunhãs' }, { name: 'Olinda Nova do Maranhão' }, { name: 'Paço do Lumiar' }, { name: 'Palmeirândia' }, { name: 'Paraibano' }, { name: 'Parnarama' }, { name: 'Passagem Franca' }, { name: 'Pastos Bons' }, { name: 'Paulino Neves' }, { name: 'Paulo Ramos' }, { name: 'Pedreiras' }, { name: 'Pedro do Rosário' }, { name: 'Penalva' }, { name: 'Peri Mirim' }, { name: 'Peritoró' }, { name: 'Pindaré-Mirim' }, { name: 'Pinheiro' }, { name: 'Pio XII' }, { name: 'Pirapemas' }, { name: 'Poção de Pedras' }, { name: 'Porto Franco' }, { name: 'Porto Rico 
do Maranhão' }, { name: 'Presidente Dutra' }, { name: 'Presidente Juscelino' }, { name: 'Presidente Médici' }, { name: 'Presidente Sarney' }, { name: 'Presidente Vargas' }, { name: 'Primeira Cruz' }, { name: 'Raposa' }, { name: 'Riachão' }, { name: 'Ribamar Fiquene' }, { name: 'Rosário' }, { name: 'Sambaíba' }, { name: 'Santa Filomena do Maranhão' }, { name: 'Santa Helena' }, { name: 'Santa Inês' }, { name: 'Santa Luzia' }, { name: 'Santa Luzia do Paruá' }, { name: 'Santa Quitéria do Maranhão' }, { name: 'Santa Rita' }, { name: 'Santana do Maranhão' }, { name: 'Santo Amaro do Maranhão' }, { name: 'Santo Antônio dos Lopes' }, { name: 'São Benedito do Rio Preto' }, { name: 'São Bento' }, { name: 'São Bernardo' }, { name: 'São Domingos do Azeitão' }, { name: 'São Domingos do Maranhão' }, { name: 'São Félix de Balsas' }, { name: 'São Francisco do Brejão' }, { name: 'São Francisco do Maranhão' }, { name: 'São João Batista' }, { name: 'São João do Carú' }, { name: 'São João do Paraíso' }, { name: 'São João do Soter' }, { name: 'São João dos Patos' }, { name: 'São José de Ribamar' }, { name: 'São José dos Basílios' }, { name: 'São Luís' }, { name: 'São Luís Gonzaga do Maranhão' }, { name: 'São Mateus do Maranhão' }, { name: 'São Pedro da Água Branca' }, { name: 'São Pedro dos Crentes' }, { name: 'São Raimundo das Mangabeiras' }, { name: 'São Raimundo do Doca Bezerra' }, { name: 'São Roberto' }, { name: 'São Vicente Ferrer' }, { name: 'Satubinha' }, { name: 'Senador Alexandre Costa' }, { name: 'Senador La Rocque' }, { name: 'Serrano do Maranhão' }, { name: 'Sítio Novo' }, { name: 'Sucupira do Norte' }, { name: 'Sucupira do Riachão' }, { name: 'Tasso Fragoso' }, { name: 'Timbiras' }, { name: 'Timon' }, { name: 'Trizidela do Vale' }, { name: 'Tufilândia' }, { name: 'Tuntum' }, { name: 'Turiaçu' }, { name: 'Turilândia' }, { name: 'Tutóia' }, { name: 'Urbano Santos' }, { name: 'Vargem Grande' }, { name: 'Viana' }, { name: 'Vila Nova dos Martírios' }, { name: 'Vitória do 
Mearim' }, { name: 'Vitorino Freire' }, { name: 'Zé Doca' }];
apache-2.0
brianfcoleman/libvideocapture
src/videocapture/windows/COMUtilities.hpp
2992
#ifndef VIDEO_CAPTURE_COM_UTILITIES_H #define VIDEO_CAPTURE_COM_UTILITIES_H #define BOOST_BIND_ENABLE_STDCALL #define BOOST_MEM_FN_ENABLE_STDCALL #include <vector> #include "boost/shared_ptr.hpp" #include "boost/bind.hpp" #include "boost/mem_fn.hpp" #include "wtypes.h" #include "oleauto.h" namespace VideoCapture { boost::shared_ptr<void> comInitializerSharedPtr(); template<typename COMInterface> boost::shared_ptr< COMInterface> comInterfaceSharedPtr(COMInterface* pCOMInterface) { boost::shared_ptr<COMInterface> comInterfacePtr( pCOMInterface, boost::mem_fn(&COMInterface::Release)); return comInterfacePtr; } template<typename COMInterface> boost::shared_ptr< COMInterface> createInstanceCOMInterface( const CLSID& classId, const IID& interfaceId) { COMInterface* pCOMInterface = 0; HRESULT result = CoCreateInstance( classId, 0, CLSCTX_INPROC_SERVER, interfaceId, reinterpret_cast<void**>(&pCOMInterface)); if (FAILED(result)) { boost::shared_ptr<COMInterface> comInterfacePtr; return comInterfacePtr; } boost::shared_ptr<COMInterface> comInterfacePtr( comInterfaceSharedPtr(pCOMInterface)); return comInterfacePtr; } template< typename ParentCOMInterface, typename ChildCOMInterface> boost::shared_ptr< ChildCOMInterface> queryCOMInterface( boost::shared_ptr<ParentCOMInterface> pParentCOMInterface, const IID& childInterfaceId) { if (!pParentCOMInterface) { boost::shared_ptr<ChildCOMInterface> comInterfacePtr; return comInterfacePtr; } ChildCOMInterface* pChildCOMInterface = 0; HRESULT result = pParentCOMInterface->QueryInterface( childInterfaceId, reinterpret_cast<void**>(&pChildCOMInterface)); if (FAILED(result)) { boost::shared_ptr<ChildCOMInterface> comInterfacePtr; return comInterfacePtr; } boost::shared_ptr<ChildCOMInterface> comInterfacePtr( comInterfaceSharedPtr(pChildCOMInterface)); return comInterfacePtr; } template<typename COMInterface> std::vector< boost::shared_ptr<COMInterface>> comInterfaceSharedPtrVector( COMInterface** comInterfacePtrArray, const std::size_t 
arrayElementCount) { if (!comInterfacePtrArray) { std::vector<boost::shared_ptr<COMInterface>> comInterfacePtrVector; return comInterfacePtrVector; } std::vector<boost::shared_ptr<COMInterface>> comInterfacePtrVector; for (std::size_t i = 0; i < arrayElementCount; ++i) { COMInterface* pCOMInterface = comInterfacePtrArray[i]; if (!pCOMInterface) { continue; } boost::shared_ptr<COMInterface> comInterfacePtr( comInterfaceSharedPtr(pCOMInterface)); if (!comInterfacePtr) { continue; } comInterfacePtrVector.push_back(comInterfacePtr); } return comInterfacePtrVector; } boost::shared_ptr<VARIANT> variantSharedPtr(VARIANT* pVariant); const std::string utf8StringFromBasicString(const BSTR basicString); } #endif // VIDEO_CAPTURE_COM_UTILITIES_H
apache-2.0
mkoslacz/Moviper
moviper-recyclerview/src/main/java/com/mateuszkoslacz/moviper/iface/view/MvpDataView.java
347
package com.mateuszkoslacz.moviper.iface.view; import com.hannesdorfmann.mosby.mvp.MvpView; /** * Created by mateuszkoslacz on 01.12.2016. * <p> * The root view interface for every MVP ViewHolder */ public interface MvpDataView<DataObject> extends MvpView { DataObject getDataObject(); void setDataObject(DataObject dataObject); }
apache-2.0
vishvananda/wormhole
main/wormhole/wormhole.go
93
package main import ( "github.com/vishvananda/wormhole/cli" ) func main() { cli.Main() }
apache-2.0
REBOOTERS/My-MVP
app/src/main/java/huyifei/mymvp/datastorage/room/dao/UserDao.java
808
package huyifei.mymvp.datastorage.room.dao; import androidx.room.Dao; import androidx.room.Delete; import androidx.room.Insert; import androidx.room.OnConflictStrategy; import androidx.room.Query; import java.util.List; import huyifei.mymvp.datastorage.room.entity.UserEntity; import io.reactivex.Flowable; /** * @version V1.0 * @author: Rookie * @date: 2018-08-04 09:26 */ @Dao public interface UserDao { @Query("select * from user") List<UserEntity> getAll(); @Query("select * from user") Flowable<List<UserEntity>> getAllAsync(); @Query("select * from user where name = :name") UserEntity getUserByName(String name); @Insert(onConflict = OnConflictStrategy.REPLACE) void addUser(UserEntity userEntity); @Delete void delete(UserEntity userEntity); }
apache-2.0
wil3/lacus
src/main/java/edu/stevens/cpe/math/MLMatrixUtils.java
8259
/******************************************************************************* * Copyright 2013 William Koch * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package edu.stevens.cpe.math; import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Random; import com.google.common.base.Function; import cern.colt.function.DoubleFunction; import cern.colt.function.IntIntDoubleFunction; import cern.colt.matrix.DoubleFactory2D; import cern.colt.matrix.DoubleMatrix1D; import cern.colt.matrix.DoubleMatrix2D; import cern.colt.matrix.impl.DenseDoubleMatrix2D; import cern.colt.matrix.impl.SparseDoubleMatrix2D; import cern.colt.matrix.linalg.Algebra; import cern.colt.matrix.linalg.EigenvalueDecomposition; import cern.jet.math.Functions; public class MLMatrixUtils { private static Random random = new Random(System.nanoTime()); public static String prettyPrint(DoubleMatrix2D matrix){ ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintStream ps = new PrintStream(baos); double [][] arr = matrix.toArray(); DecimalFormat df = new DecimalFormat("##.###"); for (int i=0; i<arr.length; i++){ for (int j=0; j<arr[i].length; j++){ ps.printf(df.format(arr[i][j])+"\t"); } ps.print("\r\n"); } return baos.toString(); } private static final DoubleFunction noise = new DoubleFunction(){ @Override public double apply(double argument) { 
double noise = (random.nextDouble() > .5) ? 0.0001 : -0.0001; return argument + noise; } }; public static void addNoise(DoubleMatrix2D matrix){ matrix.assign(noise); } private static final DoubleFunction customRandomizer = new DoubleFunction(){ @Override public double apply(double argument) { // TODO Auto-generated method stub Random random = new Random(System.nanoTime()); return random.nextDouble(); } }; public static DoubleMatrix2D sprand(int m, int n, double density){ int nnzwanted = (int)Math.round(m * n * Math.min(density, 1)); DoubleFactory2D factory = DoubleFactory2D.sparse; DoubleMatrix2D i = factory.random(nnzwanted, 1).assign(customRandomizer).assign(Functions.mult(m)).assign(Functions.floor);//.assign(Functions.plus(1)); DoubleMatrix2D j = factory.random(nnzwanted, 1).assign(customRandomizer).assign(Functions.mult(n)).assign(Functions.floor);//.assign(Functions.plus(1)); DoubleMatrix2D rows=null; DoubleMatrix2D cols=null; //Combine i j DoubleMatrix2D ij = unique(factory.appendColumns(i, j)); if (ij.cardinality() != 0){ //separate into i and j again rows = ij.viewSelection(null, new int[]{0}); cols= ij.viewSelection(null, new int[]{1}); } DoubleMatrix2D rands = factory.random(rows.rows(), 1); double [][] R = new double [m][n]; for (int x=0; x<rows.rows(); x++ ){ // for (int y=0; y<n; y++){ R[(int)rows.get(x, 0)][(int)cols.get(x, 0)] = rands.get(x, 0); // } } DoubleMatrix2D y = new SparseDoubleMatrix2D(R); return y; } /** * Matlab unique(A,'rows') * * Get rid of duplicate rows */ public static DoubleMatrix2D unique(DoubleMatrix2D A){ A = A.viewSorted(0); //Keep track of row indices that are duplicates final ArrayList<Integer> dups = new ArrayList<Integer>(); for (int x=0; x<A.rows(); x++){ if (dups.contains(x)){ continue; } for (int y=x+1;y<A.rows(); y++ ){ if (!dups.contains(y)){ if (A.viewRow(x).equals(A.viewRow(y))){ dups.add(y); } } } } //Get unique values in row, return in sorted order final ArrayList<Integer> alKeepRows = new ArrayList<Integer>(); 
for (int i = 0; i< A.rows(); i++){ if (!dups.contains(i)){ alKeepRows.add(i); } } //Ugh conver to primative int array int [] keepRows = new int[alKeepRows.size()]; for (int x=0; x<alKeepRows.size(); x++){ keepRows[x] = alKeepRows.get(x); } return A.viewSelection(keepRows, null); } public static double getSpectralRadius(DoubleMatrix2D A){ EigenvalueDecomposition eigen = new EigenvalueDecomposition(A); DoubleMatrix1D eigenValues = eigen.getRealEigenvalues(); return eigenValues.assign(Functions.abs).viewSorted().get(eigenValues.size()-1); } public static DoubleMatrix2D convertArrayTo2DMatrix(long [] array){ double [] preciseSpikes = new double [ array.length]; for (int i=0; i<preciseSpikes.length; i++){ preciseSpikes[i] = array[i]; } DoubleFactory2D factory = DoubleFactory2D.dense; return factory.make(new double [][]{preciseSpikes}); } public static DoubleMatrix2D convertArrayTo2DMatrix(double [] array){ double [] preciseSpikes = new double [ array.length]; for (int i=0; i<preciseSpikes.length; i++){ preciseSpikes[i] = array[i]; } DoubleFactory2D factory = DoubleFactory2D.dense; return factory.make(new double [][]{preciseSpikes}); } /** * * @param n => (n_max x N) Matrix where each row is state of reservoir for each time step * @param y => (n_max x L) Matrix where each row is state of output for each time step * @param alpha The idenity coefficient. This can be found using cross-validation * @return */ public static DoubleMatrix2D ridge(DoubleMatrix2D n, DoubleMatrix2D y, double alpha){ DoubleFactory2D factory = DoubleFactory2D.dense; //force same type, this is giving a cast error. Bug on their part? 
DoubleMatrix2D _n = factory.make(n.toArray()); DoubleMatrix2D _y = factory.make(y.toArray()); //aI DoubleMatrix2D scaledIdentity = factory.identity(_n.rows()).assign(Functions.mult(alpha)); Algebra alg = new Algebra(); // ((n x n^T) + aI)^-1 // (n_max x N) x (N x n_max) DoubleMatrix2D inverse = alg.inverse(n.zMult(_n.viewDice(), null).assign(scaledIdentity, Functions.plus)); //System.out.println("-1=" + inverse); //X = ((n x n^T) + aI)^-1 x n // (n_max x n_max) x DoubleMatrix2D X = inverse.zMult(_n, null); //System.out.println("X=" + X); //((n x n^T) + aI)^-1 x n x y DoubleMatrix2D weights = X.viewDice().zMult(_y, null); //X.viewDice() return weights; } /** * (n^-1 x y)' */ public static DoubleMatrix2D WienerHopf1(DoubleMatrix2D n, DoubleMatrix2D y) { //N = number of reservoir units //K = number of inputs //S = n_max x (N + K) //output matrix D = n_max x L = 200 x 1 Algebra alg = new Algebra(); //DoubleMatrix2D W = alg.inverse(X_state).zMult(Y_state.viewDice(), null); //System.out.println(W); DoubleMatrix2D S_inv = alg.inverse(n); DoubleFactory2D factory = DoubleFactory2D.dense; y = factory.make(y.toArray()); //S^-1 = 10 x 200 return S_inv.zMult(y, null); } /** * ( (n'n)^-1(n'y) ) */ public static DoubleMatrix2D WienerHopf2(DoubleMatrix2D n, DoubleMatrix2D y) { //P = S'D = (N x n_max) x (n_max x L) = (N x L) //R = S'S = (N x n_max) x (n_max x N) = (N x N) //W = (R^-1P)' = (R^-1)'(P)' = (N x N) x (N x L) return null; } public static DoubleMatrix2D psuedoInverse(DoubleMatrix2D n, DoubleMatrix2D y){ Algebra alg = new Algebra(); DoubleMatrix2D S_inv = alg.inverse(n.viewDice()); return S_inv.zMult(y, null); } /** * @param args */ public static void main(String[] args) { double [][] R = new double [] [] { {5, 6},{ 1, 2}};//, {1,2}, {3,4}, {5,6}}; DoubleFactory2D sparse = DoubleFactory2D.sparse; DoubleMatrix2D R2 = sparse.make(R); double radius = getSpectralRadius(R2); System.out.println(radius); //DoubleFactory2D factory = DoubleFactory2D.dense; //DoubleMatrix2D R1 = 
factory.make(R); //System.out.println(R1); //DoubleMatrix2D m = sprand(10,10,.2); //System.out.println(unique(R1)); } }
apache-2.0
mpi2/PhenotypeData
web/src/main/java/uk/ac/ebi/phenotype/web/controller/registerinterest/CaptchaFilter.java
3923
package uk.ac.ebi.phenotype.web.controller.registerinterest; import org.apache.http.NameValuePair; import org.apache.http.message.BasicNameValuePair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.configurationprocessor.json.JSONException; import org.springframework.boot.configurationprocessor.json.JSONObject; import org.springframework.stereotype.Component; import org.springframework.web.filter.OncePerRequestFilter; import uk.ac.ebi.phenotype.web.util.CaptchaHttpProxy; import javax.servlet.FilterChain; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.IOException; import java.util.ArrayList; import java.util.List; @Component public class CaptchaFilter extends OncePerRequestFilter { private final Logger log = LoggerFactory.getLogger(this.getClass().getCanonicalName()); private CaptchaHttpProxy captchaHttpProxy = new CaptchaHttpProxy(); // See https://www.google.com/recaptcha for setup @Value("${recaptcha.secret}") private String recaptchaSecret; @Value("${recaptcha.url}") private String recaptchaUrl; @Value("${recaptcha.response.param}") private String recaptchaResponseParam; @Value("${base_url}") private String baseUrl; @Override protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain chain) throws ServletException, IOException { if ( ("POST".equalsIgnoreCase(request.getMethod())) && ( request.getServletPath().contains("rilogin") || request.getServletPath().contains("sendNewAccountEmail") ) ) { System.out.println("\n"); System.out.println("Request path " + request.getRequestURI()); System.out.println("Request method " + request.getMethod()); log.info("URL = " + request.getRequestURL()); if ( ! validateRecaptcha(request)) { String target = request.getHeader("referer"); if ((target == null) || ! 
(target.startsWith(baseUrl))) { target = baseUrl + "/rilogin"; } if (target.endsWith("sendNewAccountEmail")) { // sendNewAccountEmail is a POST and will throw a 405 if redirected, as there is no GET. Remap to New account. target = baseUrl + "/newAccountRequest"; } target += "?error=true"; response.sendRedirect(target); return; } } chain.doFilter(request, response); } /** * Contact the google recaptcha service and validate the user is a human * * @param request the request * @return true if the user is not a bot * @throws IOException when the server fails to respond appropriately */ private boolean validateRecaptcha(HttpServletRequest request) { boolean success = false; List<NameValuePair> params = new ArrayList<>(); params.add(new BasicNameValuePair("secret", recaptchaSecret)); params.add(new BasicNameValuePair("response", request.getParameter(recaptchaResponseParam))); String body = ""; try { body = captchaHttpProxy.getContent(recaptchaUrl, params); if (body != null) { JSONObject recaptchaResponse = new JSONObject(body); success = recaptchaResponse.getBoolean("success"); } } catch (IOException | JSONException e) { log.info("Exception from recaptcha service", e); } log.debug("Response from google recaptcha service: " + body); return success; } }
apache-2.0
cpieloth/leipzigdata4j
src/main/java/examples/AddressClientExample.java
787
package examples; import de.leipzigdata.address.AddressClient; import de.leipzigdata.address.entity.Address; import java.net.URI; import java.net.URISyntaxException; import java.util.List; /** * @author cpieloth */ public class AddressClientExample { public static void main(String[] args) { AddressClient client = new AddressClient(); try { Address address = client.getAddress(new URI("http://leipzig-data.de/Data/04179.Leipzig.AmKanal.28")); System.out.println(address); } catch (URISyntaxException e) { e.printStackTrace(); } List<Address> addresses = client.getAddresses(); System.out.println("Address count: " + addresses.size()); addresses.forEach(System.out::println); } }
apache-2.0
openstack/sahara
sahara/tests/unit/service/api/v2/base.py
3799
# Copyright (c) 2017 EasyStack Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.plugins import base as pl_base from sahara.plugins import provisioning as pr_base SAMPLE_CLUSTER = { 'plugin_name': 'fake', 'hadoop_version': 'test_version', 'tenant_id': 'tenant_1', 'name': 'test_cluster', 'user_keypair_id': 'my_keypair', 'node_groups': [ { 'auto_security_group': True, 'name': 'ng_1', 'flavor_id': '42', 'node_processes': ['p1', 'p2'], 'count': 1 }, { 'auto_security_group': False, 'name': 'ng_2', 'flavor_id': '42', 'node_processes': ['p3', 'p4'], 'count': 3 }, { 'auto_security_group': False, 'name': 'ng_3', 'flavor_id': '42', 'node_processes': ['p3', 'p4'], 'count': 1 } ], 'cluster_configs': { 'service_1': { 'config_2': 'value_2' }, 'service_2': { 'config_1': 'value_1' } }, } SCALE_DATA = { 'resize_node_groups': [ { 'name': 'ng_1', 'count': 3, }, { 'name': 'ng_2', 'count': 2, } ], 'add_node_groups': [ { 'auto_security_group': True, 'name': 'ng_4', 'flavor_id': '42', 'node_processes': ['p1', 'p2'], 'count': 1 }, ] } SCALE_DATA_SPECIFIC_INSTANCE = { 'resize_node_groups': [ { 'name': 'ng_1', 'count': 3, }, { 'name': 'ng_2', 'count': 1, 'instances': ['ng_2_0'] } ], 'add_node_groups': [] } SCALE_DATA_N_SPECIFIC_INSTANCE = { 'resize_node_groups': [ { 'name': 'ng_1', 'count': 3, }, { 'name': 'ng_2', 'count': 1, 'instances': ['ng_2_0', 'ng_2_2'] } ], 'add_node_groups': [] } class FakePlugin(pr_base.ProvisioningPluginBase): _info = {} name = "fake" def __init__(self, 
calls_order): self.calls_order = calls_order def configure_cluster(self, cluster): pass def start_cluster(self, cluster): pass def get_description(self): return "Some description" def get_title(self): return "Fake plugin" def validate(self, cluster): self.calls_order.append('validate') def get_open_ports(self, node_group): self.calls_order.append('get_open_ports') def validate_scaling(self, cluster, to_be_enlarged, additional): self.calls_order.append('validate_scaling') def get_versions(self): return ['0.1', '0.2'] def get_node_processes(self, version): return {'HDFS': ['namenode', 'datanode']} def get_configs(self, version): return [] def recommend_configs(self, cluster, scaling=False): self.calls_order.append('recommend_configs') class FakePluginManager(pl_base.PluginManager): def __init__(self, calls_order): super(FakePluginManager, self).__init__() self.plugins['fake'] = FakePlugin(calls_order)
apache-2.0
air-project/project-dict
src/main/java/com/air/utils/dict/cache/RedisSyncAble.java
472
package com.air.utils.dict.cache; import java.util.List; import com.air.utils.dict.entity.BaseDict; public interface RedisSyncAble { /** * 保存数据插入到数据库,分发到其他redis等等 * @param cacheKey 缓存键 * @param dicts 字典 */ void save(final String cacheKey,List<BaseDict> dicts); /** * * @param cacheKey 缓存键 * @return 获取数据字典 */ List<BaseDict> getDict(final String cacheKey); }
apache-2.0
htcondor/htcondor
src/condor_utils/network_adapter.linux.cpp
8598
/*************************************************************** * * Copyright (C) 1990-2008, Condor Team, Computer Sciences Department, * University of Wisconsin-Madison, WI. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You may * obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ***************************************************************/ #include "condor_common.h" #include "condor_debug.h" #include "condor_uid.h" #include "internet.h" #include "network_adapter.linux.h" #if HAVE_NET_IF_H # include <net/if.h> #endif #if HAVE_LINUX_SOCKIOS_H # include <linux/sockios.h> #endif #if HAVE_LINUX_TYPES_H # include <linux/types.h> #endif #if HAVE_OS_TYPES_H # include <os_types.h> #endif #if HAVE_LINUX_ETHTOOL_H # include <linux/ethtool.h> #endif // For now, the only wake-on-lan type we use is UDP magic #if defined(HAVE_LINUX_ETHTOOL_H) # define WAKE_MASK ( 0 | (WAKE_MAGIC) ) #endif // Possible values for the above (OR them together) : // WAKE_PHY // WAKE_UCAST // WAKE_MCAST // WAKE_BCAST // WAKE_ARP // WAKE_MAGIC // WAKE_MAGICSECURE /*************************************************************** * LinuxNetworkAdapter class ***************************************************************/ /// Constructor LinuxNetworkAdapter::LinuxNetworkAdapter ( const condor_sockaddr& ip_addr ) noexcept : UnixNetworkAdapter( ip_addr ) { m_wol_support_mask = 0; m_wol_enable_mask = 0; } LinuxNetworkAdapter::LinuxNetworkAdapter ( const char *name ) noexcept : UnixNetworkAdapter( name ) { m_wol_support_mask = 0; m_wol_enable_mask = 0; } /// Destructor 
LinuxNetworkAdapter::~LinuxNetworkAdapter ( void ) noexcept { } bool LinuxNetworkAdapter::findAdapter( const condor_sockaddr& ip_addr ) { bool found = false; # if (HAVE_STRUCT_IFCONF) && (HAVE_STRUCT_IFREQ) && (HAVE_DECL_SIOCGIFCONF) struct ifconf ifc; int sock; int num_req = 3; // Should be enough for a machine // with lo, eth0, eth1 // Get a 'control socket' for the operations sock = socket(AF_INET, SOCK_DGRAM, 0); if (sock < 0) { derror( "Cannot get control socket for WOL detection" ); return false; } // Loop 'til we've read in all the interfaces, keep increasing // the number that we try to read each time //struct sockaddr_in in_addr; condor_sockaddr addr; ifc.ifc_buf = NULL; // [TODO:IPV6] // ifreq never returns IPv6 address // should change to getifaddrs() while( !found ) { int size = num_req * sizeof(struct ifreq); ifc.ifc_buf = (char *) calloc( num_req, sizeof(struct ifreq) ); ifc.ifc_len = size; int status = ioctl( sock, SIOCGIFCONF, &ifc ); if ( status < 0 ) { derror( "ioctl(SIOCGIFCONF)" ); break; } // Did we find it in the ifc? int num = ifc.ifc_len / sizeof(struct ifreq); struct ifreq *ifr = ifc.ifc_req; for ( int i = 0; i < num; i++, ifr++ ) { //struct sockaddr_in *in = (struct sockaddr_in*)&(ifr->ifr_addr); condor_sockaddr in(&ifr->ifr_addr); //MemCopy( &in_addr, in, sizeof(struct sockaddr_in) ); addr = in; //if ( in->sin_addr.s_addr == ip_addr ) { if ( in.compare_address(ip_addr) ) { setIpAddr( *ifr ); setName( *ifr ); found = true; break; } } // If the length returned by ioctl() is the same as the size // we started with, it probably overflowed.... 
try again if ( (!found) && (ifc.ifc_len == size) ) { num_req += 2; free( ifc.ifc_buf ); ifc.ifc_buf = NULL; } else { break; } } // Make sure we free up the buffer memory if ( ifc.ifc_buf ) { free( ifc.ifc_buf ); } if ( found ) { dprintf( D_FULLDEBUG, "Found interface %s that matches %s\n", interfaceName( ), addr.to_sinful().c_str() ); } else { m_if_name = NULL; dprintf( D_FULLDEBUG, "No interface for address %s\n", addr.to_sinful().c_str() ); } // Don't forget to close the socket! close( sock ); #endif return found; } bool LinuxNetworkAdapter::findAdapter( const char *name ) { bool found = false; # if (HAVE_STRUCT_IFCONF) && (HAVE_STRUCT_IFREQ) && (HAVE_DECL_SIOCGIFCONF) struct ifreq ifr; int sock; // Get a 'control socket' for the operations sock = socket(AF_INET, SOCK_DGRAM, 0); if (sock < 0) { derror( "Cannot get control socket for WOL detection" ); return false; } // Loop 'til we've read in all the interfaces, keep increasing // the number that we try to read each time getName( ifr, name ); int status = ioctl( sock, SIOCGIFADDR, &ifr ); if ( status < 0 ) { derror( "ioctl(SIOCGIFADDR)" ); } else { found = true; setIpAddr( ifr ); } if ( found ) { dprintf( D_FULLDEBUG, "Found interface %s with ip %s\n", name, m_ip_addr.to_ip_string().c_str() ); } else { m_if_name = NULL; dprintf( D_FULLDEBUG, "No interface for name %s\n", name ); } // Don't forget to close the socket! 
close( sock ); #endif return found; } bool LinuxNetworkAdapter::getAdapterInfo( void ) { bool ok = true; # if (HAVE_STRUCT_IFCONF) && (HAVE_STRUCT_IFREQ) && (HAVE_DECL_SIOCGIFCONF) struct ifreq ifr; int sock; int status; // Get a 'control socket' for the operations sock = socket(AF_INET, SOCK_DGRAM, 0); if (sock < 0) { derror( "Cannot get control socket for WOL detection" ); return false; } // Get the hardware address getName( ifr ); status = ioctl( sock, SIOCGIFHWADDR, &ifr ); if ( status < 0 ) { derror( "ioctl(SIOCGIFHWADDR)" ); } else { setHwAddr( ifr ); } // Get the net mask getName( ifr ); ifr.ifr_addr.sa_family = AF_INET; status = ioctl( sock, SIOCGIFNETMASK, &ifr ); if ( status < 0 ) { derror( "ioctl(SIOCGIFNETMASK)" ); } else { setNetMask( ifr ); } // And, we're done close(sock); # endif return ok; } bool LinuxNetworkAdapter::detectWOL ( void ) { bool ok = false; #if (HAVE_DECL_SIOCETHTOOL) && (HAVE_STRUCT_IFREQ) && (HAVE_LINUX_ETHTOOL_H) int err; struct ethtool_wolinfo wolinfo; struct ifreq ifr; memset(&ifr, '\0', sizeof(struct ifreq)); // Open control socket. 
int sock = socket(AF_INET, SOCK_DGRAM, 0); if (sock < 0) { dprintf( D_ALWAYS, "Cannot get control socket for WOL detection\n" ); return false; } // Fill in the WOL request and the ioctl request wolinfo.cmd = ETHTOOL_GWOL; getName( ifr ); ifr.ifr_data = (char *)(& wolinfo); priv_state saved_priv = set_priv( PRIV_ROOT ); err = ioctl(sock, SIOCETHTOOL, &ifr); set_priv( saved_priv ); if ( err < 0 ) { if ( (EPERM != errno) || (geteuid() == 0) ) { derror( "ioctl(SIOCETHTOOL/GWOL)" ); dprintf( D_ALWAYS, "You can safely ignore the above error if you're not" " using hibernation\n" ); } m_wol_support_mask = 0; m_wol_enable_mask = 0; } else { m_wol_support_mask = wolinfo.supported; m_wol_enable_mask = wolinfo.wolopts; ok = true; } // For now, all we support is the "magic" packet setWolBits( NetworkAdapterBase::WOL_HW_SUPPORT, m_wol_support_mask ); setWolBits( NetworkAdapterBase::WOL_HW_ENABLED, m_wol_enable_mask ); dprintf( D_FULLDEBUG, "%s supports Wake-on: %s (raw: 0x%02x)\n", m_if_name, isWakeSupported() ? "yes" : "no", m_wol_support_mask ); dprintf( D_FULLDEBUG, "%s enabled Wake-on: %s (raw: 0x%02x)\n", m_if_name, isWakeEnabled() ? 
"yes" : "no", m_wol_enable_mask ); close( sock ); # endif return ok; } struct WolTable { unsigned bit_mask; NetworkAdapterBase::WOL_BITS wol_bits; }; static WolTable wol_table [] = { # if (HAVE_LINUX_ETHTOOL_H) { WAKE_PHY, NetworkAdapterBase::WOL_PHYSICAL }, { WAKE_UCAST, NetworkAdapterBase::WOL_UCAST }, { WAKE_MCAST, NetworkAdapterBase::WOL_MCAST }, { WAKE_BCAST, NetworkAdapterBase::WOL_BCAST }, { WAKE_ARP, NetworkAdapterBase::WOL_ARP }, { WAKE_MAGIC, NetworkAdapterBase::WOL_MAGIC }, { WAKE_MAGICSECURE, NetworkAdapterBase::WOL_MAGICSECURE }, # endif { 0, NetworkAdapterBase::WOL_NONE } }; void LinuxNetworkAdapter::setWolBits ( WOL_TYPE type, unsigned bits ) { if ( type == WOL_HW_SUPPORT ) { wolResetSupportBits( ); } else { wolResetEnableBits( ); } for( unsigned bit = 0; wol_table[bit].bit_mask; bit++ ) { if ( wol_table[bit].bit_mask & bits ) { wolSetBit( type, wol_table[bit].wol_bits ); } } }
apache-2.0
cloud-elements/element-connect
src/main/webapp/scripts/common/CloudElementsUtils.js
4472
/** * This is Utils class used for common/generic/utils functions * that will be used across the board * * Created by Ramana on 11/3/14. */ var CloudElementsUtils = Class.extend({ /** * Check if passed in value is undefined or null * * @param obj * @returns {boolean} */ isEmpty: function(obj) { return !angular.isDefined(obj) || obj===null || obj.length === 0; }, /** * Reads the URL for search string and returns an object with key/value pair * * @returns {{}} */ pageParameters: function () { var me = this; var locationString = window.location.search.substring(1); if(!me.isEmpty(locationString) && locationString.length > 0 && me.$cookies.cebulkparams == null && (locationString.indexOf('apiKey') > -1 || locationString.indexOf('email') > -1)) { me.$cookies.cebulkparams = locationString; window.location.href = window.location.origin + window.location.pathname; return; } else if (!me.isEmpty(locationString) && locationString.length > 0 && (locationString.indexOf('token') > -1 || locationString.indexOf('appName') > -1)) { //In this scenario just extract the token and refresh the window with out token me.$cookies.cebulkparams = locationString; var params = this.getParamsFromURI(locationString); if(!me.isEmpty(params.key)) { window.location.href = window.location.origin + window.location.pathname+'?key='+params.key; } else { window.location.href = window.location.origin + window.location.pathname; } return; } locationString = me.$cookies.cebulkparams; me.$cookies.cebulkparams = null; if(me.isEmpty(locationString) || locationString == "null") { locationString = window.location.search.substring(1); } if(!me.isEmpty(locationString) && locationString.length > 0) { return this.getParamsFromURI(locationString); } return {}; }, getParamsFromURI: function(query) { // This function is anonymous, is executed immediately and // the return value is assigned to QueryString! 
var query_string = {}; var vars = query.split("&"); for (var i=0;i<vars.length;i++) { var pair = vars[i].split("="); if(pair.length > 2) { var extendedVal = ''; for(var j=2; j< pair.length; j++) { if(pair[j]=="") { extendedVal += '='; } else { extendedVal += pair[j]; } } pair[1] = pair[1]+extendedVal; } // If first entry with this name if (typeof query_string[pair[0]] === "undefined") { query_string[pair[0]] = pair[1]; // If second entry with this name } else if (typeof query_string[pair[0]] === "string") { var arr = [ query_string[pair[0]], pair[1] ]; query_string[pair[0]] = arr; // If third or later entry with this name } else { query_string[pair[0]].push(pair[1]); } } return query_string; }, orderObjects: function(dataStore, sortBy) { dataStore = this.orderBy(dataStore, sortBy); if(!this.isEmpty(dataStore)) { for(var i=0; i< dataStore.length; i++) { var dObj = dataStore[i]; if(!this.isEmpty(dObj.fields) && dObj.fields.length > 0) { dObj.fields = this.orderObjects(dObj.fields, sortBy); } } } return dataStore; } }); /** * Utils object creation * */ (function (){ var utils = Class.extend({ instance: new CloudElementsUtils(), /** * Initialize and configure */ $get:['$http', '$filter', '$cookies', function($http, $filter, $cookies){ this.instance.$http = $http; this.instance.orderBy= $filter('orderBy'); this.instance.$cookies = $cookies return this.instance; }] }) angular.module('CloudElementsUtils',[]) .provider('CloudElementsUtils',utils); }());
apache-2.0
markusgumbel/dshl7
hl7-javasig/gencode/org/hl7/rim/decorators/QuerySpecDecorator.java
6828
/* THIS FILE IS GENERATED AUTOMATICALLY - DO NOT MODIFY. The contents of this file are subject to the Health Level-7 Public License Version 1.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.hl7.org/HPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The Original Code is all this file. The Initial Developer of the Original Code is automatically generated from HL7 copyrighted standards specification which may be subject to different license. Portions created by Initial Developer are Copyright (C) 2002-2004 Health Level Seven, Inc. All Rights Reserved. THIS FILE IS GENERATED AUTOMATICALLY - DO NOT MODIFY. */ package org.hl7.rim.decorators; import org.hl7.rim.QuerySpec; import org.hl7.rim.decorators.QueryEventDecorator; import org.hl7.types.CS; import org.hl7.types.SET; import org.hl7.types.II; import org.hl7.types.INT; import org.hl7.types.CE; import org.hl7.types.TS; import org.hl7.rim.SortControl; import org.hl7.types.impl.CSnull; import org.hl7.types.impl.SETnull; import org.hl7.types.impl.IInull; import org.hl7.types.impl.INTnull; import org.hl7.types.impl.CEnull; import org.hl7.types.impl.TSnull; import /*org.hl7.rim.AssociationSet*/java.util.List; import java.util.Collections; /** Implementation of org.hl7.rim.QuerySpec as an abstract decorator, i.e., a class that returns NULL/NA or nothing for all property accessprs amd that raises UnsupportedOperationExceptions for all mutators. This is used to adapt custom application classes to the RIM class interfaces and only bother about mapping those properties that actually apply to the application class. 
This can be done in one of two ways: (1) the client class can extend the decorator directly, and implement the applicable properties, or (2) the abstract decorator can be extend to a concrete decorator, which would hold a reference to the client object and method bodies to delegate and adapt the applicable properties. @see org.hl7.rim.QuerySpec */ public abstract class QuerySpecDecorator extends org.hl7.rim.decorators.QueryEventDecorator implements QuerySpec { /** Property accessor, returns NULL/NA if not overloaded.modifyCode. @see org.hl7.rim.QuerySpec#getModifyCode */ public CS getModifyCode() { return CSnull.NI; /* should be NA, but that causes trouble for string-literal hibernated properties. */ } /** Property mutator, does nothing if not overloaded.modifyCode. @see org.hl7.rim.QuerySpec#setModifyCode */ public void setModifyCode(CS modifyCode) { /*throw new UnsupportedOperationException();*/ } /** Property accessor, returns NULL/NA if not overloaded.responseElementGroupId. @see org.hl7.rim.QuerySpec#getResponseElementGroupId */ public SET<II> getResponseElementGroupId() { return SETnull.NI; /* should be NA, but that causes trouble for string-literal hibernated properties. */ } /** Property mutator, does nothing if not overloaded.responseElementGroupId. @see org.hl7.rim.QuerySpec#setResponseElementGroupId */ public void setResponseElementGroupId(SET<II> responseElementGroupId) { /*throw new UnsupportedOperationException();*/ } /** Property accessor, returns NULL/NA if not overloaded.responseModalityCode. @see org.hl7.rim.QuerySpec#getResponseModalityCode */ public CS getResponseModalityCode() { return CSnull.NI; /* should be NA, but that causes trouble for string-literal hibernated properties. */ } /** Property mutator, does nothing if not overloaded.responseModalityCode. 
@see org.hl7.rim.QuerySpec#setResponseModalityCode */ public void setResponseModalityCode(CS responseModalityCode) { /*throw new UnsupportedOperationException();*/ } /** Property accessor, returns NULL/NA if not overloaded.responsePriorityCode. @see org.hl7.rim.QuerySpec#getResponsePriorityCode */ public CS getResponsePriorityCode() { return CSnull.NI; /* should be NA, but that causes trouble for string-literal hibernated properties. */ } /** Property mutator, does nothing if not overloaded.responsePriorityCode. @see org.hl7.rim.QuerySpec#setResponsePriorityCode */ public void setResponsePriorityCode(CS responsePriorityCode) { /*throw new UnsupportedOperationException();*/ } /** Property accessor, returns NULL/NA if not overloaded.initialQuantity. @see org.hl7.rim.QuerySpec#getInitialQuantity */ public INT getInitialQuantity() { return INTnull.NI; /* should be NA, but that causes trouble for string-literal hibernated properties. */ } /** Property mutator, does nothing if not overloaded.initialQuantity. @see org.hl7.rim.QuerySpec#setInitialQuantity */ public void setInitialQuantity(INT initialQuantity) { /*throw new UnsupportedOperationException();*/ } /** Property accessor, returns NULL/NA if not overloaded.initialQuantityCode. @see org.hl7.rim.QuerySpec#getInitialQuantityCode */ public CE getInitialQuantityCode() { return CEnull.NI; /* should be NA, but that causes trouble for string-literal hibernated properties. */ } /** Property mutator, does nothing if not overloaded.initialQuantityCode. @see org.hl7.rim.QuerySpec#setInitialQuantityCode */ public void setInitialQuantityCode(CE initialQuantityCode) { /*throw new UnsupportedOperationException();*/ } /** Property accessor, returns NULL/NA if not overloaded.executionAndDeliveryTime. @see org.hl7.rim.QuerySpec#getExecutionAndDeliveryTime */ public TS getExecutionAndDeliveryTime() { return TSnull.NI; /* should be NA, but that causes trouble for string-literal hibernated properties. 
*/ } /** Property mutator, does nothing if not overloaded.executionAndDeliveryTime. @see org.hl7.rim.QuerySpec#setExecutionAndDeliveryTime */ public void setExecutionAndDeliveryTime(TS executionAndDeliveryTime) { /*throw new UnsupportedOperationException();*/ } /** Property accessor, returns an empty collection if not overloaded.sortControl. @see org.hl7.rim.QuerySpec#getSortControl */ public /*AssociationSet*/List<org.hl7.rim.SortControl> getSortControl() { return Collections.EMPTY_LIST; } /** Property mutator, does nothing if not overloaded.sortControl. @see org.hl7.rim.QuerySpec#setSortControl */ public void setSortControl(/*AssociationSet*/List<org.hl7.rim.SortControl> sortControl) { /* throw new UnsupportedOperationException(); */ } /** Association adder, throws UnsupportedOperationException if not overloaded sortControl. @see org.hl7.rim.QuerySpec#setSortControl */ public void addSortControl(org.hl7.rim.SortControl sortControl) { throw new UnsupportedOperationException(); } }
apache-2.0
tangyouyi1513/openrasp
agent/java/engine/src/main/java/com/baidu/openrasp/tool/decompile/DecompilerProvider.java
2357
/* * Copyright 2017-2018 Baidu Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.baidu.openrasp.tool.decompile; import com.strobel.assembler.metadata.TypeDefinition; import com.strobel.decompiler.DecompilationOptions; import com.strobel.decompiler.DecompilerSettings; import com.strobel.decompiler.PlainTextOutput; import java.io.StringWriter; /** * @description: 反编译类 * @author: anyang * @create: 2018/10/19 10:01 */ public class DecompilerProvider { private DecompilerSettings settings; private DecompilationOptions decompilationOptions; private TypeDefinition type; private String textContent = ""; public void generateContent() { final StringWriter stringwriter = new StringWriter(); PlainTextOutput plainTextOutput = new PlainTextOutput(stringwriter) { @Override public void writeDefinition(String text, Object definition, boolean isLocal) { super.writeDefinition(text, definition, isLocal); } @Override public void writeReference(String text, Object reference, boolean isLocal) { super.writeReference(text, reference, isLocal); } }; plainTextOutput.setUnicodeOutputEnabled(decompilationOptions.getSettings().isUnicodeOutputEnabled()); settings.getLanguage().decompileType(type, plainTextOutput, decompilationOptions); textContent = stringwriter.toString(); } public String getTextContent() { return textContent; } public void setDecompilerReferences(DecompilerSettings settings, DecompilationOptions decompilationOptions) { this.settings = settings; this.decompilationOptions = 
decompilationOptions; } public void setType(TypeDefinition type) { this.type = type; } }
apache-2.0
hortonworks/cloudbreak
flow/src/test/java/com/sequenceiq/flow/core/StateConverterAdapterTest.java
706
package com.sequenceiq.flow.core; import org.junit.Assert; import org.junit.Test; import com.sequenceiq.flow.core.restart.DefaultRestartAction; public class StateConverterAdapterTest { private final StateConverterAdapter<TestState> stateConverterAdapter = new StateConverterAdapter<>(TestState.class); @Test public void convertTest() { TestState state = stateConverterAdapter.convert("TEST_STATE"); Assert.assertEquals(TestState.TEST_STATE, state); } private enum TestState implements FlowState { TEST_STATE; @Override public Class<? extends RestartAction> restartAction() { return DefaultRestartAction.class; } } }
apache-2.0
tensorflow/tflite-micro
tensorflow/lite/micro/memory_arena_threshold_test.cc
12205
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include "tensorflow/lite/micro/all_ops_resolver.h" #include "tensorflow/lite/micro/memory_helpers.h" #include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" #include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/models/keyword_scrambled_model_data.h" #include "tensorflow/lite/micro/recording_micro_allocator.h" #include "tensorflow/lite/micro/recording_micro_interpreter.h" #include "tensorflow/lite/micro/testing/micro_test.h" #include "tensorflow/lite/micro/testing/test_conv_model.h" /** * Tests to ensure arena memory allocation does not regress by more than 3%. */ namespace { // Ensure memory doesn't expand more that 3%: constexpr float kAllocationThreshold = 0.03; // TODO(b/160617245): Record persistent allocations to provide a more accurate // number here. constexpr float kAllocationTailMiscCeiling = 2 * 1024; const bool kIs64BitSystem = (sizeof(void*) == 8); constexpr int kKeywordModelTensorArenaSize = 22 * 1024; uint8_t keyword_model_tensor_arena[kKeywordModelTensorArenaSize]; constexpr int kKeywordModelTensorCount = 54; constexpr int kKeywordModelNodeAndRegistrationCount = 15; // NOTE: These values are measured on x86-64: // TODO(b/158651472): Consider auditing these values on non-64 bit systems. 
// TODO(b/199414774): use expression for hardcoded constants such as // kKeywordModelTotalSize. // // Run this test with '--copt=-DTF_LITE_STATIC_MEMORY' to get optimized memory // runtime values: #ifdef TF_LITE_STATIC_MEMORY // Total size contributed by the keyword model excluding the // RecordingMicroAllocator's overhead // TODO(b/207157610): replace magic number that depends on OPs constexpr int kKeywordModelOnlyTotalSize = 14304; // Tail size contributed by the kdyword model excluding the // RecordingMicroAllocator's overhead // TODO(b/207157610): replace magic number that depends on OPs constexpr int kKeywordModelOnlyTailSize = 13632; constexpr int kKeywordModelPersistentTfLiteTensorDataSize = 128; constexpr int kKeywordModelPersistentBufferDataSize = 676; #else // Total size contributed by the keyword model excluding the // RecordingMicroAllocator's overhead. // TODO(b/207157610): replace magic number that depends on OPs constexpr int kKeywordModelOnlyTotalSize = 14752; // Tail size contributed by the keyword model excluding the // RecordingMicroAllocator's overhead // TODO(b/207157610): replace magic number that depends on OPs constexpr int kKeywordModelOnlyTailSize = 14080; constexpr int kKeywordModelPersistentTfLiteTensorDataSize = 224; constexpr int kKeywordModelPersistentBufferDataSize = 676; #endif constexpr int kKeywordModelHeadSize = 672; constexpr int kKeywordModelTfLiteTensorVariableBufferDataSize = 10240; constexpr int kKeywordModelPersistentTfLiteTensorQuantizationData = 64; constexpr int kKeywordModelOpRuntimeDataSize = 148; constexpr int kTestConvModelArenaSize = 12 * 1024; uint8_t test_conv_tensor_arena[kTestConvModelArenaSize]; constexpr int kTestConvModelTensorCount = 15; constexpr int kTestConvModelNodeAndRegistrationCount = 7; // NOTE: These values are measured on x86-64: // TODO(b/158651472): Consider auditing these values on non-64 bit systems. 
#ifdef TF_LITE_STATIC_MEMORY // Total size contributed by the conv model excluding the // RecordingMicroAllocator's overhead // TODO(b/207157610): replace magic number that depends on OPs constexpr int kTestConvModelOnlyTotalSize = 9488; // Tail size contributed by the conv model excluding the // RecordingMicroAllocator's overhead // TODO(b/207157610): replace magic number that depends on OPs constexpr int kTestConvModelOnlyTailSize = 1744; constexpr int kTestConvModelPersistentTfLiteTensorDataSize = 128; constexpr int kTestConvModelPersistentBufferDataSize = 680; #else // Total size contributed by the conv model excluding the // RecordingMicroAllocator's overhead // TODO(b/207157610): replace magic number that depends on OPs constexpr int kTestConvModelOnlyTotalSize = 9760; // Tail size contributed by the conv model excluding the // RecordingMicroAllocator's overhead // TODO(b/207157610): replace magic number that depends on OPs constexpr int kTestConvModelOnlyTailSize = 2016; constexpr int kTestConvModelPersistentTfLiteTensorDataSize = 224; constexpr int kTestConvModelPersistentBufferDataSize = 680; #endif constexpr int kTestConvModelHeadSize = 7744; constexpr int kTestConvModelOpRuntimeDataSize = 136; constexpr int kTestConvModelPersistentTfLiteTensorQuantizationData = 0; struct ModelAllocationThresholds { size_t tensor_count = 0; size_t node_and_registration_count = 0; size_t total_alloc_size = 0; size_t head_alloc_size = 0; size_t tail_alloc_size = 0; size_t tensor_variable_buffer_data_size = 0; size_t persistent_tflite_tensor_data_size = 0; size_t persistent_tflite_tensor_quantization_data_size = 0; size_t op_runtime_data_size = 0; size_t persistent_buffer_data = 0; }; void EnsureAllocatedSizeThreshold(const char* allocation_type, size_t actual, size_t expected) { // TODO(b/158651472): Better auditing of non-64 bit systems: if (kIs64BitSystem) { // 64-bit systems should check floor and ceiling to catch memory savings: TF_LITE_MICRO_EXPECT_NEAR(actual, 
expected, expected * kAllocationThreshold); if (actual != expected) { TF_LITE_REPORT_ERROR(tflite::GetMicroErrorReporter(), "%s threshold failed: %d != %d", allocation_type, actual, expected); } } else { // Non-64 bit systems should just expect allocation does not exceed the // ceiling: TF_LITE_MICRO_EXPECT_LE(actual, expected + expected * kAllocationThreshold); } } void ValidateModelAllocationThresholds( const tflite::RecordingMicroAllocator& allocator, const ModelAllocationThresholds& thresholds) { MicroPrintf("Overhead from RecordingMicroAllocator is %d", tflite::RecordingMicroAllocator::GetDefaultTailUsage()); allocator.PrintAllocations(); EnsureAllocatedSizeThreshold( "Total", allocator.GetSimpleMemoryAllocator()->GetUsedBytes(), thresholds.total_alloc_size); EnsureAllocatedSizeThreshold( "Head", allocator.GetSimpleMemoryAllocator()->GetHeadUsedBytes(), thresholds.head_alloc_size); EnsureAllocatedSizeThreshold( "Tail", allocator.GetSimpleMemoryAllocator()->GetTailUsedBytes(), thresholds.tail_alloc_size); EnsureAllocatedSizeThreshold( "TfLiteEvalTensor", allocator .GetRecordedAllocation( tflite::RecordedAllocationType::kTfLiteEvalTensorData) .used_bytes, sizeof(TfLiteEvalTensor) * thresholds.tensor_count); EnsureAllocatedSizeThreshold( "VariableBufferData", allocator .GetRecordedAllocation( tflite::RecordedAllocationType::kTfLiteTensorVariableBufferData) .used_bytes, thresholds.tensor_variable_buffer_data_size); EnsureAllocatedSizeThreshold( "PersistentTfLiteTensor", allocator .GetRecordedAllocation( tflite::RecordedAllocationType::kPersistentTfLiteTensorData) .used_bytes, thresholds.persistent_tflite_tensor_data_size); EnsureAllocatedSizeThreshold( "PersistentTfliteTensorQuantizationData", allocator .GetRecordedAllocation(tflite::RecordedAllocationType:: kPersistentTfLiteTensorQuantizationData) .used_bytes, thresholds.persistent_tflite_tensor_quantization_data_size); EnsureAllocatedSizeThreshold( "PersistentBufferData", allocator .GetRecordedAllocation( 
tflite::RecordedAllocationType::kPersistentBufferData) .used_bytes, thresholds.persistent_buffer_data); EnsureAllocatedSizeThreshold( "NodeAndRegistration", allocator .GetRecordedAllocation( tflite::RecordedAllocationType::kNodeAndRegistrationArray) .used_bytes, sizeof(tflite::NodeAndRegistration) * thresholds.node_and_registration_count); // Ensure tail allocation recording is not missing any large chunks: size_t tail_est_length = sizeof(TfLiteEvalTensor) * thresholds.tensor_count + thresholds.tensor_variable_buffer_data_size + sizeof(tflite::NodeAndRegistration) * thresholds.node_and_registration_count + thresholds.op_runtime_data_size; TF_LITE_MICRO_EXPECT_LE(thresholds.tail_alloc_size - tail_est_length, kAllocationTailMiscCeiling); } } // namespace TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestKeywordModelMemoryThreshold) { tflite::AllOpsResolver all_ops_resolver; tflite::RecordingMicroInterpreter interpreter( tflite::GetModel(g_keyword_scrambled_model_data), all_ops_resolver, keyword_model_tensor_arena, kKeywordModelTensorArenaSize, tflite::GetMicroErrorReporter()); interpreter.AllocateTensors(); ModelAllocationThresholds thresholds; thresholds.tensor_count = kKeywordModelTensorCount; thresholds.node_and_registration_count = kKeywordModelNodeAndRegistrationCount; thresholds.total_alloc_size = kKeywordModelOnlyTotalSize + tflite::RecordingMicroAllocator::GetDefaultTailUsage(); thresholds.head_alloc_size = kKeywordModelHeadSize; thresholds.tail_alloc_size = kKeywordModelOnlyTailSize + tflite::RecordingMicroAllocator::GetDefaultTailUsage(); thresholds.tensor_variable_buffer_data_size = kKeywordModelTfLiteTensorVariableBufferDataSize; thresholds.op_runtime_data_size = kKeywordModelOpRuntimeDataSize; thresholds.persistent_buffer_data = kKeywordModelPersistentBufferDataSize; thresholds.persistent_tflite_tensor_data_size = kKeywordModelPersistentTfLiteTensorDataSize; thresholds.persistent_tflite_tensor_quantization_data_size = 
kKeywordModelPersistentTfLiteTensorQuantizationData; ValidateModelAllocationThresholds(interpreter.GetMicroAllocator(), thresholds); } TF_LITE_MICRO_TEST(TestConvModelMemoryThreshold) { tflite::AllOpsResolver all_ops_resolver; tflite::RecordingMicroInterpreter interpreter( tflite::GetModel(kTestConvModelData), all_ops_resolver, test_conv_tensor_arena, kTestConvModelArenaSize, tflite::GetMicroErrorReporter()); interpreter.AllocateTensors(); ModelAllocationThresholds thresholds; thresholds.tensor_count = kTestConvModelTensorCount; thresholds.node_and_registration_count = kTestConvModelNodeAndRegistrationCount; thresholds.total_alloc_size = kTestConvModelOnlyTotalSize + tflite::RecordingMicroAllocator::GetDefaultTailUsage(); thresholds.head_alloc_size = kTestConvModelHeadSize; thresholds.tail_alloc_size = kTestConvModelOnlyTailSize + tflite::RecordingMicroAllocator::GetDefaultTailUsage(); thresholds.op_runtime_data_size = kTestConvModelOpRuntimeDataSize; thresholds.persistent_buffer_data = kTestConvModelPersistentBufferDataSize; thresholds.persistent_tflite_tensor_data_size = kTestConvModelPersistentTfLiteTensorDataSize; thresholds.persistent_tflite_tensor_quantization_data_size = kTestConvModelPersistentTfLiteTensorQuantizationData; ValidateModelAllocationThresholds(interpreter.GetMicroAllocator(), thresholds); } TF_LITE_MICRO_TESTS_END
apache-2.0
ostap0207/remotify.me
remotify.common/src/main/java/messages/device/CommandMessage.java
353
/* * To change this template, choose Tools | Templates * and open the template in the editor. */ package messages.device; import messages.Message; /** * * @author Ostap */ public abstract class CommandMessage implements Message{ public static final long serialVersionUID = 1L; public String computerUID; public String deviceUID; }
apache-2.0
codesoftware/NSIGEMCO
src/main/java/co/com/codesoftware/server/ProductosHomeEntity.java
5041
package co.com.codesoftware.server; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlType; /** * <p>Clase Java para productosHomeEntity complex type. * * <p>El siguiente fragmento de esquema especifica el contenido que se espera que haya en esta clase. * * <pre> * &lt;complexType name="productosHomeEntity"> * &lt;complexContent> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"> * &lt;sequence> * &lt;element name="categoria" type="{http://www.w3.org/2001/XMLSchema}int" minOccurs="0"/> * &lt;element name="estado" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/> * &lt;element name="id" type="{http://www.w3.org/2001/XMLSchema}int" minOccurs="0"/> * &lt;element name="nombre" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/> * &lt;element name="rutaImagen" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/> * &lt;element name="sede" type="{http://www.w3.org/2001/XMLSchema}int" minOccurs="0"/> * &lt;element name="subcategoria" type="{http://www.w3.org/2001/XMLSchema}int" minOccurs="0"/> * &lt;/sequence> * &lt;/restriction> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "productosHomeEntity", propOrder = { "categoria", "estado", "id", "nombre", "rutaImagen", "sede", "subcategoria" }) public class ProductosHomeEntity { protected Integer categoria; protected String estado; protected Integer id; protected String nombre; protected String rutaImagen; protected Integer sede; protected Integer subcategoria; /** * Obtiene el valor de la propiedad categoria. * * @return * possible object is * {@link Integer } * */ public Integer getCategoria() { return categoria; } /** * Define el valor de la propiedad categoria. 
* * @param value * allowed object is * {@link Integer } * */ public void setCategoria(Integer value) { this.categoria = value; } /** * Obtiene el valor de la propiedad estado. * * @return * possible object is * {@link String } * */ public String getEstado() { return estado; } /** * Define el valor de la propiedad estado. * * @param value * allowed object is * {@link String } * */ public void setEstado(String value) { this.estado = value; } /** * Obtiene el valor de la propiedad id. * * @return * possible object is * {@link Integer } * */ public Integer getId() { return id; } /** * Define el valor de la propiedad id. * * @param value * allowed object is * {@link Integer } * */ public void setId(Integer value) { this.id = value; } /** * Obtiene el valor de la propiedad nombre. * * @return * possible object is * {@link String } * */ public String getNombre() { return nombre; } /** * Define el valor de la propiedad nombre. * * @param value * allowed object is * {@link String } * */ public void setNombre(String value) { this.nombre = value; } /** * Obtiene el valor de la propiedad rutaImagen. * * @return * possible object is * {@link String } * */ public String getRutaImagen() { return rutaImagen; } /** * Define el valor de la propiedad rutaImagen. * * @param value * allowed object is * {@link String } * */ public void setRutaImagen(String value) { this.rutaImagen = value; } /** * Obtiene el valor de la propiedad sede. * * @return * possible object is * {@link Integer } * */ public Integer getSede() { return sede; } /** * Define el valor de la propiedad sede. * * @param value * allowed object is * {@link Integer } * */ public void setSede(Integer value) { this.sede = value; } /** * Obtiene el valor de la propiedad subcategoria. * * @return * possible object is * {@link Integer } * */ public Integer getSubcategoria() { return subcategoria; } /** * Define el valor de la propiedad subcategoria. 
* * @param value * allowed object is * {@link Integer } * */ public void setSubcategoria(Integer value) { this.subcategoria = value; } }
apache-2.0
atsu85/java2typescript-jackson
src/test/resources/java2typescript/jackson/module/ClassWithGenericTypeTest.classWithGenericTypeParams.d.ts
176
export interface ClassWithGenericTypeParams<K, V> { stringField: string; genericFieldK: K; genericFieldV: V; booleansByStrings: { [key: string ]: boolean;}; }
apache-2.0
jentfoo/aws-sdk-java
aws-java-sdk-securityhub/src/main/java/com/amazonaws/services/securityhub/model/transform/ListTagsForResourceResultJsonUnmarshaller.java
2982
/* * Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.securityhub.model.transform; import java.math.*; import javax.annotation.Generated; import com.amazonaws.services.securityhub.model.*; import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*; import com.amazonaws.transform.*; import com.fasterxml.jackson.core.JsonToken; import static com.fasterxml.jackson.core.JsonToken.*; /** * ListTagsForResourceResult JSON Unmarshaller */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class ListTagsForResourceResultJsonUnmarshaller implements Unmarshaller<ListTagsForResourceResult, JsonUnmarshallerContext> { public ListTagsForResourceResult unmarshall(JsonUnmarshallerContext context) throws Exception { ListTagsForResourceResult listTagsForResourceResult = new ListTagsForResourceResult(); int originalDepth = context.getCurrentDepth(); String currentParentElement = context.getCurrentParentElement(); int targetDepth = originalDepth + 1; JsonToken token = context.getCurrentToken(); if (token == null) token = context.nextToken(); if (token == VALUE_NULL) { return listTagsForResourceResult; } while (true) { if (token == null) break; if (token == FIELD_NAME || token == START_OBJECT) { if (context.testExpression("Tags", targetDepth)) { context.nextToken(); listTagsForResourceResult.setTags(new MapUnmarshaller<String, String>(context.getUnmarshaller(String.class), context .getUnmarshaller(String.class)).unmarshall(context)); } } 
else if (token == END_ARRAY || token == END_OBJECT) { if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) { if (context.getCurrentDepth() <= originalDepth) break; } } token = context.nextToken(); } return listTagsForResourceResult; } private static ListTagsForResourceResultJsonUnmarshaller instance; public static ListTagsForResourceResultJsonUnmarshaller getInstance() { if (instance == null) instance = new ListTagsForResourceResultJsonUnmarshaller(); return instance; } }
apache-2.0
GoogleCloudPlatform/compute-virtual-ethernet-windows
src/rx_ring.cpp
14120
// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "rx_ring.h" // NOLINT: include directory #include <ndis.h> #include "abi.h" // NOLINT: include directory #include "adapter_statistics.h" // NOLINT: include directory #include "device_parameters.h" // NOLINT: include directory #include "packet_assembler.h" // NOLINT: include directory #include "ring_base.h" // NOLINT: include directory #include "rx_packet.h" // NOLINT: include directory #include "rx_ring_entry.h" // NOLINT: include directory #include "spin_lock_context.h" // NOLINT: include directory #include "trace.h" // NOLINT: include directory #include "utils.h" // NOLINT: include directory #include "rx_ring.tmh" // NOLINT: trace message header namespace { // Special number picked by device to help identify whether the packet should be // be processed. constexpr int kSeqPrimeNumber = 7; // Max packet data size the driver can process asynchronously. If the MTU is // larger than half a page plus headers, a single page can not support two // packets and so the synchronous path with a copy must be used. constexpr int kMaxAsyncDataSize = PAGE_SIZE / 2 - sizeof(ETH_HEADER) - kPacketHeaderPadding; // Mask use to flip the point to the first half and second half of the page. constexpr UINT32 kDataRingFlipMask = PAGE_SIZE / 2; constexpr ULONG kMicrosecondsBetweenPollingForRelease = 1000000; // 1s. // We have enough notification blocks to tx_max and rx_max. 
So we start rx // notify block from tx_num_slices. inline UINT GetRxNotifyBlockId(UINT tx_num_slices, UINT id) { return tx_num_slices + id; } // Return the sequence number from flags_sequence field inside rx descriptor. UINT32 GetSequenceNumber(UINT16 flags_sequence) { return RtlUshortByteSwap(flags_sequence) & 0x7; } // Return the next value in range [1, kSeqPrimeNumber]. UINT32 GetNextSequenceNumber(UINT32 seq_number) { return seq_number == kSeqPrimeNumber ? 1 : seq_number + 1; } // Point the rx data ring offset to unused location. // For now, driver just flip the offset between first and second half of the // memory page. In the future, it can be extended to smartly move the offset to // unused buffer. void AdjustRxDataRingOffset(UINT64 current_offset, RxDataRingSlot* rx_data_ring) { UINT64 new_offset = current_offset ^ kDataRingFlipMask; rx_data_ring->queue_page_list_offset = RtlUlonglongByteSwap(new_offset); } } // namespace RxRing::~RxRing() { PAGED_CODE(); Release(); } bool RxRing::Init(UINT32 id, UINT32 slice, UINT32 traffic_class, UINT32 num_descriptor, bool use_raw_addressing, QueuePageList* queue_page_list, UINT32 notify_id, UINT32 max_data_size, AdapterResources* adapter_resource, AdapterStatistics* statistics, const DeviceCounter* device_counters) { PAGED_CODE(); // num_descriptor is expected to be power of 2. NT_ASSERT((num_descriptor & (num_descriptor - 1)) == 0); num_descriptor_ = num_descriptor; descriptor_mask_ = num_descriptor_ - 1; packet_seq_number_ = 1; checksum_offload_enabled_ = false; rss_enabled_ = false; rss_hash_function_ = 0; rss_hash_type_ = 0; max_data_size_ = max_data_size; NdisAllocateSpinLock(&seq_counter_spin_lock_); // The current implementation requires that we have one descriptor per page. 
NT_ASSERT(num_descriptor_ == queue_page_list->num_pages()); DEBUGP(GVNIC_INFO, "[%s] Allocating resource for rx: %u with %u slots", __FUNCTION__, id, num_descriptor_); if (!RingBase::Init(id, slice, traffic_class, use_raw_addressing, queue_page_list, notify_id, adapter_resource, statistics, device_counters)) { return false; } if (!descriptor_ring_.Allocate(adapter_resource->miniport_handle(), num_descriptor_)) { DEBUGP(GVNIC_ERROR, "[%s] ERROR: Memory allocation failed for rx descriptor ring", __FUNCTION__); return false; } if (!data_ring_.Allocate(adapter_resource->miniport_handle(), num_descriptor_)) { DEBUGP(GVNIC_ERROR, "[%s] ERROR: Memory allocation failed for rx data ring", __FUNCTION__); return false; } rx_ring_entries_ = AllocateMemory<RxRingEntry>( adapter_resource->miniport_handle(), num_descriptor_); if (rx_ring_entries_ == nullptr) { return false; } if (!InitRxEntries(adapter_resource->net_buffer_list_pool(), adapter_resource->miniport_handle())) { return false; } return true; } void RxRing::UpdateRssConfig(const RSSConfiguration& rss_config) { rss_enabled_ = rss_config.is_enabled(); rss_hash_function_ = rss_config.hash_func(); rss_hash_type_ = rss_config.hash_type(); #ifdef DBG rss_secret_key_ = rss_config.hash_secret_key(); indirection_table_entry_count_ = rss_config.indirection_table_size(); indirection_table_ = rss_config.get_indirection_table(); #endif } bool RxRing::InitRxEntries(NDIS_HANDLE pool_handle, NDIS_HANDLE miniport_handle) { for (UINT i = 0; i < num_descriptor_; i++) { rx_ring_entries_[i].descriptor = descriptor_ring_.virtual_address() + i; rx_ring_entries_[i].data = data_ring_.virtual_address() + i; rx_ring_entries_[i].ring_pending_count = &pending_async_packet_count_; if (use_raw_addressing()) { rx_ring_entries_[i].data->queue_page_list_offset = RtlUlonglongByteSwap( queue_page_list()->page_physical_address()[i].QuadPart); } else { rx_ring_entries_[i].data->queue_page_list_offset = RtlUlonglongByteSwap(i * PAGE_SIZE); } 
rx_ring_entries_[i].pending_count = 0; // First half of the page. rx_ring_entries_[i].packet_addr[0] = OffsetToPointer(queue_page_list()->pages()[i], kPacketHeaderPadding); rx_ring_entries_[i].eth_header[0] = reinterpret_cast<ETH_HEADER*>(rx_ring_entries_[i].packet_addr[0]); rx_ring_entries_[i].ipv4_header[0] = reinterpret_cast<IPv4Header*>( OffsetToPointer(rx_ring_entries_[i].eth_header[0], sizeof(ETH_HEADER))); rx_ring_entries_[i].net_buffer_lists[0] = NdisAllocateNetBufferAndNetBufferList( pool_handle, /*ContextSize=*/0, /*ContextBackFill=*/0, /*MdlChain=*/nullptr, /*DataOffset=*/0, /*DataLength=*/0); if (rx_ring_entries_[i].net_buffer_lists[0] == nullptr) { DEBUGP(GVNIC_ERROR, "[%s] ERROR: Fail to allocate NET_BUFFER_LIST.", __FUNCTION__); return false; } // Second half of the page. rx_ring_entries_[i].packet_addr[1] = OffsetToPointer( queue_page_list()->pages()[i], kPacketHeaderPadding + PAGE_SIZE / 2); rx_ring_entries_[i].eth_header[1] = reinterpret_cast<ETH_HEADER*>(rx_ring_entries_[i].packet_addr[1]); rx_ring_entries_[i].ipv4_header[1] = reinterpret_cast<IPv4Header*>( OffsetToPointer(rx_ring_entries_[i].eth_header[1], sizeof(ETH_HEADER))); rx_ring_entries_[i].net_buffer_lists[1] = NdisAllocateNetBufferAndNetBufferList( pool_handle, /*ContextSize=*/0, /*ContextBackFill=*/0, /*MdlChain=*/nullptr, /*DataOffset=*/0, /*DataLength=*/0); if (rx_ring_entries_[i].net_buffer_lists[1] == nullptr) { DEBUGP(GVNIC_ERROR, "[%s] ERROR: Fail to allocate NET_BUFFER_LIST.", __FUNCTION__); return false; } for (auto& net_buffer_list : rx_ring_entries_[i].net_buffer_lists) { net_buffer_list->SourceHandle = miniport_handle; net_buffer_list->Status = NDIS_STATUS_SUCCESS; } rx_ring_entries_[i].rsc_next = nullptr; rx_ring_entries_[i].rsc_last = nullptr; } return true; } void RxRing::Release() { PAGED_CODE(); bool was_initialized = Invalidate(); if (!was_initialized) { return; } PrepareForRelease(); while (!IsSafeToRelease()) { DEBUGP(GVNIC_WARNING, "[%s] WARNING: Rx ring has 
outstanding async packets and cannot be " "released. Waiting for %lu microseconds.", __FUNCTION__, kMicrosecondsBetweenPollingForRelease); NdisMSleep(kMicrosecondsBetweenPollingForRelease); } for (UINT i = 0; i < num_descriptor_; i++) { for (auto& net_buffer_list : rx_ring_entries_[i].net_buffer_lists) { if (net_buffer_list != nullptr) { NdisFreeNetBufferList(net_buffer_list); net_buffer_list = nullptr; } } } FreeMemory(rx_ring_entries_); rx_ring_entries_ = nullptr; if (data_ring_) { data_ring_.Release(); } if (descriptor_ring_) { descriptor_ring_.Release(); } NdisFreeSpinLock(&seq_counter_spin_lock_); RingBase::Release(); } void RxRing::SetInitFreeSlot() { // packet_counter starts with num_descriptors_. // The way device figure out how many slot has been process is do a diff // between new doorbell and the old value, with init value 0. // // On init, we report num_descriptor_ so device knows that all slots are free. // // As a side affect, our packet_counter also needs to start with // num_descriptors_ so when new packets is processed and the increased counter // gets pushed to device, it get the correct delta. packet_counter_ = num_descriptor_; WriteDoorbell(num_descriptor_); } bool RxRing::ProcessPendingPackets(bool is_dpc_level, PacketAssembler* packet_assembler) { NT_ASSERT(packet_assembler != nullptr); SpinLockContext lock_context(&seq_counter_spin_lock_, is_dpc_level); UINT32 packet_idx = packet_counter_ & descriptor_mask_; UINT32 current_packet_counter = packet_counter_; RxRingEntry* cur_entry = rx_ring_entries_ + packet_idx; RxDescriptor* cur_desc = cur_entry->descriptor; // Assume we can process all packets by default. bool is_all_packet_processed = true; // The way we detect new packets in rx_ring is as follows: // We have a 3 bit fields in flags_sequence of rx descriptor. This field // increments from 1 to 7 each time device writes an new Entry, and then wraps // back to 1. 
Driver keeps track of the next expected sequence number and // compare it with the sequence number from the descriptor. If it matches, // this is a new packet. // // The algorithm works because the number of descriptor is power of 2 and can // not be multiple of 7. So when device reuse the same descriptor, it is // guaranteed that the sequence number will be different. while (GetSequenceNumber(cur_desc->flags_sequence) == packet_seq_number_) { if (!packet_assembler->CanAllocateNBL()) { DEBUGP(GVNIC_VERBOSE, "[%s]: reach max net_buffer_list to indicate.", __FUNCTION__); is_all_packet_processed = false; break; } RxPacket rx_packet{*cur_entry}; if (checksum_offload_enabled_) { rx_packet.SetChecksumInfo(); } if (rss_enabled_) { #ifdef DBG rx_packet.SetSecretKey(rss_secret_key_); rx_packet.SetIndirectionTable(indirection_table_entry_count_, indirection_table_); #endif rx_packet.SetRssInfo(RtlUlongByteSwap(cur_desc->rss_hash), rss_hash_type_, rss_hash_function_); } // Once we are required to handle a single packet synchronously, we need // to handle all following packets synchronously to prevent out of order // packets, as we indicate all asynchronous packets at once followed by // all synchronous packets. // // TODO(b/178720499): Allow alternating chains of async and sync NBLs. bool required_synchronous_packet_handling = false; // Currently, we only allow one pending packet at max. NT_ASSERT(cur_entry->pending_count < 2); NT_ASSERT(cur_entry->pending_count >= 0); if (!IsPrepareForRelease() && max_data_size_ <= kMaxAsyncDataSize && cur_entry->pending_count == 0 && !required_synchronous_packet_handling) { // If there is no pending packets on current data page, we flip the // data pointer to the other half of the page and let OS handle the packet // asynchronously to reduce data copy. // Driver has a fairly large number of pages so most likely, packets will // be processed in async way. 
AdjustRxDataRingOffset(rx_packet.queue_page_list_offset(), cur_entry->data); NET_BUFFER_LIST* net_buffer_list = packet_assembler->ProcessAsyncPacket(&rx_packet); if (net_buffer_list == nullptr) { is_all_packet_processed = false; break; } IncreaseRxDataRingPendingCount(cur_entry, net_buffer_list); } else { // Cannot flip the pointer so just process it synchronously. required_synchronous_packet_handling = true; if (packet_assembler->ProcessSyncPacket(&rx_packet) == nullptr) { is_all_packet_processed = false; break; } } packet_counter_ += 1; packet_idx = packet_counter_ & descriptor_mask_; cur_entry = rx_ring_entries_ + packet_idx; cur_desc = cur_entry->descriptor; packet_seq_number_ = GetNextSequenceNumber(packet_seq_number_); } if (current_packet_counter != packet_counter_) { WriteDoorbell(packet_counter_); } DEBUGP(GVNIC_VERBOSE, "[%s] RxRing id - %u: packet_counter - %u, seq_number - %u, all " "packets processed - %u", __FUNCTION__, id(), packet_counter_, packet_seq_number_, is_all_packet_processed); return is_all_packet_processed; }
apache-2.0
gregjones60/keycloak
model/invalidation-cache/infinispan/src/main/java/org/keycloak/models/cache/infinispan/ClientAdapter.java
17103
package org.keycloak.models.cache.infinispan;

import org.keycloak.models.*;
import org.keycloak.models.cache.CacheRealmProvider;
import org.keycloak.models.cache.RealmCache;
import org.keycloak.models.cache.entities.CachedClient;

import java.util.*;

/**
 * Cache-backed {@link ClientModel} adapter.
 * <p>
 * Reads are answered from the immutable {@link CachedClient} snapshot until the first
 * mutating call. Every mutator first calls {@link #getDelegateForUpdate()}, which
 * registers a cache invalidation for this client and fetches the database-backed
 * delegate; from that point on all reads and writes are forwarded to {@code updated}.
 *
 * @author <a href="mailto:bill@burkecentral.com">Bill Burke</a>
 * @version $Revision: 1 $
 */
public class ClientAdapter implements ClientModel {
    protected CacheRealmProvider cacheSession;
    protected RealmModel cachedRealm;
    protected RealmCache cache;
    // Non-null once this client has been mutated; all calls delegate to it afterwards.
    protected ClientModel updated;
    protected CachedClient cached;

    public ClientAdapter(RealmModel cachedRealm, CachedClient cached, CacheRealmProvider cacheSession, RealmCache cache) {
        this.cachedRealm = cachedRealm;
        this.cache = cache;
        this.cacheSession = cacheSession;
        this.cached = cached;
    }

    /**
     * Lazily fetches the database-backed delegate and registers a cache invalidation
     * so the stale cached entry gets evicted.
     *
     * @throws IllegalStateException if the client no longer exists in the database
     */
    private void getDelegateForUpdate() {
        if (updated == null) {
            cacheSession.registerApplicationInvalidation(getId());
            updated = cacheSession.getDelegate().getClientById(getId(), cachedRealm);
            if (updated == null) throw new IllegalStateException("Not found in database");
        }
    }

    @Override
    public void updateClient() {
        if (updated != null) updated.updateClient();
    }

    @Override
    public String getId() {
        if (updated != null) return updated.getId();
        return cached.getId();
    }

    public Set<String> getWebOrigins() {
        if (updated != null) return updated.getWebOrigins();
        return cached.getWebOrigins();
    }

    public void setWebOrigins(Set<String> webOrigins) {
        getDelegateForUpdate();
        updated.setWebOrigins(webOrigins);
    }

    public void addWebOrigin(String webOrigin) {
        getDelegateForUpdate();
        updated.addWebOrigin(webOrigin);
    }

    public void removeWebOrigin(String webOrigin) {
        getDelegateForUpdate();
        updated.removeWebOrigin(webOrigin);
    }

    public Set<String> getRedirectUris() {
        if (updated != null) return updated.getRedirectUris();
        return cached.getRedirectUris();
    }

    public void setRedirectUris(Set<String> redirectUris) {
        getDelegateForUpdate();
        updated.setRedirectUris(redirectUris);
    }

    public void addRedirectUri(String redirectUri) {
        getDelegateForUpdate();
        updated.addRedirectUri(redirectUri);
    }

    public void removeRedirectUri(String redirectUri) {
        getDelegateForUpdate();
        updated.removeRedirectUri(redirectUri);
    }

    public boolean isEnabled() {
        if (updated != null) return updated.isEnabled();
        return cached.isEnabled();
    }

    public void setEnabled(boolean enabled) {
        getDelegateForUpdate();
        updated.setEnabled(enabled);
    }

    @Override
    public String getClientAuthenticatorType() {
        if (updated != null) return updated.getClientAuthenticatorType();
        return cached.getClientAuthenticatorType();
    }

    @Override
    public void setClientAuthenticatorType(String clientAuthenticatorType) {
        getDelegateForUpdate();
        updated.setClientAuthenticatorType(clientAuthenticatorType);
    }

    public boolean validateSecret(String secret) {
        // NOTE(review): not a constant-time comparison; if timing attacks are a concern,
        // consider MessageDigest.isEqual — confirm against the project's security policy.
        return secret.equals(getSecret());
    }

    public String getSecret() {
        if (updated != null) return updated.getSecret();
        return cached.getSecret();
    }

    public void setSecret(String secret) {
        getDelegateForUpdate();
        updated.setSecret(secret);
    }

    public String getRegistrationToken() {
        if (updated != null) return updated.getRegistrationToken();
        return cached.getRegistrationToken();
    }

    public void setRegistrationToken(String registrationToken) {
        getDelegateForUpdate();
        updated.setRegistrationToken(registrationToken);
    }

    public boolean isPublicClient() {
        if (updated != null) return updated.isPublicClient();
        return cached.isPublicClient();
    }

    public void setPublicClient(boolean flag) {
        getDelegateForUpdate();
        updated.setPublicClient(flag);
    }

    public boolean isFrontchannelLogout() {
        // Bug fix: previously returned updated.isPublicClient() here (copy-paste error),
        // so the wrong flag was reported whenever a delegate was active.
        if (updated != null) return updated.isFrontchannelLogout();
        return cached.isFrontchannelLogout();
    }

    public void setFrontchannelLogout(boolean flag) {
        getDelegateForUpdate();
        updated.setFrontchannelLogout(flag);
    }

    @Override
    public boolean isFullScopeAllowed() {
        if (updated != null) return updated.isFullScopeAllowed();
        return cached.isFullScopeAllowed();
    }

    @Override
    public void setFullScopeAllowed(boolean value) {
        getDelegateForUpdate();
        updated.setFullScopeAllowed(value);
    }

    // Resolves the cached scope-mapping role ids into RoleModel instances.
    public Set<RoleModel> getScopeMappings() {
        if (updated != null) return updated.getScopeMappings();
        Set<RoleModel> roles = new HashSet<RoleModel>();
        for (String id : cached.getScope()) {
            roles.add(cacheSession.getRoleById(id, getRealm()));
        }
        return roles;
    }

    public void addScopeMapping(RoleModel role) {
        getDelegateForUpdate();
        updated.addScopeMapping(role);
    }

    public void deleteScopeMapping(RoleModel role) {
        getDelegateForUpdate();
        updated.deleteScopeMapping(role);
    }

    // Subset of the scope mappings whose container is this client's realm.
    public Set<RoleModel> getRealmScopeMappings() {
        Set<RoleModel> roleMappings = getScopeMappings();
        Set<RoleModel> appRoles = new HashSet<RoleModel>();
        for (RoleModel role : roleMappings) {
            RoleContainerModel container = role.getContainer();
            if (container instanceof RealmModel) {
                if (((RealmModel) container).getId().equals(cachedRealm.getId())) {
                    appRoles.add(role);
                }
            }
        }
        return appRoles;
    }

    public RealmModel getRealm() {
        return cachedRealm;
    }

    public int getNotBefore() {
        if (updated != null) return updated.getNotBefore();
        return cached.getNotBefore();
    }

    public void setNotBefore(int notBefore) {
        getDelegateForUpdate();
        updated.setNotBefore(notBefore);
    }

    @Override
    public String getProtocol() {
        if (updated != null) return updated.getProtocol();
        return cached.getProtocol();
    }

    @Override
    public void setProtocol(String protocol) {
        getDelegateForUpdate();
        updated.setProtocol(protocol);
    }

    @Override
    public void setAttribute(String name, String value) {
        getDelegateForUpdate();
        updated.setAttribute(name, value);
    }

    @Override
    public void removeAttribute(String name) {
        getDelegateForUpdate();
        updated.removeAttribute(name);
    }

    @Override
    public String getAttribute(String name) {
        if (updated != null) return updated.getAttribute(name);
        return cached.getAttributes().get(name);
    }

    @Override
    public Map<String, String> getAttributes() {
        if (updated != null) return updated.getAttributes();
        // Defensive copy so callers cannot mutate the shared cached map.
        Map<String, String> copy = new HashMap<String, String>();
        copy.putAll(cached.getAttributes());
        return copy;
    }

    @Override
    public Set<ProtocolMapperModel> getProtocolMappers() {
        if (updated != null) return updated.getProtocolMappers();
        return cached.getProtocolMappers();
    }

    @Override
    public ProtocolMapperModel addProtocolMapper(ProtocolMapperModel model) {
        getDelegateForUpdate();
        return updated.addProtocolMapper(model);
    }

    @Override
    public void removeProtocolMapper(ProtocolMapperModel mapping) {
        getDelegateForUpdate();
        updated.removeProtocolMapper(mapping);
    }

    @Override
    public void updateProtocolMapper(ProtocolMapperModel mapping) {
        getDelegateForUpdate();
        updated.updateProtocolMapper(mapping);
    }

    @Override
    public ProtocolMapperModel getProtocolMapperById(String id) {
        for (ProtocolMapperModel mapping : cached.getProtocolMappers()) {
            if (mapping.getId().equals(id)) return mapping;
        }
        return null;
    }

    @Override
    public ProtocolMapperModel getProtocolMapperByName(String protocol, String name) {
        for (ProtocolMapperModel mapping : cached.getProtocolMappers()) {
            if (mapping.getProtocol().equals(protocol) && mapping.getName().equals(name)) return mapping;
        }
        return null;
    }

    @Override
    public String getClientId() {
        if (updated != null) return updated.getClientId();
        return cached.getClientId();
    }

    @Override
    public void setClientId(String clientId) {
        getDelegateForUpdate();
        updated.setClientId(clientId);
        // The realm caches clients by clientId, so renaming requires a realm invalidation.
        cacheSession.registerRealmInvalidation(cachedRealm.getId());
    }

    @Override
    public String getName() {
        if (updated != null) return updated.getName();
        return cached.getName();
    }

    @Override
    public void setName(String name) {
        getDelegateForUpdate();
        updated.setName(name);
    }

    @Override
    public String getDescription() {
        if (updated != null) return updated.getDescription();
        return cached.getDescription();
    }

    @Override
    public void setDescription(String description) {
        getDelegateForUpdate();
        updated.setDescription(description);
    }

    @Override
    public boolean isSurrogateAuthRequired() {
        if (updated != null) return updated.isSurrogateAuthRequired();
        return cached.isSurrogateAuthRequired();
    }

    @Override
    public void setSurrogateAuthRequired(boolean surrogateAuthRequired) {
        getDelegateForUpdate();
        updated.setSurrogateAuthRequired(surrogateAuthRequired);
    }

    @Override
    public String getManagementUrl() {
        if (updated != null) return updated.getManagementUrl();
        return cached.getManagementUrl();
    }

    @Override
    public void setManagementUrl(String url) {
        getDelegateForUpdate();
        updated.setManagementUrl(url);
    }

    @Override
    public String getRootUrl() {
        if (updated != null) return updated.getRootUrl();
        return cached.getRootUrl();
    }

    @Override
    public void setRootUrl(String url) {
        getDelegateForUpdate();
        updated.setRootUrl(url);
    }

    @Override
    public String getBaseUrl() {
        if (updated != null) return updated.getBaseUrl();
        return cached.getBaseUrl();
    }

    @Override
    public void setBaseUrl(String url) {
        getDelegateForUpdate();
        updated.setBaseUrl(url);
    }

    @Override
    public List<String> getDefaultRoles() {
        if (updated != null) return updated.getDefaultRoles();
        return cached.getDefaultRoles();
    }

    @Override
    public void addDefaultRole(String name) {
        getDelegateForUpdate();
        updated.addDefaultRole(name);
    }

    @Override
    public void updateDefaultRoles(String[] defaultRoles) {
        getDelegateForUpdate();
        updated.updateDefaultRoles(defaultRoles);
    }

    // Subset of another client's scope mappings whose container is THIS client.
    @Override
    public Set<RoleModel> getClientScopeMappings(ClientModel client) {
        Set<RoleModel> roleMappings = client.getScopeMappings();
        Set<RoleModel> appRoles = new HashSet<RoleModel>();
        for (RoleModel role : roleMappings) {
            RoleContainerModel container = role.getContainer();
            if (container instanceof RealmModel) {
                // Realm-level roles are intentionally skipped.
            } else {
                ClientModel app = (ClientModel) container;
                if (app.getId().equals(getId())) {
                    appRoles.add(role);
                }
            }
        }
        return appRoles;
    }

    @Override
    public boolean isBearerOnly() {
        if (updated != null) return updated.isBearerOnly();
        return cached.isBearerOnly();
    }

    @Override
    public void setBearerOnly(boolean only) {
        getDelegateForUpdate();
        updated.setBearerOnly(only);
    }

    @Override
    public boolean isConsentRequired() {
        if (updated != null) return updated.isConsentRequired();
        return cached.isConsentRequired();
    }

    @Override
    public void setConsentRequired(boolean consentRequired) {
        getDelegateForUpdate();
        updated.setConsentRequired(consentRequired);
    }

    @Override
    public boolean isStandardFlowEnabled() {
        if (updated != null) return updated.isStandardFlowEnabled();
        return cached.isStandardFlowEnabled();
    }

    @Override
    public void setStandardFlowEnabled(boolean standardFlowEnabled) {
        getDelegateForUpdate();
        updated.setStandardFlowEnabled(standardFlowEnabled);
    }

    @Override
    public boolean isImplicitFlowEnabled() {
        if (updated != null) return updated.isImplicitFlowEnabled();
        return cached.isImplicitFlowEnabled();
    }

    @Override
    public void setImplicitFlowEnabled(boolean implicitFlowEnabled) {
        getDelegateForUpdate();
        updated.setImplicitFlowEnabled(implicitFlowEnabled);
    }

    @Override
    public boolean isDirectAccessGrantsEnabled() {
        if (updated != null) return updated.isDirectAccessGrantsEnabled();
        return cached.isDirectAccessGrantsEnabled();
    }

    @Override
    public void setDirectAccessGrantsEnabled(boolean directAccessGrantsEnabled) {
        getDelegateForUpdate();
        updated.setDirectAccessGrantsEnabled(directAccessGrantsEnabled);
    }

    @Override
    public boolean isServiceAccountsEnabled() {
        if (updated != null) return updated.isServiceAccountsEnabled();
        return cached.isServiceAccountsEnabled();
    }

    @Override
    public void setServiceAccountsEnabled(boolean serviceAccountsEnabled) {
        getDelegateForUpdate();
        updated.setServiceAccountsEnabled(serviceAccountsEnabled);
    }

    @Override
    public RoleModel getRole(String name) {
        if (updated != null) return updated.getRole(name);
        String id = cached.getRoles().get(name);
        if (id == null) return null;
        return cacheSession.getRoleById(id, cachedRealm);
    }

    @Override
    public RoleModel addRole(String name) {
        getDelegateForUpdate();
        RoleModel role = updated.addRole(name);
        cacheSession.registerRoleInvalidation(role.getId());
        return role;
    }

    @Override
    public RoleModel addRole(String id, String name) {
        getDelegateForUpdate();
        RoleModel role = updated.addRole(id, name);
        cacheSession.registerRoleInvalidation(role.getId());
        return role;
    }

    @Override
    public boolean removeRole(RoleModel role) {
        cacheSession.registerRoleInvalidation(role.getId());
        getDelegateForUpdate();
        return updated.removeRole(role);
    }

    @Override
    public Set<RoleModel> getRoles() {
        if (updated != null) return updated.getRoles();

        Set<RoleModel> roles = new HashSet<RoleModel>();
        for (String id : cached.getRoles().values()) {
            RoleModel roleById = cacheSession.getRoleById(id, cachedRealm);
            // Skip ids whose role has been removed from the backing store.
            if (roleById == null) continue;
            roles.add(roleById);
        }
        return roles;
    }

    @Override
    public int getNodeReRegistrationTimeout() {
        if (updated != null) return updated.getNodeReRegistrationTimeout();
        return cached.getNodeReRegistrationTimeout();
    }

    @Override
    public void setNodeReRegistrationTimeout(int timeout) {
        getDelegateForUpdate();
        updated.setNodeReRegistrationTimeout(timeout);
    }

    @Override
    public Map<String, Integer> getRegisteredNodes() {
        if (updated != null) return updated.getRegisteredNodes();
        return cached.getRegisteredNodes();
    }

    @Override
    public void registerNode(String nodeHost, int registrationTime) {
        getDelegateForUpdate();
        updated.registerNode(nodeHost, registrationTime);
    }

    @Override
    public void unregisterNode(String nodeHost) {
        getDelegateForUpdate();
        updated.unregisterNode(nodeHost);
    }

    /**
     * True when this client can see the given role: full-scope allowed, a direct or
     * transitive scope mapping, or one of the client's own roles (or their composites).
     */
    @Override
    public boolean hasScope(RoleModel role) {
        if (updated != null) return updated.hasScope(role);
        if (cached.isFullScopeAllowed() || cached.getScope().contains(role.getId())) return true;

        Set<RoleModel> roles = getScopeMappings();
        for (RoleModel mapping : roles) {
            if (mapping.hasRole(role)) return true;
        }

        roles = getRoles();
        if (roles.contains(role)) return true;

        for (RoleModel mapping : roles) {
            if (mapping.hasRole(role)) return true;
        }
        return false;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || !(o instanceof ClientModel)) return false;

        ClientModel that = (ClientModel) o;
        return that.getId().equals(getId());
    }

    @Override
    public int hashCode() {
        return getId().hashCode();
    }
}
apache-2.0
xiaoliable/EssayClassifier
src/com/winga/xxl/classifier/calculation/IDocVectorAccessor.java
494
package com.winga.xxl.classifier.calculation; import java.util.Map; import com.winga.xxl.classifier.data.store.IDocuments; /** * <p>CreatDate : 2014-9-1</p> * <p>Description : Access the document's title or content vector map conveniently. </p> * <p>( Title and content are indicators currently. )</p> * @author xiaoxiao * @version 1.0 * */ public interface IDocVectorAccessor { Map<Long, Integer> getVector(IDocuments doc); void setVector(IDocuments doc , Map<Long, Integer> v); }
apache-2.0
sidheshenator/autopsy
Core/src/org/sleuthkit/autopsy/timeline/events/db/EventDB.java
44301
/* * Autopsy Forensic Browser * * Copyright 2013 Basis Technology Corp. * Contact: carrier <at> sleuthkit <dot> org * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.sleuthkit.autopsy.timeline.events.db; import com.google.common.base.Stopwatch; import com.google.common.collect.HashMultimap; import com.google.common.collect.SetMultimap; import java.io.File; import java.sql.Connection; import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.sql.Types; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.TimeZone; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.logging.Level; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; import org.joda.time.DateTimeZone; import org.joda.time.Interval; import org.joda.time.Period; import org.openide.util.Exceptions; import org.sleuthkit.autopsy.coreutils.Logger; import org.sleuthkit.autopsy.timeline.TimeLineController; import org.sleuthkit.autopsy.timeline.events.AggregateEvent; import org.sleuthkit.autopsy.timeline.events.TimeLineEvent; import 
org.sleuthkit.autopsy.timeline.events.type.BaseTypes; import org.sleuthkit.autopsy.timeline.events.type.EventType; import org.sleuthkit.autopsy.timeline.events.type.RootEventType; import org.sleuthkit.autopsy.timeline.filters.Filter; import org.sleuthkit.autopsy.timeline.filters.HideKnownFilter; import org.sleuthkit.autopsy.timeline.filters.IntersectionFilter; import org.sleuthkit.autopsy.timeline.filters.TextFilter; import org.sleuthkit.autopsy.timeline.filters.TypeFilter; import org.sleuthkit.autopsy.timeline.filters.UnionFilter; import org.sleuthkit.autopsy.timeline.utils.RangeDivisionInfo; import org.sleuthkit.autopsy.timeline.zooming.DescriptionLOD; import org.sleuthkit.autopsy.timeline.zooming.EventTypeZoomLevel; import org.sleuthkit.autopsy.timeline.zooming.TimeUnits; import org.sleuthkit.autopsy.timeline.zooming.ZoomParams; import org.sleuthkit.datamodel.TskData; import org.sqlite.SQLiteJDBCLoader; /** * This class provides access to the Timeline SQLite database. This * class borrows a lot of ideas and techniques from {@link SleuthkitCase}, * Creating an abstract base class for sqlite databases, or using a higherlevel * persistence api may make sense in the future. 
*/ public class EventDB { private static final String ARTIFACT_ID_COLUMN = "artifact_id"; // NON-NLS private static final String BASE_TYPE_COLUMN = "base_type"; // NON-NLS private static final String EVENT_ID_COLUMN = "event_id"; // NON-NLS //column name constants////////////////////// private static final String FILE_ID_COLUMN = "file_id"; // NON-NLS private static final String FULL_DESCRIPTION_COLUMN = "full_description"; // NON-NLS private static final String KNOWN_COLUMN = "known_state"; // NON-NLS private static final String LAST_ARTIFACT_ID_KEY = "last_artifact_id"; // NON-NLS private static final String LAST_OBJECT_ID_KEY = "last_object_id"; // NON-NLS private static final java.util.logging.Logger LOGGER = Logger.getLogger(EventDB.class.getName()); private static final String MED_DESCRIPTION_COLUMN = "med_description"; // NON-NLS private static final String SHORT_DESCRIPTION_COLUMN = "short_description"; // NON-NLS private static final String SUB_TYPE_COLUMN = "sub_type"; // NON-NLS private static final String TIME_COLUMN = "time"; // NON-NLS private static final String WAS_INGEST_RUNNING_KEY = "was_ingest_running"; // NON-NLS static { //make sure sqlite driver is loaded // possibly redundant try { Class.forName("org.sqlite.JDBC"); // NON-NLS } catch (ClassNotFoundException ex) { LOGGER.log(Level.SEVERE, "Failed to load sqlite JDBC driver", ex); // NON-NLS } } /** * public factory method. Creates and opens a connection to a database at * the given path. If a database does not already exist at that path, one is * created. 
* * @param dbPath * * @return */ public static EventDB getEventDB(String dbPath) { try { EventDB eventDB = new EventDB(dbPath + File.separator + "events.db"); // NON-NLS return eventDB; } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "sql error creating database connection", ex); // NON-NLS return null; } catch (Exception ex) { LOGGER.log(Level.SEVERE, "error creating database connection", ex); // NON-NLS return null; } } static List<Integer> getActiveSubTypes(TypeFilter filter) { if (filter.isActive()) { if (filter.getSubFilters().isEmpty()) { return Collections.singletonList(RootEventType.allTypes.indexOf(filter.getEventType())); } else { return filter.getSubFilters().stream().flatMap((Filter t) -> getActiveSubTypes((TypeFilter) t).stream()).collect(Collectors.toList()); } } else { return Collections.emptyList(); } } static String getSQLWhere(IntersectionFilter filter) { return filter.getSubFilters().stream() .filter(Filter::isActive) .map(EventDB::getSQLWhere) .collect(Collectors.joining(" and ", "( ", ")")); // NON-NLS } static String getSQLWhere(UnionFilter filter) { return filter.getSubFilters().stream() .filter(Filter::isActive) .map(EventDB::getSQLWhere) .collect(Collectors.joining(" or ", "( ", ")")); // NON-NLS } private static String getSQLWhere(Filter filter) { //TODO: this is here so that the filters don't depend, even implicitly, on the db, but it leads to some nasty code //it would all be much easier if all the getSQLWhere methods where moved to their respective filter classes String result = ""; if (filter == null) { return "1"; } else if (filter instanceof HideKnownFilter) { result = getSQLWhere((HideKnownFilter) filter); } else if (filter instanceof TextFilter) { result = getSQLWhere((TextFilter) filter); } else if (filter instanceof TypeFilter) { result = getSQLWhere((TypeFilter) filter); } else if (filter instanceof IntersectionFilter) { result = getSQLWhere((IntersectionFilter) filter); } else if (filter instanceof UnionFilter) { result = 
getSQLWhere((UnionFilter) filter); } else { return "1"; } result = StringUtils.deleteWhitespace(result).equals("(1and1and1)") ? "1" : result; // NON-NLS //System.out.println(result); return result; } private static String getSQLWhere(HideKnownFilter filter) { return (filter.isActive()) ? "(known_state is not '" + TskData.FileKnown.KNOWN.getFileKnownValue() + "')" // NON-NLS : "1"; } private static String getSQLWhere(TextFilter filter) { if (filter.isActive()) { if (StringUtils.isBlank(filter.getText())) { return "1"; } String strip = StringUtils.strip(filter.getText()); return "((" + MED_DESCRIPTION_COLUMN + " like '%" + strip + "%') or (" // NON-NLS + FULL_DESCRIPTION_COLUMN + " like '%" + strip + "%') or (" // NON-NLS + SHORT_DESCRIPTION_COLUMN + " like '%" + strip + "%'))"; // NON-NLS } else { return "1"; } } /** * generate a sql where clause for the given type filter, while trying to be * as simple as possible to improve performance. * * @param filter * * @return */ private static String getSQLWhere(TypeFilter filter) { if (filter.isActive() == false) { return "0"; } else if (filter.getEventType() instanceof RootEventType) { //if the filter is a root filter and all base type filtes and subtype filters are active, if (filter.getSubFilters().stream().allMatch(f -> f.isActive() && ((TypeFilter) f).getSubFilters().stream().allMatch(Filter::isActive))) { return "1"; //then collapse clause to true } } return "(" + SUB_TYPE_COLUMN + " in (" + StringUtils.join(getActiveSubTypes(filter), ",") + "))"; // NON-NLS } private volatile Connection con; private final String dbPath; private PreparedStatement getDBInfoStmt; private PreparedStatement getEventByIDStmt; private PreparedStatement getMaxTimeStmt; private PreparedStatement getMinTimeStmt; private PreparedStatement insertRowStmt; private final Set<PreparedStatement> preparedStatements = new HashSet<>(); private PreparedStatement recordDBInfoStmt; private final ReentrantReadWriteLock rwLock = new 
ReentrantReadWriteLock(true); //use fairness policy private final Lock DBLock = rwLock.writeLock(); //using exclusing lock for all db ops for now private EventDB(String dbPath) throws SQLException, Exception { this.dbPath = dbPath; initializeDB(); } @Override public void finalize() throws Throwable { try { closeDBCon(); } finally { super.finalize(); } } public Interval getSpanningInterval(Collection<Long> eventIDs) { Interval span = null; dbReadLock(); try (Statement stmt = con.createStatement(); //You can't inject multiple values into one ? paramater in prepared statement, //so we make new statement each time... ResultSet rs = stmt.executeQuery("select Min(time), Max(time) from events where event_id in (" + StringUtils.join(eventIDs, ", ") + ")");) { // NON-NLS while (rs.next()) { span = new Interval(rs.getLong("Min(time)"), rs.getLong("Max(time)") + 1, DateTimeZone.UTC); // NON-NLS } } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "Error executing get spanning interval query.", ex); // NON-NLS } finally { dbReadUnlock(); } return span; } EventTransaction beginTransaction() { return new EventTransaction(); } void closeDBCon() { if (con != null) { try { closeStatements(); con.close(); } catch (SQLException ex) { LOGGER.log(Level.WARNING, "Failed to close connection to evetns.db", ex); // NON-NLS } } con = null; } void commitTransaction(EventTransaction tr, Boolean notify) { if (tr.isClosed()) { throw new IllegalArgumentException("can't close already closed transaction"); // NON-NLS } tr.commit(notify); } int countAllEvents() { int result = -1; dbReadLock(); //TODO convert this to prepared statement -jm try (ResultSet rs = con.createStatement().executeQuery("select count(*) as count from events")) { // NON-NLS while (rs.next()) { result = rs.getInt("count"); // NON-NLS break; } } catch (SQLException ex) { Exceptions.printStackTrace(ex); } finally { dbReadUnlock(); } return result; } Map<EventType, Long> countEvents(ZoomParams params) { if (params.getTimeRange() 
!= null) { return countEvents(params.getTimeRange().getStartMillis() / 1000, params.getTimeRange().getEndMillis() / 1000, params.getFilter(), params.getTypeZoomLevel()); } else { return Collections.emptyMap(); } } /** * Lock to protect against read while it is in a write transaction state. * Supports multiple concurrent readers if there is no writer. MUST always * call dbReadUnLock() as early as possible, in the same thread where * dbReadLock() was called. */ void dbReadLock() { DBLock.lock(); } /** * Release previously acquired read lock acquired in this thread using * dbReadLock(). Call in "finally" block to ensure the lock is always * released. */ void dbReadUnlock() { DBLock.unlock(); } //////////////general database logic , mostly borrowed from sleuthkitcase void dbWriteLock() { //Logger.getLogger("LOCK").log(Level.INFO, "Locking " + rwLock.toString()); DBLock.lock(); } /** * Release previously acquired write lock acquired in this thread using * dbWriteLock(). Call in "finally" block to ensure the lock is always * released. 
*/ void dbWriteUnlock() { //Logger.getLogger("LOCK").log(Level.INFO, "UNLocking " + rwLock.toString()); DBLock.unlock(); } void dropTable() { //TODO: use prepared statement - jm dbWriteLock(); try (Statement createStatement = con.createStatement()) { createStatement.execute("drop table if exists events"); // NON-NLS } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "could not drop old events table", ex); // NON-NLS } finally { dbWriteUnlock(); } } List<AggregateEvent> getAggregatedEvents(ZoomParams params) { return getAggregatedEvents(params.getTimeRange(), params.getFilter(), params.getTypeZoomLevel(), params.getDescrLOD()); } Interval getBoundingEventsInterval(Interval timeRange, Filter filter) { long start = timeRange.getStartMillis() / 1000; long end = timeRange.getEndMillis() / 1000; final String sqlWhere = getSQLWhere(filter); dbReadLock(); try (Statement stmt = con.createStatement(); //can't use prepared statement because of complex where clause ResultSet rs = stmt.executeQuery(" select (select Max(time) from events where time <=" + start + " and " + sqlWhere + ") as start,(select Min(time) from events where time >= " + end + " and " + sqlWhere + ") as end")) { // NON-NLS while (rs.next()) { long start2 = rs.getLong("start"); // NON-NLS long end2 = rs.getLong("end"); // NON-NLS if (end2 == 0) { end2 = getMaxTime(); } //System.out.println(start2 + " " + start + " " + end + " " + end2); return new Interval(start2 * 1000, (end2 + 1) * 1000, TimeLineController.getJodaTimeZone()); } } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "Failed to get MIN time.", ex); // NON-NLS } finally { dbReadUnlock(); } return null; } TimeLineEvent getEventById(Long eventID) { TimeLineEvent result = null; dbReadLock(); try { getEventByIDStmt.clearParameters(); getEventByIDStmt.setLong(1, eventID); try (ResultSet rs = getEventByIDStmt.executeQuery()) { while (rs.next()) { result = constructTimeLineEvent(rs); break; } } } catch (SQLException sqlEx) { LOGGER.log(Level.SEVERE, 
"exception while querying for event with id = " + eventID, sqlEx); // NON-NLS } finally { dbReadUnlock(); } return result; } Set<Long> getEventIDs(Interval timeRange, Filter filter) { return getEventIDs(timeRange.getStartMillis() / 1000, timeRange.getEndMillis() / 1000, filter); } Set<Long> getEventIDs(Long startTime, Long endTime, Filter filter) { if (Objects.equals(startTime, endTime)) { endTime++; } Set<Long> resultIDs = new HashSet<>(); dbReadLock(); final String query = "select event_id from events where time >= " + startTime + " and time <" + endTime + " and " + getSQLWhere(filter); // NON-NLS //System.out.println(query); try (Statement stmt = con.createStatement(); ResultSet rs = stmt.executeQuery(query)) { while (rs.next()) { resultIDs.add(rs.getLong(EVENT_ID_COLUMN)); } } catch (SQLException sqlEx) { LOGGER.log(Level.SEVERE, "failed to execute query for event ids in range", sqlEx); // NON-NLS } finally { dbReadUnlock(); } return resultIDs; } long getLastArtfactID() { return getDBInfo(LAST_ARTIFACT_ID_KEY, -1); } long getLastObjID() { return getDBInfo(LAST_OBJECT_ID_KEY, -1); } /** @return maximum time in seconds from unix epoch */ Long getMaxTime() { dbReadLock(); try (ResultSet rs = getMaxTimeStmt.executeQuery()) { while (rs.next()) { return rs.getLong("max"); // NON-NLS } } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "Failed to get MAX time.", ex); // NON-NLS } finally { dbReadUnlock(); } return -1l; } /** @return maximum time in seconds from unix epoch */ Long getMinTime() { dbReadLock(); try (ResultSet rs = getMinTimeStmt.executeQuery()) { while (rs.next()) { return rs.getLong("min"); // NON-NLS } } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "Failed to get MIN time.", ex); // NON-NLS } finally { dbReadUnlock(); } return -1l; } boolean getWasIngestRunning() { return getDBInfo(WAS_INGEST_RUNNING_KEY, 0) != 0; } /** * create the table and indices if they don't already exist * * * @return the number of rows in the table , count > 0 
indicating an * existing table */ final synchronized void initializeDB() { try { if (isClosed()) { openDBCon(); } configureDB(); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "problem accessing database", ex); // NON-NLS } dbWriteLock(); try { try (Statement stmt = con.createStatement()) { String sql = "CREATE TABLE if not exists db_info " // NON-NLS + " ( key TEXT, " // NON-NLS + " value INTEGER, " // NON-NLS + "PRIMARY KEY (key))"; // NON-NLS stmt.execute(sql); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "problem creating db_info table", ex); // NON-NLS } try (Statement stmt = con.createStatement()) { String sql = "CREATE TABLE if not exists events " // NON-NLS + " (event_id INTEGER PRIMARY KEY, " // NON-NLS + " file_id INTEGER, " // NON-NLS + " artifact_id INTEGER, " // NON-NLS + " time INTEGER, " // NON-NLS + " sub_type INTEGER, " // NON-NLS + " base_type INTEGER, " // NON-NLS + " full_description TEXT, " // NON-NLS + " med_description TEXT, " // NON-NLS + " short_description TEXT, " // NON-NLS + " known_state INTEGER)"; // NON-NLS stmt.execute(sql); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "problem creating database table", ex); // NON-NLS } try (Statement stmt = con.createStatement()) { String sql = "CREATE INDEX if not exists file_idx ON events(file_id)"; // NON-NLS stmt.execute(sql); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "problem creating file_idx", ex); // NON-NLS } try (Statement stmt = con.createStatement()) { String sql = "CREATE INDEX if not exists artifact_idx ON events(artifact_id)"; // NON-NLS stmt.execute(sql); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "problem creating artifact_idx", ex); // NON-NLS } //for common queries the covering indexes below were better, but having the time index 'blocke' them // try (Statement stmt = con.createStatement()) { // String sql = "CREATE INDEX if not exists time_idx ON events(time)"; // stmt.execute(sql); // } catch (SQLException ex) { // 
LOGGER.log(Level.SEVERE, "problem creating time_idx", ex); // } try (Statement stmt = con.createStatement()) { String sql = "CREATE INDEX if not exists sub_type_idx ON events(sub_type, time)"; // NON-NLS stmt.execute(sql); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "problem creating sub_type_idx", ex); // NON-NLS } try (Statement stmt = con.createStatement()) { String sql = "CREATE INDEX if not exists base_type_idx ON events(base_type, time)"; // NON-NLS stmt.execute(sql); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "problem creating base_type_idx", ex); // NON-NLS } try (Statement stmt = con.createStatement()) { String sql = "CREATE INDEX if not exists known_idx ON events(known_state)"; // NON-NLS stmt.execute(sql); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "problem creating known_idx", ex); // NON-NLS } try { insertRowStmt = prepareStatement( "INSERT INTO events (file_id ,artifact_id, time, sub_type, base_type, full_description, med_description, short_description, known_state) " // NON-NLS + "VALUES (?,?,?,?,?,?,?,?,?)"); // NON-NLS getMaxTimeStmt = prepareStatement("select Max(time) as max from events"); // NON-NLS getMinTimeStmt = prepareStatement("select Min(time) as min from events"); // NON-NLS getEventByIDStmt = prepareStatement("select * from events where event_id = ?"); // NON-NLS recordDBInfoStmt = prepareStatement("insert or replace into db_info (key, value) values (?, ?)"); // NON-NLS getDBInfoStmt = prepareStatement("select value from db_info where key = ?"); // NON-NLS } catch (SQLException sQLException) { LOGGER.log(Level.SEVERE, "failed to prepareStatment", sQLException); // NON-NLS } } finally { dbWriteUnlock(); } } void insertEvent(long time, EventType type, Long objID, Long artifactID, String fullDescription, String medDescription, String shortDescription, TskData.FileKnown known) { EventTransaction trans = beginTransaction(); insertEvent(time, type, objID, artifactID, fullDescription, medDescription, 
shortDescription, known, trans); commitTransaction(trans, true); } /** * use transactions to update files * * @param f * @param tr */ void insertEvent(long time, EventType type, Long objID, Long artifactID, String fullDescription, String medDescription, String shortDescription, TskData.FileKnown known, EventTransaction tr) { if (tr.isClosed()) { throw new IllegalArgumentException("can't update database with closed transaction"); // NON-NLS } int typeNum; int superTypeNum; typeNum = RootEventType.allTypes.indexOf(type); superTypeNum = type.getSuperType().ordinal(); dbWriteLock(); try { //"INSERT INTO events (file_id ,artifact_id, time, sub_type, base_type, full_description, med_description, short_description) " insertRowStmt.clearParameters(); if (objID != null) { insertRowStmt.setLong(1, objID); } else { insertRowStmt.setNull(1, Types.INTEGER); } if (artifactID != null) { insertRowStmt.setLong(2, artifactID); } else { insertRowStmt.setNull(2, Types.INTEGER); } insertRowStmt.setLong(3, time); if (typeNum != -1) { insertRowStmt.setInt(4, typeNum); } else { insertRowStmt.setNull(4, Types.INTEGER); } insertRowStmt.setInt(5, superTypeNum); insertRowStmt.setString(6, fullDescription); insertRowStmt.setString(7, medDescription); insertRowStmt.setString(8, shortDescription); insertRowStmt.setByte(9, known == null ? 
TskData.FileKnown.UNKNOWN.getFileKnownValue() : known.getFileKnownValue()); insertRowStmt.executeUpdate(); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "failed to insert event", ex); // NON-NLS } finally { dbWriteUnlock(); } } boolean isClosed() throws SQLException { if (con == null) { return true; } return con.isClosed(); } void openDBCon() { try { if (con == null || con.isClosed()) { con = DriverManager.getConnection("jdbc:sqlite:" + dbPath); // NON-NLS } } catch (SQLException ex) { LOGGER.log(Level.WARNING, "Failed to open connection to events.db", ex); // NON-NLS } } void recordLastArtifactID(long lastArtfID) { recordDBInfo(LAST_ARTIFACT_ID_KEY, lastArtfID); } void recordLastObjID(Long lastObjID) { recordDBInfo(LAST_OBJECT_ID_KEY, lastObjID); } void recordWasIngestRunning(boolean wasIngestRunning) { recordDBInfo(WAS_INGEST_RUNNING_KEY, (wasIngestRunning ? 1 : 0)); } void rollBackTransaction(EventTransaction trans) { trans.rollback(); } boolean tableExists() { //TODO: use prepared statement - jm try (Statement createStatement = con.createStatement(); ResultSet executeQuery = createStatement.executeQuery("SELECT name FROM sqlite_master WHERE type='table' AND name='events'")) { // NON-NLS if (executeQuery.getString("name").equals("events") == false) { // NON-NLS return false; } } catch (SQLException ex) { Exceptions.printStackTrace(ex); } return true; } private void closeStatements() throws SQLException { for (PreparedStatement pStmt : preparedStatements) { pStmt.close(); } } private void configureDB() throws SQLException { dbWriteLock(); //this should match Sleuthkit db setupt try (Statement statement = con.createStatement()) { //reduce i/o operations, we have no OS crash recovery anyway statement.execute("PRAGMA synchronous = OFF;"); // NON-NLS //we don't use this feature, so turn it off for minimal speed up on queries //this is deprecated and not recomended statement.execute("PRAGMA count_changes = OFF;"); // NON-NLS //this made a big difference to 
query speed statement.execute("PRAGMA temp_store = MEMORY"); // NON-NLS //this made a modest improvement in query speeds statement.execute("PRAGMA cache_size = 50000"); // NON-NLS //we never delete anything so... statement.execute("PRAGMA auto_vacuum = 0"); // NON-NLS //allow to query while in transaction - no need read locks statement.execute("PRAGMA read_uncommitted = True;"); // NON-NLS } finally { dbWriteUnlock(); } try { LOGGER.log(Level.INFO, String.format("sqlite-jdbc version %s loaded in %s mode", // NON-NLS SQLiteJDBCLoader.getVersion(), SQLiteJDBCLoader.isNativeMode() ? "native" : "pure-java")); // NON-NLS } catch (Exception exception) { } } private TimeLineEvent constructTimeLineEvent(ResultSet rs) throws SQLException { EventType type = RootEventType.allTypes.get(rs.getInt(SUB_TYPE_COLUMN)); return new TimeLineEvent(rs.getLong(EVENT_ID_COLUMN), rs.getLong(FILE_ID_COLUMN), rs.getLong(ARTIFACT_ID_COLUMN), rs.getLong(TIME_COLUMN), type, rs.getString(FULL_DESCRIPTION_COLUMN), rs.getString(MED_DESCRIPTION_COLUMN), rs.getString(SHORT_DESCRIPTION_COLUMN), TskData.FileKnown.valueOf(rs.getByte(KNOWN_COLUMN))); } /** * count all the events with the given options and return a map organizing * the counts in a hierarchy from date > eventtype> count * * * @param startTime events before this time will be excluded (seconds from * unix epoch) * @param endTime events at or after this time will be excluded (seconds * from unix epoch) * @param filter only events that pass this filter will be counted * @param zoomLevel only events of this type or a subtype will be counted * and the counts will be organized into bins for each of the subtypes of * the given event type * * @return a map organizing the counts in a hierarchy from date > eventtype> * count */ private Map<EventType, Long> countEvents(Long startTime, Long endTime, Filter filter, EventTypeZoomLevel zoomLevel) { if (Objects.equals(startTime, endTime)) { endTime++; } Map<EventType, Long> typeMap = new HashMap<>(); //do 
we want the root or subtype column of the databse final boolean useSubTypes = (zoomLevel == EventTypeZoomLevel.SUB_TYPE); //get some info about the range of dates requested final String queryString = "select count(*), " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN) // NON-NLS + " from events where time >= " + startTime + " and time < " + endTime + " and " + getSQLWhere(filter) // NON-NLS + " GROUP BY " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN); // NON-NLS ResultSet rs = null; dbReadLock(); //System.out.println(queryString); try (Statement stmt = con.createStatement();) { Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); rs = stmt.executeQuery(queryString); stopwatch.stop(); // System.out.println(stopwatch.elapsedMillis() / 1000.0 + " seconds"); while (rs.next()) { EventType type = useSubTypes ? RootEventType.allTypes.get(rs.getInt(SUB_TYPE_COLUMN)) : BaseTypes.values()[rs.getInt(BASE_TYPE_COLUMN)]; typeMap.put(type, rs.getLong("count(*)")); // NON-NLS } } catch (Exception ex) { LOGGER.log(Level.SEVERE, "error getting count of events from db.", ex); // NON-NLS } finally { try { rs.close(); } catch (SQLException ex) { Exceptions.printStackTrace(ex); } dbReadUnlock(); } return typeMap; } /** * //TODO: update javadoc //TODO: split this into helper methods * * get a list of {@link AggregateEvent}s. * * General algorithm is as follows: * * - get all aggregate events, via one db query. * - sort them into a map from (type, description)-> aggevent * - for each key in map, merge the events and accumulate them in a list * to return * * * @param timeRange the Interval within in which all returned aggregate * events will be. 
* @param filter only events that pass the filter will be included in * aggregates events returned * @param zoomLevel only events of this level will be included * @param lod description level of detail to use when grouping events * * * @return a list of aggregate events within the given timerange, that pass * the supplied filter, aggregated according to the given event type and * description zoom levels */ private List<AggregateEvent> getAggregatedEvents(Interval timeRange, Filter filter, EventTypeZoomLevel zoomLevel, DescriptionLOD lod) { String descriptionColumn = getDescriptionColumn(lod); final boolean useSubTypes = (zoomLevel.equals(EventTypeZoomLevel.SUB_TYPE)); //get some info about the time range requested RangeDivisionInfo rangeInfo = RangeDivisionInfo.getRangeDivisionInfo(timeRange); //use 'rounded out' range long start = timeRange.getStartMillis() / 1000;//.getLowerBound(); long end = timeRange.getEndMillis() / 1000;//Millis();//rangeInfo.getUpperBound(); if (Objects.equals(start, end)) { end++; } //get a sqlite srtftime format string String strfTimeFormat = getStrfTimeFormat(rangeInfo.getPeriodSize()); //effectively map from type to (map from description to events) Map<EventType, SetMultimap< String, AggregateEvent>> typeMap = new HashMap<>(); //get all agregate events in this time unit dbReadLock(); String query = "select strftime('" + strfTimeFormat + "',time , 'unixepoch'" + (TimeLineController.getTimeZone().get().equals(TimeZone.getDefault()) ? ", 'localtime'" : "") + ") as interval, group_concat(event_id) as event_ids, Min(time), Max(time), " + descriptionColumn + ", " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN) // NON-NLS + " from events where time >= " + start + " and time < " + end + " and " + getSQLWhere(filter) // NON-NLS + " group by interval, " + (useSubTypes ? 
SUB_TYPE_COLUMN : BASE_TYPE_COLUMN) + " , " + descriptionColumn // NON-NLS + " order by Min(time)"; // NON-NLS //System.out.println(query); ResultSet rs = null; try (Statement stmt = con.createStatement(); // scoop up requested events in groups organized by interval, type, and desription ) { Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); rs = stmt.executeQuery(query); stopwatch.stop(); //System.out.println(stopwatch.elapsedMillis() / 1000.0 + " seconds"); while (rs.next()) { EventType type = useSubTypes ? RootEventType.allTypes.get(rs.getInt(SUB_TYPE_COLUMN)) : BaseTypes.values()[rs.getInt(BASE_TYPE_COLUMN)]; AggregateEvent aggregateEvent = new AggregateEvent( new Interval(rs.getLong("Min(time)") * 1000, rs.getLong("Max(time)") * 1000, TimeLineController.getJodaTimeZone()), // NON-NLS type, Arrays.asList(rs.getString("event_ids").split(",")), // NON-NLS rs.getString(descriptionColumn), lod); //put events in map from type/descrition -> event SetMultimap<String, AggregateEvent> descrMap = typeMap.get(type); if (descrMap == null) { descrMap = HashMultimap.<String, AggregateEvent>create(); typeMap.put(type, descrMap); } descrMap.put(aggregateEvent.getDescription(), aggregateEvent); } } catch (SQLException ex) { Exceptions.printStackTrace(ex); } finally { try { rs.close(); } catch (SQLException ex) { Exceptions.printStackTrace(ex); } dbReadUnlock(); } //result list to return ArrayList<AggregateEvent> aggEvents = new ArrayList<>(); //save this for use when comparing gap size Period timeUnitLength = rangeInfo.getPeriodSize().getPeriod(); //For each (type, description) key, merge agg events for (SetMultimap<String, AggregateEvent> descrMap : typeMap.values()) { for (String descr : descrMap.keySet()) { //run through the sorted events, merging together adjacent events Iterator<AggregateEvent> iterator = descrMap.get(descr).stream() .sorted((AggregateEvent o1, AggregateEvent o2) -> Long.compare(o1.getSpan().getStartMillis(), o2.getSpan().getStartMillis())) 
.iterator(); AggregateEvent current = iterator.next(); while (iterator.hasNext()) { AggregateEvent next = iterator.next(); Interval gap = current.getSpan().gap(next.getSpan()); //if they overlap or gap is less one quarter timeUnitLength //TODO: 1/4 factor is arbitrary. review! -jm if (gap == null || gap.toDuration().getMillis() <= timeUnitLength.toDurationFrom(gap.getStart()).getMillis() / 4) { //merge them current = AggregateEvent.merge(current, next); } else { //done merging into current, set next as new current aggEvents.add(current); current = next; } } aggEvents.add(current); } } //at this point we should have a list of aggregate events. //one per type/description spanning consecutive time units as determined in rangeInfo return aggEvents; } private long getDBInfo(String key, long defaultValue) { dbReadLock(); try { getDBInfoStmt.setString(1, key); try (ResultSet rs = getDBInfoStmt.executeQuery()) { long result = defaultValue; while (rs.next()) { result = rs.getLong("value"); // NON-NLS } return result; } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "failed to read key: " + key + " from db_info", ex); // NON-NLS } finally { dbReadUnlock(); } } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "failed to set key: " + key + " on getDBInfoStmt ", ex); // NON-NLS } return defaultValue; } private String getDescriptionColumn(DescriptionLOD lod) { switch (lod) { case FULL: return FULL_DESCRIPTION_COLUMN; case MEDIUM: return MED_DESCRIPTION_COLUMN; case SHORT: default: return SHORT_DESCRIPTION_COLUMN; } } private String getStrfTimeFormat(TimeUnits info) { switch (info) { case DAYS: return "%Y-%m-%dT00:00:00"; // NON-NLS case HOURS: return "%Y-%m-%dT%H:00:00"; // NON-NLS case MINUTES: return "%Y-%m-%dT%H:%M:00"; // NON-NLS case MONTHS: return "%Y-%m-01T00:00:00"; // NON-NLS case SECONDS: return "%Y-%m-%dT%H:%M:%S"; // NON-NLS case YEARS: return "%Y-01-01T00:00:00"; // NON-NLS default: return "%Y-%m-%dT%H:%M:%S"; // NON-NLS } } private PreparedStatement 
prepareStatement(String queryString) throws SQLException { PreparedStatement prepareStatement = con.prepareStatement(queryString); preparedStatements.add(prepareStatement); return prepareStatement; } private void recordDBInfo(String key, long value) { dbWriteLock(); try { recordDBInfoStmt.setString(1, key); recordDBInfoStmt.setLong(2, value); recordDBInfoStmt.executeUpdate(); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "failed to set dbinfo key: " + key + " value: " + value, ex); // NON-NLS } finally { dbWriteUnlock(); } } /** * inner class that can reference access database connection */ public class EventTransaction { private boolean closed = false; /** * factory creation method * * @param con the {@link ava.sql.Connection} * * @return a LogicalFileTransaction for the given connection * * @throws SQLException */ private EventTransaction() { //get the write lock, released in close() dbWriteLock(); try { con.setAutoCommit(false); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "failed to set auto-commit to to false", ex); // NON-NLS } } private void rollback() { if (!closed) { try { con.rollback(); } catch (SQLException ex1) { LOGGER.log(Level.SEVERE, "Exception while attempting to rollback!!", ex1); // NON-NLS } finally { close(); } } } private void commit(Boolean notify) { if (!closed) { try { con.commit(); // make sure we close before we update, bc they'll need locks close(); if (notify) { // fireNewEvents(newEvents); } } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "Error commiting events.db.", ex); // NON-NLS rollback(); } } } private void close() { if (!closed) { try { con.setAutoCommit(true); } catch (SQLException ex) { LOGGER.log(Level.SEVERE, "Error setting auto-commit to true.", ex); // NON-NLS } finally { closed = true; dbWriteUnlock(); } } } public Boolean isClosed() { return closed; } } public class MultipleTransactionException extends IllegalStateException { private static final String CANNOT_HAVE_MORE_THAN_ONE_OPEN_TRANSACTION = 
"cannot have more than one open transaction"; // NON-NLS public MultipleTransactionException() { super(CANNOT_HAVE_MORE_THAN_ONE_OPEN_TRANSACTION); } } }
apache-2.0
kaiwinter/android-remote-notifications
android-remote-notifications/src/test/java/com/github/kaiwinter/androidremotenotifications/model/UpdatePolicyTest.java
2141
package com.github.kaiwinter.androidremotenotifications.model;

import org.junit.Test;

import java.util.Date;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

/**
 * Verifies that {@link UpdatePolicy} correctly decides whether a notification update from the
 * server is due, based on when the previous update ran.
 */
public final class UpdatePolicyTest {

    /** Number of milliseconds in a single day. */
    private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000;

    /** Builds a {@link Date} lying the given number of days in the past. */
    private static Date daysAgo(int days) {
        return new Date(System.currentTimeMillis() - (days * MILLIS_PER_DAY));
    }

    /**
     * An update must run when no previous update has ever been made.
     */
    @Test
    public void testFirstTime() {
        assertTrue(UpdatePolicy.NOW.shouldUpdate(null));
    }

    @Test
    public void testNow() {
        assertTrue(UpdatePolicy.NOW.shouldUpdate(new Date()));
    }

    /**
     * A weekly update is due once 8 days have passed.
     */
    @Test
    public void testWeeklyUpdate() {
        assertTrue(UpdatePolicy.WEEKLY.shouldUpdate(daysAgo(8)));
    }

    /**
     * No weekly update is due when only 5 days have passed.
     */
    @Test
    public void testWeeklyNoUpdate() {
        assertFalse(UpdatePolicy.WEEKLY.shouldUpdate(daysAgo(5)));
    }

    /**
     * A monthly update is due once 32 days have passed.
     */
    @Test
    public void testMonthlyUpdate() {
        assertTrue(UpdatePolicy.MONTHLY.shouldUpdate(daysAgo(32)));
    }

    /**
     * No monthly update is due when only 15 days have passed.
     */
    @Test
    public void testMonthlyNoUpdate() {
        assertFalse(UpdatePolicy.MONTHLY.shouldUpdate(daysAgo(15)));
    }
}
apache-2.0
imbeebo/harlemcodetrotters
GonqBox/src/gonqbox/servlets/MakePublicPrivate.java
1509
package gonqbox.servlets;

import java.io.IOException;
import java.util.ResourceBundle;

import javax.servlet.RequestDispatcher;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.jsp.jstl.core.Config;

import gonqbox.Pages;
import gonqbox.dao.DAO;
import gonqbox.models.User;

/**
 * Servlet that toggles the public/private flag of a file identified by the
 * {@code fileID} request parameter. Requires a logged-in user in the session;
 * otherwise the request is forwarded back to the index page with an error
 * message from the locale-specific resource bundle.
 */
@WebServlet(name = "makePublicPrivate", urlPatterns = { "/makePublicPrivate" })
public class MakePublicPrivate extends HttpServlet {

    private static final long serialVersionUID = 6409933650931180714L;

    private ResourceBundle bundle = null;

    /**
     * Handles the toggle request.
     *
     * @param request  must carry parameters {@code checkedState} (boolean) and {@code fileID} (int)
     * @param response response used for the error forward when no user is in session
     * @throws ServletException propagated from the forward
     * @throws IOException      propagated from the forward
     */
    @Override
    protected void doPost(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        String loc = Config.get(request.getSession(), Config.FMT_LOCALE).toString();
        bundle = ResourceBundle.getBundle("ui_" + loc);

        if (request.getSession().getAttribute("user") == null) {
            request.setAttribute("index_messenger_err", bundle.getObject("noUserInSession"));
            RequestDispatcher rd = request.getRequestDispatcher(Pages.INDEX.toString());
            rd.forward(request, response);
            // BUG FIX: RequestDispatcher.forward() does not terminate this method.
            // Without this return, the publicity change below still ran for
            // unauthenticated requests.
            return;
        }

        boolean checkedState = Boolean.parseBoolean(request.getParameter("checkedState"));
        int fileID = Integer.parseInt(request.getParameter("fileID"));
        DAO.getInstance().changePublicity(fileID, checkedState);
    }
}
apache-2.0
liulhdarks/darks-learning
src/main/java/darks/learning/common/minispantree/MiniSpanTree.java
1836
/**
 *
 * Copyright 2014 The Darks Learning Project (Liu lihua)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package darks.learning.common.minispantree;

import java.util.List;

import darks.learning.common.blas.Matrix;

/**
 * Base class for minimum-spanning-tree algorithms working on a graph that is
 * described by a list of nodes and an edge matrix, both obtained from a
 * {@link GraphBuilder}.
 *
 * @author lihua.llh
 */
public abstract class MiniSpanTree<T, E>
{

	/** Graph vertices, populated by {@link #buildGraph(GraphBuilder)}. */
	protected List<? extends GraphNode<T>> nodes;

	/** Weighted edge matrix, populated by {@link #buildGraph(GraphBuilder)}. */
	protected Matrix<? extends GraphEdge<E>> edges;

	/**
	 * Initialize the graph from the given builder.
	 *
	 * @param builder supplies the nodes and edges of the graph
	 */
	public void initialize(GraphBuilder<T, E> builder)
	{
		buildGraph(builder);
	}

	/**
	 * Populate {@link #nodes} and {@link #edges} from the builder.
	 *
	 * @param builder supplies the nodes and edges of the graph
	 */
	protected void buildGraph(GraphBuilder<T, E> builder)
	{
		this.nodes = builder.buildNodes();
		this.edges = builder.buildEdges();
	}

	/**
	 * Build the minimum spanning tree, starting from node index 0.
	 */
	public void buildTree()
	{
		buildTree(0);
	}

	/**
	 * Build the minimum spanning tree from the given start position.
	 *
	 * @param startIndex index of the node the tree is grown from
	 */
	public abstract void buildTree(int startIndex);

	/**
	 * Indexes of the nodes in the resulting tree.
	 *
	 * @return node index list
	 */
	public abstract List<Integer> getResultNodesIndex();

	/**
	 * Nodes of the resulting tree.
	 *
	 * @return result nodes
	 */
	public abstract List<? extends GraphNode<T>> getResultNodes();

	/**
	 * Edges of the resulting tree.
	 *
	 * @return result edges
	 */
	public abstract List<? extends GraphEdge<E>> getResultEdges();
}
apache-2.0
aevum/libgdx-cpp
src/Box2D/Dynamics/Joints/b2Joint.cpp
4866
/*
 * Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 * 1. The origin of this software must not be misrepresented; you must not
 * claim that you wrote the original software. If you use this software
 * in a product, an acknowledgment in the product documentation would be
 * appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 * misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

#include <Box2D/Common/b2BlockAllocator.h>
#include <Box2D/Dynamics/Joints/b2DistanceJoint.h>
#include <Box2D/Dynamics/Joints/b2FrictionJoint.h>
#include <Box2D/Dynamics/Joints/b2GearJoint.h>
#include <Box2D/Dynamics/Joints/b2Joint.h>
#include <Box2D/Dynamics/Joints/b2MouseJoint.h>
#include <Box2D/Dynamics/Joints/b2PrismaticJoint.h>
#include <Box2D/Dynamics/Joints/b2PulleyJoint.h>
#include <Box2D/Dynamics/Joints/b2RevoluteJoint.h>
#include <Box2D/Dynamics/Joints/b2RopeJoint.h>
#include <Box2D/Dynamics/Joints/b2WeldJoint.h>
#include <Box2D/Dynamics/Joints/b2WheelJoint.h>
#include <Box2D/Dynamics/b2Body.h>

#include <new>

// Factory: allocates the concrete joint type from the block allocator and
// placement-constructs it from the matching definition struct.
b2Joint* b2Joint::Create(const b2JointDef* def, b2BlockAllocator* allocator)
{
	b2Joint* joint = nullptr;

	switch (def->type)
	{
	case e_distanceJoint:
		joint = new (allocator->Allocate(sizeof(b2DistanceJoint))) b2DistanceJoint((b2DistanceJointDef*)def);
		break;

	case e_mouseJoint:
		joint = new (allocator->Allocate(sizeof(b2MouseJoint))) b2MouseJoint((b2MouseJointDef*)def);
		break;

	case e_prismaticJoint:
		joint = new (allocator->Allocate(sizeof(b2PrismaticJoint))) b2PrismaticJoint((b2PrismaticJointDef*)def);
		break;

	case e_revoluteJoint:
		joint = new (allocator->Allocate(sizeof(b2RevoluteJoint))) b2RevoluteJoint((b2RevoluteJointDef*)def);
		break;

	case e_pulleyJoint:
		joint = new (allocator->Allocate(sizeof(b2PulleyJoint))) b2PulleyJoint((b2PulleyJointDef*)def);
		break;

	case e_gearJoint:
		joint = new (allocator->Allocate(sizeof(b2GearJoint))) b2GearJoint((b2GearJointDef*)def);
		break;

	case e_wheelJoint:
		joint = new (allocator->Allocate(sizeof(b2WheelJoint))) b2WheelJoint((b2WheelJointDef*)def);
		break;

	case e_weldJoint:
		joint = new (allocator->Allocate(sizeof(b2WeldJoint))) b2WeldJoint((b2WeldJointDef*)def);
		break;

	case e_frictionJoint:
		joint = new (allocator->Allocate(sizeof(b2FrictionJoint))) b2FrictionJoint((b2FrictionJointDef*)def);
		break;

	case e_ropeJoint:
		joint = new (allocator->Allocate(sizeof(b2RopeJoint))) b2RopeJoint((b2RopeJointDef*)def);
		break;

	default:
		b2Assert(false);
		break;
	}

	return joint;
}

// Destroys the joint and returns its memory to the block allocator.
// The Free size must match the concrete type used in Create above.
void b2Joint::Destroy(b2Joint* joint, b2BlockAllocator* allocator)
{
	joint->~b2Joint();

	switch (joint->m_type)
	{
	case e_distanceJoint:
		allocator->Free(joint, sizeof(b2DistanceJoint));
		break;

	case e_mouseJoint:
		allocator->Free(joint, sizeof(b2MouseJoint));
		break;

	case e_prismaticJoint:
		allocator->Free(joint, sizeof(b2PrismaticJoint));
		break;

	case e_revoluteJoint:
		allocator->Free(joint, sizeof(b2RevoluteJoint));
		break;

	case e_pulleyJoint:
		allocator->Free(joint, sizeof(b2PulleyJoint));
		break;

	case e_gearJoint:
		allocator->Free(joint, sizeof(b2GearJoint));
		break;

	case e_wheelJoint:
		allocator->Free(joint, sizeof(b2WheelJoint));
		break;

	case e_weldJoint:
		allocator->Free(joint, sizeof(b2WeldJoint));
		break;

	case e_frictionJoint:
		allocator->Free(joint, sizeof(b2FrictionJoint));
		break;

	case e_ropeJoint:
		allocator->Free(joint, sizeof(b2RopeJoint));
		break;

	default:
		b2Assert(false);
		break;
	}
}

// Base-class construction: copies the definition and clears both edge links.
b2Joint::b2Joint(const b2JointDef* def)
{
	// A joint connecting a body to itself is never valid.
	b2Assert(def->bodyA != def->bodyB);

	m_type = def->type;
	m_prev = nullptr;
	m_next = nullptr;
	m_bodyA = def->bodyA;
	m_bodyB = def->bodyB;
	m_index = 0;
	m_collideConnected = def->collideConnected;
	m_islandFlag = false;
	m_userData = def->userData;

	m_edgeA.joint = nullptr;
	m_edgeA.other = nullptr;
	m_edgeA.prev = nullptr;
	m_edgeA.next = nullptr;

	m_edgeB.joint = nullptr;
	m_edgeB.other = nullptr;
	m_edgeB.prev = nullptr;
	m_edgeB.next = nullptr;
}

// A joint participates in the simulation only while both bodies are active.
bool b2Joint::IsActive() const
{
	return m_bodyA->IsActive() && m_bodyB->IsActive();
}
apache-2.0
MathieuPomerleau/JhinBotCore
JhinBot/Database/Implementation/UnitOfWork.cs
2537
using System;
using System.Threading.Tasks;
using JhinBot.Database;
using JhinBot.Repositories;
using JhinBot.Repositories.SQLite_Repo;
using JhinBot.Database.Implementation;

namespace JhinBot
{
    /// <summary>
    /// Unit-of-work over a single <see cref="JhinBotContext"/>. Repository
    /// properties are created lazily on first access and all share the same
    /// context, so <see cref="Complete"/> commits every pending change at once.
    /// </summary>
    public class UnitOfWork : IUnitOfWork
    {
        // NOTE(review): public property with a field-style name kept for
        // backward compatibility with existing callers.
        public JhinBotContext _context { get; }

        private ICommandRepository _commands;
        public ICommandRepository CommandRepo => _commands ?? (_commands = new CommandSQLiteRepo(_context));

        private IReactionRepository _reactions;
        public IReactionRepository ReactionRepo => _reactions ?? (_reactions = new ReactionSQLiteRepo(_context));

        private IEventRepository _events;
        public IEventRepository EventRepo => _events ?? (_events = new EventSQLiteRepo(_context));

        private IBotConfigRepository _botConfigs;
        public IBotConfigRepository BotConfigRepo => _botConfigs ?? (_botConfigs = new BotConfigSQLiteRepo(_context));

        private ILoggingConfigRepository _loggingConfigs;
        public ILoggingConfigRepository LoggingConfigRepo => _loggingConfigs ?? (_loggingConfigs = new LoggingConfigSQLiteRepo(_context));

        private IGuildConfigRepository _guildConfigs;
        public IGuildConfigRepository GuildConfigRepo => _guildConfigs ?? (_guildConfigs = new GuildConfigSQLiteRepo(_context));

        private ISelfAssignedRoleRepository _selfAssignedRoles;
        public ISelfAssignedRoleRepository SelfAssignedRoleRepo => _selfAssignedRoles ?? (_selfAssignedRoles = new SelfAssignedRoleSQLiteRepo(_context));

        private IUserMessageHistoryRepository _userMessageHistory;
        public IUserMessageHistoryRepository UserMessageHistoryRepo => _userMessageHistory ?? (_userMessageHistory = new UserMessageHistorySQLiteRepo(_context));

        private ITagRepository _tagRepository;
        public ITagRepository TagRepository => _tagRepository ?? (_tagRepository = new TagSQLiteRepo(_context));

        /// <summary>Creates a unit of work over the given context.</summary>
        /// <exception cref="ArgumentNullException">when <paramref name="context"/> is null</exception>
        public UnitOfWork(JhinBotContext context)
        {
            // BUG FIX: fail fast instead of deferring a NullReferenceException
            // to the first repository access.
            _context = context ?? throw new ArgumentNullException(nameof(context));
        }

        /// <summary>Persists all pending changes synchronously.</summary>
        public int Complete() => _context.SaveChanges();

        /// <summary>Persists all pending changes asynchronously.</summary>
        public Task<int> CompleteAsync() => _context.SaveChangesAsync();

        private bool disposed = false;

        public void Dispose()
        {
            Dispose(true);
            GC.SuppressFinalize(this);
        }

        /// <summary>
        /// Standard dispose pattern. BUG FIX: made virtual — the class is not
        /// sealed, so subclasses must be able to extend disposal; a non-virtual
        /// Dispose(bool) silently breaks the pattern for derived types.
        /// </summary>
        protected virtual void Dispose(bool disposing)
        {
            if (!disposed)
            {
                if (disposing)
                {
                    _context.Dispose();
                }
            }
            disposed = true;
        }
    }
}
apache-2.0
projectodd/kubernetes
plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go
13055
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bootstrappolicy import ( "strings" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" rbac "k8s.io/kubernetes/pkg/apis/rbac" ) const saRolePrefix = "system:controller:" var ( // controllerRoles is a slice of roles used for controllers controllerRoles = []rbac.ClusterRole{} // controllerRoleBindings is a slice of roles used for controllers controllerRoleBindings = []rbac.ClusterRoleBinding{} ) func addControllerRole(role rbac.ClusterRole) { if !strings.HasPrefix(role.Name, saRolePrefix) { glog.Fatalf(`role %q must start with %q`, role.Name, saRolePrefix) } for _, existingRole := range controllerRoles { if role.Name == existingRole.Name { glog.Fatalf("role %q was already registered", role.Name) } } controllerRoles = append(controllerRoles, role) addClusterRoleLabel(controllerRoles) controllerRoleBindings = append(controllerRoleBindings, rbac.NewClusterBinding(role.Name).SAs("kube-system", role.Name[len(saRolePrefix):]).BindingOrDie()) addClusterRoleBindingLabel(controllerRoleBindings) } func eventsRule() rbac.PolicyRule { return rbac.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie() } func init() { addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(), 
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "cronjob-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("cronjobs").RuleOrDie(), rbac.NewRule("get", "list", "watch", "create", "update", "delete").Groups(batchGroup).Resources("jobs").RuleOrDie(), rbac.NewRule("update").Groups(batchGroup).Resources("cronjobs/status").RuleOrDie(), rbac.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "daemon-set-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch").Groups(extensionsGroup).Resources("daemonsets").RuleOrDie(), rbac.NewRule("update").Groups(extensionsGroup).Resources("daemonsets/status").RuleOrDie(), rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), rbac.NewRule("list", "watch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), rbac.NewRule("create").Groups(legacyGroup).Resources("pods/binding").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "deployment-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch", "update").Groups(extensionsGroup).Resources("deployments").RuleOrDie(), rbac.NewRule("update").Groups(extensionsGroup).Resources("deployments/status").RuleOrDie(), rbac.NewRule("get", "list", "watch", "create", "update", "delete").Groups(extensionsGroup).Resources("replicasets").RuleOrDie(), // TODO: remove "update" once // https://github.com/kubernetes/kubernetes/issues/36897 is resolved. 
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "disruption-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch").Groups(extensionsGroup).Resources("deployments").RuleOrDie(), rbac.NewRule("get", "list", "watch").Groups(extensionsGroup).Resources("replicasets").RuleOrDie(), rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(), rbac.NewRule("get", "list", "watch").Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), rbac.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(), rbac.NewRule("update").Groups(policyGroup).Resources("poddisruptionbudgets/status").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "endpoint-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods").RuleOrDie(), rbac.NewRule("get", "list", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints/restricted").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "generic-garbage-collector"}, Rules: []rbac.PolicyRule{ // the GC controller needs to run list/watches, selective gets, and updates against any resource rbac.NewRule("get", "list", "watch", "patch", "update", "delete").Groups("*").Resources("*").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "horizontal-pod-autoscaler"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch").Groups(autoscalingGroup, extensionsGroup).Resources("horizontalpodautoscalers").RuleOrDie(), 
rbac.NewRule("update").Groups(autoscalingGroup, extensionsGroup).Resources("horizontalpodautoscalers/status").RuleOrDie(), rbac.NewRule("get", "update").Groups(legacyGroup).Resources("replicationcontrollers/scale").RuleOrDie(), // TODO this should be removable when the HPA contoller is fixed rbac.NewRule("get", "update").Groups(extensionsGroup).Resources("replicationcontrollers/scale").RuleOrDie(), rbac.NewRule("get", "update").Groups(extensionsGroup).Resources("deployments/scale", "replicasets/scale").RuleOrDie(), rbac.NewRule("list").Groups(legacyGroup).Resources("pods").RuleOrDie(), // TODO: fix MetricsClient to no longer require root proxy access // TODO: restrict this to the appropriate namespace rbac.NewRule("proxy").Groups(legacyGroup).Resources("services").Names("https:heapster:", "http:heapster:").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "job-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("jobs").RuleOrDie(), rbac.NewRule("update").Groups(batchGroup).Resources("jobs/status").RuleOrDie(), rbac.NewRule("list", "watch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "namespace-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("namespaces").RuleOrDie(), rbac.NewRule("update").Groups(legacyGroup).Resources("namespaces/finalize", "namespaces/status").RuleOrDie(), rbac.NewRule("get", "list", "delete", "deletecollection").Groups("*").Resources("*").RuleOrDie(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "node-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "update").Groups(legacyGroup).Resources("nodes").RuleOrDie(), eventsRule(), }, }) 
addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "persistent-volume-binder"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch", "update", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), rbac.NewRule("update").Groups(legacyGroup).Resources("persistentvolumes/status").RuleOrDie(), rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), rbac.NewRule("update").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(), rbac.NewRule("list", "watch", "get", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), // glusterfs rbac.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(), rbac.NewRule("get", "create", "delete").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), rbac.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "pod-garbage-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), rbac.NewRule("list").Groups(legacyGroup).Resources("nodes").RuleOrDie(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "replicaset-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch", "update").Groups(extensionsGroup).Resources("replicasets").RuleOrDie(), rbac.NewRule("update").Groups(extensionsGroup).Resources("replicasets/status").RuleOrDie(), rbac.NewRule("list", "watch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "replication-controller"}, Rules: []rbac.PolicyRule{ // 1.0 controllers needed get, update, so without these old controllers break on new servers 
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(), rbac.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/status").RuleOrDie(), rbac.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "resourcequota-controller"}, Rules: []rbac.PolicyRule{ // quota can count quota on anything for reconcilation, so it needs full viewing powers rbac.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(), rbac.NewRule("update").Groups(legacyGroup).Resources("resourcequotas/status").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "route-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), rbac.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "service-account-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "service-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services").RuleOrDie(), rbac.NewRule("update").Groups(legacyGroup).Resources("services/status").RuleOrDie(), rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), eventsRule(), }, }) addControllerRole(rbac.ClusterRole{ ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "statefulset-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), rbac.NewRule("get", "list", 
"watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(), rbac.NewRule("update").Groups(appsGroup).Resources("statefulsets/status").RuleOrDie(), rbac.NewRule("get", "create", "delete", "update").Groups(legacyGroup).Resources("pods").RuleOrDie(), rbac.NewRule("get", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), eventsRule(), }, }) } // ControllerRoles returns the cluster roles used by controllers func ControllerRoles() []rbac.ClusterRole { return controllerRoles } // ControllerRoleBindings returns the role bindings used by controllers func ControllerRoleBindings() []rbac.ClusterRoleBinding { return controllerRoleBindings }
apache-2.0
jrmoser/Portfolio_Bootstrap
first-web-page/Javasript/script.js
231
/**
 * Created by jarodmoser on 9/4/15.
 *
 * Shrinks the page header once the window has been scrolled past a
 * threshold, and restores it when scrolled back up.
 */
$(function () {
    // Scroll offset (px) at which the header switches to its compact form.
    var SHRINK_OFFSET = 200;

    $(window).scroll(function () {
        var header = $(".header");
        if ($(window).scrollTop() >= SHRINK_OFFSET) {
            header.addClass("smaller");
        } else {
            header.removeClass("smaller");
        }
    });
});
apache-2.0
huahuajjh/requisition_land
WebRoot/assets/js/ticketState.js
623
function toTicketNumber(name){ switch (name) { case "LOSSOFREPORT": return 1; case "EXCHANGEED": return 2; case "MENDED": return 3; case "USED": return 4; case "RECEIVED": return 5; case "CASHED": return 6; case "NORMAL": return 7; default: return 0; } } function toTicketStr(name){ switch (name) { case "LOSSOFREPORT": return "挂失"; case "EXCHANGEED": return "已换券"; case "MENDED": return "已补券"; case "USED": return "已兑换"; case "RECEIVED": return "已领取"; case "CASHED": return "已兑现"; case "NORMAL": return "正常"; default: return ""; } }
apache-2.0
cacristo/ceos-project
jexan/src/test/java/net/ceos/project/poi/annotated/bean/FreeElementAdvancedObject.java
3683
/**
 * Copyright 2016 Carlos CRISTO ABREU
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.ceos.project.poi.annotated.bean;

import java.util.Date;

import net.ceos.project.poi.annotated.annotation.XlsConfiguration;
import net.ceos.project.poi.annotated.annotation.XlsFreeElement;
import net.ceos.project.poi.annotated.annotation.XlsSheet;
import net.ceos.project.poi.annotated.definition.TitleOrientationType;

/**
 * Test bean exercising {@code @XlsFreeElement} with every title orientation
 * (TOP, LEFT, RIGHT, BOTTOM) across several field types at fixed row/cell
 * coordinates.
 */
@XlsSheet(title = "Xls Free Element objects")
@XlsConfiguration(nameFile = "FreeElementAdvancedObjects")
public class FreeElementAdvancedObject {

	// String at (2,2), title rendered above the cell.
	@XlsFreeElement(title = "Free String", showTitle = true, titleOrientation = TitleOrientationType.TOP, row = 2, cell = 2)
	private String freeString;

	// Double at (3,2), title rendered to the left.
	@XlsFreeElement(title = "Free Double", showTitle = true, titleOrientation = TitleOrientationType.LEFT, row = 3, cell = 2)
	private Double freeDouble;

	// Primitive int at (4,2), title rendered to the right.
	@XlsFreeElement(title = "Free Primitive int", showTitle = true, titleOrientation = TitleOrientationType.RIGHT, row = 4, cell = 2)
	private int freePrimitiveInt;

	// Date at (2,3), title rendered above the cell.
	@XlsFreeElement(title = "Free Date", showTitle = true, titleOrientation = TitleOrientationType.TOP, row = 2, cell = 3)
	private Date freeDate;

	// Long at (5,2), title rendered to the left.
	@XlsFreeElement(title = "Free Long", showTitle = true, titleOrientation = TitleOrientationType.LEFT, row = 5, cell = 2)
	private Long freeLong;

	// Primitive boolean at (2,4), title rendered below the cell.
	@XlsFreeElement(title = "Free Primitive Boolean", showTitle = true, titleOrientation = TitleOrientationType.BOTTOM, row = 2, cell = 4)
	private boolean freePrimitiveBoolean;

	public FreeElementAdvancedObject() {
	}

	/** @return the freeString */
	protected String getFreeString() {
		return freeString;
	}

	/** @param freeString the freeString to set */
	protected void setFreeString(String freeString) {
		this.freeString = freeString;
	}

	/** @return the freeDouble */
	protected Double getFreeDouble() {
		return freeDouble;
	}

	/** @param freeDouble the freeDouble to set */
	protected void setFreeDouble(Double freeDouble) {
		this.freeDouble = freeDouble;
	}

	/** @return the freePrimitiveInt */
	protected int getFreePrimitiveInt() {
		return freePrimitiveInt;
	}

	/** @param freePrimitiveInt the freePrimitiveInt to set */
	protected void setFreePrimitiveInt(int freePrimitiveInt) {
		this.freePrimitiveInt = freePrimitiveInt;
	}

	/** @return the freeDate */
	protected Date getFreeDate() {
		return freeDate;
	}

	/** @param freeDate the freeDate to set */
	protected void setFreeDate(Date freeDate) {
		this.freeDate = freeDate;
	}

	/** @return the freeLong */
	protected Long getFreeLong() {
		return freeLong;
	}

	/** @param freeLong the freeLong to set */
	protected void setFreeLong(Long freeLong) {
		this.freeLong = freeLong;
	}

	/** @return the freePrimitiveBoolean */
	protected boolean isFreePrimitiveBoolean() {
		return freePrimitiveBoolean;
	}

	/** @param freePrimitiveBoolean the freePrimitiveBoolean to set */
	protected void setFreePrimitiveBoolean(boolean freePrimitiveBoolean) {
		this.freePrimitiveBoolean = freePrimitiveBoolean;
	}
}
apache-2.0
jentfoo/aws-sdk-java
aws-java-sdk-kinesis/src/main/java/com/amazonaws/services/kinesisfirehose/model/transform/OrcSerDeMarshaller.java
4969
/*
 * Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.kinesisfirehose.model.transform;

import java.util.List;
import javax.annotation.Generated;

import com.amazonaws.SdkClientException;
import com.amazonaws.services.kinesisfirehose.model.*;

import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;

/**
 * OrcSerDeMarshaller
 *
 * <p>Code-generated, stateless singleton that serializes an {@code OrcSerDe} model object
 * into the request payload via the SDK's {@code ProtocolMarshaller} abstraction. Each
 * {@code MarshallingInfo} constant below binds one bean property to its wire name in the
 * JSON payload.</p>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class OrcSerDeMarshaller {

    // One binding per OrcSerDe property: payload location + JSON field name + wire type.
    private static final MarshallingInfo<Integer> STRIPESIZEBYTES_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("StripeSizeBytes").build();
    private static final MarshallingInfo<Integer> BLOCKSIZEBYTES_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("BlockSizeBytes").build();
    private static final MarshallingInfo<Integer> ROWINDEXSTRIDE_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("RowIndexStride").build();
    private static final MarshallingInfo<Boolean> ENABLEPADDING_BINDING = MarshallingInfo.builder(MarshallingType.BOOLEAN)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("EnablePadding").build();
    private static final MarshallingInfo<Double> PADDINGTOLERANCE_BINDING = MarshallingInfo.builder(MarshallingType.DOUBLE)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("PaddingTolerance").build();
    private static final MarshallingInfo<String> COMPRESSION_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("Compression").build();
    private static final MarshallingInfo<List> BLOOMFILTERCOLUMNS_BINDING = MarshallingInfo.builder(MarshallingType.LIST)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("BloomFilterColumns").build();
    private static final MarshallingInfo<Double> BLOOMFILTERFALSEPOSITIVEPROBABILITY_BINDING = MarshallingInfo.builder(MarshallingType.DOUBLE)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("BloomFilterFalsePositiveProbability").build();
    private static final MarshallingInfo<Double> DICTIONARYKEYTHRESHOLD_BINDING = MarshallingInfo.builder(MarshallingType.DOUBLE)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("DictionaryKeyThreshold").build();
    private static final MarshallingInfo<String> FORMATVERSION_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("FormatVersion").build();

    // Eagerly-created singleton; the marshaller holds no state, so sharing is safe.
    private static final OrcSerDeMarshaller instance = new OrcSerDeMarshaller();

    public static OrcSerDeMarshaller getInstance() {
        return instance;
    }

    /**
     * Marshall the given parameter object.
     *
     * @param orcSerDe the model object to serialize; must not be null
     * @param protocolMarshaller target marshaller that receives each (value, binding) pair
     * @throws SdkClientException if {@code orcSerDe} is null, or wrapping any failure raised
     *         while marshalling an individual property
     */
    public void marshall(OrcSerDe orcSerDe, ProtocolMarshaller protocolMarshaller) {

        if (orcSerDe == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            protocolMarshaller.marshall(orcSerDe.getStripeSizeBytes(), STRIPESIZEBYTES_BINDING);
            protocolMarshaller.marshall(orcSerDe.getBlockSizeBytes(), BLOCKSIZEBYTES_BINDING);
            protocolMarshaller.marshall(orcSerDe.getRowIndexStride(), ROWINDEXSTRIDE_BINDING);
            protocolMarshaller.marshall(orcSerDe.getEnablePadding(), ENABLEPADDING_BINDING);
            protocolMarshaller.marshall(orcSerDe.getPaddingTolerance(), PADDINGTOLERANCE_BINDING);
            protocolMarshaller.marshall(orcSerDe.getCompression(), COMPRESSION_BINDING);
            protocolMarshaller.marshall(orcSerDe.getBloomFilterColumns(), BLOOMFILTERCOLUMNS_BINDING);
            protocolMarshaller.marshall(orcSerDe.getBloomFilterFalsePositiveProbability(), BLOOMFILTERFALSEPOSITIVEPROBABILITY_BINDING);
            protocolMarshaller.marshall(orcSerDe.getDictionaryKeyThreshold(), DICTIONARYKEYTHRESHOLD_BINDING);
            protocolMarshaller.marshall(orcSerDe.getFormatVersion(), FORMATVERSION_BINDING);
        } catch (Exception e) {
            // Wrap any per-property failure; the original cause is preserved.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
apache-2.0
skoulouzis/lobcder
lobcder-master/src/main/java/nl/uva/cs/lobcder/optimization/LDClustering.java
26382
/*
 * Copyright 2014 S. Koulouzis.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package nl.uva.cs.lobcder.optimization;

import io.milton.common.Path;
import io.milton.http.Request;
import io.milton.http.Request.Method;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.ParseException;
import java.util.ArrayList;
import javax.naming.NamingException;
import nl.uva.cs.lobcder.predictors.DBMapPredictor;
import nl.uva.cs.lobcder.resources.LogicalData;
import nl.uva.cs.lobcder.util.PropertiesHelper;
import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instance;
import java.util.logging.Level;
import java.util.logging.Logger;
import static nl.uva.cs.lobcder.predictors.DBMapPredictor.type;
import static nl.uva.cs.lobcder.util.PropertiesHelper.PREDICTION_TYPE.method;
import static nl.uva.cs.lobcder.util.PropertiesHelper.PREDICTION_TYPE.resource;
import static nl.uva.cs.lobcder.util.PropertiesHelper.PREDICTION_TYPE.state;

/**
 * k-nearest-neighbour style predictor over LDAP/WebDAV resource metadata.
 *
 * <p>Each {@link LogicalData} record is turned into a 16-attribute Weka instance
 * (see {@link #initAttributes()}); the numeric form of those instances is stored
 * in {@code features_table} (columns f1..f16). Prediction queries the table for
 * the k rows with the smallest squared Euclidean distance to the current state.</p>
 *
 * @author S. Koulouzis
 */
public class LDClustering extends DBMapPredictor implements Runnable {

    /** The 16 Weka attributes describing one (method, resource) observation. */
    private FastVector metdataAttributes;
    /** Number of nearest neighbours fetched per prediction (the LIMIT of each query). */
    private int k;

    public LDClustering() throws NamingException, ParseException, Exception {
        initAttributes();
        k = PropertiesHelper.KNN();
        type = PropertiesHelper.getPredictionType();
        buildOrUpdateDataset();
    }

    /**
     * Declares the feature vector: attribute i's declaration order must match the
     * f(i+1) column of features_table (see addLobStateFeatures), so do not reorder.
     */
    private void initAttributes() throws ParseException, Exception {
        int index = 0;
        Attribute uidAttribute = new Attribute("uid", index++);
        // Nominal attribute enumerating every HTTP/WebDAV verb.
        FastVector verbVector = new FastVector(Request.Method.values().length);
        for (Request.Method m : Request.Method.values()) {
            verbVector.addElement(m.code);
        }
        Attribute verbAttribute = new Attribute("verb", verbVector, index++);
        Attribute checksumAttribute = new Attribute("checksum", (FastVector) null, index++);
        Attribute contentTypeAttribute = new Attribute("contentType", (FastVector) null, index++);
        Attribute createDateAttribute = new Attribute("createDate", "yyyy-MM-dd HH:mm:ss", index++);
        Attribute locationPreferenceAttribute = new Attribute("locationPreference", (FastVector) null, index++);
        Attribute descriptionAttribute = new Attribute("description", (FastVector) null, index++);
        Attribute validationDateAttribute = new Attribute("validationDate", "yyyy-MM-dd HH:mm:ss", index++);
        Attribute lengthAttribute = new Attribute("length", index++);
        Attribute modifiedDateAttribute = new Attribute("modifiedDate", "yyyy-MM-dd HH:mm:ss", index++);
        Attribute pathAttribute = new Attribute("name", (FastVector) null, index++);
        Attribute parentRefAttribute = new Attribute("parentRef", index++);
        Attribute statusAttribute = new Attribute("status", (FastVector) null, index++);
        FastVector typeVector = new FastVector(3);
        typeVector.addElement(nl.uva.cs.lobcder.util.Constants.LOGICAL_DATA);
        typeVector.addElement(nl.uva.cs.lobcder.util.Constants.LOGICAL_FILE);
        typeVector.addElement(nl.uva.cs.lobcder.util.Constants.LOGICAL_FOLDER);
        Attribute typeAttribute = new Attribute("type", typeVector, index++);
        // Class attribute (supervised yes/no).
        FastVector supervisedVector = new FastVector(2);
        supervisedVector.addElement("true");
        supervisedVector.addElement("false");
        Attribute supervisedAttribute = new Attribute("supervised", supervisedVector, index++);
        Attribute ownerAttribute = new Attribute("owner", (FastVector) null, index++);

        metdataAttributes = new FastVector();
        metdataAttributes.addElement(uidAttribute);                //0  -> f1
        metdataAttributes.addElement(verbAttribute);               //1  -> f2
        metdataAttributes.addElement(checksumAttribute);           //2  -> f3
        metdataAttributes.addElement(contentTypeAttribute);        //3  -> f4
        metdataAttributes.addElement(createDateAttribute);         //4  -> f5
        metdataAttributes.addElement(locationPreferenceAttribute); //5  -> f6
        metdataAttributes.addElement(descriptionAttribute);        //6  -> f7
        metdataAttributes.addElement(validationDateAttribute);     //7  -> f8
        metdataAttributes.addElement(lengthAttribute);             //8  -> f9
        metdataAttributes.addElement(modifiedDateAttribute);       //9  -> f10
        metdataAttributes.addElement(pathAttribute);               //10 -> f11
        metdataAttributes.addElement(parentRefAttribute);          //11 -> f12
        metdataAttributes.addElement(statusAttribute);             //12 -> f13
        metdataAttributes.addElement(typeAttribute);               //13 -> f14
        metdataAttributes.addElement(supervisedAttribute);         //14 -> f15
        metdataAttributes.addElement(ownerAttribute);              //15 -> f16
    }

    /**
     * Loads every ldata_table row, converts it to instances and persists their
     * feature vectors into features_table (skipping rows already present).
     */
    private void buildOrUpdateDataset() throws SQLException, Exception {
        if (type.equals(method)) {
            // NOTE(review): the returned instances are discarded here, exactly as in the
            // original code -- confirm whether method-type bootstrapping was ever finished.
            getMethodInstances(Method.HEAD);
        } else {
            try (Connection connection = getConnection()) {
                try (PreparedStatement ps = connection.prepareStatement(
                        "SELECT uid, parentRef, ownerId, datatype, ldName, "
                        + "createDate, modifiedDate, ldLength, contentTypesStr, pdriGroupRef, "
                        + "isSupervised, checksum, lastValidationDate, lockTokenID, lockScope, "
                        + "lockType, lockedByUser, lockDepth, lockTimeout, description, locationPreference, status "
                        + "FROM ldata_table")) {
                    ResultSet rs = ps.executeQuery();
                    while (rs.next()) {
                        LogicalData res = new LogicalData();
                        res.setUid(rs.getLong(1));
                        res.setParentRef(rs.getLong(2));
                        res.setOwner(rs.getString(3));
                        res.setType(rs.getString(4));
                        res.setName(rs.getString(5));
                        res.setCreateDate(rs.getTimestamp(6).getTime());
                        res.setModifiedDate(rs.getTimestamp(7).getTime());
                        res.setLength(rs.getLong(8));
                        res.setContentTypesAsString(rs.getString(9));
                        res.setPdriGroupId(rs.getLong(10));
                        res.setSupervised(rs.getBoolean(11));
                        res.setChecksum(rs.getString(12));
                        res.setLastValidationDate(rs.getLong(13));
                        res.setLockTokenID(rs.getString(14));
                        res.setLockScope(rs.getString(15));
                        res.setLockType(rs.getString(16));
                        res.setLockedByUser(rs.getString(17));
                        res.setLockDepth(rs.getString(18));
                        res.setLockTimeout(rs.getLong(19));
                        res.setDescription(rs.getString(20));
//                        res.setDataLocationPreference(rs.getString(21));
                        res.setStatus(rs.getString(22));
                        ArrayList<MyInstance> ins = getInstances(res, null);
                        for (MyInstance i : ins) {
                            addFeatures(connection, i, res.getUid());
                        }
                    }
                }
            }
        }
    }

    @Override
    public void run() {
        // Intentionally a no-op: background refresh is not implemented yet.
    }

    /**
     * Predicts a follow-up state for {@code currentState} by a k-NN lookup in
     * features_table. Candidate states are currently only logged; this method
     * returns null in all cases (selection of the best candidate is unfinished).
     */
    @Override
    public Vertex getNextState(Vertex currentState) {
        ArrayList<Vertex> states = new ArrayList<>();
        String rName = currentState.getResourceName();
        if (!rName.endsWith("/")) {
            rName += "/";
        }
        rName = rName.replaceFirst("/lobcder/dav/", "");
        try (Connection connection = getConnection()) {
            LogicalData data = getLogicalDataByPath(Path.path(rName), connection);
            if (data == null) {
                // Resource is unknown; nothing to predict from.
                return null;
            }
            Instance instance = getInstances(data, currentState.getMethod()).get(0);
            double[] features = instance.toDoubleArray();
            switch (type) {
                case state:
                    return getNextLobState(connection, features);
                case resource:
                    return getNextResourceState(connection, features);
                case method:
                    return getNextMethodState(connection, features);
                default:
                    return getNextLobState(connection, features);
            }
        } catch (SQLException ex) {
            Logger.getLogger(LDClustering.class.getName()).log(Level.SEVERE, null, ex);
        }
        return null;
    }

    /** Inserts the instance's feature vector unless (methodName, ldataRef) already exists. */
    private void addFeatures(Connection connection, MyInstance inst, Long uid) throws SQLException {
        boolean exists = false;
        try (PreparedStatement ps = connection.prepareStatement("select uid "
                + "from features_table WHERE methodName = ? AND ldataRef = ?")) {
            Method requestMethod = inst.getMethod();
            if (requestMethod != null) {
                ps.setString(1, requestMethod.code);
            } else {
                ps.setString(1, null);
            }
            ps.setLong(2, uid);
            ResultSet rs = ps.executeQuery();
            exists = rs.next();
        }
        if (!exists) {
            // All prediction types currently share the full-feature insert.
            addLobStateFeatures(connection, inst, uid);
        }
    }

    /** Dispatches on the configured prediction type. */
    private ArrayList<MyInstance> getInstances(LogicalData n, Method method) {
        switch (type) {
            case state:
                return getlobStateInstances(n, method);
            case resource:
                return getResourceInstances(n);
            case method:
                return getMethodInstances(method);
            default:
                return getlobStateInstances(n, method);
        }
    }

    /**
     * Builds one fully-populated metadata instance for {@code n}.
     *
     * @param verbCode  value stored in the verb attribute
     * @param methodTag method tag attached to the instance (may be null for resource mode)
     */
    private MyInstance buildMetadataInstance(LogicalData n, String verbCode, Method methodTag) {
        int index = 0;
        MyInstance instance = new MyInstance(metdataAttributes.size());
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), n.getUid());
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), verbCode);
        instance.setMethod(methodTag);
        String att = n.getChecksum();
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), (att != null) ? att : "NON");
        att = n.getContentTypesAsString();
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), (att != null) ? att : "NON");
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), n.getCreateDate());
        // NOTE(review): the original code had "att = n.getDataLocationPreference()" commented
        // out here, so the locationPreference attribute receives the stale content-type
        // string. That behaviour is preserved on purpose -- confirm intent before changing.
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), (att != null) ? att : "NON");
        att = n.getDescription();
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), (att != null) ? att : "NON");
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), n.getLastValidationDate());
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), n.getLength());
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), n.getModifiedDate());
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), "NON");
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), n.getParentRef());
        att = n.getStatus();
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), (att != null) ? att : "NON");
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), n.getType());
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), String.valueOf(n.getSupervised()));
        instance.setValue((Attribute) metdataAttributes.elementAt(index++), n.getOwner());
        return instance;
    }

    /** One instance per verb when {@code method} is null, otherwise a single instance. */
    private ArrayList<MyInstance> getlobStateInstances(LogicalData n, Method method) {
        ArrayList<MyInstance> inst = new ArrayList<>();
        if (method == null) {
            for (Request.Method m : Request.Method.values()) {
                inst.add(buildMetadataInstance(n, m.code, m));
            }
        } else {
            inst.add(buildMetadataInstance(n, method.code, method));
        }
        return inst;
    }

    /** Verb-only instances; only the verb attribute is populated. */
    private ArrayList<MyInstance> getMethodInstances(Method method) {
        ArrayList<MyInstance> inst = new ArrayList<>();
        if (method == null) {
            for (Request.Method m : Request.Method.values()) {
                int index = 0;
                MyInstance instance = new MyInstance(metdataAttributes.size());
                index++; // uid attribute deliberately left unset
                instance.setValue((Attribute) metdataAttributes.elementAt(index++), m.code);
                instance.setMethod(m);
                inst.add(instance);
            }
        } else {
            int index = 0;
            MyInstance instance = new MyInstance(metdataAttributes.size());
            index++; // uid attribute deliberately left unset
            instance.setValue((Attribute) metdataAttributes.elementAt(index++), method.code);
            instance.setMethod(method);
            inst.add(instance);
        }
        return inst;
    }

    /** Single resource-mode instance: verb fixed to ACL, no method tag. */
    private ArrayList<MyInstance> getResourceInstances(LogicalData n) {
        ArrayList<MyInstance> inst = new ArrayList<>();
        inst.add(buildMetadataInstance(n, Method.ACL.code, null));
        return inst;
    }

    /**
     * k-NN over all 16 feature columns.
     *
     * <p>Fixed: the original bound only features[1..15] to the 16 feature placeholders
     * and set the LIMIT on parameter 16 of 17, leaving one placeholder unset (which
     * fails at execution time). Now f(i+1) is bound from features[i] for i = 0..15
     * and the LIMIT goes into parameter 17.</p>
     */
    private Vertex getNextLobState(Connection connection, double[] features) throws SQLException {
        String query = "SELECT ldataRef, methodName, "
                + "POW((f1 - ?), 2) + POW((f2 - ?), 2) + POW((f3 - ?), 2) + "
                + "POW((f4 - ?), 2) + POW((f5 - ?), 2) + POW((f6 - ?), 2) + "
                + "POW((f7 - ?), 2)+ POW((f8 - ?), 2)+ POW((f9 - ?), 2)+ "
                + "POW((f10 - ?), 2)+ POW((f11 - ?), 2)+ POW((f12 - ?), 2)+ "
                + "POW((f13 - ?), 2)+ POW((f14 - ?), 2)+ POW((f15 - ?), 2)+ "
                + "POW((f16 - ?), 2)"
                + "AS dist FROM features_table ORDER BY dist ASC LIMIT ?";
        try (PreparedStatement preparedStatement = connection.prepareStatement(query)) {
            int index = 1;
            for (int i = 0; i < features.length; i++) {
                preparedStatement.setDouble(index++, features[i]);
            }
            preparedStatement.setInt(index, k);
            ResultSet rs = preparedStatement.executeQuery();
            while (rs.next()) {
                String path = getPathforLogicalData(getLogicalDataByUid(rs.getLong(1), connection), connection);
                Vertex state = new Vertex(Method.valueOf(rs.getString(2)), path);
                Logger.getLogger(LDClustering.class.getName()).log(Level.INFO, "State: {0}", state.getID());
            }
        }
        // Candidates are only logged; selecting/returning the best one is still TODO.
        return null;
    }

    /** k-NN over the verb feature (f2) only. */
    private Vertex getNextMethodState(Connection connection, double[] features) throws SQLException {
        String query = "SELECT ldataRef, methodName, "
                + "POW((f2 - ?), 2)"
                + "AS dist FROM features_table ORDER BY dist ASC LIMIT ?";
        try (PreparedStatement preparedStatement = connection.prepareStatement(query)) {
            preparedStatement.setDouble(1, features[1]);
            preparedStatement.setInt(2, k);
            ResultSet rs = preparedStatement.executeQuery();
            while (rs.next()) {
                String path = getPathforLogicalData(getLogicalDataByUid(rs.getLong(1), connection), connection);
                Vertex state = new Vertex(Method.valueOf(rs.getString(2)), path);
                Logger.getLogger(LDClustering.class.getName()).log(Level.INFO, "State: {0}", state.getID());
            }
        }
        // Candidates are only logged; selecting/returning the best one is still TODO.
        return null;
    }

    /** k-NN over every feature except the verb (f2 is excluded from the distance). */
    private Vertex getNextResourceState(Connection connection, double[] features) throws SQLException {
        String query = "SELECT ldataRef, "
                + "methodName, "
                + "POW((f1 - ?), 2) + "
                + "POW((f3 - ?), 2) + "
                + "POW((f4 - ?), 2) + "
                + "POW((f5 - ?), 2) + "
                + "POW((f6 - ?), 2) + "
                + "POW((f7 - ?), 2)+ "
                + "POW((f8 - ?), 2)+ "
                + "POW((f9 - ?), 2)+ "
                + "POW((f10 - ?), 2)+ "
                + "POW((f11 - ?), 2)+ "
                + "POW((f12 - ?), 2)+ "
                + "POW((f13 - ?), 2)+ "
                + "POW((f14 - ?), 2)+ "
                + "POW((f15 - ?), 2)+ "
                + "POW((f16 - ?), 2)"
                + "AS dist FROM features_table ORDER BY dist ASC LIMIT ?";
        try (PreparedStatement preparedStatement = connection.prepareStatement(query)) {
            int index = 0;
            for (int i = 0; i < features.length; i++) {
                if (i != 1) { // skip the verb feature
                    index++;
                    preparedStatement.setDouble(index, features[i]);
                }
            }
            index++;
            preparedStatement.setInt(index, k);
            ResultSet rs = preparedStatement.executeQuery();
            while (rs.next()) {
                String path = getPathforLogicalData(getLogicalDataByUid(rs.getLong(1), connection), connection);
                Method rquestMethod;
                switch (type) {
                    case resource:
                        rquestMethod = null;
                        break;
                    default:
                        rquestMethod = Method.valueOf(rs.getString(2));
                        break;
                }
                Vertex state = new Vertex(rquestMethod, path);
                Logger.getLogger(LDClustering.class.getName()).log(Level.INFO, "State: {0}", state.getID());
            }
        }
        // Candidates are only logged; selecting/returning the best one is still TODO.
        return null;
    }

    /**
     * Persists all 16 features into f1..f16. In method mode, only the verb feature
     * (features[1] -> f2) is stored; all other columns are zeroed.
     */
    private void addLobStateFeatures(Connection connection, MyInstance inst, Long uid) throws SQLException {
        try (PreparedStatement ps = connection.prepareStatement("INSERT INTO "
                + "features_table (methodName, ldataRef, f1, f2, f3, f4, f5, "
                + "f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16) "
                + "VALUES (?, ?, ?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")) {
            Method requestMethod = inst.getMethod();
            if (requestMethod != null) {
                ps.setString(1, requestMethod.code);
            } else {
                ps.setString(1, null);
            }
            ps.setLong(2, uid);
            double[] features = inst.toDoubleArray();
            if (type.equals(method)) {
                for (int i = 0; i < features.length; i++) {
                    int index = i + 3;
                    if (i == 1) {
                        ps.setDouble(index, features[i]);
                    } else {
                        ps.setDouble(index, 0.0);
                    }
                }
            } else {
                for (int i = 0; i < features.length; i++) {
                    int index = i + 3;
                    ps.setDouble(index, features[i]);
                }
            }
            ps.executeUpdate();
            connection.commit();
        }
    }

    /**
     * Persists every feature except the verb into f1,f3..f16 (17 placeholders).
     *
     * <p>Fixed: the original advanced the parameter index for the skipped verb
     * feature as well, writing to non-existent parameter 18. The verb feature is
     * now skipped without consuming a placeholder.</p>
     */
    private void addResourceFeatures(Connection connection, MyInstance inst, Long uid) throws SQLException {
        try (PreparedStatement ps = connection.prepareStatement("INSERT INTO "
                + "features_table (methodName, ldataRef, f1,f3, f4, f5, "
                + "f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16) "
                + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")) {
            Method requestMethod = inst.getMethod();
            if (requestMethod != null) {
                ps.setString(1, requestMethod.code);
            } else {
                ps.setString(1, null);
            }
            ps.setLong(2, uid);
            double[] features = inst.toDoubleArray();
            int index = 3;
            for (int i = 0; i < features.length; i++) {
                if (i == 1) {
                    continue; // verb feature has no column in this statement
                }
                ps.setDouble(index++, features[i]);
            }
            ps.executeUpdate();
            connection.commit();
        }
    }

    /**
     * Persists only the verb feature; all other feature columns are zeroed.
     *
     * <p>Fixed: the original set only parameter 3 of an 18-placeholder INSERT,
     * leaving 15 parameters unbound (execution-time failure), and wrote the verb
     * into f1 while {@link #getNextMethodState} queries f2. All columns are now
     * bound, with the verb stored in f2 to match the reader.</p>
     */
    private void addMethodFeatures(Connection connection, MyInstance inst, Long uid) throws SQLException {
        try (PreparedStatement ps = connection.prepareStatement("INSERT INTO "
                + "features_table (methodName, ldataRef, f1, f2, f3, f4, f5, "
                + "f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16) "
                + "VALUES (?, ?, ?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")) {
            Method requestMethod = inst.getMethod();
            if (requestMethod != null) {
                ps.setString(1, requestMethod.code);
            } else {
                ps.setString(1, null);
            }
            ps.setLong(2, uid);
            double[] features = inst.toDoubleArray();
            for (int i = 0; i < 16; i++) {
                ps.setDouble(i + 3, 0.0);
            }
            ps.setDouble(4, features[1]); // verb feature -> f2, matching getNextMethodState
            ps.executeUpdate();
            connection.commit();
        }
    }

    /** Weka instance carrying the WebDAV method it was built from. */
    private class MyInstance extends Instance {

        private Method method;

        private MyInstance(int size) {
            super(size);
        }

        public void setMethod(Method method) {
            this.method = method;
        }

        public Method getMethod() {
            return this.method;
        }
    }
}
apache-2.0
scala/scala
src/compiler/scala/tools/nsc/settings/StandardScalaSettings.scala
5439
/*
 * Scala (https://www.scala-lang.org)
 *
 * Copyright EPFL and Lightbend, Inc.
 *
 * Licensed under Apache License 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 *
 * See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 */

package scala.tools.nsc
package settings

import scala.tools.util.PathResolver.Defaults

/** Settings which aren't behind a -V, -W, -X, -Y, or -P option.
 *  When possible, the val and the option have identical names.
 *  The abstract settings are commented as to why they are as yet
 *  implemented in MutableSettings rather than mutation-generically.
 *
 *  Each `withAbbreviation` registers a GNU-style `--long-name` alias for the
 *  classic single-dash option.
 */
trait StandardScalaSettings { _: MutableSettings =>
  import StandardScalaSettings._

  /** Path related settings. */
  val bootclasspath =     PathSetting ("-bootclasspath", "Override location of bootstrap class files.", Defaults.scalaBootClassPath) withAbbreviation "--boot-class-path"
  val classpath: PathSetting                                                                  // is mutated directly in various places (thus inspiring this very effort)
  val extdirs =           PathSetting ("-extdirs", "Override location of installed extensions.", Defaults.scalaExtDirs) withAbbreviation "--extension-directories"
  val javabootclasspath = PathSetting ("-javabootclasspath", "Override java boot classpath.", Defaults.javaBootClassPath) withAbbreviation "--java-boot-class-path"
  val javaextdirs =       PathSetting ("-javaextdirs", "Override java extdirs classpath.", Defaults.javaExtDirs) withAbbreviation "--java-extension-directories"
  val sourcepath =        PathSetting ("-sourcepath", "Specify location(s) of source files.", "") withAbbreviation "--source-path" // Defaults.scalaSourcePath
  val rootdir =           PathSetting ("-rootdir", "The absolute path of the project root directory, usually the git/scm checkout. Used by -Wconf.", "") withAbbreviation "--root-directory"

  /** Other settings.
   *  The post-set hooks on -deprecation/-feature/-unchecked forward the flag into
   *  the corresponding -Wconf category so both mechanisms stay consistent.
   */
  val dependencyfile =  StringSetting ("-dependencyfile", "file", "Set dependency tracking file.", ".scala_dependencies") withAbbreviation "--dependency-file"
  val deprecation    = BooleanSetting ("-deprecation", "Emit warning and location for usages of deprecated APIs. See also -Wconf.") withAbbreviation "--deprecation" withPostSetHook { s =>
    if (s.value) Wconf.tryToSet(List(s"cat=deprecation:w"))
    else Wconf.tryToSet(List(s"cat=deprecation:s"))
  }
  val encoding       =  StringSetting ("-encoding", "encoding", "Specify character encoding used by source files.", Properties.sourceEncoding) withAbbreviation "--encoding"
  val explaintypes   = BooleanSetting ("-explaintypes", "Explain type errors in more detail.") withAbbreviation "--explain-types"
  val feature        = BooleanSetting ("-feature", "Emit warning and location for usages of features that should be imported explicitly. See also -Wconf.") withAbbreviation "--feature" withPostSetHook { s =>
    if (s.value) Wconf.tryToSet(List(s"cat=feature:w"))
    else Wconf.tryToSet(List(s"cat=feature:s"))
  }
  val g              =  ChoiceSetting ("-g", "level", "Set level of generated debugging info.", List("none", "source", "line", "vars", "notailcalls"), "vars")
  val help           = BooleanSetting ("-help", "Print a synopsis of standard options") withAbbreviation "--help" withAbbreviation("-h")
  // -nowarn additionally clamps the warning budget to zero.
  val nowarn         = BooleanSetting ("-nowarn", "Generate no warnings.") withAbbreviation "--no-warnings" withPostSetHook { s => if (s) maxwarns.value = 0 }
  val optimise: BooleanSetting                                             // depends on post hook which mutates other settings
  val print          = BooleanSetting ("-print", "Print program with Scala-specific features removed.") withAbbreviation "--print"
  val target         =  ChoiceSetting ("-target", "target", "Target platform for object files.", AllTargetVersions, "8") withPreSetHook normalizeTarget _ withAbbreviation "--target"
  val unchecked      = BooleanSetting ("-unchecked", "Enable additional warnings where generated code depends on assumptions. See also -Wconf.") withAbbreviation "--unchecked" withPostSetHook { s =>
    if (s.value) Wconf.tryToSet(List(s"cat=unchecked:w"))
    else Wconf.tryToSet(List(s"cat=unchecked:s"))
  }
  val uniqid         = BooleanSetting ("-uniqid", "Uniquely tag all identifiers in debugging output.") withAbbreviation "--unique-id"
  val usejavacp      = BooleanSetting ("-usejavacp", "Utilize the java.class.path in classpath resolution.") withAbbreviation "--use-java-class-path"
  val usemanifestcp  = BooleanSetting ("-usemanifestcp", "Utilize the manifest in classpath resolution.") withAbbreviation "--use-manifest-class-path"
  val verbose        = BooleanSetting ("-verbose", "Output messages about what the compiler is doing.") withAbbreviation "--verbose"
  val version        = BooleanSetting ("-version", "Print product version and exit.") withAbbreviation "--version"

  // Support passe prefixes of -target values:
  //   - `jvm-` (from back when we also had `msil`)
  //   - `1.` (from back when Java 2 was a possibility)
  // `-target:1.jvm-13` is ridiculous, though.
  private[this] def normalizeTarget(in: String): String = in.stripPrefix("jvm-").stripPrefix("1.")
}

object StandardScalaSettings {
  // not final in case some separately compiled client code wanted to depend on updated values
  val MinTargetVersion = 8
  val MaxTargetVersion = 18

  // Valid values for -target: "8" through "18" (after normalizeTarget strips legacy prefixes).
  private val AllTargetVersions = (MinTargetVersion to MaxTargetVersion).map(_.toString).to(List)
}
apache-2.0
shaolinwu/uimaster
modules/uipage/src/main/java/org/shaolin/uimaster/page/ajax/handlers/AjaxHandlerException.java
1486
/* * Copyright 2015 The UIMaster Project * * The UIMaster Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package org.shaolin.uimaster.page.ajax.handlers; import org.shaolin.uimaster.page.exception.AjaxException; //imports /** * Exception for Ajax * */ public class AjaxHandlerException extends AjaxException { /** * Constructs a AjaxHandlerException with a given exception reason * * @param aReason */ public AjaxHandlerException(String aReason) { super(aReason); } public AjaxHandlerException(String aReason,Throwable ex) { super(aReason,ex); } /** * Constructs a AjaxHandlerException with a given exception reason, an argument * array. * * @param aReason * @param args */ public AjaxHandlerException(String aReason, Object[] args) { super(aReason, args); } private static final long serialVersionUID = 6637940630141909726L; }
apache-2.0
zoozooll/MyExercise
meep/MeepLib/src/com/oregonscientific/meep/database/table/TableConversationLog.java
321
package com.oregonscientific.meep.database.table; public class TableConversationLog { public static final String TABLE_NAME = "conversationLog"; public static final String SENDER_ID = "senderId"; public static final String RECEIVER_ID = "receiverId"; public static final String CONVERSATION_ID = "conversationId"; }
apache-2.0
darranl/directory-shared
ldap/codec/core/src/main/java/org/apache/directory/api/ldap/codec/actions/compareResponse/InitCompareResponse.java
2791
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * */ package org.apache.directory.api.ldap.codec.actions.compareResponse; import org.apache.directory.api.asn1.DecoderException; import org.apache.directory.api.asn1.ber.grammar.GrammarAction; import org.apache.directory.api.asn1.ber.tlv.TLV; import org.apache.directory.api.i18n.I18n; import org.apache.directory.api.ldap.codec.api.LdapMessageContainer; import org.apache.directory.api.ldap.codec.decorators.CompareResponseDecorator; import org.apache.directory.api.ldap.model.message.CompareResponseImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * The action used to initialize the CompareResponse * <pre> * LdapMessage ::= ... CompareResponse ... * CompareResponse ::= [APPLICATION 15] LDAPResult * </pre> * @author <a href="mailto:dev@directory.apache.org">Apache Directory Project</a> */ public class InitCompareResponse extends GrammarAction<LdapMessageContainer<CompareResponseDecorator>> { /** The logger */ private static final Logger LOG = LoggerFactory.getLogger( InitCompareResponse.class ); /** * Instantiates a new action. 
*/ public InitCompareResponse() { super( "Compare Response initialization" ); } /** * {@inheritDoc} */ public void action( LdapMessageContainer<CompareResponseDecorator> container ) throws DecoderException { // Now, we can allocate the CompareResponse Object CompareResponseDecorator compareResponse = new CompareResponseDecorator( container.getLdapCodecService(), new CompareResponseImpl( container.getMessageId() ) ); container.setMessage( compareResponse ); // We will check that the request is not null TLV tlv = container.getCurrentTLV(); if ( tlv.getLength() == 0 ) { String msg = I18n.err( I18n.ERR_04094 ); LOG.error( msg ); throw new DecoderException( msg ); } LOG.debug( "Compare response " ); } }
apache-2.0
ctripcorp/dal
dal-client/src/main/java/com/ctrip/platform/dal/dao/datasource/cluster/ShardMeta.java
283
package com.ctrip.platform.dal.dao.datasource.cluster; import com.ctrip.framework.dal.cluster.client.base.HostSpec; import java.util.Set; /** * @author c7ch23en */ public interface ShardMeta extends ClusterMeta { int shardIndex(); Set<HostSpec> configuredHosts(); }
apache-2.0
alim1369/sos
src/sos/ambulance_v2/tools/SOSParticleFilter.java
11179
package sos.ambulance_v2.tools;

import java.util.ArrayList;
import java.util.Collections;

import sos.base.entities.Human;

/**
 * Particle filter estimating a human's hidden (hp, damage, broken-rate) state
 * from the coarse, precision-rounded hp/damage values the simulation server
 * reports, and predicting from it the cycle at which the human would die.
 *
 * ParticleFilter by ZJU 2008
 * edited by @r@mik ali
 * future --> check and expand the size of damage array for more exact prediction
 */
public class SOSParticleFilter {
	/** Last observed damage value (as reported, precision-rounded by the server). */
	private int m_dmg_ob;
	/** Last observed hp value (as reported, precision-rounded by the server). */
	private int m_hp_ob;
	/** Buriedness captured at the first setBury() call; -1 until then. */
	private int m_bury;
	/** Simulation time of the most recent hp/damage observation; -1 before any. */
	private int m_lastUpdate;
	/** Particle set: [60 * ARRAY_PRECISION] rows of {hp, damage, brokenRate%}. */
	private float[][] m_particles;
	/** Simulation time the current particle set corresponds to. */
	private int m_particles_time;
	public boolean m_particlesNeedResample = false;
	/** 60 sorted death-time predictions sampled from the particle set. */
	private int[] m_deadTime;
	// private boolean m_propertyChanged;
	// NOTE(review): m_propertyChanged is assigned and read throughout this class
	// but its declaration above appears commented out — confirm the actual
	// declaration was not lost; as stored, this source should not compile.
	private int m_time_needRefresh;
	private int buriedness;
	private static boolean HAVE_REFUGE;
	// aramik
	public static int HP_PRECISION;
	public static int ARRAY_PRECISION;
	public static int DAMAGE_PRECISION;
	/** Candidate broken-damage percentages: 5, 15, 25, ..., 95. */
	private static float[] brokenRateTable = new float[10];
	static {
		for (int i = 0; i < 10; i++) {
			brokenRateTable[i] = 5 + 10 * i;
		}
	}

	/**
	 * Creates a filter.
	 * NOTE(review): this constructor mutates the STATIC precision fields, so
	 * every instance shares the precision of the most recently built filter —
	 * confirm only one configuration is ever used per process.
	 *
	 * @param hpPrecision     server rounding step for hp observations
	 * @param damagePrecision server rounding step for damage observations
	 * @param haveRefuge      whether the map has refuges (affects getProperDeadTime)
	 */
	public SOSParticleFilter(int hpPrecision, int damagePrecision, boolean haveRefuge) {
		m_dmg_ob = 0;
		m_bury = -1;
		m_hp_ob = 10000;
		m_lastUpdate = -1;
		m_particles = null;
		m_particles_time = 0;
		m_deadTime = new int[60];
		m_propertyChanged = false;
		m_time_needRefresh = 50;
		ARRAY_PRECISION = 100;
		HP_PRECISION = hpPrecision;
		DAMAGE_PRECISION = damagePrecision;
		HAVE_REFUGE = haveRefuge;
	}

	/** Convenience constructor reading the precisions from the agent's config. */
	public SOSParticleFilter(Human hu) {
		this(hu.getAgent().getConfig().getIntValue("perception.los.precision.hp"), hu.getAgent().getConfig().getIntValue("perception.los.precision.damage"), !hu.getAgent().model().refuges().isEmpty());
	}

	/** Returns the 60 sorted sampled death-time predictions (see cycle()). */
	public int[] getDeadTime() {
		return m_deadTime;
	}

	/** Records a damage observation at the given time; flags a property change if it differs. */
	public void setDmg(int dmg, int time) {
		if (dmg != m_dmg_ob)
			m_propertyChanged = true;
		// if (dmg % DAMAGE_PRECISION < DAMAGE_PRECISION/2)
		// dmg = dmg - (dmg % DAMAGE_PRECISION);
		// else
		// dmg = dmg - (dmg % DAMAGE_PRECISION) + DAMAGE_PRECISION;
		m_dmg_ob = dmg;
		m_lastUpdate = time;
	}

	/** Records an hp observation at the given time; flags a property change if it differs. */
	public void setHp(int hp, int time) {
		if (hp != m_hp_ob)
			m_propertyChanged = true;
		m_hp_ob = hp;
		m_lastUpdate = time;
	}

	/** Records buriedness; m_bury keeps only the FIRST reported value. */
	public void setBury(int bury) {
		if (m_bury == -1)
			m_bury = bury;
		buriedness = bury;
	}

	/**
	 * Builds a fresh particle set spanning the uncertainty of the rounded
	 * observations: 6 hp candidates x ARRAY_PRECISION damage candidates x 10
	 * broken-rate candidates.
	 */
	private float[][] initTempParticles() {
		float[][] result = new float[60 * ARRAY_PRECISION][3];
		int[] hpTable = new int[6];
		int hp = getRealSensedValue(m_hp_ob, HP_PRECISION);
		int dmg = getRealSensedValue(m_dmg_ob, DAMAGE_PRECISION);
		// hp candidates spread across the rounding interval around the sensed hp
		hpTable[0] = hp - ((HP_PRECISION / 2) - 1);
		hpTable[1] = hp - (((HP_PRECISION * 3) / 10) - 1);
		hpTable[2] = hp - (HP_PRECISION / 5 - 1);
		hpTable[3] = hp + (HP_PRECISION / 5 + 1);
		hpTable[4] = hp + (((HP_PRECISION * 3) / 10) + 1);
		hpTable[5] = hp + ((HP_PRECISION / 2) - 1);
		//
		int worseDamage = getDamage(hpTable[0], m_dmg_ob, m_lastUpdate);
		int lessDamage = getDamage(hpTable[5], m_dmg_ob, m_lastUpdate);
		float[] dmgTable = new float[ARRAY_PRECISION];
		// NOTE(review): integer division — the fractional part of the step is
		// truncated before the widening to float; confirm this is intended.
		float step = (worseDamage - lessDamage) / ARRAY_PRECISION;
		for (int i = 0; i < ARRAY_PRECISION; i++) {
			dmgTable[i] = lessDamage + step * i;
		}
		// Enumerate all (hp, damage, brokenRate) combinations.
		for (int i = 0; i < 60 * ARRAY_PRECISION; i++) {
			int a = i / (10 * ARRAY_PRECISION);
			int b = (i % (10 * ARRAY_PRECISION)) / ARRAY_PRECISION;
			int c = i % 10;
			result[i][0] = hpTable[a];
			if (dmg == 0)
				result[i][1] = (worseDamage + lessDamage) / 2;
			else
				result[i][1] = dmgTable[b];
			if (m_bury == 0)
				// not buried: all damage is attributed to the "broken" component
				result[i][2] = 100.0f;
			else
				result[i][2] = brokenRateTable[c];
		}
		//
		return result;
	}

	/**
	 * Clamps the damage estimated from (hp, time) into the rounding interval
	 * around the sensed damage value.
	 */
	private static int getDamage(int hp, int damage, int time) {
		int estimatedDamage = SimpleDeathTime.getEstimatedDamage(hp, time);
		damage = getRealSensedValue(damage, DAMAGE_PRECISION);
		return Math.min(Math.max(damage-DAMAGE_PRECISION/2, estimatedDamage), damage + DAMAGE_PRECISION/2);
	}

	/** (Re)builds the particle set from the latest observations and schedules the next refresh. */
	private void initParticles() {
		m_propertyChanged = false;
		// next refresh 30..50 cycles after the last observation (randomized)
		m_time_needRefresh = m_lastUpdate;
		m_time_needRefresh += (int) (Math.random() * 20);
		m_time_needRefresh += 30;
		//
		m_particles = initTempParticles();
		m_particles_time = m_lastUpdate;
		//
		m_deadTime = calculateDeathTimeAgent();
	}

	/**
	 * Advances one particle's state by {@code time} cycles using the bury/broken
	 * damage growth model. Returns a new {hp, damage, brokenRate%} triple.
	 */
	private float[] lifeSpan(float[] status, int time) {
		float[] result = new float[3];
		float hp = status[0];
		float dmg = status[1];
		float brokenDmg = dmg * status[2] / 100.0f;
		float buryDmg = dmg - brokenDmg;
		for (int i = 0; i < time; i++) {
			buryDmg += buryDmg * buryDmg * 0.000035;
			buryDmg += 0.01;
			brokenDmg += brokenDmg * brokenDmg * 0.00025;
			brokenDmg += 0.01;
			hp -= buryDmg;
			hp -= brokenDmg;
		}
		result[0] = hp;
		result[1] = buryDmg + brokenDmg;
		result[2] = brokenDmg / result[1] * 100;
		return result;
	}

	/**
	 * Simulates a particle forward until hp reaches 0 and returns the absolute
	 * death time (m_particles_time + elapsed), capped at +1000 cycles.
	 */
	private int calculateDeathTime(float[] status) {
		float hp = status[0];
		float dmg = status[1];
		float brokenDmg = dmg * status[2] / 100.0f;
		float buryDmg = dmg - brokenDmg;
		int time = 0;
		while (hp > 0) {
			buryDmg += buryDmg * buryDmg * 0.000035;
			buryDmg += 0.01;
			brokenDmg += brokenDmg * brokenDmg * 0.00025;
			brokenDmg += 0.01;
			hp -= buryDmg;
			hp -= brokenDmg;
			time++;
			if (time > 1000)
				return m_particles_time + 1000;
		}
		return m_particles_time + time;
	}

	/** Same as {@link #calculateDeathTime(float[])} but relative to an explicit base time. */
	private int calculateDeathTime(float[] status, int baseTime) {
		float hp = status[0];
		float dmg = status[1];
		float brokenDmg = dmg * status[2] / 100.0f;
		float buryDmg = dmg - brokenDmg;
		int time = 0;
		while (hp > 0) {
			buryDmg += buryDmg * buryDmg * 0.000035;
			buryDmg += 0.01;
			brokenDmg += brokenDmg * brokenDmg * 0.00025;
			brokenDmg += 0.01;
			hp -= buryDmg;
			hp -= brokenDmg;
			time++;
			if (time > 1000)
				return baseTime + 1000;
		}
		return baseTime + time;
	}

	/** Samples 60 random particles and returns their predicted death times, sorted ascending. */
	private int[] calculateDeathTimeAgent() {
		ArrayList<Integer> result = new ArrayList<Integer>();
		for (int i = 0; i < 60; i++) {
			int index = 0;
			index = (int) (Math.random() * 60 * ARRAY_PRECISION);
			result.add(calculateDeathTime(m_particles[index]));
		}
		Collections.sort(result);
		int[] finalResult = new int[result.size()];
		for (int i = 0; i < result.size(); i++)
			finalResult[i] = result.get(i);
		return finalResult;
	}

	/**
	 * Rounds a value to the nearest multiple of the precision (ties round up),
	 * i.e. reconstructs the server's rounding of the raw value.
	 * (Parameter name "preception" is a historical typo for "precision".)
	 */
	public static int getRealSensedValue(int value, int preception) {
		if (value % preception < preception / 2)
			return value - (value % preception);
		else
			return value - (value % preception) + preception;
	}

	/**
	 * Returns true if the (advanced) particle is still consistent with the
	 * rounding intervals of the latest hp/damage observations.
	 * NOTE(review): prints to stdout on EVERY check — 6000 lines per update.
	 */
	private boolean checkParticle(float[] status) {
		if(status[0]>10000)
			return false;
		if(status[1]<0)
			return false;
		float hp = getRealSensedValue(m_hp_ob, HP_PRECISION);
		float dmg = getRealSensedValue(m_dmg_ob, DAMAGE_PRECISION);
		boolean isChecked = (status[0] > hp - (HP_PRECISION / 2) && status[0] < hp + (HP_PRECISION / 2) && status[1] > dmg - (DAMAGE_PRECISION / 2) && status[1] < dmg + (DAMAGE_PRECISION / 2));
		System.out.println("checkParticle real hp="+hp+" real dmg="+dmg+" s[0]"+status[0]+" s[1]"+status[1]+" , isChecked?"+isChecked);
		return isChecked;
	}

	/**
	 * Advances all particles to the latest observation time, keeps the ones
	 * still consistent with the observations, and resamples the rest. If no
	 * particle survives, compares the "scatter" (spread of sampled death times)
	 * of a fresh set against the old one and reinitializes when the fresh set
	 * is tighter.
	 */
	private void updateParticlesAgent(int timeNow) {
		int time = m_lastUpdate - m_particles_time;
		if (time <= 0)
			return;
		//System.out.println("update time: "+time);
		boolean propertyChanged = m_propertyChanged;
		m_propertyChanged = false;
		ArrayList<float[]> newParticle = new ArrayList<float[]>();
		for (int i = 0; i < 60 * ARRAY_PRECISION; i++) {
			float[] newElement = lifeSpan(m_particles[i], time);
			if (checkParticle(newElement)) {
				newParticle.add(newElement);
			}
		}
		// No surviving particle: decide whether to rebuild from scratch.
		if (newParticle.size() == 0) {
			if (propertyChanged && m_particlesNeedResample) {
				m_particlesNeedResample = false;
				initParticles();
				return;
			}
			//// check if resample is needed ///
			// Scatter of a freshly-built candidate set...
			float[][] tmpParticle = initTempParticles();
			ArrayList<Integer> deadTime = new ArrayList<Integer>();
			int total = 0;
			for (int i = 0; i < 60; i++) {
				int index = 0;
				index = (int) (Math.random() * 60 * ARRAY_PRECISION);
				int t = calculateDeathTime(tmpParticle[index], m_lastUpdate);
				deadTime.add(t);
				total += t;
			}
			int death_avg = total / 60;
			total = 0;
			for (int i = 0; i < 60; i++) {
				total += Math.abs(deadTime.get(i) - death_avg);
			}
			int scatterNew = total;
			// ...versus scatter of the old particle set.
			deadTime = new ArrayList<Integer>();
			total = 0;
			for (int i = 0; i < 60; i++) {
				int index = 0;
				index = (int) (Math.random() * 60 * ARRAY_PRECISION);
				int t = calculateDeathTime(m_particles[index]);
				deadTime.add(t);
				total += t;
			}
			death_avg = total / 60;
			total = 0;
			for (int i = 0; i < 60; i++) {
				total += Math.abs(deadTime.get(i) - death_avg);
			}
			int scatterOld = total;
			// The fresh set is tighter: adopt it.
			if (scatterNew < scatterOld) {
				initParticles();
				return;
			}
			// Keep the old set; retry soon.
			m_time_needRefresh = timeNow + 15;
			return;
		}
		// Survivors exist: refill the set by resampling from them.
		m_time_needRefresh = timeNow + 50;
		m_particles = new float[60 * ARRAY_PRECISION][3];
		for (int i = 0; i < newParticle.size(); i++)
			m_particles[i] = newParticle.get(i);
		for (int i = newParticle.size(); i < 60 * ARRAY_PRECISION; i++) {
			int index = 0;
			index = (int) (Math.random() * (newParticle.size()));
			m_particles[i] = newParticle.get(index);
		}
		m_particles_time = m_lastUpdate;
		m_deadTime = calculateDeathTimeAgent();
		System.out.println("D");
	}

	/**
	 * Main entry point, called every simulation cycle. Lazily initializes the
	 * particle set and refreshes it when observations changed or the scheduled
	 * refresh time has passed.
	 *
	 * @return the next scheduled refresh time
	 */
	public int cycle(int time) {
		if (m_particles == null && m_lastUpdate > 0) {
			initParticles();
		}
		if (m_propertyChanged == true || time > m_time_needRefresh) {
			updateParticlesAgent(time);
		}
		return m_time_needRefresh;
	}

	/** Requests a full resample on the next update after a property change. */
	public void setParticlesNeedResample() {
		this.m_particlesNeedResample = true;
	}

	/**
	 * Picks a death-time estimate from the sorted samples: more optimistic
	 * (higher index) the lower the initial buriedness, clamped to [5, 55];
	 * always the pessimistic-ish 55th sample when the map has no refuge.
	 */
	public int getProperDeadTime() {
		int index;
		if (HAVE_REFUGE) {
			index = 60 - m_bury;
			if (index < 5)
				index = 5;
			else if (index > 55)
				index = 55;
		} else
			index = 55;
		return getDeadTime()[index];
	}

	/** Ad-hoc manual test driver (kept from development; prints to stdout). */
	public static void main(String[] args) {
		SOSParticleFilter pf = new SOSParticleFilter(1000, 100, true);
		// (Earlier experiment invocations with other hp/damage/time combinations
		// removed for brevity; they only varied the constants below.)
		int hp = 6000, damage =100, time = 165, buriedness = 60;
		pf.setHp(hp, time);
		pf.setDmg(damage, time);
		pf.setBury(buriedness);
		pf.cycle(time);
		int index = 60 - buriedness;
		if (index < 5)
			index = 5;
		else if (index > 55)
			index = 55;
		System.out.println("pf "+index+"==>dt:"+pf.getDeadTime()[index]);
		System.out.println(SimpleDeathTime.getEasyLifeTime(hp, damage, time));
		// System.out.println(SimpleDeathTime.getEasyLifeTime(hp2, damage2, time2));
	}
}
apache-2.0
tmccarthy/chatty
src/chatty/util/StringUtil.java
5739
package chatty.util;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.regex.Pattern;

/**
 * Assorted static helpers for working with strings.
 *
 * @author tduva
 */
public class StringUtil {
    
    /**
     * Tries to turn the given Object into a List of Strings: if it is a List,
     * its String elements are copied into a new list and every non-String
     * element is skipped. Any other input yields an empty list.
     *
     * @param obj the candidate object
     * @return a new List containing only the String items (possibly empty)
     */
    public static List<String> getStringList(Object obj) {
        List<String> strings = new ArrayList<>();
        if (obj instanceof List) {
            for (Object element : (List) obj) {
                if (element instanceof String) {
                    strings.add((String) element);
                }
            }
        }
        return strings;
    }
    
    /**
     * Concatenates the given items, inserting {@code delimiter} between
     * consecutive elements (but not before the first or after the last).
     *
     * @param items the strings to join
     * @param delimiter the separator placed between elements
     * @return the joined string (empty for an empty collection)
     */
    public static String join(Collection<String> items, String delimiter) {
        StringBuilder joined = new StringBuilder();
        boolean first = true;
        for (String item : items) {
            if (!first) {
                joined.append(delimiter);
            }
            joined.append(item);
            first = false;
        }
        return joined.toString();
    }
    
    /**
     * Shortens the given {@code input} to the {@code max} length. The input is
     * only modified when it actually exceeds {@code max} characters; the
     * result is then {@code max} characters long including the ".." marker
     * placed where text was removed. A positive {@code max} cuts at the end,
     * a negative {@code max} cuts at the start.
     *
     * @param input the String to shorten (may be null)
     * @param max the maximum length the result should have
     * @return the shortened String, or {@code input} unchanged if short enough
     */
    public static String shortenTo(String input, int max) {
        if (input == null || input.length() <= Math.abs(max)) {
            return input;
        }
        if (max > 2) {
            return input.substring(0, max - 2) + "..";
        }
        if (max < -2) {
            return ".." + input.substring(input.length() + max + 2); // abcd -3
        }
        return "..";
    }
    
    /**
     * Shortens the given {@code input} to {@code max} characters by removing
     * text from the middle, keeping roughly {@code min} characters of the
     * start and marking the cut with "..".
     *
     * @param input the String to shorten (may be null)
     * @param max the maximum length of the result
     * @param min how many leading characters to keep (capped at {@code max-2})
     * @return the shortened String, or {@code input} unchanged if short enough
     */
    public static String shortenTo(String input, int max, int min) {
        if (input == null || input.length() <= max) {
            return input;
        }
        int keep = Math.min(min, max - 2);
        if (max > 2) {
            String head = input.substring(0, keep);
            String tail = input.substring(input.length() - (max - keep - 2));
            return head + ".." + tail;
        }
        return "..";
    }
    
    /**
     * Null-safe {@link String#trim()}.
     *
     * @param s the String (may be null)
     * @return the trimmed String, or null if {@code s} was null
     */
    public static String trim(String s) {
        return s == null ? null : s.trim();
    }
    
    /**
     * Maps null to the empty String; any other value passes through.
     *
     * @param s the String (may be null)
     * @return {@code s}, or "" if {@code s} was null
     */
    public static String nullToString(String s) {
        return s == null ? "" : s;
    }
    
    /**
     * Null-safe lowercasing using the English locale (locale-independent
     * behavior for e.g. Turkish dotted/dotless i).
     *
     * @param s the String (may be null)
     * @return the lowercased String, or null if {@code s} was null
     */
    public static String toLowerCase(String s) {
        if (s == null) {
            return null;
        }
        return s.toLowerCase(Locale.ENGLISH);
    }
    
    /**
     * Removes leading/trailing whitespace and collapses any run of inner
     * whitespace to a single space. As a side effect, non-space whitespace
     * characters (e.g. tabs) become spaces.
     *
     * @param s the String (may be null)
     * @return the normalized String, or null if {@code s} was null
     * @see #removeDuplicateWhitespace(String)
     */
    public static String trimAll(String s) {
        if (s == null) {
            return null;
        }
        return removeDuplicateWhitespace(s).trim();
    }
    
    /** Matches one or more consecutive whitespace characters. */
    private static final Pattern CONSECUTIVE_WHITESPACE = Pattern.compile("\\s+");
    
    /**
     * Replaces every run of one or more whitespace characters with a single
     * space (so tabs etc. also become spaces).
     *
     * @param text the String to normalize (must not be null)
     * @return the normalized String
     */
    public static String removeDuplicateWhitespace(String text) {
        return CONSECUTIVE_WHITESPACE.matcher(text).replaceAll(" ");
    }
    
    /** Matches one or more consecutive CR/LF characters. */
    private static final Pattern LINEBREAKS = Pattern.compile("[\\r\\n]+");
    
    /**
     * Replaces linebreak characters with spaces; consecutive linebreak
     * characters collapse into a single space.
     *
     * @param s the String (can be empty or null)
     * @return the modified String, or null if {@code s} was null
     */
    public static String removeLinebreakCharacters(String s) {
        if (s == null) {
            return null;
        }
        return LINEBREAKS.matcher(s).replaceAll(" ");
    }
    
    /**
     * Appends {@code b} to {@code a} separated by {@code sep}; if {@code a}
     * is null or empty, returns {@code b} alone (no separator).
     *
     * @param a the existing value (may be null/empty)
     * @param sep the separator
     * @param b the value to append
     * @return the combined String
     */
    public static String append(String a, String sep, String b) {
        if (a == null || a.isEmpty()) {
            return b;
        }
        return a + sep + b;
    }
    
    /**
     * Checks if any of the String arguments is null or empty.
     *
     * @param input a number of String arguments
     * @return true if the array itself or at least one argument is null or
     *         empty, false otherwise (including for zero arguments)
     */
    public static boolean isNullOrEmpty(String... input) {
        if (input == null) {
            return true;
        }
        for (String value : input) {
            if (value == null || value.isEmpty()) {
                return true;
            }
        }
        return false;
    }
    
    /** Ad-hoc manual check; prints a middle-shortening example. */
    public static final void main(String[] args) {
        System.out.println(shortenTo("abcdefghi", 8, 5));
    }
    
}
apache-2.0
zjshen/hadoop-YARN-2928-POC
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractTimelineServicePublisher.java
5924
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.metrics;

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher.TimelineServicePublisher;

/**
 * Base class for timeline service publishers: routes {@link SystemMetricsEvent}s
 * to the per-event publish hooks implemented by concrete subclasses, and
 * provides a multi-threaded dispatcher that keeps all events of one
 * application on the same thread.
 */
public abstract class AbstractTimelineServicePublisher extends CompositeService
    implements TimelineServicePublisher, EventHandler<SystemMetricsEvent> {

  // Fixed: the logger was previously created with
  // TimelineServiceV2Publisher.class, which mislabeled every log line emitted
  // from this class (and from subclasses other than the v2 publisher).
  private static final Log LOG = LogFactory
      .getLog(AbstractTimelineServicePublisher.class);

  /** Configuration captured at init time; read by getDispatcher(). */
  private Configuration conf;

  public AbstractTimelineServicePublisher(String name) {
    super(name);
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    this.conf = conf;
    super.serviceInit(conf);
  }

  @Override
  protected void serviceStart() throws Exception {
    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {
    super.serviceStop();
  }

  /**
   * Dispatches the event to the matching abstract publish hook; unknown event
   * types are logged and dropped.
   */
  @Override
  public void handle(SystemMetricsEvent event) {
    switch (event.getType()) {
      case APP_CREATED:
        publishApplicationCreatedEvent((ApplicationCreatedEvent) event);
        break;
      case APP_FINISHED:
        publishApplicationFinishedEvent((ApplicationFinishedEvent) event);
        break;
      case APP_ACLS_UPDATED:
        publishApplicationACLsUpdatedEvent((ApplicationACLsUpdatedEvent) event);
        break;
      case APP_ATTEMPT_REGISTERED:
        publishAppAttemptRegisteredEvent((AppAttemptRegisteredEvent) event);
        break;
      case APP_ATTEMPT_FINISHED:
        publishAppAttemptFinishedEvent((AppAttemptFinishedEvent) event);
        break;
      case CONTAINER_CREATED:
        publishContainerCreatedEvent((ContainerCreatedEvent) event);
        break;
      case CONTAINER_FINISHED:
        publishContainerFinishedEvent((ContainerFinishedEvent) event);
        break;
      default:
        LOG.error("Unknown SystemMetricsEvent type: " + event.getType());
    }
  }

  abstract void publishAppAttemptFinishedEvent(AppAttemptFinishedEvent event);

  abstract void publishAppAttemptRegisteredEvent(AppAttemptRegisteredEvent event);

  abstract void publishApplicationACLsUpdatedEvent(
      ApplicationACLsUpdatedEvent event);

  abstract void publishApplicationFinishedEvent(ApplicationFinishedEvent event);

  abstract void publishApplicationCreatedEvent(ApplicationCreatedEvent event);

  abstract void publishContainerCreatedEvent(ContainerCreatedEvent event);

  abstract void publishContainerFinishedEvent(ContainerFinishedEvent event);

  /**
   * Builds the multi-threaded dispatcher with the configured pool size and
   * drain-on-stop enabled, so queued events are flushed during shutdown.
   */
  @Override
  public Dispatcher getDispatcher() {
    MultiThreadedDispatcher dispatcher =
        new MultiThreadedDispatcher(
            conf.getInt(
                YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE,
                YarnConfiguration.DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE));
    dispatcher.setDrainEventsOnStop();
    return dispatcher;
  }

  @Override
  public boolean publishRMContainerMetrics() {
    return true;
  }

  @Override
  public EventHandler<SystemMetricsEvent> getEventHandler() {
    return this;
  }

  /**
   * Dispatcher backed by a pool of {@link AsyncDispatcher}s. Events are
   * sharded across the pool by hashCode so that all events of one application
   * are handled by a single thread, preserving their order.
   */
  @SuppressWarnings({ "rawtypes", "unchecked" })
  public static class MultiThreadedDispatcher extends CompositeService
      implements Dispatcher {

    private List<AsyncDispatcher> dispatchers =
        new ArrayList<AsyncDispatcher>();

    public MultiThreadedDispatcher(int num) {
      super(MultiThreadedDispatcher.class.getName());
      for (int i = 0; i < num; ++i) {
        AsyncDispatcher dispatcher = createDispatcher();
        dispatchers.add(dispatcher);
        addIfService(dispatcher);
      }
    }

    @Override
    public EventHandler getEventHandler() {
      return new CompositeEventHandler();
    }

    @Override
    public void register(Class<? extends Enum> eventType, EventHandler handler) {
      for (AsyncDispatcher dispatcher : dispatchers) {
        dispatcher.register(eventType, handler);
      }
    }

    public void setDrainEventsOnStop() {
      for (AsyncDispatcher dispatcher : dispatchers) {
        dispatcher.setDrainEventsOnStop();
      }
    }

    // Renamed from the misspelled "CompositEventHandler"; private, so no
    // external callers are affected.
    private class CompositeEventHandler implements EventHandler<Event> {

      @Override
      public void handle(Event event) {
        // Use hashCode (of ApplicationId) to dispatch the event to the child
        // dispatcher, such that all the writing events of one application will
        // be handled by one thread, the scheduled order of the these events
        // will be preserved
        int index = (event.hashCode() & Integer.MAX_VALUE) % dispatchers.size();
        dispatchers.get(index).getEventHandler().handle(event);
      }
    }

    protected AsyncDispatcher createDispatcher() {
      return new AsyncDispatcher();
    }
  }
}
apache-2.0
markus1978/emf-fragments
de.hub.emffrag.tests/src/de/hub/emffrag/fragmentation/OppositeIndexedContentsTests.java
508
package de.hub.emffrag.fragmentation; import org.eclipse.emf.ecore.EReference; import org.junit.Test; import de.hub.emffrag.testmodels.testmodel.frag.meta.TestModelPackage; public class OppositeIndexedContentsTests extends OppositeFragmentedContentsTests { @Override protected EReference testedReference() { return TestModelPackage.eINSTANCE.getTestObjectWithIndexes_IndexedContents(); } @Test @Override public void testInverseRemoveOpposite() { // this is not supported for indexed sets } }
apache-2.0
nafae/developer
modules/dfp_axis/src/main/java/com/google/api/ads/dfp/axis/v201306/ReportJob.java
7422
/**
 * ReportJob.java
 *
 * This file was auto-generated from WSDL
 * by the Apache Axis 1.4 Mar 02, 2009 (07:08:06 PST) WSDL2Java emitter.
 *
 * NOTE: generated code — prefer regenerating from the WSDL over hand-editing.
 */

package com.google.api.ads.dfp.axis.v201306;


/**
 * Represents a report job that will be run to retrieve performance
 * and
 *             statistics information about ad campaigns, networks, inventory
 * and sales.
 */
public class ReportJob  implements java.io.Serializable {
    /* The unique ID of the {@code ReportJob}. This value is read-only
     * and is
     * assigned by Google. */
    private java.lang.Long id;

    /* Holds the filtering criteria. */
    private com.google.api.ads.dfp.axis.v201306.ReportQuery reportQuery;

    /* The status of the {@code ReportJob}. This attribute is read-only
     * and is
     * assigned by Google. */
    private com.google.api.ads.dfp.axis.v201306.ReportJobStatus reportJobStatus;

    public ReportJob() {
    }

    public ReportJob(
           java.lang.Long id,
           com.google.api.ads.dfp.axis.v201306.ReportQuery reportQuery,
           com.google.api.ads.dfp.axis.v201306.ReportJobStatus reportJobStatus) {
           this.id = id;
           this.reportQuery = reportQuery;
           this.reportJobStatus = reportJobStatus;
    }


    /**
     * Gets the id value for this ReportJob.
     *
     * @return id   * The unique ID of the {@code ReportJob}. This value is read-only
     * and is
     * assigned by Google.
     */
    public java.lang.Long getId() {
        return id;
    }


    /**
     * Sets the id value for this ReportJob.
     *
     * @param id   * The unique ID of the {@code ReportJob}. This value is read-only
     * and is
     * assigned by Google.
     */
    public void setId(java.lang.Long id) {
        this.id = id;
    }


    /**
     * Gets the reportQuery value for this ReportJob.
     *
     * @return reportQuery   * Holds the filtering criteria.
     */
    public com.google.api.ads.dfp.axis.v201306.ReportQuery getReportQuery() {
        return reportQuery;
    }


    /**
     * Sets the reportQuery value for this ReportJob.
     *
     * @param reportQuery   * Holds the filtering criteria.
     */
    public void setReportQuery(com.google.api.ads.dfp.axis.v201306.ReportQuery reportQuery) {
        this.reportQuery = reportQuery;
    }


    /**
     * Gets the reportJobStatus value for this ReportJob.
     *
     * @return reportJobStatus   * The status of the {@code ReportJob}. This attribute is read-only
     * and is
     * assigned by Google.
     */
    public com.google.api.ads.dfp.axis.v201306.ReportJobStatus getReportJobStatus() {
        return reportJobStatus;
    }


    /**
     * Sets the reportJobStatus value for this ReportJob.
     *
     * @param reportJobStatus   * The status of the {@code ReportJob}. This attribute is read-only
     * and is
     * assigned by Google.
     */
    public void setReportJobStatus(com.google.api.ads.dfp.axis.v201306.ReportJobStatus reportJobStatus) {
        this.reportJobStatus = reportJobStatus;
    }

    // Axis-generated cycle guard: remembers the object currently being compared
    // so self-referential object graphs do not recurse forever.
    private java.lang.Object __equalsCalc = null;
    public synchronized boolean equals(java.lang.Object obj) {
        if (!(obj instanceof ReportJob)) return false;
        ReportJob other = (ReportJob) obj;
        if (obj == null) return false;
        if (this == obj) return true;
        if (__equalsCalc != null) {
            return (__equalsCalc == obj);
        }
        __equalsCalc = obj;
        boolean _equals;
        _equals = true &&
            ((this.id==null && other.getId()==null) ||
             (this.id!=null &&
              this.id.equals(other.getId()))) &&
            ((this.reportQuery==null && other.getReportQuery()==null) ||
             (this.reportQuery!=null &&
              this.reportQuery.equals(other.getReportQuery()))) &&
            ((this.reportJobStatus==null && other.getReportJobStatus()==null) ||
             (this.reportJobStatus!=null &&
              this.reportJobStatus.equals(other.getReportJobStatus())));
        __equalsCalc = null;
        return _equals;
    }

    // Axis-generated cycle guard for hashCode, mirroring __equalsCalc above.
    private boolean __hashCodeCalc = false;
    public synchronized int hashCode() {
        if (__hashCodeCalc) {
            return 0;
        }
        __hashCodeCalc = true;
        int _hashCode = 1;
        if (getId() != null) {
            _hashCode += getId().hashCode();
        }
        if (getReportQuery() != null) {
            _hashCode += getReportQuery().hashCode();
        }
        if (getReportJobStatus() != null) {
            _hashCode += getReportJobStatus().hashCode();
        }
        __hashCodeCalc = false;
        return _hashCode;
    }

    // Type metadata
    private static org.apache.axis.description.TypeDesc typeDesc =
        new org.apache.axis.description.TypeDesc(ReportJob.class, true);

    // Registers the XML element/type mapping for each bean field so Axis can
    // (de)serialize instances against the v201306 DFP schema.
    static {
        typeDesc.setXmlType(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201306", "ReportJob"));
        org.apache.axis.description.ElementDesc elemField = new org.apache.axis.description.ElementDesc();
        elemField.setFieldName("id");
        elemField.setXmlName(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201306", "id"));
        elemField.setXmlType(new javax.xml.namespace.QName("http://www.w3.org/2001/XMLSchema", "long"));
        elemField.setMinOccurs(0);
        elemField.setNillable(false);
        typeDesc.addFieldDesc(elemField);
        elemField = new org.apache.axis.description.ElementDesc();
        elemField.setFieldName("reportQuery");
        elemField.setXmlName(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201306", "reportQuery"));
        elemField.setXmlType(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201306", "ReportQuery"));
        elemField.setMinOccurs(0);
        elemField.setNillable(false);
        typeDesc.addFieldDesc(elemField);
        elemField = new org.apache.axis.description.ElementDesc();
        elemField.setFieldName("reportJobStatus");
        elemField.setXmlName(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201306", "reportJobStatus"));
        elemField.setXmlType(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201306", "ReportJobStatus"));
        elemField.setMinOccurs(0);
        elemField.setNillable(false);
        typeDesc.addFieldDesc(elemField);
    }

    /**
     * Return type metadata object
     */
    public static org.apache.axis.description.TypeDesc getTypeDesc() {
        return typeDesc;
    }

    /**
     * Get Custom Serializer
     */
    public static org.apache.axis.encoding.Serializer getSerializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new  org.apache.axis.encoding.ser.BeanSerializer(
            _javaType, _xmlType, typeDesc);
    }

    /**
     * Get Custom Deserializer
     */
    public static org.apache.axis.encoding.Deserializer getDeserializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new  org.apache.axis.encoding.ser.BeanDeserializer(
            _javaType, _xmlType, typeDesc);
    }

}
apache-2.0
vladn-ma/vladn-ovs-doc
doxygen/ovs_all/html/structmatch.js
249
var structmatch = [ [ "flow", "structmatch.html#a60796db468aa8dbe2291a3720891eb4d", null ], [ "tun_md", "structmatch.html#af1258eae4b664adfdee5cf22004c9149", null ], [ "wc", "structmatch.html#a13a0fd08308df2699c23b003aa19d784", null ] ];
apache-2.0
Dodd2013/DoddCMS
html/jssrc/ctrl/admin/contentManageCtrl.js
6951
/*global define*/
'use strict';
/**
 * Admin "content management" screen: route definition plus the AngularJS
 * controller driving a bootstrap-table of content rows with per-permission
 * review/edit/delete actions.
 * NOTE(review): the header comment previously described a todo-app template —
 * replaced with what the controller actually does.
 */
define(['angular', 'pnotify', 'jquery', 'bootstrapTableNg', 'bootstrapTableCN'], function(angular, PNotify, $) {
    PNotify.prototype.options.styling = "bootstrap3";
    var CtrlName = "contentManageCtrl";
    return {
        "route": {
            "path": "contentManage",
            "route": {
                url: '/contentManage',
                // resolve: {},
                templateUrl: 'tpls/contentManage.html',
                controller: CtrlName
            }
        },
        "ctrl": {
            "name": CtrlName,
            "fn": ['$scope', '$http','$state', function($scope, $http,$state) {
                // Cached permission list for function module 402; null until the
                // first table load fetches it. ("premission" spelling is kept —
                // renaming could break template bindings.)
                $scope.premission = null;
                // Ajax loader used by bootstrap-table to fetch content rows
                // (also lazily fetches the permission list on the first call).
                $scope.ajaxRequest = function(params) {
                    // data you need
                    // console.log(JSON.stringify(params.data));
                    var getdata = $http({
                        url: config.api + '/content',
                        method: 'GET',
                        withCredentials: true,
                        params: params.data
                    });
                    if ($scope.premission === null) {
                        $http({
                            url: config.api + '/getPermission',
                            method: 'GET',
                            withCredentials: true,
                            params: {
                                functionModel: 402
                            }
                        }).then(function(data) {
                            $scope.premission = data.data;
                            return getdata;
                        }).then(function(data) {
                            params.success(data.data);
                        });;
                    } else {
                        getdata.then(function(data) {
                            params.success(data.data);
                        });
                    }
                }
                // bootstrap-table configuration (server-side pagination)
                $scope.tableCtrl = {
                    options: {
                        toolbar: "#toolbar",
                        ajax: $scope.ajaxRequest,
                        rowStyle: function(row, index) {
                            return { classes: 'none' };
                        },
                        sidePagination: 'server',
                        cache: false,
                        height: 500,
                        striped: true,
                        pagination: true,
                        pageSize: 10,
                        pageList: [5, 10, 25, 50, 100, 200],
                        search: true,
                        showColumns: true,
                        showRefresh: true,
                        minimumCountColumns: 2,
                        clickToSelect: false,
                        maintainSelected: true,
                        columns: [{
                            field: 'contentId',
                            title: '内容序号',
                            align: 'center',
                            valign: 'middle',
                            formatter: idFormatter,
                        }, {
                            field: 'contentTitle',
                            title: '内容标题',
                            align: 'center',
                            valign: 'middle'
                        }, {
                            field: 'simpleTitle',
                            title: '简单标题',
                            align: 'center',
                            valign: 'middle'
                        }, {
                            field: 'contentDESC',
                            title: '内容描述',
                            align: 'center',
                            valign: 'middle'
                        }, {
                            field: 'contentType',
                            title: '内容类型',
                            align: 'center',
                            valign: 'middle'
                        }, {
                            field: 'viewCount',
                            title: '浏览量',
                            align: 'center',
                            valign: 'middle',
                            sortable: true
                        }, {
                            field: 'createdAt',
                            title: '创建时间',
                            align: 'center',
                            valign: 'middle',
                            formatter: timeFormatter,
                            sortable: true
                        }, {
                            field: 'updatedAt',
                            title: '更新时间',
                            align: 'center',
                            valign: 'middle',
                            formatter: timeFormatter,
                            sortable: true
                        }, {
                            field: 'state',
                            title: '审核状态',
                            align: 'center',
                            valign: 'middle',
                            formatter: stateFormatter,
                            sortable: true
                        }, {
                            field: 'op',
                            title: '操作',
                            align: 'center',
                            valign: 'middle',
                            clickToSelect: false,
                            formatter: opFormatter // cell rendering the operation buttons
                        }]
                    }
                };
                // Maps the numeric review state (1/0/-1) to its display label.
                function stateFormatter(value, row, index) {
                    if (value == 1) return "审核通过";
                    if (value == 0) return "未审核";
                    if (value == -1) return "审核不通过";
                };
                // Renders the content id as a link to the public content page.
                function idFormatter(value, row, index) {
                    return `<a href="/content?contentId=${value}" target='_bank'>${value}</a>`
                };
                // Formats an epoch/date value as a locale string.
                function timeFormatter(value, row, index) {
                    if (value == null) return '未知时间';
                    var date = new Date(value);
                    var localeString = date.toLocaleString();
                    return localeString;
                };
                // Builds the action buttons the current user is permitted to see.
                function opFormatter(value, row, index) {
                    let editBtn = ''
                    let deleteBtn = '';
                    let passBtn = '';
                    for (let pms of $scope.premission) {
                        if (pms.permissionName === 'passContent') {
                            passBtn = `<a data-op='pass' data-contentId='${row.contentId}' class='opBtn' title='审核通过'><span class='glyphicon glyphicon-ok-sign color-success'></span></a>` + `<a data-op='unpass' data-contentId='${row.contentId}' class='opBtn' title='未审核'><span class='glyphicon glyphicon-question-sign color-info'></span></a>` + `<a data-op='notpass' data-contentId='${row.contentId}' class='opBtn' title='审核不通过'><span class='glyphicon glyphicon-remove-sign color-danger'></span></a>`
                        }
                        if (pms.permissionName === 'editContent') {
                            editBtn = `<a data-op='edit' data-contentId='${row.contentId}' class='opBtn' title='编辑内容'><span class='glyphicon glyphicon-edit'></span></a>`;
                        }
                        if (pms.permissionName === 'deleteContent') {
                            deleteBtn = `<a data-op='delete' data-contentId='${row.contentId}' class='opBtn' title='删除内容'><span class='glyphicon glyphicon-trash'></span></a>`;
                        }
                    }
                    return passBtn + editBtn + deleteBtn;
                };
                // Delegated click handler for the action buttons rendered above.
                $('#contentTable').on('click', '.opBtn', function(e) {
                    let op = $(e.currentTarget).attr('data-op');
                    let contentId = $(e.currentTarget).attr('data-contentId');
                    if (op === 'edit') {
                        $state.go('publish', {contentId:contentId});
                    } else if (op === 'delete') {
                        // outside Angular's digest cycle (jQuery event) — wrap in $apply
                        $scope.$apply(function() {
                            $scope.showRemove(contentId);
                        });
                    } else if (op === 'pass' || op === 'unpass' || op === 'notpass') {
                        $scope.pass(op, contentId);
                    }
                });
                // Changes a content row's review state and refreshes the table.
                $scope.pass = function(parmas, contentId) {
                    $http({
                        url: config.api + '/content/pass',
                        method: 'GET',
                        withCredentials: true,
                        headers: {
                            'Accept': "*/*"
                        },
                        params: {
                            contentId: contentId,
                            op: parmas
                        }
                    }).then(function(data) {
                        new PNotify({
                            type: 'success',
                            text: `修改审核状态成功`
                        });
                        $('#contentTable').bootstrapTable('refresh');
                    })
                };
                // Opens the delete-confirmation modal for the given row.
                $scope.showRemove = function(contentId) {
                    $scope.contentId = contentId;
                    $('#deleteModal').modal('show');
                };
                // Delete handler invoked from the confirmation modal.
                $scope.deleteContentBtn = function() {
                    $http({
                        url: config.api + '/content/delete',
                        method: 'GET',
                        withCredentials: true,
                        params: {
                            contentId: $scope.contentId,
                        }
                    }).then(function(data) {
                        if (data.data.status === 'ok') {
                            new PNotify({
                                type: 'danger',
                                text: `删除文章成功`
                            });
                            $('#deleteModal').modal('hide');
                            $('#contentTable').bootstrapTable('refresh');
                        }
                    });
                };
            }]
        }
    };
});
apache-2.0
dbiir/jdbc-for-kafka
src/cn/edu/ruc/kafka/connection/Connection.java
8072
package cn.edu.ruc.kafka.connection;

import java.sql.*;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;

/**
 * ConsumerConnection and ProducerConnection should extends this class.
 * This class implements the unused methods so that other sub-classes can be shorter and more clear.
 *
 * <p>Most JDBC features do not map onto Kafka, so the bulk of the methods below simply throw
 * {@link SQLFeatureNotSupportedException}. The handful of no-op / constant implementations
 * (auto-commit, catalog, warnings, client info, network timeout) are the ones a JDBC client is
 * likely to probe during connection setup.
 *
 * @author Bian Haoqiong
 * @version 0.0.1
 * @see java.sql.Connection
 */
public abstract class Connection implements java.sql.Connection {

    // createStatement(), close(), isClosed() and isValid() are intentionally left commented out
    // here: the concrete sub-classes are expected to provide them.
    /*
    @Override
    public Statement createStatement() throws SQLException {
        return null;
    }
    */

    // Prepared and callable statements are not supported over Kafka.
    @Override
    public PreparedStatement prepareStatement(String sql) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public CallableStatement prepareCall(String sql) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    // No SQL dialect translation is performed; the query text is returned unchanged.
    @Override
    public String nativeSQL(String sql) throws SQLException {
        return sql;
    }

    // Only manual-commit semantics exist: enabling auto-commit is rejected,
    // disabling it is a no-op (getAutoCommit() is therefore constant false).
    @Override
    public void setAutoCommit(boolean autoCommit) throws SQLException {
        if (autoCommit) {
            throw new SQLFeatureNotSupportedException();
        }
    }

    @Override
    public boolean getAutoCommit() throws SQLException {
        return false;
    }

    // Commit is accepted but has no effect; rollback cannot be supported at all.
    @Override
    public void commit() throws SQLException {
    }

    @Override
    public void rollback() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    /*
    @Override
    public void close() throws SQLException {
    }
    */

    /*
    @Override
    public boolean isClosed() throws SQLException {
        return false;
    }
    */

    @Override
    public DatabaseMetaData getMetaData() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    /*
    @Override
    public void setReadOnly(boolean readOnly) throws SQLException {
    }

    @Override
    public boolean isReadOnly() throws SQLException {
        return false;
    }
    */

    // The catalog is fixed: every connection talks to the single "kafka" catalog,
    // so setCatalog() is a documented no-op.
    @Override
    public void setCatalog(String catalog) throws SQLException {
    }

    @Override
    public String getCatalog() throws SQLException {
        return "kafka";
    }

    @Override
    public void setTransactionIsolation(int level) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public int getTransactionIsolation() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    // No warnings are ever accumulated, so there is nothing to return or clear.
    @Override
    public SQLWarning getWarnings() throws SQLException {
        return null;
    }

    @Override
    public void clearWarnings() throws SQLException {
    }

    // Note the covariant return type: the driver deals in the project's own Statement class.
    @Override
    public cn.edu.ruc.kafka.statement.Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public Map<String, Class<?>> getTypeMap() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public void setHoldability(int holdability) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public int getHoldability() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    // Savepoints require transactional rollback, which is unavailable here.
    @Override
    public Savepoint setSavepoint() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public Savepoint setSavepoint(String name) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public void rollback(Savepoint savepoint) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public void releaseSavepoint(Savepoint savepoint) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public cn.edu.ruc.kafka.statement.Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    // LOB / SQLXML factory methods have no Kafka equivalent.
    @Override
    public Clob createClob() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public Blob createBlob() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public NClob createNClob() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public SQLXML createSQLXML() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    /*
    @Override
    public boolean isValid(int timeout) throws SQLException {
        return false;
    }
    */

    // Client info is silently discarded and reads back as null.
    @Override
    public void setClientInfo(String name, String value) throws SQLClientInfoException {
    }

    @Override
    public void setClientInfo(Properties properties) throws SQLClientInfoException {
    }

    @Override
    public String getClientInfo(String name) throws SQLException {
        return null;
    }

    @Override
    public Properties getClientInfo() throws SQLException {
        return null;
    }

    @Override
    public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public void setSchema(String schema) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public String getSchema() throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public void abort(Executor executor) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    // Network timeouts are ignored; returning 0 means "no timeout" per the JDBC contract.
    @Override
    public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
    }

    @Override
    public int getNetworkTimeout() throws SQLException {
        return 0;
    }

    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }

    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        throw new SQLFeatureNotSupportedException();
    }
}
apache-2.0
tectronics/hyracks
hyracks/hyracks-storage-common/src/main/java/org/apache/hyracks/storage/common/file/TransientLocalResourceRepository.java
2866
/* * Copyright 2009-2013 by The Regents of the University of California * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * you may obtain a copy of the License from * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hyracks.storage.common.file; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hyracks.api.exceptions.HyracksDataException; public class TransientLocalResourceRepository implements ILocalResourceRepository { private Map<String, LocalResource> name2ResourceMap = new HashMap<String, LocalResource>(); private Map<Long, LocalResource> id2ResourceMap = new HashMap<Long, LocalResource>(); @Override public LocalResource getResourceById(long id) throws HyracksDataException { return id2ResourceMap.get(id); } @Override public LocalResource getResourceByName(String name) throws HyracksDataException { return name2ResourceMap.get(name); } @Override public synchronized void insert(LocalResource resource) throws HyracksDataException { long id = resource.getResourceId(); if (id2ResourceMap.containsKey(id)) { throw new HyracksDataException("Duplicate resource"); } id2ResourceMap.put(id, resource); name2ResourceMap.put(resource.getResourceName(), resource); } @Override public synchronized void deleteResourceById(long id) throws HyracksDataException { LocalResource resource = id2ResourceMap.get(id); if (resource == null) { throw new HyracksDataException("Resource doesn't exist"); } id2ResourceMap.remove(id); name2ResourceMap.remove(resource.getResourceName()); } @Override public synchronized 
void deleteResourceByName(String name) throws HyracksDataException { LocalResource resource = name2ResourceMap.get(name); if (resource == null) { throw new HyracksDataException("Resource doesn't exist"); } id2ResourceMap.remove(resource.getResourceId()); name2ResourceMap.remove(name); } @Override public List<LocalResource> getAllResources() throws HyracksDataException { List<LocalResource> resources = new ArrayList<LocalResource>(); for (LocalResource resource : id2ResourceMap.values()) { resources.add(resource); } return resources; } }
apache-2.0
gennadykr/java_training
mantis-tests/src/test/resources/config_inc.php
517
<?php
// MantisBT installation-specific configuration (config_inc.php) used by the test suite.
// NOTE(review): the DB password is empty and the crypto master salt is committed to the
// repository — acceptable only for a throwaway local test instance, never for production.

# Database connection.
$g_hostname = 'localhost';
$g_db_type = 'mysqli';
$g_database_name = 'bugtracker';
$g_db_username = 'root';
$g_db_password = '';

$g_default_timezone = 'Europe/Moscow';
$g_crypto_master_salt = '09IPdmdNFCEQQdM5S663+Tf/GsIrNzM6WSJ23Kxiwlo=';

# Disable CAPTCHA on the signup form (keeps automated signup flows scriptable).
$g_signup_use_captcha = OFF;

# Outgoing mail: deliver via SMTP on localhost.
$g_phpMailer_method = PHPMAILER_METHOD_SMTP; # or PHPMAILER_METHOD_SMTP, PHPMAILER_METHOD_SENDMAIL
$g_smtp_host = 'localhost'; # used with PHPMAILER_METHOD_SMTP
apache-2.0
alee88/pl3
algm/kd.go
524
package algm import ( "strconv" "strings" ) type KdFilter struct { KdSet string } func (f KdFilter) Filt(s string) bool { if f.KdSet == "" { f.KdSet = allCond } max, _ := strconv.Atoi(string(s[0])) for i, _ := range s { t, _ := strconv.Atoi(string(s[i])) if max < t { max = t } } min, _ := strconv.Atoi(string(s[0])) for i, _ := range s { t, _ := strconv.Atoi(string(s[i])) if min > t { min = t } } if strings.Contains(f.KdSet, strconv.Itoa(max-min)) { return PASS } return FAIL }
apache-2.0
googleapis/gapic-generator-python
tests/integration/goldens/redis/google/cloud/redis_v1/services/cloud_redis/transports/grpc_asyncio.py
24595
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.redis_v1.types import cloud_redis from google.longrunning import operations_pb2 # type: ignore from .base import CloudRedisTransport, DEFAULT_CLIENT_INFO from .grpc import CloudRedisGrpcTransport class CloudRedisGrpcAsyncIOTransport(CloudRedisTransport): """gRPC AsyncIO backend transport for CloudRedis. 
Configures and manages Cloud Memorystore for Redis instances Google Cloud Memorystore for Redis v1 The ``redis.googleapis.com`` service implements the Google Cloud Memorystore for Redis API and defines the following resource model for managing Redis instances: - The service works with a collection of cloud projects, named: ``/projects/*`` - Each project has a collection of available locations, named: ``/locations/*`` - Each location has a collection of Redis instances, named: ``/instances/*`` - As such, Redis instances are resources of the form: ``/projects/{project_id}/locations/{location_id}/instances/{instance_id}`` Note that location_id must be referring to a GCP ``region``; for example: - ``projects/redpepper-1290/locations/us-central1/instances/my-redis`` This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ _grpc_channel: aio.Channel _stubs: Dict[str, Callable] = {} @classmethod def create_channel(cls, host: str = 'redis.googleapis.com', credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. 
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: aio.Channel: A gRPC AsyncIO channel object. """ return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs ) def __init__(self, *, host: str = 'redis.googleapis.com', credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. channel (Optional[aio.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, # use the credentials which are saved credentials=self._credentials, # Set ``credentials_file`` to ``None`` here as # the credentials that we saved earlier should be used. credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. 
This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: """Create the channel designed to connect to this service. This property caches on the instance; repeated calls return the same channel. """ # Return the channel from cache. return self._grpc_channel @property def operations_client(self) -> operations_v1.OperationsAsyncClient: """Create the client designed to process long-running operations. This property caches on the instance; repeated calls return the same client. """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( self.grpc_channel ) # Return the client from cache. return self._operations_client @property def list_instances(self) -> Callable[ [cloud_redis.ListInstancesRequest], Awaitable[cloud_redis.ListInstancesResponse]]: r"""Return a callable for the list instances method over gRPC. Lists all Redis instances owned by a project in either the specified location (region) or all locations. The location should have the following format: - ``projects/{project_id}/locations/{location_id}`` If ``location_id`` is specified as ``-`` (wildcard), then all regions available to the project are queried, and the results are aggregated. Returns: Callable[[~.ListInstancesRequest], Awaitable[~.ListInstancesResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if 'list_instances' not in self._stubs: self._stubs['list_instances'] = self.grpc_channel.unary_unary( '/google.cloud.redis.v1.CloudRedis/ListInstances', request_serializer=cloud_redis.ListInstancesRequest.serialize, response_deserializer=cloud_redis.ListInstancesResponse.deserialize, ) return self._stubs['list_instances'] @property def get_instance(self) -> Callable[ [cloud_redis.GetInstanceRequest], Awaitable[cloud_redis.Instance]]: r"""Return a callable for the get instance method over gRPC. Gets the details of a specific Redis instance. Returns: Callable[[~.GetInstanceRequest], Awaitable[~.Instance]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'get_instance' not in self._stubs: self._stubs['get_instance'] = self.grpc_channel.unary_unary( '/google.cloud.redis.v1.CloudRedis/GetInstance', request_serializer=cloud_redis.GetInstanceRequest.serialize, response_deserializer=cloud_redis.Instance.deserialize, ) return self._stubs['get_instance'] @property def create_instance(self) -> Callable[ [cloud_redis.CreateInstanceRequest], Awaitable[operations_pb2.Operation]]: r"""Return a callable for the create instance method over gRPC. Creates a Redis instance based on the specified tier and memory size. By default, the instance is accessible from the project's `default network <https://cloud.google.com/vpc/docs/vpc>`__. The creation is executed asynchronously and callers may check the returned operation to track its progress. Once the operation is completed the Redis instance will be fully functional. Completed longrunning.Operation will contain the new instance object in the response field. The returned operation is automatically deleted after a few hours, so there is no need to call DeleteOperation. 
Returns: Callable[[~.CreateInstanceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'create_instance' not in self._stubs: self._stubs['create_instance'] = self.grpc_channel.unary_unary( '/google.cloud.redis.v1.CloudRedis/CreateInstance', request_serializer=cloud_redis.CreateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['create_instance'] @property def update_instance(self) -> Callable[ [cloud_redis.UpdateInstanceRequest], Awaitable[operations_pb2.Operation]]: r"""Return a callable for the update instance method over gRPC. Updates the metadata and configuration of a specific Redis instance. Completed longrunning.Operation will contain the new instance object in the response field. The returned operation is automatically deleted after a few hours, so there is no need to call DeleteOperation. Returns: Callable[[~.UpdateInstanceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'update_instance' not in self._stubs: self._stubs['update_instance'] = self.grpc_channel.unary_unary( '/google.cloud.redis.v1.CloudRedis/UpdateInstance', request_serializer=cloud_redis.UpdateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['update_instance'] @property def upgrade_instance(self) -> Callable[ [cloud_redis.UpgradeInstanceRequest], Awaitable[operations_pb2.Operation]]: r"""Return a callable for the upgrade instance method over gRPC. 
Upgrades Redis instance to the newer Redis version specified in the request. Returns: Callable[[~.UpgradeInstanceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'upgrade_instance' not in self._stubs: self._stubs['upgrade_instance'] = self.grpc_channel.unary_unary( '/google.cloud.redis.v1.CloudRedis/UpgradeInstance', request_serializer=cloud_redis.UpgradeInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['upgrade_instance'] @property def import_instance(self) -> Callable[ [cloud_redis.ImportInstanceRequest], Awaitable[operations_pb2.Operation]]: r"""Return a callable for the import instance method over gRPC. Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. Redis may stop serving during this operation. Instance state will be IMPORTING for entire operation. When complete, the instance will contain only data from the imported file. The returned operation is automatically deleted after a few hours, so there is no need to call DeleteOperation. Returns: Callable[[~.ImportInstanceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if 'import_instance' not in self._stubs: self._stubs['import_instance'] = self.grpc_channel.unary_unary( '/google.cloud.redis.v1.CloudRedis/ImportInstance', request_serializer=cloud_redis.ImportInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['import_instance'] @property def export_instance(self) -> Callable[ [cloud_redis.ExportInstanceRequest], Awaitable[operations_pb2.Operation]]: r"""Return a callable for the export instance method over gRPC. Export Redis instance data into a Redis RDB format file in Cloud Storage. Redis will continue serving during this operation. The returned operation is automatically deleted after a few hours, so there is no need to call DeleteOperation. Returns: Callable[[~.ExportInstanceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'export_instance' not in self._stubs: self._stubs['export_instance'] = self.grpc_channel.unary_unary( '/google.cloud.redis.v1.CloudRedis/ExportInstance', request_serializer=cloud_redis.ExportInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['export_instance'] @property def failover_instance(self) -> Callable[ [cloud_redis.FailoverInstanceRequest], Awaitable[operations_pb2.Operation]]: r"""Return a callable for the failover instance method over gRPC. Initiates a failover of the master node to current replica node for a specific STANDARD tier Cloud Memorystore for Redis instance. Returns: Callable[[~.FailoverInstanceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. 
# gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'failover_instance' not in self._stubs: self._stubs['failover_instance'] = self.grpc_channel.unary_unary( '/google.cloud.redis.v1.CloudRedis/FailoverInstance', request_serializer=cloud_redis.FailoverInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['failover_instance'] @property def delete_instance(self) -> Callable[ [cloud_redis.DeleteInstanceRequest], Awaitable[operations_pb2.Operation]]: r"""Return a callable for the delete instance method over gRPC. Deletes a specific Redis instance. Instance stops serving and data is deleted. Returns: Callable[[~.DeleteInstanceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'delete_instance' not in self._stubs: self._stubs['delete_instance'] = self.grpc_channel.unary_unary( '/google.cloud.redis.v1.CloudRedis/DeleteInstance', request_serializer=cloud_redis.DeleteInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs['delete_instance'] def close(self): return self.grpc_channel.close() __all__ = ( 'CloudRedisGrpcAsyncIOTransport', )
apache-2.0
sgeb/go-tuikit
tuikit/progress_spinner.go
476
package tuikit import "time" type ProgressSpinner struct { *TextView spinRunes []rune current int } func NewProgressSpinner() *ProgressSpinner { ps := &ProgressSpinner{ TextView: NewTextView(), spinRunes: []rune{'|', '/', '—', '\\', '|', '/', '—', '\\'}, } go func() { l := len(ps.spinRunes) for _ = range time.Tick(110 * time.Millisecond) { ps.current = (ps.current + 1) % l ps.SetText(string(ps.spinRunes[ps.current])) } }() return ps }
apache-2.0
ThommyB/Idnator
AndroidID/Helpers/FileHelper.cs
3267
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace Idnator.Helpers
{
    /// <summary>
    /// File-system helpers for locating Android layout files and rewriting them
    /// with generated ids (via XmlHelper.AddIds).
    /// </summary>
    public static class FileHelper
    {
        /// <summary>Pipe-separated wildcard patterns of the file types processed.</summary>
        public const string EXTENSIONS = "*.axml|*.xml";

        /// <summary>
        /// Rewrites the file at <paramref name="path"/> with ids added and reports the
        /// outcome on the console. Any read-only attribute is cleared first so the
        /// overwrite cannot fail on it.
        /// </summary>
        /// <param name="path">Path of the layout file to process.</param>
        public static void BeautifyFile(string path)
        {
            if (File.Exists(path))
            {
                try
                {
                    FileRemoveROAttribute(path);
                    // NOTE(review): Encoding.Unicode writes UTF-16 LE — confirm downstream
                    // consumers of the generated files expect that rather than UTF-8.
                    File.WriteAllText(path, XmlHelper.AddIds(path), Encoding.Unicode);
                    ColoredConsole.WriteLine(string.Format("{0} SUCCESSFULLY generated.", path), ConsoleColor.Green);
                }
                catch (Exception ex)
                {
                    ColoredConsole.WriteLine(string.Format("{0} file generation has failed!\n{1}", path, ex.Message), ConsoleColor.Red);
                }
            }
            else
                ColoredConsole.WriteLine(string.Format("{0} - NOT FOUND!", path), ConsoleColor.Red);
        }

        /// <summary>
        /// Runs <see cref="BeautifyFile"/> on every file in <paramref name="directory"/>
        /// matching <see cref="EXTENSIONS"/> (non-recursive).
        /// </summary>
        /// <param name="directory">Directory to scan.</param>
        public static void BeautifyFiles(string directory)
        {
            if (Directory.Exists(directory))
            {
                foreach (string file in GetFiles(directory, EXTENSIONS))
                {
                    BeautifyFile(file);
                }
            }
            else
                ColoredConsole.WriteLine("Directory NOT FOUND!", ConsoleColor.Red);
        }

        /// <summary>
        /// Removes the read-only attribute from the file at <paramref name="path"/>, if set.
        /// Does nothing when the file does not exist.
        /// </summary>
        /// <param name="path">Path of the file to make writable.</param>
        public static void FileRemoveROAttribute(string path)
        {
            if (File.Exists(path))
            {
                FileAttributes attributes = File.GetAttributes(path);
                if ((attributes & FileAttributes.ReadOnly) == FileAttributes.ReadOnly)
                {
                    attributes = RemoveAttribute(attributes, FileAttributes.ReadOnly);
                    File.SetAttributes(path, attributes);
                }
            }
        }

        /// <summary>
        /// Checks whether the given path is a file or a directory.
        /// (The name is a known misspelling of "IsDirectory", kept for source compatibility.)
        /// </summary>
        /// <param name="path">Path to inspect; must exist, otherwise File.GetAttributes throws.</param>
        /// <returns>true if the given path is a directory</returns>
        public static bool IsDirecotry(string path)
        {
            FileAttributes attrs = File.GetAttributes(path);
            return attrs.HasFlag(FileAttributes.Directory);
        }

        /// <summary>Returns <paramref name="attributes"/> with <paramref name="attributesToRemove"/> cleared.</summary>
        private static FileAttributes RemoveAttribute(FileAttributes attributes, FileAttributes attributesToRemove)
        {
            return attributes & ~attributesToRemove;
        }

        /// <summary>
        /// Retrieves all files in the given directory matching a pipe-separated search
        /// pattern, sorted by name.
        /// </summary>
        /// <param name="directory">Directory to scan (non-recursive).</param>
        /// <param name="searchPattern">Search pattern to filter returned files.
        /// For example "*.axml|*.xml" </param>
        /// <returns>Sorted array of matching file paths.</returns>
        private static string[] GetFiles(string directory, string searchPattern)
        {
            string[] searchPatterns = searchPattern.Split('|');
            List<string> files = new List<string>();
            foreach (string sp in searchPatterns)
                files.AddRange(Directory.GetFiles(directory, sp));
            files.Sort();
            return files.ToArray();
        }
    }
}
apache-2.0
ppavlidis/Gemma
gemma-web/src/main/java/ubic/gemma/web/controller/expression/experiment/ExpressionExperimentEditValueObject.java
2082
/*
 * The gemma project
 *
 * Copyright (c) 2014 University of British Columbia
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package ubic.gemma.web.controller.expression.experiment;

import ubic.gemma.model.common.quantitationtype.QuantitationTypeValueObject;
import ubic.gemma.model.expression.bioAssay.BioAssayValueObject;
import ubic.gemma.model.expression.experiment.ExpressionExperimentDetailsValueObject;
import ubic.gemma.model.expression.experiment.ExpressionExperimentValueObject;

import java.util.Collection;

/**
 * Details value object extended with the quantitation types and bioassays needed by the
 * experiment edit view.
 *
 * @author paul
 */
public class ExpressionExperimentEditValueObject extends ExpressionExperimentDetailsValueObject {

    private static final long serialVersionUID = 1630521876359566915L;

    // Quantitation types associated with the experiment, shown on the edit page.
    private Collection<QuantitationTypeValueObject> quantitationTypes;

    // Bioassays belonging to the experiment, shown on the edit page.
    private Collection<BioAssayValueObject> bioAssays;

    /**
     * Required when using the class as a spring bean.
     */
    public ExpressionExperimentEditValueObject() {
    }

    public ExpressionExperimentEditValueObject( ExpressionExperimentValueObject eevo ) {
        super( eevo );
    }

    public Collection<QuantitationTypeValueObject> getQuantitationTypes() {
        return this.quantitationTypes;
    }

    public void setQuantitationTypes( Collection<QuantitationTypeValueObject> quantitationTypes ) {
        this.quantitationTypes = quantitationTypes;
    }

    public Collection<BioAssayValueObject> getBioAssays() {
        return this.bioAssays;
    }

    public void setBioAssays( Collection<BioAssayValueObject> bioAssays ) {
        this.bioAssays = bioAssays;
    }
}
apache-2.0
kbec/maisica-time
src/main/java/net/maisica/time/span/Span.java
947
/*
 * Copyright 2016 Kamil Becmer <kamil.becmer at maisica.pl>.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.maisica.time.span;

/**
 * A span of time-like values: a start point of type {@code T} together with a duration of
 * type {@code U}, from which an end point can be computed.
 *
 * <p>Implementations are expected to be immutable; the {@code with*} methods return a new
 * span rather than mutating the receiver.
 *
 * @param <T> the point type (e.g. an instant or date), comparable to itself
 * @param <U> the duration type, comparable to itself
 */
public interface Span<T extends Comparable<? super T>, U extends Comparable<? super U>> {

    // Note: the redundant `public` modifiers from the original were dropped;
    // interface members are implicitly public (JLS 9.4).

    /**
     * Returns the start of this span.
     *
     * @return the start point
     */
    T getStart();

    /**
     * Returns the duration of this span.
     *
     * @return the duration
     */
    U getDuration();

    /**
     * Computes the end of this span from its start and duration.
     *
     * @return the computed end point
     */
    T computeEnd();

    /**
     * Returns a span like this one but with the given start.
     *
     * @param start the new start point
     * @return a span with the updated start
     */
    Span<T, U> withStart(T start);

    /**
     * Returns a span like this one but with the given duration.
     *
     * @param duration the new duration
     * @return a span with the updated duration
     */
    Span<T, U> withDuration(U duration);
}
apache-2.0
patrickfav/planb-android
planb-core/src/androidTest/java/at/favre/lib/planb/PlanBRecoverBehaviorFactoryTest.java
3977
package at.favre.lib.planb; import android.support.test.runner.AndroidJUnit4; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import at.favre.lib.planb.interfaces.CrashRecoverBehaviour; import at.favre.lib.planb.interfaces.RecoverBehaviorFactory; import at.favre.lib.planb.recover.DefaultBehavior; import at.favre.lib.planb.recover.StartActivityBehaviour; import at.favre.lib.planb.recover.SuppressCrashBehaviour; import static junit.framework.Assert.assertEquals; import static junit.framework.Assert.assertNotNull; @RunWith(AndroidJUnit4.class) public class PlanBRecoverBehaviorFactoryTest { private RecoverBehaviorFactory factory; @Before public void setup() { factory = PlanB.behaviourFactory(); } @Test public void testCreateDefaultBehaviors() { check(factory.createDefaultHandlerBehaviour(), DefaultBehavior.class); check(factory.createDefaultHandlerBehaviour(new CrashRecoverBehaviour.CrashAction.Noop(), new CrashRecoverBehaviour.CrashAction.Noop()), DefaultBehavior.class); check(factory.createDefaultHandlerBehaviour(null, null), DefaultBehavior.class); check(factory.createDefaultHandlerBehaviour(null, new CrashRecoverBehaviour.CrashAction.Noop()), DefaultBehavior.class); } @Test public void testCreateSuppressBehaviors() { check(factory.createSuppressCrashBehaviour(), SuppressCrashBehaviour.class); check(factory.createSuppressCrashBehaviour(new CrashRecoverBehaviour.CrashAction.Noop(), new CrashRecoverBehaviour.CrashAction.Noop()), SuppressCrashBehaviour.class); check(factory.createSuppressCrashBehaviour(null, null), SuppressCrashBehaviour.class); check(factory.createSuppressCrashBehaviour(null, new CrashRecoverBehaviour.CrashAction.Noop()), SuppressCrashBehaviour.class); } @Test public void testCreateStartActivityBehaviors() { check(factory.createStartActivityCrashBehaviour(null), StartActivityBehaviour.class); check(factory.createStartActivityCrashBehaviour(null, new CrashRecoverBehaviour.CrashAction.Noop(), new 
CrashRecoverBehaviour.CrashAction.Noop()), StartActivityBehaviour.class); check(factory.createStartActivityCrashBehaviour(null, null, null), StartActivityBehaviour.class); check(factory.createStartActivityCrashBehaviour(null, null, new CrashRecoverBehaviour.CrashAction.Noop()), StartActivityBehaviour.class); } @Test public void testCreateRestartForegroundActivityBehaviors() { check(factory.createRestartForegroundActivityCrashBehaviour(), StartActivityBehaviour.class); check(factory.createRestartForegroundActivityCrashBehaviour(new CrashRecoverBehaviour.CrashAction.Noop(), new CrashRecoverBehaviour.CrashAction.Noop()), StartActivityBehaviour.class); check(factory.createRestartForegroundActivityCrashBehaviour(null, null), StartActivityBehaviour.class); check(factory.createRestartForegroundActivityCrashBehaviour(null, new CrashRecoverBehaviour.CrashAction.Noop()), StartActivityBehaviour.class); } @Test public void testCreateRestartLauncherActivityBehaviors() { check(factory.createRestartLauncherActivityCrashBehaviour(), StartActivityBehaviour.class); check(factory.createRestartLauncherActivityCrashBehaviour(null, null), StartActivityBehaviour.class); check(factory.createRestartLauncherActivityCrashBehaviour(null, new CrashRecoverBehaviour.CrashAction.Noop()), StartActivityBehaviour.class); } private void check(CrashRecoverBehaviour behaviour, Class<? extends CrashRecoverBehaviour> expectedClass) { assertNotNull(behaviour); assertNotNull(behaviour.getPostCrashAction()); assertNotNull(behaviour.getPreCrashAction()); assertEquals(expectedClass, behaviour.getClass()); behaviour.callDefaultExceptionHandler(); behaviour.killProcess(); behaviour.persistCrashData(); } }
apache-2.0
paetti1988/qmate
MATE/org.tud.inf.st.mbt.emf.graphicaleditor/src/org/tud/inf/st/mbt/emf/graphicaleditor/basics/LabeledRoundedRectangle.java
1447
package org.tud.inf.st.mbt.emf.graphicaleditor.basics; import org.eclipse.draw2d.Graphics; import org.eclipse.draw2d.Label; import org.eclipse.draw2d.RoundedRectangle; import org.eclipse.draw2d.geometry.Point; import org.eclipse.draw2d.geometry.Rectangle; import org.eclipse.emf.ecore.EStructuralFeature; import org.eclipse.swt.graphics.Color; public class LabeledRoundedRectangle extends MultiLabeledFigure{ private Label label; private RoundedRectangle shape; private EStructuralFeature labeledFeature; public LabeledRoundedRectangle(Color fg, Color bg, EStructuralFeature labeledFeature) { super(bg); this.labeledFeature = labeledFeature; shape = new RoundedRectangle(); setBgShape(shape); shape.setBackgroundColor(bg); label = new Label(); label.setForegroundColor(fg); setToolTip(new Label()); add(shape); add(label); } @Override protected void paintFigure(Graphics graphics) { Rectangle bounds = getBounds(); shape.setBounds(new Rectangle(bounds)); label.setBounds(new Rectangle(bounds)); shape.invalidate(); label.invalidate(); } @Override public EStructuralFeature getFeatureOn(Point p) { return labeledFeature; } @Override public Label getLabel(EStructuralFeature f) { return label; } @Override public void setBackgroundColor(Color bg) { shape.setBackgroundColor(bg); } @Override public void setForegroundColor(Color fg) { label.setForegroundColor(fg); } }
apache-2.0
anand1712/cloudpulse
cloudpulse/tests/unit/db/utils.py
1230
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Cisco Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def get_cpulse_test(**kw):
    """Build a dict of cpulse test-row attributes for unit tests.

    Every field has a sensible default and can be overridden via keyword
    arguments (e.g. ``get_cpulse_test(result='failed')``).
    """
    return {
        'id': kw.get('id', 32),
        'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'),
        'name': kw.get('name', 'dummy_cloudtest'),
        'state': kw.get('state', 'created'),
        # BUG FIX: this previously read kw.get('state', 'success'), so a
        # caller-supplied 'result' was silently ignored and any 'state'
        # override leaked into 'result'.
        'result': kw.get('result', 'success'),
        'testtype': kw.get('testtype', 'periodic'),
        'created_at': kw.get('created_at'),
        'updated_at': kw.get('updated_at'),
    }


def create_cpulse_test(**kw):
    """Insert a test row built from get_cpulse_test() through the DB API.

    Returns whatever the backend's create_test() returns.
    """
    # Imported lazily so the pure dict helper above stays usable (and
    # testable) without the database backend being importable.
    from cloudpulse.db import api as db_api

    test = get_cpulse_test(**kw)
    dbapi = db_api.get_instance()
    return dbapi.create_test(test)
apache-2.0
alvarosimon/one
src/sunstone/public/app/utils/tab-datatable.js
40493
/* -------------------------------------------------------------------------- */ /* Copyright 2002-2016, OpenNebula Project, OpenNebula Systems */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. You may obtain */ /* a copy of the License at */ /* */ /* http://www.apache.org/licenses/LICENSE-2.0 */ /* */ /* Unless required by applicable law or agreed to in writing, software */ /* distributed under the License is distributed on an "AS IS" BASIS, */ /* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ /* See the License for the specific language governing permissions and */ /* limitations under the License. */ /* -------------------------------------------------------------------------- */ define(function(require) { /* DEPENDENCIES */ require('datatables.net'); require('datatables.foundation'); var TemplateEmptyTable = require('hbs!./tab-datatable/empty-table'); var Sunstone = require('sunstone'); var SunstoneConfig = require('sunstone-config'); var Locale = require('utils/locale'); var Tips = require('utils/tips'); var OpenNebula = require('opennebula'); var Notifier = require('utils/notifier'); var OpenNebulaUser = require('opennebula/user'); var LabelsUtils = require('utils/labels/utils'); /* TEMPLATES */ var TemplateDataTableHTML = require('hbs!./tab-datatable/table'); var TemplateSearchInputHTML = require('hbs!./tab-datatable/search-input'); /* CONSTANTS */ var SPINNER = '<img src="images/ajax-loader.gif" alt="retrieving" class="loading_img"/>'; /* GLOBAL INITIALIZATION */ /* Set the defaults for DataTables initialisation */ $.extend(true, $.fn.dataTable.defaults, { dom: "t"+ "<'row'<'small-6 columns'li><'small-6 columns'p>>", renderer: 'foundation', autoWidth: false, language: { "sLengthMenu": "_MENU_", "emptyTable": TemplateEmptyTable() } } ); //$.extend(true, $.fn.dataTable.defaults, { // dom: "t<'row collapse'<'small-6 columns'i><'small-6 
columns'lp>>", // renderer: 'foundation', // language: { // "sLengthMenu": "_MENU_", // "emptyTable": TemplateEmptyTable() // } //}); /* CONSTRUCTOR */ /* Child class must define: this.dataTableId this.resource this.dataTableOptions this.columns this.conf = { 'info': true, enable on click row will show the element 'action': true, enable actions on row elements 'select': true, enable selecting elements from the table 'selectOptions': { 'filter_fn': function(ds) { return ds.TYPE == 0; } } 'customTabContext': jquery selector used when the datatable has associated buttons. By default it will be the parent tab 'customTrListener': function executed when a tr is clicked. Arguments are (tableObj, tr) 'searchDropdownHTML': optional HTML to place inside a dropdown next to the search input } 1. The table HTML is returned calling the table.dataTableHTML attr 2. The table must be initialized after including it in the DOM, using the table.initilize() method 3. After that all the methods can be called on the table, depending on the functionalities enabled (info, action, select) */ function TabDatatable() { var that = this; if (that.conf.select) { if (!that.selectOptions.select_resource) { that.selectOptions.select_resource = Locale.tr("Please select a resource from the list"); } if (!that.selectOptions.you_selected) { that.selectOptions.you_selected = Locale.tr("You selected the following resource:"); } if (that.selectOptions.id_index == undefined) { that.selectOptions.id_index = 0; } $.extend(that.selectOptions, that.conf.selectOptions); that.selectOptions.fixed_ids_map_orig = {}; if (that.selectOptions.fixed_ids != undefined) { $.each(that.selectOptions.fixed_ids, function() { that.selectOptions.fixed_ids_map_orig[this] = true; }); } that.selectOptions.starred_ids_map = {}; if (that.selectOptions.starred_ids != undefined) { $.each(that.selectOptions.starred_ids, function() { that.selectOptions.starred_ids_map[this] = true; }); if (that.selectOptions.starred_icon == undefined) { 
that.selectOptions.starred_icon = '<i class="fa fa-star fa-fw"></i>'; } } if (that.selectOptions.multiple_choice == undefined) { that.selectOptions.multiple_choice = false; } } that.dataTableHTML = TemplateDataTableHTML({ 'dataTableId': this.dataTableId, 'columns': this.columns, 'conf': this.conf, 'selectOptions': this.selectOptions}); that.searchInputHTML = TemplateSearchInputHTML({ 'dataTableSearchId': this.dataTableId + 'Search', 'searchDropdownHTML': this.conf.searchDropdownHTML }); return that; } TabDatatable.prototype = { 'initialize': _initialize, 'initCheckAllBoxes': _initCheckAllBoxes, 'tableCheckboxesListener': _tableCheckboxesListener, 'onlyOneCheckboxListener': _onlyOneCheckboxListener, 'infoListener': _infoListener, 'updateElement': _updateElement, 'elements': _elements, 'updateView': _updateView, 'getElementData': _getElementData, 'waitingNodes': _waitingNodes, 'recountCheckboxes': _recountCheckboxes, 'filter': _filter, 'resetResourceTableSelect': _resetResourceTableSelect, 'refreshResourceTableSelect': _refreshResourceTableSelect, 'selectResourceTableSelect': _selectResourceTableSelect, 'retrieveResourceTableSelect': _retrieveResourceTableSelect, 'idInput': _idInput, 'initSelectResourceTableSelect': _initSelectResourceTableSelect, 'updateFn': _updateFn, 'list': _list, 'clearLabelsFilter': _clearLabelsFilter, 'setLabelsFilter': _setLabelsFilter } return TabDatatable; /* FUNCTION DEFINITIONS */ function _initialize(opts) { var that = this; if (this.conf.select) { if (opts && opts.selectOptions) { $.extend(this.selectOptions, opts.selectOptions); } this.initSelectResourceTableSelect(); } else { this.dataTableOptions.pageLength = parseInt(config['user_config']['page_length']); } this.dataTable = $('#' + this.dataTableId).dataTable(this.dataTableOptions); // Remember page length only for non selectable datatables if (!this.conf.select) { this.dataTable.on( 'length.dt', function ( e, settings, len ) { if (config['user_config']['page_length'] != len){ 
config['user_config']['page_length'] = len; var sunstone_setting = {'TABLE_DEFAULT_PAGE_LENGTH': len}; Sunstone.runAction("User.append_sunstone_setting", config['user_id'], sunstone_setting); } }); } $('#' + this.dataTableId + 'Search').on('input', function() { that.dataTable.fnFilter($(this).val()); return false; }); if(that.conf.searchDropdownHTML != undefined){ var context = $('#' + this.dataTableId + 'Search-wrapper'); if (that.setupSearch != undefined){ that.setupSearch(context); } else { _setupSearch(that, context); } $("a.advanced-search-clear", context).on('click', function(){ $("input,select", context).val("").trigger("input"); $("button.advanced-search", context).click(); }); $("input", context).on("keypress", function(e) { var code = e.keyCode || e.which; if (code == 13) { $("button.advanced-search", context).click(); } }); $("button.advanced-search", context).on('click', function(){ $('#' + that.dataTableId + 'Search-dropdown', context).foundation('close'); that.dataTable.fnDraw(true); return false; }); } this.dataTable.on('draw.dt', function() { that.recountCheckboxes(); }) if (this.selectOptions && this.selectOptions.id_index) { this.dataTable.fnSort([[this.selectOptions.id_index, config['user_config']['table_order']]]); } else { this.dataTable.fnSort([[1, SunstoneConfig.tableOrder]]); } if (this.conf.actions) { this.initCheckAllBoxes(); this.tableCheckboxesListener(); } if (this.conf.oneSelection == true) { this.onlyOneCheckboxListener(); $(".check_all", that.dataTable).hide(); } if (this.conf.info) { this.infoListener(_defaultTrListener); } else if (this.conf.customTrListener) { this.infoListener(this.conf.customTrListener); } else if (!this.conf.select){ this.infoListener(); } if (this.conf.select) { that.dataTable.fnSetColumnVis(0, false); } Foundation.reflow($('#' + this.dataTableId + 'Search-dropdown'), 'dropdown'); // For some reason the dropdown forces horizontal and vertical scrollbars, // and breaks the full-screen modal positioning (VNC). 
It gets fixed once // the dropdown is shown+hidden, so we force it now $('#' + this.dataTableId + 'Search-wrapper button.search-dropdown').click(); $('#' + this.dataTableId + 'Search-wrapper button.search-dropdown').click(); } function _setupSearch(that, context) { that.searchFields = []; $("[search-field]", context).each(function(){ that.searchFields.push( $(this).attr("search-field") ); }); that.searchVals = {}; that.searchFields.forEach(function(name){ that.searchVals[name] = ""; }); that.searchOps = {}; that.searchFields.forEach(function(name){ var op = $("[search-field="+name+"]", context).attr("search-operation"); if (op == undefined){ op = "match"; } that.searchOps[name] = op; }); $("[search-field]", context).on('input change', function(){ var name = $(this).attr("search-field"); if($(this).attr("type") == "date"){ var val = $(this).val(); if(val == ""){ that.searchVals[name] = ""; } else { that.searchVals[name] = parseInt( new Date(val).getTime() ) / 1000; } }else{ that.searchVals[name] = $(this).val(); } }); that.dataTable.on('search.dt', function() { var empty = true; for(var i=0; i < that.searchFields.length; i++){ var name = that.searchFields[i]; empty = $("[search-field="+name+"]", context).val() == ""; if(!empty){ break; } } if(empty){ $("button.search-dropdown", context).addClass("hollow"); } else { $("button.search-dropdown", context).removeClass("hollow"); } }); $.fn.dataTable.ext.search.push( function( settings, data, dataIndex ) { // This is a global search function, we need to apply it only if the // search is triggered for the current table if(that.dataTableId != settings.nTable.id){ return true; } try { var values = JSON.parse( decodeURIComponent(escape(atob(data[that.searchColumn]))) ); var match = true; for(var i=0; i < that.searchFields.length; i++){ var name = that.searchFields[i]; switch(that.searchOps[name]){ case "match": match = (values[name].match( that.searchVals[name] ) != null); break; case "<=": match = (that.searchVals[name] == 
"") || (values[name] <= that.searchVals[name]); break; case ">=": match = (that.searchVals[name] == "") || (values[name] >= that.searchVals[name]); break; case ">": match = (that.searchVals[name] == "") || (values[name] > that.searchVals[name]); break; case "<": match = (that.searchVals[name] == "") || (values[name] < that.searchVals[name]); break; case "==": match = (that.searchVals[name] == "") || (values[name] == that.searchVals[name]); break; } if (!match){ break; } } return match; } catch (err) {} return true; } ); } function _defaultTrListener(tableObj, tr) { var aData = tableObj.dataTable.fnGetData(tr); if (!aData) return true; var id = $(aData[0]).val(); if (!id) return true; Sunstone.showElement(tableObj.tabId, id); return false; } //Shows run a custom action when clicking on rows. function _infoListener(info_action) { var that = this; this.dataTable.on("click", 'tbody tr', function(e) { if ($(e.target).is('input') || $(e.target).is('select') || $(e.target).is('option')) { return true; } if (info_action) { //If ctrl is hold down, make check_box click if (e.ctrlKey || e.metaKey || $(e.target).is('input')) { $('.check_item', this).trigger('click'); } else { info_action(that, this); } } else { $('.check_item', this).trigger('click'); } return true; }); } //Add a listener to the check-all box of a datatable, enabling it to //check and uncheck all the checkboxes of its elements. function _initCheckAllBoxes() { var that = this; this.dataTable.on("change", '.check_all', function() { var table = $(this).closest('.dataTables_wrapper'); if ($(this).is(":checked")) { //check all $('tbody input.check_item', table).prop('checked', true).change(); $('td', table).addClass('markrowchecked'); } else { //uncheck all $('tbody input.check_item', table).prop('checked', false).change(); $('td', table).removeClass('markrowchecked'); }; that.recountCheckboxes(); }); } //Handle the activation of action buttons and the check_all box //when elements in a datatable are modified. 
function _recountCheckboxes() { var table = $('tbody', this.dataTable); var context; if (this.conf.customTabContext) { context = this.conf.customTabContext; } else { context = table.parents('.tab'); if ($(".sunstone-info", context).is(':visible')) { return; } } var nodes = $('tr', table); //visible nodes var total_length = nodes.length; var checked_length = $('input.check_item:checked', nodes).length; var last_action_b = $('.last_action_button', context); if (checked_length) { //at least 1 element checked //enable action buttons $('.top_button, .list_button', context).prop('disabled', false); //enable checkall box if (total_length == checked_length) { $('.check_all', this.dataTable).prop('checked', true); } else { $('.check_all', this.dataTable).prop('checked', false); }; } else { //no elements cheked //disable action buttons, uncheck checkAll $('.check_all', this.dataTable).prop('checked', false); $('.top_button, .list_button', context).prop('disabled', true).attr('disabled', 'disabled'); }; //any case the create dialog buttons should always be enabled. 
$('.create_dialog_button', context).prop('disabled', false); $('.alwaysActive', context).prop('disabled', false); } //Init action buttons and checkboxes listeners function _tableCheckboxesListener() { //Initialization - disable all buttons var context = this.conf.customTabContext || this.dataTable.parents('.tab'); $('.last_action_button', context).prop('disabled', true); $('.top_button, .list_button', context).prop('disabled', true); //These are always enabled $('.create_dialog_button', context).prop('disabled', false); $('.alwaysActive', context).prop('disabled', false); //listen to changes in the visible inputs var that = this; this.dataTable.on("change", 'tbody input.check_item', function() { var datatable = $(this).parents('table'); if ($(this).is(":checked")) { $(this).parents('tr').children().addClass('markrowchecked'); } else { $(this).parents('tr').children().removeClass('markrowchecked'); } that.recountCheckboxes(); }); } /* * onlyOneCheckboxListener: Only one box can be checked */ function _onlyOneCheckboxListener() { var that = this; this.dataTable.on("change", 'tbody input.check_item', function() { var checked = $(this).is(':checked'); $('td', that.dataTable).removeClass('markrowchecked'); $('input.check_item:checked', that.dataTable).prop('checked', false); $("td", $(this).closest('tr')).addClass('markrowchecked') $(this).prop('checked', checked); }); } // Updates a data_table, with a 2D array containing the new values // Does a partial redraw, so the filter and pagination are kept // fromArray if true do not process the list since it is already an array of elements function _updateView(request, list, fromArray) { var selected_row_id = null; var checked_row_ids = new Array(); var that = this; if (that.preUpdateView) { that.preUpdateView(); } if(that.conf.searchDropdownHTML != undefined){ that.searchSets = {}; try { that.searchFields.forEach(function(name){ that.searchSets[name] = new Set(); }); } catch(e){} } 
that.dataTable.DataTable().page.len(parseInt(config['user_config']['page_length'])); var row_id_index = this.dataTable.attr("row_id"); if (row_id_index != undefined) { $.each($(that.dataTable.fnGetNodes()), function() { if ($('td.markrow', this).length != 0) { var aData = that.dataTable.fnGetData(this); selected_row_id = aData[row_id_index]; } }); } $.each($(that.dataTable.fnGetNodes()), function() { if ($('td.markrowchecked', this).length != 0) { if (!isNaN($($('td', $(this))[1]).html())) { checked_row_ids.push($($('td', $(this))[1]).html()); } else { checked_row_ids.push($($('td', $(this))[0]).html()); } } }); // dataTable.fnSettings is undefined when the table has been detached from // the DOM if (that.dataTable && that.dataTable.fnSettings()) { var dTable_settings = that.dataTable.fnSettings(); var prev_start = dTable_settings._iDisplayStart; that.dataTable.fnClearTable(false); var item_list; if (fromArray) { item_list = list; } else { item_list = []; $.each(list, function() { var item = that.elementArray(this); if (item){ item_list.push(item); if(that.searchColumn != undefined){ try{ var values = JSON.parse( decodeURIComponent(escape(atob(item[that.searchColumn]))) ); that.searchFields.forEach(function(name){ that.searchSets[name].add(values[name]); }); }catch(e){} } } }); } if (item_list.length > 0) { that.dataTable.fnAddData(item_list, false); } var new_start = prev_start; if (new_start > item_list.length - 1) { if (item_list.length > 0) new_start = item_list.length - 1; else new_start = 0; } dTable_settings.iInitDisplayStart = new_start; that.dataTable.fnDraw(true); }; if (selected_row_id != undefined) { $.each($(that.dataTable.fnGetNodes()), function() { var aData = that.dataTable.fnGetData(this); if (aData[row_id_index] == selected_row_id) { $('td', this)[0].click(); } }); } if (checked_row_ids.length != 0) { $.each($(that.dataTable.fnGetNodes()), function() { var current_id = $($('td', this)[1]).html(); if (isNaN(current_id)) { current_id = $($('td', 
this)[0]).html(); } if (current_id) { if ($.inArray(current_id, checked_row_ids) != -1) { $('input.check_item:not(:checked)', this).first().click(); $('td', this).addClass('markrowchecked'); } } }); } if (that.labelsColumn && SunstoneConfig.isTabEnabled(that.tabId) && $("#" + that.tabId).is(':visible')) { LabelsUtils.insertLabelsDropdown(that.tabId); if (SunstoneConfig.isTabActionEnabled(that.tabId, that.resource+".menu_labels")){ LabelsUtils.insertLabelsMenu({'tabName': that.tabId}); } } if (that.postUpdateView) { that.postUpdateView(); } if(that.conf.searchDropdownHTML != undefined){ try { that.searchFields.forEach(function(name){ var st = ""; var dlist = $("datalist[search-datalist="+name+"]", $("#"+that.tabId)); if(dlist.length > 0){ that.searchSets[name].forEach(function(val){ st += '<option value="' + val + '">'; }); dlist.html(st); } }); } catch(e){} } } //replaces an element with id 'tag' in a dataTable with a new one function _updateElement(request, elementJSON) { var that = this; var elementId = elementJSON[that.xmlRoot].ID; var element = that.elementArray(elementJSON); $.each(that.dataTable.fnGetData(), function(index, aData) { if (aData[that.selectOptions.id_index] === elementId) { var nodes = that.dataTable.fnGetNodes(); var checkId = '#' + that.resource.toLowerCase() + '_' + elementId; var checkVal = $(checkId, nodes).prop('checked'); that.dataTable.fnUpdate(element, index, undefined, false); if (checkVal) { $(checkId, nodes).prop('checked', checkVal); } that.recountCheckboxes(); return false; } }); } function _getElementData(id, resource_tag) { // TODO If the element is not included in the visible rows of // the table, it will not be included in the fnGetNodes response var nodes = this.dataTable.fnGetNodes(); var tr = $('#' + resource_tag + '_' + id, nodes).closest('tr'); return this.dataTable.fnGetData(tr); } function _waitingNodes() { $('tr input.check_item:visible', this.dataTable).replaceWith(SPINNER); } //returns an array of ids of selected 
elements in a dataTable function _elements(opts) { var that = this; var selected_nodes = []; if (this.dataTable) { var tab = this.dataTable.parents(".tab") if (Sunstone.rightInfoVisible(tab)) { selected_nodes.push(Sunstone.rightInfoResourceId(tab)); } else { //Which rows of the datatable are checked? var nodes = $('tbody input.check_item:checked', this.dataTable); $.each(nodes, function() { selected_nodes.push($(this).val()); }); } }; if (opts && opts.names){ var pairs = []; $.each(selected_nodes, function(){ pairs.push({id: this, name: OpenNebula[that.resource].getName(this)}); }); return pairs; } return selected_nodes; } function _filter(value, columnId) { this.dataTable.fnFilter(value, columnId); } /* SELECT RESOURCE FUNCTION DEFINITIONS */ function _initSelectResourceTableSelect() { var that = this; var section = $('#' + that.dataTableId + 'Container'); if (that.selectOptions.id_index == undefined) { that.selectOptions.id_index = 0; } if (that.selectOptions.name_index == undefined) { that.selectOptions.name_index = 1; } if (that.selectOptions.dataTable_options == undefined) { that.selectOptions.dataTable_options = {}; } if (that.selectOptions.select_callback == undefined) { that.selectOptions.select_callback = function() {}; } if (that.selectOptions.unselect_callback == undefined) { that.selectOptions.unselect_callback = function() {}; } if (that.selectOptions.multiple_choice) { that.dataTableOptions.fnRowCallback = function(nRow, aData, iDisplayIndex, iDisplayIndexFull) { var row_id = aData[that.selectOptions.id_index]; var ids = $('#selected_ids_row_' + that.dataTableId, section).data("ids"); if (ids != undefined && ids[row_id]) { $("td", nRow).addClass('markrowchecked'); $('input.check_item', nRow).prop('checked', true); } else { $("td", nRow).removeClass('markrowchecked'); $('input.check_item', nRow).prop('checked', false); } }; } else { that.dataTableOptions.fnRowCallback = function(nRow, aData, iDisplayIndex, iDisplayIndexFull) { var row_id = 
aData[that.selectOptions.id_index]; var selected_id = $('#selected_resource_id_' + that.dataTableId, section).val(); if (row_id == selected_id) { $("td", nRow).addClass('markrow'); $('input.check_item', nRow).prop('checked', true); } else { $("td", nRow).removeClass('markrow'); $('input.check_item', nRow).prop('checked', false); } }; } $('#refresh_button_' + that.dataTableId, section).off("click"); section.on('click', '#refresh_button_' + that.dataTableId, function() { that.updateFn(); return false; }); $('#' + that.dataTableId + '_search', section).on('input', function() { that.dataTable.fnFilter($(this).val()); return false; }) if (that.selectOptions.read_only) { $('#selected_ids_row_' + that.dataTableId, section).hide(); } else if (that.selectOptions.multiple_choice) { $('#selected_resource_' + that.dataTableId, section).hide(); $('#select_resource_' + that.dataTableId, section).hide(); $('#selected_resource_multiple_' + that.dataTableId, section).hide(); $('#select_resource_multiple_' + that.dataTableId, section).show(); } else { $('#selected_resource_' + that.dataTableId, section).hide(); $('#select_resource_' + that.dataTableId, section).show(); $('#selected_resource_multiple_' + that.dataTableId, section).hide(); $('#select_resource_multiple_' + that.dataTableId, section).hide(); } $('#selected_resource_name_' + that.dataTableId, section).hide(); $('#selected_ids_row_' + that.dataTableId, section).data("options", that.selectOptions); if (that.selectOptions.read_only) { } else if (that.selectOptions.multiple_choice) { $('#selected_ids_row_' + that.dataTableId, section).data("ids", {}); function row_click(row, aData) { that.dataTable.unbind("draw"); var row_id = aData[that.selectOptions.id_index]; var row_name = aData[that.selectOptions.name_index]; var ids = $('#selected_ids_row_' + that.dataTableId, section).data("ids"); if (ids[row_id]) { delete ids[row_id]; // Happens if row is not yet rendered (i.e. 
higher unvisited page) if (row != undefined) { $("td", row).removeClass('markrowchecked'); $('input.check_item', row).prop('checked', false); } $('#selected_ids_row_' + that.dataTableId + ' span[row_id="' + row_id + '"]', section).remove(); that.selectOptions.unselect_callback(); } else { ids[row_id] = true; // Happens if row is not yet rendered (i.e. higher unvisited page) if (row != undefined) { $("td", row).addClass('markrowchecked'); $('input.check_item', row).prop('checked', true); } $('#selected_ids_row_' + that.dataTableId, section).append('<span row_id="' + row_id + '" class="radius label">' + row_name + ' <span class="fa fa-times blue"></span></span> '); that.selectOptions.select_callback(aData, that.selectOptions); } if ($.isEmptyObject(ids)) { $('#selected_resource_multiple_' + that.dataTableId, section).hide(); $('#select_resource_multiple_' + that.dataTableId, section).show(); } else { $('#selected_resource_multiple_' + that.dataTableId, section).show(); $('#select_resource_multiple_' + that.dataTableId, section).hide(); } return true; }; $('#' + that.dataTableId + ' tbody', section).on("click", "tr", function(e) { var aData = that.dataTable.fnGetData(this); if(aData != undefined){ row_click(this, aData); } }); $(section).on("click", '#selected_ids_row_' + that.dataTableId + ' span.fa.fa-times', function() { var row_id = $(this).parent("span").attr('row_id'); var found = false; var aData = that.dataTable.fnGetData(); // TODO: improve preformance, linear search $.each(aData, function(index, row) { if (row[that.selectOptions.id_index] == row_id) { found = true; row_click(that.dataTable.fnGetNodes(index), row); return false; } }); if (!found) { var ids = $('#selected_ids_row_' + that.dataTableId, section).data("ids"); delete ids[row_id]; $('#selected_ids_row_' + that.dataTableId + ' span[row_id="' + row_id + '"]', section).remove(); if ($.isEmptyObject(ids)) { $('#selected_resource_multiple_' + that.dataTableId, section).hide(); 
$('#select_resource_multiple_' + that.dataTableId, section).show(); } else { $('#selected_resource_multiple_' + that.dataTableId, section).show(); $('#select_resource_multiple_' + that.dataTableId, section).hide(); } } that.selectOptions.unselect_callback(aData, that.selectOptions); }); } else { $('#' + that.dataTableId + ' tbody', section).delegate("tr", "click", function(e) { that.dataTable.unbind("draw"); var aData = that.dataTable.fnGetData(this); $("td.markrow", that.dataTable).removeClass('markrow'); $('tbody input.check_item', that.dataTable).prop('checked', false); if (aData != undefined){ $("td", this).addClass('markrow'); $('input.check_item', this).prop('checked', true); $('#selected_resource_' + that.dataTableId, section).show(); $('#select_resource_' + that.dataTableId, section).hide(); $('#selected_resource_id_' + that.dataTableId, section).val(aData[that.selectOptions.id_index]).trigger("change"); $('#selected_resource_name_' + that.dataTableId, section).text(aData[that.selectOptions.name_index]).trigger("change"); $('#selected_resource_name_' + that.dataTableId, section).show(); that.selectOptions.select_callback(aData, that.selectOptions); } $('#selected_resource_id_' + that.dataTableId, section).removeData("pending_select"); return true; }); } Tips.setup(section); } function _resetResourceTableSelect() { var that = this; var section = $('#' + that.dataTableId + 'Container'); // TODO: do for multiple_choice // TODO: works for more than one page? 
$("td.markrow", that.dataTable).removeClass('markrow'); $('tbody input.check_item', that.dataTable).prop('checked', false); $('#' + that.dataTableId + '_search', section).val("").trigger("input"); $('#refresh_button_' + that.dataTableId).click(); $('#selected_resource_name_' + that.dataTableId, section).text("").hide(); $('#selected_resource_' + that.dataTableId, section).hide(); $('#select_resource_' + that.dataTableId, section).show(); } // Returns an ID, or an array of IDs for that.selectOptions.multiple_choice function _retrieveResourceTableSelect() { var that = this; var section = $('#' + that.dataTableId + 'Container'); if (that.selectOptions.multiple_choice) { var ids = $('#selected_ids_row_' + that.dataTableId, section).data("ids"); var arr = []; $.each(ids, function(key, val) { arr.push(key); }); return arr; } else { return $('#selected_resource_id_' + that.dataTableId, section).val(); } } /** * Returns the jquery selector for the ID input. Can be used to add attributes * to it, such as 'wizard_field' * @return {Object} jquery selector for the ID input */ function _idInput() { var that = this; var section = $('#' + that.dataTableId + 'Container'); if (that.selectOptions.multiple_choice) { return $('#selected_ids_row_' + that.dataTableId, section); } else { return $('#selected_resource_id_' + that.dataTableId, section); } } // Clicks the refresh button function _refreshResourceTableSelect() { var that = this; var section = $('#' + that.dataTableId + 'Container'); $('#refresh_button_' + that.dataTableId, section).click(); } /** * Clears the current selection, and selects the given IDs * @param {object} selectedResources Two alternatives, ids or names. 
* - selectedResources.ids must be a single ID, * or an array of IDs for options.multiple_choice * - selectedResources.names must be an array of {name, uname} */ function _selectResourceTableSelect(selectedResources) { var that = this; var section = $('#' + that.dataTableId + 'Container'); if (that.selectOptions.multiple_choice) { that.refreshResourceTableSelect(section, that.dataTableId); var data_ids = {}; $('#selected_ids_row_' + that.dataTableId + ' span[row_id]', section).remove(); if (selectedResources.ids == undefined) { selectedResources.ids = []; } // TODO: {name, uname} support for multiple_choice $.each(selectedResources.ids, function(index, row_id) { if (isNaN(row_id)) { return true; } data_ids[row_id] = true; var row_name = "" + row_id; row_name = OpenNebula[that.resource].getName(row_id); $('#selected_ids_row_' + that.dataTableId, section).append('<span row_id="' + row_id + '" class="radius label">' + row_name + ' <span class="fa fa-times blue"></span></span> '); }); $('#selected_ids_row_' + that.dataTableId, section).data("ids", data_ids); if ($.isEmptyObject(data_ids)) { $('#selected_resource_multiple_' + that.dataTableId, section).hide(); $('#select_resource_multiple_' + that.dataTableId, section).show(); } else { $('#selected_resource_multiple_' + that.dataTableId, section).show(); $('#select_resource_multiple_' + that.dataTableId, section).hide(); } that.dataTable.fnDraw(); } else { $("td.markrow", that.dataTable).removeClass('markrow'); $('tbody input.check_item', that.dataTable).prop('checked', false); $('#selected_resource_' + that.dataTableId, section).show(); $('#select_resource_' + that.dataTableId, section).hide(); var row_id = undefined; var row_name = ""; if (selectedResources.ids != undefined) { row_id = selectedResources.ids; row_name = "" + row_id; row_name = OpenNebula[that.resource].getName(row_id); } else if (selectedResources.names != undefined) { row_name = selectedResources.names.name; var row_uname = 
selectedResources.names.uname; $.each(that.dataTable.fnGetData(), function(index, row) { if (row[that.selectOptions.name_index] == row_name && row[that.selectOptions.uname_index] == row_uname) { row_id = row[that.selectOptions.id_index]; return false; } }); //if (row_id == undefined){ // $('#selected_resource_id_' + that.dataTableId, section).data("pending_select", selectedResources); //} } // $("td", this).addClass('markrow'); // $('input.check_item', this).prop('checked', true); if (row_id !== undefined) { $('#selected_resource_id_' + that.dataTableId, section).val(row_id).trigger("change"); } $('#selected_resource_name_' + that.dataTableId, section).text(row_name).trigger("change"); $('#selected_resource_name_' + that.dataTableId, section).show(); that.refreshResourceTableSelect(section, that.dataTableId); } } function _updateFn() { var that = this; var success_func = function (request, resource_list) { var list_array = []; var fixed_ids_map = $.extend({}, that.selectOptions.fixed_ids_map_orig); $.each(resource_list, function() { var add = true; if (that.selectOptions.filter_fn) { add = that.selectOptions.filter_fn(this[that.xmlRoot]); } if (that.selectOptions.fixed_ids != undefined) { add = (add && fixed_ids_map[this[that.xmlRoot].ID]); } var elementArray; if (add) { elementArray = that.elementArray(this); add = (elementArray != false); } if (add) { if (that.selectOptions.starred_ids != undefined){ if (that.selectOptions.starred_ids_map[this[that.xmlRoot].ID]){ elementArray[that.selectOptions.name_index] = (that.selectOptions.starred_icon + ' ' + elementArray[that.selectOptions.name_index]); } else { elementArray[that.selectOptions.name_index] = ('<i class="fa fa-fw"></i> ' + elementArray[that.selectOptions.name_index]); } } list_array.push(elementArray); delete fixed_ids_map[this[that.xmlRoot].ID]; } }); var n_columns = that.columns.length + 1; $.each(fixed_ids_map, function(id, v) { var empty = []; for (var i = 0; i <= n_columns; i++) { empty.push(""); } 
empty[that.selectOptions.id_index] = id; list_array.push(empty); }); that.updateView(null, list_array, true); var section = $('#' + that.dataTableId + 'Container'); var selectedResources = $('#selected_resource_id_' + that.dataTableId, section).data("pending_select"); if (selectedResources != undefined){ $('#selected_resource_id_' + that.dataTableId, section).removeData("pending_select"); that.selectResourceTableSelect(selectedResources); } } var error_func = function(request, error_json, container) { success_func(request, []); Notifier.onError(request, error_json, container); } if (that.selectOptions.zone_id == undefined) { OpenNebula[that.resource].list({ timeout: true, success: success_func, error: error_func }); } else { OpenNebula[that.resource].list_in_zone({ data: {zone_id: that.selectOptions.zone_id}, timeout: true, success: success_func, error: error_func }); } } // Used by panels that contain tables from other resources. // TODO: This is probably duplicated somewhere function _list() { var that = this; OpenNebula[that.resource].list({ success: function(req, resp) { that.updateView(req, resp); }, error: Notifier.onError }); } function _setLabelsFilter(regExp) { LabelsUtils.setLabelsFilter(this.dataTable, this.labelsColumn, regExp); } function _clearLabelsFilter() { LabelsUtils.clearLabelsFilter(this.dataTable, this.labelsColumn); } })
apache-2.0
aws/aws-sdk-java
aws-java-sdk-alexaforbusiness/src/main/java/com/amazonaws/services/alexaforbusiness/model/transform/UpdateGatewayGroupRequestMarshaller.java
2712
/* * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.alexaforbusiness.model.transform; import javax.annotation.Generated; import com.amazonaws.SdkClientException; import com.amazonaws.services.alexaforbusiness.model.*; import com.amazonaws.protocol.*; import com.amazonaws.annotation.SdkInternalApi; /** * UpdateGatewayGroupRequestMarshaller */ @Generated("com.amazonaws:aws-java-sdk-code-generator") @SdkInternalApi public class UpdateGatewayGroupRequestMarshaller { private static final MarshallingInfo<String> GATEWAYGROUPARN_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("GatewayGroupArn").build(); private static final MarshallingInfo<String> NAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD) .marshallLocationName("Name").build(); private static final MarshallingInfo<String> DESCRIPTION_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("Description").build(); private static final UpdateGatewayGroupRequestMarshaller instance = new UpdateGatewayGroupRequestMarshaller(); public static UpdateGatewayGroupRequestMarshaller getInstance() { return instance; } /** * Marshall the given parameter object. 
*/ public void marshall(UpdateGatewayGroupRequest updateGatewayGroupRequest, ProtocolMarshaller protocolMarshaller) { if (updateGatewayGroupRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(updateGatewayGroupRequest.getGatewayGroupArn(), GATEWAYGROUPARN_BINDING); protocolMarshaller.marshall(updateGatewayGroupRequest.getName(), NAME_BINDING); protocolMarshaller.marshall(updateGatewayGroupRequest.getDescription(), DESCRIPTION_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } } }
apache-2.0
IEEERobotics/bot-control-android
src/edu/ncsu/ieee/botcontrol/ZMQClientView.java
4283
package edu.ncsu.ieee.botcontrol; import android.content.Context; import android.util.AttributeSet; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.widget.Button; import android.widget.EditText; import android.widget.ImageButton; import android.widget.LinearLayout; import android.widget.TextView; /** * A simple view group (layout) to manage and monitor a ZMQ client. */ public class ZMQClientView extends LinearLayout { private static final String TAG = "ZMQClientView"; public ImageButton btnLoadHostComputer = null; public TextView txtServerProtocol = null; public EditText txtServerHost = null; public TextView txtServerPort = null; public Button btnStartClient = null; public Button btnStopClient = null; public EditText txtMessage = null; public Button btnSend = null; public EditText txtClientConsole = null; private ZMQClientThread clientThread = null; private TextViewLogger consoleLogger = null; public ZMQClientView(Context context) { super(context); init(null, 0); } public ZMQClientView(Context context, AttributeSet attrs) { super(context, attrs); init(attrs, 0); } private void init(AttributeSet attrs, int defStyle) { // Initialize view elements from XML LayoutInflater inflater = (LayoutInflater) getContext().getSystemService(Context.LAYOUT_INFLATER_SERVICE); inflater.inflate(R.layout.view_zmqclient, this, true); // Obtain references to view elements btnLoadHostComputer = (ImageButton) findViewById(R.id.btnLoadHostComputer); txtServerProtocol = (TextView) findViewById(R.id.txtServerProtocol); txtServerHost = (EditText) findViewById(R.id.txtServerHost); txtServerPort = (TextView) findViewById(R.id.txtServerPort); btnStartClient = (Button) findViewById(R.id.btnStartClient); btnStopClient = (Button) findViewById(R.id.btnStopClient); txtMessage = (EditText) findViewById(R.id.txtMessage); btnSend = (Button) findViewById(R.id.btnSend); txtClientConsole = (EditText) findViewById(R.id.txtClientConsole); // Configure view 
elements txtServerProtocol.setText(ZMQServerThread.SERVER_PROTOCOL); txtServerHost.setText(ZMQClientThread.SERVER_HOST); txtServerPort.setText(String.valueOf(ZMQServerThread.SERVER_PORT)); // Hook up actions btnLoadHostComputer.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { txtServerHost.setText("10.0.2.2"); // NOTE: This is a special IP address for referring to the host computer from an emulator instance } }); btnStartClient.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { startClient(); } }); btnStopClient.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { stopClient(); } }); btnSend.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { if (clientThread != null && clientThread.isAlive()) { final String request = txtMessage.getText().toString().trim(); if (request.length() > 0) { // Start a new thread to avoid blocking the main (UI) thread (new Thread() { public void run() { consoleLogger.log("Sending : " + request); String reply = clientThread.serviceRequestSync(request, false); consoleLogger.log("Received: " + reply); } }).start(); } } } }); // Setup logger to append messages to console view consoleLogger = new TextViewLogger(txtClientConsole); // NOTE We can only modify views on main (UI) thread } @Override protected void onDetachedFromWindow() { stopClient(); // view is being destroyed, stop client thread if running super.onDetachedFromWindow(); } private void startClient() { stopClient(); // stop previously running client thread, if any Log.d(TAG, "startClient(): Starting client thread..."); clientThread = new ZMQClientThread(txtServerProtocol.getText().toString(), txtServerHost.getText().toString(), Integer.parseInt(txtServerPort.getText().toString())); clientThread.start(); } private void stopClient() { if (clientThread != null) { Log.d(TAG, "stopClient(): Stopping client thread..."); clientThread.term(); clientThread = null; } 
} }
apache-2.0
BriData/DBus
dbus-keeper/keeper-web/app/components/ResourceManage/DataSourceCreate/StartTopology.js
6509
import React, {PropTypes, Component} from 'react'
import {Tag, Spin, Input, Form, Select, Table, Row, Col, Button, message} from 'antd'
import {FormattedMessage} from 'react-intl'
// import styles
const TextArea = Input.TextArea
import styles from './res/styles/index.less'
import Request from "@/app/utils/request";
import OperatingButton from '@/app/components/common/OperatingButton'
const FormItem = Form.Item
const Option = Select.Option

/**
 * Step in the "create data source" wizard: lists the latest topology jars for
 * the data source and lets the operator start each Storm topology, streaming
 * the submit log into a read-only textarea.
 *
 * NOTE(review): the fields `topolotyType` / `topolotyName` are misspelled but
 * appear to be the exact key names of the backend payload (they are used as
 * `dataIndex` and echoed back in the start request) — do not "fix" the
 * spelling here without changing the server side as well.
 *
 * NOTE(review): `componentWillMount` / `componentWillReceiveProps` are legacy
 * React lifecycles (deprecated since React 16.3); migrate to
 * `componentDidMount` / `getDerivedStateFromProps` when upgrading React.
 */
export default class StartTopology extends Component {
  constructor(props) {
    super(props)
    // Converts a percentage string to an absolute pixel width, assuming a
    // fixed 1440px-wide table (widths are applied per-cell, not via <col>).
    this.tableWidthStyle = width => ({
      width: `${parseFloat(width) / 100 * 1440}px`
    })
    // Column width budget; index 4 is currently unused (see columns below).
    this.tableWidth = [
      '10%',
      '10%',
      '10%',
      '10%',
      '16.6%',
      '16.6%',
      '16.7%',
      '10%',
    ]
    this.state = {
      dataSource: [],   // rows: one per topology jar, with a `status` field
      logContent: '',   // accumulated Storm submit log (appended per start)
      logLoading: false // true while a start request is in flight
    }
  }

  // Ask the container for the latest jar paths of this data source.
  componentWillMount = () => {
    const {getLatestJarPath, dataSource} = this.props
    getLatestJarPath({dsId: dataSource.id})
  }

  // Mirror the fetched jar list into local state so row status can be
  // updated optimistically after a successful start.
  componentWillReceiveProps = nextProps => {
    const {jarPath} = nextProps
    if (jarPath.length) {
      this.setState({
        dataSource: jarPath
      })
    }
  }

  /**
   * POST a start request for one topology row. Success is detected by the
   * Storm marker string "Finished submitting topology" inside the payload;
   * on success the row is flipped to 'running' and the log is appended.
   */
  handleStart = record => {
    const {logContent} = this.state
    this.setState({logLoading: true})
    const {topoJarStartApi} = this.props
    Request(topoJarStartApi, {
      // Re-send the row, exposing the (misspelled) type under the name the
      // start API expects.
      data: {
        ...record,
        topologyType: record.topolotyType
      },
      method: 'post'
    })
      .then(res => {
        this.setState({logLoading: false})
        if (res && res.status === 0 && res.payload && res.payload.indexOf('Finished submitting topology') >= 0) {
          message.success(res.message)
          this.setState({logContent: logContent + res.payload})
          const {dataSource} = this.state
          // Mark only the started row as running; clone the others.
          this.setState({
            dataSource: dataSource.map(ds => {
              if (ds.topolotyName === record.topolotyName) {
                return {
                  ...ds,
                  status: 'running'
                }
              } else {
                return {
                  ...ds
                }
              }
            })
          })
        } else {
          // Failed submit: still append the payload so the operator can read
          // the Storm error output.
          this.setState({logContent: logContent + res.payload})
          message.warn(res.payload)
        }
      })
      .catch(error => {
        this.setState({logLoading: false})
        // Prefer the server-provided message when the response carries one.
        error.response.data && error.response.data.message ?
          message.error(error.response.data.message) :
          message.error(error.message)
      })
  }

  // Finish button: warn about any topology not started; navigate away only
  // when every row is running.
  handleFinish = () => {
    const {dataSource} = this.state
    dataSource.forEach(ds => {
      if (ds.status === 'inactive') {
        message.warn(`${ds.topolotyName}没有启动`)
      }
    })
    if (dataSource.every(ds => ds.status === 'running')) {
      window.location.href = '/resource-manage/data-source'
    }
  }

  // Indirection so antd's column `render` receives a stable closure.
  renderComponent = render => (text, record, index) => render(text, record, index);

  // Plain ellipsized cell at a fixed pixel width.
  renderNomal = width => (text, record, index) => (
    <div
      title={text}
      style={this.tableWidthStyle(width)}
      className={styles.ellipsis}
    >
      {text}
    </div>
  )

  // Status cell: 'inactive' is displayed as 'stopped'; color-coded tag.
  renderStatus = width => (text, record, index) => {
    if (text === 'inactive') text = 'stopped'
    let color
    switch (text) {
      case 'running':
        color = 'green'
        break
      case 'stopped':
        color = 'red'
        break
      default:
        color = '#929292'
    }
    return (<div
      style={this.tableWidthStyle(width)}
      title={text}
      className={styles.ellipsis}>
      <Tag color={color} style={{cursor: 'auto'}}>
        {text}
      </Tag>
    </div>)
  }

  // Start button per row; disabled while a start is in flight or the row is
  // already running.
  renderOperating = width => (text, record, index) => {
    const {logLoading} = this.state
    return (
      <div style={this.tableWidthStyle(width)}>
        <OperatingButton disabled={logLoading || record.status === 'running'} icon="caret-right" onClick={() => this.handleStart(record)}>启动</OperatingButton>
      </div>
    )
  }

  render() {
    const {logLoading, logContent, dataSource} = this.state
    // NOTE(review): widths jump from index 3 to 5 — tableWidth[4] is unused.
    const columns = [
      {
        title: 'dsName',
        width: this.tableWidth[0],
        dataIndex: 'dsName',
        key: 'dsName',
        render: this.renderComponent(this.renderNomal(this.tableWidth[0]))
      },
      {
        title: 'Topology Type',
        width: this.tableWidth[1],
        dataIndex: 'topolotyType',
        key: 'topolotyType',
        render: this.renderComponent(this.renderNomal(this.tableWidth[1]))
      },
      {
        title: 'Topology Name',
        width: this.tableWidth[2],
        dataIndex: 'topolotyName',
        key: 'topolotyName',
        render: this.renderComponent(this.renderNomal(this.tableWidth[2]))
      },
      {
        title: 'Status',
        width: this.tableWidth[3],
        dataIndex: 'status',
        key: 'status',
        render: this.renderComponent(this.renderStatus(this.tableWidth[3]))
      },
      {
        title: 'Jar Path',
        width: this.tableWidth[5],
        dataIndex: 'jarPath',
        key: 'jarPath',
        render: this.renderComponent(this.renderNomal(this.tableWidth[5]))
      },
      {
        title: 'Jar Name',
        width: this.tableWidth[6],
        dataIndex: 'jarName',
        key: 'jarName',
        render: this.renderComponent(this.renderNomal(this.tableWidth[6]))
      },
      {
        title: 'Operation',
        width: this.tableWidth[7],
        key: 'operation',
        render: this.renderComponent(this.renderOperating(this.tableWidth[7]))
      },
    ]
    return (
      <div className={styles.tableLayout}>
        <div className={styles.table}>
          <Table
            size="small"
            rowKey="topolotyName"
            dataSource={dataSource}
            columns={columns}
            pagination={false}
          />
        </div>
        <div style={{marginTop: 10}}>
          <Spin spinning={logLoading} tip="正在启动中...">
            <TextArea placeholder="storm log" autosize={{minRows: 10, maxRows: 20}} value={logContent} wrap='off'/>
          </Spin>
        </div>
        <div style={{marginTop: 10}}>
          <Row>
            <Col offset={22}>
              <Button
                type="primary"
                onClick={this.handleFinish}
              >
                加线完成
              </Button>
            </Col>
          </Row>
        </div>
      </div>
    )
  }
}

StartTopology.propTypes = {}
apache-2.0
dreajay/hrms
src/main/java/com/hrms/service/impl/UserServiceImpl.java
714
package com.hrms.service.impl; import javax.annotation.Resource; import org.springframework.stereotype.Service; import com.hrms.dao.IUserDao; import com.hrms.model.User; import com.hrms.service.IUserService; @Service("userService") public class UserServiceImpl extends GenericServiceImpl<User, Integer> implements IUserService { private IUserDao userDao; public IUserDao getUserDao() { return userDao; } @Resource public void setUserDao(IUserDao userDao) { this.userDao = userDao; } public User findByName(String userName) { return userDao.findByName(userName); } public User findByTrueName(String userTrueName) { return userDao.findByTrueName(userTrueName); } }
apache-2.0
googleads/googleads-mobile-unity
mediation/UnityAds/source/plugin/Assets/GoogleMobileAds/Api/Mediation/UnityAds/UnityAds.cs
1143
// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. using UnityEngine; using GoogleMobileAds.Common.Mediation.UnityAds; using GoogleMobileAds.Mediation; namespace GoogleMobileAds.Api.Mediation.UnityAds { public class UnityAds { public static readonly IUnityAdsClient client = GetUnityAdsClient(); private static IUnityAdsClient GetUnityAdsClient() { return UnityAdsClientFactory.UnityAdsInstance (); } public static void SetGDPRConsentMetaData(bool consent) { client.SetGDPRConsentMetaData (consent); } } }
apache-2.0
dump247/aws-sdk-java
aws-java-sdk-config/src/main/java/com/amazonaws/services/config/model/transform/ConfigRuleEvaluationStatusJsonUnmarshaller.java
6441
/*
 * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
 * Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.config.model.transform;

import java.util.Map;
import java.util.Map.Entry;

import com.amazonaws.services.config.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;

import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;

/**
 * ConfigRuleEvaluationStatus JSON Unmarshaller
 *
 * Generated streaming unmarshaller: walks the Jackson token stream and maps
 * each recognized field at {@code targetDepth} (one level inside the object
 * that was current on entry) onto the corresponding bean property. Unknown
 * fields are skipped implicitly by the token loop.
 */
public class ConfigRuleEvaluationStatusJsonUnmarshaller implements
        Unmarshaller<ConfigRuleEvaluationStatus, JsonUnmarshallerContext> {

    public ConfigRuleEvaluationStatus unmarshall(JsonUnmarshallerContext context)
            throws Exception {
        ConfigRuleEvaluationStatus configRuleEvaluationStatus = new ConfigRuleEvaluationStatus();

        // Depth bookkeeping: fields are only matched exactly one level below
        // the depth at which unmarshalling started.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;

        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        // A JSON null for the whole object yields a null bean.
        if (token == VALUE_NULL)
            return null;

        while (true) {
            if (token == null)
                break;

            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("ConfigRuleName", targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setConfigRuleName(StringJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
                if (context.testExpression("ConfigRuleArn", targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setConfigRuleArn(StringJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
                if (context.testExpression("ConfigRuleId", targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setConfigRuleId(StringJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
                if (context.testExpression("LastSuccessfulInvocationTime",
                        targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setLastSuccessfulInvocationTime(DateJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
                if (context.testExpression("LastFailedInvocationTime",
                        targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setLastFailedInvocationTime(DateJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
                if (context.testExpression("LastSuccessfulEvaluationTime",
                        targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setLastSuccessfulEvaluationTime(DateJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
                if (context.testExpression("LastFailedEvaluationTime",
                        targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setLastFailedEvaluationTime(DateJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
                if (context.testExpression("FirstActivatedTime", targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setFirstActivatedTime(DateJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
                if (context.testExpression("LastErrorCode", targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setLastErrorCode(StringJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
                if (context.testExpression("LastErrorMessage", targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setLastErrorMessage(StringJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
                if (context.testExpression("FirstEvaluationStarted",
                        targetDepth)) {
                    context.nextToken();
                    configRuleEvaluationStatus
                            .setFirstEvaluationStarted(BooleanJsonUnmarshaller
                                    .getInstance().unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop once the closing token of the object we started in is
                // reached (depth back at or above the entry depth).
                if (context.getLastParsedParentElement() == null
                        || context.getLastParsedParentElement().equals(
                                currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }

        return configRuleEvaluationStatus;
    }

    private static ConfigRuleEvaluationStatusJsonUnmarshaller instance;

    // Lazy singleton without synchronization: a race may create an extra
    // instance, which is harmless because the unmarshaller holds no state.
    public static ConfigRuleEvaluationStatusJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new ConfigRuleEvaluationStatusJsonUnmarshaller();
        return instance;
    }
}
apache-2.0
googleads/google-ads-php
src/Google/Ads/GoogleAds/V8/Enums/InteractionTypeEnum_InteractionType.php
684
<?php # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/ads/googleads/v8/enums/interaction_type.proto namespace Google\Ads\GoogleAds\V8\Enums; if (false) { /** * This class is deprecated. Use Google\Ads\GoogleAds\V8\Enums\InteractionTypeEnum\InteractionType instead. * @deprecated */ class InteractionTypeEnum_InteractionType {} } class_exists(InteractionTypeEnum\InteractionType::class); @trigger_error('Google\Ads\GoogleAds\V8\Enums\InteractionTypeEnum_InteractionType is deprecated and will be removed in the next major release. Use Google\Ads\GoogleAds\V8\Enums\InteractionTypeEnum\InteractionType instead', E_USER_DEPRECATED);
apache-2.0
krzysztof-magosa/saffron-php
example/src/routes.php
2084
<?php /** * Copyright 2014 Krzysztof Magosa * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // // Below Closure is called only when cache is empty. // When you change something here, you need to empty cache directory. // return function ($collection) { /** * // You will use name later to generate links. * // Each name has to be unique. * $collection->route('name') * ->setUri('/some/route/with/{parameter1}/and/{parameter2}') * ->setDomain('www.example.{tld}') * ->setMethod('GET') - you can also pass array with more methods * ->setHttp(false) - resource is accessible only by NON-https connection * ->setRequirements( * [ * 'parameter1' => '\w+', // parameter1 must be alphanumeric * 'parameter2' => '\d+', // parameter2 must be a number * 'tld' => 'com|org', // tld in domain must be com or org * ] * ) * ->setDefaults( * [ * 'parameter2' => 'value2', // when link doesn't contain parameter2, it has 'value2' * ] * ) * ->setTarget('HomeController', 'indexAction'); // you can omit action, the default is 'indexAction' */ $collection->route('home') ->setUri('/') ->setTarget('Site\Controller\HomeController'); $collection->route('product') ->setUri('/product/{slug}/{id}') ->setTarget('Site\Controller\ProductController') ->setRequirements( [ 'slug' => '\w+', 'id' => '\d+', ] ); };
apache-2.0
drakelord/wire
wire-runtime/src/main/java/com/squareup/wire/AndroidMessage.java
2076
/* * Copyright 2016 Square Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.squareup.wire; import android.os.Parcel; import android.os.Parcelable; import java.io.IOException; import java.lang.reflect.Array; import okio.ByteString; /** An Android-specific {@link Message} which adds support for {@link Parcelable}. */ public abstract class AndroidMessage<M extends Message<M, B>, B extends Message.Builder<M, B>> extends Message<M, B> implements Parcelable { /** Creates a new {@link Parcelable.Creator} using {@code adapter} for serialization. */ public static <E> Parcelable.Creator<E> newCreator(ProtoAdapter<E> adapter) { return new ProtoAdapterCreator<>(adapter); } protected AndroidMessage(ProtoAdapter<M> adapter, ByteString unknownFields) { super(adapter, unknownFields); } @Override public final void writeToParcel(Parcel dest, int flags) { dest.writeByteArray(encode()); } @Override public final int describeContents() { return 0; } private static final class ProtoAdapterCreator<M> implements Creator<M> { private final ProtoAdapter<M> adapter; ProtoAdapterCreator(ProtoAdapter<M> adapter) { this.adapter = adapter; } @Override public M createFromParcel(Parcel in) { try { return adapter.decode(in.createByteArray()); } catch (IOException e) { throw new RuntimeException(e); } } @Override public M[] newArray(int size) { //noinspection unchecked return (M[]) Array.newInstance(adapter.javaType, size); } } }
apache-2.0
zdila/gpx-animator
src/main/java/app/gpx_animator/ui/swing/DurationSpinnerModel.java
2861
/*
 * Copyright Contributors to the GPX Animator project.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package app.gpx_animator.ui.swing;

import org.jetbrains.annotations.NonNls;

import javax.swing.AbstractSpinnerModel;
import java.io.Serial;
import java.util.Objects;

/**
 * Spinner model for editing a duration stored as milliseconds ({@link Long}).
 * The currently selected {@link Field} determines the step size: spinning
 * up/down adds or subtracts one unit of that field, converted to ms.
 */
public final class DurationSpinnerModel extends AbstractSpinnerModel {

    @Serial
    private static final long serialVersionUID = 7220186634453532297L;

    // Duration in milliseconds; may be null (no value set yet).
    private Long duration;
    // Unit used for the next/previous step; defaults to seconds.
    private Field field = Field.SECOND;

    @Override
    public Object getValue() {
        return duration;
    }

    @Override
    public void setValue(final Object value) {
        // Only fire when the value actually changed (null-safe comparison).
        if (!Objects.equals(duration, value)) {
            duration = (Long) value;
            fireStateChanged();
        }
    }

    @Override
    public Object getNextValue() {
        // A null duration is treated as 0 for stepping purposes.
        return (duration == null ? 0 : duration) + getDiffMs();
    }

    @Override
    public Object getPreviousValue() {
        // NOTE(review): no lower bound — stepping below zero yields a
        // negative duration; confirm callers clamp if that is unwanted.
        return (duration == null ? 0 : duration) - getDiffMs();
    }

    public Field getField() {
        return field;
    }

    public void setField(final Field field) {
        this.field = field;
    }

    /**
     * Step size in milliseconds for the current field. The switch falls
     * through deliberately: each case multiplies the conversion factor of the
     * next-smaller unit (DAY -> 24h -> 60m -> 60s -> 1000ms).
     */
    @SuppressWarnings("PMD.MissingBreakInSwitch") // Calculations actions sum up from top to down
    private long getDiffMs() {
        long add = 1;
        switch (field) {
            case DAY:
                add *= 24;
            case HOUR:
                add *= 60;
            case MINUTE:
                add *= 60;
            case SECOND:
                add *= 1000;
            case MILLISECOND:
                break;
            default:
                throw new AssertionError();
        }
        return add;
    }

    /** Duration unit together with its display suffix (ms, s, m, h, d). */
    public enum Field {
        MILLISECOND("ms"), //NON-NLS
        SECOND("s"), //NON-NLS
        MINUTE("m"), //NON-NLS
        HOUR("h"), //NON-NLS
        DAY("d"); //NON-NLS

        private final String unit;

        Field(final String unit) {
            this.unit = unit;
        }

        /**
         * Resolves a field from its unit suffix.
         *
         * @param unit suffix such as "ms" or "h"
         * @return the matching field, or {@code null} when the suffix is unknown
         */
        public static Field fromUnit(@NonNls final String unit) {
            for (final var field : Field.values()) {
                if (field.getUnit().equals(unit)) {
                    return field;
                }
            }
            return null;
        }

        public String getUnit() {
            return unit;
        }
    }
}
apache-2.0
guoyang2011/myfinagle
ThriftDemo/src/main/scala/cn/changhong/thrift/ThriftServer.scala
3799
package cn.changhong.thrift

import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicInteger

import cn.changhong.core.{NewsModel, IndexNewsOperatorServices}
import com.twitter.finagle.Service
import com.twitter.finagle.builder.{ClientBuilder, ServerBuilder}
import com.twitter.finagle.http.{Response, Http, Request, RichHttp}
import com.twitter.finagle.stats.DefaultStatsReceiver
import com.twitter.finagle.thrift.ThriftServerFramedCodec
import com.twitter.finagle.tracing.Trace
import com.twitter.finagle.zipkin.thrift.ZipkinTracer
import com.twitter.finagle.zookeeper.{ZkAnnouncer, ZkResolver}
import com.twitter.util.Future
import org.apache.thrift.protocol.TBinaryProtocol
import org.jboss.netty.buffer.ChannelBuffers

/**
 * Created by yangguo on 15-1-5.
 *
 * Demo Thrift service implementation. A shared request counter alternates
 * behaviour on every call so that tracing/load-balancing across the two
 * downstream HTTP proxy pools can be observed.
 */
class IndexNewsOperationImp(name:String) extends IndexNewsOperatorServices.FutureIface{
  // Monotonic call counter; its parity decides the demo response/downstream.
  val count=new AtomicInteger(0)

  // Alternates true/false on successive calls (demo behaviour, not real indexing).
  override def indexNews(indexNews: NewsModel): Future[Boolean] = Future.value{
    if(count.addAndGet(1)%2==0) true
    else false
  }

  // Records the id on the current trace, then fans out to proxy pool 1 or 2
  // depending on call parity; the returned Int (0/1) identifies the pool used.
  override def deleteArtificaillyNes(id: Int): Future[Int] = {
    Trace.recordBinary("id",id)
    if(count.getAndAdd(1)%2==0)
      HttpProxy.httpProxyClient1(Request()).map{rep=>
        println("Http proxy service="+new String(rep.getContent().array()))
        0
      }
    else
      HttpProxy.httpProxyClient2(Request()).map{rep=>
        println("Http proxy service="+new String(rep.getContent().array()))
        1
      }
  }
}

/**
 * Two ZooKeeper-resolved HTTP client pools plus a helper that spins up ten
 * demo HTTP servers, announcing each under /http_proxy_1 or /http_proxy_2
 * (even ports go to pool 2, odd ports to pool 1).
 */
object HttpProxy {
  val path = "/http_proxy"
  // Clients resolve their endpoints through ZooKeeper ("zk!host!path").
  val httpProxyClient1=client("zk!" + Start.zkHost + "!/http_proxy_1","test_http_proxy_client_1")
  val httpProxyClient2=client("zk!" + Start.zkHost + "!/http_proxy_2","test_http_proxy_client_2")

  // Builds a traced HTTP client with at most 3 connections per host.
  def client(dest: String, name: String): Service[Request, Response] = {
    ClientBuilder()
      .dest(dest) //"zk!"+Start.zkHost+"!/http_proxy_2")
      .name(name) //("proxy_http_client_1")
      .codec(RichHttp[Request](Http()))
      .tracer(Start.tracer)
      .hostConnectionLimit(3)
      .build()
  }

  // Starts ten HTTP echo servers on ports 20001..20010, each registering
  // itself in ZooKeeper under the pool chosen by port parity.
  def startServer: Unit = {
    (1 to 10).foreach { index =>
      new Thread(new Runnable {
        override def run(): Unit = {
          val zkResovler = new ZkResolver()
          val zkAnn = new ZkAnnouncer()
          val bind = new InetSocketAddress(index + 20000)
          val zkAddr = Start.zkHost + "!" + path + (if (index % 2 == 0) "_2" else "_1") + "!0"
          service(bind, "end_http_proxy_" + bind.getPort)
          zkResovler.bind(zkAddr)
          zkAnn.announce(bind, zkAddr)
        }
      }).start()
    }
  }

  // Demo HTTP server: echoes its own name in the response body and records
  // it on the trace, reporting spans to Zipkin.
  def service(bind: InetSocketAddress, name: String): Unit = {
    ServerBuilder()
      .codec(RichHttp[Request](Http()))
      .bindTo(bind)
      .tracer(ZipkinTracer.mk("10.9.52.31",9410,DefaultStatsReceiver,1))
      .name(name)
      .build(new Service[Request, Response] {
        override def apply(request: Request): Future[Response] = Future.value {
          println("HttpProxy Receive message..."+name)
          Trace.recordBinary("requestId", name)
          val response = Response()
          response.setContent(ChannelBuffers.wrappedBuffer((name).getBytes()))
          response
        }
      })
  }
}

/**
 * Entry point: binds the framed-Thrift IndexNewsOperatorServices server at
 * host:port given on the command line (args: host, port, server name).
 */
object ThriftServer {
  def main(args:Array[String]): Unit ={
    require(args!=null && args.length>2)
    apply(new InetSocketAddress(args(0),args(1).toInt),args(2))
  }

  def apply(addr: InetSocketAddress, name: String): Unit = {
    val service = new IndexNewsOperatorServices.FinagledService(new IndexNewsOperationImp(name), new TBinaryProtocol.Factory())
    ServerBuilder()
      .bindTo(addr)
//      .tracer(ZipkinTracer.mk("10.9.52.31",9410,DefaultStatsReceiver,1))
      .codec(ThriftServerFramedCodec())
      .name(name)
      .build(service)
  }
}
apache-2.0
google-code-export/google-api-dfp-java
examples/v201306/contentservice/GetAllContentExample.java
2593
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v201306.contentservice;

import com.google.api.ads.dfp.lib.DfpService;
import com.google.api.ads.dfp.lib.DfpServiceLogger;
import com.google.api.ads.dfp.lib.DfpUser;
import com.google.api.ads.dfp.v201306.Content;
import com.google.api.ads.dfp.v201306.ContentPage;
import com.google.api.ads.dfp.v201306.ContentServiceInterface;
import com.google.api.ads.dfp.v201306.Statement;

/**
 * This example gets all content. This feature is only available to DFP video
 * publishers.
 *
 * Tags: ContentService.getContentByStatement
 *
 * @author api.arogal@gmail.com (Adam Rogal)
 */
public class GetAllContentExample {
  public static void main(String[] args) {
    try {
      // Log SOAP XML request and response.
      DfpServiceLogger.log();

      // Get DfpUser from "~/dfp.properties".
      DfpUser user = new DfpUser();

      // Get the ContentService.
      ContentServiceInterface contentService =
          user.getService(DfpService.V201306.CONTENT_SERVICE);

      // Set defaults for page and filterStatement.
      ContentPage page = new ContentPage();
      Statement filterStatement = new Statement();
      int offset = 0;

      // Page through all content 500 rows at a time by rewriting the
      // statement's LIMIT/OFFSET clause on each iteration.
      do {
        // Create a statement to get all content.
        filterStatement.setQuery("LIMIT 500 OFFSET " + offset);

        // Get content by statement.
        page = contentService.getContentByStatement(filterStatement);

        if (page.getResults() != null) {
          // getStartIndex() gives the absolute position of the first row of
          // this page, so numbering continues across pages.
          int i = page.getStartIndex();
          for (Content content : page.getResults()) {
            System.out.println(i + ") Content with ID \"" + content.getId()
                + "\", name \"" + content.getName() + "\", and status \""
                + content.getStatus() + "\" was found.");
            i++;
          }
        }

        offset += 500; // page size must match the LIMIT above
      } while (offset < page.getTotalResultSetSize());

      System.out.println("Number of results found: "
          + page.getTotalResultSetSize());
    } catch (Exception e) {
      // Example code: surface everything on stderr rather than failing silently.
      e.printStackTrace();
    }
  }
}
apache-2.0
xxMUROxx/Mairegger.Printing
src/Mairegger.Printing/Content/IPageBreakAware.cs
1308
// Copyright 2016 Michael Mairegger // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. namespace Mairegger.Printing.Content { using System.Collections.Generic; using System.Windows; public interface IPageBreakAware : IPrintContent { /// <summary> /// Gets a list of <see cref="UIElement"/> that prints the content on multiple pages. /// </summary> /// <param name="currentPageHeight">The space that is available on the current page.</param> /// <param name="printablePageSize">The space that is available on any further page.</param> /// <returns>A list of <see cref="UIElement"/>s that are print each on a single page.</returns> IEnumerable<UIElement> PageContents(double currentPageHeight, Size printablePageSize); } }
apache-2.0
datumbox/datumbox-framework
datumbox-framework-core/src/main/java/com/datumbox/framework/core/common/utilities/SelectKth.java
4881
/**
 * Copyright (C) 2013-2020 Vasilis Vryniotis <bbriniotis@datumbox.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datumbox.framework.core.common.utilities;

import java.util.*;

/**
 * The SelectKth class provides a fast implementation of a Selection algorithm.
 *
 * Strategy: stream the elements once, keeping at most 2k candidates in a
 * buffer; whenever the buffer fills, a quickselect-style partition discards
 * everything larger than the current k smallest.
 *
 * @author Vasilis Vryniotis <bbriniotis@datumbox.com>
 */
public class SelectKth {

    /**
     * Selects the kth largest element from an iterable object.
     *
     * Implemented by negating every element and delegating to
     * {@link #smallest(Iterator, int)}, then negating the result back.
     *
     * @param elements iterator over the values (consumed by this call)
     * @param k 1-based rank from the top
     * @return the kth largest value
     */
    public static Double largest(Iterator<Double> elements, int k) {
        // View of the input with every value sign-flipped; kth largest of x
        // equals -(kth smallest of -x).
        Iterator<Double> oppositeElements = new Iterator<Double>() {
            /** {@inheritDoc} */
            @Override
            public boolean hasNext() {
                return elements.hasNext();
            }

            /** {@inheritDoc} */
            @Override
            public Double next() {
                return -elements.next(); //reverse the sign of every value
            }
        };
        return -smallest(oppositeElements,k); //reverse the sign of the retrieved value
    }

    /**
     * Selects the kth smallest element from an iterable object.
     *
     * @param elements iterator over the values (consumed by this call)
     * @param k 1-based rank from the bottom; if fewer than k elements are
     *          supplied, the largest available element is returned
     * @return the kth smallest value, or {@code null} for k <= 0 or an
     *         empty input
     */
    /*
     * This method is adapted from Guava. Original method leastOf().
     *
     * Copyright 2007 Google Inc.
     * Licensed under Apache License, Version 2.0
     */
    public static Double smallest(Iterator <Double> elements, int k) {
        if (k <= 0 || !elements.hasNext()) {
            return null;
        }
        else if (k >= Integer.MAX_VALUE / 2) {
            // Degenerate k: the 2k buffer would overflow, so materialize and
            // fully sort instead.
            // NOTE(review): unlike the main path, this branch does not clamp
            // k to the element count — list.get(k - 1) throws
            // IndexOutOfBoundsException when fewer than k elements exist.
            List <Double> list = new ArrayList <> ();
            while (elements.hasNext()) {
                list.add(elements.next());
            }
            Collections.sort(list);

            return list.get(k - 1);
        }

        // Candidate buffer holds up to 2k values; `threshold` is the maximum
        // of the candidates seen so far (anything >= it can be skipped).
        int bufferCap = k * 2;
        Double[] buffer = new Double[bufferCap];
        Double threshold = elements.next();
        buffer[0] = threshold;
        int bufferSize = 1;

        // Fill the first k slots unconditionally, tracking their maximum.
        while (bufferSize < k && elements.hasNext()) {
            Double e = elements.next();
            buffer[bufferSize++] = e;
            threshold = Math.max(threshold, e);
        }

        while (elements.hasNext()) {
            Double e = elements.next();
            if (e >= threshold) {
                continue; // cannot be among the k smallest
            }

            buffer[bufferSize++] = e;
            if (bufferSize == bufferCap) {
                // Buffer full: quickselect-partition so indices [0, k) hold
                // the k smallest candidates, then shrink back to size k.
                int left = 0;
                int right = bufferCap - 1;

                int minThresholdPosition = 0;

                while (left < right) {
                    int pivotIndex = (left + right + 1) >>> 1;

                    //--- partition
                    // Move pivot to the end, sweep smaller values to the
                    // front, then restore the pivot at its final index.
                    Double pivotValue = buffer[pivotIndex];
                    buffer[pivotIndex] = buffer[right];
                    buffer[right] = pivotValue;

                    int pivotNewIndex = left;
                    for (int l = left; l < right; l++) {
                        if (buffer[l] < pivotValue) {
                            Double temp = buffer[pivotNewIndex];
                            buffer[pivotNewIndex] = buffer[l];
                            buffer[l] = temp;
                            pivotNewIndex++;
                        }
                    }

                    Double temp = buffer[right];
                    buffer[right] = buffer[pivotNewIndex];
                    buffer[pivotNewIndex] = temp;
                    //---

                    if (pivotNewIndex > k) {
                        right = pivotNewIndex - 1;
                    }
                    else if (pivotNewIndex < k) {
                        left = Math.max(pivotNewIndex, left + 1);
                        minThresholdPosition = pivotNewIndex;
                    }
                    else {
                        break;
                    }
                }
                bufferSize = k;

                // Recompute the rejection threshold as the max of the kept
                // candidates at and after the last known pivot position.
                threshold = buffer[minThresholdPosition];
                for (int i = minThresholdPosition + 1; i < bufferSize; i++) {
                    threshold = Math.max(threshold, buffer[i]);
                }
            }
        }

        // Final candidates sorted; clamp k in case fewer elements were seen.
        Arrays.sort(buffer, 0, bufferSize);

        k = Math.min(bufferSize, k);

        return buffer[k-1];
    }
}
apache-2.0
OpenTOSCA/container
org.opentosca.planbuilder/org.opentosca.planbuilder.model/src/main/resources/generated/org/apache/ode/schemas/dd/_2007/_03/TSchedule.java
2744
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vJAXB 2.1.10 in JDK 6
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2013.05.27 at 03:40:55 PM CEST
//

package org.apache.ode.schemas.dd._2007._03;

import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlType;

/**
 * <p>Java class for tSchedule complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType name="tSchedule">
 *   &lt;complexContent>
 *     &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *       &lt;sequence>
 *         &lt;element name="cleanup" type="{http://www.apache.org/ode/schemas/dd/2007/03}tCleanup" maxOccurs="unbounded" minOccurs="0"/>
 *       &lt;/sequence>
 *       &lt;attribute name="when" use="required" type="{http://www.w3.org/2001/XMLSchema}string" />
 *     &lt;/restriction>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "tSchedule", propOrder = { "cleanup" })
public class TSchedule {

    // Lazily created by getCleanup(); may be null until first access.
    protected List<TCleanup> cleanup;
    @XmlAttribute(required = true)
    protected String when;

    /**
     * Gets the value of the cleanup property.
     *
     * <p>
     * This accessor method returns a reference to the live list, not a snapshot. Therefore any modification you make to
     * the returned list will be present inside the JAXB object. This is why there is not a <CODE>set</CODE> method for
     * the cleanup property.
     *
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getCleanup().add(newItem);
     * </pre>
     *
     *
     * <p>
     * Objects of the following type(s) are allowed in the list {@link TCleanup }
     */
    public List<TCleanup> getCleanup() {
        // Standard JAXB lazy initialization: never returns null.
        if (cleanup == null) {
            cleanup = new ArrayList<TCleanup>();
        }
        return this.cleanup;
    }

    /**
     * Gets the value of the when property.
     *
     * @return possible object is {@link String }
     */
    public String getWhen() {
        return when;
    }

    /**
     * Sets the value of the when property.
     *
     * @param value allowed object is {@link String }
     */
    public void setWhen(String value) {
        this.when = value;
    }
}
apache-2.0
pmk2429/investickation
app/src/main/java/com/sfsu/network/error/ApiError.java
593
package com.sfsu.network.error; /** * Wrapper for the Error status and message sent from the server * Created by Pavitra on 12/27/2015. */ public class ApiError { private int statusCode; private String message; public ApiError() { } public int getStatusCode() { return statusCode; } public void setStatusCode(int statusCode) { this.statusCode = statusCode; } public String getMessage() { return message; } public void setMessage(String message) { this.message = message; } }
apache-2.0
sizhaoliu/MemoryManagementBenchmark
org.talend.dataprofiler.benchmark.MapDB/src/main/java/org/talend/dataprofiler/benchmark/MapDB/test/MapDBTestForMixModeNewTest.java
10211
package org.talend.dataprofiler.benchmark.MapDB.test;

import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;

import junit.framework.Assert;

import org.junit.Test;
import org.mapdb.BTreeKeySerializer;
import org.mapdb.BTreeMap;
import org.mapdb.DB;
import org.mapdb.DBMaker;
import org.mapdb.Fun;
import org.mapdb.Pump;
import org.mapdb.Serializer;
import org.mapdb.Store;

/**
 * Benchmark-style test for a "mixed mode" MapDB setup: rows are accumulated as
 * key-&gt;frequency pairs in a size-limited temp-file B-tree (the fast store); when
 * that store approaches its size limit, its whole content is pump-sorted into a
 * second, unlimited temp-file store and insertion continues there.
 */
public class MapDBTestForMixModeNewTest {

    /** Total number of rows inserted by {@link #testHugeDataForTreeMap()}. */
    private static final int ROW_TOTAL = 10000000;

    // Aggregated statistics filled by computeResult(); primitives to avoid
    // Long boxing/unboxing on every increment in the hot loop.
    private long distinctCount = 0L;
    private long rowCount = 0L;
    private long uniqueCount = 0L;
    private long duplicateCount = 0L;

    /**
     * Inserts {@value #ROW_TOTAL} random rows, migrating from the size-limited fast
     * store to the overflow store when less than 500 MB of headroom remains, then
     * verifies the aggregated counters.
     *
     * @throws UnsupportedEncodingException propagated from {@link #computeResult(BTreeMap)}
     */
    @Test
    public void testHugeDataForTreeMap() throws UnsupportedEncodingException {
        setStartT();

        // Fast store: temp file, 1 GB size limit, memory-mapped when supported.
        DB fileDb = DBMaker.newTempFileDB().sizeLimit(1).cacheSize(12 * 1024)
                .mmapFileEnableIfSupported().asyncWriteEnable().closeOnJvmShutdown()
                .transactionDisable().make();
        BTreeMap<String, Long> fileDbMap = fileDb.createTreeMap("test")
                .keySerializer(BTreeKeySerializer.STRING)
                .valueSerializer(Serializer.LONG).make();

        // Overflow store: no size limit; receives the data once the fast store fills up.
        DB slowFileDb = DBMaker.newTempFileDB().cacheSize(12 * 1024).mmapFileEnablePartial()
                .asyncWriteEnable().closeOnJvmShutdown().transactionDisable().make();

        Store fileDbStore = Store.forDB(fileDb);
        for (int index = 0; index < ROW_TOTAL; index++) {
            // With less than 500 MB of headroom left, migrate everything to the
            // overflow store, close the fast store, and keep inserting there.
            if (!fileDb.isClosed()
                    && fileDbStore.getSizeLimit() - fileDbStore.getCurrSize() <= 1024 * 1024 * 500) {
                System.out.println("fast store used (KB): " + fileDbStore.getCurrSize() / 1024);
                ellipseT();
                fileDbMap = copyDataToFile(fileDbMap, slowFileDb);
                fileDb.close();
            }
            for (String[] dataItem : initRandomData()) {
                String key = convertToKey(dataItem);
                Long frequency = fileDbMap.get(key);
                fileDbMap.put(key, frequency == null ? 1L : frequency + 1);
            }
        }

        System.out.println(fileDbStore.getCurrSize() / 1024);
        ellipseT();
        computeResult(fileDbMap);
        ellipseT();

        Assert.assertEquals(10000000L, rowCount);
        // unique + duplicate must partition the distinct keys.
        Assert.assertEquals(uniqueCount, distinctCount - duplicateCount);

        if (!fileDb.isClosed()) {
            fileDb.close();
        }
        slowFileDb.close();
        System.out.println("end");
        ellipseT();
    }

    /**
     * Pump-sorts the content of {@code sourceMap} into a freshly named B-tree inside
     * {@code targetDb} and returns the new map. Pump requires the source keys in
     * reverse order, hence the reverse-order comparator.
     */
    private BTreeMap<String, Long> copyDataToFile(final BTreeMap<String, Long> sourceMap, DB targetDb) {
        Iterator sortIterator = Pump.sort(sourceMap.keySet().iterator(), true, 100000,
                Collections.reverseOrder(BTreeMap.COMPARABLE_COMPARATOR),
                Serializer.STRING);
        Fun.Function1<Long, String> valueExtractor = new Fun.Function1<Long, String>() {
            @Override
            public Long run(String key) {
                return sourceMap.get(key);
            }
        };
        return targetDb.createTreeMap("map" + randomString(10))
                .pumpSource(sortIterator, valueExtractor)
                .keySerializer(BTreeKeySerializer.STRING)
                .valueSerializer(Serializer.LONG)
                .makeOrGet();
    }

    /** Aggregates row/distinct/unique/duplicate counters from the key-&gt;frequency map. */
    private void computeResult(BTreeMap<String, Long> fileDbMap) throws UnsupportedEncodingException {
        for (Map.Entry<String, Long> entry : fileDbMap.entrySet()) {
            long frequency = entry.getValue();
            rowCount += frequency;
            distinctCount++;
            if (frequency == 1) {
                uniqueCount++;
            } else {
                duplicateCount++;
            }
        }
    }

    /** Returns a single random row; with Math.random() in each column it is virtually always unique. */
    private List<String[]> initRandomData() {
        List<String[]> returnList = new ArrayList<String[]>();
        returnList.add(new String[] {
                "name" + Math.random(), "id" + Math.random(), "city" + Math.random() });
        return returnList;
    }

    // Simple wall-clock timer shared by the benchmark methods (package-visible,
    // kept as in the original so sibling benchmark classes can reuse it).
    static long startT = 0;
    static long endT = 0;

    /** Records the timer start for {@link #ellipseT()}. */
    public static void setStartT() {
        startT = System.currentTimeMillis();
    }

    /** Prints and returns the elapsed time, in whole seconds, since {@link #setStartT()}. */
    public static long ellipseT() {
        endT = System.currentTimeMillis();
        long consumeT = endT - startT;
        System.out.println("consume time :" + consumeT / 1000 + " second");
        return consumeT / 1000;
    }

    /** Concatenates the columns of a row into a single map key; a null row maps to "". */
    private String convertToKey(String[] input) {
        if (input == null) {
            return "";
        }
        StringBuilder strBuf = new StringBuilder();
        for (String str : input) {
            strBuf.append(str);
        }
        return strBuf.toString();
    }

    /**
     * Returns a random string of {@code size} characters, used to generate unique
     * names for pump-copy target maps.
     */
    public static String randomString(int size) {
        String chars = "0123456789abcdefghijklmnopqrstuvwxyz !@#$%^&*()_+=-{}[]:\",./<>?|\\";
        StringBuilder b = new StringBuilder(size);
        Random r = new Random();
        for (int i = 0; i < size; i++) {
            b.append(chars.charAt(r.nextInt(chars.length())));
        }
        return b.toString();
    }
}
apache-2.0
Dev-Cloud-Platform/Dev-Cloud
dev_cloud/cc1/src/wi/urls/user/network.py
3124
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end

"""@package src.wi.urls.user.network

@author Piotr Wójcik
@date 19.11.2010
"""

from django.conf.urls import patterns, url, include
from django.utils.translation import ugettext_lazy as _

from wi.forms.network import CreateNetworkForm
from wi.utils.decorators import user_permission
from wi.utils.views import direct_to_template, get_list_generic, simple_generic_id, form_generic

# URL patterns for the user's network resource views. Every view is wrapped in
# user_permission, so only authenticated users can reach these endpoints.
resources_patterns = patterns('wi.views.user.network',
    # Static page listing the user's networks.
    url(r'^networks/$',
        user_permission(direct_to_template),
        {'template_name': 'resources/networks.html'},
        name='res_networks'),

    # AJAX endpoint feeding the network table.
    url(r'^ajax/network_table/$',
        user_permission(get_list_generic),
        {'request_url': 'user/network/list_user_networks/'},
        name='res_ajax_get_network_table'),

    # AJAX form for requesting a new network.
    url(r'^ajax/add_network/$',
        user_permission(form_generic),
        {'template_name': 'generic/form.html',
         'success_msg': (lambda desc, data: _('You have successfully added a network') % {'desc': desc}),
         'confirmation': _('Create'),
         'form_class': CreateNetworkForm,
         'request_url_post': 'user/network/request/',
         },
        name='res_ajax_add_network'),

    # AJAX confirmation dialog for releasing an existing network.
    url(r'^ajax/release_network/(?P<id1>\d+)/$',
        user_permission(simple_generic_id),
        {'template_name': 'generic/simple.html',
         'success_msg': (lambda desc: _('You have successfully released network <b>%(desc)s</b>.') % {'desc': desc}),
         'ask_msg': (lambda desc: _('Do you want to release network <b>%(desc)s</b>?') % {'desc': desc}),
         'request_url': 'user/network/release/',
         'id_key': 'network_id',
         },
        name='res_ajax_release_network'),
)

# Mount everything under /resources/.
urlpatterns = patterns('',
    url(r'^resources/', include(resources_patterns)),
)
apache-2.0
zhangyuchen0411/goutil
slice/shift.go
735
package slice func ShiftLeftInt(slice []int, n int) { ReverseInt(slice[:n]) ReverseInt(slice[n:]) ReverseInt(slice) } func ShiftLeftString(slice []string, n int) { ReverseString(slice[:n]) ReverseString(slice[n:]) ReverseString(slice) } func ShiftLeftFloat64(slice []float64, n int) { ReverseFloat64(slice[:n]) ReverseFloat64(slice[n:]) ReverseFloat64(slice) } func ShiftRightInt(slice []int, n int) { ReverseInt(slice) ReverseInt(slice[:n]) ReverseInt(slice[n:]) } func ShiftRightString(slice []string, n int) { ReverseString(slice) ReverseString(slice[:n]) ReverseString(slice[n:]) } func ShiftRightFloat64(slice []float64, n int) { ReverseFloat64(slice) ReverseFloat64(slice[:n]) ReverseFloat64(slice[n:]) }
apache-2.0
amartinss/prointer
class/src/vo/ClienteVO.class.php
1891
<?php class ClienteVO { private $cod_cliente; private $cod_valor; private $nome; private $tipo_pessoa; private $tipo; private $dt_vencimento; private $cod_usuario_cad; private $dt_cadastro; private $cod_usuario_modifica; private $dt_modificacao; public function setCodCliente( $cod_cliente ) { $this->cod_cliente = $cod_cliente; } public function getCodCliente() { return $this->cod_cliente; } public function setCodValor( ValorVO $cod_valor ) { $this->cod_valor = $cod_valor; } public function getCodValor() { return $this->cod_valor; } public function setNome( $nome ) { $this->nome = $nome; } public function getNome() { return $this->nome; } public function setTipoPessoa( $tipo_pessoa ) { $this->tipo_pessoa = $tipo_pessoa; } public function getTipoPessoa() { return $this->tipo_pessoa; } public function setTipo( $tipo ) { $this->tipo = $tipo; } public function getTipo() { return $this->tipo; } public function setDtVencimento( $dt_vencimento ) { $this->dt_vencimento = $dt_vencimento; } public function getDtVencimento() { return $this->dt_vencimento; } public function setCodUsuarioCad( UsuarioCadVO $cod_usuario_cad ) { $this->cod_usuario_cad = $cod_usuario_cad; } public function getCodUsuarioCad() { return $this->cod_usuario_cad; } public function setDtCadastro( $dt_cadastro ) { $this->dt_cadastro = $dt_cadastro; } public function getDtCadastro() { return $this->dt_cadastro; } public function setCodUsuarioModifica( UsuarioModificaVO $cod_usuario_modifica ) { $this->cod_usuario_modifica = $cod_usuario_modifica; } public function getCodUsuarioModifica() { return $this->cod_usuario_modifica; } public function setDtModificacao( $dt_modificacao ) { $this->dt_modificacao = $dt_modificacao; } public function getDtModificacao() { return $this->dt_modificacao; } } ?>
apache-2.0
lkastler/Analysis-DbpediaLogs
DatasetAnalyzer/src/Config.py
715
superdir = ".." global inputfolder inputfolder = superdir +'/data' global sparqloutput sparqloutput = superdir +'/output/sparqls.txt' global malformedsparql malformedsparql = superdir +'/output/malformed-sparql.txt' global extractionLog extractionLog = superdir +'/output/extraction.log' global analysisLog analysisLog = superdir +'/output/analysis.log' global bgpfile bgpfile = superdir +'/output/bgp.txt' global bgpExtractFile bgpExtractFile = superdir +'/output/bgp-analysis.csv' global bgpExtractLog bgpExtractLog = superdir +'/output/bgp-analysis.log' global sparqlFile sparqlFile = superdir + '/output/sparql-uncleaned.log' global validSparql validSparql = superdir + "/output/sparql-valid.txt"
apache-2.0
rhiot/amqp-kafka-bridge
src/test/java/io/strimzi/kafka/bridge/tracker/OffsetTrackerTest.java
10108
/*
 * Copyright 2016, Strimzi authors.
 * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
 */
package io.strimzi.kafka.bridge.tracker;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * Unit tests for the {@code FullOffsetTracker} and {@code SimpleOffsetTracker}
 * implementations, covering both in-order and out-of-order delivery of offsets
 * 0..5 on a single partition.
 */
class OffsetTrackerTest {

    private static final Logger log = LoggerFactory.getLogger(OffsetTrackerTest.class);

    private static final String TOPIC = "my_topic";
    private static final int PARTITION = 0;
    private static final long LAST_OFFSET = 5;

    private List<ConsumerRecord<String, byte[]>> records = new ArrayList<>();
    private Map<TopicPartition, OffsetAndMetadata> offsets;

    @BeforeEach
    void before() {
        // Six records on partition 0 with offsets 0..5 and no payload.
        for (long offset = 0; offset <= LAST_OFFSET; offset++) {
            this.records.add(new ConsumerRecord<String, byte[]>(TOPIC, PARTITION, offset, null, null));
        }
    }

    @Test
    void fullOffsetTrackerOutOfOrder() {
        OffsetTracker offsetTracker = new FullOffsetTracker(TOPIC);
        trackAll(offsetTracker);

        // Offsets 2 and 3 arrive first: nothing is committable until 0 and 1 land.
        deliverAndCommit(offsetTracker, 2);
        assertTrue(this.offsets.isEmpty());

        deliverAndCommit(offsetTracker, 3);
        assertTrue(this.offsets.isEmpty());

        // 0 closes no gap beyond itself, so only offset 0 is committable.
        deliverAndCommit(offsetTracker, 0);
        assertCommittedOffset(0);

        // 1 closes the gap up to the already-delivered 3.
        deliverAndCommit(offsetTracker, 1);
        assertCommittedOffset(3);

        deliverAndCommit(offsetTracker, 4);
        assertCommittedOffset(4);

        deliverAndCommit(offsetTracker, 5);
        assertCommittedOffset(5);

        offsetTracker.clear();
    }

    @Test
    void fullOffsetTracker() {
        OffsetTracker offsetTracker = new FullOffsetTracker(TOPIC);
        trackAll(offsetTracker);

        // In-order delivery: every offset becomes committable immediately.
        for (long offset = 0; offset <= LAST_OFFSET; offset++) {
            deliverAndCommit(offsetTracker, offset);
            assertCommittedOffset(offset);
        }

        offsetTracker.clear();
    }

    @Test
    void simpleOffsetTrackerOutOfOrder() {
        OffsetTracker offsetTracker = new SimpleOffsetTracker(TOPIC);
        trackAll(offsetTracker);

        // The simple tracker commits whatever offset was delivered last...
        deliverAndCommit(offsetTracker, 2);
        assertCommittedOffset(2);

        deliverAndCommit(offsetTracker, 3);
        assertCommittedOffset(3);

        // ...and reports nothing for offsets below the already-committed mark.
        deliverAndCommit(offsetTracker, 0);
        assertTrue(this.offsets.isEmpty());

        deliverAndCommit(offsetTracker, 1);
        assertTrue(this.offsets.isEmpty());

        deliverAndCommit(offsetTracker, 4);
        assertCommittedOffset(4);

        deliverAndCommit(offsetTracker, 5);
        assertCommittedOffset(5);

        offsetTracker.clear();
    }

    @Test
    void simpleOffsetTracker() {
        OffsetTracker offsetTracker = new SimpleOffsetTracker(TOPIC);
        trackAll(offsetTracker);

        // In-order delivery: every offset is committed as it is delivered.
        for (long offset = 0; offset <= LAST_OFFSET; offset++) {
            deliverAndCommit(offsetTracker, offset);
            assertCommittedOffset(offset);
        }

        offsetTracker.clear();
    }

    /** Registers all fixture records with the tracker under test. */
    private void trackAll(OffsetTracker offsetTracker) {
        for (ConsumerRecord<String, byte[]> record : this.records) {
            offsetTracker.track(record.partition(), record.offset(), record);
        }
    }

    /**
     * Marks {@code offset} delivered, fetches the committable offsets into
     * {@link #offsets}, logs them, and commits them on the tracker.
     */
    private void deliverAndCommit(OffsetTracker offsetTracker, long offset) {
        log.info("{}_{} delivered", PARTITION, offset);
        offsetTracker.delivered(PARTITION, offset);
        this.offsets = offsetTracker.getOffsets();
        printOffsetsToCommit(this.offsets);
        offsetTracker.commit(this.offsets);
    }

    /** Asserts that the committable offset for the test partition equals {@code expected}. */
    private void assertCommittedOffset(long expected) {
        assertEquals(expected, this.offsets.get(new TopicPartition(TOPIC, PARTITION)).offset());
    }

    private void printOffsetsToCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        for (Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
            log.info("Committed {} - {} [{}]",
                    entry.getKey().topic(), entry.getKey().partition(), entry.getValue().offset());
        }
    }
}
apache-2.0