code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
/*
* Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the ec2-2015-04-15.normal.json service model.
*/
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Text;
using System.Xml.Serialization;
using Amazon.EC2.Model;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
using Amazon.Runtime.Internal.Transform;
using Amazon.Runtime.Internal.Util;
namespace Amazon.EC2.Model.Internal.MarshallTransformations
{
/// <summary>
/// CreateRoute Request Marshaller
/// </summary>
/// <summary>
/// CreateRoute Request Marshaller.
/// Converts a <see cref="CreateRouteRequest"/> into an <see cref="IRequest"/> carrying the
/// EC2 query-protocol parameters (Action, Version, and any optional route fields that were set).
/// </summary>
public class CreateRouteRequestMarshaller : IMarshaller<IRequest, CreateRouteRequest> , IMarshaller<IRequest,AmazonWebServiceRequest>
{
    /// <summary>
    /// Marshalls the request object to the HTTP request.
    /// </summary>
    /// <param name="input">The request to marshall; must be a <see cref="CreateRouteRequest"/>.</param>
    /// <returns>The marshalled request.</returns>
    public IRequest Marshall(AmazonWebServiceRequest input)
    {
        // Non-generic IMarshaller entry point: cast and forward to the typed overload.
        return this.Marshall((CreateRouteRequest)input);
    }

    /// <summary>
    /// Marshalls the request object to the HTTP request.
    /// </summary>
    /// <param name="publicRequest">The CreateRoute request to marshall.</param>
    /// <returns>The marshalled request containing every parameter that was set on the input.</returns>
    public IRequest Marshall(CreateRouteRequest publicRequest)
    {
        IRequest request = new DefaultRequest(publicRequest, "Amazon.EC2");
        request.Parameters.Add("Action", "CreateRoute");
        request.Parameters.Add("Version", "2015-04-15");
        // NOTE(review): publicRequest was already passed to the DefaultRequest constructor above,
        // so this null check cannot protect that call; kept as emitted by the code generator.
        if(publicRequest != null)
        {
            // Only parameters explicitly set on the request are serialized (IsSetXxx pattern).
            if(publicRequest.IsSetDestinationCidrBlock())
            {
                request.Parameters.Add("DestinationCidrBlock", StringUtils.FromString(publicRequest.DestinationCidrBlock));
            }
            if(publicRequest.IsSetGatewayId())
            {
                request.Parameters.Add("GatewayId", StringUtils.FromString(publicRequest.GatewayId));
            }
            if(publicRequest.IsSetInstanceId())
            {
                request.Parameters.Add("InstanceId", StringUtils.FromString(publicRequest.InstanceId));
            }
            if(publicRequest.IsSetNetworkInterfaceId())
            {
                request.Parameters.Add("NetworkInterfaceId", StringUtils.FromString(publicRequest.NetworkInterfaceId));
            }
            if(publicRequest.IsSetRouteTableId())
            {
                request.Parameters.Add("RouteTableId", StringUtils.FromString(publicRequest.RouteTableId));
            }
            if(publicRequest.IsSetVpcPeeringConnectionId())
            {
                request.Parameters.Add("VpcPeeringConnectionId", StringUtils.FromString(publicRequest.VpcPeeringConnectionId));
            }
        }
        return request;
    }
}
} | mwilliamson-firefly/aws-sdk-net | sdk/src/Services/EC2/Generated/Model/Internal/MarshallTransformations/CreateRouteRequestMarshaller.cs | C# | apache-2.0 | 3,470 |
# -*- coding:utf-8 -*-
"""
Version: 1.0
Author: zhangjian
Site: http://iliangqunru.com
File: __init__.py.py
Time: 2017/7/22 2:19
"""
| Xarrow/pySimulatedDNS | dnsCat/__init__.py | Python | apache-2.0 | 139 |
# frozen_string_literal: true
module Hyrax
  # Namespace module that contains Renderer classes.
  # @since 0.14.0
  # Renderers are used to display Ruby objects to users and take arguments from the
  # Hyrax::Presenter supplied by the controller.
  # They are typically used to respond to read requests from the controller, and may
  # apply some level of HTML, or may simply display human-readable values for object
  # attributes.
  module Renderers
  end
end
| samvera/hyrax | app/renderers/hyrax/renderers.rb | Ruby | apache-2.0 | 449 |
package com.coekie.gentyref.factory;
/**
 * Raw (non-parameterized) subclass of {@code GenericOuter} — presumably a fixture for
 * exercising raw-type handling in the gentyref tests; confirm against the test suite.
 */
@SuppressWarnings("rawtypes")
public class RawOuter extends GenericOuter {}
| coekie/gentyref | src/test/java/com/coekie/gentyref/factory/RawOuter.java | Java | apache-2.0 | 114 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
using RandomSchool.Extenders;
using RandomSchool.Repositories;
namespace RandomSchool.Maintain.vPupil
{
/// <summary>
/// Code-behind for the pupil Details page. Wires the FormView to a pupil repository
/// during Page_Init and redirects back to the list page after item commands.
/// </summary>
public partial class Details : System.Web.UI.Page
{
    // Repository supplying data methods for the FormView below.
    private PupilRepository<RandomSchool.Models.Pupil, int?> _repository = new PupilRepository<RandomSchool.Models.Pupil, int?>();

    protected void Page_Init()
    {
        // Bind the FormView's data methods to the repository and send users back
        // to the list route once an item command (insert/update/etc.) completes.
        fvPupil.SetDataMethodsObject(_repository);
        fvPupil.RedirectToRouteOnItemCommand("~/Maintain/vPupil/Default");
    }

    protected void Page_Load(object sender, EventArgs e)
    {
        // Intentionally empty: all setup happens in Page_Init.
    }
}
}
| jbwilliamson/MaximiseWFScaffolding | RandomSchool/RandomSchool/Maintain/vPupil/Details.aspx.cs | C# | apache-2.0 | 711 |
// Radio-button component: renders an <input type="radio"> whose DOM `checked`
// attribute mirrors whether this button's `value` equals the bound `checked` property.
HB.RadioButtonComponent = Ember.Component.extend({
  tagName: 'input',
  type: 'radio',
  // `htmlChecked` is exposed to the DOM as the `checked` attribute.
  attributeBindings: ['type', 'htmlChecked:checked', 'value', 'name'],

  // True when this button's value is the currently selected one.
  htmlChecked: function(){
    return this.get('value') === this.get('checked');
  }.property('value', 'checked'),

  // On user selection, write this button's value back to the bound `checked` property.
  change: function(){
    this.set('checked', this.get('value'));
  }
});
| fractalemagic/hummingbird | app/assets/javascripts/components/radio-button.js | JavaScript | apache-2.0 | 350 |
/*
* Copyright Anatoly Starostin (c) 2017.
*/
package treeton.prosody.corpus;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import treeton.core.config.BasicConfiguration;
import treeton.core.util.xml.XMLParser;
import javax.xml.parsers.ParserConfigurationException;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
/**
 * A folder node in a {@link Corpus}. Folders form a tree via an optional parent
 * reference and contain {@link CorpusEntry} objects. Folder metadata (label and
 * parent guid) is persisted to a file named "&lt;guid&gt;.info.xml", validated
 * against corpusFolderSchema.xsd.
 */
public class CorpusFolder {
    public CorpusFolder(String guid, Corpus corpus) {
        this.guid = guid;
        this.corpus = corpus;
        entries = new HashMap<String, CorpusEntry>();
        childFolders = new HashMap<String, CorpusFolder>();
    }
    // Owning corpus.
    private Corpus corpus;
    // Stable identifier; also the base name of the on-disk info file.
    private String guid;
    // Human-readable name; doubles as the display string (see toString()).
    private String label;
    // null for a root-level folder.
    private CorpusFolder parentFolder;
    // Entries keyed by their guid.
    private Map<String,CorpusEntry> entries;
    // Child folders keyed by their guid.
    private Map<String,CorpusFolder> childFolders;
    public String getGuid() {
        return guid;
    }
    public CorpusFolder getParentFolder() {
        return parentFolder;
    }
    /** Re-parents this folder, keeping both the old and new parents' child maps in sync. */
    void setParentFolder(CorpusFolder parentFolder) {
        if( this.parentFolder != null ) {
            this.parentFolder.removeChildFolder( this );
        }
        this.parentFolder = parentFolder;
        if( parentFolder != null ) {
            parentFolder.addChildFolder(this);
        }
    }
    void addChildFolder(CorpusFolder folder) {
        childFolders.put( folder.getGuid(), folder );
    }
    void removeChildFolder(CorpusFolder folder) {
        // Two-argument remove: only unmaps the guid if it is still mapped to this exact folder.
        childFolders.remove( folder.getGuid(), folder );
    }
    void addEntry( CorpusEntry entry ) {
        entries.put( entry.getGuid(), entry );
    }
    void deleteEntry( CorpusEntry entry ) {
        // Two-argument remove: no-op if the guid now maps to a different entry.
        entries.remove(entry.getGuid(), entry);
    }
    /**
     * Loads this folder's metadata from "&lt;guid&gt;.info.xml" in sourceFolder.
     * If a &lt;parent&gt; element is present, links this folder to that parent, which
     * must already be registered in the given corpus.
     *
     * @throws CorpusException if the file is missing or invalid, the parent guid is
     *         unknown, or the XML contains unexpected nodes
     */
    void load( File sourceFolder, Corpus corpus ) throws CorpusException {
        File f = new File( sourceFolder, guid + ".info.xml" );
        Document doc;
        try {
            doc = XMLParser.parse(f, new File(BasicConfiguration.getResource("/schema/corpusFolderSchema.xsd").toString()));
        } catch (Exception e) {
            throw new CorpusException("Corrupted entry (problem with folder info): " + guid, e);
        }
        Element e = doc.getDocumentElement();
        label = e.getAttribute("label");
        // Only the first child node is inspected: the document is expected to contain at
        // most the optional <parent> element (any other node is treated as corruption).
        Node xmlnd = e.getFirstChild();
        if( xmlnd != null ) {
            if (xmlnd instanceof Element) {
                Element cur = (Element) xmlnd;
                if ("parent".equals(cur.getTagName())) {
                    String parentGuid = cur.getTextContent();
                    CorpusFolder pFolder = corpus.getFolder(parentGuid);
                    if (pFolder == null) {
                        throw new CorpusException("Corrupted folder (wrong parent folder " + parentGuid + " ): " + guid);
                    }
                    setParentFolder( pFolder );
                } else {
                    throw new CorpusException("Corrupted folder (xml node contains unknown elements): " + guid);
                }
            } else {
                throw new CorpusException("Corrupted folder (xml node contains unknown elements): " + guid);
            }
        }
    }
    /**
     * Serializes this folder's metadata (guid, label, and optional parent guid) to
     * "&lt;guid&gt;.info.xml" in targetFolder.
     *
     * @throws CorpusException if XML creation or serialization fails
     */
    void save(File targetFolder) throws CorpusException {
        Document doc;
        try {
            doc = XMLParser.createDocument("http://starling.rinet.ru/treeton", "Document");
        } catch (ParserConfigurationException e) {
            throw new CorpusException("problem when trying to create xml with folder info", e);
        }
        Element result = doc.getDocumentElement();
        result.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance");
        result.setAttribute("xsi:schemaLocation", "http://starling.rinet.ru/treeton http://starling.rinet.ru/treeton/corpusFolderSchema.xsd");
        result.setAttribute( "guid", guid );
        result.setAttribute("label", label);
        // The parent link is written only for non-root folders.
        if( parentFolder != null ) {
            Element parent = doc.createElement("parent");
            parent.setTextContent(parentFolder.getGuid());
            result.appendChild(parent);
        }
        File f = new File(targetFolder, guid + ".info.xml");
        try {
            XMLParser.serialize(f, doc);
        } catch (IOException e) {
            throw new CorpusException("problem when trying to serialize xml with folder info", e);
        }
    }
    public Collection<CorpusFolder> getChildFolders() {
        return childFolders.values();
    }
    public Collection<CorpusEntry> getEntries() {
        return entries.values();
    }
    public Corpus getCorpus() {
        return corpus;
    }
    public String getLabel() {
        return label;
    }
    void setLabel(String label) {
        this.label = label;
    }
    /**
     * Extracts the folder guid from an info-file name ("&lt;guid&gt;.info.xml"),
     * or returns null if the name does not have that suffix.
     */
    public static String getGuidByFile(File file) {
        String name = file.getName();
        String suffix = ".info.xml";
        if( name.endsWith(suffix) ) {
            return name.substring(0,name.length() - suffix.length());
        }
        return null;
    }
    /**
     * Deletes the on-disk info file for the folder with the given guid, if it exists.
     *
     * @throws CorpusException if the file exists but cannot be deleted
     */
    public static void deleteFolderFiles(File targetFolder, String guid) throws CorpusException {
        File f = new File(targetFolder, guid + ".info.xml");
        if( f.exists() ) {
            if( !f.delete() ) {
                throw new CorpusException("unable to delete xml with folder info");
            }
        }
    }
    @Override
    public String toString() {
        // The label is used directly as the display string (e.g. in UI trees).
        return label;
    }
}
| TreetonOrg/Treeton | dev/prosody/src/treeton/prosody/corpus/CorpusFolder.java | Java | apache-2.0 | 5,453 |
/*
* Copyright Red Hat Inc. and/or its affiliates and other contributors
* as indicated by the authors tag. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*
* This particular file is subject to the "Classpath" exception as provided in the
* LICENSE file that accompanied this code.
*
* This program is distributed in the hope that it will be useful, but WITHOUT A
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License,
* along with this distribution; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
package org.eclipse.ceylon.compiler.java.codegen.recovery;
/**
* The normal, error-free transformation plan.
* Instance available from {@link Errors#GENERATE}.
*/
public class Generate extends TransformationPlan {
Generate() {
    // Zero errors and no associated node/message — presumably the
    // TransformationPlan(errorCount, node, message) contract; confirm against superclass.
    super(0, null, null);
}
} | ceylon/ceylon | compiler-java/src/org/eclipse/ceylon/compiler/java/codegen/recovery/Generate.java | Java | apache-2.0 | 1,204 |
package com.github.ruediste.c3java.invocationRecording;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.lang.annotation.ElementType;
import org.junit.Before;
import org.junit.Test;
import com.google.common.reflect.TypeToken;
/**
 * Unit tests for {@code MethodInvocationRecorder}: verifies that invocations on
 * recorder-produced proxies are captured with the correct instance type and method,
 * including generic type resolution and terminal-type handling.
 */
@SuppressWarnings("serial")
public class MethodInvocationRecorderTest {
    // Proxy target for the tests; getT() exercises generic return-type resolution.
    static interface TestClass<T> {
        T getT();
        String getString();
        ElementType getEnum();
    }
    MethodInvocationRecorder recorder;
    @Before
    public void setup() {
        // Fresh recorder per test so recorded invocations do not leak between tests.
        recorder = new MethodInvocationRecorder();
    }
    @Test
    public void testSingle() {
        // A single call on the proxy should be recorded with its declaring type and method.
        recorder.getProxy(new TypeToken<TestClass<?>>() {
        }).getString();
        assertEquals(1, recorder.getInvocations().size());
        assertEquals(new TypeToken<TestClass<?>>() {
        }, recorder.getInvocations().get(0).getInstanceType());
        assertEquals("getString", recorder.getInvocations().get(0).getMethod().getName());
    }
    @Test
    public void testGeneric() {
        // Chained calls through a wildcard type parameter: the second invocation's
        // instance type is the captured wildcard (string form pinned below).
        recorder.getProxy(new TypeToken<TestClass<?>>() {
        }).getT().hashCode();
        assertEquals(2, recorder.getInvocations().size());
        assertEquals(new TypeToken<TestClass<?>>() {
        }, recorder.getInvocations().get(0).getInstanceType());
        assertEquals("getT", recorder.getInvocations().get(0).getMethod().getName());
        // NOTE(review): this asserts the exact toString of a captured wildcard type,
        // which is sensitive to the TypeToken/gentyref implementation.
        assertEquals("capture#2-of ? extends class java.lang.Object",
                recorder.getInvocations().get(1).getInstanceType().toString());
        assertEquals("hashCode", recorder.getInvocations().get(1).getMethod().getName());
    }
    @Test
    public void testTerminal() {
        // Terminal types (String, enums) cannot be proxied further and record nothing.
        assertTrue(recorder.isTerminal(TypeToken.of(String.class)));
        assertTrue(recorder.isTerminal(TypeToken.of(ElementType.class)));
        assertFalse(recorder.isTerminal(TypeToken.of(TestClass.class)));
        recorder.getProxy(String.class);
        assertEquals(0, recorder.getInvocations().size());
    }
    @Test
    public void testEnum() {
        recorder.getProxy(TestClass.class).getEnum();
        assertEquals(1, recorder.getInvocations().size());
        assertEquals("getEnum", recorder.getInvocations().get(0).getMethod().getName());
    }
    @Test
    public void testGeneric2() {
        // Two levels of generic nesting resolve correctly through the chain.
        recorder.getProxy(new TypeToken<TestClass<TestClass<?>>>() {
        }).getT().getT().hashCode();
        assertEquals(3, recorder.getInvocations().size());
        assertEquals(new TypeToken<TestClass<TestClass<?>>>() {
        }, recorder.getInvocations().get(0).getInstanceType());
        assertEquals("getT", recorder.getInvocations().get(0).getMethod().getName());
        assertEquals("getT", recorder.getInvocations().get(1).getMethod().getName());
        assertEquals("hashCode", recorder.getInvocations().get(2).getMethod().getName());
    }
}
| ruediste/c3java | src/test/java/com/github/ruediste/c3java/invocationRecording/MethodInvocationRecorderTest.java | Java | apache-2.0 | 2,917 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.homegraph.v1;
/**
* Service definition for HomeGraphService (v1).
*
* <p>
*
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://developers.google.com/actions/smarthome/create-app#request-sync" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link HomeGraphServiceRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class HomeGraphService extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 15,
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.15 of google-api-client to run version " +
"1.30.10 of the HomeGraph API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://homegraph.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public HomeGraphService(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
    com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
  // Convenience overload: route through the Builder so service defaults are applied.
  this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
/**
 * Constructs the service from a configured {@link Builder}.
 *
 * @param builder builder carrying the transport, JSON factory, and service options
 */
HomeGraphService(Builder builder) {
  super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
  // No service-specific request initialization beyond the base implementation.
  super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the AgentUsers collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code HomeGraphService homegraph = new HomeGraphService(...);}
* {@code HomeGraphService.AgentUsers.List request = homegraph.agentUsers().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public AgentUsers agentUsers() {
  // Returns a fresh accessor for agent-user-related requests.
  return new AgentUsers();
}
/**
 * The "agentUsers" collection of methods.
 */
public class AgentUsers {
  /**
   * Unlinks the given third-party user from your smart home Action. All data related to this user
   * will be deleted. For more details on how users link their accounts, see [fulfillment and
   * authentication](https://developers.google.com/assistant/smarthome/concepts/fulfillment-
   * authentication). The third-party user's identity is passed in via the `agent_user_id` (see
   * DeleteAgentUserRequest). This request must be authorized using service account credentials from
   * your Actions console project.
   *
   * Create a request for the method "agentUsers.delete".
   *
   * This request holds the parameters needed by the homegraph server. After setting any optional
   * parameters, call the {@link Delete#execute()} method to invoke the remote operation.
   *
   * @param agentUserId Required. Third-party user ID.
   * @return the request
   */
  public Delete delete(java.lang.String agentUserId) throws java.io.IOException {
    Delete result = new Delete(agentUserId);
    // Apply service-level request initialization before handing the request to the caller.
    initialize(result);
    return result;
  }

  public class Delete extends HomeGraphServiceRequest<com.google.api.services.homegraph.v1.model.Empty> {
    private static final String REST_PATH = "v1/{+agentUserId}";
    // REST path parameter must look like "agentUsers/<id>".
    private final java.util.regex.Pattern AGENT_USER_ID_PATTERN =
        java.util.regex.Pattern.compile("^agentUsers/.*$");

    /**
     * Unlinks the given third-party user from your smart home Action. All data related to this user
     * will be deleted. For more details on how users link their accounts, see [fulfillment and
     * authentication](https://developers.google.com/assistant/smarthome/concepts/fulfillment-
     * authentication). The third-party user's identity is passed in via the `agent_user_id` (see
     * DeleteAgentUserRequest). This request must be authorized using service account credentials from
     * your Actions console project.
     *
     * Create a request for the method "agentUsers.delete".
     *
     * This request holds the parameters needed by the homegraph server. After setting any
     * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
     * <p> {@link
     * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
     * be called to initialize this instance immediately after invoking the constructor. </p>
     *
     * @param agentUserId Required. Third-party user ID.
     * @since 1.13
     */
    protected Delete(java.lang.String agentUserId) {
      super(HomeGraphService.this, "DELETE", REST_PATH, null, com.google.api.services.homegraph.v1.model.Empty.class);
      this.agentUserId = com.google.api.client.util.Preconditions.checkNotNull(agentUserId, "Required parameter agentUserId must be specified.");
      // Validate the REST path parameter unless pattern checks are suppressed.
      if (!getSuppressPatternChecks()) {
        com.google.api.client.util.Preconditions.checkArgument(AGENT_USER_ID_PATTERN.matcher(agentUserId).matches(),
            "Parameter agentUserId must conform to the pattern " +
            "^agentUsers/.*$");
      }
    }

    // Covariant overrides of the standard query-parameter setters so calls can be chained
    // while keeping the concrete Delete type.
    @Override
    public Delete set$Xgafv(java.lang.String $Xgafv) {
      return (Delete) super.set$Xgafv($Xgafv);
    }
    @Override
    public Delete setAccessToken(java.lang.String accessToken) {
      return (Delete) super.setAccessToken(accessToken);
    }
    @Override
    public Delete setAlt(java.lang.String alt) {
      return (Delete) super.setAlt(alt);
    }
    @Override
    public Delete setCallback(java.lang.String callback) {
      return (Delete) super.setCallback(callback);
    }
    @Override
    public Delete setFields(java.lang.String fields) {
      return (Delete) super.setFields(fields);
    }
    @Override
    public Delete setKey(java.lang.String key) {
      return (Delete) super.setKey(key);
    }
    @Override
    public Delete setOauthToken(java.lang.String oauthToken) {
      return (Delete) super.setOauthToken(oauthToken);
    }
    @Override
    public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
      return (Delete) super.setPrettyPrint(prettyPrint);
    }
    @Override
    public Delete setQuotaUser(java.lang.String quotaUser) {
      return (Delete) super.setQuotaUser(quotaUser);
    }
    @Override
    public Delete setUploadType(java.lang.String uploadType) {
      return (Delete) super.setUploadType(uploadType);
    }
    @Override
    public Delete setUploadProtocol(java.lang.String uploadProtocol) {
      return (Delete) super.setUploadProtocol(uploadProtocol);
    }

    /** Required. Third-party user ID. */
    @com.google.api.client.util.Key
    private java.lang.String agentUserId;

    /** Required. Third-party user ID.
     */
    public java.lang.String getAgentUserId() {
      return agentUserId;
    }

    /** Required. Third-party user ID. */
    public Delete setAgentUserId(java.lang.String agentUserId) {
      // Re-validate on every set, mirroring the constructor's pattern check.
      if (!getSuppressPatternChecks()) {
        com.google.api.client.util.Preconditions.checkArgument(AGENT_USER_ID_PATTERN.matcher(agentUserId).matches(),
            "Parameter agentUserId must conform to the pattern " +
            "^agentUsers/.*$");
      }
      this.agentUserId = agentUserId;
      return this;
    }

    /** Request ID used for debugging. */
    @com.google.api.client.util.Key
    private java.lang.String requestId;

    /** Request ID used for debugging.
     */
    public java.lang.String getRequestId() {
      return requestId;
    }

    /** Request ID used for debugging. */
    public Delete setRequestId(java.lang.String requestId) {
      this.requestId = requestId;
      return this;
    }

    @Override
    public Delete set(String parameterName, Object value) {
      return (Delete) super.set(parameterName, value);
    }
  }
}
/**
* An accessor for creating requests from the Devices collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code HomeGraphService homegraph = new HomeGraphService(...);}
* {@code HomeGraphService.Devices.List request = homegraph.devices().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Devices devices() {
  // Returns a fresh accessor for device-related requests.
  return new Devices();
}
/**
* The "devices" collection of methods.
*/
public class Devices {
/**
* Gets the current states in Home Graph for the given set of the third-party user's devices. The
* third-party user's identity is passed in via the `agent_user_id` (see QueryRequest). This request
* must be authorized using service account credentials from your Actions console project.
*
* Create a request for the method "devices.query".
*
* This request holds the parameters needed by the homegraph server. After setting any optional
* parameters, call the {@link Query#execute()} method to invoke the remote operation.
*
* @param content the {@link com.google.api.services.homegraph.v1.model.QueryRequest}
* @return the request
*/
public Query query(com.google.api.services.homegraph.v1.model.QueryRequest content) throws java.io.IOException {
  Query result = new Query(content);
  // Apply service-level request initialization before handing the request to the caller.
  initialize(result);
  return result;
}
public class Query extends HomeGraphServiceRequest<com.google.api.services.homegraph.v1.model.QueryResponse> {
  private static final String REST_PATH = "v1/devices:query";

  /**
   * Gets the current states in Home Graph for the given set of the third-party user's devices. The
   * third-party user's identity is passed in via the `agent_user_id` (see QueryRequest). This
   * request must be authorized using service account credentials from your Actions console project.
   *
   * Create a request for the method "devices.query".
   *
   * This request holds the parameters needed by the homegraph server. After setting any
   * optional parameters, call the {@link Query#execute()} method to invoke the remote operation.
   * <p> {@link
   * Query#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
   * be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param content the {@link com.google.api.services.homegraph.v1.model.QueryRequest}
   * @since 1.13
   */
  protected Query(com.google.api.services.homegraph.v1.model.QueryRequest content) {
    super(HomeGraphService.this, "POST", REST_PATH, content, com.google.api.services.homegraph.v1.model.QueryResponse.class);
  }

  // Covariant overrides of the standard query-parameter setters so calls can be chained
  // while keeping the concrete Query type.
  @Override
  public Query set$Xgafv(java.lang.String $Xgafv) {
    return (Query) super.set$Xgafv($Xgafv);
  }
  @Override
  public Query setAccessToken(java.lang.String accessToken) {
    return (Query) super.setAccessToken(accessToken);
  }
  @Override
  public Query setAlt(java.lang.String alt) {
    return (Query) super.setAlt(alt);
  }
  @Override
  public Query setCallback(java.lang.String callback) {
    return (Query) super.setCallback(callback);
  }
  @Override
  public Query setFields(java.lang.String fields) {
    return (Query) super.setFields(fields);
  }
  @Override
  public Query setKey(java.lang.String key) {
    return (Query) super.setKey(key);
  }
  @Override
  public Query setOauthToken(java.lang.String oauthToken) {
    return (Query) super.setOauthToken(oauthToken);
  }
  @Override
  public Query setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (Query) super.setPrettyPrint(prettyPrint);
  }
  @Override
  public Query setQuotaUser(java.lang.String quotaUser) {
    return (Query) super.setQuotaUser(quotaUser);
  }
  @Override
  public Query setUploadType(java.lang.String uploadType) {
    return (Query) super.setUploadType(uploadType);
  }
  @Override
  public Query setUploadProtocol(java.lang.String uploadProtocol) {
    return (Query) super.setUploadProtocol(uploadProtocol);
  }
  @Override
  public Query set(String parameterName, Object value) {
    return (Query) super.set(parameterName, value);
  }
}
/**
* Reports device state and optionally sends device notifications. Called by your smart home Action
* when the state of a third-party device changes or you need to send a notification about the
* device. See [Implement Report State](https://developers.google.com/assistant/smarthome/develop
* /report-state) for more information. This method updates the device state according to its
* declared [traits](https://developers.google.com/assistant/smarthome/concepts/devices-traits).
* Publishing a new state value outside of these traits will result in an `INVALID_ARGUMENT` error
* response. The third-party user's identity is passed in via the `agent_user_id` (see
* ReportStateAndNotificationRequest). This request must be authorized using service account
* credentials from your Actions console project.
*
* Create a request for the method "devices.reportStateAndNotification".
*
* This request holds the parameters needed by the homegraph server. After setting any optional
* parameters, call the {@link ReportStateAndNotification#execute()} method to invoke the remote
* operation.
*
* @param content the {@link com.google.api.services.homegraph.v1.model.ReportStateAndNotificationRequest}
* @return the request
*/
public ReportStateAndNotification reportStateAndNotification(com.google.api.services.homegraph.v1.model.ReportStateAndNotificationRequest content) throws java.io.IOException {
  ReportStateAndNotification result = new ReportStateAndNotification(content);
  // Apply service-level request initialization before handing the request to the caller.
  initialize(result);
  return result;
}
public class ReportStateAndNotification extends HomeGraphServiceRequest<com.google.api.services.homegraph.v1.model.ReportStateAndNotificationResponse> {
  private static final String REST_PATH = "v1/devices:reportStateAndNotification";

  /**
   * Reports device state and optionally sends device notifications. Called by your smart home
   * Action when the state of a third-party device changes or you need to send a notification about
   * the device. See [Implement Report
   * State](https://developers.google.com/assistant/smarthome/develop/report-state) for more
   * information. This method updates the device state according to its declared
   * [traits](https://developers.google.com/assistant/smarthome/concepts/devices-traits). Publishing
   * a new state value outside of these traits will result in an `INVALID_ARGUMENT` error response.
   * The third-party user's identity is passed in via the `agent_user_id` (see
   * ReportStateAndNotificationRequest). This request must be authorized using service account
   * credentials from your Actions console project.
   *
   * Create a request for the method "devices.reportStateAndNotification".
   *
   * This request holds the parameters needed by the homegraph server. After setting any
   * optional parameters, call the {@link ReportStateAndNotification#execute()} method to invoke the
   * remote operation. <p> {@link ReportStateAndNotification#initialize(com.google.api.client.google
   * apis.services.AbstractGoogleClientRequest)} must be called to initialize this instance
   * immediately after invoking the constructor. </p>
   *
   * @param content the {@link com.google.api.services.homegraph.v1.model.ReportStateAndNotificationRequest}
   * @since 1.13
   */
  protected ReportStateAndNotification(com.google.api.services.homegraph.v1.model.ReportStateAndNotificationRequest content) {
    super(HomeGraphService.this, "POST", REST_PATH, content, com.google.api.services.homegraph.v1.model.ReportStateAndNotificationResponse.class);
  }

  // Covariant overrides of the standard query-parameter setters so calls can be chained
  // while keeping the concrete ReportStateAndNotification type.
  @Override
  public ReportStateAndNotification set$Xgafv(java.lang.String $Xgafv) {
    return (ReportStateAndNotification) super.set$Xgafv($Xgafv);
  }
  @Override
  public ReportStateAndNotification setAccessToken(java.lang.String accessToken) {
    return (ReportStateAndNotification) super.setAccessToken(accessToken);
  }
  @Override
  public ReportStateAndNotification setAlt(java.lang.String alt) {
    return (ReportStateAndNotification) super.setAlt(alt);
  }
  @Override
  public ReportStateAndNotification setCallback(java.lang.String callback) {
    return (ReportStateAndNotification) super.setCallback(callback);
  }
  @Override
  public ReportStateAndNotification setFields(java.lang.String fields) {
    return (ReportStateAndNotification) super.setFields(fields);
  }
  @Override
  public ReportStateAndNotification setKey(java.lang.String key) {
    return (ReportStateAndNotification) super.setKey(key);
  }
  @Override
  public ReportStateAndNotification setOauthToken(java.lang.String oauthToken) {
    return (ReportStateAndNotification) super.setOauthToken(oauthToken);
  }
  @Override
  public ReportStateAndNotification setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (ReportStateAndNotification) super.setPrettyPrint(prettyPrint);
  }
  @Override
  public ReportStateAndNotification setQuotaUser(java.lang.String quotaUser) {
    return (ReportStateAndNotification) super.setQuotaUser(quotaUser);
  }
  @Override
  public ReportStateAndNotification setUploadType(java.lang.String uploadType) {
    return (ReportStateAndNotification) super.setUploadType(uploadType);
  }
  @Override
  public ReportStateAndNotification setUploadProtocol(java.lang.String uploadProtocol) {
    return (ReportStateAndNotification) super.setUploadProtocol(uploadProtocol);
  }
  @Override
  public ReportStateAndNotification set(String parameterName, Object value) {
    return (ReportStateAndNotification) super.set(parameterName, value);
  }
}
/**
* Requests Google to send an `action.devices.SYNC`
* [intent](https://developers.google.com/assistant/smarthome/reference/intent/sync) to your smart
* home Action to update device metadata for the given user. The third-party user's identity is
* passed via the `agent_user_id` (see RequestSyncDevicesRequest). This request must be authorized
* using service account credentials from your Actions console project.
*
* Create a request for the method "devices.requestSync".
*
* This request holds the parameters needed by the homegraph server. After setting any optional
* parameters, call the {@link RequestSync#execute()} method to invoke the remote operation.
*
* @param content the {@link com.google.api.services.homegraph.v1.model.RequestSyncDevicesRequest}
* @return the request
*/
public RequestSync requestSync(com.google.api.services.homegraph.v1.model.RequestSyncDevicesRequest content) throws java.io.IOException {
RequestSync result = new RequestSync(content);
initialize(result);
return result;
}
public class RequestSync extends HomeGraphServiceRequest<com.google.api.services.homegraph.v1.model.RequestSyncDevicesResponse> {
private static final String REST_PATH = "v1/devices:requestSync";
/**
* Requests Google to send an `action.devices.SYNC`
* [intent](https://developers.google.com/assistant/smarthome/reference/intent/sync) to your smart
* home Action to update device metadata for the given user. The third-party user's identity is
* passed via the `agent_user_id` (see RequestSyncDevicesRequest). This request must be authorized
* using service account credentials from your Actions console project.
*
* Create a request for the method "devices.requestSync".
*
* This request holds the parameters needed by the the homegraph server. After setting any
* optional parameters, call the {@link RequestSync#execute()} method to invoke the remote
* operation. <p> {@link
* RequestSync#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param content the {@link com.google.api.services.homegraph.v1.model.RequestSyncDevicesRequest}
* @since 1.13
*/
protected RequestSync(com.google.api.services.homegraph.v1.model.RequestSyncDevicesRequest content) {
super(HomeGraphService.this, "POST", REST_PATH, content, com.google.api.services.homegraph.v1.model.RequestSyncDevicesResponse.class);
}
@Override
public RequestSync set$Xgafv(java.lang.String $Xgafv) {
return (RequestSync) super.set$Xgafv($Xgafv);
}
@Override
public RequestSync setAccessToken(java.lang.String accessToken) {
return (RequestSync) super.setAccessToken(accessToken);
}
@Override
public RequestSync setAlt(java.lang.String alt) {
return (RequestSync) super.setAlt(alt);
}
@Override
public RequestSync setCallback(java.lang.String callback) {
return (RequestSync) super.setCallback(callback);
}
@Override
public RequestSync setFields(java.lang.String fields) {
return (RequestSync) super.setFields(fields);
}
@Override
public RequestSync setKey(java.lang.String key) {
return (RequestSync) super.setKey(key);
}
@Override
public RequestSync setOauthToken(java.lang.String oauthToken) {
return (RequestSync) super.setOauthToken(oauthToken);
}
@Override
public RequestSync setPrettyPrint(java.lang.Boolean prettyPrint) {
return (RequestSync) super.setPrettyPrint(prettyPrint);
}
@Override
public RequestSync setQuotaUser(java.lang.String quotaUser) {
return (RequestSync) super.setQuotaUser(quotaUser);
}
@Override
public RequestSync setUploadType(java.lang.String uploadType) {
return (RequestSync) super.setUploadType(uploadType);
}
@Override
public RequestSync setUploadProtocol(java.lang.String uploadProtocol) {
return (RequestSync) super.setUploadProtocol(uploadProtocol);
}
@Override
public RequestSync set(String parameterName, Object value) {
return (RequestSync) super.set(parameterName, value);
}
}
/**
* Gets all the devices associated with the given third-party user. The third-party user's identity
* is passed in via the `agent_user_id` (see SyncRequest). This request must be authorized using
* service account credentials from your Actions console project.
*
* Create a request for the method "devices.sync".
*
* This request holds the parameters needed by the homegraph server. After setting any optional
* parameters, call the {@link Sync#execute()} method to invoke the remote operation.
*
* @param content the {@link com.google.api.services.homegraph.v1.model.SyncRequest}
* @return the request
*/
public Sync sync(com.google.api.services.homegraph.v1.model.SyncRequest content) throws java.io.IOException {
Sync result = new Sync(content);
initialize(result);
return result;
}
public class Sync extends HomeGraphServiceRequest<com.google.api.services.homegraph.v1.model.SyncResponse> {
private static final String REST_PATH = "v1/devices:sync";
/**
* Gets all the devices associated with the given third-party user. The third-party user's
* identity is passed in via the `agent_user_id` (see SyncRequest). This request must be
* authorized using service account credentials from your Actions console project.
*
* Create a request for the method "devices.sync".
*
* This request holds the parameters needed by the the homegraph server. After setting any
* optional parameters, call the {@link Sync#execute()} method to invoke the remote operation. <p>
* {@link Sync#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param content the {@link com.google.api.services.homegraph.v1.model.SyncRequest}
* @since 1.13
*/
protected Sync(com.google.api.services.homegraph.v1.model.SyncRequest content) {
super(HomeGraphService.this, "POST", REST_PATH, content, com.google.api.services.homegraph.v1.model.SyncResponse.class);
}
@Override
public Sync set$Xgafv(java.lang.String $Xgafv) {
return (Sync) super.set$Xgafv($Xgafv);
}
@Override
public Sync setAccessToken(java.lang.String accessToken) {
return (Sync) super.setAccessToken(accessToken);
}
@Override
public Sync setAlt(java.lang.String alt) {
return (Sync) super.setAlt(alt);
}
@Override
public Sync setCallback(java.lang.String callback) {
return (Sync) super.setCallback(callback);
}
@Override
public Sync setFields(java.lang.String fields) {
return (Sync) super.setFields(fields);
}
@Override
public Sync setKey(java.lang.String key) {
return (Sync) super.setKey(key);
}
@Override
public Sync setOauthToken(java.lang.String oauthToken) {
return (Sync) super.setOauthToken(oauthToken);
}
@Override
public Sync setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Sync) super.setPrettyPrint(prettyPrint);
}
@Override
public Sync setQuotaUser(java.lang.String quotaUser) {
return (Sync) super.setQuotaUser(quotaUser);
}
@Override
public Sync setUploadType(java.lang.String uploadType) {
return (Sync) super.setUploadType(uploadType);
}
@Override
public Sync setUploadProtocol(java.lang.String uploadProtocol) {
return (Sync) super.setUploadProtocol(uploadProtocol);
}
@Override
public Sync set(String parameterName, Object value) {
return (Sync) super.set(parameterName, value);
}
}
}
/**
* Builder for {@link HomeGraphService}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
DEFAULT_ROOT_URL,
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
/** Builds a new instance of {@link HomeGraphService}. */
@Override
public HomeGraphService build() {
return new HomeGraphService(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link HomeGraphServiceRequestInitializer}.
*
* @since 1.12
*/
public Builder setHomeGraphServiceRequestInitializer(
HomeGraphServiceRequestInitializer homegraphserviceRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(homegraphserviceRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
| googleapis/google-api-java-client-services | clients/google-api-services-homegraph/v1/1.30.1/com/google/api/services/homegraph/v1/HomeGraphService.java | Java | apache-2.0 | 33,723 |
package org.jfl2.fx.controller.menu;
import javafx.application.Platform;
import javafx.event.Event;
import javafx.scene.Node;
import javafx.scene.control.RadioButton;
import javafx.scene.input.KeyEvent;
import javafx.scene.input.MouseEvent;
import lombok.extern.slf4j.Slf4j;
import org.jfl2.core.util.Jfl2NumberUtils;
import org.jfl2.fx.control.MenuPane;
import java.util.*;
import java.util.stream.Collectors;
@Slf4j
public class MenuWindowManager {
private Map<String, MenuWindow> id2MenuWindow = new HashMap<>();
/**
* 表示中のメニュー
*/
private MenuWindow nowMenu;
/**
* 選択中のアイテムIndex
*/
private int selected = -1;
/**
* Pane
*/
private MenuPane menuPane;
/**
* Constructor
*
* @param pane
*/
public MenuWindowManager(MenuPane pane) {
menuPane = pane;
}
/**
* 管理対象にMenuWindowを追加
*
* @param menu
* @return
*/
public MenuWindowManager add(MenuWindow menu) {
id2MenuWindow.put(menu.id, menu);
return this;
}
/**
* id からMenuWindowを取得
*
* @param id Specify string
* @return
*/
public MenuWindow get(String id) {
return id2MenuWindow.get(id);
}
/**
* メニューを開く
*
* @param id
* @return
*/
public MenuWindowManager show(String id) {
nowMenu = get(id);
if (nowMenu != null) {
menuPane.setTitleText(nowMenu.id);
menuPane.setDescriptionText(nowMenu.description);
menuPane.getListView().setVisible(false);
menuPane.getListView().setManaged(false);
/* listview は廃止
menuPane.setItems(FXCollections.observableList(nowMenu.items));
VirtualFlow flow = (VirtualFlow) menuPane.getListView().getChildrenUnmodifiable().get(0);
double height = 0;
for (int n = 0; n < nowMenu.items.size(); n++) {
IndexedCell cell = flow.getCell(n);
if (cell != null) {
height += cell.getHeight();
}
}
height = Jfl2Const.getMaxValue(height, Jfl2Const.MENU_MAX_HEIGHT);
// menuPane.getListView().setStyle("-fx-pref-height: " + height + ";");
/**/
List<RadioButton> rList = nowMenu.items.stream().map(menuItem -> {
RadioButton btn = new RadioButton(menuItem.toString());
btn.setFocusTraversable(false);
btn.setToggleGroup(menuPane.getToggleGroup());
btn.onMouseEnteredProperty().set((ev) -> select(menuItem));
menuPane.getRadioBox().getChildren().add(btn);
menuPane.getButtons().add(btn);
return btn;
}).collect(Collectors.toList());
selectFirst();
menuPane.setVisible(true);
getFocus();
}
return this;
}
/**
* 1つ上を選択
*/
public MenuWindowManager up() {
select(selected - 1, true);
return this;
}
/**
* 1つ下を選択
*/
public MenuWindowManager down() {
select(selected + 1, true);
return this;
}
/**
* 現在選択されているものを実行する
*
* @return
*/
public MenuWindowManager enter() {
return enter(selected);
}
/**
* 指定したIndexのMenuを実行する
*
* @return
*/
public MenuWindowManager enter(int index) {
return enter(nowMenu.items.get(index));
}
/**
* 指定したMenuItemを実行する
*
* @param item MenuItem is executed.
* @return
*/
public MenuWindowManager enter(MenuItem item) {
hide();
item.getConsumer().accept(null);
return this;
}
/**
* 指定したボタンを選択する
*
* @param index 0開始
* @param loop 上下間ループするならtrue
* @return
*/
public MenuWindowManager select(int index, boolean loop) {
if (menuPane.getButtons() != null) {
selected = Jfl2NumberUtils.loopValue(index, menuPane.getButtons().size(), loop);
menuPane.getButtons().get(selected).setSelected(true);
}
return this;
}
/**
* 指定したボタンを選択する
*
* @param menuItem MenuItem
* @return
*/
public MenuWindowManager select(MenuItem menuItem){
int index=0;
for( MenuItem item : nowMenu.items ){
if(Objects.equals(item, menuItem)){
select(index, false);
}
index++;
}
return this;
}
/**
* 最初のボタンを選択する
*/
public MenuWindowManager selectFirst() {
select(0, false);
return this;
}
/**
* ListViewにフォーカスを移す
*/
public void getFocus() {
Platform.runLater(() -> {
Optional<RadioButton> selected = menuPane.getButtons().stream().filter(RadioButton::isSelected).findFirst();
selected.ifPresent(RadioButton::requestFocus);
});
}
/**
* メニューを閉じる
*
* @return
*/
public MenuWindowManager hide() {
menuPane.setVisible(false);
menuPane.setDescriptionText("");
// menuPane.clearItems();
menuPane.getMenuBox().getChildren().removeAll();
menuPane.getButtons().stream().forEach(node->node.onMouseEnteredProperty().unbind());
menuPane.getButtons().clear();
menuPane.getRadioBox().getChildren().clear();
nowMenu = null;
return this;
}
/**
* その他キーの処理
*
* @param event
* @return
*/
public MenuWindowManager quickSelect(Event event) {
if (KeyEvent.class.isInstance(event)) {
KeyEvent keyEvent = (KeyEvent) event;
nowMenu.items.stream().filter(
(item) -> item.getKey().isHandle(keyEvent)
).findFirst().ifPresent(item -> enter(item));
}
return this;
}
/**
* ホバー時のイベント
* @param mouseEvent
* @return
*/
public MenuWindowManager hover(MouseEvent mouseEvent) {
int index = 0;
for( Node node : menuPane.getRadioBox().getChildren() ){
if( node.contains(mouseEvent.getSceneX(), mouseEvent.getSceneY()) ){
select(index, false);
}
if( node.contains(mouseEvent.getScreenX(), mouseEvent.getScreenY()) ){
select(index, false);
}
index++;
}
return this;
}
}
| lei0229/jfl2 | src/main/java/org/jfl2/fx/controller/menu/MenuWindowManager.java | Java | apache-2.0 | 6,781 |
package sunning.democollection.learn._0331.component;
import dagger.Component;
import sunning.democollection.learn._0331.UserActivity;
import sunning.democollection.learn._0331.module.ShoppingCartModule;
/**
* Created by sunning on 16/3/31.
*/
@Component(dependencies = ActivityComponent.class, modules = ShoppingCartModule.class)
public interface ShoppingCartComponent {
void inject(UserActivity userActivity);
}
| syg5201314/demoCollection | app/src/main/java/sunning/democollection/learn/_0331/component/ShoppingCartComponent.java | Java | apache-2.0 | 422 |
package gui.sub_controllers;
import gui.GraphDrawer;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.collections.transformation.FilteredList;
import javafx.collections.transformation.SortedList;
import javafx.fxml.FXML;
import javafx.scene.control.TableColumn;
import javafx.scene.control.TableRow;
import javafx.scene.control.TableView;
import javafx.scene.control.TextField;
import javafx.scene.control.cell.CheckBoxTableCell;
import javafx.scene.control.cell.PropertyValueFactory;
import javafx.stage.Stage;
import org.jetbrains.annotations.NotNull;
import structures.Annotation;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.TreeSet;
/**
* View-Controller for the genome table.
*
* @author Marco Jakob -> from http://code.makery.ch/blog/javafx-8-tableview-sorting-filtering/
* Changed to view and change annotations by Jip Rietveld
*/
public class AnnotationTableController {
@FXML
private TextField filterField;
@FXML
private TableView<Annotation> annotationTable;
@FXML
private TableColumn<Annotation, Integer> startColumn;
@FXML
private TableColumn<Annotation, Integer> endColumn;
@FXML
private TableColumn<Annotation, String> infoColumn;
@FXML
private TableColumn<Annotation, Boolean> highlightColumn;
private SortedList<Annotation> sortedData;
private HashMap<Integer, TreeSet<Annotation>> annotations;
private HashMap<Integer, TreeSet<Annotation>> updatedAnnotations;
private boolean allSelected;
/**
* Just add some sample data in the constructor.
*/
public AnnotationTableController() {
}
/**
* Initializes the controller class.
* Needs to be called manually to get the data.
* Initializes the table columns and sets up sorting and filtering.
*
* @param annotationsArg the annotations to load into the table.
*/
@FXML
@SuppressWarnings("MethodLength") //It is only 2 too long and the comments ensure clarity.
public void initialize(HashMap<Integer, TreeSet<Annotation>> annotationsArg) {
this.annotations = annotationsArg;
this.updatedAnnotations = this.annotations;
allSelected = false;
ObservableList<Annotation> masterData =
FXCollections.observableArrayList(new ArrayList<>(bucketsToTreeSet()));
// 0. Initialize the columns.
initializeColumns();
// 0.1 setRight editable columns
setEditable();
// 1. Wrap the ObservableList in a FilteredList (initially display all data).
FilteredList<Annotation> filteredData = new FilteredList<>(masterData, p -> true);
// 2. Set the filter Predicate whenever the filter changes.
filterField.textProperty().addListener((observable, oldValue, newValue)
-> filteredData.setPredicate(annotation -> {
// If filter text is empty, display all annotations.
if (newValue == null || newValue.isEmpty()) {
return true;
}
String lowerCaseFilter = newValue.toLowerCase();
//Check the info, but also co-ordinates.
return annotation.getInfo().toLowerCase().contains(lowerCaseFilter)
|| Integer.toString(annotation.getStart()).contains(lowerCaseFilter)
|| Integer.toString(annotation.getEnd()).contains(lowerCaseFilter);
}));
// 3. Wrap the FilteredList in a SortedList.
sortedData = new SortedList<>(filteredData);
// 4. Bind the SortedList comparator to the TableView comparator.
sortedData.comparatorProperty().bind(annotationTable.comparatorProperty());
// 5. Add sorted (and filtered) data to the table.
annotationTable.setItems(sortedData);
}
/**
* Sets the hashMap annotations to a HashSet.
*
* @return a hash set of the buckets of annotations
*/
@NotNull
private TreeSet<Annotation> bucketsToTreeSet() {
return bucketsToTreeSet(this.annotations);
}
/**
* Converts the hashMap to a hashSet.
*
* @param hashMap The hashMap to be converted.
* @return a hashSet of the hashMap.
*/
private TreeSet<Annotation> bucketsToTreeSet(HashMap<Integer, TreeSet<Annotation>> hashMap) {
if (hashMap == null) {
return null;
}
TreeSet<Annotation> drawThese = new TreeSet<>();
for (int i = 0; i <= hashMap.size(); i++) {
TreeSet<Annotation> tempAnnotations = hashMap.get(i);
if (tempAnnotations != null) {
drawThese.addAll(tempAnnotations);
}
}
return drawThese;
}
/**
* Method that sets the columns and table to the correct editable state.
*/
private void setEditable() {
annotationTable.setEditable(true);
startColumn.setEditable(false);
endColumn.setEditable(false);
infoColumn.setEditable(false);
highlightColumn.setEditable(true);
}
/**
* Method that initializes the columns with the right factories.
*/
private void initializeColumns() {
startColumn.setCellValueFactory(new PropertyValueFactory<>("start"));
endColumn.setCellValueFactory(new PropertyValueFactory<>("end"));
infoColumn.setCellValueFactory(new PropertyValueFactory<>("info"));
highlightColumn.setCellValueFactory(
param -> param.getValue().getSelected());
highlightColumn.setCellFactory(CheckBoxTableCell.forTableColumn(highlightColumn));
annotationTable.setRowFactory(tv -> {
TableRow<Annotation> row = new TableRow<>();
row.setOnMouseClicked(event -> {
if (event.getClickCount() == 2 && (!row.isEmpty())) {
Annotation annotation = row.getItem();
goToAnnotation(annotation);
close();
}
});
return row;
});
}
/**
* Method that goes to the annotation and highlights it.
*
* @param annotation the Annotation to go to.
*/
private void goToAnnotation(Annotation annotation) {
try {
int startNodeID = GraphDrawer.getInstance().hongerInAfrika(annotation.getStart());
int endNodeID = GraphDrawer.getInstance().hongerInAfrika(annotation.getEnd());
int soortVanRadius = (int) ((endNodeID - startNodeID) * 1.2);
if (soortVanRadius > 4000) {
ZoomController.getInstance().traverseGraphClicked(startNodeID, 4000);
} else {
ZoomController.getInstance().traverseGraphClicked(((endNodeID + startNodeID) / 2),
Math.max(soortVanRadius, (int) Math.sqrt(49)));
}
GraphDrawer.getInstance().highlightAnnotation(annotation);
} catch (StackOverflowError e) {
AnnotationPopUpController popUp = new AnnotationPopUpController();
popUp.loadNoAnnotationFound("Sorry, can't find this annotation.");
System.err.println("Sorry, too many nodes without ref to hold in memory.");
}
}
/**
* Handles pressing the save button.
*/
@FXML
public void saveButtonClicked() {
updatedAnnotations = annotations;
Annotation annotation = annotationTable.getSelectionModel().getSelectedItem();
if (annotation != null) {
goToAnnotation(annotation);
}
close();
}
/**
* Handles pressing the cancel button.
*/
public void cancelButtonClicked() {
close();
}
/**
* A general function that closes the stage.
*/
private void close() {
Stage stage = (Stage) annotationTable.getScene().getWindow();
stage.close();
}
/**
* Can select/deselect the entire sortedData at the same time.
*/
@FXML
public void selectAllFiltered() {
for (Annotation annotation : sortedData) {
if (allSelected) {
annotation.setSelected(false);
} else {
annotation.setSelected(true);
}
}
annotationTable.setItems(sortedData);
allSelected = !allSelected;
}
public HashMap<Integer, TreeSet<Annotation>> getAnnotations() {
return updatedAnnotations;
}
}
| ProgrammingLife2017/DynamiteAndButterflies | src/main/java/gui/sub_controllers/AnnotationTableController.java | Java | apache-2.0 | 8,434 |
package liquibase.change.core;
import liquibase.change.AbstractSQLChange;
import liquibase.change.DatabaseChange;
import liquibase.change.ChangeMetaData;
/**
* Allows execution of arbitrary SQL. This change can be used when existing changes are either don't exist,
* are not flexible enough, or buggy.
*/
@DatabaseChange(name="sql",
description = "The 'sql' tag allows you to specify whatever sql you want. It is useful for complex changes that aren't supported through Liquibase's automated refactoring tags and to work around bugs and limitations of Liquibase. The SQL contained in the sql tag can be multi-line.\n" +
"\n" +
"The createProcedure refactoring is the best way to create stored procedures.\n" +
"\n" +
"The 'sql' tag can also support multiline statements in the same file. Statements can either be split using a ; at the end of the last line of the SQL or a go on its own on the line between the statements can be used.Multiline SQL statements are also supported and only a ; or go statement will finish a statement, a new line is not enough. Files containing a single statement do not need to use a ; or go.\n" +
"\n" +
"The sql change can also contain comments of either of the following formats:\n" +
"\n" +
"A multiline comment that starts with /* and ends with */.\n" +
"A single line comment starting with <space>--<space> and finishing at the end of the line\n" +
"Note: By default it will attempt to split statements on a ';' or 'go' at the end of lines. Because of this, if you have a comment or some other non-statement ending ';' or 'go', don't have it at the end of a line or you will get invalid SQL.",
priority = ChangeMetaData.PRIORITY_DEFAULT)
public class RawSQLChange extends AbstractSQLChange {
private String comment;
public RawSQLChange() {
}
public RawSQLChange(String sql) {
setSql(sql);
}
public String getComment() {
return comment;
}
public void setComment(String comment) {
this.comment = comment;
}
public String getConfirmationMessage() {
return "Custom SQL executed";
}
}
| ArloL/liquibase | liquibase-core/src/main/java/liquibase/change/core/RawSQLChange.java | Java | apache-2.0 | 2,252 |
package com.deleidos.dp.export;
import com.deleidos.dp.beans.Schema;
import com.deleidos.dp.exceptions.H2DataAccessException;
import com.deleidos.dp.exceptions.SchemaNotFoundException;
public interface Exporter {
public abstract String generateExport(Schema schema);
public abstract String generateExport(Schema schema, Schema previousVersion);
}
| deleidos/de-schema-wizard | data-profiler/src/main/java/com/deleidos/dp/export/Exporter.java | Java | apache-2.0 | 356 |
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Original source: github.com/micro/go-micro/v3/network/resolver/resolver.go
// Package resolver resolves network names to addresses
package resolver
// Resolver is network resolver. It's used to find network nodes
// via the name to connect to. This is done based on Network.Name().
// Before we can be part of any network, we have to connect to it.
type Resolver interface {
// Resolve returns a list of addresses for a name
Resolve(name string) ([]*Record, error)
}
// A resolved record
type Record struct {
Address string `json:"address"`
Priority int64 `json:"priority"`
}
| micro/micro | service/network/resolver/resolver.go | GO | apache-2.0 | 1,147 |
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.appyvet.rangebarsample.colorpicker;
import android.app.Activity;
import android.app.AlertDialog;
import android.app.Dialog;
import android.app.DialogFragment;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.widget.ProgressBar;
import com.appyvet.rangebarsample.Component;
import com.appyvet.rangebarsample.R;
/**
* A dialog which takes in as input an array of colors and creates a palette allowing the user to
* select a specific color swatch, which invokes a listener.
*/
public class ColorPickerDialog extends DialogFragment implements ColorPickerSwatch.OnSwatchColorSelectedListener {
/**
* Interface for a callback when a color square is selected.
*/
public interface OnColorSelectedListener {
/**
* Called when a specific color square has been selected.
*/
public void onColorSelected(int color, Component component);
}
public static final int SIZE_LARGE = 1;
public static final int SIZE_SMALL = 2;
protected AlertDialog mAlertDialog;
protected static final String KEY_TITLE_ID = "title_id";
protected static final String KEY_COLORS = "colors";
protected static final String KEY_SELECTED_COLOR = "selected_color";
protected static final String KEY_COLUMNS = "columns";
protected static final String KEY_SIZE = "size";
protected int mTitleResId = R.string.color_picker_default_title;
protected int[] mColors = null;
protected int mSelectedColor;
protected int mColumns;
protected int mSize;
private Component mComponent;
private ColorPickerPalette mPalette;
private ProgressBar mProgress;
protected OnColorSelectedListener mListener;
public ColorPickerDialog() {
// Empty constructor required for dialog fragments.
}
public static ColorPickerDialog newInstance(int titleResId, int[] colors, int selectedColor,
int columns, int size, Component component) {
ColorPickerDialog ret = new ColorPickerDialog();
ret.initialize(titleResId, colors, selectedColor, columns, size, component);
return ret;
}
public void initialize(int titleResId, int[] colors, int selectedColor, int columns, int size, Component component) {
setArguments(titleResId, columns, size);
setColors(colors, selectedColor);
mComponent = component;
}
public void setArguments(int titleResId, int columns, int size) {
Bundle bundle = new Bundle();
bundle.putInt(KEY_TITLE_ID, titleResId);
bundle.putInt(KEY_COLUMNS, columns);
bundle.putInt(KEY_SIZE, size);
setArguments(bundle);
}
public void setOnColorSelectedListener(OnColorSelectedListener listener) {
mListener = listener;
}
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (getArguments() != null) {
mTitleResId = getArguments().getInt(KEY_TITLE_ID);
mColumns = getArguments().getInt(KEY_COLUMNS);
mSize = getArguments().getInt(KEY_SIZE);
}
if (savedInstanceState != null) {
mColors = savedInstanceState.getIntArray(KEY_COLORS);
mSelectedColor = (Integer) savedInstanceState.getSerializable(KEY_SELECTED_COLOR);
}
}
@Override
public Dialog onCreateDialog(Bundle savedInstanceState) {
final Activity activity = getActivity();
View view = LayoutInflater.from(getActivity()).inflate(R.layout.color_picker_dialog, null);
mProgress = (ProgressBar) view.findViewById(android.R.id.progress);
mPalette = (ColorPickerPalette) view.findViewById(R.id.color_picker);
mPalette.init(mSize, mColumns, this);
if (mColors != null) {
showPaletteView();
}
mAlertDialog = new AlertDialog.Builder(activity)
.setTitle(mTitleResId)
.setView(view)
.create();
return mAlertDialog;
}
@Override
public void onSwatchColorSelected(int color) {
    // Notify the listener registered via setOnColorSelectedListener().
    if (mListener != null) {
        mListener.onColorSelected(color, mComponent);
    }

    // Also forward the selection to the target fragment, if it registered as a listener.
    // NOTE(review): the instanceof check uses ColorPickerSwatch.OnSwatchColorSelectedListener
    // but the cast is to OnColorSelectedListener; if those are unrelated types, a target
    // fragment implementing only the former would throw ClassCastException here — confirm
    // which interface is intended.
    if (getTargetFragment() instanceof ColorPickerSwatch.OnSwatchColorSelectedListener) {
        final OnColorSelectedListener listener =
                (OnColorSelectedListener) getTargetFragment();
        listener.onColorSelected(color, mComponent);
    }

    if (color != mSelectedColor) {
        mSelectedColor = color;
        // Redraw palette to show checkmark on newly selected color before dismissing.
        mPalette.drawPalette(mColors, mSelectedColor);
    }

    dismiss();
}
/** Hides the progress indicator and shows the (refreshed) swatch palette. */
public void showPaletteView() {
    if (mProgress == null || mPalette == null) {
        return;
    }
    mProgress.setVisibility(View.GONE);
    refreshPalette();
    mPalette.setVisibility(View.VISIBLE);
}
/** Hides the swatch palette and shows the progress indicator instead. */
public void showProgressBarView() {
    if (mProgress == null || mPalette == null) {
        return;
    }
    mProgress.setVisibility(View.VISIBLE);
    mPalette.setVisibility(View.GONE);
}
/**
 * Replaces the palette contents and the selected color, redrawing only when
 * something actually changed. Note: the colors array is compared by
 * reference, not by content.
 */
public void setColors(int[] colors, int selectedColor) {
    if (mColors == colors && mSelectedColor == selectedColor) {
        return;
    }
    mColors = colors;
    mSelectedColor = selectedColor;
    refreshPalette();
}
/**
 * Replaces the palette contents, redrawing only when the array reference
 * changed (content is not compared).
 */
public void setColors(int[] colors) {
    if (mColors == colors) {
        return;
    }
    mColors = colors;
    refreshPalette();
}
/** Updates the selected color and redraws the palette when it changed. */
public void setSelectedColor(int color) {
    if (mSelectedColor == color) {
        return;
    }
    mSelectedColor = color;
    refreshPalette();
}
/** Redraws the swatch grid; a no-op until both palette view and colors exist. */
private void refreshPalette() {
    if (mPalette == null || mColors == null) {
        return;
    }
    mPalette.drawPalette(mColors, mSelectedColor);
}
/** @return the current palette contents; may be null before initialization */
public int[] getColors() {
    return mColors;
}
/** @return the currently selected color value */
public int getSelectedColor() {
    return mSelectedColor;
}
@Override
public void onSaveInstanceState(Bundle outState) {
    super.onSaveInstanceState(outState);
    // Persist palette contents and selection so onCreate() can restore them.
    outState.putIntArray(KEY_COLORS, mColors);
    // Selected color is stored boxed (Serializable) rather than as a primitive int.
    outState.putSerializable(KEY_SELECTED_COLOR, mSelectedColor);
}
}
| oli107/material-range-bar | RangeBarSample/src/main/java/com/appyvet/rangebarsample/colorpicker/ColorPickerDialog.java | Java | apache-2.0 | 6,997 |
package com.lling.qiqu.commons;
import java.io.Serializable;
/**
 * Wrapper for the payload returned by the application's HTTP interfaces:
 * a status code, a human-readable description, and an arbitrary data object.
 *
 * @ClassName: ResponseInfo
 * @author lling
 * @date 2015-5-30
 */
public class ResponseInfo implements Serializable {

    private static final long serialVersionUID = 1L;

    /** Status code of the response. */
    private String code;

    /** Human-readable description accompanying the code. */
    private String desc;

    /** Arbitrary payload carried by the response; may be null. */
    private Object data;

    public String getCode() {
        return code;
    }

    public void setCode(String code) {
        this.code = code;
    }

    public String getDesc() {
        return desc;
    }

    public void setDesc(String desc) {
        this.desc = desc;
    }

    public Object getData() {
        return data;
    }

    public void setData(Object data) {
        this.data = data;
    }
}
| liuling07/QiQuYing | app/src/main/java/com/lling/qiqu/commons/ResponseInfo.java | Java | apache-2.0 | 673 |
/*
* EVE Swagger Interface
* An OpenAPI for EVE Online
*
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package net.troja.eve.esi.model;
import com.google.gson.TypeAdapter;
import com.google.gson.annotations.JsonAdapter;
import com.google.gson.annotations.SerializedName;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import net.troja.eve.esi.model.PlanetLink;
import net.troja.eve.esi.model.PlanetPin;
import net.troja.eve.esi.model.PlanetRoute;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
/**
 * Model tests for CharacterPlanetResponse
 */
public class CharacterPlanetResponseTest {
    // Instance under test; unused until the generator-emitted placeholder
    // tests below are filled in (see the file header: do not edit manually).
    private final CharacterPlanetResponse model = new CharacterPlanetResponse();

    /**
     * Model tests for CharacterPlanetResponse
     */
    @Test
    public void testCharacterPlanetResponse() {
        // TODO: test CharacterPlanetResponse
    }

    /**
     * Test the property 'routes'
     */
    @Test
    public void routesTest() {
        // TODO: test routes
    }

    /**
     * Test the property 'links'
     */
    @Test
    public void linksTest() {
        // TODO: test links
    }

    /**
     * Test the property 'pins'
     */
    @Test
    public void pinsTest() {
        // TODO: test pins
    }
}
| burberius/eve-esi | src/test/java/net/troja/eve/esi/model/CharacterPlanetResponseTest.java | Java | apache-2.0 | 1,583 |
/*
* Copyright 2006-2010 Virtual Laboratory for e-Science (www.vl-e.nl)
* Copyright 2012-2013 Netherlands eScience Center.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at the following location:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For the full license, see: LICENSE.txt (located in the root folder of this distribution).
* ---
*/
// source:
package nl.esciencecenter.vlet.vfs.lfc;
/**
 * Parsed "hostname[:port]" descriptor of a Storage Element.
 * The port is optional; when absent, {@link #getPort()} returns -1.
 */
public class SEInfo
{
    public String hostname = null;
    public int optionalPort = -1;

    /**
     * Parses a Storage Element descriptor of the form "hostname" or
     * "hostname:port".
     *
     * @throws NullPointerException  if infoStr is null or empty
     * @throws NumberFormatException if the port part is present but not a
     *                               valid integer
     */
    public SEInfo(String infoStr)
    {
        // Validate eagerly: better to fail here than at first use.
        if (infoStr == null || infoStr.isEmpty())
            throw new NullPointerException("Storage Element info string can not be null or empty");

        String[] parts = infoStr.split(":");
        if (parts.length > 0)
            hostname = parts[0];
        if (parts.length > 1)
            optionalPort = Integer.parseInt(parts[1]);
    }

    /** @return true when the descriptor carried an explicit (positive) port */
    public boolean hasExplicitPort()
    {
        return optionalPort > 0;
    }

    /** @return the explicit port, or -1 when none was given */
    public int getPort()
    {
        return optionalPort;
    }

    /** @return the host name part of the descriptor */
    public String getHostname()
    {
        return hostname;
    }
}
| NLeSC/vbrowser | source/nl.esciencecenter.vlet.vfs.lfc/src/nl/esciencecenter/vlet/vfs/lfc/SEInfo.java | Java | apache-2.0 | 1,641 |
using Esri.ArcGISRuntime.Geometry;
using Esri.ArcGISRuntime.Layers;
using Esri.ArcGISRuntime.Symbology;
using System;
using System.Collections.Generic;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Media;
namespace ArcGISRuntimeSDKDotNet_DesktopSamples.Samples
{
/// <summary>
    /// This sample demonstrates the use of the GraphicsLayer.GraphicsSource property. Here, three separate graphics source lists are initially created with random graphics. A button is used to switch the GraphicsSource property of the GraphicsLayer between the sources.
/// </summary>
/// <title>Graphics Source</title>
/// <category>Layers</category>
/// <subcategory>Graphics Layers</subcategory>
public partial class GraphicsSourceSample : UserControl
{
    private readonly Random _random = new Random();
    private GraphicsLayer _graphicsLayer;
    private List<List<Graphic>> _graphicsSources;
    private int _graphicSourceIndex;

    /// <summary>Construct Graphics Source sample control</summary>
    public GraphicsSourceSample()
    {
        InitializeComponent();

        _graphicsLayer = MyMapView.Map.Layers["graphicsLayer"] as GraphicsLayer;
        MyMapView.NavigationCompleted += MyMapView_NavigationCompleted;
    }

    // Wait for the first navigation to complete (so the view has a usable
    // extent), then build the graphics sources exactly once.
    private void MyMapView_NavigationCompleted(object sender, EventArgs e)
    {
        MyMapView.NavigationCompleted -= MyMapView_NavigationCompleted;
        CreateGraphics();
    }

    // Cycle the layer's GraphicsSource through the pre-created lists.
    private void SwitchGraphicSourceButton_Click(object sender, RoutedEventArgs e)
    {
        _graphicSourceIndex = (_graphicSourceIndex + 1) % _graphicsSources.Count;
        _graphicsLayer.GraphicsSource = _graphicsSources[_graphicSourceIndex];
    }

    // Create three List<Graphic> objects of ten random graphics each to serve
    // as alternating GraphicsSource values, then show the first one.
    private void CreateGraphics()
    {
        const int sourceCount = 3;
        const int graphicsPerSource = 10;

        _graphicsSources = new List<List<Graphic>>();
        for (int i = 0; i < sourceCount; ++i)
        {
            var source = new List<Graphic>();
            for (int j = 0; j < graphicsPerSource; ++j)
            {
                source.Add(CreateRandomGraphic());
            }
            _graphicsSources.Add(source);
        }

        _graphicSourceIndex = 0;
        _graphicsLayer.GraphicsSource = _graphicsSources[_graphicSourceIndex];
    }

    // Create a point graphic with a random location, color, and marker style.
    private Graphic CreateRandomGraphic()
    {
        var symbol = new SimpleMarkerSymbol()
        {
            Color = GetRandomColor(),
            Size = 15,
            Style = GetRandomMarkerStyle()
        };

        return new Graphic()
        {
            Geometry = GetRandomMapPoint(),
            Symbol = symbol
        };
    }

    // Utility: Generate a random MapPoint within the current extent
    private MapPoint GetRandomMapPoint()
    {
        var extent = MyMapView.Extent;
        double x = extent.XMin + (_random.NextDouble() * extent.Width);
        double y = extent.YMin + (_random.NextDouble() * extent.Height);
        return new MapPoint(x, y, MyMapView.SpatialReference);
    }

    // Utility: Generate a random System.Windows.Media.Color
    private Color GetRandomColor()
    {
        var rgb = new byte[3];
        _random.NextBytes(rgb);
        return Color.FromRgb(rgb[0], rgb[1], rgb[2]);
    }

    // Utility: Generate a random marker style
    private SimpleMarkerStyle GetRandomMarkerStyle()
    {
        return (SimpleMarkerStyle)_random.Next(0, 6);
    }
}
| Tyshark9/arcgis-runtime-samples-dotnet | src/Desktop/ArcGISRuntimeSDKDotNet_DesktopSamples/Samples/GraphicsLayers/GraphicsSourceSample.xaml.cs | C# | apache-2.0 | 3,725 |
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin developers
// Copyright (c) 2014-2015 The Bagcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "walletdb.h"
#include "base58.h"
#include "protocol.h"
#include "serialize.h"
#include "sync.h"
#include "util.h"
#include "utiltime.h"
#include "wallet.h"
#include <boost/filesystem.hpp>
#include <boost/foreach.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/thread.hpp>
using namespace boost;
using namespace std;
static uint64_t nAccountingEntryNumber = 0;
//
// CWalletDB
//
// Store the address-book label for an address under the ("name", address) key.
// Bumps nWalletDBUpdated so the flush thread notices the change.
bool CWalletDB::WriteName(const string& strAddress, const string& strName)
{
    nWalletDBUpdated++;
    return Write(make_pair(string("name"), strAddress), strName);
}
// Remove the address-book label for an address.
bool CWalletDB::EraseName(const string& strAddress)
{
    // This should only be used for sending addresses, never for receiving addresses,
    // receiving addresses must always have an address book entry if they're not change return.
    nWalletDBUpdated++;
    return Erase(make_pair(string("name"), strAddress));
}
// Store the purpose string attached to an address-book entry,
// keyed by ("purpose", address).
bool CWalletDB::WritePurpose(const string& strAddress, const string& strPurpose)
{
    nWalletDBUpdated++;
    return Write(make_pair(string("purpose"), strAddress), strPurpose);
}
// Remove a "purpose" record.
// NOTE(review): the parameter is used as the record key, which in WritePurpose
// is the address — the name strPurpose looks misleading; confirm callers pass
// the address here.
bool CWalletDB::ErasePurpose(const string& strPurpose)
{
    nWalletDBUpdated++;
    return Erase(make_pair(string("purpose"), strPurpose));
}
// Store (or overwrite) a wallet transaction record keyed by its txid.
bool CWalletDB::WriteTx(uint256 hash, const CWalletTx& wtx)
{
    nWalletDBUpdated++;
    return Write(std::make_pair(std::string("tx"), hash), wtx);
}
// Remove the wallet transaction record for the given txid.
bool CWalletDB::EraseTx(uint256 hash)
{
    nWalletDBUpdated++;
    return Erase(std::make_pair(std::string("tx"), hash));
}
// Persist an unencrypted key pair plus its metadata. The "key" value stores
// the private key together with Hash(pubkey||privkey) so loading can skip the
// expensive EC consistency check (see the matching branch in ReadKeyValue()).
bool CWalletDB::WriteKey(const CPubKey& vchPubKey, const CPrivKey& vchPrivKey, const CKeyMetadata& keyMeta)
{
    nWalletDBUpdated++;

    if (!Write(std::make_pair(std::string("keymeta"), vchPubKey),
               keyMeta, false))
        return false;

    // hash pubkey/privkey to accelerate wallet load
    std::vector<unsigned char> vchKey;
    vchKey.reserve(vchPubKey.size() + vchPrivKey.size());
    vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end());
    vchKey.insert(vchKey.end(), vchPrivKey.begin(), vchPrivKey.end());

    return Write(std::make_pair(std::string("key"), vchPubKey), std::make_pair(vchPrivKey, Hash(vchKey.begin(), vchKey.end())), false);
}
// Persist an encrypted key ("ckey") and its metadata, then remove any
// plaintext records ("key"/"wkey") for the same public key.
// fEraseUnencryptedKey is a const true here, so the erase always runs.
bool CWalletDB::WriteCryptedKey(const CPubKey& vchPubKey,
                                const std::vector<unsigned char>& vchCryptedSecret,
                                const CKeyMetadata &keyMeta)
{
    const bool fEraseUnencryptedKey = true;
    nWalletDBUpdated++;

    if (!Write(std::make_pair(std::string("keymeta"), vchPubKey),
               keyMeta))
        return false;

    if (!Write(std::make_pair(std::string("ckey"), vchPubKey), vchCryptedSecret, false))
        return false;
    if (fEraseUnencryptedKey)
    {
        Erase(std::make_pair(std::string("key"), vchPubKey));
        Erase(std::make_pair(std::string("wkey"), vchPubKey));
    }
    return true;
}
// Store wallet-encryption master key record nID (overwrite allowed).
bool CWalletDB::WriteMasterKey(unsigned int nID, const CMasterKey& kMasterKey)
{
    nWalletDBUpdated++;
    return Write(std::make_pair(std::string("mkey"), nID), kMasterKey, true);
}
// Store a redeem script keyed by its 160-bit hash.
bool CWalletDB::WriteCScript(const uint160& hash, const CScript& redeemScript)
{
    nWalletDBUpdated++;
    return Write(std::make_pair(std::string("cscript"), hash), redeemScript, false);
}
// Mark a script as watch-only; the value is the flag character '1',
// which is what the "watchs" branch of ReadKeyValue() checks for.
bool CWalletDB::WriteWatchOnly(const CScript &dest)
{
    nWalletDBUpdated++;
    return Write(std::make_pair(std::string("watchs"), dest), '1');
}
// Remove the watch-only marker for a script.
bool CWalletDB::EraseWatchOnly(const CScript &dest)
{
    nWalletDBUpdated++;
    return Erase(std::make_pair(std::string("watchs"), dest));
}
// Store the wallet's best-block chain locator.
bool CWalletDB::WriteBestBlock(const CBlockLocator& locator)
{
    nWalletDBUpdated++;
    return Write(std::string("bestblock"), locator);
}
// Load the wallet's best-block chain locator; false if the record is absent.
bool CWalletDB::ReadBestBlock(CBlockLocator& locator)
{
    return Read(std::string("bestblock"), locator);
}
// Persist the next transaction ordering index (see ReorderTransactions()).
bool CWalletDB::WriteOrderPosNext(int64_t nOrderPosNext)
{
    nWalletDBUpdated++;
    return Write(std::string("orderposnext"), nOrderPosNext);
}
// Store the wallet's default public key.
bool CWalletDB::WriteDefaultKey(const CPubKey& vchPubKey)
{
    nWalletDBUpdated++;
    return Write(std::string("defaultkey"), vchPubKey);
}
// Load the key-pool entry stored under index nPool.
bool CWalletDB::ReadPool(int64_t nPool, CKeyPool& keypool)
{
    return Read(std::make_pair(std::string("pool"), nPool), keypool);
}
// Store a key-pool entry under index nPool.
bool CWalletDB::WritePool(int64_t nPool, const CKeyPool& keypool)
{
    nWalletDBUpdated++;
    return Write(std::make_pair(std::string("pool"), nPool), keypool);
}
// Remove the key-pool entry stored under index nPool.
bool CWalletDB::ErasePool(int64_t nPool)
{
    nWalletDBUpdated++;
    return Erase(std::make_pair(std::string("pool"), nPool));
}
// Record the minimum client version required to load this wallet
// (checked against CLIENT_VERSION in LoadWallet()/FindWalletTx()).
bool CWalletDB::WriteMinVersion(int nVersion)
{
    return Write(std::string("minversion"), nVersion);
}
// Load an account record; `account` is cleared first, so a missing record
// leaves it in the null state and returns false.
bool CWalletDB::ReadAccount(const string& strAccount, CAccount& account)
{
    account.SetNull();
    return Read(make_pair(string("acc"), strAccount), account);
}
// Store an account record keyed by account name.
bool CWalletDB::WriteAccount(const string& strAccount, const CAccount& account)
{
    return Write(make_pair(string("acc"), strAccount), account);
}
// Store an accounting entry keyed by ("acentry", account, sequence number).
bool CWalletDB::WriteAccountingEntry(const uint64_t nAccEntryNum, const CAccountingEntry& acentry)
{
    return Write(std::make_pair(std::string("acentry"), std::make_pair(acentry.strAccount, nAccEntryNum)), acentry);
}
// Store an accounting entry under the next global sequence number.
// NOTE(review): increments the file-scope nAccountingEntryNumber without
// locking — presumably callers serialize via cs_wallet; confirm.
bool CWalletDB::WriteAccountingEntry(const CAccountingEntry& acentry)
{
    return WriteAccountingEntry(++nAccountingEntryNumber, acentry);
}
// Net credit/debit total across all accounting entries of `strAccount`
// ("*" sums every account).
CAmount CWalletDB::GetAccountCreditDebit(const string& strAccount)
{
    list<CAccountingEntry> entries;
    ListAccountCreditDebit(strAccount, entries);

    CAmount nCreditDebit = 0;
    BOOST_FOREACH (const CAccountingEntry& entry, entries)
        nCreditDebit += entry.nCreditDebit;

    return nCreditDebit;
}
// Append to `entries` every accounting entry for `strAccount`
// ("*" selects all accounts). Scans the B-tree with a cursor: seeks to the
// first ("acentry", account, 0) key via DB_SET_RANGE, then walks forward
// until the records stop matching.
void CWalletDB::ListAccountCreditDebit(const string& strAccount, list<CAccountingEntry>& entries)
{
    bool fAllAccounts = (strAccount == "*");

    Dbc* pcursor = GetCursor();
    if (!pcursor)
        throw runtime_error("CWalletDB::ListAccountCreditDebit() : cannot create DB cursor");
    unsigned int fFlags = DB_SET_RANGE;
    while (true)
    {
        // Read next record
        CDataStream ssKey(SER_DISK, CLIENT_VERSION);
        if (fFlags == DB_SET_RANGE)
            ssKey << std::make_pair(std::string("acentry"), std::make_pair((fAllAccounts ? string("") : strAccount), uint64_t(0)));
        CDataStream ssValue(SER_DISK, CLIENT_VERSION);
        int ret = ReadAtCursor(pcursor, ssKey, ssValue, fFlags);
        fFlags = DB_NEXT;
        if (ret == DB_NOTFOUND)
            break;
        else if (ret != 0)
        {
            pcursor->close();
            throw runtime_error("CWalletDB::ListAccountCreditDebit() : error scanning DB");
        }

        // Unserialize
        string strType;
        ssKey >> strType;
        if (strType != "acentry")
            break;
        CAccountingEntry acentry;
        ssKey >> acentry.strAccount;
        if (!fAllAccounts && acentry.strAccount != strAccount)
            break;

        // Value first, then the trailing sequence number from the key.
        ssValue >> acentry;
        ssKey >> acentry.nEntryNo;
        entries.push_back(acentry);
    }

    pcursor->close();
}
// Assign a consistent nOrderPos to every transaction and accounting entry.
// Items lacking a position (-1) are numbered in time order; items that
// already have one are shifted past any newly assigned positions. Modified
// records are written back to the database.
DBErrors CWalletDB::ReorderTransactions(CWallet* pwallet)
{
    LOCK(pwallet->cs_wallet);
    // Old wallets didn't have any defined order for transactions
    // Probably a bad idea to change the output of this

    // First: get all CWalletTx and CAccountingEntry into a sorted-by-time multimap.
    typedef pair<CWalletTx*, CAccountingEntry*> TxPair;
    typedef multimap<int64_t, TxPair > TxItems;
    TxItems txByTime;

    for (map<uint256, CWalletTx>::iterator it = pwallet->mapWallet.begin(); it != pwallet->mapWallet.end(); ++it)
    {
        CWalletTx* wtx = &((*it).second);
        txByTime.insert(make_pair(wtx->nTimeReceived, TxPair(wtx, (CAccountingEntry*)0)));
    }
    list<CAccountingEntry> acentries;
    ListAccountCreditDebit("", acentries);
    BOOST_FOREACH(CAccountingEntry& entry, acentries)
    {
        txByTime.insert(make_pair(entry.nTime, TxPair((CWalletTx*)0, &entry)));
    }

    int64_t& nOrderPosNext = pwallet->nOrderPosNext;
    nOrderPosNext = 0;
    std::vector<int64_t> nOrderPosOffsets;
    for (TxItems::iterator it = txByTime.begin(); it != txByTime.end(); ++it)
    {
        CWalletTx *const pwtx = (*it).second.first;
        CAccountingEntry *const pacentry = (*it).second.second;
        // Exactly one of pwtx/pacentry is non-null per multimap entry.
        int64_t& nOrderPos = (pwtx != 0) ? pwtx->nOrderPos : pacentry->nOrderPos;

        if (nOrderPos == -1)
        {
            nOrderPos = nOrderPosNext++;
            nOrderPosOffsets.push_back(nOrderPos);

            if (pwtx)
            {
                if (!WriteTx(pwtx->GetHash(), *pwtx))
                    return DB_LOAD_FAIL;
            }
            else
                if (!WriteAccountingEntry(pacentry->nEntryNo, *pacentry))
                    return DB_LOAD_FAIL;
        }
        else
        {
            // Shift existing positions past every newly inserted one below them.
            int64_t nOrderPosOff = 0;
            BOOST_FOREACH(const int64_t& nOffsetStart, nOrderPosOffsets)
            {
                if (nOrderPos >= nOffsetStart)
                    ++nOrderPosOff;
            }
            nOrderPos += nOrderPosOff;
            nOrderPosNext = std::max(nOrderPosNext, nOrderPos + 1);

            if (!nOrderPosOff)
                continue;

            // Since we're changing the order, write it back
            if (pwtx)
            {
                if (!WriteTx(pwtx->GetHash(), *pwtx))
                    return DB_LOAD_FAIL;
            }
            else
                if (!WriteAccountingEntry(pacentry->nEntryNo, *pacentry))
                    return DB_LOAD_FAIL;
        }
    }
    WriteOrderPosNext(nOrderPosNext);

    return DB_LOAD_OK;
}
// Statistics and fix-up state accumulated while scanning wallet records
// (filled in by ReadKeyValue(), consumed by LoadWallet()/Recover()).
class CWalletScanState {
public:
    unsigned int nKeys;             // plaintext "key"/"wkey" records seen
    unsigned int nCKeys;            // encrypted "ckey" records seen
    unsigned int nKeyMeta;          // "keymeta" records seen
    bool fIsEncrypted;              // true once any encrypted key was loaded
    bool fAnyUnordered;             // true if any tx/acentry lacks nOrderPos
    int nFileVersion;               // wallet "version" record, if present
    vector<uint256> vWalletUpgrade; // txids whose records need rewriting

    CWalletScanState() {
        nKeys = nCKeys = nKeyMeta = 0;
        fIsEncrypted = false;
        fAnyUnordered = false;
        nFileVersion = 0;
    }
};
// Deserialize one (key, value) wallet record into pwallet, dispatching on the
// record's type tag and updating the scan statistics in wss. Returns false on
// a corrupt or unreadable record (with strErr set where a message is useful);
// callers decide whether that is fatal (see IsKeyType() and LoadWallet()).
// Unknown record types are silently ignored.
bool
ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
             CWalletScanState &wss, string& strType, string& strErr)
{
    try {
        // Unserialize
        // Taking advantage of the fact that pair serialization
        // is just the two items serialized one after the other
        ssKey >> strType;
        if (strType == "name")
        {
            string strAddress;
            ssKey >> strAddress;
            ssValue >> pwallet->mapAddressBook[CBitcoinAddress(strAddress).Get()].name;
        }
        else if (strType == "purpose")
        {
            string strAddress;
            ssKey >> strAddress;
            ssValue >> pwallet->mapAddressBook[CBitcoinAddress(strAddress).Get()].purpose;
        }
        else if (strType == "tx")
        {
            uint256 hash;
            ssKey >> hash;
            CWalletTx wtx;
            ssValue >> wtx;
            // Reject records whose contents do not hash to their key.
            CValidationState state;
            if (!(CheckTransaction(wtx, state) && (wtx.GetHash() == hash) && state.IsValid()))
                return false;

            // Undo serialize changes in 31600
            if (31404 <= wtx.fTimeReceivedIsTxTime && wtx.fTimeReceivedIsTxTime <= 31703)
            {
                if (!ssValue.empty())
                {
                    char fTmp;
                    char fUnused;
                    ssValue >> fTmp >> fUnused >> wtx.strFromAccount;
                    strErr = strprintf("LoadWallet() upgrading tx ver=%d %d '%s' %s",
                                       wtx.fTimeReceivedIsTxTime, fTmp, wtx.strFromAccount, hash.ToString());
                    wtx.fTimeReceivedIsTxTime = fTmp;
                }
                else
                {
                    strErr = strprintf("LoadWallet() repairing tx ver=%d %s", wtx.fTimeReceivedIsTxTime, hash.ToString());
                    wtx.fTimeReceivedIsTxTime = 0;
                }
                wss.vWalletUpgrade.push_back(hash);
            }

            if (wtx.nOrderPos == -1)
                wss.fAnyUnordered = true;

            pwallet->AddToWallet(wtx, true);
        }
        else if (strType == "acentry")
        {
            string strAccount;
            ssKey >> strAccount;
            uint64_t nNumber;
            ssKey >> nNumber;
            // Keep the global sequence counter ahead of every stored entry.
            if (nNumber > nAccountingEntryNumber)
                nAccountingEntryNumber = nNumber;

            if (!wss.fAnyUnordered)
            {
                CAccountingEntry acentry;
                ssValue >> acentry;
                if (acentry.nOrderPos == -1)
                    wss.fAnyUnordered = true;
            }
        }
        else if (strType == "watchs")
        {
            CScript script;
            ssKey >> script;
            char fYes;
            ssValue >> fYes;
            if (fYes == '1')
                pwallet->LoadWatchOnly(script);

            // Watch-only addresses have no birthday information for now,
            // so set the wallet birthday to the beginning of time.
            pwallet->nTimeFirstKey = 1;
        }
        else if (strType == "key" || strType == "wkey")
        {
            CPubKey vchPubKey;
            ssKey >> vchPubKey;
            if (!vchPubKey.IsValid())
            {
                strErr = "Error reading wallet database: CPubKey corrupt";
                return false;
            }
            CKey key;
            CPrivKey pkey;
            uint256 hash = 0;

            if (strType == "key")
            {
                wss.nKeys++;
                ssValue >> pkey;
            } else {
                CWalletKey wkey;
                ssValue >> wkey;
                pkey = wkey.vchPrivKey;
            }

            // Old wallets store keys as "key" [pubkey] => [privkey]
            // ... which was slow for wallets with lots of keys, because the public key is re-derived from the private key
            // using EC operations as a checksum.
            // Newer wallets store keys as "key"[pubkey] => [privkey][hash(pubkey,privkey)], which is much faster while
            // remaining backwards-compatible.
            try
            {
                ssValue >> hash;
            }
            catch(...){}

            bool fSkipCheck = false;

            if (hash != 0)
            {
                // hash pubkey/privkey to accelerate wallet load
                std::vector<unsigned char> vchKey;
                vchKey.reserve(vchPubKey.size() + pkey.size());
                vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end());
                vchKey.insert(vchKey.end(), pkey.begin(), pkey.end());

                if (Hash(vchKey.begin(), vchKey.end()) != hash)
                {
                    strErr = "Error reading wallet database: CPubKey/CPrivKey corrupt";
                    return false;
                }

                fSkipCheck = true;
            }

            if (!key.Load(pkey, vchPubKey, fSkipCheck))
            {
                strErr = "Error reading wallet database: CPrivKey corrupt";
                return false;
            }
            if (!pwallet->LoadKey(key, vchPubKey))
            {
                strErr = "Error reading wallet database: LoadKey failed";
                return false;
            }
        }
        else if (strType == "mkey")
        {
            unsigned int nID;
            ssKey >> nID;
            CMasterKey kMasterKey;
            ssValue >> kMasterKey;
            if(pwallet->mapMasterKeys.count(nID) != 0)
            {
                strErr = strprintf("Error reading wallet database: duplicate CMasterKey id %u", nID);
                return false;
            }
            pwallet->mapMasterKeys[nID] = kMasterKey;
            if (pwallet->nMasterKeyMaxID < nID)
                pwallet->nMasterKeyMaxID = nID;
        }
        else if (strType == "ckey")
        {
            vector<unsigned char> vchPubKey;
            ssKey >> vchPubKey;
            vector<unsigned char> vchPrivKey;
            ssValue >> vchPrivKey;
            wss.nCKeys++;

            if (!pwallet->LoadCryptedKey(vchPubKey, vchPrivKey))
            {
                strErr = "Error reading wallet database: LoadCryptedKey failed";
                return false;
            }
            wss.fIsEncrypted = true;
        }
        else if (strType == "keymeta")
        {
            CPubKey vchPubKey;
            ssKey >> vchPubKey;
            CKeyMetadata keyMeta;
            ssValue >> keyMeta;
            wss.nKeyMeta++;

            pwallet->LoadKeyMetadata(vchPubKey, keyMeta);

            // find earliest key creation time, as wallet birthday
            if (!pwallet->nTimeFirstKey ||
                (keyMeta.nCreateTime < pwallet->nTimeFirstKey))
                pwallet->nTimeFirstKey = keyMeta.nCreateTime;
        }
        else if (strType == "defaultkey")
        {
            ssValue >> pwallet->vchDefaultKey;
        }
        else if (strType == "pool")
        {
            int64_t nIndex;
            ssKey >> nIndex;
            CKeyPool keypool;
            ssValue >> keypool;
            pwallet->setKeyPool.insert(nIndex);

            // If no metadata exists yet, create a default with the pool key's
            // creation time. Note that this may be overwritten by actually
            // stored metadata for that key later, which is fine.
            CKeyID keyid = keypool.vchPubKey.GetID();
            if (pwallet->mapKeyMetadata.count(keyid) == 0)
                pwallet->mapKeyMetadata[keyid] = CKeyMetadata(keypool.nTime);
        }
        else if (strType == "version")
        {
            ssValue >> wss.nFileVersion;
            if (wss.nFileVersion == 10300)
                wss.nFileVersion = 300;
        }
        else if (strType == "cscript")
        {
            uint160 hash;
            ssKey >> hash;
            CScript script;
            ssValue >> script;
            if (!pwallet->LoadCScript(script))
            {
                strErr = "Error reading wallet database: LoadCScript failed";
                return false;
            }
        }
        else if (strType == "orderposnext")
        {
            ssValue >> pwallet->nOrderPosNext;
        }
        else if (strType == "destdata")
        {
            std::string strAddress, strKey, strValue;
            ssKey >> strAddress;
            ssKey >> strKey;
            ssValue >> strValue;
            if (!pwallet->LoadDestData(CBitcoinAddress(strAddress).Get(), strKey, strValue))
            {
                strErr = "Error reading wallet database: LoadDestData failed";
                return false;
            }
        }
    } catch (...)
    {
        // Any deserialization failure is reported as a bad record.
        return false;
    }
    return true;
}
// True for record types that carry private-key material; corruption in any
// of these is treated as fatal during a wallet load (see LoadWallet()).
static bool IsKeyType(std::string strType)
{
    static const char* const kKeyTypes[] = { "key", "wkey", "mkey", "ckey" };
    for (size_t i = 0; i < sizeof(kKeyTypes) / sizeof(kKeyTypes[0]); ++i)
    {
        if (strType == kKeyTypes[i])
            return true;
    }
    return false;
}
// Load every record from wallet.dat into pwallet via ReadKeyValue().
// Corrupt key-material records yield DB_CORRUPT; other bad records degrade
// to DB_NONCRITICAL_ERROR (and schedule a -rescan for bad tx records).
// May also return DB_TOO_NEW (minversion too high) or DB_NEED_REWRITE for
// old encrypted wallet formats.
DBErrors CWalletDB::LoadWallet(CWallet* pwallet)
{
    pwallet->vchDefaultKey = CPubKey();
    CWalletScanState wss;
    bool fNoncriticalErrors = false;
    DBErrors result = DB_LOAD_OK;

    try {
        LOCK(pwallet->cs_wallet);
        int nMinVersion = 0;
        if (Read((string)"minversion", nMinVersion))
        {
            if (nMinVersion > CLIENT_VERSION)
                return DB_TOO_NEW;
            pwallet->LoadMinVersion(nMinVersion);
        }

        // Get cursor
        Dbc* pcursor = GetCursor();
        if (!pcursor)
        {
            LogPrintf("Error getting wallet database cursor\n");
            return DB_CORRUPT;
        }

        while (true)
        {
            // Read next record
            CDataStream ssKey(SER_DISK, CLIENT_VERSION);
            CDataStream ssValue(SER_DISK, CLIENT_VERSION);
            int ret = ReadAtCursor(pcursor, ssKey, ssValue);
            if (ret == DB_NOTFOUND)
                break;
            else if (ret != 0)
            {
                LogPrintf("Error reading next record from wallet database\n");
                return DB_CORRUPT;
            }

            // Try to be tolerant of single corrupt records:
            string strType, strErr;
            if (!ReadKeyValue(pwallet, ssKey, ssValue, wss, strType, strErr))
            {
                // losing keys is considered a catastrophic error, anything else
                // we assume the user can live with:
                if (IsKeyType(strType))
                    result = DB_CORRUPT;
                else
                {
                    // Leave other errors alone, if we try to fix them we might make things worse.
                    fNoncriticalErrors = true; // ... but do warn the user there is something wrong.
                    if (strType == "tx")
                        // Rescan if there is a bad transaction record:
                        SoftSetBoolArg("-rescan", true);
                }
            }
            if (!strErr.empty())
                LogPrintf("%s\n", strErr);
        }
        pcursor->close();
    }
    catch (boost::thread_interrupted) {
        throw;
    }
    catch (...) {
        result = DB_CORRUPT;
    }

    if (fNoncriticalErrors && result == DB_LOAD_OK)
        result = DB_NONCRITICAL_ERROR;

    // Any wallet corruption at all: skip any rewriting or
    // upgrading, we don't want to make it worse.
    if (result != DB_LOAD_OK)
        return result;

    LogPrintf("nFileVersion = %d\n", wss.nFileVersion);

    LogPrintf("Keys: %u plaintext, %u encrypted, %u w/ metadata, %u total\n",
              wss.nKeys, wss.nCKeys, wss.nKeyMeta, wss.nKeys + wss.nCKeys);

    // nTimeFirstKey is only reliable if all keys have metadata
    if ((wss.nKeys + wss.nCKeys) != wss.nKeyMeta)
        pwallet->nTimeFirstKey = 1; // 0 would be considered 'no value'

    // Re-serialize any tx records upgraded by ReadKeyValue().
    BOOST_FOREACH(uint256 hash, wss.vWalletUpgrade)
        WriteTx(hash, pwallet->mapWallet[hash]);

    // Rewrite encrypted wallets of versions 0.4.0 and 0.5.0rc:
    if (wss.fIsEncrypted && (wss.nFileVersion == 40000 || wss.nFileVersion == 50000))
        return DB_NEED_REWRITE;

    if (wss.nFileVersion < CLIENT_VERSION) // Update
        WriteVersion(CLIENT_VERSION);

    if (wss.fAnyUnordered)
        result = ReorderTransactions(pwallet);

    return result;
}
// Collect the hash and a copy of every "tx" record in the database, without
// loading anything else into the wallet. Used by ZapWalletTx().
// Note: fNoncriticalErrors is initialized but never set in this function, so
// the DB_NONCRITICAL_ERROR branch at the end is currently unreachable.
DBErrors CWalletDB::FindWalletTx(CWallet* pwallet, vector<uint256>& vTxHash, vector<CWalletTx>& vWtx)
{
    pwallet->vchDefaultKey = CPubKey();
    bool fNoncriticalErrors = false;
    DBErrors result = DB_LOAD_OK;

    try {
        LOCK(pwallet->cs_wallet);
        int nMinVersion = 0;
        if (Read((string)"minversion", nMinVersion))
        {
            if (nMinVersion > CLIENT_VERSION)
                return DB_TOO_NEW;
            pwallet->LoadMinVersion(nMinVersion);
        }

        // Get cursor
        Dbc* pcursor = GetCursor();
        if (!pcursor)
        {
            LogPrintf("Error getting wallet database cursor\n");
            return DB_CORRUPT;
        }

        while (true)
        {
            // Read next record
            CDataStream ssKey(SER_DISK, CLIENT_VERSION);
            CDataStream ssValue(SER_DISK, CLIENT_VERSION);
            int ret = ReadAtCursor(pcursor, ssKey, ssValue);
            if (ret == DB_NOTFOUND)
                break;
            else if (ret != 0)
            {
                LogPrintf("Error reading next record from wallet database\n");
                return DB_CORRUPT;
            }

            string strType;
            ssKey >> strType;
            if (strType == "tx") {
                uint256 hash;
                ssKey >> hash;

                CWalletTx wtx;
                ssValue >> wtx;

                vTxHash.push_back(hash);
                vWtx.push_back(wtx);
            }
        }
        pcursor->close();
    }
    catch (boost::thread_interrupted) {
        throw;
    }
    catch (...) {
        result = DB_CORRUPT;
    }

    if (fNoncriticalErrors && result == DB_LOAD_OK)
        result = DB_NONCRITICAL_ERROR;

    return result;
}
// Delete every transaction record from the wallet database; the removed
// transactions are returned to the caller via vWtx.
DBErrors CWalletDB::ZapWalletTx(CWallet* pwallet, vector<CWalletTx>& vWtx)
{
    // build list of wallet TXs
    vector<uint256> vTxHash;
    DBErrors err = FindWalletTx(pwallet, vTxHash, vWtx);
    if (err != DB_LOAD_OK)
        return err;

    // erase each wallet TX
    BOOST_FOREACH (uint256& hash, vTxHash) {
        if (!EraseTx(hash))
            return DB_CORRUPT;
    }

    return DB_LOAD_OK;
}
// Background thread: polls nWalletDBUpdated every 500ms and, once the wallet
// has been idle for 2 seconds and no database handles are in use, checkpoints
// and closes the wallet file so it is self-contained on disk.
// Disabled with -flushwallet=0. The static fOneThread flag ensures only the
// first invocation keeps running (no locking; subsequent starts just return).
void ThreadFlushWalletDB(const string& strFile)
{
    // Make this thread recognisable as the wallet flushing thread
    RenameThread("bagcoin-wallet");

    static bool fOneThread;
    if (fOneThread)
        return;
    fOneThread = true;
    if (!GetBoolArg("-flushwallet", true))
        return;

    unsigned int nLastSeen = nWalletDBUpdated;
    unsigned int nLastFlushed = nWalletDBUpdated;
    int64_t nLastWalletUpdate = GetTime();
    while (true)
    {
        MilliSleep(500);

        if (nLastSeen != nWalletDBUpdated)
        {
            nLastSeen = nWalletDBUpdated;
            nLastWalletUpdate = GetTime();
        }

        if (nLastFlushed != nWalletDBUpdated && GetTime() - nLastWalletUpdate >= 2)
        {
            TRY_LOCK(bitdb.cs_db,lockDb);
            if (lockDb)
            {
                // Don't do this if any databases are in use
                int nRefCount = 0;
                map<string, int>::iterator mi = bitdb.mapFileUseCount.begin();
                while (mi != bitdb.mapFileUseCount.end())
                {
                    nRefCount += (*mi).second;
                    mi++;
                }

                if (nRefCount == 0)
                {
                    boost::this_thread::interruption_point();
                    map<string, int>::iterator mi = bitdb.mapFileUseCount.find(strFile);
                    if (mi != bitdb.mapFileUseCount.end())
                    {
                        LogPrint("db", "Flushing wallet.dat\n");
                        nLastFlushed = nWalletDBUpdated;
                        int64_t nStart = GetTimeMillis();

                        // Flush wallet.dat so it's self contained
                        bitdb.CloseDb(strFile);
                        bitdb.CheckpointLSN(strFile);

                        bitdb.mapFileUseCount.erase(mi++);
                        LogPrint("db", "Flushed wallet.dat %dms\n", GetTimeMillis() - nStart);
                    }
                }
            }
        }
    }
}
// Copy wallet.dat to strDest (a file path, or a directory to copy into).
// Spins (100ms sleeps) until no other code holds the database file open,
// then checkpoints, closes, and copies it. Returns false for wallets that
// are not file-backed or when the copy fails.
bool BackupWallet(const CWallet& wallet, const string& strDest)
{
    if (!wallet.fFileBacked)
        return false;
    while (true)
    {
        {
            LOCK(bitdb.cs_db);
            if (!bitdb.mapFileUseCount.count(wallet.strWalletFile) || bitdb.mapFileUseCount[wallet.strWalletFile] == 0)
            {
                // Flush log data to the dat file
                bitdb.CloseDb(wallet.strWalletFile);
                bitdb.CheckpointLSN(wallet.strWalletFile);
                bitdb.mapFileUseCount.erase(wallet.strWalletFile);

                // Copy wallet.dat
                filesystem::path pathSrc = GetDataDir() / wallet.strWalletFile;
                filesystem::path pathDest(strDest);
                if (filesystem::is_directory(pathDest))
                    pathDest /= wallet.strWalletFile;

                try {
#if BOOST_VERSION >= 104000
                    filesystem::copy_file(pathSrc, pathDest, filesystem::copy_option::overwrite_if_exists);
#else
                    filesystem::copy_file(pathSrc, pathDest);
#endif
                    LogPrintf("copied wallet.dat to %s\n", pathDest.string());
                    return true;
                } catch(const filesystem::filesystem_error &e) {
                    LogPrintf("error copying wallet.dat to %s - %s\n", pathDest.string(), e.what());
                    return false;
                }
            }
        }
        MilliSleep(100);
    }
    return false;
}
//
// Try to (very carefully!) recover wallet.dat if there is a problem.
//
// Returns true only if aggressive salvage reported success AND every
// salvaged record was re-inserted into the fresh database. When fOnlyKeys
// is set, records that fail to parse or do not carry key material are
// skipped (see IsKeyType()).
bool CWalletDB::Recover(CDBEnv& dbenv, std::string filename, bool fOnlyKeys)
{
    // Recovery procedure:
    // move wallet.dat to wallet.timestamp.bak
    // Call Salvage with fAggressive=true to
    // get as much data as possible.
    // Rewrite salvaged data to wallet.dat
    // Set -rescan so any missing transactions will be
    // found.
    int64_t now = GetTime();
    std::string newFilename = strprintf("wallet.%d.bak", now);

    int result = dbenv.dbenv.dbrename(NULL, filename.c_str(), NULL,
                                      newFilename.c_str(), DB_AUTO_COMMIT);
    if (result == 0)
        LogPrintf("Renamed %s to %s\n", filename, newFilename);
    else
    {
        LogPrintf("Failed to rename %s to %s\n", filename, newFilename);
        return false;
    }

    std::vector<CDBEnv::KeyValPair> salvagedData;
    bool allOK = dbenv.Salvage(newFilename, true, salvagedData);
    if (salvagedData.empty())
    {
        LogPrintf("Salvage(aggressive) found no records in %s.\n", newFilename);
        return false;
    }
    LogPrintf("Salvage(aggressive) found %u records\n", salvagedData.size());

    bool fSuccess = allOK;
    boost::scoped_ptr<Db> pdbCopy(new Db(&dbenv.dbenv, 0));
    int ret = pdbCopy->open(NULL, // Txn pointer
                            filename.c_str(), // Filename
                            "main", // Logical db name
                            DB_BTREE, // Database type
                            DB_CREATE, // Flags
                            0);
    if (ret > 0)
    {
        LogPrintf("Cannot create database file %s\n", filename);
        return false;
    }
    CWallet dummyWallet;
    CWalletScanState wss;

    DbTxn* ptxn = dbenv.TxnBegin();
    BOOST_FOREACH(CDBEnv::KeyValPair& row, salvagedData)
    {
        if (fOnlyKeys)
        {
            // Parse the record into a throwaway wallet just to classify it.
            CDataStream ssKey(row.first, SER_DISK, CLIENT_VERSION);
            CDataStream ssValue(row.second, SER_DISK, CLIENT_VERSION);
            string strType, strErr;
            bool fReadOK = ReadKeyValue(&dummyWallet, ssKey, ssValue,
                                        wss, strType, strErr);
            if (!IsKeyType(strType))
                continue;
            if (!fReadOK)
            {
                LogPrintf("WARNING: CWalletDB::Recover skipping %s: %s\n", strType, strErr);
                continue;
            }
        }
        Dbt datKey(&row.first[0], row.first.size());
        Dbt datValue(&row.second[0], row.second.size());
        int ret2 = pdbCopy->put(ptxn, &datKey, &datValue, DB_NOOVERWRITE);
        if (ret2 > 0)
            fSuccess = false;
    }
    ptxn->commit(0);
    pdbCopy->close(0);

    return fSuccess;
}
// Convenience overload: recover every salvageable record type, not just keys.
bool CWalletDB::Recover(CDBEnv& dbenv, std::string filename)
{
    const bool fOnlyKeys = false;
    return Recover(dbenv, filename, fOnlyKeys);
}
// Persist one (address, key) -> value destination-data record and bump the
// wallet-updated counter so flush logic notices the change.
bool CWalletDB::WriteDestData(const std::string &address, const std::string &key, const std::string &value)
{
    nWalletDBUpdated++;
    const std::pair<std::string, std::pair<std::string, std::string> > dbKey =
        std::make_pair(std::string("destdata"), std::make_pair(address, key));
    return Write(dbKey, value);
}
// Remove the (address, key) destination-data record and bump the
// wallet-updated counter so flush logic notices the change.
bool CWalletDB::EraseDestData(const std::string &address, const std::string &key)
{
    nWalletDBUpdated++;
    const std::pair<std::string, std::pair<std::string, std::string> > dbKey =
        std::make_pair(std::string("destdata"), std::make_pair(address, key));
    return Erase(dbKey);
}
| bagcoin/bagcoin | src/walletdb.cpp | C++ | apache-2.0 | 31,407 |
define([
    './user-settings'
], function (userSettings) {
    // Sandbox-backed mediator between the user-settings component and the
    // rest of the application: wires inbound events on init() and exposes
    // thin emit helpers for outbound messages.
    var context;

    function init(thisContext) {
        context = thisContext;
        context.sandbox.on('settings.close', userSettings.close);
        context.sandbox.on('settings.open', userSettings.open);
        context.sandbox.on('menu.opening', userSettings.handleMenuOpening);
        context.sandbox.on('data.clear.all', userSettings.clear);
    }

    function publishMessage(params) {
        context.sandbox.emit('message.publish', params);
    }

    function publishOpening(params) {
        context.sandbox.emit('menu.opening', params);
    }

    function zoomToLocation(params) {
        context.sandbox.emit('map.zoom.toLocation', params);
    }

    function changeBasemap(params) {
        context.sandbox.emit('map.basemap.change', params);
    }

    function closeUserSettings() {
        context.sandbox.emit('settings.close');
    }

    function openUserSettings() {
        context.sandbox.emit('settings.open');
    }

    return {
        init: init,
        publishMessage: publishMessage,
        publishOpening: publishOpening,
        zoomToLocation: zoomToLocation,
        changeBasemap: changeBasemap,
        closeUserSettings: closeUserSettings,
        openUserSettings: openUserSettings
    };
});
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.lightsail.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Describes the source of a CloudFormation stack record (i.e., the export snapshot record).
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CloudFormationStackRecordSourceInfo"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CloudFormationStackRecordSourceInfo implements Serializable, Cloneable, StructuredPojo {
/**
* <p>
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* </p>
*/
private String resourceType;
/**
* <p>
* The name of the record.
* </p>
*/
private String name;
/**
* <p>
* The Amazon Resource Name (ARN) of the export snapshot record.
* </p>
*/
private String arn;
/**
* <p>
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* </p>
*
* @param resourceType
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* @see CloudFormationStackRecordSourceType
*/
public void setResourceType(String resourceType) {
this.resourceType = resourceType;
}
/**
* <p>
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* </p>
*
* @return The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* @see CloudFormationStackRecordSourceType
*/
public String getResourceType() {
return this.resourceType;
}
/**
* <p>
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* </p>
*
* @param resourceType
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* @return Returns a reference to this object so that method calls can be chained together.
* @see CloudFormationStackRecordSourceType
*/
public CloudFormationStackRecordSourceInfo withResourceType(String resourceType) {
setResourceType(resourceType);
return this;
}
/**
* <p>
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* </p>
*
* @param resourceType
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* @return Returns a reference to this object so that method calls can be chained together.
* @see CloudFormationStackRecordSourceType
*/
public CloudFormationStackRecordSourceInfo withResourceType(CloudFormationStackRecordSourceType resourceType) {
this.resourceType = resourceType.toString();
return this;
}
/**
* <p>
* The name of the record.
* </p>
*
* @param name
* The name of the record.
*/
public void setName(String name) {
this.name = name;
}
/**
* <p>
* The name of the record.
* </p>
*
* @return The name of the record.
*/
public String getName() {
return this.name;
}
/**
* <p>
* The name of the record.
* </p>
*
* @param name
* The name of the record.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CloudFormationStackRecordSourceInfo withName(String name) {
setName(name);
return this;
}
/**
* <p>
* The Amazon Resource Name (ARN) of the export snapshot record.
* </p>
*
* @param arn
* The Amazon Resource Name (ARN) of the export snapshot record.
*/
public void setArn(String arn) {
this.arn = arn;
}
/**
* <p>
* The Amazon Resource Name (ARN) of the export snapshot record.
* </p>
*
* @return The Amazon Resource Name (ARN) of the export snapshot record.
*/
public String getArn() {
return this.arn;
}
/**
* <p>
* The Amazon Resource Name (ARN) of the export snapshot record.
* </p>
*
* @param arn
* The Amazon Resource Name (ARN) of the export snapshot record.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CloudFormationStackRecordSourceInfo withArn(String arn) {
setArn(arn);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getResourceType() != null)
sb.append("ResourceType: ").append(getResourceType()).append(",");
if (getName() != null)
sb.append("Name: ").append(getName()).append(",");
if (getArn() != null)
sb.append("Arn: ").append(getArn());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof CloudFormationStackRecordSourceInfo == false)
return false;
CloudFormationStackRecordSourceInfo other = (CloudFormationStackRecordSourceInfo) obj;
if (other.getResourceType() == null ^ this.getResourceType() == null)
return false;
if (other.getResourceType() != null && other.getResourceType().equals(this.getResourceType()) == false)
return false;
if (other.getName() == null ^ this.getName() == null)
return false;
if (other.getName() != null && other.getName().equals(this.getName()) == false)
return false;
if (other.getArn() == null ^ this.getArn() == null)
return false;
if (other.getArn() != null && other.getArn().equals(this.getArn()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getResourceType() == null) ? 0 : getResourceType().hashCode());
hashCode = prime * hashCode + ((getName() == null) ? 0 : getName().hashCode());
hashCode = prime * hashCode + ((getArn() == null) ? 0 : getArn().hashCode());
return hashCode;
}
@Override
public CloudFormationStackRecordSourceInfo clone() {
try {
return (CloudFormationStackRecordSourceInfo) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.lightsail.model.transform.CloudFormationStackRecordSourceInfoMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}
| jentfoo/aws-sdk-java | aws-java-sdk-lightsail/src/main/java/com/amazonaws/services/lightsail/model/CloudFormationStackRecordSourceInfo.java | Java | apache-2.0 | 8,005 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.rocketmq.common;
import org.apache.rocketmq.common.annotation.ImportantField;
import org.apache.rocketmq.common.constant.PermName;
import org.apache.rocketmq.remoting.common.RemotingUtil;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
 * Configuration holder for a RocketMQ broker instance. Field defaults are the
 * effective broker defaults; fields marked {@code @ImportantField} are the
 * ones surfaced prominently in configuration dumps.
 */
public class BrokerConfig {
    private String rocketmqHome = System.getProperty(MixAll.ROCKETMQ_HOME_PROPERTY, System.getenv(MixAll.ROCKETMQ_HOME_ENV));
    @ImportantField
    private String namesrvAddr = System.getProperty(MixAll.NAMESRV_ADDR_PROPERTY, System.getenv(MixAll.NAMESRV_ADDR_ENV));
    @ImportantField
    private String brokerIP1 = RemotingUtil.getLocalAddress();
    private String brokerIP2 = RemotingUtil.getLocalAddress();
    @ImportantField
    private String brokerName = localHostName();
    @ImportantField
    private String brokerClusterName = "DefaultCluster";
    @ImportantField
    private long brokerId = MixAll.MASTER_ID;
    /**
     * Broker permission bits (read/write, etc.).
     */
    private int brokerPermission = PermName.PERM_READ | PermName.PERM_WRITE;
    private int defaultTopicQueueNums = 8;
    @ImportantField
    private boolean autoCreateTopicEnable = true;
    private boolean clusterTopicEnable = true;
    private boolean brokerTopicEnable = true;
    @ImportantField
    private boolean autoCreateSubscriptionGroup = true;
    private String messageStorePlugIn = "";
    private int sendMessageThreadPoolNums = 1; //16 + Runtime.getRuntime().availableProcessors() * 4;
    private int pullMessageThreadPoolNums = 16 + Runtime.getRuntime().availableProcessors() * 2;
    private int adminBrokerThreadPoolNums = 16;
    private int clientManageThreadPoolNums = 32;
    private int consumerManageThreadPoolNums = 32;
    private int flushConsumerOffsetInterval = 1000 * 5;
    private int flushConsumerOffsetHistoryInterval = 1000 * 60;
    @ImportantField
    private boolean rejectTransactionMessage = false;
    @ImportantField
    private boolean fetchNamesrvAddrByAddressServer = false;
    private int sendThreadPoolQueueCapacity = 10000;
    private int pullThreadPoolQueueCapacity = 100000;
    private int clientManagerThreadPoolQueueCapacity = 1000000;
    private int consumerManagerThreadPoolQueueCapacity = 1000000;
    private int filterServerNums = 0;
    private boolean longPollingEnable = true;
    private long shortPollingTimeMills = 1000;
    private boolean notifyConsumerIdsChangedEnable = true;
    private boolean highSpeedMode = false;
    private boolean commercialEnable = true;
    private int commercialTimerCount = 1;
    private int commercialTransCount = 1;
    private int commercialBigCount = 1;
    private int commercialBaseCount = 1;
    private boolean transferMsgByHeap = true;
    private int maxDelayTime = 40;
    // TODO(review): purpose of this region id is unclear from this class alone — confirm usage.
    private String regionId = MixAll.DEFAULT_TRACE_REGION_ID;
    private int registerBrokerTimeoutMills = 6000;
    private boolean slaveReadEnable = false;
    private boolean disableConsumeIfConsumerReadSlowly = false;
    private long consumerFallbehindThreshold = 1024L * 1024 * 1024 * 16;
    private long waitTimeMillsInSendQueue = 200;
    /**
     * Timestamp from which send requests are accepted.
     * TODO(review): confirm where/when this gets set.
     */
    private long startAcceptSendRequestTimeStamp = 0L;
    private boolean traceOn = true;

    /**
     * Best-effort local host name; falls back to a fixed placeholder when the
     * lookup fails, so broker startup never aborts here.
     */
    public static String localHostName() {
        try {
            return InetAddress.getLocalHost().getHostName();
        } catch (UnknownHostException e) {
            e.printStackTrace();
        }
        return "DEFAULT_BROKER";
    }

    // --- Plain accessors below: getters/setters with no extra logic. ---

    public boolean isTraceOn() {
        return traceOn;
    }

    public void setTraceOn(final boolean traceOn) {
        this.traceOn = traceOn;
    }

    public long getStartAcceptSendRequestTimeStamp() {
        return startAcceptSendRequestTimeStamp;
    }

    public void setStartAcceptSendRequestTimeStamp(final long startAcceptSendRequestTimeStamp) {
        this.startAcceptSendRequestTimeStamp = startAcceptSendRequestTimeStamp;
    }

    public long getWaitTimeMillsInSendQueue() {
        return waitTimeMillsInSendQueue;
    }

    public void setWaitTimeMillsInSendQueue(final long waitTimeMillsInSendQueue) {
        this.waitTimeMillsInSendQueue = waitTimeMillsInSendQueue;
    }

    public long getConsumerFallbehindThreshold() {
        return consumerFallbehindThreshold;
    }

    public void setConsumerFallbehindThreshold(final long consumerFallbehindThreshold) {
        this.consumerFallbehindThreshold = consumerFallbehindThreshold;
    }

    public boolean isDisableConsumeIfConsumerReadSlowly() {
        return disableConsumeIfConsumerReadSlowly;
    }

    public void setDisableConsumeIfConsumerReadSlowly(final boolean disableConsumeIfConsumerReadSlowly) {
        this.disableConsumeIfConsumerReadSlowly = disableConsumeIfConsumerReadSlowly;
    }

    public boolean isSlaveReadEnable() {
        return slaveReadEnable;
    }

    public void setSlaveReadEnable(final boolean slaveReadEnable) {
        this.slaveReadEnable = slaveReadEnable;
    }

    public int getRegisterBrokerTimeoutMills() {
        return registerBrokerTimeoutMills;
    }

    public void setRegisterBrokerTimeoutMills(final int registerBrokerTimeoutMills) {
        this.registerBrokerTimeoutMills = registerBrokerTimeoutMills;
    }

    public String getRegionId() {
        return regionId;
    }

    public void setRegionId(final String regionId) {
        this.regionId = regionId;
    }

    public boolean isTransferMsgByHeap() {
        return transferMsgByHeap;
    }

    public void setTransferMsgByHeap(final boolean transferMsgByHeap) {
        this.transferMsgByHeap = transferMsgByHeap;
    }

    public String getMessageStorePlugIn() {
        return messageStorePlugIn;
    }

    public void setMessageStorePlugIn(String messageStorePlugIn) {
        this.messageStorePlugIn = messageStorePlugIn;
    }

    public boolean isHighSpeedMode() {
        return highSpeedMode;
    }

    public void setHighSpeedMode(final boolean highSpeedMode) {
        this.highSpeedMode = highSpeedMode;
    }

    public String getRocketmqHome() {
        return rocketmqHome;
    }

    public void setRocketmqHome(String rocketmqHome) {
        this.rocketmqHome = rocketmqHome;
    }

    public String getBrokerName() {
        return brokerName;
    }

    public void setBrokerName(String brokerName) {
        this.brokerName = brokerName;
    }

    public int getBrokerPermission() {
        return brokerPermission;
    }

    public void setBrokerPermission(int brokerPermission) {
        this.brokerPermission = brokerPermission;
    }

    public int getDefaultTopicQueueNums() {
        return defaultTopicQueueNums;
    }

    public void setDefaultTopicQueueNums(int defaultTopicQueueNums) {
        this.defaultTopicQueueNums = defaultTopicQueueNums;
    }

    public boolean isAutoCreateTopicEnable() {
        return autoCreateTopicEnable;
    }

    public void setAutoCreateTopicEnable(boolean autoCreateTopic) {
        this.autoCreateTopicEnable = autoCreateTopic;
    }

    public String getBrokerClusterName() {
        return brokerClusterName;
    }

    public void setBrokerClusterName(String brokerClusterName) {
        this.brokerClusterName = brokerClusterName;
    }

    public String getBrokerIP1() {
        return brokerIP1;
    }

    public void setBrokerIP1(String brokerIP1) {
        this.brokerIP1 = brokerIP1;
    }

    public String getBrokerIP2() {
        return brokerIP2;
    }

    public void setBrokerIP2(String brokerIP2) {
        this.brokerIP2 = brokerIP2;
    }

    public int getSendMessageThreadPoolNums() {
        return sendMessageThreadPoolNums;
    }

    public void setSendMessageThreadPoolNums(int sendMessageThreadPoolNums) {
        this.sendMessageThreadPoolNums = sendMessageThreadPoolNums;
    }

    public int getPullMessageThreadPoolNums() {
        return pullMessageThreadPoolNums;
    }

    public void setPullMessageThreadPoolNums(int pullMessageThreadPoolNums) {
        this.pullMessageThreadPoolNums = pullMessageThreadPoolNums;
    }

    public int getAdminBrokerThreadPoolNums() {
        return adminBrokerThreadPoolNums;
    }

    public void setAdminBrokerThreadPoolNums(int adminBrokerThreadPoolNums) {
        this.adminBrokerThreadPoolNums = adminBrokerThreadPoolNums;
    }

    public int getFlushConsumerOffsetInterval() {
        return flushConsumerOffsetInterval;
    }

    public void setFlushConsumerOffsetInterval(int flushConsumerOffsetInterval) {
        this.flushConsumerOffsetInterval = flushConsumerOffsetInterval;
    }

    public int getFlushConsumerOffsetHistoryInterval() {
        return flushConsumerOffsetHistoryInterval;
    }

    public void setFlushConsumerOffsetHistoryInterval(int flushConsumerOffsetHistoryInterval) {
        this.flushConsumerOffsetHistoryInterval = flushConsumerOffsetHistoryInterval;
    }

    public boolean isClusterTopicEnable() {
        return clusterTopicEnable;
    }

    public void setClusterTopicEnable(boolean clusterTopicEnable) {
        this.clusterTopicEnable = clusterTopicEnable;
    }

    public String getNamesrvAddr() {
        return namesrvAddr;
    }

    public void setNamesrvAddr(String namesrvAddr) {
        this.namesrvAddr = namesrvAddr;
    }

    public long getBrokerId() {
        return brokerId;
    }

    public void setBrokerId(long brokerId) {
        this.brokerId = brokerId;
    }

    public boolean isAutoCreateSubscriptionGroup() {
        return autoCreateSubscriptionGroup;
    }

    public void setAutoCreateSubscriptionGroup(boolean autoCreateSubscriptionGroup) {
        this.autoCreateSubscriptionGroup = autoCreateSubscriptionGroup;
    }

    public boolean isRejectTransactionMessage() {
        return rejectTransactionMessage;
    }

    public void setRejectTransactionMessage(boolean rejectTransactionMessage) {
        this.rejectTransactionMessage = rejectTransactionMessage;
    }

    public boolean isFetchNamesrvAddrByAddressServer() {
        return fetchNamesrvAddrByAddressServer;
    }

    public void setFetchNamesrvAddrByAddressServer(boolean fetchNamesrvAddrByAddressServer) {
        this.fetchNamesrvAddrByAddressServer = fetchNamesrvAddrByAddressServer;
    }

    public int getSendThreadPoolQueueCapacity() {
        return sendThreadPoolQueueCapacity;
    }

    public void setSendThreadPoolQueueCapacity(int sendThreadPoolQueueCapacity) {
        this.sendThreadPoolQueueCapacity = sendThreadPoolQueueCapacity;
    }

    public int getPullThreadPoolQueueCapacity() {
        return pullThreadPoolQueueCapacity;
    }

    public void setPullThreadPoolQueueCapacity(int pullThreadPoolQueueCapacity) {
        this.pullThreadPoolQueueCapacity = pullThreadPoolQueueCapacity;
    }

    public boolean isBrokerTopicEnable() {
        return brokerTopicEnable;
    }

    public void setBrokerTopicEnable(boolean brokerTopicEnable) {
        this.brokerTopicEnable = brokerTopicEnable;
    }

    public int getFilterServerNums() {
        return filterServerNums;
    }

    public void setFilterServerNums(int filterServerNums) {
        this.filterServerNums = filterServerNums;
    }

    public boolean isLongPollingEnable() {
        return longPollingEnable;
    }

    public void setLongPollingEnable(boolean longPollingEnable) {
        this.longPollingEnable = longPollingEnable;
    }

    public boolean isNotifyConsumerIdsChangedEnable() {
        return notifyConsumerIdsChangedEnable;
    }

    public void setNotifyConsumerIdsChangedEnable(boolean notifyConsumerIdsChangedEnable) {
        this.notifyConsumerIdsChangedEnable = notifyConsumerIdsChangedEnable;
    }

    public long getShortPollingTimeMills() {
        return shortPollingTimeMills;
    }

    public void setShortPollingTimeMills(long shortPollingTimeMills) {
        this.shortPollingTimeMills = shortPollingTimeMills;
    }

    public int getClientManageThreadPoolNums() {
        return clientManageThreadPoolNums;
    }

    public void setClientManageThreadPoolNums(int clientManageThreadPoolNums) {
        this.clientManageThreadPoolNums = clientManageThreadPoolNums;
    }

    public boolean isCommercialEnable() {
        return commercialEnable;
    }

    public void setCommercialEnable(final boolean commercialEnable) {
        this.commercialEnable = commercialEnable;
    }

    public int getCommercialTimerCount() {
        return commercialTimerCount;
    }

    public void setCommercialTimerCount(final int commercialTimerCount) {
        this.commercialTimerCount = commercialTimerCount;
    }

    public int getCommercialTransCount() {
        return commercialTransCount;
    }

    public void setCommercialTransCount(final int commercialTransCount) {
        this.commercialTransCount = commercialTransCount;
    }

    public int getCommercialBigCount() {
        return commercialBigCount;
    }

    public void setCommercialBigCount(final int commercialBigCount) {
        this.commercialBigCount = commercialBigCount;
    }

    public int getMaxDelayTime() {
        return maxDelayTime;
    }

    public void setMaxDelayTime(final int maxDelayTime) {
        this.maxDelayTime = maxDelayTime;
    }

    public int getClientManagerThreadPoolQueueCapacity() {
        return clientManagerThreadPoolQueueCapacity;
    }

    public void setClientManagerThreadPoolQueueCapacity(int clientManagerThreadPoolQueueCapacity) {
        this.clientManagerThreadPoolQueueCapacity = clientManagerThreadPoolQueueCapacity;
    }

    public int getConsumerManagerThreadPoolQueueCapacity() {
        return consumerManagerThreadPoolQueueCapacity;
    }

    public void setConsumerManagerThreadPoolQueueCapacity(int consumerManagerThreadPoolQueueCapacity) {
        this.consumerManagerThreadPoolQueueCapacity = consumerManagerThreadPoolQueueCapacity;
    }

    public int getConsumerManageThreadPoolNums() {
        return consumerManageThreadPoolNums;
    }

    public void setConsumerManageThreadPoolNums(int consumerManageThreadPoolNums) {
        this.consumerManageThreadPoolNums = consumerManageThreadPoolNums;
    }

    public int getCommercialBaseCount() {
        return commercialBaseCount;
    }

    public void setCommercialBaseCount(int commercialBaseCount) {
        this.commercialBaseCount = commercialBaseCount;
    }
}
| Coneboy-k/incubator-rocketmq | common/src/main/java/org/apache/rocketmq/common/BrokerConfig.java | Java | apache-2.0 | 15,205 |
package command
import (
"github.com/goodmustache/pt/actor"
"github.com/goodmustache/pt/command/display"
)
//counterfeiter:generate . ProjectListActor

// ProjectListActor is the actor interface the project-list command depends on.
type ProjectListActor interface {
	Projects() ([]actor.Project, error)
}

// ProjectList is the "project list" CLI command definition; UserID is bound
// to the -u/--user-id flag.
type ProjectList struct {
	UserID uint64 `short:"u" long:"user-id" description:"User ID to run commands with"`

	Actor ProjectListActor
	UI     UI
}
// Execute fetches all projects from the actor, converts them to display rows,
// and prints them as a table. The positional-argument slice is unused.
func (cmd ProjectList) Execute(_ []string) error {
	projects, err := cmd.Actor.Projects()
	if err != nil {
		return err
	}

	rows := make([]display.ProjectRow, len(projects))
	for i, p := range projects {
		rows[i] = display.ProjectRow{
			ID:          p.ID,
			Name:        p.Name,
			Description: p.Description,
			Visibility:  p.Visibility(),
		}
	}

	cmd.UI.PrintTable(rows)
	return nil
}
| goodmustache/pt | command/project_list.go | GO | apache-2.0 | 860 |
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rdmo.core.exports import XMLResponse
from rdmo.core.permissions import HasModelPermission
from rdmo.core.views import ChoicesViewSet
from rdmo.core.viewsets import CopyModelMixin
from .models import Condition
from .renderers import ConditionRenderer
from .serializers.export import ConditionExportSerializer
from .serializers.v1 import ConditionIndexSerializer, ConditionSerializer
class ConditionViewSet(CopyModelMixin, ModelViewSet):
    """CRUD API for conditions, with index and XML-export extra actions."""

    permission_classes = (HasModelPermission, )

    queryset = Condition.objects.select_related('source', 'target_option') \
        .prefetch_related('optionsets', 'questionsets', 'questions', 'tasks')
    serializer_class = ConditionSerializer

    filter_backends = (DjangoFilterBackend,)
    filterset_fields = (
        'uri',
        'key',
        'source',
        'relation',
        'target_text',
        'target_option'
    )

    @action(detail=False)
    def index(self, request):
        """Return a lightweight index of every condition."""
        conditions = Condition.objects.select_related('source', 'target_option')
        data = ConditionIndexSerializer(conditions, many=True).data
        return Response(data)

    @action(detail=False, permission_classes=[HasModelPermission])
    def export(self, request):
        """Export the full condition queryset as an XML download."""
        data = ConditionExportSerializer(self.get_queryset(), many=True).data
        return XMLResponse(ConditionRenderer().render(data), name='conditions')

    @action(detail=True, url_path='export', permission_classes=[HasModelPermission])
    def detail_export(self, request, pk=None):
        """Export a single condition as XML, named after its key."""
        condition = self.get_object()
        data = ConditionExportSerializer(condition).data
        return XMLResponse(ConditionRenderer().render([data]), name=condition.key)
class RelationViewSet(ChoicesViewSet):
    """Read-only choices endpoint exposing ``Condition.RELATION_CHOICES``."""

    permission_classes = (IsAuthenticated, )
    queryset = Condition.RELATION_CHOICES
| rdmorganiser/rdmo | rdmo/conditions/viewsets.py | Python | apache-2.0 | 2,141 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.redis.internal;
import java.io.IOException;
import java.net.URL;
import java.util.Collection;
import org.apache.geode.distributed.internal.DistributedSystemService;
import org.apache.geode.distributed.internal.InternalDistributedSystem;
import org.apache.geode.internal.InternalDataSerializer;
import org.apache.geode.internal.classloader.ClassPathLoader;
/**
 * Registers the Redis-compatibility module with the distributed system and
 * supplies its serialization accept-list.
 */
public class RedisDistributedSystemService implements DistributedSystemService {

  @Override
  public void init(InternalDistributedSystem internalDistributedSystem) {
    // Nothing to initialize for this service.
  }

  @Override
  public Class getInterface() {
    return getClass();
  }

  @Override
  public Collection<String> getSerializationAcceptlist() throws IOException {
    String resourceName = "sanctioned-geode-apis-compatible-with-redis-serializables.txt";
    URL sanctionedSerializables = ClassPathLoader.getLatest().getResource(getClass(), resourceName);
    return InternalDataSerializer.loadClassNames(sanctionedSerializables);
  }
}
| masaki-yamakawa/geode | geode-apis-compatible-with-redis/src/main/java/org/apache/geode/redis/internal/RedisDistributedSystemService.java | Java | apache-2.0 | 1,750 |
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package leap.lang.meta;
import leap.lang.Named;
import leap.lang.Titled;
public interface MNamed extends MObject,Named,Titled {
} | leapframework/framework | base/lang/src/main/java/leap/lang/meta/MNamed.java | Java | apache-2.0 | 748 |
package com.vladmihalcea.book.hpjp.hibernate.type.array;
import org.hibernate.dialect.PostgreSQL95Dialect;
import java.sql.Types;
/**
* @author Vlad Mihalcea
*/
/**
 * PostgreSQL 9.5 dialect that maps JDBC {@code Types.ARRAY} columns to the
 * native PostgreSQL {@code array} column type.
 *
 * @author Vlad Mihalcea
 */
public class PostgreSQL95ArrayDialect extends PostgreSQL95Dialect {

    public PostgreSQL95ArrayDialect() {
        registerColumnType(Types.ARRAY, "array");
    }
}
| vladmihalcea/high-performance-java-persistence | core/src/test/java/com/vladmihalcea/book/hpjp/hibernate/type/array/PostgreSQL95ArrayDialect.java | Java | apache-2.0 | 355 |
require_dependency 'libraetd/lib/serviceclient/user_info_client'
require_dependency 'libraetd/lib/helpers/user_info'
require_dependency 'libraetd/lib/serviceclient/entity_id_client'
module Helpers
class EtdHelper
def self.process_inbound_sis_authorization( deposit_authorization )
# lookup the user and create their account as necessary
user, _ = lookup_or_create_user( deposit_authorization.who )
if user.nil?
return false
end
# determine if this is an update to an existing work
work_source = "#{GenericWork::THESIS_SOURCE_SIS}:#{deposit_authorization.id}"
existing = find_existing_work( work_source )
if existing.present?
puts "INFO: found existing work for this authorization (#{deposit_authorization.id})"
# only apply updates to draft works
if existing.is_draft?
before = existing.title.present? ? existing.title[ 0 ] : ''
if deposit_authorization.title != before
existing.title = [ deposit_authorization.title ]
existing.save!
puts "INFO: updated work #{existing.id} title from: '#{before}' to '#{deposit_authorization.title}'"
end
return true
else
puts "ERROR: SIS update for a published work (#{existing.id}); ignoring"
return false
end
end
ok = true
w = GenericWork.create!( title: [ deposit_authorization.title ] ) do |w|
# generic work attributes
w.apply_depositor_metadata( user )
w.creator = user.email
w.author_email = user.email
w.author_first_name = deposit_authorization.first_name
w.author_last_name = deposit_authorization.last_name
w.author_institution = GenericWork::DEFAULT_INSTITUTION
w.date_created = CurationConcerns::TimeService.time_in_utc.strftime( "%Y-%m-%d" )
w.visibility = Hydra::AccessControls::AccessRight::VISIBILITY_TEXT_VALUE_PUBLIC
w.embargo_state = Hydra::AccessControls::AccessRight::VISIBILITY_TEXT_VALUE_PUBLIC
w.visibility_during_embargo = Hydra::AccessControls::AccessRight::VISIBILITY_TEXT_VALUE_PUBLIC
w.work_type = GenericWork::WORK_TYPE_THESIS
w.draft = 'true'
w.publisher = GenericWork::DEFAULT_PUBLISHER
w.department = deposit_authorization.department
w.degree = deposit_authorization.degree
w.language = GenericWork::DEFAULT_LANGUAGE
w.license = GenericWork::DEFAULT_LICENSE
# where the authorization comes from
w.work_source = work_source
end
status, id = ServiceClient::EntityIdClient.instance.newid( w )
if ServiceClient::EntityIdClient.instance.ok?( status ) && id.present?
w.identifier = id
w.permanent_url = GenericWork.doi_url( id )
else
puts "ERROR: cannot mint DOI (#{status}). Using public view"
w.identifier = nil
w.permanent_url = Rails.application.routes.url_helpers.public_view_url( w )
end
ok = w.save
# send the email if necessary
ThesisMailers.sis_thesis_can_be_submitted( user.email, user.display_name, MAIL_SENDER ).deliver_later if ok
return ok
end
def self.process_inbound_optional_authorization( deposit_request )
# lookup the user and create their account as necessary
user, extended_info = lookup_or_create_user( deposit_request.who )
if user.nil?
return false
end
# default values
default_title = 'Enter your title here'
ok = true
w = GenericWork.create!( title: [ default_title ] ) do |w|
# generic work attributes
w.apply_depositor_metadata( user )
w.creator = user.email
w.author_email = user.email
w.author_first_name = extended_info.first_name || 'First name'
w.author_last_name = extended_info.last_name || 'Last name'
w.author_institution = GenericWork::DEFAULT_INSTITUTION
w.date_created = CurationConcerns::TimeService.time_in_utc.strftime( "%Y-%m-%d" )
w.visibility = Hydra::AccessControls::AccessRight::VISIBILITY_TEXT_VALUE_PUBLIC
w.visibility_during_embargo = Hydra::AccessControls::AccessRight::VISIBILITY_TEXT_VALUE_PUBLIC
w.embargo_state = Hydra::AccessControls::AccessRight::VISIBILITY_TEXT_VALUE_PUBLIC
w.work_type = GenericWork::WORK_TYPE_THESIS
w.draft = 'true'
w.publisher = GenericWork::DEFAULT_PUBLISHER
w.department = deposit_request.department
w.degree = deposit_request.degree
w.language = GenericWork::DEFAULT_LANGUAGE
w.license = GenericWork::DEFAULT_LICENSE
# where the authorization comes from
w.work_source = "#{GenericWork::THESIS_SOURCE_OPTIONAL}:#{deposit_request.id}"
# who requested it
w.registrar_computing_id = deposit_request.requester unless deposit_request.requester.nil?
end
status, id = ServiceClient::EntityIdClient.instance.newid( w )
if ServiceClient::EntityIdClient.instance.ok?( status ) && id.present?
w.identifier = id
w.permanent_url = GenericWork.doi_url( id )
else
puts "ERROR: cannot mint DOI (#{status}). Using public view"
w.permanent_url = Rails.application.routes.url_helpers.public_view_url( w )
end
ok = w.save
ThesisMailers.optional_thesis_can_be_submitted( user.email, user.display_name, MAIL_SENDER ).deliver_later if ok
return ok
end
private
#
# Look for a work that corresponds to the specified work source; a match means
# we previously created a placeholder ETD for this student.
#
def self.find_existing_work( work_source )
  matches = GenericWork.where( { work_source: work_source } )
  return matches.present? ? matches.first : nil
end
# Resolve the user record for the given computing id, creating the local
# account when none exists. Returns [user, user_info] on success and
# [nil, nil] when the directory lookup fails.
def self.lookup_or_create_user( cid )
  info = lookup_user( cid )
  if info.nil?
    puts "ERROR: cannot locate user info for #{cid}"
    return nil, nil
  end
  # prefer the directory-supplied email; otherwise synthesize one from the id
  email = info.email
  email = User.email_from_cid( info.id ) if email.nil? || email.blank?
  # find an existing account or create one; an ETD cannot exist without an owner
  user = User.find_by_email( email )
  user = create_user( info, email ) if user.nil?
  return user, info
end
# Create a new local user account populated from the directory info record.
# NOTE(review): uses a hard-coded default password; presumably authentication
# is handled externally (e.g. SSO) -- confirm before relying on local login.
def self.create_user( user_info, email )
  default_password = 'password'
  # department is multi-valued in the directory; take the first entry
  user = User.new( email: email,
  password: default_password, password_confirmation: default_password,
  display_name: user_info.display_name,
  department: user_info.department.first,
  office: user_info.office,
  telephone: user_info.phone,
  title: user_info.description )
  # save! raises on validation failure rather than returning false
  user.save!
  puts "INFO: created new account for #{user_info.id}"
  return( user )
end
# Query the user info service for the given computing id.
# Returns a Helpers::UserInfo on success, nil on any service error.
def self.lookup_user( id )
  status, resp = ServiceClient::UserInfoClient.instance.get_by_id( id )
  return nil unless ServiceClient::UserInfoClient.instance.ok?( status )
  return Helpers::UserInfo.create( resp )
end
end
end
#
# end of file
#
| uvalib/Libra2 | lib/libraetd/lib/helpers/etd_helper.rb | Ruby | apache-2.0 | 7,345 |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'fbcode_builder steps to build Facebook Thrift'
import specs.fbthrift as fbthrift
def fbcode_builder_spec(builder):
    """Return the fbcode_builder spec for this project.

    The project has no build steps of its own here; it only declares a
    dependency on the fbthrift spec.
    """
    spec = {'depends_on': [fbthrift]}
    return spec
# Top-level fbcode_builder configuration consumed by the build harness:
# the GitHub project to fetch and the callable that produces its spec.
config = {
    'github_project': 'facebook/fbthrift',
    'fbcode_builder_spec': fbcode_builder_spec,
}
| getyourguide/fbthrift | build/fbcode_builder_config.py | Python | apache-2.0 | 449 |
package grammar.model.nouns;
import grammar.model.Multiplicity;
import grammar.model.PseudoEnum;
import grammar.model.SubjectGender;
import grammar.util.Utilities;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
public class Noun implements PseudoEnum<Noun> {
    // Registry of every Noun created, keyed by its display text (see toString()).
    private static final Map<String, Noun> INSTANCE_MAP = new HashMap<String, Noun>();
    private static final Set<Noun> INSTANCES = new HashSet<Noun>();
    // Source of unique ordinals; incremented once per constructed Noun.
    private static int sequenceGenerator = 0;
    private final int sequence;
    // Noun class (e.g. masculine/feminine) -> multiplicity -> the concrete form.
    private final Map<NounClass, Map<Multiplicity, NounForm>> formMap = new HashMap<NounClass, Map<Multiplicity, NounForm>>();
    private final Set<NounForm> formSet;
    private final Set<NounTag> classifications;

    /**
     * Builds a noun from its forms and classifications, indexes each form by
     * noun class and multiplicity, and registers the instance in the static
     * registries.
     */
    public Noun(Set<NounForm> forms, Set<NounTag> classifications) {
        for (NounForm form : forms) {
            for (NounClass nc : form.getNounClasses()) {
                Map<Multiplicity, NounForm> ms = Utilities.initialiseIfReqd(nc, formMap);
                ms.put(form.getMultiplicity(), form);
            }
            form.setNoun(this);
        }
        sequence = sequenceGenerator++;
        this.formSet = forms;
        this.classifications = classifications;
        // toString() reads formMap, which is fully populated above, so keying
        // the registry on the display text here is safe.
        INSTANCE_MAP.put(toString(), this);
        INSTANCES.add(this);
    }

    public Set<NounForm> getForms() {
        return formSet;
    }

    // A noun is regular only if every one of its forms is regular.
    public boolean isRegular() {
        for (NounForm form : formSet) {
            if (!form.isRegular())
                return false;
        }
        return true;
    }

    public int ordinal() {
        return sequence;
    }

    // NOTE: ordinal subtraction; safe while ordinals remain small and non-negative.
    public int compareTo(Noun o) {
        return ordinal() - o.ordinal();
    }

    // NOTE(review): backed by a HashSet, so the returned order is unspecified.
    public static Noun[] values() {
        return INSTANCES.toArray(new Noun[]{});
    }

    /**
     * Looks up a previously constructed Noun by its display text.
     *
     * @throws IllegalArgumentException if no noun with that text was registered.
     */
    public static Noun valueOf(String key) {
        Noun m = INSTANCE_MAP.get(key);
        if (m == null)
            throw new IllegalArgumentException("No such Noun: '"+key+"'.");
        return m;
    }

    /**
     * Returns the text of this noun for the given gender and multiplicity.
     * Falls back to an arbitrary noun class when the requested one is absent,
     * and derives a plural from the singular when no explicit plural exists.
     */
    public String getText(SubjectGender subjectGender, Multiplicity multiplicity) {
        NounClass nc = mapSubjectGenderToNounClass(subjectGender);
        Map<Multiplicity, NounForm> multiplicities = formMap.get(nc);
        if (multiplicities == null)
            multiplicities = formMap.values().iterator().next();
        NounForm form = multiplicities.get(multiplicity);
        if (form != null)
            return form.getText();
        if (multiplicity.equals(Multiplicity.PLURAL)) {
            form = multiplicities.get(Multiplicity.SINGULAR);
            // NOTE(review): concatenating the form itself relies on NounForm.toString()
            // yielding the form's text; form.getText() may have been intended -- confirm.
            return form + (form.getText().endsWith("s") ? "es" : "s"); // TODO this should come from some xml somewhere...
        }
        else // singular requested; only a plural form exists
            throw new IllegalArgumentException("No singular form exists for noun "+getText(subjectGender, Multiplicity.PLURAL)+".");
    }

    // Display text: the masculine singular when available, otherwise the plural.
    public String toString() {
        try {
            return getText(SubjectGender.MASCULINE, Multiplicity.SINGULAR);
        }
        catch (IllegalArgumentException iae) {
            return getText(SubjectGender.MASCULINE, Multiplicity.PLURAL);
        }
    }

    // Returns the mapped noun class when this noun has forms for it, otherwise
    // an arbitrary class this noun does have forms for.
    public NounClass getNounClass(SubjectGender subjectGender) {
        NounClass nc = mapSubjectGenderToNounClass(subjectGender);
        if (formMap.keySet().contains(nc))
            return nc;
        return formMap.keySet().iterator().next();
    }

    private static NounClass mapSubjectGenderToNounClass(SubjectGender subjectGender) {
        NounClass nc;
        if (subjectGender.equals(SubjectGender.MASCULINE)) // TODO mapping is specific to french...
            nc = NounClass.valueOf("MASCULINE");
        else if (subjectGender.equals(SubjectGender.FEMININE))
            nc = NounClass.valueOf("FEMININE");
        else
            nc = null;
        return nc;
    }

    public Set<NounTag> getClassifications() {
        return classifications;
    }

    // equals/hashCode are based solely on the unique per-instance ordinal.
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + sequence;
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (!(obj instanceof Noun))
            return false;
        final Noun other = (Noun) obj;
        if (sequence != other.sequence)
            return false;
        return true;
    }
} | dliroberts/lang | RomanceConjugator/src/main/java/grammar/model/nouns/Noun.java | Java | apache-2.0 | 3,873 |
/*
* Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.route53domains.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/route53domains-2014-05-15/EnableDomainAutoRenew"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class EnableDomainAutoRenewResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {

    /**
     * Returns a string representation of this object; useful for testing and debugging.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        // No fields to render: the result is always the empty braces.
        return "{" + "}";
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // No state: any non-null instance of this type is equal. The instanceof
        // test is false for null, covering the null case as well.
        return obj instanceof EnableDomainAutoRenewResult;
    }

    @Override
    public int hashCode() {
        // Stateless type: constant hash consistent with equals above.
        return 1;
    }

    @Override
    public EnableDomainAutoRenewResult clone() {
        try {
            return (EnableDomainAutoRenewResult) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

}
| dagnir/aws-sdk-java | aws-java-sdk-route53/src/main/java/com/amazonaws/services/route53domains/model/EnableDomainAutoRenewResult.java | Java | apache-2.0 | 2,301 |
/*
* ASM: a very small and fast Java bytecode manipulation framework
* Copyright (c) 2000-2007 INRIA, France Telecom
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.powermock.api.mockito.repackaged.asm.tree;
import org.powermock.api.mockito.repackaged.asm.AnnotationVisitor;
import org.powermock.api.mockito.repackaged.asm.Attribute;
import org.powermock.api.mockito.repackaged.asm.ClassVisitor;
import org.powermock.api.mockito.repackaged.asm.Label;
import org.powermock.api.mockito.repackaged.asm.MethodVisitor;
import org.powermock.api.mockito.repackaged.asm.Opcodes;
import org.powermock.api.mockito.repackaged.asm.Type;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * A node that represents a method.
 *
 * @author Eric Bruneton
 */
public class MethodNode extends MemberNode implements MethodVisitor {

    /**
     * The method's access flags (see {@link Opcodes}). This field also
     * indicates if the method is synthetic and/or deprecated.
     */
    public int access;

    /**
     * The method's name.
     */
    public String name;

    /**
     * The method's descriptor (see {@link Type}).
     */
    public String desc;

    /**
     * The method's signature. May be <tt>null</tt>.
     */
    public String signature;

    /**
     * The internal names of the method's exception classes (see
     * {@link Type#getInternalName() getInternalName}). This list is a list of
     * {@link String} objects.
     */
    public List exceptions;

    /**
     * The default value of this annotation interface method. This field must be
     * a {@link Byte}, {@link Boolean}, {@link Character}, {@link Short},
     * {@link Integer}, {@link Long}, {@link Float}, {@link Double},
     * {@link String} or {@link Type}, or an two elements String array (for
     * enumeration values), a {@link AnnotationNode}, or a {@link List} of
     * values of one of the preceding types. May be <tt>null</tt>.
     */
    public Object annotationDefault;

    /**
     * The runtime visible parameter annotations of this method. These lists are
     * lists of {@link AnnotationNode} objects. May be <tt>null</tt>.
     *
     * @associates org.powermock.api.mockito.repackaged.asm.tree.AnnotationNode
     * @label invisible parameters
     */
    public List[] visibleParameterAnnotations;

    /**
     * The runtime invisible parameter annotations of this method. These lists
     * are lists of {@link AnnotationNode} objects. May be <tt>null</tt>.
     *
     * @associates org.powermock.api.mockito.repackaged.asm.tree.AnnotationNode
     * @label visible parameters
     */
    public List[] invisibleParameterAnnotations;

    /**
     * The instructions of this method. This list is a list of
     * {@link AbstractInsnNode} objects.
     *
     * @associates org.powermock.api.mockito.repackaged.asm.tree.AbstractInsnNode
     * @label instructions
     */
    public InsnList instructions;

    /**
     * The try catch blocks of this method. This list is a list of
     * {@link TryCatchBlockNode} objects.
     *
     * @associates org.powermock.api.mockito.repackaged.asm.tree.TryCatchBlockNode
     */
    public List tryCatchBlocks;

    /**
     * The maximum stack size of this method.
     */
    public int maxStack;

    /**
     * The maximum number of local variables of this method.
     */
    public int maxLocals;

    /**
     * The local variables of this method. This list is a list of
     * {@link LocalVariableNode} objects. May be <tt>null</tt>
     *
     * @associates org.powermock.api.mockito.repackaged.asm.tree.LocalVariableNode
     */
    public List localVariables;

    /**
     * Constructs an uninitialized {@link MethodNode}.
     */
    public MethodNode() {
        this.instructions = new InsnList();
    }

    /**
     * Constructs a new {@link MethodNode}.
     *
     * @param access the method's access flags (see {@link Opcodes}). This
     *        parameter also indicates if the method is synthetic and/or
     *        deprecated.
     * @param name the method's name.
     * @param desc the method's descriptor (see {@link Type}).
     * @param signature the method's signature. May be <tt>null</tt>.
     * @param exceptions the internal names of the method's exception classes
     *        (see {@link Type#getInternalName() getInternalName}). May be
     *        <tt>null</tt>.
     */
    public MethodNode(
        final int access,
        final String name,
        final String desc,
        final String signature,
        final String[] exceptions)
    {
        this();
        this.access = access;
        this.name = name;
        this.desc = desc;
        this.signature = signature;
        this.exceptions = new ArrayList(exceptions == null
                ? 0
                : exceptions.length);
        // Abstract methods have no code, so no local variable list is allocated.
        boolean isAbstract = (access & Opcodes.ACC_ABSTRACT) != 0;
        if (!isAbstract) {
            this.localVariables = new ArrayList(5);
        }
        this.tryCatchBlocks = new ArrayList();
        if (exceptions != null) {
            this.exceptions.addAll(Arrays.asList(exceptions));
        }
    }

    // ------------------------------------------------------------------------
    // Implementation of the MethodVisitor interface
    // ------------------------------------------------------------------------
    // Each visitXxx method below records the visited event as the matching
    // *Node in this method's tree representation.

    public AnnotationVisitor visitAnnotationDefault() {
        // The returned AnnotationNode stores the single default value into
        // annotationDefault via this list subclass's add() override.
        return new AnnotationNode(new ArrayList(0) {
            public boolean add(final Object o) {
                annotationDefault = o;
                return super.add(o);
            }
        });
    }

    public AnnotationVisitor visitParameterAnnotation(
        final int parameter,
        final String desc,
        final boolean visible)
    {
        AnnotationNode an = new AnnotationNode(desc);
        // Parameter annotation arrays are allocated lazily, sized from the
        // method descriptor's argument count.
        if (visible) {
            if (visibleParameterAnnotations == null) {
                int params = Type.getArgumentTypes(this.desc).length;
                visibleParameterAnnotations = new List[params];
            }
            if (visibleParameterAnnotations[parameter] == null) {
                visibleParameterAnnotations[parameter] = new ArrayList(1);
            }
            visibleParameterAnnotations[parameter].add(an);
        } else {
            if (invisibleParameterAnnotations == null) {
                int params = Type.getArgumentTypes(this.desc).length;
                invisibleParameterAnnotations = new List[params];
            }
            if (invisibleParameterAnnotations[parameter] == null) {
                invisibleParameterAnnotations[parameter] = new ArrayList(1);
            }
            invisibleParameterAnnotations[parameter].add(an);
        }
        return an;
    }

    // Nothing to record for the start of the method's code.
    public void visitCode() {
    }

    public void visitFrame(
        final int type,
        final int nLocal,
        final Object[] local,
        final int nStack,
        final Object[] stack)
    {
        instructions.add(new FrameNode(type, nLocal, local == null
                ? null
                : getLabelNodes(local), nStack, stack == null
                ? null
                : getLabelNodes(stack)));
    }

    public void visitInsn(final int opcode) {
        instructions.add(new InsnNode(opcode));
    }

    public void visitIntInsn(final int opcode, final int operand) {
        instructions.add(new IntInsnNode(opcode, operand));
    }

    public void visitVarInsn(final int opcode, final int var) {
        instructions.add(new VarInsnNode(opcode, var));
    }

    public void visitTypeInsn(final int opcode, final String type) {
        instructions.add(new TypeInsnNode(opcode, type));
    }

    public void visitFieldInsn(
        final int opcode,
        final String owner,
        final String name,
        final String desc)
    {
        instructions.add(new FieldInsnNode(opcode, owner, name, desc));
    }

    public void visitMethodInsn(
        final int opcode,
        final String owner,
        final String name,
        final String desc)
    {
        instructions.add(new MethodInsnNode(opcode, owner, name, desc));
    }

    public void visitJumpInsn(final int opcode, final Label label) {
        instructions.add(new JumpInsnNode(opcode, getLabelNode(label)));
    }

    public void visitLabel(final Label label) {
        instructions.add(getLabelNode(label));
    }

    public void visitLdcInsn(final Object cst) {
        instructions.add(new LdcInsnNode(cst));
    }

    public void visitIincInsn(final int var, final int increment) {
        instructions.add(new IincInsnNode(var, increment));
    }

    public void visitTableSwitchInsn(
        final int min,
        final int max,
        final Label dflt,
        final Label[] labels)
    {
        instructions.add(new TableSwitchInsnNode(min,
                max,
                getLabelNode(dflt),
                getLabelNodes(labels)));
    }

    public void visitLookupSwitchInsn(
        final Label dflt,
        final int[] keys,
        final Label[] labels)
    {
        instructions.add(new LookupSwitchInsnNode(getLabelNode(dflt),
                keys,
                getLabelNodes(labels)));
    }

    public void visitMultiANewArrayInsn(final String desc, final int dims) {
        instructions.add(new MultiANewArrayInsnNode(desc, dims));
    }

    public void visitTryCatchBlock(
        final Label start,
        final Label end,
        final Label handler,
        final String type)
    {
        tryCatchBlocks.add(new TryCatchBlockNode(getLabelNode(start),
                getLabelNode(end),
                getLabelNode(handler),
                type));
    }

    public void visitLocalVariable(
        final String name,
        final String desc,
        final String signature,
        final Label start,
        final Label end,
        final int index)
    {
        localVariables.add(new LocalVariableNode(name,
                desc,
                signature,
                getLabelNode(start),
                getLabelNode(end),
                index));
    }

    public void visitLineNumber(final int line, final Label start) {
        instructions.add(new LineNumberNode(line, getLabelNode(start)));
    }

    public void visitMaxs(final int maxStack, final int maxLocals) {
        this.maxStack = maxStack;
        this.maxLocals = maxLocals;
    }

    /**
     * Returns the LabelNode corresponding to the given Label. Creates a new
     * LabelNode if necessary. The default implementation of this method uses
     * the {@link Label#info} field to store associations between labels and
     * label nodes.
     *
     * @param l a Label.
     * @return the LabelNode corresponding to l.
     */
    protected LabelNode getLabelNode(final Label l) {
        if (!(l.info instanceof LabelNode)) {
            l.info = new LabelNode(l);
        }
        return (LabelNode) l.info;
    }

    // Maps an array of Labels to their corresponding LabelNodes.
    private LabelNode[] getLabelNodes(final Label[] l) {
        LabelNode[] nodes = new LabelNode[l.length];
        for (int i = 0; i < l.length; ++i) {
            nodes[i] = getLabelNode(l[i]);
        }
        return nodes;
    }

    // Copies a stack-map frame array, converting any Label entries to
    // LabelNodes and passing other entries through unchanged.
    private Object[] getLabelNodes(final Object[] objs) {
        Object[] nodes = new Object[objs.length];
        for (int i = 0; i < objs.length; ++i) {
            Object o = objs[i];
            if (o instanceof Label) {
                o = getLabelNode((Label) o);
            }
            nodes[i] = o;
        }
        return nodes;
    }

    // ------------------------------------------------------------------------
    // Accept method
    // ------------------------------------------------------------------------

    /**
     * Makes the given class visitor visit this method.
     *
     * @param cv a class visitor.
     */
    public void accept(final ClassVisitor cv) {
        String[] exceptions = new String[this.exceptions.size()];
        this.exceptions.toArray(exceptions);
        MethodVisitor mv = cv.visitMethod(access,
                name,
                desc,
                signature,
                exceptions);
        if (mv != null) {
            accept(mv);
        }
    }

    /**
     * Makes the given method visitor visit this method.
     *
     * @param mv a method visitor.
     */
    public void accept(final MethodVisitor mv) {
        // visits the method attributes
        int i, j, n;
        if (annotationDefault != null) {
            AnnotationVisitor av = mv.visitAnnotationDefault();
            AnnotationNode.accept(av, null, annotationDefault);
            if (av != null) {
                av.visitEnd();
            }
        }
        n = visibleAnnotations == null ? 0 : visibleAnnotations.size();
        for (i = 0; i < n; ++i) {
            AnnotationNode an = (AnnotationNode) visibleAnnotations.get(i);
            an.accept(mv.visitAnnotation(an.desc, true));
        }
        n = invisibleAnnotations == null ? 0 : invisibleAnnotations.size();
        for (i = 0; i < n; ++i) {
            AnnotationNode an = (AnnotationNode) invisibleAnnotations.get(i);
            an.accept(mv.visitAnnotation(an.desc, false));
        }
        n = visibleParameterAnnotations == null
                ? 0
                : visibleParameterAnnotations.length;
        for (i = 0; i < n; ++i) {
            List l = visibleParameterAnnotations[i];
            if (l == null) {
                continue;
            }
            for (j = 0; j < l.size(); ++j) {
                AnnotationNode an = (AnnotationNode) l.get(j);
                an.accept(mv.visitParameterAnnotation(i, an.desc, true));
            }
        }
        n = invisibleParameterAnnotations == null
                ? 0
                : invisibleParameterAnnotations.length;
        for (i = 0; i < n; ++i) {
            List l = invisibleParameterAnnotations[i];
            if (l == null) {
                continue;
            }
            for (j = 0; j < l.size(); ++j) {
                AnnotationNode an = (AnnotationNode) l.get(j);
                an.accept(mv.visitParameterAnnotation(i, an.desc, false));
            }
        }
        n = attrs == null ? 0 : attrs.size();
        for (i = 0; i < n; ++i) {
            mv.visitAttribute((Attribute) attrs.get(i));
        }
        // visits the method's code
        if (instructions.size() > 0) {
            mv.visitCode();
            // visits try catch blocks
            for (i = 0; i < tryCatchBlocks.size(); ++i) {
                ((TryCatchBlockNode) tryCatchBlocks.get(i)).accept(mv);
            }
            // visits instructions
            instructions.accept(mv);
            // visits local variables
            n = localVariables == null ? 0 : localVariables.size();
            for (i = 0; i < n; ++i) {
                ((LocalVariableNode) localVariables.get(i)).accept(mv);
            }
            // visits maxs
            mv.visitMaxs(maxStack, maxLocals);
        }
        mv.visitEnd();
    }
}
| gstamac/powermock | powermock-api/powermock-api-mockito2/src/main/java/org/powermock/api/mockito/repackaged/asm/tree/MethodNode.java | Java | apache-2.0 | 16,512 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace ManagerShop.Domain.DomainEvents
{
/// <summary>
/// Handles <see cref="MessageSentEvent"/> occurrences by writing a
/// notification line to the console.
/// </summary>
public class MessageSentEventHandler : IEventHandler<MessageSentEvent>
{
    /// <summary>
    /// Writes the notification for the order carried by the event.
    /// </summary>
    /// <param name="evt">The event being handled.</param>
    public void Handle(MessageSentEvent evt) =>
        Console.WriteLine("Order_Number:{0},Send a Email.", evt.OrderID);
}
}
| dawutao/ManagerShop | ManagerShop.UI/ManagerShop.Domain.Event/EventHandlers/MessageSentEventHandler.cs | C# | apache-2.0 | 400 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.operator;
import com.facebook.airlift.log.Logger;
import com.facebook.presto.common.Page;
import com.facebook.presto.common.block.BlockEncodingSerde;
import com.facebook.presto.execution.buffer.PagesSerdeFactory;
import com.facebook.presto.metadata.Split;
import com.facebook.presto.metadata.Split.SplitIdentifier;
import com.facebook.presto.spi.PrestoException;
import com.facebook.presto.spi.page.PagesSerde;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.collect.AbstractIterator;
import io.airlift.slice.InputStreamSliceInput;
import io.airlift.slice.OutputStreamSliceOutput;
import io.airlift.slice.SliceOutput;
import javax.inject.Inject;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import static com.facebook.presto.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR;
import static com.facebook.presto.spi.page.PagesSerdeUtil.readPages;
import static com.facebook.presto.spi.page.PagesSerdeUtil.writePages;
import static com.google.common.util.concurrent.Futures.immediateFuture;
import static java.nio.file.Files.newInputStream;
import static java.nio.file.Files.newOutputStream;
import static java.nio.file.StandardOpenOption.APPEND;
import static java.util.Objects.requireNonNull;
import static java.util.UUID.randomUUID;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
public class FileFragmentResultCacheManager
        implements FragmentResultCacheManager
{
    private static final Logger log = Logger.get(FileFragmentResultCacheManager.class);

    // Directory that holds one file per cached fragment result.
    private final Path baseDirectory;
    // Budget for bytes concurrently being flushed to disk; puts beyond it are dropped.
    private final long maxInFlightBytes;
    private final PagesSerde pagesSerde;
    private final FragmentCacheStats fragmentCacheStats;
    private final ExecutorService flushExecutor;
    private final ExecutorService removalExecutor;

    // Maps (serialized plan, split identifier) to the file containing its pages.
    private final Cache<CacheKey, Path> cache;

    // TODO: Decouple CacheKey by encoding PlanNode and SplitIdentifier separately so we don't have to keep too many objects in memory
    @Inject
    public FileFragmentResultCacheManager(
            FileFragmentResultCacheConfig cacheConfig,
            BlockEncodingSerde blockEncodingSerde,
            FragmentCacheStats fragmentCacheStats,
            ExecutorService flushExecutor,
            ExecutorService removalExecutor)
    {
        requireNonNull(cacheConfig, "cacheConfig is null");
        requireNonNull(blockEncodingSerde, "blockEncodingSerde is null");
        this.baseDirectory = Paths.get(cacheConfig.getBaseDirectory());
        this.maxInFlightBytes = cacheConfig.getMaxInFlightSize().toBytes();
        this.pagesSerde = new PagesSerdeFactory(blockEncodingSerde, cacheConfig.isBlockEncodingCompressionEnabled()).createPagesSerde();
        this.fragmentCacheStats = requireNonNull(fragmentCacheStats, "fragmentCacheStats is null");
        this.flushExecutor = requireNonNull(flushExecutor, "flushExecutor is null");
        this.removalExecutor = requireNonNull(removalExecutor, "removalExecutor is null");
        // Evicted entries have their backing file deleted asynchronously.
        this.cache = CacheBuilder.newBuilder()
                .maximumSize(cacheConfig.getMaxCachedEntries())
                .expireAfterAccess(cacheConfig.getCacheTtl().toMillis(), MILLISECONDS)
                .removalListener(new CacheRemovalListener())
                .recordStats()
                .build();

        File target = new File(baseDirectory.toUri());
        if (!target.exists()) {
            try {
                Files.createDirectories(target.toPath());
            }
            catch (IOException e) {
                throw new PrestoException(GENERIC_INTERNAL_ERROR, "cannot create cache directory " + target, e);
            }
        }
        else {
            // Stale files from a previous process are unusable (the in-memory
            // cache starts empty), so delete them in the background.
            File[] files = target.listFiles();
            if (files == null) {
                return;
            }
            this.removalExecutor.submit(() -> Arrays.stream(files).forEach(file -> {
                try {
                    Files.delete(file.toPath());
                }
                catch (IOException e) {
                    // ignore
                }
            }));
        }
    }

    /**
     * Schedules the given pages to be written to a new cache file for this
     * (plan, split) key. Returns an already-completed future when the write is
     * skipped (in-flight budget exceeded or entry already cached).
     */
    @Override
    public Future<?> put(String serializedPlan, Split split, List<Page> result)
    {
        CacheKey key = new CacheKey(serializedPlan, split.getSplitIdentifier());
        long resultSize = getPagesSize(result);
        // NOTE(review): this budget check and the addInFlightBytes below are not
        // atomic, so concurrent puts may briefly exceed maxInFlightBytes -- confirm
        // this best-effort bound is acceptable.
        if (fragmentCacheStats.getInFlightBytes() + resultSize > maxInFlightBytes || cache.getIfPresent(key) != null) {
            return immediateFuture(null);
        }
        fragmentCacheStats.addInFlightBytes(resultSize);
        // Random file name; '-' replaced since some file systems/tools dislike it.
        Path path = baseDirectory.resolve(randomUUID().toString().replaceAll("-", "_"));
        return flushExecutor.submit(() -> cachePages(key, path, result));
    }

    // Total uncompressed size of the given pages, in bytes.
    private static long getPagesSize(List<Page> pages)
    {
        return pages.stream()
                .mapToLong(Page::getSizeInBytes)
                .sum();
    }

    // Writes the pages to a new file and publishes the cache entry; on any
    // failure the partially written file is removed and the entry is not added.
    private void cachePages(CacheKey key, Path path, List<Page> pages)
    {
        // TODO: To support both memory and disk limit, we should check cache size before putting to cache and use written bytes as weight for cache
        try {
            Files.createFile(path);
            try (SliceOutput output = new OutputStreamSliceOutput(newOutputStream(path, APPEND))) {
                writePages(pagesSerde, output, pages.iterator());
                // Publish only after a fully successful write.
                cache.put(key, path);
            }
            catch (UncheckedIOException | IOException e) {
                log.warn(e, "%s encountered an error while writing to path %s", Thread.currentThread().getName(), path);
                tryDeleteFile(path);
            }
        }
        catch (UncheckedIOException | IOException e) {
            // Mirrors the inner handler so a Files.createFile failure gets the
            // same logging and cleanup as a failed write.
            log.warn(e, "%s encountered an error while writing to path %s", Thread.currentThread().getName(), path);
            tryDeleteFile(path);
        }
        finally {
            // Release the in-flight budget whether or not the write succeeded.
            fragmentCacheStats.addInFlightBytes(-getPagesSize(pages));
        }
    }

    // Best-effort delete; IO failures are intentionally ignored.
    private static void tryDeleteFile(Path path)
    {
        try {
            File file = new File(path.toUri());
            if (file.exists()) {
                Files.delete(file.toPath());
            }
        }
        catch (IOException e) {
            // ignore
        }
    }

    /**
     * Returns an iterator over the cached pages for this (plan, split) key, or
     * empty on a cache miss. The backing file stream is closed when the
     * iterator is exhausted.
     */
    @Override
    public Optional<Iterator<Page>> get(String serializedPlan, Split split)
    {
        CacheKey key = new CacheKey(serializedPlan, split.getSplitIdentifier());
        Path path = cache.getIfPresent(key);
        if (path == null) {
            fragmentCacheStats.incrementCacheMiss();
            return Optional.empty();
        }
        try {
            InputStream inputStream = newInputStream(path);
            Iterator<Page> result = readPages(pagesSerde, new InputStreamSliceInput(inputStream));
            fragmentCacheStats.incrementCacheHit();
            return Optional.of(closeWhenExhausted(result, inputStream));
        }
        catch (UncheckedIOException | IOException e) {
            // there might be a chance the file has been deleted. We would return cache miss in this case.
            fragmentCacheStats.incrementCacheMiss();
            return Optional.empty();
        }
    }

    // Wraps the iterator so that the resource is closed once iteration ends.
    // NOTE: the resource is only closed if the iterator is fully consumed.
    private static <T> Iterator<T> closeWhenExhausted(Iterator<T> iterator, Closeable resource)
    {
        requireNonNull(iterator, "iterator is null");
        requireNonNull(resource, "resource is null");

        return new AbstractIterator<T>()
        {
            @Override
            protected T computeNext()
            {
                if (iterator.hasNext()) {
                    return iterator.next();
                }
                try {
                    resource.close();
                }
                catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
                return endOfData();
            }
        };
    }

    /**
     * Cache key: the serialized plan fragment plus the split identifier whose
     * results it produced.
     */
    public static class CacheKey
    {
        private final String serializedPlan;
        private final SplitIdentifier splitIdentifier;

        public CacheKey(String serializedPlan, SplitIdentifier splitIdentifier)
        {
            this.serializedPlan = requireNonNull(serializedPlan, "serializedPlan is null");
            this.splitIdentifier = requireNonNull(splitIdentifier, "splitIdentifier is null");
        }

        public String getSerializedPlan()
        {
            return serializedPlan;
        }

        public SplitIdentifier getSplitIdentifier()
        {
            return splitIdentifier;
        }

        @Override
        public boolean equals(Object o)
        {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            CacheKey cacheKey = (CacheKey) o;
            return Objects.equals(serializedPlan, cacheKey.serializedPlan) &&
                    Objects.equals(splitIdentifier, cacheKey.splitIdentifier);
        }

        @Override
        public int hashCode()
        {
            return Objects.hash(serializedPlan, splitIdentifier);
        }
    }

    // Deletes the evicted entry's backing file off the caller's thread.
    private class CacheRemovalListener
            implements RemovalListener<CacheKey, Path>
    {
        @Override
        public void onRemoval(RemovalNotification<CacheKey, Path> notification)
        {
            removalExecutor.submit(() -> tryDeleteFile(notification.getValue()));
        }
    }
}
| EvilMcJerkface/presto | presto-main/src/main/java/com/facebook/presto/operator/FileFragmentResultCacheManager.java | Java | apache-2.0 | 10,483 |
package org.opencommercesearch.client.impl;
import java.util.Date;
/**
 * Represents a sku's availability.
 *
 * @author rmerizalde
 */
public class Availability {

    // Stock status values a sku can report.
    public enum Status {
        InStock,
        OutOfStock,
        PermanentlyOutOfStock,
        Backorderable,
        Preorderable
    }

    // Current stock status; see Status.
    private Status status;
    // Units currently in stock. May be null when unknown.
    private Long stockLevel;
    // Units available on backorder. May be null when unknown.
    private Long backorderLevel;
    // Date associated with this availability record (semantics set by caller).
    private Date date;

    public Date getDate() {
        return date;
    }

    public void setDate(Date date) {
        this.date = date;
    }

    public Status getStatus() {
        return status;
    }

    public void setStatus(Status status) {
        this.status = status;
    }

    public Long getStockLevel() {
        return stockLevel;
    }

    public void setStockLevel(Long stockLevel) {
        this.stockLevel = stockLevel;
    }

    public Long getBackorderLevel() {
        return backorderLevel;
    }

    public void setBackorderLevel(Long backorderLevel) {
        this.backorderLevel = backorderLevel;
    }
}
| madickson/opencommercesearch | opencommercesearch-sdk-java/src/main/java/org/opencommercesearch/client/impl/Availability.java | Java | apache-2.0 | 947 |
package distribution // import "github.com/tiborvass/docker/distribution"
import (
"bufio"
"compress/gzip"
"context"
"fmt"
"io"
"github.com/docker/distribution/reference"
"github.com/tiborvass/docker/distribution/metadata"
"github.com/tiborvass/docker/pkg/progress"
"github.com/tiborvass/docker/registry"
"github.com/sirupsen/logrus"
)
// Pusher is an interface that abstracts pushing for different API versions.
type Pusher interface {
	// Push tries to push the image configured at the creation of Pusher.
	// Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint.
	//
	// TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic.
	Push(ctx context.Context) error
}

// compressionBufSize is 32KiB.
// NOTE(review): presumably the buffer size used with the bufio/gzip writers
// imported above -- usage is outside this chunk, confirm before changing.
const compressionBufSize = 32768
// NewPusher creates a Pusher for the given endpoint. Only v2 registries are
// actually supported: a v1 endpoint produces a descriptive error, as does any
// unrecognized protocol version. The remaining parameters configure the
// underlying v2 pusher and are used during the actual push operation.
func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) {
	if endpoint.Version == registry.APIVersion2 {
		return &v2Pusher{
			v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore),
			ref:               ref,
			endpoint:          endpoint,
			repoInfo:          repoInfo,
			config:            imagePushConfig,
		}, nil
	}

	if endpoint.Version == registry.APIVersion1 {
		return nil, fmt.Errorf("protocol version %d no longer supported. Please contact admins of registry %s", endpoint.Version, endpoint.URL)
	}

	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
}
// Push initiates a push operation on ref.
// ref is the specific variant of the image to be pushed.
// If no tag is provided, all tags will be pushed.
func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error {
// FIXME: Allow to interrupt current push when new push of same image is done.
// Resolve the Repository name from fqn to RepositoryInfo
repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref)
if err != nil {
return err
}
endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(reference.Domain(repoInfo.Name))
if err != nil {
return err
}
progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to repository [%s]", repoInfo.Name.Name())
associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo.Name)
if len(associations) == 0 {
return fmt.Errorf("An image does not exist locally with the tag: %s", reference.FamiliarName(repoInfo.Name))
}
var (
lastErr error
// confirmedV2 is set to true if a push attempt managed to
// confirm that it was talking to a v2 registry. This will
// prevent fallback to the v1 protocol.
confirmedV2 bool
// confirmedTLSRegistries is a map indicating which registries
// are known to be using TLS. There should never be a plaintext
// retry for any of these.
confirmedTLSRegistries = make(map[string]struct{})
)
for _, endpoint := range endpoints {
if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 {
continue
}
if confirmedV2 && endpoint.Version == registry.APIVersion1 {
logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
continue
}
if endpoint.URL.Scheme != "https" {
if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
continue
}
}
logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name.Name(), endpoint.URL, endpoint.Version)
pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig)
if err != nil {
lastErr = err
continue
}
if err := pusher.Push(ctx); err != nil {
// Was this push cancelled? If so, don't try to fall
// back.
select {
case <-ctx.Done():
default:
if fallbackErr, ok := err.(fallbackError); ok {
confirmedV2 = confirmedV2 || fallbackErr.confirmedV2
if fallbackErr.transportOK && endpoint.URL.Scheme == "https" {
confirmedTLSRegistries[endpoint.URL.Host] = struct{}{}
}
err = fallbackErr.err
lastErr = err
logrus.Infof("Attempting next endpoint for push after error: %v", err)
continue
}
}
logrus.Errorf("Not continuing with push after error: %v", err)
return err
}
imagePushConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push")
return nil
}
if lastErr == nil {
lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.Name.Name())
}
return lastErr
}
// compress returns an io.ReadCloser which will supply a compressed version of
// the provided Reader. The caller must close the ReadCloser after reading the
// compressed data.
//
// Note that this function returns a reader instead of taking a writer as an
// argument so that it can be used with httpBlobWriter's ReadFrom method.
// Using httpBlobWriter's Write method would send a PATCH request for every
// Write call.
//
// The second return value is a channel that gets closed when the goroutine
// is finished. This allows the caller to make sure the goroutine finishes
// before it releases any resources connected with the reader that was
// passed in.
// compress returns an io.ReadCloser supplying a gzip-compressed view of in.
// The caller must close the ReadCloser after reading the compressed data.
//
// A reader is returned (rather than taking a writer) so the result can be
// used with httpBlobWriter's ReadFrom method; using its Write method would
// issue a PATCH request per call.
//
// The returned channel is closed once the compression goroutine finishes,
// letting the caller know when it is safe to release resources tied to the
// reader that was passed in.
func compress(in io.Reader) (io.ReadCloser, chan struct{}) {
	done := make(chan struct{})
	pr, pw := io.Pipe()

	// Buffer writes so the HTTP layer is not flooded with tiny chunks.
	buffered := bufio.NewWriterSize(pw, compressionBufSize)
	gz := gzip.NewWriter(buffered)

	go func() {
		defer close(done)

		_, err := io.Copy(gz, in)
		if err == nil {
			err = gz.Close()
		}
		if err == nil {
			err = buffered.Flush()
		}
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.Close()
	}()

	return pr, done
}
| tiborvass/docker | distribution/push.go | GO | apache-2.0 | 6,226 |
#pragma once
#ifndef GEODE_INTEGRATION_TEST_QUERYHELPER_H_
#define GEODE_INTEGRATION_TEST_QUERYHELPER_H_
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <geode/SystemProperties.hpp>
#include <ace/OS.h>
#include "DistributedSystemImpl.hpp"
#include "testobject/Portfolio.hpp"
#include "testobject/Position.hpp"
#include "testobject/PdxType.hpp"
#include "testobject/PortfolioPdx.hpp"
#include "testobject/PositionPdx.hpp"
#include <geode/ResultSet.hpp>
#include <geode/StructSet.hpp>
#include "CacheRegionHelper.hpp"
#include "CacheImpl.hpp"
//#include <geode/Struct.hpp>
//#ifndef ROOT_NAME
// ROOT_NAME+++ DEFINE ROOT_NAME before including QueryHelper.hpp
//#endif
#ifndef ROOT_SCOPE
#define ROOT_SCOPE LOCAL
#endif
using namespace apache::geode::client;
using namespace testData;
using namespace PdxTests;
using namespace testobject;
// QueryHelper seeds test regions with Portfolio/Position data (regular and
// PDX-serialized variants) and validates the result/struct sets returned by
// queries. It is used as a process-wide singleton by the query tests.
class QueryHelper {
 public:
  static QueryHelper* singleton;

  // Returns the process-wide helper, creating it on first use.
  // NOTE(review): not thread-safe; assumed to be called from a single test
  // thread.
  static QueryHelper& getHelper() {
    if (singleton == nullptr) {
      singleton = new QueryHelper();
    }
    return *singleton;
  }

  // Defaults: one set of 20 portfolios and one set of 20 positions.
  QueryHelper() {
    portfolioSetSize = 20;
    portfolioNumSets = 1;
    positionSetSize = 20;
    positionNumSets = 1;
  }

  virtual ~QueryHelper() {}

  // Population helpers; definitions follow later in this header.
  virtual void populatePortfolioData(
      std::shared_ptr<Region>& pregion, size_t setSize, size_t numSets,
      int32_t objSize = 1, std::shared_ptr<CacheableStringArray> nm = nullptr);
  virtual void populatePositionData(std::shared_ptr<Region>& pregion,
                                    size_t setSize, size_t numSets);
  virtual void populatePortfolioPdxData(std::shared_ptr<Region>& pregion,
                                        size_t setSize, size_t numSets,
                                        int32_t objSize = 1,
                                        char** nm = nullptr);
  virtual void populatePositionPdxData(std::shared_ptr<Region>& pregion,
                                       size_t setSize, size_t numSets);

  virtual void populatePDXObject(std::shared_ptr<Region>& pregion);
  virtual void getPDXObject(std::shared_ptr<Region>& pregion);

  // Validators: return true when the result set's row (and field) counts
  // match the expectations.
  virtual bool verifyRS(std::shared_ptr<SelectResults>& resultset,
                        size_t rowCount);
  virtual bool verifySS(std::shared_ptr<SelectResults>& structset,
                        size_t rowCount, int32_t fieldCount);

  // utility methods
  virtual size_t getPortfolioSetSize() { return portfolioSetSize; }
  virtual size_t getPortfolioNumSets() { return portfolioNumSets; }
  virtual size_t getPositionSetSize() { return positionSetSize; }
  virtual size_t getPositionNumSets() { return positionNumSets; }

  // The four predicates below previously duplicated the same linear scan;
  // they now share hasConstantRows().
  bool isExpectedRowsConstantRS(int queryindex) {
    return hasConstantRows(constantExpectedRowsRS,
                           sizeof(constantExpectedRowsRS) / sizeof(int),
                           queryindex);
  }

  bool isExpectedRowsConstantPQRS(int queryindex) {
    return hasConstantRows(constantExpectedRowsPQRS,
                           sizeof(constantExpectedRowsPQRS) / sizeof(int),
                           queryindex);
  }

  bool isExpectedRowsConstantSS(int queryindex) {
    return hasConstantRows(constantExpectedRowsSS,
                           sizeof(constantExpectedRowsSS) / sizeof(int),
                           queryindex);
  }

  bool isExpectedRowsConstantSSPQ(int queryindex) {
    return hasConstantRows(constantExpectedRowsSSPQ,
                           sizeof(constantExpectedRowsSSPQ) / sizeof(int),
                           queryindex);
  }

 private:
  // Scans (back to front, matching the original loops) for queryindex in an
  // array of query indexes known to return a constant number of rows; logs
  // and returns true on a hit.
  static bool hasConstantRows(const int* indexes, size_t count,
                              int queryindex) {
    for (size_t i = count; i > 0; i--) {
      if (indexes[i - 1] == queryindex) {
        printf("index %d is having constant rows \n", indexes[i - 1]);
        return true;
      }
    }
    return false;
  }

  size_t portfolioSetSize;
  size_t portfolioNumSets;
  size_t positionSetSize;
  size_t positionNumSets;
};

QueryHelper* QueryHelper::singleton = nullptr;
//===========================================================================================
void QueryHelper::populatePortfolioData(
std::shared_ptr<Region>& rptr, size_t setSize, size_t numSets,
int32_t objSize, std::shared_ptr<CacheableStringArray> nm) {
// lets reset the counter for uniform population of position objects
Position::resetCounter();
for (size_t set = 1; set <= numSets; set++) {
for (size_t current = 1; current <= setSize; current++) {
auto port = std::make_shared<Portfolio>(static_cast<int32_t>(current),
objSize, nm);
char portname[100] = {0};
ACE_OS::sprintf(portname, "port%zd-%zd", set, current);
auto keyport = CacheableKey::create(portname);
// printf(" QueryHelper::populatePortfolioData creating key = %s and
// puting data \n",portname);
rptr->put(keyport, port);
}
}
// portfolioSetSize = setSize; portfolioNumSets = numSets; objectSize =
// objSize;
printf("all puts done \n");
}
// Security ticker symbols cycled through when generating Position objects.
const char* secIds[] = {"SUN", "IBM", "YHOO", "GOOG", "MSFT",
                        "AOL", "APPL", "ORCL", "SAP", "DELL"};
void QueryHelper::populatePositionData(std::shared_ptr<Region>& rptr,
size_t setSize, size_t numSets) {
int numSecIds = sizeof(secIds) / sizeof(char*);
for (size_t set = 1; set <= numSets; set++) {
for (size_t current = 1; current <= setSize; current++) {
auto pos = std::make_shared<Position>(
secIds[current % numSecIds], static_cast<int32_t>(current * 100));
char posname[100] = {0};
ACE_OS::sprintf(posname, "pos%zd-%zd", set, current);
auto keypos = CacheableKey::create(posname);
rptr->put(keypos, pos);
}
}
// positionSetSize = setSize; positionNumSets = numSets;
}
// Fills rptr with numSets * setSize PortfolioPdx entries keyed
// "port<set>-<n>". objSize controls the payload size; the name array
// parameter is accepted for signature compatibility but unused.
void QueryHelper::populatePortfolioPdxData(std::shared_ptr<Region>& rptr,
                                           size_t setSize, size_t numSets,
                                           int32_t objSize, char**) {
  // lets reset the counter for uniform population of position objects
  PositionPdx::resetCounter();

  for (size_t set = 1; set <= numSets; set++) {
    for (size_t current = 1; current <= setSize; current++) {
      auto port = std::make_shared<PortfolioPdx>(static_cast<int32_t>(current),
                                                 objSize);

      char portname[100] = {0};
      ACE_OS::sprintf(portname, "port%zd-%zd", set, current);

      auto keyport = CacheableKey::create(portname);
      rptr->put(keyport, port);
      // BUGFIX: 'current' is size_t, so %zd must be used instead of %d
      // (mismatched specifier is undefined behavior on LP64 platforms).
      LOGDEBUG(
          "populatePortfolioPdxData:: Put for iteration current = %zd done",
          current);
    }
  }
  // portfolioSetSize = setSize; portfolioNumSets = numSets; objectSize =
  // objSize;

  printf("all puts done \n");
}
void QueryHelper::populatePositionPdxData(std::shared_ptr<Region>& rptr,
size_t setSize, size_t numSets) {
auto numSecIds = sizeof(secIds) / sizeof(char*);
for (size_t set = 1; set <= numSets; set++) {
for (size_t current = 1; current <= setSize; current++) {
auto pos = std::make_shared<PositionPdx>(
secIds[current % numSecIds], static_cast<int32_t>(current * 100));
char posname[100] = {0};
ACE_OS::sprintf(posname, "pos%zd-%zd", set, current);
auto keypos = CacheableKey::create(posname);
rptr->put(keypos, pos);
}
}
// positionSetSize = setSize; positionNumSets = numSets;
}
// Round-trips a PdxTests::PdxType instance through the region: registers the
// PDX type, puts it under key "ABC", destroys the local copy, then fetches it
// back (forcing a server round trip) and logs a few of its fields.
void QueryHelper::populatePDXObject(std::shared_ptr<Region>& rptr) {
  // Register PdxType Object
  auto cacheImpl = CacheRegionHelper::getCacheImpl(&rptr->getCache());
  cacheImpl->getSerializationRegistry()->addPdxType(
      PdxTests::PdxType::createDeserializable);
  LOG("PdxObject Registered Successfully....");

  // Creating object of type PdxObject
  auto pdxobj = std::make_shared<PdxTests::PdxType>();
  auto keyport = CacheableKey::create("ABC");

  // PUT Operation
  rptr->put(keyport, pdxobj);

  // locally destroy PdxObject so the get() below has to hit the server
  rptr->localDestroy(keyport);
  LOG("localDestroy() operation....Done");

  // Remote GET for PdxObject
  auto obj2 = std::dynamic_pointer_cast<PdxTests::PdxType>(rptr->get(keyport));

  LOGINFO("get... Result-1: Returned float=%f, String val = %s double=%lf",
          obj2->getFloat(), obj2->getString().c_str(), obj2->getDouble());
  // LOGINFO("get.. Result-2: Returned BOOL = %d and BYTE = %s SHORT=%d INT=%d",
  // obj2->getBool(), obj2->getByte(), obj2->getShort(), obj2->getInt());

  // TODO
  /*
  ASSERT(obj2->getID1() == 101, "ID1 = 101 expected");
  ASSERT(obj2->getID2() == 201, "ID2 = 201 expected");
  ASSERT(obj2->getID3() == 301, "ID3 = 301 expected");
  */

  LOG("NIL:200:PUT Operation successfully Done....End");
}
// Fetches the PdxType instance stored under key "ABC" (see
// populatePDXObject); used by a second client to exercise cross-client PDX
// deserialization. The fetched object is only logged, not validated here.
void QueryHelper::getPDXObject(std::shared_ptr<Region>& rptr) {
  // Remote GET for PdxObject
  // PdxObject *obj2 = dynamic_cast<PdxObject *> ((rptr->get(keyport)).get());

  auto keyport = CacheableKey::create("ABC");
  LOG("Client-2 PdxObject GET OP Start....");
  auto obj2 = std::dynamic_pointer_cast<PdxTests::PdxType>(rptr->get(keyport));
  LOG("Client-2 PdxObject GET OP Done....");

  /*
  LOGINFO("GET OP Result: BoolVal=%d", obj2->getBool());
  LOGINFO("GET OP Result: ByteVal=%d", obj2->getByte());
  LOGINFO("GET OP Result: ShortVal=%d", obj2->getShort());*/

  // LOGINFO("GET OP Result: IntVal=%d", obj2->getInt());

  /*
  LOGINFO("GET OP Result: LongVal=%ld", obj2->getLong());
  LOGINFO("GET OP Result: FloatVal=%f", obj2->getFloat());
  LOGINFO("GET OP Result: DoubleVal=%lf", obj2->getDouble());
  LOGINFO("GET OP Result: StringVal=%s", obj2->getString());
  */
}
// Returns true when resultSet is a ResultSet containing exactly expectedRows
// rows. Each row is accessed once to make sure the set is fully readable.
bool QueryHelper::verifyRS(std::shared_ptr<SelectResults>& resultSet,
                           size_t expectedRows) {
  if (!std::dynamic_pointer_cast<ResultSet>(resultSet)) {
    return false;
  }

  auto rsptr = std::static_pointer_cast<ResultSet>(resultSet);

  size_t foundRows = 0;
  for (size_t rows = 0; rows < rsptr->size(); rows++) {
    // Touch every row; the value itself is not inspected here.
    auto ser = (*rsptr)[rows];
    foundRows++;
  }

  printf("found rows %zd, expected %zd \n", foundRows, expectedRows);
  // Removed an unused SelectResultsIterator local that was never advanced.
  return foundRows == expectedRows;
}
// Returns true when structSet is a StructSet with exactly expectedRows rows,
// each carrying expectedFields fields. A null/non-StructSet input is accepted
// only when zero rows and zero fields are expected.
bool QueryHelper::verifySS(std::shared_ptr<SelectResults>& structSet,
                           size_t expectedRows, int32_t expectedFields) {
  if (!std::dynamic_pointer_cast<StructSet>(structSet)) {
    if (expectedRows == 0 && expectedFields == 0) {
      return true;  // quite possible we got a null set back.
    }
    printf("we have structSet itself nullptr \n");
    return false;
  }

  auto ssptr = std::static_pointer_cast<StructSet>(structSet);

  size_t foundRows = 0;
  for (SelectResults::Iterator iter = ssptr->begin(); iter != ssptr->end();
       iter++) {
    auto ser = (*iter);
    foundRows++;

    Struct* siptr = dynamic_cast<Struct*>(ser.get());
    if (siptr == nullptr) {
      printf("siptr is nullptr \n\n");
      return false;
    }

    // Count the fields of this row; every row must match expectedFields.
    int32_t foundFields = 0;
    for (int32_t cols = 0; cols < siptr->length(); cols++) {
      auto field = (*siptr)[cols];
      foundFields++;
    }

    if (foundFields != expectedFields) {
      char buffer[1024] = {'\0'};
      // snprintf bounds the write to the buffer (sprintf could overflow).
      snprintf(buffer, sizeof(buffer), "found fields %d, expected fields %d \n",
               foundFields, expectedFields);
      LOG(buffer);
      return false;
    }
  }

  if (foundRows == expectedRows) return true;

  // lets log and return in case of error only situation
  char buffer[1024] = {'\0'};
  snprintf(buffer, sizeof(buffer), "found rows %zd, expected rows %zd\n",
           foundRows, expectedRows);
  LOG(buffer);
  return false;
}
#endif // GEODE_INTEGRATION_TEST_QUERYHELPER_H_
| mmartell/geode-native | cppcache/integration-test/QueryHelper.hpp | C++ | apache-2.0 | 12,719 |
/*
* testdatefmtrange_wo_SN.js - test the date range formatter object Wolof-Senegal
*
* Copyright © 2021, JEDLSoft
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
if (typeof(GregorianDate) === "undefined") {
var GregorianDate = require("../../lib/GregorianDate.js");
}
if (typeof(DateRngFmt) === "undefined") {
var DateRngFmt = require("../../lib/DateRngFmt.js");
}
if (typeof(ilib) === "undefined") {
var ilib = require("../../lib/ilib.js");
}
module.exports.testdatefmtrange_wo_SN = {
setUp: function(callback) {
ilib.clearCache();
callback();
},
testDateRngFmtRangeInDayShort_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "short"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "31-12-2011 - 13:45 – 14:30");
test.done();
},
testDateRngFmtRangeInDayMedium_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "medium"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "31 Des, 2011 - 13:45 – 14:30");
test.done();
},
testDateRngFmtRangeInDayLong_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "long"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "31 Desàmbar, 2011 ci 13:45 – 14:30");
test.done();
},
testDateRngFmtRangeInDayFull_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "full"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "31 Des, 2011 ci 13:45 – 14:30");
test.done();
},
testDateRngFmtRangeNextDayShort_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "short"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 30,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "30-12-2011 - 13:45 – 31-12-2011 - 14:30");
test.done();
},
testDateRngFmtRangeNextDayMedium_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "medium"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 30,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "30 Des, 2011 - 13:45 – 31 Des, 2011 - 14:30");
test.done();
},
testDateRngFmtRangeNextDayLong_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "long"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 30,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "30 Desàmbar, 2011 ci 13:45 – 31 Desàmbar, 2011 ci 14:30");
test.done();
},
testDateRngFmtRangeNextDayFull_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "full"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 30,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "30 Des, 2011 ci 13:45 – 31 Des, 2011 ci 14:30");
test.done();
},
testDateRngFmtRangeMultiDayShort_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "short"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20 – 31-12-2011");
test.done();
},
testDateRngFmtRangeMultiDayMedium_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "medium"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20 – 31 Des, 2011");
test.done();
},
testDateRngFmtRangeMultiDayLong_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "long"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20 – 31 Desàmbar, 2011");
test.done();
},
testDateRngFmtRangeMultiDayFull_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "full"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 12,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20 – 31 Des, 2011");
test.done();
},
testDateRngFmtRangeNextMonthShort_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "short"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20-11 – 31-12-2011");
test.done();
},
testDateRngFmtRangeNextMonthMedium_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "medium"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20 Now – 31 Des, 2011");
test.done();
},
testDateRngFmtRangeNextMonthLong_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "long"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20 Nowàmbar – 31 Desàmbar, 2011");
test.done();
},
testDateRngFmtRangeNextMonthFull_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "full"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2011,
month: 12,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20 Now – 31 Des, 2011");
test.done();
},
testDateRngFmtRangeNextYearShort_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "short"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2012,
month: 1,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20-11-2011 – 31-01-2012");
test.done();
},
testDateRngFmtRangeNextYearMedium_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "medium"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2012,
month: 1,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20 Now, 2011 – 31 Sam, 2012");
test.done();
},
testDateRngFmtRangeNextYearLong_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "long"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2012,
month: 1,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20 Nowàmbar, 2011 – 31 Samwiyee, 2012");
test.done();
},
testDateRngFmtRangeNextYearFull_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "full"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2012,
month: 1,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "20 Now, 2011 – 31 Sam, 2012");
test.done();
},
testDateRngFmtRangeMultiYearShort_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "short"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2014,
month: 1,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "11-2011 – 01-2014");
test.done();
},
testDateRngFmtRangeMultiYearMedium_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "medium"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2014,
month: 1,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "Now, 2011 – Sam, 2014");
test.done();
},
testDateRngFmtRangeMultiYearLong_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "long"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2014,
month: 1,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "Nowàmbar, 2011 – Samwiyee, 2014");
test.done();
},
testDateRngFmtRangeMultiYearFull_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "full"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2014,
month: 1,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "Now, 2011 – Sam, 2014");
test.done();
},
testDateRngFmtManyYearsFull_wo_SN: function(test) {
test.expect(2);
var fmt = new DateRngFmt({locale: "wo-SN", length: "full"});
test.ok(fmt !== null);
var start = new GregorianDate({
year: 2011,
month: 11,
day: 20,
hour: 13,
minute: 45,
second: 0,
millisecond: 0
});
var end = new GregorianDate({
year: 2064,
month: 1,
day: 31,
hour: 14,
minute: 30,
second: 0,
millisecond: 0
});
test.equal(fmt.format(start, end), "2011 – 2064");
test.done();
}
}; | iLib-js/iLib | js/test/daterange/testdatefmtrange_wo_SN.js | JavaScript | apache-2.0 | 19,135 |
package org.usfirst.frc.team4453.robot.commands;
import org.usfirst.frc.team4453.library.Vision;
import org.usfirst.frc.team4453.robot.Robot;
import edu.wpi.first.wpilibj.command.Command;
import edu.wpi.first.wpilibj.smartdashboard.SmartDashboard;
/**
*
*/
/**
 * Command that rotates the chassis toward the vision target named
 * "TheTarget": each cycle it converts the target's horizontal pixel offset
 * into a yaw angle and feeds that into the chassis PID as a new setpoint.
 * Finishes when the chassis auto-turn flag is cleared.
 */
public class DriveWithCamera extends Command {

    public DriveWithCamera() {
        // Use requires() here to declare subsystem dependencies
        requires(Robot.chassis);
    }

    // Called just before this Command runs the first time
    protected void initialize() {
        System.out.println("DriveWithCamera");
        // Zero yaw so setpoints computed in execute() are relative to the
        // heading at the moment this command started.
        Robot.ahrs.zeroYaw();
        Robot.chassis.setPidVel(0);
        Robot.chassis.enableChassisPID();
        Robot.chassis.setAutoTurn(true);
    }

    // Called repeatedly when this Command is scheduled to run
    // TODO: Figure out the 3d coordinant system, so we don't have to use the 2d coordinants, and so we can be camera independant.
    protected void execute() {
        double targetXPos = Vision.getTargetImgPosition("TheTarget").X;
        double targetAngleOffset;
        // NOTE(review): -99.0 appears to be Vision's "target not found"
        // sentinel; in that case we hold the current heading. Confirm
        // against the Vision library.
        if(targetXPos == -99.0)
        {
            targetAngleOffset = 0;
        }
        else
        {
            // Horizontal pixel offset of the target from the image center.
            double targetXOffset = Vision.getTargetImgPosition("TheTarget").X - (Vision.getFOVx()/2);
            // Lots of trig, gets us the number of degrees we need to turn.
            targetAngleOffset = Math.toDegrees(Math.atan(targetXOffset / ((Vision.getFOVx()/2) / Math.tan(Math.toRadians(Vision.getFOV()/2)))));
        }

        //Update the setpoint (does this work?);
        double setpointAngle = Robot.ahrs.getYaw() + targetAngleOffset;
        /*
        if(setpointAngle > 180.0)
        {
            setpointAngle -= 360.0;
        }

        if(setpointAngle < -180.0)
        {
            setpointAngle += 360.0;
        }
        */

        SmartDashboard.putNumber("DriveWithCamera Output ", targetAngleOffset);
        Robot.chassis.chassisSetSetpoint(setpointAngle);
    }

    // Make this return true when this Command no longer needs to run execute()
    protected boolean isFinished() {
        return !Robot.chassis.getAutoTurn();
    }

    // Called once after isFinished returns true
    protected void end() {
        Robot.chassis.disableChassisPID();
        Robot.chassis.setAutoTurn(false);
    }

    // Called when another command which requires one or more of the same
    // subsystems is scheduled to run
    protected void interrupted() {
    }
}
| RedHotChiliBots/FRC4453 | Robot/Programming/FRC2016Robot/src/org/usfirst/frc/team4453/robot/commands/DriveWithCamera.java | Java | apache-2.0 | 2,359 |
/**
* Copyright (C) 2012 52°North Initiative for Geospatial Open Source Software GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.n52.sos.cache;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import org.n52.oxf.valueDomains.time.ITimePeriod;
import org.n52.sos.dataTypes.EnvelopeWrapper;
import org.n52.sos.dataTypes.ObservationOffering;
import org.n52.sos.db.AccessGDB;
/**
 * File-backed, per-database singleton cache of {@link ObservationOffering}s.
 * Each offering is serialized to one line of the cache file, with its fields
 * joined by the "@@" token separator.
 */
public class ObservationOfferingCache extends AbstractEntityCache<ObservationOffering> {
    // Field separator for one serialized offering; also the split pattern
    // used during deserialization.
    private static final String TOKEN_SEP = "@@";
    private static ObservationOfferingCache instance;
    /**
     * Returns the singleton, creating it against the given database name on
     * first use. The dbName argument is ignored on subsequent calls.
     */
    public static synchronized ObservationOfferingCache instance(String dbName) throws FileNotFoundException {
        if (instance == null) {
            instance = new ObservationOfferingCache(dbName);
        }
        return instance;
    }
    /**
     * Returns the singleton, or null if {@link #instance(String)} has not
     * been called yet.
     */
    public static synchronized ObservationOfferingCache instance() throws FileNotFoundException {
        return instance;
    }
    // Set by cancelCurrentExecution(); checked after each retrieved offering
    // to abort a running cache update.
    private boolean cancelled;
    private ObservationOfferingCache(String dbName) throws FileNotFoundException {
        super(dbName);
    }
    @Override
    protected String getCacheFileName() {
        return "observationOfferingsList.cache";
    }
    /**
     * Serializes an offering as one "@@"-separated line: id, name, procedure
     * identifier, encoded envelope, observed properties, encoded time extent.
     *
     * @throws CacheException if the envelope cannot be encoded
     */
    @Override
    protected String serializeEntity(ObservationOffering entity) throws CacheException {
        StringBuilder sb = new StringBuilder();
        sb.append(entity.getId());
        sb.append(TOKEN_SEP);
        sb.append(entity.getName());
        sb.append(TOKEN_SEP);
        sb.append(entity.getProcedureIdentifier());
        sb.append(TOKEN_SEP);
        try {
            sb.append(EnvelopeEncoderDecoder.encode(entity.getObservedArea()));
        } catch (IOException e) {
            throw new CacheException(e);
        }
        sb.append(TOKEN_SEP);
        sb.append(Arrays.toString(entity.getObservedProperties()));
        sb.append(TOKEN_SEP);
        sb.append(TimePeriodEncoder.encode(entity.getTimeExtent()));
        return sb.toString();
    }
    /**
     * Inverse of {@link #serializeEntity}. Returns null for malformed lines
     * that do not contain exactly six fields.
     */
    @Override
    protected ObservationOffering deserializeEntity(String line) {
        String[] values = line.split(TOKEN_SEP);
        if (values == null || values.length != 6) {
            return null;
        }
        String id = values[0].trim();
        String name = values[1].trim();
        String proc = values[2].trim();
        EnvelopeWrapper env = EnvelopeEncoderDecoder.decode(values[3]);
        String[] props = decodeStringArray(values[4]);
        ITimePeriod time = TimePeriodEncoder.decode(values[5]);
        return new ObservationOffering(id, name, props, proc, env, time);
    }
    @Override
    protected boolean mergeWithPreviousEntries() {
        return true;
    }
    /**
     * Streams offerings from the geo database asynchronously, persisting each
     * one into the temporary cache file as it arrives. Returns an empty list
     * because entries are stored via storeTemporaryEntity rather than being
     * collected in memory.
     */
    protected Collection<ObservationOffering> getCollectionFromDAO(AccessGDB geoDB)
            throws IOException {
        this.cancelled = false;
        clearTempCacheFile();
        geoDB.getOfferingAccess().getNetworksAsObservationOfferingsAsync(new OnOfferingRetrieved() {
            int count = 0;
            @Override
            public void retrieveExpectedOfferingsCount(int c) {
                setMaximumEntries(c);
            }
            @Override
            public void retrieveOffering(ObservationOffering oo, int currentOfferingIndex) throws RetrievingCancelledException {
                storeTemporaryEntity(oo);
                setLatestEntryIndex(currentOfferingIndex);
                LOGGER.info(String.format("Added ObservationOffering #%s to the cache.", count++));
                if (cancelled) {
                    // Abort mid-update when a shutdown was requested.
                    throw new RetrievingCancelledException("Cache update cancelled due to shutdown.");
                }
            }
        });
        return Collections.emptyList();
    }
    @Override
    protected AbstractEntityCache<ObservationOffering> getSingleInstance() {
        return instance;
    }
    @Override
    public void cancelCurrentExecution() {
        this.cancelled = true;
    }
}
| 52North/ArcGIS-Server-SOS-Extension | src/main/java/org/n52/sos/cache/ObservationOfferingCache.java | Java | apache-2.0 | 4,089 |
/**
* Copyright 2010 - 2022 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jetbrains.exodus.io;
import jetbrains.exodus.core.dataStructures.Pair;
import jetbrains.exodus.env.Environment;
import jetbrains.exodus.env.EnvironmentConfig;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.ServiceLoader;
/**
* Service provider interface for creation instances of {@linkplain DataReader} and {@linkplain DataWriter}.
* {@linkplain DataReader} and {@linkplain DataWriter} are used by {@code Log} implementation to perform basic
* operations with {@linkplain Block blocks} ({@code .xd} files) and basic read/write/delete operations.
*
* Service provider interface is identified by a fully-qualified name of its implementation. When opening an
* {@linkplain Environment}, {@linkplain #DEFAULT_READER_WRITER_PROVIDER} is used as default provide name. To use a
* custom I/O provider, specify its fully-qualified name as a parameter of {@linkplain EnvironmentConfig#setLogDataReaderWriterProvider}.
*
* On {@linkplain Environment} creation new instance of {@code DataReaderWriterProvider} is created.
*
* @see Block
* @see DataReader
* @see DataWriter
* @see EnvironmentConfig#getLogDataReaderWriterProvider
* @see EnvironmentConfig#setLogDataReaderWriterProvider
* @since 1.3.0
*/
public abstract class DataReaderWriterProvider {
    /**
     * Fully-qualified name of default {@code DataReaderWriteProvider}.
     */
    public static final String DEFAULT_READER_WRITER_PROVIDER = "jetbrains.exodus.io.FileDataReaderWriterProvider";
    /**
     * Fully-qualified name of read-only watching {@code DataReaderWriteProvider}.
     */
    public static final String WATCHING_READER_WRITER_PROVIDER = "jetbrains.exodus.io.WatchingFileDataReaderWriterProvider";
    /**
     * Fully-qualified name of in-memory {@code DataReaderWriteProvider}.
     */
    public static final String IN_MEMORY_READER_WRITER_PROVIDER = "jetbrains.exodus.io.MemoryDataReaderWriterProvider";
    /**
     * Creates pair of new instances of {@linkplain DataReader} and {@linkplain DataWriter} by specified location.
     * What is location depends on the implementation of {@code DataReaderWriterProvider}, e.g. for {@code FileDataReaderWriterProvider}
     * location is a full path on local file system where the database is located.
     *
     * @param location identifies the database in this {@code DataReaderWriterProvider}
     * @return pair of new instances of {@linkplain DataReader} and {@linkplain DataWriter}
     */
    public abstract Pair<DataReader, DataWriter> newReaderWriter(@NotNull final String location);
    /**
     * Returns {@code true} if the {@code DataReaderWriterProvider} creates in-memory {@linkplain DataReader} and {@linkplain DataWriter}.
     *
     * @return {@code true} if the {@code DataReaderWriterProvider} creates in-memory {@linkplain DataReader} and {@linkplain DataWriter}
     */
    public boolean isInMemory() {
        return false;
    }
    /**
     * Returns {@code true} if the {@code DataReaderWriterProvider} creates read-only {@linkplain DataWriter}.
     *
     * @return {@code true} if the {@code DataReaderWriterProvider} creates read-only {@linkplain DataWriter}
     */
    public boolean isReadonly() {
        return false;
    }
    /**
     * Callback method which is called when an environment is been opened/created. Can be used in implementation of
     * the {@code DataReaderWriterProvider} to access directly an {@linkplain Environment} instance,
     * its {@linkplain Environment#getEnvironmentConfig() config}, etc. Creation of {@code environment} is not
     * completed when the method is called.
     *
     * @param environment {@linkplain Environment} instance which is been opened/created using this
     *                    {@code DataReaderWriterProvider}
     */
    public void onEnvironmentCreated(@NotNull final Environment environment) {
    }
    /**
     * Gets a {@code DataReaderWriterProvider} implementation by specified provider name.
     *
     * @param providerName fully-qualified name of {@code DataReaderWriterProvider} implementation
     * @return {@code DataReaderWriterProvider} implementation or {@code null} if the service could not be loaded
     */
    @Nullable
    public static DataReaderWriterProvider getProvider(@NotNull final String providerName) {
        ServiceLoader<DataReaderWriterProvider> serviceLoader = ServiceLoader.load(DataReaderWriterProvider.class);
        if (!serviceLoader.iterator().hasNext()) {
            // Fall back to this class' own class loader when the context
            // class loader cannot see any provider implementations.
            serviceLoader = ServiceLoader.load(DataReaderWriterProvider.class, DataReaderWriterProvider.class.getClassLoader());
        }
        for (DataReaderWriterProvider provider : serviceLoader) {
            // Compare with providerName as the receiver: getCanonicalName()
            // can return null (e.g. for anonymous or local classes), and
            // equalsIgnoreCase(null) is simply false rather than an NPE.
            if (providerName.equalsIgnoreCase(provider.getClass().getCanonicalName())) {
                return provider;
            }
        }
        return null;
    }
}
| JetBrains/xodus | openAPI/src/main/java/jetbrains/exodus/io/DataReaderWriterProvider.java | Java | apache-2.0 | 5,499 |
package org.commcare.tasks;
import android.content.Context;
import android.text.TextUtils;
import android.text.format.DateUtils;
import android.util.Pair;
import org.commcare.CommCareApplication;
import org.commcare.models.AndroidSessionWrapper;
import org.commcare.models.database.AndroidSandbox;
import org.commcare.models.database.SqlStorage;
import org.commcare.android.database.user.models.FormRecord;
import org.commcare.android.database.user.models.SessionStateDescriptor;
import org.commcare.suite.model.Text;
import org.commcare.tasks.templates.ManagedAsyncTask;
import org.commcare.util.FormDataUtil;
import org.commcare.utils.AndroidCommCarePlatform;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Set;
/**
* Loads textual information for a list of FormRecords.
* <p/>
* This text currently includes the form name, record title, and last modified
* date
*
* @author ctsims
*/
public class FormRecordLoaderTask extends ManagedAsyncTask<FormRecord, Pair<FormRecord, ArrayList<String>>, Integer> {
    // Maps session descriptors to their previously computed record titles;
    // shared between spawned copies of this task so titles are not recomputed.
    private Hashtable<String, String> descriptorCache;
    private final SqlStorage<SessionStateDescriptor> descriptorStorage;
    private final AndroidCommCarePlatform platform;
    // Maps FormRecord ID to the loaded, query-able text for that record.
    private Hashtable<Integer, String[]> searchCache;
    private final Context context;
    // Functions to call when some or all of the data has been loaded. Data
    // can be loaded normally, or be given precedence (priority), determining
    // which callback is dispatched to the listeners.
    private final ArrayList<FormRecordLoadListener> listeners = new ArrayList<>();
    // These are all synchronized together
    final private Queue<FormRecord> priorityQueue = new LinkedList<>();
    // The IDs of FormRecords that have been loaded
    private final Set<Integer> loaded = new HashSet<>();
    // Maps form namespace (unique id for forms) to their form title
    // (entry-point text). Needed because FormRecords don't have form title
    // info, but do have the namespace.
    private Hashtable<String, Text> formNames;
    // Is the background task done loading all the FormRecord information?
    private boolean loadingComplete = false;
    public FormRecordLoaderTask(Context c, SqlStorage<SessionStateDescriptor> descriptorStorage, AndroidCommCarePlatform platform) {
        this(c, descriptorStorage, null, platform);
    }
    private FormRecordLoaderTask(Context c, SqlStorage<SessionStateDescriptor> descriptorStorage, Hashtable<String, String> descriptorCache, AndroidCommCarePlatform platform) {
        this.context = c;
        this.descriptorStorage = descriptorStorage;
        this.descriptorCache = descriptorCache;
        this.platform = platform;
    }
    /**
     * Create a copy of this loader task.
     * The copy shares this task's descriptor cache and listener list.
     */
    public FormRecordLoaderTask spawn() {
        FormRecordLoaderTask task = new FormRecordLoaderTask(context, descriptorStorage, descriptorCache, platform);
        task.setListeners(listeners);
        return task;
    }
    /**
     * Pass in hashtables that will be used to store data that is loaded.
     *
     * @param searchCache maps FormRecord ID to an array of query-able form descriptor text
     * @param formNames map from form namespaces to their titles
     */
    public void init(Hashtable<Integer, String[]> searchCache, Hashtable<String, Text> formNames) {
        this.searchCache = searchCache;
        if (descriptorCache == null) {
            descriptorCache = new Hashtable<>();
        }
        priorityQueue.clear();
        loaded.clear();
        this.formNames = formNames;
    }
    /**
     * Set the listeners list, whose callbacks will be executed once the data
     * has been loaded.
     *
     * @param listeners a list of objects to call when data is done loading
     */
    private void setListeners(ArrayList<FormRecordLoadListener> listeners) {
        this.listeners.addAll(listeners);
    }
    /**
     * Add a listener to the list that is called once the data has been loaded.
     *
     * @param listener an objects to call when data is done loading
     */
    public void addListener(FormRecordLoadListener listener) {
        this.listeners.add(listener);
    }
    /**
     * Loads descriptive text for each record, draining the priority queue
     * first on each iteration and skipping records already loaded through it.
     * Publishes one progress update per loaded record.
     *
     * @return 1 (unused success marker)
     */
    @Override
    protected Integer doInBackground(FormRecord... params) {
        // Load text information for every FormRecord passed in, unless task is
        // cancelled before that.
        FormRecord current;
        int loadedFormCount = 0;
        while (loadedFormCount < params.length && !isCancelled()) {
            synchronized (priorityQueue) {
                //If we have one to do immediately, grab it
                if (!priorityQueue.isEmpty()) {
                    current = priorityQueue.poll();
                } else {
                    current = params[loadedFormCount++];
                }
                if (loaded.contains(current.getID())) {
                    // skip if we already loaded this record due to priority queue
                    continue;
                }
            }
            // load text about this record: last modified date, title of the record, and form name
            ArrayList<String> recordTextDesc = loadRecordText(current);
            loaded.add(current.getID());
            // Copy data into search task and notify anything waiting on this
            // record.
            this.publishProgress(new Pair<>(current, recordTextDesc));
        }
        return 1;
    }
    /**
     * Builds the searchable text for one record: last-modified date, record
     * title, and (when the namespace is known) the form's name.
     */
    private ArrayList<String> loadRecordText(FormRecord current) {
        ArrayList<String> recordTextDesc = new ArrayList<>();
        // Get the date in a searchable format.
        recordTextDesc.add(DateUtils.formatDateTime(context, current.lastModified().getTime(), DateUtils.FORMAT_NO_MONTH_DAY | DateUtils.FORMAT_NO_YEAR).toLowerCase());
        String dataTitle = current.getDescriptor();
        if (TextUtils.isEmpty(dataTitle)) {
            dataTitle = loadDataTitle(current.getID());
        }
        recordTextDesc.add(dataTitle);
        if (formNames.containsKey(current.getFormNamespace())) {
            Text name = formNames.get(current.getFormNamespace());
            recordTextDesc.add(name.evaluate());
        }
        return recordTextDesc;
    }
    /**
     * Computes a record's title from its linked session state descriptor,
     * caching results per descriptor string. Returns "" when no descriptor
     * exists, and "[Unavailable]" when title evaluation fails.
     */
    private String loadDataTitle(int formRecordId) {
        // Grab our record hash
        SessionStateDescriptor ssd = null;
        try {
            ssd = descriptorStorage.getRecordForValue(SessionStateDescriptor.META_FORM_RECORD_ID, formRecordId);
        } catch (NoSuchElementException nsee) {
            //s'all good
        }
        String dataTitle = "";
        if (ssd != null) {
            String descriptor = ssd.getSessionDescriptor();
            if (!descriptorCache.containsKey(descriptor)) {
                AndroidSessionWrapper asw = new AndroidSessionWrapper(platform);
                asw.loadFromStateDescription(ssd);
                try {
                    dataTitle =
                            FormDataUtil.getTitleFromSession(new AndroidSandbox(CommCareApplication.instance()),
                                    asw.getSession(), asw.getEvaluationContext());
                } catch (RuntimeException e) {
                    dataTitle = "[Unavailable]";
                }
                if (dataTitle == null) {
                    dataTitle = "";
                }
                descriptorCache.put(descriptor, dataTitle);
            } else {
                return descriptorCache.get(descriptor);
            }
        }
        return dataTitle;
    }
    @Override
    protected void onPreExecute() {
        super.onPreExecute();
        // Tell users of the data being loaded that it isn't ready yet.
        this.loadingComplete = false;
    }
    /**
     * Has all the FormRecords' textual data been loaded yet? Used to let
     * users of the data only start accessing it once it is all there.
     */
    public boolean doneLoadingFormRecords() {
        return this.loadingComplete;
    }
    @Override
    protected void onPostExecute(Integer result) {
        super.onPostExecute(result);
        this.loadingComplete = true;
        for (FormRecordLoadListener listener : this.listeners) {
            if (listener != null) {
                listener.notifyLoaded();
            }
        }
        // free up things we don't need to spawn new tasks
        priorityQueue.clear();
        loaded.clear();
        formNames = null;
    }
    /**
     * Runs on the UI thread per loaded record: stores the record's loaded
     * text into the search cache and notifies listeners.
     */
    @Override
    protected void onProgressUpdate(Pair<FormRecord, ArrayList<String>>... values) {
        super.onProgressUpdate(values);
        // copy a single form record's data out of method arguments
        String[] vals = new String[values[0].second.size()];
        for (int i = 0; i < vals.length; ++i) {
            vals[i] = values[0].second.get(i);
        }
        // store the loaded data in the search cache
        this.searchCache.put(values[0].first.getID(), vals);
        for (FormRecordLoadListener listener : this.listeners) {
            if (listener != null) {
                // TODO PLM: pretty sure loaded.contains(values[0].first) is
                // always true at this point.
                listener.notifyPriorityLoaded(values[0].first,
                        loaded.contains(values[0].first.getID()));
            }
        }
    }
    /**
     * Asks the task to load the given record's text ahead of the remaining
     * records.
     *
     * @return false if the record was already loaded; true if it is now (or
     * was already) queued for priority loading.
     */
    public boolean registerPriority(FormRecord record) {
        synchronized (priorityQueue) {
            if (loaded.contains(record.getID())) {
                return false;
            } else if (priorityQueue.contains(record)) {
                // if we already have it in the queue, just move along
                return true;
            } else {
                priorityQueue.add(record);
                return true;
            }
        }
    }
}
| dimagi/commcare-android | app/src/org/commcare/tasks/FormRecordLoaderTask.java | Java | apache-2.0 | 10,155 |
/* Copyright 2019 Telstra Open Source
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openkilda.wfm.topology.network.controller;
import org.openkilda.persistence.PersistenceManager;
import org.openkilda.persistence.repositories.FeatureTogglesRepository;
import org.openkilda.wfm.share.model.Endpoint;
import org.openkilda.wfm.share.utils.AbstractBaseFsm;
import org.openkilda.wfm.share.utils.FsmExecutor;
import org.openkilda.wfm.topology.network.service.IBfdGlobalToggleCarrier;
import lombok.Builder;
import lombok.Value;
import org.squirrelframework.foundation.fsm.StateMachineBuilder;
import org.squirrelframework.foundation.fsm.StateMachineBuilderFactory;
/**
 * Per-endpoint state machine that filters raw BFD events against the global
 * "use BFD for ISL integrity check" feature toggle: up/down/kill/fail
 * notifications are forwarded to the carrier according to the current
 * enabled/disabled state (see the transition table in the factory).
 */
public class BfdGlobalToggleFsm
        extends AbstractBaseFsm<BfdGlobalToggleFsm,
        BfdGlobalToggleFsm.BfdGlobalToggleFsmState,
        BfdGlobalToggleFsm.BfdGlobalToggleFsmEvent,
        BfdGlobalToggleFsm.BfdGlobalToggleFsmContext> {
    private final IBfdGlobalToggleCarrier carrier;
    private final Endpoint endpoint;
    public static BfdGlobalToggleFsmFactory factory(PersistenceManager persistenceManager) {
        return new BfdGlobalToggleFsmFactory(persistenceManager);
    }
    // -- FSM actions --
    /** Forwards a filtered BFD "kill" notification for this endpoint. */
    public void emitBfdKill(BfdGlobalToggleFsmState from, BfdGlobalToggleFsmState to, BfdGlobalToggleFsmEvent event,
                            BfdGlobalToggleFsmContext context) {
        log.info("BFD event KILL for {}", endpoint);
        carrier.filteredBfdKillNotification(endpoint);
    }
    /** Forwards a filtered BFD "up" notification for this endpoint. */
    public void emitBfdUp(BfdGlobalToggleFsmState from, BfdGlobalToggleFsmState to, BfdGlobalToggleFsmEvent event,
                          BfdGlobalToggleFsmContext context) {
        log.info("BFD event UP for {}", endpoint);
        carrier.filteredBfdUpNotification(endpoint);
    }
    /** Forwards a filtered BFD "down" notification for this endpoint. */
    public void emitBfdDown(BfdGlobalToggleFsmState from, BfdGlobalToggleFsmState to, BfdGlobalToggleFsmEvent event,
                            BfdGlobalToggleFsmContext context) {
        log.info("BFD event DOWN for {}", endpoint);
        carrier.filteredBfdDownNotification(endpoint);
    }
    /** Forwards a filtered BFD "fail" notification for this endpoint. */
    public void emitBfdFail(BfdGlobalToggleFsmState from, BfdGlobalToggleFsmState to, BfdGlobalToggleFsmEvent event,
                            BfdGlobalToggleFsmContext context) {
        log.info("BFD event FAIL for {}", endpoint);
        carrier.filteredBfdFailNotification(endpoint);
    }
    // -- private/service methods --
    // -- service data types --
    public BfdGlobalToggleFsm(IBfdGlobalToggleCarrier carrier, Endpoint endpoint) {
        this.carrier = carrier;
        this.endpoint = endpoint;
    }
    /**
     * Builds the shared transition table once and produces FSM instances
     * whose initial state reflects the persisted global toggle value.
     */
    public static class BfdGlobalToggleFsmFactory {
        private final FeatureTogglesRepository featureTogglesRepository;
        private final StateMachineBuilder<BfdGlobalToggleFsm, BfdGlobalToggleFsmState, BfdGlobalToggleFsmEvent,
                BfdGlobalToggleFsmContext> builder;
        BfdGlobalToggleFsmFactory(PersistenceManager persistenceManager) {
            featureTogglesRepository = persistenceManager.getRepositoryFactory().createFeatureTogglesRepository();
            final String emitBfdKillMethod = "emitBfdKill";
            final String emitBfdFailMethod = "emitBfdFail";
            builder = StateMachineBuilderFactory.create(
                    BfdGlobalToggleFsm.class, BfdGlobalToggleFsmState.class, BfdGlobalToggleFsmEvent.class,
                    BfdGlobalToggleFsmContext.class,
                    // extra parameters
                    IBfdGlobalToggleCarrier.class, Endpoint.class);
            // DOWN_ENABLED
            builder.transition()
                    .from(BfdGlobalToggleFsmState.DOWN_ENABLED).to(BfdGlobalToggleFsmState.DOWN_DISABLED)
                    .on(BfdGlobalToggleFsmEvent.DISABLE);
            builder.transition()
                    .from(BfdGlobalToggleFsmState.DOWN_ENABLED).to(BfdGlobalToggleFsmState.UP_ENABLED)
                    .on(BfdGlobalToggleFsmEvent.BFD_UP);
            builder.internalTransition()
                    .within(BfdGlobalToggleFsmState.DOWN_ENABLED).on(BfdGlobalToggleFsmEvent.BFD_KILL)
                    .callMethod(emitBfdKillMethod);
            builder.internalTransition()
                    .within(BfdGlobalToggleFsmState.DOWN_ENABLED).on(BfdGlobalToggleFsmEvent.BFD_FAIL)
                    .callMethod(emitBfdFailMethod);
            // DOWN_DISABLED
            builder.transition()
                    .from(BfdGlobalToggleFsmState.DOWN_DISABLED).to(BfdGlobalToggleFsmState.DOWN_ENABLED)
                    .on(BfdGlobalToggleFsmEvent.ENABLE);
            builder.transition()
                    .from(BfdGlobalToggleFsmState.DOWN_DISABLED).to(BfdGlobalToggleFsmState.UP_DISABLED)
                    .on(BfdGlobalToggleFsmEvent.BFD_UP);
            builder.internalTransition()
                    .within(BfdGlobalToggleFsmState.DOWN_DISABLED).on(BfdGlobalToggleFsmEvent.BFD_FAIL)
                    .callMethod(emitBfdFailMethod);
            // UP_ENABLED
            builder.transition()
                    .from(BfdGlobalToggleFsmState.UP_ENABLED).to(BfdGlobalToggleFsmState.UP_DISABLED)
                    .on(BfdGlobalToggleFsmEvent.DISABLE)
                    .callMethod(emitBfdKillMethod);
            builder.transition()
                    .from(BfdGlobalToggleFsmState.UP_ENABLED).to(BfdGlobalToggleFsmState.DOWN_ENABLED)
                    .on(BfdGlobalToggleFsmEvent.BFD_DOWN)
                    .callMethod("emitBfdDown");
            builder.transition()
                    .from(BfdGlobalToggleFsmState.UP_ENABLED).to(BfdGlobalToggleFsmState.DOWN_ENABLED)
                    .on(BfdGlobalToggleFsmEvent.BFD_KILL)
                    .callMethod(emitBfdKillMethod);
            builder.onEntry(BfdGlobalToggleFsmState.UP_ENABLED)
                    .callMethod("emitBfdUp");
            // UP_DISABLED
            builder.transition()
                    .from(BfdGlobalToggleFsmState.UP_DISABLED).to(BfdGlobalToggleFsmState.UP_ENABLED)
                    .on(BfdGlobalToggleFsmEvent.ENABLE);
            // note: no notification is emitted here — the toggle is disabled
            builder.transition()
                    .from(BfdGlobalToggleFsmState.UP_DISABLED).to(BfdGlobalToggleFsmState.DOWN_DISABLED)
                    .on(BfdGlobalToggleFsmEvent.BFD_DOWN);
        }
        public FsmExecutor<BfdGlobalToggleFsm, BfdGlobalToggleFsmState, BfdGlobalToggleFsmEvent,
                BfdGlobalToggleFsmContext> produceExecutor() {
            return new FsmExecutor<>(BfdGlobalToggleFsmEvent.NEXT);
        }
        /**
         * Determine initial state and create {@link BfdGlobalToggleFsm} instance.
         */
        public BfdGlobalToggleFsm produce(IBfdGlobalToggleCarrier carrier, Endpoint endpoint) {
            Boolean toggle = featureTogglesRepository.getOrDefault().getUseBfdForIslIntegrityCheck();
            if (toggle == null) {
                throw new IllegalStateException("Unable to identify initial BFD-global-toggle value (it is null at"
                        + " this moment)");
            }
            BfdGlobalToggleFsmState state;
            if (toggle) {
                state = BfdGlobalToggleFsmState.DOWN_ENABLED;
            } else {
                state = BfdGlobalToggleFsmState.DOWN_DISABLED;
            }
            return builder.newStateMachine(state, carrier, endpoint);
        }
    }
    @Value
    @Builder
    public static class BfdGlobalToggleFsmContext { }
    public enum BfdGlobalToggleFsmState {
        // DOWN/UP track the last observed BFD link state; ENABLED/DISABLED
        // track the global feature toggle.
        DOWN_DISABLED, DOWN_ENABLED,
        UP_DISABLED, UP_ENABLED
    }
    public enum BfdGlobalToggleFsmEvent {
        KILL, NEXT,
        ENABLE, DISABLE,
        BFD_UP, BFD_DOWN, BFD_KILL, BFD_FAIL
    }
}
| jonvestal/open-kilda | src-java/network-topology/network-storm-topology/src/main/java/org/openkilda/wfm/topology/network/controller/BfdGlobalToggleFsm.java | Java | apache-2.0 | 8,262 |
using System;
namespace NativeAppFabricConsoleUI
{
internal static class CreateRegionTests
{
internal static void CreateRegionWithNonEmptyString()
{
try
{
Logger.PrintTestStartInformation("Creating Region with non-empty string value");
string myRegion = Guid.NewGuid().ToString();
var result = Program.myDefaultCache.CreateRegion(myRegion);
if (result)
{
Logger.PrintSuccessfulOutcome($"Region {myRegion} successfully created");
}
else
{
Logger.PrintFailureOutcome($"Region {myRegion} could not be created");
}
}
catch (Exception e)
{
Logger.PrintDataCacheException(e);
}
finally
{
Logger.PrintBreakLine();
}
}
internal static void RecreateRegionWithNonEmptyString()
{
try
{
Logger.PrintTestStartInformation("Creating Another Region with Same Name");
string myRegion = Guid.NewGuid().ToString();
var result = Program.myDefaultCache.CreateRegion(myRegion);
result = Program.myDefaultCache.CreateRegion(myRegion);
if (result)
{
Logger.PrintSuccessfulOutcome($"Region {myRegion} successfully recreated");
}
else
{
Logger.PrintFailureOutcome($"Region {myRegion} could not be recreated");
}
}
catch (Exception e)
{
Logger.PrintDataCacheException(e);
}
finally
{
Logger.PrintBreakLine();
}
}
internal static void CreateRegionWithNullString()
{
try
{
Logger.PrintTestStartInformation("Creating Another Region with NULL Region Name");
string myRegion = null;
var result = Program.myDefaultCache.CreateRegion(myRegion);
if (result)
{
Logger.PrintSuccessfulOutcome($"Region successfully created");
}
else
{
Logger.PrintFailureOutcome($"Region could not be created");
}
}
catch (Exception e)
{
Logger.PrintDataCacheException(e);
}
finally
{
Logger.PrintBreakLine();
}
}
}
}
| Alachisoft/NCache-Wrapper-For-AppFabric | samples/AppFabricWrapperTest/NativeAppFabricConsoleUI/RegionLevelBasicOperations/CreateRegionTests.cs | C# | apache-2.0 | 2,744 |
package org.nbone.core.util;
/**
*
* @author thinking
* @version 1.0
* @since 2019-07-20
* org.elasticsearch.common.unit.ByteSizeUnit
*/
public enum ByteSizeUnit {
    BYTES(1L),
    KB(1L << 10),
    MB(1L << 20),
    GB(1L << 30),
    TB(1L << 40),
    PB(1L << 50);

    static final long C0 = 1L;
    static final long C1 = C0 * 1024L;
    static final long C2 = C1 * 1024L;
    static final long C3 = C2 * 1024L;
    static final long C4 = C3 * 1024L;
    static final long C5 = C4 * 1024L;
    static final long MAX = Long.MAX_VALUE;

    // How many bytes a single unit of this size represents.
    private final long bytesPerUnit;

    ByteSizeUnit(long bytesPerUnit) {
        this.bytesPerUnit = bytesPerUnit;
    }

    /**
     * Scale d by m, checking for overflow.
     * This has a short name to make above code more readable.
     */
    static long x(long d, long m, long over) {
        if (d > over) return Long.MAX_VALUE;
        if (d < -over) return Long.MIN_VALUE;
        return d * m;
    }

    /**
     * Converts {@code size} expressed in this unit into the unit whose byte
     * multiplier is {@code targetBytesPerUnit}. Conversions towards smaller
     * units saturate at Long.MAX_VALUE / Long.MIN_VALUE on overflow;
     * conversions towards larger units truncate (integer division).
     */
    private long convert(long size, long targetBytesPerUnit) {
        if (bytesPerUnit == targetBytesPerUnit) {
            return size;
        }
        if (bytesPerUnit > targetBytesPerUnit) {
            long factor = bytesPerUnit / targetBytesPerUnit;
            return x(size, factor, MAX / factor);
        }
        return size / (targetBytesPerUnit / bytesPerUnit);
    }

    public long toBytes(long size) {
        return convert(size, C0);
    }

    public long toKB(long size) {
        return convert(size, C1);
    }

    public long toMB(long size) {
        return convert(size, C2);
    }

    public long toGB(long size) {
        return convert(size, C3);
    }

    public long toTB(long size) {
        return convert(size, C4);
    }

    public long toPB(long size) {
        return convert(size, C5);
    }
}
def emptyLayout(layout):
    """Detach every widget from ``layout``, leaving the layout empty.

    Iterates in reverse so that removals do not shift the indices of items
    not yet visited. Layout items that do not wrap a widget (e.g. spacers or
    nested layouts, for which ``widget()`` returns None) are skipped instead
    of raising AttributeError.
    """
    for i in reversed(range(layout.count())):
        widget = layout.itemAt(i).widget()
        if widget is not None:
            widget.setParent(None)
| fireeye/flare-wmi | python-cim/samples/ui/uicommon.py | Python | apache-2.0 | 121 |
// Copyright (c) 2009 Carl Barron
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <iostream>
#include <sstream>
#include <boost/detail/lightweight_test.hpp>
#include <boost/spirit/include/lex.hpp>
#include <boost/spirit/include/lex_lexertl.hpp>
#include <boost/spirit/include/phoenix.hpp>
namespace lex = boost::spirit::lex;
namespace phoenix = boost::phoenix;
///////////////////////////////////////////////////////////////////////////////
// Two-state lexer driven by semantic actions:
//  * INITIAL: 'A' increments `level`; 'B' passes through; 'C' switches to
//    the "in_dedenting" state, rewinds the match (_end = _start) so the same
//    position is re-read, and suppresses its own token (pass_ignore).
//  * in_dedenting: each match of '.' re-reads the same position while
//    `level` is still non-zero, emitting one 'd' token per decrement; once
//    `level` reaches zero the lexer returns to INITIAL.
template <typename Lexer>
struct multi_tokens : lex::lexer<Lexer>
{
    int level;      // number of pending dedent ('d') tokens to emit on 'C'
    multi_tokens() : level(0)
    {
        using lex::_state;
        using lex::_start;
        using lex::_end;
        using lex::_pass;
        using lex::pass_flags;
        a = "A";
        b = "B";
        c = "C";
        this->self =
              a [ ++phoenix::ref(level) ]
            | b
            | c [
                  _state = "in_dedenting",
                  _end = _start,
                  _pass = pass_flags::pass_ignore
              ]
            ;
        d = ".";
        this->self("in_dedenting") =
              d [
                  if_(--phoenix::ref(level)) [
                      _end = _start
                  ]
                  .else_ [
                      _state = "INITIAL"
                  ]
              ]
            ;
    }
    lex::token_def<> a, b, c, d;
};
// Token functor handed to lex::tokenize(): appends one lower-case letter per
// recognized token ('a' for the first assigned token id, 'b' for the next,
// and so on) to the supplied stream, and always continues tokenizing.
struct dumper
{
    typedef bool result_type;
    dumper(std::stringstream& strm) : strm(strm) {}
    template <typename Token>
    bool operator () (Token const &t)
    {
        // Map the runtime token id back onto a letter relative to the first
        // dynamically assigned id.
        strm << (char)(t.id() - lex::min_token_id + 'a');
        return true;
    }
    std::stringstream& strm;
private:
    // silence MSVC warning C4512: assignment operator could not be generated
    dumper& operator= (dumper const&);
};
///////////////////////////////////////////////////////////////////////////////
int main()
{
    typedef lex::lexertl::token<std::string::iterator> token_type;
    // actor_lexer is required because the token definitions attach semantic
    // actions (phoenix expressions) that must execute during tokenization.
    typedef lex::lexertl::actor_lexer<token_type> base_lexer_type;
    typedef multi_tokens<base_lexer_type> lexer_type;
    std::string in("AAABBC");
    std::string::iterator first(in.begin());
    std::stringstream strm;
    lexer_type the_lexer;
    BOOST_TEST(lex::tokenize(first, in.end(), the_lexer, dumper(strm)));
    // Three 'A's, two 'B's, then the suppressed 'C' triggers three dedent
    // ('d') tokens — one per previously counted 'A'.
    BOOST_TEST(strm.str() == "aaabbddd");
    return boost::report_errors();
}
| Im-dex/xray-162 | code/3rd-party/boost/libs/spirit/test/lex/dedent_handling_phoenix.cpp | C++ | apache-2.0 | 2,510 |
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Generates paragraph text style for a text element.
 *
 * @param {Object} props Props.
 * @param {function(number):any} dataToStyleX Converts a x-unit to CSS.
 * @param {function(number):any} dataToStyleY Converts a y-unit to CSS.
 * @param {function(number):any} dataToFontSizeY Converts a font-size metric to
 * y-unit CSS.
 * @param {Object<*>} element Text element properties.
 * @param {function(number):any} dataToPaddingY Falls back to dataToStyleY if not provided.
 * @return {Object} The map of text style properties and values.
 */
export function generateParagraphTextStyle(
  props,
  dataToStyleX,
  dataToStyleY,
  dataToFontSizeY = dataToStyleY,
  element,
  dataToPaddingY = dataToStyleY
) {
  const { font, fontSize, lineHeight, padding, textAlign } = props;
  const { marginOffset } = calcFontMetrics(element);
  const verticalPadding = padding?.vertical || 0;
  const horizontalPadding = padding?.horizontal || 0;
  const hasPadding = verticalPadding || horizontalPadding;
  const paddingStyle = hasPadding
    ? `${dataToStyleY(verticalPadding)} ${dataToStyleX(horizontalPadding)}`
    : 0;
  return {
    dataToEditorY: dataToStyleY,
    whiteSpace: 'pre-wrap',
    overflowWrap: 'break-word',
    wordBreak: 'break-word',
    // Trim half of the font-metrics margin offset above and below the
    // paragraph (negative margin), see calcFontMetrics.
    margin: `${dataToPaddingY(-marginOffset / 2)} 0`,
    fontFamily: generateFontFamily(font),
    fontSize: dataToFontSizeY(fontSize),
    font,
    lineHeight,
    textAlign,
    padding: paddingStyle,
  };
}
/**
 * Builds a CSS `font-family` value from a font definition.
 *
 * The primary family and any non-generic fallbacks are wrapped in double
 * quotes since some fonts won't work without it; generic family keywords
 * (serif, monospace, …) are left unquoted.
 *
 * @param {Object} [font] Font definition.
 * @param {string} [font.family] Primary font family.
 * @param {string[]} [font.fallbacks] Ordered fallback family names.
 * @return {string} Comma-separated font-family list (may be empty).
 */
export const generateFontFamily = ({ family, fallbacks } = {}) => {
  const genericFamilyKeywords = [
    'cursive',
    'fantasy',
    'monospace',
    'serif',
    'sans-serif',
  ];
  const quoteIfNeeded = (name) =>
    genericFamilyKeywords.includes(name) ? name : `"${name}"`;

  const parts = [];
  if (family) {
    parts.push(`"${family}"`);
  }
  if (fallbacks && fallbacks.length) {
    parts.push(...fallbacks.map(quoteIfNeeded));
  }
  return parts.join(',');
};
/**
 * Computes the CSS line-height for highlighted text, compensating for any
 * vertical padding applied to the element.
 *
 * @param {number} lineHeight Unitless line height.
 * @param {number} [verticalPadding=0] Vertical padding; may be negative.
 * @param {string} [unit='px'] CSS unit the padding is expressed in.
 * @return {string} Either a plain `em` value or a `calc()` expression.
 */
export const getHighlightLineheight = function (
  lineHeight,
  verticalPadding = 0,
  unit = 'px'
) {
  if (verticalPadding === 0) {
    return `${lineHeight}em`;
  }
  // Padding applies to both top and bottom, hence the doubling.
  const sign = verticalPadding > 0 ? '+' : '-';
  const offset = 2 * Math.abs(verticalPadding);
  return `calc(${lineHeight}em ${sign} ${offset}${unit})`;
};
/**
 * Derives pixel font metrics for a text element.
 *
 * Computes the content area (ascender to descender), the line box, and the
 * margin offset — the extra "virtual-area" the line box adds around the
 * content area, which callers cut away.
 * More info: https://iamvdo.me/en/blog/css-font-metrics-line-height-and-vertical-align
 *
 * @param {Object} element Text element with `fontSize`, `lineHeight` and `font`.
 * @return {{contentAreaPx: number, lineBoxPx: number, marginOffset: number}}
 * All-zero metrics when the font carries no metrics data.
 */
export function calcFontMetrics(element) {
  const metrics = element.font.metrics;
  if (!metrics) {
    // No font metrics available — nothing to compensate for.
    return {
      contentAreaPx: 0,
      lineBoxPx: 0,
      marginOffset: 0,
    };
  }
  const { upm, asc, des } = metrics;
  const { fontSize, lineHeight } = element;
  const contentAreaPx = ((asc - des) / upm) * fontSize;
  const lineBoxPx = lineHeight * fontSize;
  return {
    marginOffset: lineBoxPx - contentAreaPx,
    contentAreaPx,
    lineBoxPx,
  };
}
| GoogleForCreators/web-stories-wp | packages/story-editor/src/elements/text/util.js | JavaScript | apache-2.0 | 3,563 |
<?php
// Minimal placeholder page: confirms the PayPal IPN "success" endpoint was hit.
echo "This is success.php";
?> | pari/rand0m | paypalipn/success.php | PHP | apache-2.0 | 38 |
package org.zentaur.core.http;
/*
* Copyright 2012 The Zentaur Server Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.zentaur.http.Response;
/**
* Factory delegated to {@link Response} instances creation.
*/
/**
 * Factory delegated to {@link Response} instances creation.
 */
public final class ResponseFactory
{

    /**
     * Hidden constructor, this class cannot be instantiated.
     */
    private ResponseFactory()
    {
        // static factory only, no state
    }

    /**
     * Creates a new {@link Response} instance.
     *
     * @return a new {@link Response} instance, backed by {@code DefaultResponse}.
     */
    public static Response newResponse()
    {
        return new DefaultResponse();
    }

}
| zentaur/core | src/main/java/org/zentaur/core/http/ResponseFactory.java | Java | apache-2.0 | 1,161 |
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/terror"
)
var (
	// ErrBodyMissing response body is missing error
	ErrBodyMissing = errors.New("response body is missing")
)

// TiDB decides whether to retry transaction by checking if error message contains
// string "try again later" literally.
// In TiClient we use `errors.Annotate(err, txnRetryableMark)` to direct TiDB to
// restart a transaction.
// Note that it should be only used if i) the error occurs inside a transaction
// and ii) the error is not totally unexpected and hopefully will recover soon.
const txnRetryableMark = "[try again later]"

// MySQL error instances. Errors whose message carries txnRetryableMark signal
// TiDB that the enclosing transaction may be retried; ErrPDServerTimeout
// instead leaves a "%v" slot for a cause description.
var (
	// ErrTiKVServerTimeout is a retryable TiKV request timeout.
	ErrTiKVServerTimeout = terror.ClassTiKV.New(mysql.ErrTiKVServerTimeout, mysql.MySQLErrName[mysql.ErrTiKVServerTimeout]+txnRetryableMark)
	// ErrResolveLockTimeout is a retryable lock-resolution timeout.
	ErrResolveLockTimeout = terror.ClassTiKV.New(mysql.ErrResolveLockTimeout, mysql.MySQLErrName[mysql.ErrResolveLockTimeout]+txnRetryableMark)
	// ErrPDServerTimeout is a PD request timeout; not marked retryable.
	ErrPDServerTimeout = terror.ClassTiKV.New(mysql.ErrPDServerTimeout, mysql.MySQLErrName[mysql.ErrPDServerTimeout]+"%v")
	// ErrRegionUnavailable is a retryable region-unavailable error.
	ErrRegionUnavailable = terror.ClassTiKV.New(mysql.ErrRegionUnavailable, mysql.MySQLErrName[mysql.ErrRegionUnavailable]+txnRetryableMark)
	// ErrTiKVServerBusy is a retryable TiKV server-busy error.
	ErrTiKVServerBusy = terror.ClassTiKV.New(mysql.ErrTiKVServerBusy, mysql.MySQLErrName[mysql.ErrTiKVServerBusy]+txnRetryableMark)
	// ErrGCTooEarly is a GC-related error (presumably the read snapshot is
	// older than the GC safe point — TODO confirm against callers).
	ErrGCTooEarly = terror.ClassTiKV.New(mysql.ErrGCTooEarly, mysql.MySQLErrName[mysql.ErrGCTooEarly])
)
// init registers the mapping from this package's error codes to MySQL error
// codes in terror, so these errors can be translated onto the MySQL protocol.
func init() {
	tikvMySQLErrCodes := map[terror.ErrCode]uint16{
		mysql.ErrTiKVServerTimeout:  mysql.ErrTiKVServerTimeout,
		mysql.ErrResolveLockTimeout: mysql.ErrResolveLockTimeout,
		mysql.ErrPDServerTimeout:    mysql.ErrPDServerTimeout,
		mysql.ErrRegionUnavailable:  mysql.ErrRegionUnavailable,
		mysql.ErrTiKVServerBusy:     mysql.ErrTiKVServerBusy,
		mysql.ErrGCTooEarly:         mysql.ErrGCTooEarly,
	}
	terror.ErrClassToMySQLCodes[terror.ClassTiKV] = tikvMySQLErrCodes
}
| spongedu/tidb | store/tikv/error.go | GO | apache-2.0 | 2,481 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Mermaid.Loft.Infrastructure.DomainBase
{
/// <summary>
/// Base class for domain entities. Currently a marker type with no members;
/// shared identity/equality behavior would be added here.
/// </summary>
public class EntityBase
{
}
}
| mengxinjinglong/Mermaid.Loft | Mermaid.Loft.Infrastructure/DomainBase/EntityBase.cs | C# | apache-2.0 | 214 |
package pl.wavesoftware.examples.wildflyswarm.service.api;
import pl.wavesoftware.examples.wildflyswarm.domain.User;
import java.util.Collection;
/**
 * Application service exposing read operations over {@link User} accounts.
 *
 * @author Krzysztof Suszynski <krzysztof.suszynski@coi.gov.pl>
 * @since 04.03.16
 */
public interface UserService {
    /**
     * Retrieves a collection of active users.
     *
     * @return a collection with only active users
     */
    Collection<User> fetchActiveUsers();
}
| cardil/cdi-inheritance-wildfly-swarm | src/main/java/pl/wavesoftware/examples/wildflyswarm/service/api/UserService.java | Java | apache-2.0 | 427 |
<?php
/**
* Copyright (c) <2016> Protobile contributors and Addvilz <mrtreinis@gmail.com>.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace Protobile\Framework\CompilerPass;
use Symfony\Component\Console\Output\ConsoleOutput;
use Symfony\Component\DependencyInjection\Compiler\CompilerPassInterface;
use Symfony\Component\DependencyInjection\ContainerBuilder;
/**
 * Compiler pass that registers the shared console output service.
 *
 * Makes a Symfony {@see ConsoleOutput} available under the service id
 * "app.output" so other services can depend on a common output stream.
 */
class RegisterOutputPass implements CompilerPassInterface
{
    /**
     * Registers the "app.output" service definition in the container.
     *
     * @param ContainerBuilder $container the container being compiled
     */
    public function process(ContainerBuilder $container)
    {
        $container->register('app.output', ConsoleOutput::class);
    }
}
| protobile/framework | src/Protobile/Framework/CompilerPass/RegisterOutputPass.php | PHP | apache-2.0 | 1,123 |
/*
* #%L
* =====================================================
* _____ _ ____ _ _ _ _
* |_ _|_ __ _ _ ___| |_ / __ \| | | | ___ | | | |
* | | | '__| | | / __| __|/ / _` | |_| |/ __|| |_| |
* | | | | | |_| \__ \ |_| | (_| | _ |\__ \| _ |
* |_| |_| \__,_|___/\__|\ \__,_|_| |_||___/|_| |_|
* \____/
*
* =====================================================
*
* Hochschule Hannover
* (University of Applied Sciences and Arts, Hannover)
* Faculty IV, Dept. of Computer Science
* Ricklinger Stadtweg 118, 30459 Hannover, Germany
*
* Email: trust@f4-i.fh-hannover.de
* Website: http://trust.f4.hs-hannover.de/
*
* This file is part of ifmapj, version 2.3.2, implemented by the Trust@HsH
* research group at the Hochschule Hannover.
* %%
* Copyright (C) 2010 - 2016 Trust@HsH
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package de.hshannover.f4.trust.ifmapj.messages;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import de.hshannover.f4.trust.ifmapj.exception.IfmapErrorResult;
import de.hshannover.f4.trust.ifmapj.messages.SearchResult.Type;
/**
* Implementation of {@link PollResult}
*
* @author aw
*
*/
/**
 * Implementation of {@link PollResult}.
 *
 * Keeps defensive copies of the search results and error results given at
 * construction time and exposes unmodifiable views over them, optionally
 * filtered by {@link SearchResult.Type}.
 *
 * @author aw
 *
 */
class PollResultImpl implements PollResult {

	private final List<SearchResult> mResults;
	private final Collection<IfmapErrorResult> mErrorResults;

	/**
	 * Creates a new poll result.
	 *
	 * @param results all search results of the poll, must not be null
	 * @param eres all error results of the poll, must not be null
	 */
	PollResultImpl(List<SearchResult> results, Collection<IfmapErrorResult> eres) {
		// Check each parameter separately so the message names the actual
		// offender; previously a null "eres" was also reported as
		// "result list is null".
		if (results == null) {
			throw new NullPointerException("result list is null");
		}
		if (eres == null) {
			throw new NullPointerException("error result collection is null");
		}
		// defensive copies: callers may freely reuse their collections
		mResults = new ArrayList<SearchResult>(results);
		mErrorResults = new ArrayList<IfmapErrorResult>(eres);
	}

	@Override
	public List<SearchResult> getResults() {
		return Collections.unmodifiableList(mResults);
	}

	@Override
	public Collection<SearchResult> getSearchResults() {
		return resultsOfType(SearchResult.Type.searchResult);
	}

	@Override
	public Collection<SearchResult> getUpdateResults() {
		return resultsOfType(SearchResult.Type.updateResult);
	}

	@Override
	public Collection<SearchResult> getDeleteResults() {
		return resultsOfType(SearchResult.Type.deleteResult);
	}

	@Override
	public Collection<SearchResult> getNotifyResults() {
		return resultsOfType(SearchResult.Type.notifyResult);
	}

	@Override
	public Collection<IfmapErrorResult> getErrorResults() {
		return Collections.unmodifiableCollection(mErrorResults);
	}

	/**
	 * Returns an unmodifiable collection of all results of the given type.
	 *
	 * @param type the result type to filter for
	 * @return matching results, possibly empty
	 */
	private Collection<SearchResult> resultsOfType(Type type) {
		List<SearchResult> ret = new ArrayList<SearchResult>();
		for (SearchResult sr : mResults) {
			if (sr.getType() == type) {
				ret.add(sr);
			}
		}
		return Collections.unmodifiableCollection(ret);
	}
}
| trustathsh/ifmapj | src/main/java/de/hshannover/f4/trust/ifmapj/messages/PollResultImpl.java | Java | apache-2.0 | 3,270 |
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/cloud/resourcemanager/v3/folders.proto
package com.google.cloud.resourcemanager.v3;
/**
*
*
* <pre>
* The request sent to the
* [UpdateFolder][google.cloud.resourcemanager.v3.Folder.UpdateFolder]
* method.
* Only the `display_name` field can be changed. All other fields will be
* ignored. Use the
* [MoveFolder][google.cloud.resourcemanager.v3.Folders.MoveFolder] method to
* change the `parent` field.
* </pre>
*
* Protobuf type {@code google.cloud.resourcemanager.v3.UpdateFolderRequest}
*/
public final class UpdateFolderRequest extends com.google.protobuf.GeneratedMessageV3
implements
// @@protoc_insertion_point(message_implements:google.cloud.resourcemanager.v3.UpdateFolderRequest)
UpdateFolderRequestOrBuilder {
private static final long serialVersionUID = 0L;
// Use UpdateFolderRequest.newBuilder() to construct.
private UpdateFolderRequest(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private UpdateFolderRequest() {}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
return new UpdateFolderRequest();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
return this.unknownFields;
}
private UpdateFolderRequest(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10:
{
com.google.cloud.resourcemanager.v3.Folder.Builder subBuilder = null;
if (folder_ != null) {
subBuilder = folder_.toBuilder();
}
folder_ =
input.readMessage(
com.google.cloud.resourcemanager.v3.Folder.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(folder_);
folder_ = subBuilder.buildPartial();
}
break;
}
case 18:
{
com.google.protobuf.FieldMask.Builder subBuilder = null;
if (updateMask_ != null) {
subBuilder = updateMask_.toBuilder();
}
updateMask_ =
input.readMessage(com.google.protobuf.FieldMask.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(updateMask_);
updateMask_ = subBuilder.buildPartial();
}
break;
}
default:
{
if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.cloud.resourcemanager.v3.FoldersProto
.internal_static_google_cloud_resourcemanager_v3_UpdateFolderRequest_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.cloud.resourcemanager.v3.FoldersProto
.internal_static_google_cloud_resourcemanager_v3_UpdateFolderRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.resourcemanager.v3.UpdateFolderRequest.class,
com.google.cloud.resourcemanager.v3.UpdateFolderRequest.Builder.class);
}
public static final int FOLDER_FIELD_NUMBER = 1;
private com.google.cloud.resourcemanager.v3.Folder folder_;
/**
*
*
* <pre>
* Required. The new definition of the Folder. It must include the `name` field, which
* cannot be changed.
* </pre>
*
* <code>
* .google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];
* </code>
*
* @return Whether the folder field is set.
*/
@java.lang.Override
public boolean hasFolder() {
return folder_ != null;
}
/**
*
*
* <pre>
* Required. The new definition of the Folder. It must include the `name` field, which
* cannot be changed.
* </pre>
*
* <code>
* .google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];
* </code>
*
* @return The folder.
*/
@java.lang.Override
public com.google.cloud.resourcemanager.v3.Folder getFolder() {
return folder_ == null
? com.google.cloud.resourcemanager.v3.Folder.getDefaultInstance()
: folder_;
}
/**
*
*
* <pre>
* Required. The new definition of the Folder. It must include the `name` field, which
* cannot be changed.
* </pre>
*
* <code>
* .google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];
* </code>
*/
@java.lang.Override
public com.google.cloud.resourcemanager.v3.FolderOrBuilder getFolderOrBuilder() {
return getFolder();
}
public static final int UPDATE_MASK_FIELD_NUMBER = 2;
private com.google.protobuf.FieldMask updateMask_;
/**
*
*
* <pre>
* Required. Fields to be updated.
* Only the `display_name` can be updated.
* </pre>
*
* <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];
* </code>
*
* @return Whether the updateMask field is set.
*/
@java.lang.Override
public boolean hasUpdateMask() {
return updateMask_ != null;
}
/**
*
*
* <pre>
* Required. Fields to be updated.
* Only the `display_name` can be updated.
* </pre>
*
* <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];
* </code>
*
* @return The updateMask.
*/
@java.lang.Override
public com.google.protobuf.FieldMask getUpdateMask() {
return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_;
}
/**
*
*
* <pre>
* Required. Fields to be updated.
* Only the `display_name` can be updated.
* </pre>
*
* <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];
* </code>
*/
@java.lang.Override
public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() {
return getUpdateMask();
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
if (folder_ != null) {
output.writeMessage(1, getFolder());
}
if (updateMask_ != null) {
output.writeMessage(2, getUpdateMask());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (folder_ != null) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getFolder());
}
if (updateMask_ != null) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof com.google.cloud.resourcemanager.v3.UpdateFolderRequest)) {
return super.equals(obj);
}
com.google.cloud.resourcemanager.v3.UpdateFolderRequest other =
(com.google.cloud.resourcemanager.v3.UpdateFolderRequest) obj;
if (hasFolder() != other.hasFolder()) return false;
if (hasFolder()) {
if (!getFolder().equals(other.getFolder())) return false;
}
if (hasUpdateMask() != other.hasUpdateMask()) return false;
if (hasUpdateMask()) {
if (!getUpdateMask().equals(other.getUpdateMask())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasFolder()) {
hash = (37 * hash) + FOLDER_FIELD_NUMBER;
hash = (53 * hash) + getFolder().hashCode();
}
if (hasUpdateMask()) {
hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER;
hash = (53 * hash) + getUpdateMask().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseFrom(
java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseFrom(
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseFrom(
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseDelimitedFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseDelimitedFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseFrom(
com.google.protobuf.CodedInputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() {
return newBuilder();
}
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(
com.google.cloud.resourcemanager.v3.UpdateFolderRequest prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
*
* <pre>
* The request sent to the
* [UpdateFolder][google.cloud.resourcemanager.v3.Folder.UpdateFolder]
* method.
* Only the `display_name` field can be changed. All other fields will be
* ignored. Use the
* [MoveFolder][google.cloud.resourcemanager.v3.Folders.MoveFolder] method to
* change the `parent` field.
* </pre>
*
* Protobuf type {@code google.cloud.resourcemanager.v3.UpdateFolderRequest}
*/
public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
implements
// @@protoc_insertion_point(builder_implements:google.cloud.resourcemanager.v3.UpdateFolderRequest)
com.google.cloud.resourcemanager.v3.UpdateFolderRequestOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.cloud.resourcemanager.v3.FoldersProto
.internal_static_google_cloud_resourcemanager_v3_UpdateFolderRequest_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.cloud.resourcemanager.v3.FoldersProto
.internal_static_google_cloud_resourcemanager_v3_UpdateFolderRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.resourcemanager.v3.UpdateFolderRequest.class,
com.google.cloud.resourcemanager.v3.UpdateFolderRequest.Builder.class);
}
// Construct using com.google.cloud.resourcemanager.v3.UpdateFolderRequest.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (folderBuilder_ == null) {
folder_ = null;
} else {
folder_ = null;
folderBuilder_ = null;
}
if (updateMaskBuilder_ == null) {
updateMask_ = null;
} else {
updateMask_ = null;
updateMaskBuilder_ = null;
}
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
return com.google.cloud.resourcemanager.v3.FoldersProto
.internal_static_google_cloud_resourcemanager_v3_UpdateFolderRequest_descriptor;
}
@java.lang.Override
public com.google.cloud.resourcemanager.v3.UpdateFolderRequest getDefaultInstanceForType() {
return com.google.cloud.resourcemanager.v3.UpdateFolderRequest.getDefaultInstance();
}
@java.lang.Override
public com.google.cloud.resourcemanager.v3.UpdateFolderRequest build() {
com.google.cloud.resourcemanager.v3.UpdateFolderRequest result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public com.google.cloud.resourcemanager.v3.UpdateFolderRequest buildPartial() {
com.google.cloud.resourcemanager.v3.UpdateFolderRequest result =
new com.google.cloud.resourcemanager.v3.UpdateFolderRequest(this);
if (folderBuilder_ == null) {
result.folder_ = folder_;
} else {
result.folder_ = folderBuilder_.build();
}
if (updateMaskBuilder_ == null) {
result.updateMask_ = updateMask_;
} else {
result.updateMask_ = updateMaskBuilder_.build();
}
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.google.cloud.resourcemanager.v3.UpdateFolderRequest) {
return mergeFrom((com.google.cloud.resourcemanager.v3.UpdateFolderRequest) other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(com.google.cloud.resourcemanager.v3.UpdateFolderRequest other) {
if (other == com.google.cloud.resourcemanager.v3.UpdateFolderRequest.getDefaultInstance())
return this;
if (other.hasFolder()) {
mergeFolder(other.getFolder());
}
if (other.hasUpdateMask()) {
mergeUpdateMask(other.getUpdateMask());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.cloud.resourcemanager.v3.UpdateFolderRequest parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage =
(com.google.cloud.resourcemanager.v3.UpdateFolderRequest) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private com.google.cloud.resourcemanager.v3.Folder folder_;
private com.google.protobuf.SingleFieldBuilderV3<
com.google.cloud.resourcemanager.v3.Folder,
com.google.cloud.resourcemanager.v3.Folder.Builder,
com.google.cloud.resourcemanager.v3.FolderOrBuilder>
folderBuilder_;
/**
*
*
* <pre>
* Required. The new definition of the Folder. It must include the `name` field, which
* cannot be changed.
* </pre>
*
* <code>
* .google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];
* </code>
*
* @return Whether the folder field is set.
*/
public boolean hasFolder() {
return folderBuilder_ != null || folder_ != null;
}
/**
*
*
* <pre>
* Required. The new definition of the Folder. It must include the `name` field, which
* cannot be changed.
* </pre>
*
* <code>
* .google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];
* </code>
*
* @return The folder.
*/
public com.google.cloud.resourcemanager.v3.Folder getFolder() {
if (folderBuilder_ == null) {
return folder_ == null
? com.google.cloud.resourcemanager.v3.Folder.getDefaultInstance()
: folder_;
} else {
return folderBuilder_.getMessage();
}
}
/**
 * Required. The new definition of the Folder. It must include the `name` field,
 * which cannot be changed.
 *
 * <code>.google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public Builder setFolder(com.google.cloud.resourcemanager.v3.Folder value) {
  // Generated protobuf accessor. While no nested builder exists the message
  // is stored in the plain field; once a builder has been created it owns
  // the value and all reads/writes are delegated to it.
  if (folderBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    folder_ = value;
    onChanged();
  } else {
    folderBuilder_.setMessage(value);
  }
  return this;
}
/**
 * Required. The new definition of the Folder. It must include the `name` field,
 * which cannot be changed.
 *
 * <code>.google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public Builder setFolder(com.google.cloud.resourcemanager.v3.Folder.Builder builderForValue) {
  // Same as setFolder(Folder) but materialises the message from a builder.
  if (folderBuilder_ == null) {
    folder_ = builderForValue.build();
    onChanged();
  } else {
    folderBuilder_.setMessage(builderForValue.build());
  }
  return this;
}
/**
 * Required. The new definition of the Folder. It must include the `name` field,
 * which cannot be changed.
 *
 * <code>.google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public Builder mergeFolder(com.google.cloud.resourcemanager.v3.Folder value) {
  // Protobuf merge semantics: if a folder is already set, field-wise merge
  // the incoming value into it; otherwise adopt the value directly.
  if (folderBuilder_ == null) {
    if (folder_ != null) {
      folder_ =
          com.google.cloud.resourcemanager.v3.Folder.newBuilder(folder_)
              .mergeFrom(value)
              .buildPartial();
    } else {
      folder_ = value;
    }
    onChanged();
  } else {
    folderBuilder_.mergeFrom(value);
  }
  return this;
}
/**
 * Required. The new definition of the Folder. It must include the `name` field,
 * which cannot be changed.
 *
 * <code>.google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public Builder clearFolder() {
  // Resets the field to its unset state and discards any nested builder.
  if (folderBuilder_ == null) {
    folder_ = null;
    onChanged();
  } else {
    folder_ = null;
    folderBuilder_ = null;
  }
  return this;
}
/**
 * Required. The new definition of the Folder. It must include the `name` field,
 * which cannot be changed.
 *
 * <code>.google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public com.google.cloud.resourcemanager.v3.Folder.Builder getFolderBuilder() {
  // Forces creation of the nested builder (lazily, via
  // getFolderFieldBuilder) and hands it to the caller for in-place edits.
  onChanged();
  return getFolderFieldBuilder().getBuilder();
}
/**
 * Required. The new definition of the Folder. It must include the `name` field,
 * which cannot be changed.
 *
 * <code>.google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public com.google.cloud.resourcemanager.v3.FolderOrBuilder getFolderOrBuilder() {
  // Read-only view that never returns null: falls back to the default
  // instance when the field is unset.
  if (folderBuilder_ != null) {
    return folderBuilder_.getMessageOrBuilder();
  } else {
    return folder_ == null
        ? com.google.cloud.resourcemanager.v3.Folder.getDefaultInstance()
        : folder_;
  }
}
/**
 * Required. The new definition of the Folder. It must include the `name` field,
 * which cannot be changed.
 *
 * <code>.google.cloud.resourcemanager.v3.Folder folder = 1 [(.google.api.field_behavior) = REQUIRED];</code>
 */
private com.google.protobuf.SingleFieldBuilderV3<
        com.google.cloud.resourcemanager.v3.Folder,
        com.google.cloud.resourcemanager.v3.Folder.Builder,
        com.google.cloud.resourcemanager.v3.FolderOrBuilder>
    getFolderFieldBuilder() {
  // Lazily creates the SingleFieldBuilderV3; from then on folder_ is null
  // and the builder is the single source of truth for the field.
  if (folderBuilder_ == null) {
    folderBuilder_ =
        new com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.resourcemanager.v3.Folder,
            com.google.cloud.resourcemanager.v3.Folder.Builder,
            com.google.cloud.resourcemanager.v3.FolderOrBuilder>(
            getFolder(), getParentForChildren(), isClean());
    folder_ = null;
  }
  return folderBuilder_;
}
// Backing storage for the update_mask field; mirrors the folder_ field above:
// either the plain field or the nested builder holds the value, never both.
private com.google.protobuf.FieldMask updateMask_;
private com.google.protobuf.SingleFieldBuilderV3<
        com.google.protobuf.FieldMask,
        com.google.protobuf.FieldMask.Builder,
        com.google.protobuf.FieldMaskOrBuilder>
    updateMaskBuilder_;
/**
 * Required. Fields to be updated. Only the `display_name` can be updated.
 *
 * <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];</code>
 *
 * @return Whether the updateMask field is set.
 */
public boolean hasUpdateMask() {
  return updateMaskBuilder_ != null || updateMask_ != null;
}
/**
 * Required. Fields to be updated. Only the `display_name` can be updated.
 *
 * <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];</code>
 *
 * @return The updateMask, or the default instance when unset (never null).
 */
public com.google.protobuf.FieldMask getUpdateMask() {
  if (updateMaskBuilder_ == null) {
    return updateMask_ == null
        ? com.google.protobuf.FieldMask.getDefaultInstance()
        : updateMask_;
  } else {
    return updateMaskBuilder_.getMessage();
  }
}
/**
 * Required. Fields to be updated. Only the `display_name` can be updated.
 *
 * <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public Builder setUpdateMask(com.google.protobuf.FieldMask value) {
  // Store directly while no nested builder exists; otherwise delegate.
  if (updateMaskBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    updateMask_ = value;
    onChanged();
  } else {
    updateMaskBuilder_.setMessage(value);
  }
  return this;
}
/**
 * Required. Fields to be updated. Only the `display_name` can be updated.
 *
 * <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) {
  if (updateMaskBuilder_ == null) {
    updateMask_ = builderForValue.build();
    onChanged();
  } else {
    updateMaskBuilder_.setMessage(builderForValue.build());
  }
  return this;
}
/**
 * Required. Fields to be updated. Only the `display_name` can be updated.
 *
 * <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) {
  // Protobuf merge semantics: combine with an existing mask if present.
  if (updateMaskBuilder_ == null) {
    if (updateMask_ != null) {
      updateMask_ =
          com.google.protobuf.FieldMask.newBuilder(updateMask_).mergeFrom(value).buildPartial();
    } else {
      updateMask_ = value;
    }
    onChanged();
  } else {
    updateMaskBuilder_.mergeFrom(value);
  }
  return this;
}
/**
 * Required. Fields to be updated. Only the `display_name` can be updated.
 *
 * <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public Builder clearUpdateMask() {
  if (updateMaskBuilder_ == null) {
    updateMask_ = null;
    onChanged();
  } else {
    updateMask_ = null;
    updateMaskBuilder_ = null;
  }
  return this;
}
/**
 * Required. Fields to be updated. Only the `display_name` can be updated.
 *
 * <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() {
  onChanged();
  return getUpdateMaskFieldBuilder().getBuilder();
}
/**
 * Required. Fields to be updated. Only the `display_name` can be updated.
 *
 * <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];</code>
 */
public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() {
  if (updateMaskBuilder_ != null) {
    return updateMaskBuilder_.getMessageOrBuilder();
  } else {
    return updateMask_ == null
        ? com.google.protobuf.FieldMask.getDefaultInstance()
        : updateMask_;
  }
}
/**
 * Required. Fields to be updated. Only the `display_name` can be updated.
 *
 * <code>.google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED];</code>
 */
private com.google.protobuf.SingleFieldBuilderV3<
        com.google.protobuf.FieldMask,
        com.google.protobuf.FieldMask.Builder,
        com.google.protobuf.FieldMaskOrBuilder>
    getUpdateMaskFieldBuilder() {
  // Lazily creates the field builder; afterwards updateMask_ stays null and
  // the builder holds the authoritative value.
  if (updateMaskBuilder_ == null) {
    updateMaskBuilder_ =
        new com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.FieldMask,
            com.google.protobuf.FieldMask.Builder,
            com.google.protobuf.FieldMaskOrBuilder>(
            getUpdateMask(), getParentForChildren(), isClean());
    updateMask_ = null;
  }
  return updateMaskBuilder_;
}
// Standard generated overrides: unknown-field handling is delegated
// unchanged to GeneratedMessageV3.Builder.
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
  return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:google.cloud.resourcemanager.v3.UpdateFolderRequest)
}
// @@protoc_insertion_point(class_scope:google.cloud.resourcemanager.v3.UpdateFolderRequest)
// Shared immutable default instance, created once at class-load time.
private static final com.google.cloud.resourcemanager.v3.UpdateFolderRequest DEFAULT_INSTANCE;
static {
  DEFAULT_INSTANCE = new com.google.cloud.resourcemanager.v3.UpdateFolderRequest();
}
/** Returns the shared default (all-fields-unset) instance of this message. */
public static com.google.cloud.resourcemanager.v3.UpdateFolderRequest getDefaultInstance() {
  return DEFAULT_INSTANCE;
}
// Wire-format parser; delegates to the generated stream constructor.
private static final com.google.protobuf.Parser<UpdateFolderRequest> PARSER =
    new com.google.protobuf.AbstractParser<UpdateFolderRequest>() {
      @java.lang.Override
      public UpdateFolderRequest parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new UpdateFolderRequest(input, extensionRegistry);
      }
    };
/** Returns the parser for {@code UpdateFolderRequest} messages. */
public static com.google.protobuf.Parser<UpdateFolderRequest> parser() {
  return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<UpdateFolderRequest> getParserForType() {
  return PARSER;
}
@java.lang.Override
public com.google.cloud.resourcemanager.v3.UpdateFolderRequest getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
| googleapis/java-resourcemanager | proto-google-cloud-resourcemanager-v3/src/main/java/com/google/cloud/resourcemanager/v3/UpdateFolderRequest.java | Java | apache-2.0 | 34,397 |
package com.lyubenblagoev.postfixrest.service;
import com.lyubenblagoev.postfixrest.entity.User;
import com.lyubenblagoev.postfixrest.security.JwtTokenProvider;
import com.lyubenblagoev.postfixrest.security.RefreshTokenProvider;
import com.lyubenblagoev.postfixrest.security.UserPrincipal;
import com.lyubenblagoev.postfixrest.service.model.AuthResponse;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import javax.persistence.EntityNotFoundException;
import java.util.Optional;
@Service
@Transactional(readOnly = true)
public class AuthServiceImpl implements AuthService {

    private final JwtTokenProvider jwtTokenProvider;
    private final RefreshTokenProvider refreshTokenProvider;
    private final UserService userService;

    public AuthServiceImpl(JwtTokenProvider jwtTokenProvider,
            RefreshTokenProvider refreshTokenProvider,
            UserService userService) {
        this.jwtTokenProvider = jwtTokenProvider;
        this.refreshTokenProvider = refreshTokenProvider;
        this.userService = userService;
    }

    /**
     * Issues a new JWT access token and a refresh token for the user
     * identified by the given email address.
     *
     * @param email the user's email address
     * @return the access token together with the refresh token and its
     *         expiration date
     * @throws EntityNotFoundException if no user exists for {@code email}
     */
    @Override
    public AuthResponse createTokens(String email) {
        // orElseThrow replaces the manual isEmpty()/get() unwrap of Optional.
        User user = userService.findByEmail(email)
                .orElseThrow(() -> new EntityNotFoundException(
                        "Failed to find user with email " + email));
        UserPrincipal userPrincipal = new UserPrincipal(user);
        String token = jwtTokenProvider.createToken(
                userPrincipal.getUsername(), userPrincipal.getAuthorities());
        RefreshTokenProvider.RefreshToken refreshToken = refreshTokenProvider.createToken();
        return new AuthResponse(token, refreshToken.getToken(), refreshToken.getExpirationDate());
    }
}
| lyubenblagoev/postfix-rest-server | src/main/java/com/lyubenblagoev/postfixrest/service/AuthServiceImpl.java | Java | apache-2.0 | 1,809 |
/**
* @license
* Copyright 2012 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @fileoverview Checkbox field. Checked or not checked.
* @author fraser@google.com (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.FieldCheckbox');
/** @suppress {extraRequire} */
goog.require('Blockly.Events.BlockChange');
goog.require('Blockly.Field');
goog.require('Blockly.fieldRegistry');
goog.require('Blockly.utils.dom');
goog.require('Blockly.utils.object');
/**
 * Class for a checkbox field.
 * @param {string|boolean=} opt_value The initial value of the field. Should
 *    either be 'TRUE', 'FALSE' or a boolean. Defaults to 'FALSE'.
 * @param {Function=} opt_validator A function that is called to validate
 *    changes to the field's value. Takes in a value ('TRUE' or 'FALSE') &
 *    returns a validated value ('TRUE' or 'FALSE'), or null to abort the
 *    change.
 * @param {Object=} opt_config A map of options used to configure the field.
 *    See the [field creation documentation]{@link https://developers.google.com/blockly/guides/create-custom-blocks/fields/built-in-fields/checkbox#creation}
 *    for a list of properties this parameter supports.
 * @extends {Blockly.Field}
 * @constructor
 */
Blockly.FieldCheckbox = function(opt_value, opt_validator, opt_config) {
  /**
   * Character for the check mark. Used to apply a different check mark
   * character to individual fields.
   * @type {?string}
   * @private
   */
  this.checkChar_ = null;
  // checkChar_ is initialised before the super call, presumably because the
  // base constructor processes opt_config (via configure_, overridden below)
  // and may overwrite it — TODO confirm against Blockly.Field.
  Blockly.FieldCheckbox.superClass_.constructor.call(
      this, opt_value, opt_validator, opt_config);
};
Blockly.utils.object.inherits(Blockly.FieldCheckbox, Blockly.Field);
/**
 * The default value for this field.
 * @type {*}
 * @protected
 */
Blockly.FieldCheckbox.prototype.DEFAULT_VALUE = false;
/**
 * Construct a FieldCheckbox from a JSON arg object.
 * @param {!Object} options A JSON object with options (checked).
 * @return {!Blockly.FieldCheckbox} The new field instance.
 * @package
 * @nocollapse
 */
Blockly.FieldCheckbox.fromJson = function(options) {
  // Only the 'checked' key is read here; the full options map is forwarded
  // so configure_ can pick up e.g. 'checkCharacter'.
  return new Blockly.FieldCheckbox(options['checked'], undefined, options);
};
/**
 * Default character for the checkmark (Unicode CHECK MARK, U+2713).
 * @type {string}
 * @const
 */
Blockly.FieldCheckbox.CHECK_CHAR = '\u2713';
/**
 * Serializable fields are saved by the XML renderer, non-serializable fields
 * are not. Editable fields should also be serializable.
 * @type {boolean}
 */
Blockly.FieldCheckbox.prototype.SERIALIZABLE = true;
/**
 * Mouse cursor style when over the hotspot that initiates editability.
 */
Blockly.FieldCheckbox.prototype.CURSOR = 'default';
/**
 * Configure the field based on the given map of options.
 * Applies the base configuration first, then picks up the optional
 * 'checkCharacter' override for this checkbox.
 * @param {!Object} config A map of options to configure the field based on.
 * @protected
 * @override
 */
Blockly.FieldCheckbox.prototype.configure_ = function(config) {
  Blockly.FieldCheckbox.superClass_.configure_.call(this, config);
  var customChar = config['checkCharacter'];
  if (customChar) {
    this.checkChar_ = customChar;
  }
};
/**
 * Create the block UI for this checkbox.
 * @package
 */
Blockly.FieldCheckbox.prototype.initView = function() {
  Blockly.FieldCheckbox.superClass_.initView.call(this);
  Blockly.utils.dom.addClass(
      /** @type {!SVGTextElement} **/ (this.textElement_), 'blocklyCheckbox');
  // The check mark is shown/hidden via CSS display rather than by changing
  // the text content.
  this.textElement_.style.display = this.value_ ? 'block' : 'none';
};
/**
 * Render the check mark character and recompute the field size.
 * @override
 */
Blockly.FieldCheckbox.prototype.render_ = function() {
  if (this.textContent_) {
    this.textContent_.nodeValue = this.getDisplayText_();
  }
  this.updateSize_(this.getConstants().FIELD_CHECKBOX_X_OFFSET);
};
/**
 * Get the character to render for the check mark.
 * @return {string} The per-field character if one was set, otherwise the
 *     class-wide default CHECK_CHAR.
 * @override
 */
Blockly.FieldCheckbox.prototype.getDisplayText_ = function() {
  if (this.checkChar_) {
    return this.checkChar_;
  }
  return Blockly.FieldCheckbox.CHECK_CHAR;
};
/**
 * Set the character used for the check mark.
 * @param {?string} character The character to use for the check mark, or
 *    null to use the default.
 */
Blockly.FieldCheckbox.prototype.setCheckCharacter = function(character) {
  this.checkChar_ = character;
  // Re-render so the new mark becomes visible immediately.
  this.forceRerender();
};
/**
 * Toggle the state of the checkbox on click.
 * @protected
 */
Blockly.FieldCheckbox.prototype.showEditor_ = function() {
  this.setValue(!this.value_);
};
/**
 * Ensure that the input value is valid ('TRUE' or 'FALSE').
 * Accepts the booleans true/false and the strings 'TRUE'/'FALSE'; anything
 * else is rejected.
 * @param {*=} opt_newValue The input value.
 * @return {?string} A valid value ('TRUE' or 'FALSE), or null if invalid.
 * @protected
 */
Blockly.FieldCheckbox.prototype.doClassValidation_ = function(opt_newValue) {
  // switch uses strict (===) comparison, matching the original checks.
  switch (opt_newValue) {
    case true:
    case 'TRUE':
      return 'TRUE';
    case false:
    case 'FALSE':
      return 'FALSE';
    default:
      return null;
  }
};
/**
 * Update the value of the field, and update the checkElement.
 * @param {*} newValue The value to be saved. The default validator guarantees
 *    that this is a either 'TRUE' or 'FALSE'.
 * @protected
 */
Blockly.FieldCheckbox.prototype.doValueUpdate_ = function(newValue) {
  this.value_ = this.convertValueToBool_(newValue);
  // Show or hide the check mark to reflect the new state.
  var textElement = this.textElement_;
  if (textElement) {
    textElement.style.display = this.value_ ? 'block' : 'none';
  }
};
/**
 * Get the value of this field, either 'TRUE' or 'FALSE'.
 * @return {string} The value of this field.
 */
Blockly.FieldCheckbox.prototype.getValue = function() {
  if (this.value_) {
    return 'TRUE';
  }
  return 'FALSE';
};
/**
 * Get the boolean value of this field.
 * @return {boolean} The boolean value of this field.
 */
Blockly.FieldCheckbox.prototype.getValueBoolean = function() {
  return /** @type {boolean} */ (this.value_);
};
/**
 * Get the text of this field. Used when the block is collapsed.
 * @return {string} Text representing the value of this field
 *    ('true' or 'false').
 */
Blockly.FieldCheckbox.prototype.getText = function() {
  var asBool = this.convertValueToBool_(this.value_);
  return String(asBool);
};
/**
 * Convert a value into a pure boolean.
 *
 * Converts 'TRUE' to true and 'FALSE' to false correctly, everything else
 * is cast to a boolean.
 * @param {*} value The value to convert.
 * @return {boolean} The converted value.
 * @private
 */
Blockly.FieldCheckbox.prototype.convertValueToBool_ = function(value) {
  // Strings are compared against 'TRUE'; non-strings get a truthiness cast.
  return (typeof value == 'string') ? (value == 'TRUE') : !!value;
};
Blockly.fieldRegistry.register('field_checkbox', Blockly.FieldCheckbox);
| mark-friedman/blockly | core/field_checkbox.js | JavaScript | apache-2.0 | 6,315 |
package task03.pages;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.FindBy;
import org.openqa.selenium.support.ui.ExpectedConditions;
// Page object for the fare/ticket selection step: picks two fares and
// submits the trip summary.
public class TicketSelectionPage extends Page {
    public TicketSelectionPage(PageManager pages) {
        super(pages);
    }
    // NOTE(review): firstTicket and secondTicket use the *identical* xpath
    // (fareRowContainer_0 / tr[2] / td[2]), so "select2Tickets" clicks the
    // same cell twice. This looks like a copy-paste error — the second fare
    // is probably in a different row or in fareRowContainer_1. Confirm
    // against the page markup before relying on this test.
    @FindBy(xpath = ".//*[@id='fareRowContainer_0']/tbody/tr[2]/td[2]")
    private WebElement firstTicket;
    @FindBy(xpath = ".//*[@id='fareRowContainer_0']/tbody/tr[2]/td[2]")
    private WebElement secondTicket;
    @FindBy(id = "tripSummarySubmitBtn")
    private WebElement submitButton;
    // Clicks both fare cells (waiting for clickability each time) and
    // submits the trip summary form.
    public void select2Tickets() {
        wait.until(ExpectedConditions.elementToBeClickable(firstTicket));
        firstTicket.click();
        wait.until(ExpectedConditions.elementToBeClickable(secondTicket));
        secondTicket.click();
        wait.until(ExpectedConditions.elementToBeClickable(submitButton));
        submitButton.submit();
    }
}
| RihnKornak/TestTasks | src/test/java/task03/pages/TicketSelectionPage.java | Java | apache-2.0 | 945 |
using System.Collections;
using System.Collections.Generic;
namespace Basic.Ast
{
    /// <summary>
    /// An ordered collection of <see cref="Parameter"/> definitions belonging
    /// to a single method.
    /// </summary>
    public class ParameterList : IEnumerable<Parameter>
    {
        private readonly List<Parameter> parameters = new List<Parameter>();

        /// <summary>The method that owns these parameters.</summary>
        public MethodDef Method { get; private set; }

        /// <summary>Associates this parameter list with its owning method.</summary>
        public void SetMethod(MethodDef method)
        {
            Method = method;
        }

        /// <summary>Appends a parameter to the end of the list.</summary>
        public void Add(Parameter param)
        {
            parameters.Add(param);
        }

        /// <summary>Runs the Define pass over every parameter in order.</summary>
        public void Define()
        {
            parameters.ForEach(p => p.Define());
        }

        /// <summary>Runs the Resolve pass over every parameter in order.</summary>
        public void Resolve()
        {
            parameters.ForEach(p => p.Resolve());
        }

        /// <summary>Creates the emit-time parameter builders, in order.</summary>
        public void CreateParameterBuilders()
        {
            parameters.ForEach(p => p.DefineParameterBuilder());
        }

        /// <summary>
        /// Finds the first parameter with the given name, or null when absent.
        /// </summary>
        public Parameter Get(string name)
        {
            return parameters.Find(p => p.Name == name);
        }

        public IEnumerator<Parameter> GetEnumerator()
        {
            return parameters.GetEnumerator();
        }

        IEnumerator IEnumerable.GetEnumerator()
        {
            return GetEnumerator();
        }
    }
}
/*
Copyright [2011] [Prasad Balan]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.yarsquidy.x12;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.Scanner;
import java.util.regex.Pattern;
/**
 * The class represents methods used to translate a X12 transaction represented
 * as a file or string into an X12 object.
 *
 * @author Prasad Balan
 * @version $Id: $Id
 */
public class X12Parser implements Parser {
	// Size of the ISA Interchange Control Header segment, including the
	// element/segment separator positions read below.
	private static final int SIZE = 106;
	/** Constant <code>POS_SEGMENT=105</code> */
	public static final int POS_SEGMENT = 105;
	/** Constant <code>POS_ELEMENT=3</code> */
	public static final int POS_ELEMENT = 3;
	/** Constant <code>POS_COMPOSITE_ELEMENT=104</code> */
	public static final int POS_COMPOSITE_ELEMENT = 104;
	/** Constant <code>START_TAG="ISA"</code> */
	public static final String START_TAG = "ISA";
	private Cf x12Cf;
	// Parsing cursors: the Cf node and Loop most recently matched.
	private Cf cfMarker;
	private Loop loopMarker;

	/**
	 * <p>Constructor for X12Parser.</p>
	 *
	 * @param cf a {@link Cf} object describing the loop hierarchy.
	 */
	public X12Parser(Cf cf) {
		this.x12Cf = cf;
	}

	/**
	 * {@inheritDoc}
	 *
	 * The method takes a X12 file and converts it into a X2 object. The X12
	 * class has methods to convert it into XML format as well as methods to
	 * modify the contents.
	 */
	public EDI parse(File fileName) throws FormatException, IOException {
		final char[] buffer = new char[SIZE];
		// try/finally ensures the file handle is released even when read()
		// throws (the original leaked the FileReader in that case).
		FileReader fr = new FileReader(fileName);
		int count;
		try {
			count = fr.read(buffer);
		} finally {
			fr.close();
		}
		String start = new String(buffer, 0, 3);
		if (count != SIZE) {
			throw new FormatException("The Interchange Control Header is not " +
					"the correct size expected: " + SIZE + " found: " + count);
		}
		if (!start.startsWith(START_TAG)) {
			throw new FormatException("The Interchange Control Header Segment element: " + START_TAG + " is missing");
		}
		// Separators are defined by fixed positions inside the ISA header.
		Context context = new Context();
		context.setSegmentSeparator(buffer[POS_SEGMENT]);
		context.setElementSeparator(buffer[POS_ELEMENT]);
		context.setCompositeElementSeparator(buffer[POS_COMPOSITE_ELEMENT]);
		Scanner scanner = new Scanner(fileName);
		try {
			return scanSource(scanner, context);
		} finally {
			// Close the scanner even if scanning fails part-way.
			scanner.close();
		}
	}

	/**
	 * Private helper: walks the scanner segment by segment and builds the
	 * loop hierarchy according to the configured Cf tree.
	 *
	 * @param scanner the scanner to use in scanning.
	 * @param context context (separators) for the scanner.
	 * @return X12 object found by the scanner.
	 */
	private X12 scanSource(Scanner scanner, Context context) {
		Character segmentSeparator = context.getSegmentSeparator();
		String quotedSegmentSeparator = Pattern.quote(segmentSeparator.toString());
		// Segments may be terminated by the separator alone or followed by a
		// (CR)LF line break.
		scanner.useDelimiter(quotedSegmentSeparator + "\r\n|" + quotedSegmentSeparator + "\n|" + quotedSegmentSeparator);
		cfMarker = x12Cf;
		X12 x12 = new X12(context);
		loopMarker = x12;
		Loop loop = x12;
		while (scanner.hasNext()) {
			String line = scanner.next();
			String[] tokens = line.split("\\" + context.getElementSeparator());
			if (doesChildLoopMatch(cfMarker, tokens)) {
				// Segment opens a child loop of the current loop.
				loop = loop.addChild(cfMarker.getName());
				loop.addSegment(line);
			} else if (doesParentLoopMatch(cfMarker, tokens, loop)) {
				// Segment closes the current loop and opens a sibling/ancestor loop.
				loop = loopMarker.addChild(cfMarker.getName());
				loop.addSegment(line);
			} else {
				// Plain segment belonging to the current loop.
				loop.addSegment(line);
			}
		}
		return x12;
	}

	/**
	 * The method takes a InputStream and converts it into a X2 object. The X12
	 * class has methods to convert it into XML format as well as methods to
	 * modify the contents.
	 *
	 * @param source InputStream (remains open; the caller owns and closes it)
	 * @return the X12 object
	 * @throws FormatException if any.
	 * @throws java.io.IOException if any.
	 */
	public EDI parse(InputStream source) throws FormatException, IOException {
		StringBuilder strBuffer = new StringBuilder();
		char[] cbuf = new char[1024];
		int length;
		Reader reader = new BufferedReader(new InputStreamReader(source));
		while ((length = reader.read(cbuf)) != -1) {
			strBuffer.append(cbuf, 0, length);
		}
		String strSource = strBuffer.toString();
		return parse(strSource);
	}

	/**
	 * The method takes a X12 string and converts it into a X2 object. The X12
	 * class has methods to convert it into XML format as well as methods to
	 * modify the contents.
	 *
	 * @param source String
	 * @return the X12 object
	 * @throws FormatException if any.
	 */
	public EDI parse(String source) throws FormatException {
		if (source.length() < SIZE) {
			throw new FormatException();
		}
		Context context = new Context();
		context.setSegmentSeparator(source.charAt(POS_SEGMENT));
		context.setElementSeparator(source.charAt(POS_ELEMENT));
		context.setCompositeElementSeparator(source.charAt(POS_COMPOSITE_ELEMENT));
		Scanner scanner = new Scanner(source);
		try {
			return scanSource(scanner, context);
		} finally {
			scanner.close();
		}
	}

	/**
	 * Checks if the segment (or line read) matches to current loop.
	 * A match requires the segment id to be equal and, when the Cf defines a
	 * qualifier position, one of the configured qualifiers to appear there.
	 *
	 * @param cf Cf
	 * @param tokens String[] represents the segment broken into elements
	 * @return boolean
	 */
	private boolean doesLoopMatch(Cf cf, String[] tokens) {
		if (cf.getSegment().equals(tokens[0])) {
			if (null == cf.getSegmentQualPos()) {
				return true;
			} else {
				for (String qual : cf.getSegmentQuals()) {
					if (qual.equals(tokens[cf.getSegmentQualPos()])) {
						return true;
					}
				}
			}
		}
		return false;
	}

	/**
	 * Checks if the segment (or line read) matches to any of the child loops
	 * configuration. On a match, cfMarker is advanced to the matching child.
	 *
	 * @param parent Cf
	 * @param tokens String[] represents the segment broken into elements
	 * @return boolean
	 */
	boolean doesChildLoopMatch(Cf parent, String[] tokens) {
		for (Cf cf : parent.childList()) {
			if (doesLoopMatch(cf, tokens)) {
				cfMarker = cf;
				return true;
			}
		}
		return false;
	}

	/**
	 * Checks if the segment (or line read) matches the parent loop
	 * configuration, walking up the Cf tree recursively. On a match,
	 * cfMarker and loopMarker are advanced to the matching level.
	 *
	 * @param child Cf
	 * @param tokens String[] represents the segment broken into elements
	 * @param loop Loop
	 * @return boolean
	 */
	private boolean doesParentLoopMatch(Cf child, String[] tokens, Loop loop) {
		Cf parent = child.getParent();
		if (parent == null)
			return false;
		loopMarker = loop.getParent();
		for (Cf cf : parent.childList()) {
			if (doesLoopMatch(cf, tokens)) {
				cfMarker = cf;
				return true;
			}
		}
		return doesParentLoopMatch(parent, tokens, loopMarker);
	}
}
| ryanco/x12-parser | src/main/java/com/yarsquidy/x12/X12Parser.java | Java | apache-2.0 | 7,028 |
package com.chenantao.autolayout.utils;
import android.view.ViewGroup;
/**
 * Created by Chenantao_gg on 2016/1/20.
 *
 * Stub factory intended to produce cglib-proxied {@link ViewGroup} subclasses
 * whose {@code onMeasure} and {@code generateLayoutParams} calls would be
 * intercepted for auto-layout support. The Enhancer/MethodInterceptor
 * prototype that used to live here (as commented-out code) has been removed;
 * see version control history for the original sketch.
 */
public class AutoLayoutGenerate
{
	/**
	 * Would create a proxied instance of {@code clazz} using the given
	 * constructor argument types and values.
	 *
	 * @param clazz         the ViewGroup subclass to proxy
	 * @param argumentTypes constructor parameter types
	 * @param arguments     constructor arguments
	 * @return currently always {@code null} — the proxy implementation is
	 *         disabled and has never been wired up
	 */
	public static <T extends ViewGroup> T generate(Class<T> clazz, Class[] argumentTypes, Object[]
			arguments)
	{
		// Intentionally unimplemented: the cglib-based interception of
		// onMeasure/generateLayoutParams was never enabled. Callers must
		// handle a null return.
		return null;
	}
}
| Chenantao/PlayTogether | AutoLayout/src/main/java/com/chenantao/autolayout/utils/AutoLayoutGenerate.java | Java | apache-2.0 | 3,353 |
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test.utils import override_settings
from sis_provisioner.tests import (
fdao_pws_override, fdao_hrp_override, fdao_bridge_override)
from sis_provisioner.tests.account_managers import set_uw_account
# Reusable settings override: points the Bridge user import at the "users"
# filename for tests that need it.
user_file_name_override = override_settings(
    BRIDGE_IMPORT_USER_FILENAME="users")
def set_db_records():
    """Seed the UW account fixtures used by the account-manager tests.

    Called for its side effects only: each ``set_uw_account`` call creates
    a record, and two of the accounts additionally have state flags set
    (``staff`` is disabled, ``leftuw`` gets a terminate date).  Creation
    order is preserved from the original fixture.
    """
    # Accounts that only need to exist; their return values are unused,
    # so the local bindings from the original version are dropped.
    set_uw_account("affiemp")
    set_uw_account("javerage")
    set_uw_account("ellen")
    staff = set_uw_account("staff")
    staff.set_disable()
    set_uw_account("retiree")
    set_uw_account("faculty")
    leftuw = set_uw_account("leftuw")
    leftuw.set_terminate_date()
    set_uw_account("testid")
| uw-it-aca/bridge-sis-provisioner | sis_provisioner/tests/csv/__init__.py | Python | apache-2.0 | 784 |
package com.petercipov.mobi.deployer;
import com.petercipov.mobi.Instance;
import com.petercipov.traces.api.Trace;
import java.util.Optional;
import rx.Observable;
/**
*
* @author Peter Cipov
*/
public abstract class RxDeployment {
protected Optional<String> name;
public RxDeployment() {
    // Start with no explicit container name.
    this.name = Optional.empty();
}
/**
 * @return the container name, if one was set via {@link #setName(String)};
 *         empty otherwise.
 */
public Optional<String> name() {
    return name;
}
/**
 * Sets container name
 * @since 1.14
 * @param name the container name; must be non-null (wrapped with
 *             {@link Optional#of}, which throws NullPointerException on null)
 * @return this deployment, for call chaining
 */
public RxDeployment setName(String name) {
    this.name = Optional.of(name);
    return this;
}
/**
* Adds volume bindings to container as string in format /host/path:/container/path
* @since 1.14
* @param volumeBindings iterable of bindings
* @return
*/
public abstract RxDeployment addVolumes(Iterable<String> volumeBindings);
/**
* Adds volume binding
* @since 1.14
* @param hostPath
* @param containerPath
* @return
*/
public abstract RxDeployment addVolume(String hostPath, String containerPath);
/**
* Adds environment variable to container
* @since 1.14
* @param variable variable in a format NAME=VALUE
* @return
*/
public abstract RxDeployment addEnv(String variable);
/**
* Adds environment variable to container
* @since 1.14
* @param name
* @param value
* @return
*/
public abstract RxDeployment addEnv(String name, String value);
/**
* Add port that should be published
* @since 1.14
* @param port - container port spec in the format [tcp/udp]/port, e.g. tcp/8080
* @param customPort - remapping port
* @return
*/
public abstract RxDeployment addPortMapping(String port, int customPort);
/**
* Publishes all exposed ports if is set to true
* @since 1.14
* @param publish
* @return
*/
public abstract RxDeployment setPublishAllPorts(boolean publish);
/**
* Publishes all exposed ports
* @since 1.14
* @return
*/
public abstract RxDeployment publishAllPorts();
/**
* Runs the command when starting the container
* @since 1.14
* @param cmd - command in for of single string or multitude of string that
* contains parts of command
* @return
*/
public abstract RxDeployment setCmd(String ... cmd);
/**
* Sets cpu quota
* @since 1.19
* @param quota Microseconds of CPU time that the container can get in a CPU period
* @return
*/
public abstract RxDeployment setCpuQuota(long quota);
/**
* Sets cpu shares
* @since 1.14
* @param shares An integer value containing the container’s CPU Shares (ie. the relative weight vs other containers)
* @return
*/
public abstract RxDeployment setCpuShares(long shares);
/**
* Sets domain name
* @since 1.14
* @param name A string value containing the domain name to use for the container.
* @return
*/
public abstract RxDeployment setDomainName(String name);
/**
* Sets entry point
* @since 1.15
* @param entry A command to run inside container. it overrides one specified by container docker file.
* @return
*/
public abstract RxDeployment setEntryPoint(String ... entry);
/**
* adds container exposed port
* @since 1.14
* @param port in format [tcp/udp]/port. f.e tcp/8080
* @return
*/
public abstract RxDeployment addExposedPort(String port);
/**
 * Sets the hostname to use for the container.
 * @since 1.14
 * @param hostName the hostname
 * @return this deployment, for chaining
 */
public abstract RxDeployment setHostName(String hostName);
/**
 * Adds a label to the container.
 * @since 1.18
 * @param key label name
 * @param value label value
 * @return this deployment, for chaining
 */
public abstract RxDeployment addLabel(String key, String value);
/**
 * Sets the MAC address of the container.
 * NOTE(review): method name keeps the historical "Adress" spelling for
 * compatibility with existing callers.
 * @since 1.15
 * @param mac the MAC address
 * @return this deployment, for chaining
 */
public abstract RxDeployment setMacAdress(String mac);
/**
 * Sets the memory limit.
 * @since 1.14
 * @param memory memory limit in bytes
 * @return this deployment, for chaining
 */
public abstract RxDeployment setMemory(long memory);
/**
 * Sets the memory and swap limits.
 * @since 1.14
 * @param memory memory limit in bytes
 * @param swap memory limit for swap; set -1 to disable swap
 * @return this deployment, for chaining
 */
public abstract RxDeployment setMemory(long memory, long swap);
/**
 * Disables networking for the container.
 * @since 1.14
 * @param disabled true to disable networking
 * @return this deployment, for chaining
 */
public abstract RxDeployment setNetworkDisabled(boolean disabled);
/**
 * Keeps stdin open.
 * @since 1.14
 * @param open true to keep stdin open
 * @return this deployment, for chaining
 */
public abstract RxDeployment setOpenStdIn(boolean open);
/**
 * Opens stdin and closes it again after the first attached client disconnects.
 * @since 1.14
 * @param once true to close stdin after the first client detaches
 * @return this deployment, for chaining
 */
public abstract RxDeployment setStdInOnce(boolean once);
/**
 * Attaches standard streams to a tty, including stdin if it is not closed.
 * @since 1.14
 * @param enabled true to allocate a tty
 * @return this deployment, for chaining
 */
public abstract RxDeployment setTty(boolean enabled);
/**
 * Sets the user that commands run as inside the container.
 * @since 1.14
 * @param user the user name
 * @return this deployment, for chaining
 */
public abstract RxDeployment setUser(String user);
/**
 * Sets the working directory for commands that run in the container.
 * @since 1.14
 * @param workDir path of the working directory
 * @return this deployment, for chaining
 */
public abstract RxDeployment setWorkDir(String workDir);
/**
 * Sets the path to the container's cgroup.
 * @since 1.18
 * @param parent path to cgroups under which the container's cgroup will be
 *        created; a relative path is resolved against the cgroups path of the
 *        init process. Cgroups are created if they do not already exist.
 * @return this deployment, for chaining
 */
public abstract RxDeployment setCgroupParent(String parent);
/**
 * Adds DNS servers for the container.
 * @since 1.14
 * @param dns DNS servers the container should use
 * @return this deployment, for chaining
 */
public abstract RxDeployment addDns(String ... dns);
/**
 * Adds DNS search domains.
 * @since 1.15
 * @param dns DNS search domains for the container to use
 * @return this deployment, for chaining
 */
public abstract RxDeployment addDnsSearch(String ... dns);
/**
 * Adds extra host entries to the container's /etc/hosts.
 * @since 1.15
 * @param hosts hostname/IP mappings, each in the form "hostname:IP"
 * @return this deployment, for chaining
 */
public abstract RxDeployment addExtraHosts(String ... hosts);
/**
 * Adds links to other containers.
 * @since 1.14
 * @param links link entries, each in the form container_name:alias
 * @return this deployment, for chaining
 */
public abstract RxDeployment addLinks(String ... links);
/**
 * Sets an LXC-specific configuration entry. These settings only take effect
 * when the lxc execution driver is used.
 * @since 1.14
 * @param key configuration key
 * @param value configuration value
 * @return this deployment, for chaining
 */
public abstract RxDeployment addLxcParameter(String key, String value);
/**
 * Sets the networking mode for the container.
 * @since 1.15
 * @param mode supported values are: bridge, host, and container:name|id
 * @return this deployment, for chaining
 */
public abstract RxDeployment setNetworkMode(String mode);
/**
 * Gives the container full access to the host.
 * @since 1.14
 * @param privileged true to run the container in privileged mode
 * @return this deployment, for chaining
 */
public abstract RxDeployment setPrivileged(boolean privileged);
/**
 * Adds security options used to customize labels for MLS systems such as SELinux.
 * @since 1.15
 * @param opts security option strings
 * @return this deployment, for chaining
 */
public abstract RxDeployment addSecurityOpt(String ... opts);
/**
 * Inherits volumes from other containers.
 * @since 1.14
 * @param volumes volumes to inherit, each in the form container_name:ro|rw
 * @return this deployment, for chaining
 */
public abstract RxDeployment addVolumeFrom(String ... volumes);
// Creates the container from the given image and emits a String result
// (presumably the container id — TODO confirm against implementations).
protected abstract Observable<String> createContainer(Trace trace, Instance image);
}
| petercipov/mobi | deployer/src/main/java/com/petercipov/mobi/deployer/RxDeployment.java | Java | apache-2.0 | 7,654 |
/*
* Copyright 2015 Torridity.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.tor.tribes.ui.models;
import de.tor.tribes.types.ext.Village;
import de.tor.tribes.ui.wiz.ret.types.RETSourceElement;
import java.util.LinkedList;
import java.util.List;
import javax.swing.table.AbstractTableModel;
/**
*
* @author Torridity
*/
/**
 * Swing table model for the retiming-tool source village list. It exposes a
 * single column ("Herkunft" = origin) and renders each {@link RETSourceElement}
 * row as its {@link Village}.
 *
 * @author Torridity
 */
public class RETSourceTableModel extends AbstractTableModel {

    // Column captions; "Herkunft" is German for "origin".
    private final String[] columnNames = new String[]{
            "Herkunft"
    };
    // Renderer class per column; the single column shows Village instances.
    private final Class<?>[] types = new Class<?>[]{
            Village.class
    };
    // Backing row storage, in insertion order. Final, therefore never null.
    private final List<RETSourceElement> elements = new LinkedList<>();

    public RETSourceTableModel() {
        super();
    }

    /** Removes all rows and notifies listeners. */
    public void clear() {
        elements.clear();
        fireTableDataChanged();
    }

    /**
     * Appends a row.
     *
     * @param pVillage  element to add
     * @param pValidate if true, listeners are notified immediately; pass false
     *                  during bulk loading and fire one change afterwards
     */
    public void addRow(RETSourceElement pVillage, boolean pValidate) {
        elements.add(pVillage);
        if (pValidate) {
            fireTableDataChanged();
        }
    }

    @Override
    public int getRowCount() {
        return elements.size();
    }

    @Override
    public Class<?> getColumnClass(int columnIndex) {
        return types[columnIndex];
    }

    @Override
    public boolean isCellEditable(int row, int column) {
        // The table is read-only.
        return false;
    }

    @Override
    public String getColumnName(int column) {
        return columnNames[column];
    }

    /** Removes the row at the given index and notifies listeners. */
    public void removeRow(int row) {
        elements.remove(row);
        fireTableDataChanged();
    }

    /** Returns the element backing the given row. */
    public RETSourceElement getRow(int row) {
        return elements.get(row);
    }

    /**
     * Returns the {@link Village} of the element at the given row, or null for
     * an out-of-range row index (the column index is ignored: there is only
     * one column).
     */
    @Override
    public Object getValueAt(int row, int column) {
        if (row < 0 || row >= elements.size()) {
            return null;
        }
        return elements.get(row).getVillage();
    }

    @Override
    public int getColumnCount() {
        return columnNames.length;
    }
}
| Akeshihiro/dsworkbench | Core/src/main/java/de/tor/tribes/ui/models/RETSourceTableModel.java | Java | apache-2.0 | 2,442 |
package com.zk.web.interceptor;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.Ordered;
import org.springframework.http.HttpStatus;
import org.springframework.validation.BindException;
import org.springframework.web.method.HandlerMethod;
import org.springframework.web.servlet.HandlerExceptionResolver;
import org.springframework.web.servlet.ModelAndView;
public class CustomizedHandlerExceptionResolver implements HandlerExceptionResolver, Ordered {
private static final Logger LOGGER = LoggerFactory.getLogger(CustomizedHandlerExceptionResolver.class);
public int getOrder() {
return Integer.MIN_VALUE;
}
public ModelAndView resolveException(HttpServletRequest aReq, HttpServletResponse aRes, Object aHandler,
Exception exception) {
if (aHandler instanceof HandlerMethod) {
if (exception instanceof BindException) {
return null;
}
}
LOGGER.error(StringUtils.EMPTY, exception);
ModelAndView mav = new ModelAndView("common/error");
String errorMsg = exception.getMessage();
aRes.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.value());
if ("XMLHttpRequest".equals(aReq.getHeader("X-Requested-With"))) {
try {
aRes.setContentType("application/text; charset=utf-8");
PrintWriter writer = aRes.getWriter();
aRes.setStatus(HttpServletResponse.SC_FORBIDDEN);
writer.print(errorMsg);
writer.flush();
writer.close();
return null;
} catch (IOException e) {
LOGGER.error(e.getMessage(), e);
}
}
mav.addObject("errorMsg", errorMsg);
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw, true);
exception.printStackTrace(pw);
mav.addObject("stackTrace", sw.getBuffer().toString());
mav.addObject("exception", exception);
return mav;
}
} | wqintel/zookeeper-web | src/main/java/com/zk/web/interceptor/CustomizedHandlerExceptionResolver.java | Java | apache-2.0 | 2,187 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.intelligentsia.dowsers.core.serializers.jackson;
import java.io.IOException;
import org.intelligentsia.dowsers.core.reflection.ClassInformation;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
/**
*
* ClassInformationDeserializer.
*
* @author <a href="mailto:jguibert@intelligents-ia.com" >Jerome Guibert</a>
*/
/**
 * Jackson deserializer that reads a {@link ClassInformation} back from the
 * single-string JSON object written by its serializer counterpart.
 */
public class ClassInformationDeserializer extends StdDeserializer<ClassInformation> {

    /**
     * serialVersionUID:long
     */
    private static final long serialVersionUID = -6052449554113264932L;

    public ClassInformationDeserializer() {
        super(ClassInformation.class);
    }

    /**
     * Reads the textual class description wrapped in a JSON object and parses
     * it via {@link ClassInformation#parse(String)}.
     *
     * @return the parsed {@link ClassInformation}, or null when the current
     *         token is not the start of an object (no description found)
     */
    @Override
    public ClassInformation deserialize(final JsonParser jp, final DeserializationContext ctxt) throws IOException, JsonProcessingException {
        String description = null;
        if (jp.hasCurrentToken()) {
            if (jp.getCurrentToken().equals(JsonToken.START_OBJECT)) {
                // Advance to the object's first value and read it as text.
                jp.nextValue();
                description = jp.getText();
                // Consume the closing token of the wrapper object.
                jp.nextToken();
            }
        }
        return description != null ? ClassInformation.parse(description) : null;
    }
}
| geronimo-iia/dowsers | dowsers-core/src/main/java/org/intelligentsia/dowsers/core/serializers/jackson/ClassInformationDeserializer.java | Java | apache-2.0 | 2,180 |
/*
* Copyright 2018 Sebastien Callier
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sebastien.callier.serialization.codec.extendable.object.field.primitives;
import sebastien.callier.serialization.codec.Codec;
import sebastien.callier.serialization.codec.extendable.object.field.FieldCodec;
import sebastien.callier.serialization.codec.extendable.object.field.LambdaMetaFactoryUtils;
import sebastien.callier.serialization.deserializer.InputStreamWrapper;
import sebastien.callier.serialization.exceptions.CodecGenerationException;
import sebastien.callier.serialization.serializer.OutputStreamWrapper;
import java.io.IOException;
import java.lang.reflect.Method;
/**
* @author Sebastien Callier
* @since 2018
*/
/**
 * Field codec for primitive {@code byte} properties: binds a getter/setter
 * pair via LambdaMetafactory and delegates value (de)serialization to the
 * wrapped {@link Codec}.
 *
 * @author Sebastien Callier
 * @since 2018
 */
public class ByteFieldCodec implements FieldCodec {
    // Compiled accessor lambdas for the target field.
    private final Getter get;
    private final Setter set;
    // Codec used to encode/decode the byte value itself.
    private final Codec codec;

    /**
     * @param getter reflective getter for the byte field
     * @param setter reflective setter for the byte field
     * @param codec  codec used for the wire representation
     * @throws CodecGenerationException when the accessor lambdas cannot be generated
     */
    public ByteFieldCodec(
            Method getter,
            Method setter,
            Codec codec) throws CodecGenerationException {
        super();
        get = LambdaMetaFactoryUtils.wrapGetter(Getter.class, getter, byte.class);
        set = LambdaMetaFactoryUtils.wrapSetter(Setter.class, setter, byte.class);
        this.codec = codec;
    }

    /** Functional view of the field's getter. */
    @FunctionalInterface
    public interface Getter {
        byte get(Object instance);
    }

    /** Functional view of the field's setter. */
    @FunctionalInterface
    public interface Setter {
        void set(Object instance, byte value);
    }

    /** Writes the field value of {@code instance} through the wrapped codec. */
    @Override
    @SuppressWarnings("unchecked")
    public void write(OutputStreamWrapper wrapper, Object instance) throws IOException {
        codec.write(wrapper, get.get(instance));
    }

    /** Reads a value from the stream and stores it into {@code instance}. */
    @Override
    @SuppressWarnings("unchecked")
    public void read(InputStreamWrapper wrapper, Object instance) throws IOException {
        set.set(instance, (Byte) codec.read(wrapper));
    }
}
import eventlet
import gettext
import sys
from staccato.common import config
import staccato.openstack.common.wsgi as os_wsgi
import staccato.openstack.common.pastedeploy as os_pastedeploy
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
gettext.install('staccato', unicode=1)
def fail(returncode, e):
    """Report an error on stderr, then terminate with ``returncode``."""
    message = "ERROR: %s\n" % e
    sys.stderr.write(message)
    sys.exit(returncode)
def main():
    """Start the staccato API service.

    Loads configuration, builds the WSGI app from the paste-deploy file
    referenced by ``paste_deploy.config_file``, then serves it on
    ``bind_port`` until shut down. Any RuntimeError during startup is
    reported via fail() with exit code 1.
    """
    try:
        conf = config.get_config_object()
        # Resolve the paste-deploy config file via oslo-style config lookup.
        paste_file = conf.find_file(conf.paste_deploy.config_file)
        wsgi_app = os_pastedeploy.paste_deploy_app(paste_file,
                                                   'staccato-api',
                                                   conf)
        server = os_wsgi.Service(wsgi_app, conf.bind_port)
        server.start()
        # Block until the service terminates.
        server.wait()
    except RuntimeError as e:
        fail(1, e)
# Only launch the service when executed as a script; importing this module
# for tests or tooling must not start the server.
if __name__ == '__main__':
    main()
| buzztroll/staccato | staccato/cmd/api.py | Python | apache-2.0 | 899 |
<?php
// Rebuild the database from scratch with Phinx: roll every migration back
// (-t=0 targets version 0), re-run all migrations, then load the seed data.
exec('"' . __DIR__ . '/vendor/bin/phinx" rollback -t=0');
exec('"' . __DIR__ . '/vendor/bin/phinx" migrate');
exec('"' . __DIR__ . '/vendor/bin/phinx" seed:run');
?>
using System;
using System.Drawing;
using NetTopologySuite.Geometries;
namespace SharpMap
{
/// <summary>
/// Utility class that checks Viewport min/max Zoom and constraint
/// </summary>
[Serializable]
public class MapViewPortGuard
{
    private double _minimumZoom;
    private double _maximumZoom;
    private Envelope _maximumExtents;
    private double _pixelAspectRatio;

    // Smallest positive zoom allowed; MinimumZoom can never go below this.
    const double MinMinZoomValue = 2d * Double.Epsilon;

    /// <summary>
    /// Gets or sets a value indicating the minimum zoom level.
    /// Values below the internal minimum are silently raised to it.
    /// </summary>
    public double MinimumZoom
    {
        get { return _minimumZoom; }
        set
        {
            if (value < MinMinZoomValue)
                value = MinMinZoomValue;
            _minimumZoom = value;
        }
    }

    /// <summary>
    /// Gets or sets a value indicating the maximum zoom level.
    /// Values below <see cref="MinimumZoom"/> are raised to it.
    /// </summary>
    public double MaximumZoom
    {
        get { return _maximumZoom; }
        set
        {
            if (value < _minimumZoom)
                value = _minimumZoom;
            _maximumZoom = value;
        }
    }

    /// <summary>
    /// Gets or sets a value indicating the maximum extents.
    /// The getter lazily creates (and caches) an empty envelope, so it never returns null.
    /// </summary>
    public Envelope MaximumExtents
    {
        get { return _maximumExtents ?? (_maximumExtents = new Envelope()); }
        set { _maximumExtents = value; }
    }

    /// <summary>
    /// Gets or sets the size of the Map in device units (Pixel)
    /// </summary>
    public Size Size { get; set; }

    /// <summary>
    /// Gets or sets the aspect-ratio of the pixel scales. A value less than
    /// 1 will make the map stretch upwards, and larger than 1 will make it smaller.
    /// </summary>
    /// <exception cref="ArgumentException">Throws an argument exception when value is 0 or less.</exception>
    public double PixelAspectRatio
    {
        get { return _pixelAspectRatio; }
        set
        {
            if (value <= 0)
                throw new ArgumentException("Invalid Pixel Aspect Ratio");
            _pixelAspectRatio = value;
        }
    }

    /// <summary>
    /// Creates an instance of this class
    /// </summary>
    internal MapViewPortGuard(Size size, double minZoom, double maxZoom)
    {
        Size = size;
        MinimumZoom = minZoom;
        MaximumZoom = maxZoom;
        PixelAspectRatio = 1d;
    }

    /// <summary>
    /// Gets or sets a value indicating if <see cref="Map.MaximumExtents"/> should be enforced or not.
    /// </summary>
    public bool EnforceMaximumExtents { get; set; }

    /// <summary>
    /// Verifies the zoom level and center of the map
    /// </summary>
    /// <param name="zoom">The zoom level to test</param>
    /// <param name="center">The center of the map. This coordinate might change so you <b>must</b> provide a copy if you want to preserve the old value</param>
    /// <returns>The zoom level, might have changed</returns>
    public double VerifyZoom(double zoom, Coordinate center)
    {
        // Clamp zoom into [MinimumZoom, MaximumZoom].
        if (zoom < _minimumZoom)
            zoom = _minimumZoom;
        else if (zoom > _maximumZoom)
            zoom = _maximumZoom;

        if (EnforceMaximumExtents)
        {
            // Additionally cap zoom so the viewport fits inside the maximum
            // extents, both horizontally and (via the aspect ratio) vertically.
            var arWidth = (double) Size.Width/Size.Height;
            if (zoom > _maximumExtents.Width)
                zoom = _maximumExtents.Width;
            if (zoom > arWidth * _maximumExtents.Height)
                zoom = arWidth * _maximumExtents.Height;
            zoom = VerifyValidViewport(zoom, center);
        }
        return zoom;
    }

    /// <summary>
    /// Verifies the valid viewport, makes adjustments if required.
    /// May shift <paramref name="center"/> so the viewport stays inside
    /// <see cref="MaximumExtents"/>.
    /// </summary>
    /// <param name="zoom">The current zoom</param>
    /// <param name="center">The </param>
    /// <returns>The verified zoom level</returns>
    private double VerifyValidViewport(double zoom, Coordinate center)
    {
        // No constraint configured: nothing to verify.
        var maxExtents = MaximumExtents ?? new Envelope();
        if (maxExtents.IsNull)
            return zoom;

        // Zoom is the viewport width in world units; derive the half extents.
        var halfWidth = 0.5d * zoom;
        var halfHeight = halfWidth * PixelAspectRatio * ((double)Size.Height / Size.Width);

        // Treat MaximumZoom as a cap on the viewport height as well; shrink
        // the width accordingly when the derived height exceeds it.
        var maxZoomHeight = _maximumZoom < double.MaxValue ? _maximumZoom : double.MaxValue;
        if (2 * halfHeight > maxZoomHeight)
        {
            halfHeight = 0.5d*maxZoomHeight;
            halfWidth = halfHeight / (_pixelAspectRatio * ((double)Size.Height / Size.Width));
            zoom = 2 * halfWidth;
        }

        var testEnvelope = new Envelope(center.X - halfWidth, center.X + halfWidth,
                                        center.Y - halfHeight, center.Y + halfHeight);
        if (maxExtents.Contains(testEnvelope))
            return zoom;

        // Viewport sticks out of the allowed extents: compute the smallest
        // shift that brings it back inside, and move the center by it.
        var dx = testEnvelope.MinX < maxExtents.MinX
                     ? maxExtents.MinX - testEnvelope.MinX
                     : testEnvelope.MaxX > maxExtents.MaxX
                           ? maxExtents.MaxX - testEnvelope.MaxX
                           : 0;
        var dy = testEnvelope.MinY < maxExtents.MinY
                     ? maxExtents.MinY - testEnvelope.MinY
                     : testEnvelope.MaxY > maxExtents.MaxY
                           ? maxExtents.MaxY - testEnvelope.MaxY
                           : 0;

        center.X += dx;
        center.Y += dy;
        return zoom;
    }
}
/// <summary>
/// Utility class to lock a map's viewport so it cannot be changed
/// </summary>
public class MapViewportLock
{
    private readonly Map _map;

    // Snapshot of the map's constraint settings taken by Lock(), restored by Unlock().
    private double _minimumZoom;
    private double _maximumZoom;
    private Envelope _maximumExtents;
    private bool _enforce;

    /// <summary>
    /// Creates an instance of this class
    /// </summary>
    /// <param name="map">The map whose viewport can be locked</param>
    public MapViewportLock(Map map)
    {
        _map = map;
    }

    /// <summary>
    /// Lock the viewport of the map: pins zoom and extents to the current
    /// view by collapsing min/max zoom onto the current zoom and enforcing
    /// the current envelope as maximum extents. No-op if already locked.
    /// </summary>
    public void Lock()
    {
        if (IsLocked)
            return;

        // Signal the viewport as locked
        IsLocked = true;

        // store the current extent settings so Unlock() can restore them
        _minimumZoom = _map.MinimumZoom;
        _maximumZoom = _map.MaximumZoom;
        _maximumExtents = _map.MaximumExtents;
        _enforce = _map.EnforceMaximumExtents;

        // Lock the viewport
        _map.MinimumZoom = _map.MaximumZoom = _map.Zoom;
        _map.MaximumExtents = _map.Envelope;
        _map.EnforceMaximumExtents = true;
    }

    /// <summary>
    /// Gets a value indicating that the map's viewport is locked
    /// </summary>
    public bool IsLocked { get; private set; }

    /// <summary>
    /// Unlock the viewport of the map, restoring the settings captured by Lock().
    /// </summary>
    public void Unlock()
    {
        // Unlock the viewport (restore in reverse of how Lock() overrode them)
        _map.EnforceMaximumExtents = _enforce;
        _map.MaximumExtents = _maximumExtents;
        _map.MinimumZoom = _minimumZoom;
        _map.MaximumZoom = _maximumZoom;

        // Signal the viewport as unlocked
        IsLocked = false;
    }
}
} | ShammyLevva/FTAnalyzer | SharpMap/Map/MapViewportGuard.cs | C# | apache-2.0 | 7,759 |
/*
* Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/ec2/model/DescribeNetworkInterfacesResponse.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <utility>
using namespace Aws::EC2::Model;
using namespace Aws::Utils::Xml;
using namespace Aws::Utils::Logging;
using namespace Aws::Utils;
using namespace Aws;
// Generated response wrapper for EC2 DescribeNetworkInterfaces.
DescribeNetworkInterfacesResponse::DescribeNetworkInterfacesResponse()
{
}

// Constructs directly from a service result by delegating to operator=.
DescribeNetworkInterfacesResponse::DescribeNetworkInterfacesResponse(const AmazonWebServiceResult<XmlDocument>& result)
{
  *this = result;
}

// Parses the XML payload: collects every networkInterfaceSet/item node into
// m_networkInterfaces and extracts the ResponseMetadata block.
DescribeNetworkInterfacesResponse& DescribeNetworkInterfacesResponse::operator =(const AmazonWebServiceResult<XmlDocument>& result)
{
  const XmlDocument& xmlDocument = result.GetPayload();
  XmlNode rootNode = xmlDocument.GetRootElement();
  XmlNode resultNode = rootNode;
  // Unwrap one level when the root element is not the response itself.
  if (rootNode.GetName() != "DescribeNetworkInterfacesResponse")
  {
    resultNode = rootNode.FirstChild("DescribeNetworkInterfacesResponse");
  }

  if(!resultNode.IsNull())
  {
    XmlNode networkInterfacesNode = resultNode.FirstChild("networkInterfaceSet");
    if(!networkInterfacesNode.IsNull())
    {
      // Iterate the <item> siblings, converting each into a NetworkInterface.
      XmlNode networkInterfacesMember = networkInterfacesNode.FirstChild("item");
      while(!networkInterfacesMember.IsNull())
      {
        m_networkInterfaces.push_back(networkInterfacesMember);
        networkInterfacesMember = networkInterfacesMember.NextNode("item");
      }
    }
  }

  XmlNode responseMetadataNode = rootNode.FirstChild("ResponseMetadata");
  m_responseMetadata = responseMetadataNode;
  AWS_LOGSTREAM_DEBUG("Aws::EC2::Model::DescribeNetworkInterfacesResponse", "x-amzn-request-id: " << m_responseMetadata.GetRequestId() );

  return *this;
}
| kahkeng/aws-sdk-cpp | aws-cpp-sdk-ec2/source/model/DescribeNetworkInterfacesResponse.cpp | C++ | apache-2.0 | 2,358 |
package com.noeasy.money.exception;
/**
 * Error metadata constants for user-related failures.
 */
public class UserErrorMetadata extends BaseErrorMetadata {

    /** Raised when attempting to register a user that already exists. */
    public static final UserErrorMetadata USER_EXIST = new UserErrorMetadata(101, "User exists");

    /** Raised when a null user bean is supplied. */
    public static final UserErrorMetadata NULL_USER_BEAN = new UserErrorMetadata(102, "UserBean is null");

    /**
     * @param pErrorCode   numeric error code
     * @param pErrorMesage human-readable error message
     */
    protected UserErrorMetadata(int pErrorCode, String pErrorMesage) {
        super(pErrorCode, pErrorMesage);
    }
}
| DormitoryTeam/Dormitory | src/main/java/com/noeasy/money/exception/UserErrorMetadata.java | Java | apache-2.0 | 428 |
/*
*
* (c) Copyright Ascensio System Limited 2010-2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Manages the "naming people" settings form: saving the selected naming
// schema (or its custom captions) and loading caption values for a schema.
// Relies on the global jq (jQuery) and NamingPeopleContentController AJAX proxy.
NamingPeopleContentManager = function() {

    // Persists the selected schema. For the 'custom' schema, each caption
    // field is sent individually, truncated to 30 characters.
    // parentCallback, when given, receives the server result value.
    this.SaveSchema = function(parentCallback) {
        var schemaId = jq('#namingPeopleSchema').val();
        if (schemaId == 'custom') {
            NamingPeopleContentController.SaveCustomNamingSettings(jq('#usrcaption').val().substring(0, 30), jq('#usrscaption').val().substring(0, 30),
                jq('#grpcaption').val().substring(0, 30), jq('#grpscaption').val().substring(0, 30),
                jq('#usrstatuscaption').val().substring(0, 30), jq('#regdatecaption').val().substring(0, 30),
                jq('#grpheadcaption').val().substring(0, 30),
                jq('#guestcaption').val().substring(0, 30), jq('#guestscaption').val().substring(0, 30),
                function(result) { if (parentCallback != null) parentCallback(result.value); });
        }
        else
            NamingPeopleContentController.SaveNamingSettings(schemaId, function(result) { if (parentCallback != null) parentCallback(result.value); });
    }

    // Intentionally empty: kept for callers that pass it as a callback.
    this.SaveSchemaCallback = function(res) {
    }

    // Fetches the caption set for the selected schema and fills the form
    // inputs; parentCallback, when given, receives the raw result value.
    this.LoadSchemaNames = function(parentCallback) {

        var schemaId = jq('#namingPeopleSchema').val();
        NamingPeopleContentController.GetPeopleNames(schemaId, function(res) {
            var names = res.value;
            jq('#usrcaption').val(names.UserCaption);
            jq('#usrscaption').val(names.UsersCaption);
            jq('#grpcaption').val(names.GroupCaption);
            jq('#grpscaption').val(names.GroupsCaption);
            jq('#usrstatuscaption').val(names.UserPostCaption);
            jq('#regdatecaption').val(names.RegDateCaption);
            jq('#grpheadcaption').val(names.GroupHeadCaption);
            jq('#guestcaption').val(names.GuestCaption);
            jq('#guestscaption').val(names.GuestsCaption);

            if (parentCallback != null)
                parentCallback(res.value);
        });
    }
}
// Viewer helper: editing any caption switches the schema selector to 'custom'.
NamingPeopleContentViewer = new function() {
    this.ChangeValue = function(event) {
        jq('#namingPeopleSchema').val('custom');
    }
};

// Wire up the form once the DOM is ready: typing in any caption marks the
// schema as custom, and changing the schema reloads its caption values.
jq(document).ready(function() {
    jq('.namingPeopleBox input[type="text"]').each(function(i, el) {
        jq(el).keypress(function(event) { NamingPeopleContentViewer.ChangeValue(); });
    });

    var manager = new NamingPeopleContentManager();
    jq('#namingPeopleSchema').change(function () {
        manager.LoadSchemaNames(null);
    });
    // Initial population for the currently selected schema.
    manager.LoadSchemaNames(null);
});
#!/usr/bin/env python
# Use Netmiko to execute 'show arp' on pynet-rtr1, pynet-rtr2, and juniper-srx.
from netmiko import ConnectHandler
def main():
    """Connect to each lab device over SSH and print its ARP table.

    Python 2 script: uses print statements. Credentials are hard-coded lab
    values; all three devices share one host and differ only by SSH port.
    """
    # Definition of routers (device_type selects the Netmiko driver).
    rtr1 = {
        'device_type': 'cisco_ios',
        'ip': '50.76.53.27',
        'username': 'pyclass',
        'password': '88newclass',
    }
    rtr2 = {
        'device_type': 'cisco_ios',
        'ip': '50.76.53.27',
        'username': 'pyclass',
        'password': '88newclass',
        'port': 8022,
    }
    srx = {
        'device_type': 'juniper',
        'ip': '50.76.53.27',
        'username': 'pyclass',
        'password': '88newclass',
        'port': 9822,
    }

    # Create a list of all the routers.
    all_routers = [rtr1, rtr2, srx]

    # Loop through all the routers and show arp.
    for a_router in all_routers:
        # Dict keys double as ConnectHandler keyword arguments.
        net_connect = ConnectHandler(**a_router)
        output = net_connect.send_command("show arp")
        print "\n\n>>>>>>>>> Device {0} <<<<<<<<<".format(a_router['device_type'])
        print output
        print ">>>>>>>>> End <<<<<<<<<"


if __name__ == "__main__":
    main()
| dprzybyla/python-ansible | week4/netmiko_sh_arp.py | Python | apache-2.0 | 1,128 |
// Fill out your copyright notice in the Description page of Project Settings.
#include "Projectile.h"
// Sets default values
AProjectile::AProjectile()
{
// Set this actor to call Tick() every frame. You can turn this off to improve performance if you don't need it.
PrimaryActorTick.bCanEverTick = true;
ProjectileMovement = CreateDefaultSubobject<UProjectileMovementComponent>(FName("Projectile Movement"));
ProjectileMovement->bAutoActivate = false;
}
// Called when the game starts or when spawned
void AProjectile::BeginPlay()
{
Super::BeginPlay();
}
// Called every frame
void AProjectile::Tick(float DeltaTime)
{
Super::Tick(DeltaTime);
}
void AProjectile::LaunchProjectile(float Speed)
{
ProjectileMovement->SetVelocityInLocalSpace(FVector::ForwardVector * Speed);
ProjectileMovement->Activate();
}
| Tacticalmint/04_BattleTank | BattleTank/Source/BattleTank/Private/Projectile.cpp | C++ | apache-2.0 | 826 |
#include "stdafx.h"
#include "NET_Common.h"
#include "net_client.h"
#include "net_server.h"
#include "net_messages.h"
#include "NET_Log.h"
#include "../xr_3da/xrGame/battleye.h"
#pragma warning(push)
#pragma warning(disable:4995)
#include <malloc.h>
#include "dxerr.h"
//#pragma warning(pop)
static INetLog* pClNetLog = NULL;
#define BASE_PORT_LAN_SV 5445
#define BASE_PORT_LAN_CL 5446
#define BASE_PORT 0
#define END_PORT 65535
// Debug helper: logs the URL form of a DirectPlay8 address, prefixed with
// the caller-supplied tag p.
void dump_URL (LPCSTR p, IDirectPlay8Address* A)
{
    string256 aaaa;
    DWORD aaaa_s = sizeof(aaaa);
    R_CHK (A->GetURLA(aaaa,&aaaa_s));
    Log (p,aaaa);
}
//
// Pre-allocates a pool of 16 reusable packets so steady-state traffic does
// not hit the allocator on every message.
INetQueue::INetQueue()
#ifdef PROFILE_CRITICAL_SECTIONS
    :cs(MUTEX_PROFILE_ID(INetQueue))
#endif // PROFILE_CRITICAL_SECTIONS
{
    unused.reserve (128);
    for (int i=0; i<16; i++)
        unused.push_back (xr_new<NET_Packet>());
}

// Frees every pooled packet: both the free list and any still-queued messages.
INetQueue::~INetQueue()
{
    cs.Enter ();
    u32 it;
    for (it=0; it<unused.size(); it++) xr_delete(unused[it]);
    for (it=0; it<ready.size(); it++) xr_delete(ready[it]);
    cs.Leave ();
}
// Tick count of the last time CreateGet() had to allocate a fresh packet;
// Retreive()/Release() use it to shrink the pool after 60s of low demand.
static u32 LastTimeCreate = 0;

// Publishes a packet previously obtained from CreateGet() onto the ready queue.
void INetQueue::CreateCommit(NET_Packet* P)
{
    cs.Enter ();
    ready.push_back (P);
    cs.Leave ();
}

// Hands out a packet to fill in: reuses one from the free pool when possible,
// otherwise allocates and records the allocation time.
NET_Packet* INetQueue::CreateGet()
{
    NET_Packet* P = 0;
    cs.Enter ();
    if (unused.empty())
    {
        P = xr_new<NET_Packet> ();
        //. ready.push_back (xr_new<NET_Packet> ());
        //. P = ready.back ();
        LastTimeCreate = GetTickCount();
    } else
    {
        P = unused.back();
        //. ready.push_back (unused.back());
        unused.pop_back ();
        //. P = ready.back ();
    }
    cs.Leave ();
    return P;
}
/*
NET_Packet* INetQueue::Create (const NET_Packet& _other)
{
NET_Packet* P = 0;
cs.Enter ();
//#ifdef _DEBUG
// Msg ("- INetQueue::Create - ready %d, unused %d", ready.size(), unused.size());
//#endif
if (unused.empty())
{
ready.push_back (xr_new<NET_Packet> ());
P = ready.back ();
//---------------------------------------------
LastTimeCreate = GetTickCount();
//---------------------------------------------
} else {
ready.push_back (unused.back());
unused.pop_back ();
P = ready.back ();
}
CopyMemory (P,&_other,sizeof(NET_Packet));
cs.Leave ();
return P;
}
*/
// Peeks the oldest ready packet without removing it; returns NULL when empty.
// When the queue is empty it also trims one pooled packet per call, provided
// the pool exceeds 32 entries and no allocation occurred in the last 60s.
// (Name keeps the historical "Retreive" spelling for compatibility.)
NET_Packet* INetQueue::Retreive ()
{
    NET_Packet* P = 0;
    cs.Enter ();
    if (!ready.empty()) P = ready.front();
    //---------------------------------------------
    else
    {
        u32 tmp_time = GetTickCount()-60000;
        u32 size = unused.size();
        if ((LastTimeCreate < tmp_time) && (size > 32))
        {
            xr_delete(unused.back());
            unused.pop_back();
        }
    }
    //---------------------------------------------
    cs.Leave ();
    return P;
}

// Removes the packet last returned by Retreive(): recycled into the free
// pool normally, or deleted outright when the pool is already large and
// no allocation happened in the last 60 seconds.
void INetQueue::Release ()
{
    cs.Enter ();
    VERIFY (!ready.empty());
    //---------------------------------------------
    u32 tmp_time = GetTickCount()-60000;
    u32 size = unused.size();
    if ((LastTimeCreate < tmp_time) && (size > 32))
    {
        xr_delete(ready.front());
    }
    else
        unused.push_back(ready.front());
    //---------------------------------------------
    ready.pop_front ();
    cs.Leave ();
}
//
const u32 syncQueueSize = 512;
const int syncSamples = 256;

// Fixed-size circular buffer of u32 samples (used below to accumulate
// client/server time-delta measurements for averaging).
class XRNETSERVER_API syncQueue
{
    u32 table [syncQueueSize];
    u32 write;  // next write slot; wraps at syncQueueSize
    u32 count;  // number of valid samples stored so far
public:
    syncQueue() { clear(); }
    // Appends a sample, overwriting the oldest once the buffer is full.
    // NOTE(review): count can reach syncQueueSize+1 because of the <= test,
    // making end() point one past the table — looks like an off-by-one; confirm.
    IC void push (u32 value)
    {
        table[write++] = value;
        if (write == syncQueueSize) write = 0;
        if (count <= syncQueueSize) count++;
    }
    IC u32* begin () { return table; }
    IC u32* end () { return table+count; }
    IC u32 size () { return count; }
    IC void clear () { write=0; count=0; }
} net_DeltaArray;
//-------
XRNETSERVER_API Flags32 psNET_Flags = {0};
XRNETSERVER_API int psNET_ClientUpdate = 30; // FPS
XRNETSERVER_API int psNET_ClientPending = 2;
XRNETSERVER_API char psNET_Name[32] = "Player";
XRNETSERVER_API BOOL psNET_direct_connect = FALSE;
// {0218FA8B-515B-4bf2-9A5F-2F079D1759F3}
static const GUID NET_GUID =
{ 0x218fa8b, 0x515b, 0x4bf2, { 0x9a, 0x5f, 0x2f, 0x7, 0x9d, 0x17, 0x59, 0xf3 } };
// {8D3F9E5E-A3BD-475b-9E49-B0E77139143C}
static const GUID CLSID_NETWORKSIMULATOR_DP8SP_TCPIP =
{ 0x8d3f9e5e, 0xa3bd, 0x475b, { 0x9e, 0x49, 0xb0, 0xe7, 0x71, 0x39, 0x14, 0x3c } };
// DirectPlay message callback trampoline: the user context is the owning
// IPureClient, which handles the message itself.
static HRESULT WINAPI Handler (PVOID pvUserContext, DWORD dwMessageType, PVOID pMessage)
{
    IPureClient* C = (IPureClient*)pvUserContext;
    return C->net_Handler(dwMessageType,pMessage);
}
//------------------------------------------------------------------------------
void
IPureClient::_SendTo_LL( const void* data, u32 size, u32 flags, u32 timeout )
{
IPureClient::SendTo_LL( const_cast<void*>(data), size, flags, timeout );
}
//------------------------------------------------------------------------------
void IPureClient::_Recieve( const void* data, u32 data_size, u32 /*param*/ )
{
MSYS_PING* cfg = (MSYS_PING*)data;
if( (data_size>2*sizeof(u32))
&& (cfg->sign1==0x12071980)
&& (cfg->sign2==0x26111975)
)
{
// Internal system message
if( (data_size == sizeof(MSYS_PING)) )
{
// It is reverted(server) ping
u32 time = TimerAsync( device_timer );
u32 ping = time - (cfg->dwTime_ClientSend);
u32 delta = cfg->dwTime_Server + ping/2 - time;
net_DeltaArray.push ( delta );
Sync_Average ();
return;
}
if ( data_size == sizeof(MSYS_CONFIG) )
{
MSYS_CONFIG* msys_cfg = (MSYS_CONFIG*)data;
if ( msys_cfg->is_battleye )
{
#ifdef BATTLEYE
if ( !TestLoadBEClient() )
{
net_Connected = EnmConnectionFails;
return;
}
#endif // BATTLEYE
}
net_Connected = EnmConnectionCompleted;
return;
}
Msg( "! Unknown system message" );
return;
}
else if( net_Connected == EnmConnectionCompleted )
{
// one of the messages - decompress it
if( psNET_Flags.test( NETFLAG_LOG_CL_PACKETS ) )
{
if( !pClNetLog )
pClNetLog = xr_new<INetLog>("logs\\net_cl_log.log", timeServer());
if( pClNetLog )
pClNetLog->LogData( timeServer(), const_cast<void*>(data), data_size, TRUE );
}
OnMessage( const_cast<void*>(data), data_size );
}
}
//==============================================================================
//==============================================================================
// Constructor: zero-initializes all connection state; the statistics object is
// bound to the supplied timer, which is also kept for async time queries.
IPureClient::IPureClient (CTimer* timer): net_Statistic(timer)
#ifdef PROFILE_CRITICAL_SECTIONS
    ,net_csEnumeration(MUTEX_PROFILE_ID(IPureClient::net_csEnumeration))
#endif // PROFILE_CRITICAL_SECTIONS
{
    NET = NULL;
    net_Address_server = NULL;
    net_Address_device = NULL;
    device_timer = timer;
    net_TimeDelta_User = 0;
    net_Time_LastUpdate = 0;
    net_TimeDelta = 0;
    net_TimeDelta_Calculated = 0;
    // Packet log is created lazily when NETFLAG_LOG_CL_PACKETS is set.
    pClNetLog = NULL;//xr_new<INetLog>("logs\\net_cl_log.log", timeServer());
}
// Destructor: releases the packet log and resets the direct-connect mode flag.
// DirectPlay interfaces are released in Disconnect(), not here.
IPureClient::~IPureClient ()
{
    xr_delete(pClNetLog); pClNetLog = NULL;
    psNET_direct_connect = FALSE;
}
void gen_auth_code();
// Establishes a client connection to a game server.
// `options` is a slash-separated connect string:
//   "<server>/name=<player>/psw=<session pwd>/pass=<user pwd>/port=<sv port>/portcl=<cl port>"
// For "localhost" the client connects directly (probing up to 100 local ports);
// for a remote server it first enumerates hosts and connects to the first one.
// Returns TRUE on success. In psNET_direct_connect mode all DirectPlay setup
// is skipped and only the time-delta is reset.
BOOL IPureClient::Connect (LPCSTR options)
{
    R_ASSERT (options);
    net_Disconnected = FALSE;
    // NOTE(review): auth code is skipped for localhost - confirm intended.
    if(!psNET_direct_connect && !strstr(options,"localhost") )
    {
        gen_auth_code ();
    }
    if(!psNET_direct_connect)
    {
        //
        // ---- server name = everything before the first '/'
        string256 server_name = "";
        // strcpy (server_name,options);
        if (strchr(options, '/'))
            strncpy(server_name,options, strchr(options, '/')-options);
        if (strchr(server_name,'/')) *strchr(server_name,'/') = 0;
        // ---- session password ("psw=")
        string64 password_str = "";
        if (strstr(options, "psw="))
        {
            const char* PSW = strstr(options, "psw=") + 4;
            if (strchr(PSW, '/'))
                strncpy(password_str, PSW, strchr(PSW, '/') - PSW);
            else
                strcpy(password_str, PSW);
        }
        // ---- player name ("name=")
        string64 user_name_str = "";
        if (strstr(options, "name="))
        {
            const char* NM = strstr(options, "name=") + 5;
            if (strchr(NM, '/'))
                strncpy(user_name_str, NM, strchr(NM, '/') - NM);
            else
                strcpy(user_name_str, NM);
        }
        // ---- user password ("pass=")
        string64 user_pass = "";
        if (strstr(options, "pass="))
        {
            const char* UP = strstr(options, "pass=") + 5;
            if (strchr(UP, '/'))
                strncpy(user_pass, UP, strchr(UP, '/') - UP);
            else
                strcpy(user_pass, UP);
        }
        // ---- server port ("port="), clamped to the valid range
        int psSV_Port = BASE_PORT_LAN_SV;
        if (strstr(options, "port="))
        {
            string64 portstr;
            strcpy(portstr, strstr(options, "port=")+5);
            if (strchr(portstr,'/')) *strchr(portstr,'/') = 0;
            psSV_Port = atol(portstr);
            clamp(psSV_Port, int(BASE_PORT), int(END_PORT));
        };
        // ---- client port ("portcl="); when given explicitly, a busy port is
        // a hard failure instead of triggering the fallback probing below.
        BOOL bPortWasSet = FALSE;
        int psCL_Port = BASE_PORT_LAN_CL;
        if (strstr(options, "portcl="))
        {
            string64 portstr;
            strcpy(portstr, strstr(options, "portcl=")+7);
            if (strchr(portstr,'/')) *strchr(portstr,'/') = 0;
            psCL_Port = atol(portstr);
            clamp(psCL_Port, int(BASE_PORT), int(END_PORT));
            bPortWasSet = TRUE;
        };
        // Msg("* Client connect on port %d\n",psNET_Port);
        //
        net_Connected = EnmConnectionWait;
        net_Syncronised = FALSE;
        net_Disconnected= FALSE;
        //---------------------------
        string1024 tmp="";
        // HRESULT CoInitializeExRes = CoInitializeEx(NULL, 0);
        // if (CoInitializeExRes != S_OK && CoInitializeExRes != S_FALSE)
        // {
        // DXTRACE_ERR(tmp, CoInitializeExRes);
        // CHK_DX(CoInitializeExRes);
        // };
        //---------------------------
        // Create the IDirectPlay8Client object.
        HRESULT CoCreateInstanceRes = CoCreateInstance (CLSID_DirectPlay8Client, NULL, CLSCTX_INPROC_SERVER, IID_IDirectPlay8Client, (LPVOID*) &NET);
        //---------------------------
        if (CoCreateInstanceRes != S_OK)
        {
            DXTRACE_ERR(tmp, CoCreateInstanceRes );
            CHK_DX(CoCreateInstanceRes );
        }
        //---------------------------
        // Initialize IDirectPlay8Client object.
        // Parameter validation is only enabled in debug builds.
#ifdef DEBUG
        R_CHK(NET->Initialize (this, Handler, 0));
#else
        R_CHK(NET->Initialize (this, Handler, DPNINITIALIZE_DISABLEPARAMVAL ));
#endif
        // "-netsim" switches the service provider to the DirectPlay network simulator.
        BOOL bSimulator = FALSE;
        if (strstr(Core.Params,"-netsim")) bSimulator = TRUE;
        // Create our IDirectPlay8Address Device Address, --- Set the SP for our Device Address
        net_Address_device = NULL;
        R_CHK(CoCreateInstance (CLSID_DirectPlay8Address,NULL, CLSCTX_INPROC_SERVER, IID_IDirectPlay8Address,(LPVOID*) &net_Address_device ));
        R_CHK(net_Address_device->SetSP(bSimulator? &CLSID_NETWORKSIMULATOR_DP8SP_TCPIP : &CLSID_DP8SP_TCPIP ));
        // Create our IDirectPlay8Address Server Address, --- Set the SP for our Server Address
        WCHAR ServerNameUNICODE [256];
        R_CHK(MultiByteToWideChar(CP_ACP, 0, server_name, -1, ServerNameUNICODE, 256 ));
        net_Address_server = NULL;
        R_CHK(CoCreateInstance (CLSID_DirectPlay8Address,NULL, CLSCTX_INPROC_SERVER, IID_IDirectPlay8Address,(LPVOID*) &net_Address_server ));
        R_CHK(net_Address_server->SetSP (bSimulator? &CLSID_NETWORKSIMULATOR_DP8SP_TCPIP : &CLSID_DP8SP_TCPIP ));
        R_CHK(net_Address_server->AddComponent (DPNA_KEY_HOSTNAME, ServerNameUNICODE, 2*u32(wcslen(ServerNameUNICODE) + 1), DPNA_DATATYPE_STRING ));
        R_CHK(net_Address_server->AddComponent (DPNA_KEY_PORT, &psSV_Port, sizeof(psSV_Port), DPNA_DATATYPE_DWORD ));
        // Debug
        // dump_URL ("! cl ", net_Address_device);
        // dump_URL ("! en ", net_Address_server);
        // Now set up the Application Description
        DPN_APPLICATION_DESC dpAppDesc;
        ZeroMemory (&dpAppDesc, sizeof(DPN_APPLICATION_DESC));
        dpAppDesc.dwSize = sizeof(DPN_APPLICATION_DESC);
        dpAppDesc.guidApplication = NET_GUID;
        // Setup client info
        /*strcpy_s( tmp, server_name );
        strcat_s( tmp, "/name=" );
        strcat_s( tmp, user_name_str );
        strcat_s( tmp, "/" );*/
        WCHAR ClientNameUNICODE [256];
        R_CHK(MultiByteToWideChar (CP_ACP, 0, user_name_str, -1, ClientNameUNICODE, 256 ));
        {
            // Player info: display name plus (process id, user name, user
            // password) payload delivered to the server on connect.
            DPN_PLAYER_INFO Pinfo;
            ZeroMemory (&Pinfo,sizeof(Pinfo));
            Pinfo.dwSize = sizeof(Pinfo);
            Pinfo.dwInfoFlags = DPNINFO_NAME|DPNINFO_DATA;
            Pinfo.pwszName = ClientNameUNICODE;
            SClientConnectData cl_data;
            cl_data.process_id = GetCurrentProcessId();
            strcpy_s( cl_data.name, user_name_str );
            strcpy_s( cl_data.pass, user_pass );
            Pinfo.pvData = &cl_data;
            Pinfo.dwDataSize = sizeof(cl_data);
            R_CHK(NET->SetClientInfo (&Pinfo,0,0,DPNSETCLIENTINFO_SYNC));
        }
        if ( stricmp( server_name, "localhost" ) == 0 )
        {
            // ---- local server: connect directly, probing up to 100 client ports.
            WCHAR SessionPasswordUNICODE[4096];
            if ( xr_strlen( password_str ) )
            {
                CHK_DX(MultiByteToWideChar (CP_ACP, 0, password_str, -1, SessionPasswordUNICODE, 4096 ));
                dpAppDesc.dwFlags |= DPNSESSION_REQUIREPASSWORD;
                dpAppDesc.pwszPassword = SessionPasswordUNICODE;
            };
            u32 c_port = u32(psCL_Port);
            HRESULT res = S_FALSE;
            while (res != S_OK && c_port <=u32(psCL_Port+100))
            {
                R_CHK(net_Address_device->AddComponent (DPNA_KEY_PORT, &c_port, sizeof(c_port), DPNA_DATATYPE_DWORD ));
                res = NET->Connect(
                    &dpAppDesc, // pdnAppDesc
                    net_Address_server, // pHostAddr
                    net_Address_device, // pDeviceInfo
                    NULL, // pdnSecurity
                    NULL, // pdnCredentials
                    NULL, 0, // pvUserConnectData/Size
                    NULL, // pvAsyncContext
                    NULL, // pvAsyncHandle
                    DPNCONNECT_SYNC); // dwFlags
                if (res != S_OK)
                {
                    // xr_string res = Debug.error2string(HostSuccess);
                    if (bPortWasSet)
                    {
                        // Explicitly requested port is busy - fail immediately.
                        Msg("! IPureClient : port %d is BUSY!", c_port);
                        return FALSE;
                    }
#ifdef DEBUG
                    else
                        Msg("! IPureClient : port %d is BUSY!", c_port);
#endif
                    c_port++;
                }
                else
                {
                    Msg("- IPureClient : created on port %d!", c_port);
                }
            };
            // R_CHK(res);
            if (res != S_OK) return FALSE;
            // Create ONE node
            HOST_NODE NODE;
            ZeroMemory (&NODE, sizeof(HOST_NODE));
            // Copy the Host Address
            R_CHK (net_Address_server->Duplicate(&NODE.pHostAddress ) );
            // Retreive session name
            char desc[4096];
            ZeroMemory (desc,sizeof(desc));
            DPN_APPLICATION_DESC* dpServerDesc=(DPN_APPLICATION_DESC*)desc;
            DWORD dpServerDescSize=sizeof(desc);
            dpServerDesc->dwSize = sizeof(DPN_APPLICATION_DESC);
            R_CHK (NET->GetApplicationDesc(dpServerDesc,&dpServerDescSize,0));
            if( dpServerDesc->pwszSessionName) {
                string4096 dpSessionName;
                R_CHK(WideCharToMultiByte(CP_ACP,0,dpServerDesc->pwszSessionName,-1,dpSessionName,sizeof(dpSessionName),0,0));
                // NOTE(review): dpSessionName is a stack buffer going out of
                // scope; this is only safe if NODE.dpSessionName copies the
                // string (e.g. shared_str) - confirm its type.
                NODE.dpSessionName = (char*)(&dpSessionName[0]);
            }
            net_Hosts.push_back (NODE);
        } else {
            // ---- remote server: enumerate hosts first, then connect to the
            // first responder recorded in net_Hosts by the enum callback.
            string64 EnumData;
            EnumData[0] = 0;
            strcat (EnumData, "ToConnect");
            DWORD EnumSize = xr_strlen(EnumData) + 1;
            // We now have the host address so lets enum
            u32 c_port = psCL_Port;
            HRESULT res = S_FALSE;
            while (res != S_OK && c_port <=END_PORT)
            {
                R_CHK(net_Address_device->AddComponent (DPNA_KEY_PORT, &c_port, sizeof(c_port), DPNA_DATATYPE_DWORD ));
                res = NET->EnumHosts(
                    &dpAppDesc, // pApplicationDesc
                    net_Address_server, // pdpaddrHost
                    net_Address_device, // pdpaddrDeviceInfo
                    EnumData, EnumSize, // pvUserEnumData, size
                    10, // dwEnumCount
                    1000, // dwRetryInterval
                    1000, // dwTimeOut
                    NULL, // pvUserContext
                    NULL, // pAsyncHandle
                    DPNENUMHOSTS_SYNC // dwFlags
                );
                if (res != S_OK)
                {
                    // xr_string res = Debug.error2string(HostSuccess);
                    switch (res)
                    {
                    case DPNERR_INVALIDHOSTADDRESS:
                    {
                        OnInvalidHost();
                        return FALSE;
                    }break;
                    case DPNERR_SESSIONFULL:
                    {
                        OnSessionFull();
                        return FALSE;
                    }break;
                    };
                    if (bPortWasSet)
                    {
                        Msg("! IPureClient : port %d is BUSY!", c_port);
                        return FALSE;
                    }
#ifdef DEBUG
                    else
                        Msg("! IPureClient : port %d is BUSY!", c_port);
                    // const char* x = DXGetErrorString9(res);
                    string1024 tmp = "";
                    DXTRACE_ERR(tmp, res);
#endif
                    c_port++;
                }
                else
                {
                    Msg("- IPureClient : created on port %d!", c_port);
                }
            };
            // ****** Connection
            IDirectPlay8Address* pHostAddress = NULL;
            if (net_Hosts.empty())
            {
                OnInvalidHost();
                return FALSE;
            };
            WCHAR SessionPasswordUNICODE[4096];
            if ( xr_strlen( password_str) )
            {
                CHK_DX(MultiByteToWideChar(CP_ACP, 0, password_str, -1, SessionPasswordUNICODE, 4096 ));
                dpAppDesc.dwFlags |= DPNSESSION_REQUIREPASSWORD;
                dpAppDesc.pwszPassword = SessionPasswordUNICODE;
            };
            // Guard net_Hosts against the enum callback running concurrently.
            net_csEnumeration.Enter ();
            // real connect
            for (u32 I=0; I<net_Hosts.size(); I++)
                Msg("* HOST #%d: %s\n",I+1,*net_Hosts[I].dpSessionName);
            R_CHK(net_Hosts.front().pHostAddress->Duplicate(&pHostAddress ) );
            // dump_URL ("! c2s ", pHostAddress);
            res = NET->Connect(
                &dpAppDesc, // pdnAppDesc
                pHostAddress, // pHostAddr
                net_Address_device, // pDeviceInfo
                NULL, // pdnSecurity
                NULL, // pdnCredentials
                NULL, 0, // pvUserConnectData/Size
                NULL, // pvAsyncContext
                NULL, // pvAsyncHandle
                DPNCONNECT_SYNC); // dwFlags
            // R_CHK(res);
            net_csEnumeration.Leave ();
            _RELEASE (pHostAddress);
#ifdef DEBUG
            // const char* x = DXGetErrorString9(res);
            string1024 tmp = "";
            DXTRACE_ERR(tmp, res);
#endif
            switch (res)
            {
            case DPNERR_INVALIDPASSWORD:
            {
                OnInvalidPassword();
            }break;
            case DPNERR_SESSIONFULL:
            {
                OnSessionFull();
            }break;
            case DPNERR_CANTCREATEPLAYER:
            {
                Msg("! Error: Can\'t create player");
            }break;
            }
            if (res != S_OK) return FALSE;
        }
        // Caps
        /*
        GUID sp_guid;
        DPN_SP_CAPS sp_caps;
        net_Address_device->GetSP(&sp_guid);
        ZeroMemory (&sp_caps,sizeof(sp_caps));
        sp_caps.dwSize = sizeof(sp_caps);
        R_CHK (NET->GetSPCaps(&sp_guid,&sp_caps,0));
        sp_caps.dwSystemBufferSize = 0;
        R_CHK (NET->SetSPCaps(&sp_guid,&sp_caps,0));
        R_CHK (NET->GetSPCaps(&sp_guid,&sp_caps,0));
        */
    } //psNET_direct_connect
    // Sync
    net_TimeDelta = 0;
    return TRUE;
}
// Closes the DirectPlay connection, frees the discovered-host list and
// releases all DirectPlay interfaces. Safe to call when never connected.
void IPureClient::Disconnect()
{
    if( NET ) NET->Close(0);
    // Clean up Host _list_
    net_csEnumeration.Enter ();
    for (u32 i=0; i<net_Hosts.size(); i++) {
        HOST_NODE& N = net_Hosts[i];
        _RELEASE (N.pHostAddress);
    }
    net_Hosts.clear ();
    net_csEnumeration.Leave ();
    // Release interfaces
    _SHOW_REF ("cl_netADR_Server",net_Address_server);
    _RELEASE (net_Address_server);
    _SHOW_REF ("cl_netADR_Device",net_Address_device);
    _RELEASE (net_Address_device);
    _SHOW_REF ("cl_netCORE",NET);
    _RELEASE (NET);
    // Reset state so a subsequent Connect() starts clean.
    net_Connected = EnmConnectionWait;
    net_Syncronised = FALSE;
}
// DirectPlay message dispatcher, invoked on DirectPlay worker threads via the
// free-function Handler(). Handles host-enumeration responses, incoming data
// and session termination; everything else is only logged (debug builds).
HRESULT IPureClient::net_Handler(u32 dwMessageType, PVOID pMessage)
{
    // HRESULT hr = S_OK;
    switch (dwMessageType)
    {
    case DPN_MSGID_ENUM_HOSTS_RESPONSE:
    {
        PDPNMSG_ENUM_HOSTS_RESPONSE pEnumHostsResponseMsg;
        const DPN_APPLICATION_DESC* pDesc;
        // HOST_NODE* pHostNode = NULL;
        // WCHAR* pwszSession = NULL;
        pEnumHostsResponseMsg = (PDPNMSG_ENUM_HOSTS_RESPONSE) pMessage;
        pDesc = pEnumHostsResponseMsg->pApplicationDescription;
        // Insert each host response if it isn't already present
        net_csEnumeration.Enter ();
        BOOL bHostRegistered = FALSE;
        for (u32 I=0; I<net_Hosts.size(); I++)
        {
            HOST_NODE& N = net_Hosts [I];
            // Hosts are identified by their session instance GUID.
            if ( pDesc->guidInstance == N.dpAppDesc.guidInstance)
            {
                // This host is already in the list
                bHostRegistered = TRUE;
                break;
            }
        }
        if (!bHostRegistered)
        {
            // This host session is not in the list then so insert it.
            HOST_NODE NODE;
            ZeroMemory (&NODE, sizeof(HOST_NODE));
            // Copy the Host Address
            R_CHK (pEnumHostsResponseMsg->pAddressSender->Duplicate(&NODE.pHostAddress ) );
            CopyMemory(&NODE.dpAppDesc,pDesc,sizeof(DPN_APPLICATION_DESC));
            // Null out all the pointers we aren't copying
            NODE.dpAppDesc.pwszSessionName = NULL;
            NODE.dpAppDesc.pwszPassword = NULL;
            NODE.dpAppDesc.pvReservedData = NULL;
            NODE.dpAppDesc.dwReservedDataSize = 0;
            NODE.dpAppDesc.pvApplicationReservedData = NULL;
            NODE.dpAppDesc.dwApplicationReservedDataSize = 0;
            if( pDesc->pwszSessionName) {
                string4096 dpSessionName;
                R_CHK (WideCharToMultiByte(CP_ACP,0,pDesc->pwszSessionName,-1,dpSessionName,sizeof(dpSessionName),0,0));
                // NOTE(review): dpSessionName is a stack buffer; safe only if
                // NODE.dpSessionName copies the string - confirm its type.
                NODE.dpSessionName = (char*)(&dpSessionName[0]);
            }
            net_Hosts.push_back (NODE);
        }
        net_csEnumeration.Leave ();
    }
    break;
    case DPN_MSGID_RECEIVE:
    {
        // Incoming payload: hand off to the multipacket layer, which
        // eventually calls _Recieve() per logical packet.
        PDPNMSG_RECEIVE pMsg = (PDPNMSG_RECEIVE) pMessage;
        MultipacketReciever::RecievePacket( pMsg->pReceiveData, pMsg->dwReceiveDataSize );
    }
    break;
    case DPN_MSGID_TERMINATE_SESSION:
    {
        PDPNMSG_TERMINATE_SESSION pMsg = (PDPNMSG_TERMINATE_SESSION ) pMessage;
        char* m_data = (char*)pMsg->pvTerminateData;
        u32 m_size = pMsg->dwTerminateDataSize;
        HRESULT m_hResultCode = pMsg->hResultCode;
        net_Disconnected = TRUE;
        // Report either the server-supplied reason or the HRESULT as text.
        if (m_size != 0)
        {
            OnSessionTerminate(m_data);
            //Msg("- Session terminated : %s", m_data);
        }
        else
        {
            OnSessionTerminate( (::Debug.error2string(m_hResultCode)));
            //Msg("- Session terminated : %s", (::Debug.error2string(m_hResultCode)));
        }
    };
    break;
    default:
    {
        // Remaining message types are ignored; the block below only maps the
        // id to a readable name (the final Msg() is commented out).
#if 1
        LPSTR msg = "";
        switch (dwMessageType)
        {
        case DPN_MSGID_ADD_PLAYER_TO_GROUP: msg = "DPN_MSGID_ADD_PLAYER_TO_GROUP"; break;
        case DPN_MSGID_ASYNC_OP_COMPLETE: msg = "DPN_MSGID_ASYNC_OP_COMPLETE"; break;
        case DPN_MSGID_CLIENT_INFO: msg = "DPN_MSGID_CLIENT_INFO"; break;
        case DPN_MSGID_CONNECT_COMPLETE:
        {
            PDPNMSG_CONNECT_COMPLETE pMsg = (PDPNMSG_CONNECT_COMPLETE)pMessage;
#ifdef DEBUG
            // const char* x = DXGetErrorString9(pMsg->hResultCode);
            if (pMsg->hResultCode != S_OK)
            {
                string1024 tmp="";
                DXTRACE_ERR(tmp, pMsg->hResultCode);
            }
#endif
            if (pMsg->dwApplicationReplyDataSize)
            {
                string256 ResStr = "";
                strncpy(ResStr, (char*)(pMsg->pvApplicationReplyData), pMsg->dwApplicationReplyDataSize);
                Msg("Connection result : %s", ResStr);
            }
            else
                msg = "DPN_MSGID_CONNECT_COMPLETE";
        }break;
        case DPN_MSGID_CREATE_GROUP: msg = "DPN_MSGID_CREATE_GROUP"; break;
        case DPN_MSGID_CREATE_PLAYER: msg = "DPN_MSGID_CREATE_PLAYER"; break;
        case DPN_MSGID_DESTROY_GROUP: msg = "DPN_MSGID_DESTROY_GROUP"; break;
        case DPN_MSGID_DESTROY_PLAYER: msg = "DPN_MSGID_DESTROY_PLAYER"; break;
        case DPN_MSGID_ENUM_HOSTS_QUERY: msg = "DPN_MSGID_ENUM_HOSTS_QUERY"; break;
        case DPN_MSGID_GROUP_INFO: msg = "DPN_MSGID_GROUP_INFO"; break;
        case DPN_MSGID_HOST_MIGRATE: msg = "DPN_MSGID_HOST_MIGRATE"; break;
        case DPN_MSGID_INDICATE_CONNECT: msg = "DPN_MSGID_INDICATE_CONNECT"; break;
        case DPN_MSGID_INDICATED_CONNECT_ABORTED: msg = "DPN_MSGID_INDICATED_CONNECT_ABORTED"; break;
        case DPN_MSGID_PEER_INFO: msg = "DPN_MSGID_PEER_INFO"; break;
        case DPN_MSGID_REMOVE_PLAYER_FROM_GROUP: msg = "DPN_MSGID_REMOVE_PLAYER_FROM_GROUP"; break;
        case DPN_MSGID_RETURN_BUFFER: msg = "DPN_MSGID_RETURN_BUFFER"; break;
        case DPN_MSGID_SEND_COMPLETE: msg = "DPN_MSGID_SEND_COMPLETE"; break;
        case DPN_MSGID_SERVER_INFO: msg = "DPN_MSGID_SERVER_INFO"; break;
        case DPN_MSGID_TERMINATE_SESSION: msg = "DPN_MSGID_TERMINATE_SESSION"; break;
        default: msg = "???"; break;
        }
        //Msg("! ************************************ : %s",msg);
#endif
    }
    break;
    }
    return S_OK;
}
// Default payload handler: wraps the raw bytes into a NET_Packet, stamps the
// receive time, peeks the message type and commits it to the incoming queue.
void IPureClient::OnMessage(void* data, u32 size)
{
    // One of the messages - decompress it
    NET_Packet* P = net_Queue.CreateGet();
    P->construct (data, size);
    P->timeReceive = timeServer_Async();
    // Read (and discard) the leading u16 type to position the read cursor.
    u16 tmp_type;
    P->r_begin (tmp_type);
    net_Queue.CreateCommit (P);
}
// Feeds one server/client timestamp pair into the clock-sync filter:
// estimated one-way latency (ping/2) gives the server-vs-client time delta,
// which is pushed into the ring buffer and averaged by Sync_Average().
void IPureClient::timeServer_Correct(u32 sv_time, u32 cl_time)
{
    u32 ping = net_Statistic.getPing();
    u32 delta = sv_time + ping/2 - cl_time;
    net_DeltaArray.push (delta);
    Sync_Average ();
}
// Low-level send: optionally logs the packet, then hands it to DirectPlay
// with DPNSEND_COALESCE added to the caller-supplied flags. Send failures are
// logged but not propagated; no-op once disconnected.
void IPureClient::SendTo_LL(void* data, u32 size, u32 dwFlags, u32 dwTimeout)
{
    if( net_Disconnected )
        return;
    if( psNET_Flags.test(NETFLAG_LOG_CL_PACKETS) )
    {
        // Lazily create the packet log on first use.
        if( !pClNetLog)
            pClNetLog = xr_new<INetLog>( "logs\\net_cl_log.log", timeServer() );
        if( pClNetLog )
            pClNetLog->LogData( timeServer(), data, size );
    }
    DPN_BUFFER_DESC desc;
    desc.dwBufferSize = size;
    desc.pBufferData = (BYTE*)data;
    net_Statistic.dwBytesSended += size;
    // verify
    VERIFY(desc.dwBufferSize);
    VERIFY(desc.pBufferData);
    VERIFY(NET);
    DPNHANDLE hAsync = 0;
    HRESULT hr = NET->Send( &desc, 1, dwTimeout, 0, &hAsync, dwFlags | DPNSEND_COALESCE );
    // Msg("- Client::SendTo_LL [%d]", size);
    if( FAILED(hr) )
    {
        Msg ("! ERROR: Failed to send net-packet, reason: %s",::Debug.error2string(hr));
        // const char* x = DXGetErrorString9(hr);
        string1024 tmp="";
        DXTRACE_ERR(tmp, hr);
    }
    // UpdateStatistic();
}
// Queues a packet through the multipacket aggregation layer (which batches
// small packets before they reach SendTo_LL).
void IPureClient::Send( NET_Packet& packet, u32 dwFlags, u32 dwTimeout )
{
    MultipacketSender::SendPacket( packet.B.data, packet.B.count, dwFlags, dwTimeout );
}
// Forces any batched packets in the multipacket layer out onto the wire now.
void IPureClient::Flush_Send_Buffer ()
{
    MultipacketSender::FlushSendBuffer( 0 );
}
// Rate limiter for client updates. Returns TRUE when enough time has passed
// since the last update AND (in DirectPlay mode) the send queue is not backed
// up beyond psNET_ClientPending entries. Updating net_Time_LastUpdate here
// means the decision is consumed by the act of asking.
BOOL IPureClient::net_HasBandwidth ()
{
    u32 dwTime = TimeGlobal(device_timer);
    u32 dwInterval = 0;
    if (net_Disconnected) return FALSE;
    if (psNET_ClientUpdate != 0) dwInterval = 1000/psNET_ClientUpdate;
    // NOTE(review): 1000ms is once per second, not "3 times per second" as the
    // original comment claims - confirm which is intended.
    if (psNET_Flags.test(NETFLAG_MINIMIZEUPDATES)) dwInterval = 1000; // approx 3 times per second
    if(psNET_direct_connect)
    {
        // Direct mode: pure time-based throttle, no send queue to inspect.
        if( 0 != psNET_ClientUpdate && (dwTime-net_Time_LastUpdate)>dwInterval)
        {
            net_Time_LastUpdate = dwTime;
            return TRUE;
        }else
            return FALSE;
    }else
    if (0 != psNET_ClientUpdate && (dwTime-net_Time_LastUpdate)>dwInterval)
    {
        HRESULT hr;
        R_ASSERT (NET);
        // check queue for "empty" state
        DWORD dwPending=0;
        hr = NET->GetSendQueueInfo(&dwPending,0,0);
        if (FAILED(hr)) return FALSE;
        if (dwPending > u32(psNET_ClientPending))
        {
            // Too much unsent data queued - count the block and back off.
            net_Statistic.dwTimesBlocked++;
            return FALSE;
        };
        UpdateStatistic();
        // ok
        net_Time_LastUpdate = dwTime;
        return TRUE;
    }
    return FALSE;
}
// Pulls the current DirectPlay connection statistics (ping, throughput, ...)
// into net_Statistic; silently skips on query failure.
void IPureClient::UpdateStatistic()
{
    // Query network statistic for this client
    DPN_CONNECTION_INFO CI;
    ZeroMemory (&CI,sizeof(CI));
    CI.dwSize = sizeof(CI);
    HRESULT hr = NET->GetConnectionInfo(&CI,0);
    if (FAILED(hr)) return;
    net_Statistic.Update(CI);
}
// Background clock-synchronisation loop (runs on the thread spawned by
// net_Syncronize). Repeatedly pings the server with a signed MSYS_PING and
// waits for _Recieve() to push the resulting time delta; once syncSamples
// deltas are gathered, net_TimeDelta is latched and the loop exits.
void IPureClient::Sync_Thread ()
{
    MSYS_PING clPing;
    //***** Ping server
    net_DeltaArray.clear();
    R_ASSERT (NET);
    for (; NET && !net_Disconnected; )
    {
        // Waiting for queue empty state
        if (net_Syncronised) break; // Sleep(2000);
        else {
            // Drain the send queue so our ping timestamp is not skewed by
            // queued traffic ahead of it.
            DWORD dwPending=0;
            do {
                R_CHK (NET->GetSendQueueInfo(&dwPending,0,0));
                Sleep (1);
            } while (dwPending);
        }
        // Construct message
        clPing.sign1 = 0x12071980;
        clPing.sign2 = 0x26111975;
        clPing.dwTime_ClientSend = TimerAsync(device_timer);
        // Send it
        // SEH guard: NET may be torn down by Disconnect() on another thread.
        __try {
            DPN_BUFFER_DESC desc;
            DPNHANDLE hAsync=0;
            desc.dwBufferSize = sizeof(clPing);
            desc.pBufferData = LPBYTE(&clPing);
            if (0==NET || net_Disconnected) break;
            if (FAILED(NET->Send(&desc,1,0,0,&hAsync,net_flags(FALSE,FALSE,TRUE)))) {
                Msg("* CLIENT: SyncThread: EXIT. (failed to send - disconnected?)");
                break;
            }
        }
        __except (EXCEPTION_EXECUTE_HANDLER)
        {
            Msg("* CLIENT: SyncThread: EXIT. (failed to send - disconnected?)");
            break;
        }
        // Waiting for reply-packet to arrive
        if (!net_Syncronised) {
            // Poll (up to 5s) until _Recieve() records a new delta sample.
            u32 old_size = net_DeltaArray.size();
            u32 timeBegin = TimerAsync(device_timer);
            while ((net_DeltaArray.size()==old_size)&&(TimerAsync(device_timer)-timeBegin<5000)) Sleep(1);
            if (net_DeltaArray.size()>=syncSamples) {
                net_Syncronised = TRUE;
                net_TimeDelta = net_TimeDelta_Calculated;
                // Msg ("* CL_TimeSync: DELTA: %d",net_TimeDelta);
            }
        }
    }
}
// Recomputes the averaged client/server time delta from the sample ring
// buffer, rounding away from zero, then low-pass filters the live delta
// (5/6 old + 1/6 new). Callers always push a sample first, so size >= 1.
void IPureClient::Sync_Average ()
{
    //***** Analyze results
    s64 summary_delta = 0;
    s32 size = net_DeltaArray.size();
    u32* I = net_DeltaArray.begin();
    u32* E = I+size;
    // Samples are stored as u32 but are really signed deltas.
    for (; I!=E; I++) summary_delta += *((int*)I);
    s64 frac = s64(summary_delta) % s64(size);
    if (frac<0) frac=-frac;
    summary_delta /= s64(size);
    // Round the truncated quotient away from zero when |remainder| > size/2.
    if (frac>s64(size/2)) summary_delta += (summary_delta<0)?-1:1;
    net_TimeDelta_Calculated= s32(summary_delta);
    net_TimeDelta = (net_TimeDelta*5+net_TimeDelta_Calculated)/6;
    // Msg("* CLIENT: d(%d), dc(%d), s(%d)",net_TimeDelta,net_TimeDelta_Calculated,size);
}
void sync_thread(void* P)
{
SetThreadPriority (GetCurrentThread(),THREAD_PRIORITY_TIME_CRITICAL);
IPureClient* C = (IPureClient*)P;
C->Sync_Thread ();
}
// Starts (or restarts) clock synchronisation with the server on a dedicated
// thread; resets previous samples and the synchronised flag first.
void IPureClient::net_Syncronize ()
{
    net_Syncronised = FALSE;
    net_DeltaArray.clear();
    thread_spawn (sync_thread,"network-time-sync",0,this);
}
// Resets all accumulated network statistics counters.
void IPureClient::ClearStatistic()
{
    net_Statistic.Clear();
}
// TRUE once Sync_Thread has collected enough samples and latched net_TimeDelta.
BOOL IPureClient::net_IsSyncronised()
{
    return net_Syncronised;
}
#include <WINSOCK2.H>
#include <Ws2tcpip.h>
// Resolves the server's hostname/port from the stored DirectPlay address.
// On success fills `pAddress` (dotted-quad IP) and `*pPort` and returns true;
// returns false when no server address is set or DNS resolution fails.
// Fix: gethostbyname() returns NULL on failure and the previous code
// dereferenced the result unconditionally, crashing on unresolvable hosts.
bool IPureClient::GetServerAddress (ip_address& pAddress, DWORD* pPort)
{
    *pPort = 0;
    if (!net_Address_server) return false;
    // Extract the hostname component and convert it back to ANSI.
    WCHAR wstrHostname[ 2048 ] = {0};
    DWORD dwHostNameSize = sizeof(wstrHostname);
    DWORD dwHostNameDataType = DPNA_DATATYPE_STRING;
    CHK_DX(net_Address_server->GetComponentByName( DPNA_KEY_HOSTNAME, wstrHostname, &dwHostNameSize, &dwHostNameDataType ));
    string2048 HostName;
    CHK_DX(WideCharToMultiByte(CP_ACP,0,wstrHostname,-1,HostName,sizeof(HostName),0,0));
    // First resolution: hostname -> canonical entry.
    hostent* pHostEnt = gethostbyname(HostName);
    if (!pHostEnt || !pHostEnt->h_addr_list || !pHostEnt->h_addr_list[0])
        return false;
    char* localIP = inet_ntoa (*(struct in_addr *)*pHostEnt->h_addr_list);
    // Second resolution via the official host name to get the primary address.
    pHostEnt = gethostbyname(pHostEnt->h_name);
    if (!pHostEnt || !pHostEnt->h_addr_list || !pHostEnt->h_addr_list[0])
        return false;
    localIP = inet_ntoa (*(struct in_addr *)*pHostEnt->h_addr_list);
    pAddress.set (localIP);
    // Extract the port component.
    DWORD dwPort = 0;
    DWORD dwPortSize = sizeof(dwPort);
    DWORD dwPortDataType = DPNA_DATATYPE_DWORD;
    CHK_DX(net_Address_server->GetComponentByName( DPNA_KEY_PORT, &dwPort, &dwPortSize, &dwPortDataType ));
    *pPort = dwPort;
    return true;
};
| OLR-xray/OLR-3.0 | src/xray/xrNetServer/NET_Client.cpp | C++ | apache-2.0 | 30,074 |
/*******************************************************************************
* Copyright (c) 2015-2019 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.nn.weights.embeddings;
import lombok.EqualsAndHashCode;
import lombok.NonNull;
import org.deeplearning4j.nn.weights.IWeightInit;
import org.nd4j.common.base.Preconditions;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.shade.jackson.annotation.JsonIgnoreProperties;
import org.nd4j.shade.jackson.annotation.JsonProperty;
/**
 * Weight initialization for initializing the parameters of an EmbeddingLayer from a {@link EmbeddingInitializer}
 *
 * Note: WeightInitEmbedding supports both JSON serializable and non JSON serializable initializations.
 * In the case of non-JSON serializable embeddings, they are a one-time only use: once they have been used
 * to initialize the parameters, they will be removed from the WeightInitEmbedding instance.
 * This is to prevent unnecessary references to potentially large objects in memory (i.e., to avoid memory leaks)
 *
 * @author Alex Black
 */
@JsonIgnoreProperties("nonSerializableInit")
@EqualsAndHashCode
public class WeightInitEmbedding implements IWeightInit {

    /** Initializer that survives JSON round-trips; null if the supplied initializer is not serializable. */
    private EmbeddingInitializer serializableInit;
    /** One-shot initializer; never written to JSON and cleared after its first use. */
    private EmbeddingInitializer nonSerializableInit;

    public WeightInitEmbedding(@NonNull EmbeddingInitializer embeddingInitializer){
        // Route the initializer into the serializable or the one-shot slot depending on its capabilities
        this((embeddingInitializer.jsonSerializable() ? embeddingInitializer : null), (embeddingInitializer.jsonSerializable() ? null : embeddingInitializer));
    }

    protected WeightInitEmbedding(@JsonProperty("serializableInit") EmbeddingInitializer serializableInit,
                                  @JsonProperty("nonSerializableInit") EmbeddingInitializer nonSerializableInit){
        this.serializableInit = serializableInit;
        this.nonSerializableInit = nonSerializableInit;
    }

    /**
     * Initialize the embedding parameters in-place from whichever initializer is available.
     *
     * @throws IllegalStateException if no initializer is available (e.g. a non-serializable initializer
     *                               was lost across JSON serialization, or already consumed)
     */
    @Override
    public INDArray init(double fanIn, double fanOut, long[] shape, char order, INDArray paramView) {
        EmbeddingInitializer init = serializableInit != null ? serializableInit : nonSerializableInit;
        if(init == null){
            // Fix: the original exception message was truncated mid-sentence ("...and the try to ")
            throw new IllegalStateException("Cannot initialize embedding layer weights: no EmbeddingInitializer is available." +
                    " This can occur if you save the network configuration, load it, and then try to initialize the network:" +
                    " non-JSON-serializable EmbeddingInitializer instances are not stored in the configuration, and are" +
                    " also removed from this instance after they have been used once");
        }

        Preconditions.checkState(shape[0] == init.vocabSize(), "Parameters shape[0]=%s does not match embedding initializer vocab size of %s", shape[0], init.vocabSize());
        Preconditions.checkState(shape[1] == init.vectorSize(), "Parameters shape[1]=%s does not match embedding initializer vector size of %s", shape[1], init.vectorSize());

        INDArray reshaped = paramView.reshape('c', shape);
        init.loadWeightsInto(reshaped);

        //Now that we've loaded weights - let's clear the reference if it's non-serializable so it can be GC'd
        this.nonSerializableInit = null;

        return reshaped;
    }

    /**
     * @return {vocabSize, vectorSize} of whichever initializer is present, or null if neither is available
     */
    public long[] shape(){
        if(serializableInit != null){
            return new long[]{serializableInit.vocabSize(), serializableInit.vectorSize()};
        } else if(nonSerializableInit != null){
            return new long[]{nonSerializableInit.vocabSize(), nonSerializableInit.vectorSize()};
        }
        return null;
    }
}
| deeplearning4j/deeplearning4j | deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/weights/embeddings/WeightInitEmbedding.java | Java | apache-2.0 | 3,970 |
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
IPMI power manager driver.
Uses the 'ipmitool' command (http://ipmitool.sourceforge.net/) to remotely
manage hardware. This includes setting the boot device, getting a
serial-over-LAN console, and controlling the power state of the machine.
NOTE THAT CERTAIN DISTROS MAY INSTALL openipmi BY DEFAULT, INSTEAD OF ipmitool,
WHICH PROVIDES DIFFERENT COMMAND-LINE OPTIONS AND *IS NOT SUPPORTED* BY THIS
DRIVER.
"""
import contextlib
import os
import re
import subprocess
import tempfile
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
CONF = cfg.CONF
CONF.import_opt('retry_timeout',
'ironic.drivers.modules.ipminative',
group='ipmi')
CONF.import_opt('min_command_interval',
'ironic.drivers.modules.ipminative',
group='ipmi')
LOG = logging.getLogger(__name__)
VALID_PRIV_LEVELS = ['ADMINISTRATOR', 'CALLBACK', 'OPERATOR', 'USER']
VALID_PROTO_VERSIONS = ('2.0', '1.5')
REQUIRED_PROPERTIES = {
'ipmi_address': _("IP address or hostname of the node. Required.")
}
OPTIONAL_PROPERTIES = {
'ipmi_password': _("password. Optional."),
'ipmi_priv_level': _("privilege level; default is ADMINISTRATOR. One of "
"%s. Optional.") % ', '.join(VALID_PRIV_LEVELS),
'ipmi_username': _("username; default is NULL user. Optional."),
'ipmi_bridging': _("bridging_type; default is \"no\". One of \"single\", "
"\"dual\", \"no\". Optional."),
'ipmi_transit_channel': _("transit channel for bridged request. Required "
"only if ipmi_bridging is set to \"dual\"."),
'ipmi_transit_address': _("transit address for bridged request. Required "
"only if ipmi_bridging is set to \"dual\"."),
'ipmi_target_channel': _("destination channel for bridged request. "
"Required only if ipmi_bridging is set to "
"\"single\" or \"dual\"."),
'ipmi_target_address': _("destination address for bridged request. "
"Required only if ipmi_bridging is set "
"to \"single\" or \"dual\"."),
'ipmi_local_address': _("local IPMB address for bridged requests. "
"Used only if ipmi_bridging is set "
"to \"single\" or \"dual\". Optional."),
'ipmi_protocol_version': _('the version of the IPMI protocol; default '
'is "2.0". One of "1.5", "2.0". Optional.'),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
CONSOLE_PROPERTIES = {
'ipmi_terminal_port': _("node's UDP port to connect to. Only required for "
"console access.")
}
BRIDGING_OPTIONS = [('local_address', '-m'),
('transit_channel', '-B'), ('transit_address', '-T'),
('target_channel', '-b'), ('target_address', '-t')]
# Per-node timestamps of the last ipmitool invocation; presumably used to
# enforce CONF.ipmi.min_command_interval between commands -- TODO confirm
# against the callers further down this module.
LAST_CMD_TIME = {}
# Tri-state caches for optional ipmitool capabilities: None = not yet probed,
# True/False = probed result. Populated lazily via _check_option_support().
TIMING_SUPPORT = None
SINGLE_BRIDGE_SUPPORT = None
DUAL_BRIDGE_SUPPORT = None
TMP_DIR_CHECKED = None
# Harmless probe invocations used to detect support for each optional
# capability ('-h' makes ipmitool exit without touching any hardware).
ipmitool_command_options = {
    'timing': ['ipmitool', '-N', '0', '-R', '0', '-h'],
    'single_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0', '-h'],
    'dual_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0',
                    '-B', '0', '-T', '0', '-h']}
# Note(TheJulia): This string is hardcoded in ipmitool's lanplus driver
# and is substituted in return for the error code received from the IPMI
# controller.  As of 1.8.15, no internationalization support appears to
# be in ipmitool which means the string should always be returned in this
# form regardless of locale.
IPMITOOL_RETRYABLE_FAILURES = ['insufficient resources for session']
def _check_option_support(options):
    """Checks if the specific ipmitool options are supported on host.

    This method updates the module-level variables indicating whether
    an option is supported so that it is accessible by any driver
    interface class in this module. It is intended to be called from
    the __init__ method of such classes only.

    :param options: list of ipmitool options to be checked
    :raises: OSError
    """
    for option in options:
        # Skip options whose support status has already been probed.
        if _is_option_supported(option) is not None:
            continue
        cmd = ipmitool_command_options[option]
        try:
            # NOTE(cinerama): use subprocess.check_call to
            # check options & suppress ipmitool output to
            # avoid alarming people
            with open(os.devnull, 'wb') as devnull:
                subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except subprocess.CalledProcessError:
            LOG.info(_LI("Option %(opt)s is not supported by ipmitool"),
                     {'opt': option})
            _is_option_supported(option, False)
        else:
            LOG.info(_LI("Option %(opt)s is supported by ipmitool"),
                     {'opt': option})
            _is_option_supported(option, True)
def _is_option_supported(option, is_supported=None):
    """Indicates whether the particular ipmitool option is supported.

    :param option: specific ipmitool option
    :param is_supported: Optional Boolean. when specified, this value
                         is assigned to the module-level variable indicating
                         whether the option is supported. Used only if a value
                         is not already assigned.
    :returns: True, indicates the option is supported
    :returns: False, indicates the option is not supported
    :returns: None, indicates that it is not aware whether the option
              is supported

    Note: an unrecognized ``option`` falls through all branches and
    implicitly returns None as well.
    """
    # Each supported option maps onto one module-level tri-state cache;
    # the cache is only written once (first non-None is_supported wins).
    global SINGLE_BRIDGE_SUPPORT
    global DUAL_BRIDGE_SUPPORT
    global TIMING_SUPPORT

    if option == 'single_bridge':
        if (SINGLE_BRIDGE_SUPPORT is None) and (is_supported is not None):
            SINGLE_BRIDGE_SUPPORT = is_supported
        return SINGLE_BRIDGE_SUPPORT
    elif option == 'dual_bridge':
        if (DUAL_BRIDGE_SUPPORT is None) and (is_supported is not None):
            DUAL_BRIDGE_SUPPORT = is_supported
        return DUAL_BRIDGE_SUPPORT
    elif option == 'timing':
        if (TIMING_SUPPORT is None) and (is_supported is not None):
            TIMING_SUPPORT = is_supported
        return TIMING_SUPPORT
def _console_pwfile_path(uuid):
    """Return the file path for storing the ipmi password for a console."""
    return os.path.join(CONF.tempdir, "%(uuid)s.pw" % {'uuid': uuid})
@contextlib.contextmanager
def _make_password_file(password):
    """Makes a temporary file that contains the password.

    The temporary file is removed when the context manager exits (closing a
    NamedTemporaryFile deletes it), so the password never outlives its use.

    :param password: the password
    :returns: the absolute pathname of the temporary file
    :raises: PasswordFileFailedToCreate from creating or writing to the
        temporary file
    """
    f = None
    try:
        f = tempfile.NamedTemporaryFile(mode='w', dir=CONF.tempdir)
        f.write(str(password))
        f.flush()
    except (IOError, OSError) as exc:
        # Close (and thereby delete) any partially-created file before
        # translating the failure into the driver-specific exception.
        if f is not None:
            f.close()
        raise exception.PasswordFileFailedToCreate(error=exc)
    except Exception:
        # Unexpected errors are re-raised as-is, after cleaning up the file.
        with excutils.save_and_reraise_exception():
            if f is not None:
                f.close()
    try:
        # NOTE(jlvillal): This yield can not be in the try/except block above
        # because an exception by the caller of this function would then get
        # changed to a PasswordFileFailedToCreate exception which would mislead
        # about the problem and its cause.
        yield f.name
    finally:
        if f is not None:
            f.close()
def _parse_driver_info(node):
    """Gets the parameters required for ipmitool to access the node.

    Validates credentials, the protocol version, the terminal port and the
    bridging configuration found in the node's driver_info.

    :param node: the Node of interest.
    :returns: dictionary of parameters.
    :raises: InvalidParameterValue when an invalid value is specified
    :raises: MissingParameterValue when a required ipmi parameter is missing.
    """
    info = node.driver_info or {}
    bridging_types = ['single', 'dual']
    # Every REQUIRED_PROPERTIES key must be present with a truthy value.
    missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
    if missing_info:
        raise exception.MissingParameterValue(_(
            "Missing the following IPMI credentials in node's"
            " driver_info: %s.") % missing_info)
    address = info.get('ipmi_address')
    username = info.get('ipmi_username')
    password = info.get('ipmi_password')
    port = info.get('ipmi_terminal_port')
    priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
    bridging_type = info.get('ipmi_bridging', 'no')
    local_address = info.get('ipmi_local_address')
    transit_channel = info.get('ipmi_transit_channel')
    transit_address = info.get('ipmi_transit_address')
    target_channel = info.get('ipmi_target_channel')
    target_address = info.get('ipmi_target_address')
    # str() so a non-string value (e.g. numeric 2.0) can still match the
    # string constants in VALID_PROTO_VERSIONS.
    protocol_version = str(info.get('ipmi_protocol_version', '2.0'))
    if protocol_version not in VALID_PROTO_VERSIONS:
        valid_versions = ', '.join(VALID_PROTO_VERSIONS)
        raise exception.InvalidParameterValue(_(
            "Invalid IPMI protocol version value %(version)s, the valid "
            "value can be one of %(valid_versions)s") %
            {'version': protocol_version, 'valid_versions': valid_versions})
    if port:
        try:
            port = int(port)
        except ValueError:
            raise exception.InvalidParameterValue(_(
                "IPMI terminal port is not an integer."))
    # check if ipmi_bridging has proper value
    if bridging_type == 'no':
        # if bridging is not selected, then set all bridging params to None
        (local_address, transit_channel, transit_address, target_channel,
         target_address) = (None,) * 5
    elif bridging_type in bridging_types:
        # check if the particular bridging option is supported on host
        if not _is_option_supported('%s_bridge' % bridging_type):
            raise exception.InvalidParameterValue(_(
                "Value for ipmi_bridging is provided as %s, but IPMI "
                "bridging is not supported by the IPMI utility installed "
                "on host. Ensure ipmitool version is > 1.8.11"
            ) % bridging_type)
        # ensure that all the required parameters are provided
        params_undefined = [param for param, value in [
            ("ipmi_target_channel", target_channel),
            ('ipmi_target_address', target_address)] if value is None]
        if bridging_type == 'dual':
            # dual bridging additionally requires the transit parameters
            params_undefined2 = [param for param, value in [
                ("ipmi_transit_channel", transit_channel),
                ('ipmi_transit_address', transit_address)
            ] if value is None]
            params_undefined.extend(params_undefined2)
        else:
            # if single bridging was selected, set dual bridge params to None
            transit_channel = transit_address = None
        # If the required parameters were not provided,
        # raise an exception
        if params_undefined:
            raise exception.MissingParameterValue(_(
                "%(param)s not provided") % {'param': params_undefined})
    else:
        raise exception.InvalidParameterValue(_(
            "Invalid value for ipmi_bridging: %(bridging_type)s,"
            " the valid value can be one of: %(bridging_types)s"
        ) % {'bridging_type': bridging_type,
             'bridging_types': bridging_types + ['no']})
    if priv_level not in VALID_PRIV_LEVELS:
        valid_priv_lvls = ', '.join(VALID_PRIV_LEVELS)
        raise exception.InvalidParameterValue(_(
            "Invalid privilege level value:%(priv_level)s, the valid value"
            " can be one of %(valid_levels)s") %
            {'priv_level': priv_level, 'valid_levels': valid_priv_lvls})
    return {
        'address': address,
        'username': username,
        'password': password,
        'port': port,
        'uuid': node.uuid,
        'priv_level': priv_level,
        'local_address': local_address,
        'transit_channel': transit_channel,
        'transit_address': transit_address,
        'target_channel': target_channel,
        'target_address': target_address,
        'protocol_version': protocol_version,
    }
def _exec_ipmitool(driver_info, command):
    """Execute the ipmitool command.

    Retries retryable ipmitool failures until CONF.ipmi.retry_timeout is
    exhausted, and throttles calls so a BMC is contacted at most once per
    CONF.ipmi.min_command_interval seconds.

    :param driver_info: the ipmitool parameters for accessing a node.
    :param command: the ipmitool command to be executed.
    :returns: (stdout, stderr) from executing the command.
    :raises: PasswordFileFailedToCreate from creating or writing to the
        temporary file.
    :raises: processutils.ProcessExecutionError from executing the command.
    """
    # IPMI v2.0 uses the 'lanplus' interface; anything else falls back to
    # the v1.5 'lan' interface.
    ipmi_version = ('lanplus'
                    if driver_info['protocol_version'] == '2.0'
                    else 'lan')
    args = ['ipmitool',
            '-I',
            ipmi_version,
            '-H',
            driver_info['address'],
            '-L', driver_info['priv_level']
            ]
    if driver_info['username']:
        args.append('-U')
        args.append(driver_info['username'])
    for name, option in BRIDGING_OPTIONS:
        if driver_info[name] is not None:
            args.append(option)
            args.append(driver_info[name])
    # specify retry timing more precisely, if supported
    num_tries = max(
        (CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)
    if _is_option_supported('timing'):
        args.append('-R')
        args.append(str(num_tries))
        args.append('-N')
        args.append(str(CONF.ipmi.min_command_interval))
    end_time = (time.time() + CONF.ipmi.retry_timeout)
    while True:
        num_tries = num_tries - 1
        # NOTE(deva): ensure that no communications are sent to a BMC more
        # often than once every min_command_interval seconds.
        time_till_next_poll = CONF.ipmi.min_command_interval - (
            time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
        if time_till_next_poll > 0:
            time.sleep(time_till_next_poll)
        # Copy the base argument list on each attempt so the '-f'
        # password-file arguments appended below do not accumulate
        # across retries.
        cmd_args = args[:]
        # 'ipmitool' command will prompt password if there is no '-f'
        # option, we set it to '\0' to write a password file to support
        # empty password
        with _make_password_file(driver_info['password'] or '\0') as pw_file:
            cmd_args.append('-f')
            cmd_args.append(pw_file)
            cmd_args.extend(command.split(" "))
            try:
                out, err = utils.execute(*cmd_args)
                return out, err
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception() as ctxt:
                    err_list = [x for x in IPMITOOL_RETRYABLE_FAILURES
                                if x in e.args[0]]
                    # Give up (re-raise) when out of time, out of tries,
                    # or the failure is not a known-retryable one.
                    if ((time.time() > end_time) or
                        (num_tries == 0) or
                        not err_list):
                        LOG.error(_LE('IPMI Error while attempting "%(cmd)s"'
                                      'for node %(node)s. Error: %(error)s'), {
                                  'node': driver_info['uuid'],
                                  'cmd': e.cmd, 'error': e
                                  })
                    else:
                        ctxt.reraise = False
                        LOG.warning(_LW('IPMI Error encountered, retrying '
                                        '"%(cmd)s" for node %(node)s. '
                                        'Error: %(error)s'), {
                                    'node': driver_info['uuid'],
                                    'cmd': e.cmd, 'error': e
                                    })
            finally:
                # Record the contact time for the throttling check above.
                LAST_CMD_TIME[driver_info['address']] = time.time()
def _sleep_time(iter):
"""Return the time-to-sleep for the n'th iteration of a retry loop.
This implementation increases exponentially.
:param iter: iteration number
:returns: number of seconds to sleep
"""
if iter <= 1:
return 1
return iter ** 2
def _set_and_wait(target_state, driver_info):
    """Helper function for DynamicLoopingCall.

    This method changes the power state and polls the BMC until the desired
    power state is reached, or CONF.ipmi.retry_timeout would be exceeded by
    the next iteration.

    This method assumes the caller knows the current power state and does not
    check it prior to changing the power state. Most BMCs should be fine, but
    if a driver is concerned, the state should be checked prior to calling this
    method.

    :param target_state: desired power state, states.POWER_ON or
        states.POWER_OFF (any other value leaves state_name unbound and
        _wait() would fail -- NOTE(review): confirm before widening this API)
    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states
    """
    if target_state == states.POWER_ON:
        state_name = "on"
    elif target_state == states.POWER_OFF:
        state_name = "off"
    def _wait(mutable):
        # One DynamicLoopingCall iteration; returns the seconds to sleep
        # before the next iteration, or raises LoopingCallDone to stop.
        try:
            # Only issue power change command once
            if mutable['iter'] < 0:
                _exec_ipmitool(driver_info, "power %s" % state_name)
            else:
                mutable['power'] = _power_status(driver_info)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError,
                exception.IPMIFailure):
            # Log failures but keep trying
            LOG.warning(_LW("IPMI power %(state)s failed for node %(node)s."),
                        {'state': state_name, 'node': driver_info['uuid']})
        finally:
            mutable['iter'] += 1
        if mutable['power'] == target_state:
            raise loopingcall.LoopingCallDone()
        sleep_time = _sleep_time(mutable['iter'])
        if (sleep_time + mutable['total_time']) > CONF.ipmi.retry_timeout:
            # Stop if the next loop would exceed maximum retry_timeout
            LOG.error(_LE('IPMI power %(state)s timed out after '
                          '%(tries)s retries on node %(node_id)s.'),
                      {'state': state_name, 'tries': mutable['iter'],
                       'node_id': driver_info['uuid']})
            mutable['power'] = states.ERROR
            raise loopingcall.LoopingCallDone()
        else:
            mutable['total_time'] += sleep_time
            return sleep_time
    # Use mutable objects so the looped method can change them.
    # Start 'iter' from -1 so that the first two checks are one second apart.
    status = {'power': None, 'iter': -1, 'total_time': 0}
    timer = loopingcall.DynamicLoopingCall(_wait, status)
    timer.start().wait()
    return status['power']
def _power_on(driver_info):
    """Turn the power ON for this node.

    Delegates to _set_and_wait, which issues the command and polls until
    the state is reached or the retry timeout expires.

    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states POWER_ON or ERROR.
    :raises: IPMIFailure on an error from ipmitool (from _power_status call).
    """
    return _set_and_wait(states.POWER_ON, driver_info)
def _power_off(driver_info):
    """Turn the power OFF for this node.

    Delegates to _set_and_wait, which issues the command and polls until
    the state is reached or the retry timeout expires.

    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states POWER_OFF or ERROR.
    :raises: IPMIFailure on an error from ipmitool (from _power_status call).
    """
    return _set_and_wait(states.POWER_OFF, driver_info)
def _power_status(driver_info):
    """Query the node's chassis power state via ipmitool.

    :param driver_info: the ipmitool access parameters for a node.
    :returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
    :raises: IPMIFailure on an error from ipmitool.
    """
    cmd = "power status"
    try:
        out_err = _exec_ipmitool(driver_info, cmd)
    except (exception.PasswordFileFailedToCreate,
            processutils.ProcessExecutionError) as e:
        LOG.warning(_LW("IPMI power status failed for node %(node_id)s with "
                        "error: %(error)s."),
                    {'node_id': driver_info['uuid'], 'error': e})
        raise exception.IPMIFailure(cmd=cmd)
    stdout = out_err[0]
    # ipmitool prints exactly one of these two lines on success; anything
    # else is treated as an error state.
    if stdout == "Chassis Power is on\n":
        return states.POWER_ON
    if stdout == "Chassis Power is off\n":
        return states.POWER_OFF
    return states.ERROR
def _process_sensor(sensor_data):
sensor_data_fields = sensor_data.split('\n')
sensor_data_dict = {}
for field in sensor_data_fields:
if not field:
continue
kv_value = field.split(':')
if len(kv_value) != 2:
continue
sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()
return sensor_data_dict
def _get_sensor_type(node, sensor_data_dict):
# Have only three sensor type name IDs: 'Sensor Type (Analog)'
# 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'
for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
'Sensor Type (Threshold)'):
try:
return sensor_data_dict[key].split(' ', 1)[0]
except KeyError:
continue
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, unknown sensor type"
" data: %(sensors_data)s"),
{'sensors_data': sensor_data_dict}))
def _parse_ipmi_sensors_data(node, sensors_data):
    """Parse the IPMI sensors data and group it into a dict by sensor type.

    We run the 'ipmitool' command with 'sdr -v' options, which returns
    sensor details in human-readable format; this formats them into a
    dict-based structure for the Ceilometer Collector, which sends it as a
    payload over the notification bus.

    :param node: the node the sensor data belongs to.
    :param sensors_data: the sensor data returned by ipmitool command.
    :returns: the sensor data grouped by sensor type.
    :raises: FailedToParseSensorData when error encountered during parsing.
    """
    grouped = {}
    if not sensors_data:
        return grouped

    # Individual sensors are separated by blank lines in 'sdr -v' output.
    for raw_sensor in sensors_data.split('\n\n'):
        fields = _process_sensor(raw_sensor)
        if not fields:
            continue
        sensor_type = _get_sensor_type(node, fields)
        # Ignore sensors which have no current 'Sensor Reading' data.
        if 'Sensor Reading' in fields:
            grouped.setdefault(sensor_type, {})[fields['Sensor ID']] = fields

    if not grouped:
        # Got nothing: no valid sensor data at all.
        raise exception.FailedToParseSensorData(
            node=node.uuid,
            error=(_("parse ipmi sensor data failed, get nothing with input"
                     " data: %(sensors_data)s")
                   % {'sensors_data': sensors_data}))
    return grouped
@task_manager.require_exclusive_lock
def send_raw(task, raw_bytes):
    """Send raw bytes to the BMC. Bytes should be a string of bytes.

    :param task: a TaskManager instance.
    :param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
    :raises: IPMIFailure on an error from ipmitool.
    :raises: MissingParameterValue if a required parameter is missing.
    :raises: InvalidParameterValue when an invalid value is specified.
    """
    node_uuid = task.node.uuid
    LOG.debug('Sending node %(node)s raw bytes %(bytes)s',
              {'bytes': raw_bytes, 'node': node_uuid})
    driver_info = _parse_driver_info(task.node)
    cmd = 'raw %s' % raw_bytes

    try:
        out, err = _exec_ipmitool(driver_info, cmd)
    except (exception.PasswordFileFailedToCreate,
            processutils.ProcessExecutionError) as e:
        LOG.exception(_LE('IPMI "raw bytes" failed for node %(node_id)s '
                          'with error: %(error)s.'),
                      {'node_id': node_uuid, 'error': e})
        raise exception.IPMIFailure(cmd=cmd)

    LOG.debug('send raw bytes returned stdout: %(stdout)s, stderr:'
              ' %(stderr)s', {'stdout': out, 'stderr': err})
def _check_temp_dir():
    """Check (once) that the temp directory is usable for password files."""
    global TMP_DIR_CHECKED
    # Because a temporary file is used to pass the password to ipmitool,
    # the directory has to be validated; only the first call does the work.
    if TMP_DIR_CHECKED is not None:
        return
    try:
        utils.check_dir()
    except (exception.PathNotFound,
            exception.DirectoryNotWritable,
            exception.InsufficientDiskSpace) as e:
        with excutils.save_and_reraise_exception():
            TMP_DIR_CHECKED = False
            err_msg = (_("Ipmitool drivers need to be able to create "
                         "temporary files to pass password to ipmitool. "
                         "Encountered error: %s") % e)
            e.message = err_msg
            LOG.error(err_msg)
    else:
        TMP_DIR_CHECKED = True
class IPMIPower(base.PowerInterface):
    """Power interface driven by the ipmitool utility."""

    def __init__(self):
        # Probe the installed ipmitool once so later calls know which
        # optional features (timing, bridging) may be used.
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        _check_temp_dir()

    def get_properties(self):
        # Driver properties advertised through the API.
        return COMMON_PROPERTIES

    def validate(self, task):
        """Validate driver_info for ipmitool driver.

        Check that node['driver_info'] contains IPMI credentials.

        :param task: a TaskManager instance containing the node to act on.
        :raises: InvalidParameterValue if required ipmi parameters are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        _parse_driver_info(task.node)
        # NOTE(deva): don't actually touch the BMC in validate because it is
        # called too often, and BMCs are too fragile.
        # This is a temporary measure to mitigate problems while
        # 1314954 and 1314961 are resolved.

    def get_power_state(self, task):
        """Get the current power state of the task's node.

        :param task: a TaskManager instance containing the node to act on.
        :returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
        :raises: InvalidParameterValue if required ipmi parameters are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: IPMIFailure on an error from ipmitool (from _power_status
            call).
        """
        driver_info = _parse_driver_info(task.node)
        return _power_status(driver_info)

    @task_manager.require_exclusive_lock
    def set_power_state(self, task, pstate):
        """Turn the power on or off.

        :param task: a TaskManager instance containing the node to act on.
        :param pstate: The desired power state, one of ironic.common.states
            POWER_ON, POWER_OFF.
        :raises: InvalidParameterValue if an invalid power state was specified.
        :raises: MissingParameterValue if required ipmi parameters are missing
        :raises: PowerStateFailure if the power couldn't be set to pstate.
        """
        driver_info = _parse_driver_info(task.node)
        if pstate == states.POWER_ON:
            state = _power_on(driver_info)
        elif pstate == states.POWER_OFF:
            state = _power_off(driver_info)
        else:
            raise exception.InvalidParameterValue(
                _("set_power_state called "
                  "with invalid power state %s.") % pstate)
        if state != pstate:
            raise exception.PowerStateFailure(pstate=pstate)

    @task_manager.require_exclusive_lock
    def reboot(self, task):
        """Cycles the power to the task's node.

        :param task: a TaskManager instance containing the node to act on.
        :raises: MissingParameterValue if required ipmi parameters are missing.
        :raises: InvalidParameterValue if an invalid power state was specified.
        :raises: PowerStateFailure if the final state of the node is not
            POWER_ON.
        """
        driver_info = _parse_driver_info(task.node)
        # Hard power cycle: force off, then back on.
        _power_off(driver_info)
        state = _power_on(driver_info)
        if state != states.POWER_ON:
            raise exception.PowerStateFailure(pstate=states.POWER_ON)
class IPMIManagement(base.ManagementInterface):
    """Management interface: boot device control and sensor data via
    ipmitool.
    """

    def get_properties(self):
        return COMMON_PROPERTIES

    def __init__(self):
        # Probe the installed ipmitool once so later calls know which
        # optional features (timing, bridging) may be used.
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        _check_temp_dir()

    def validate(self, task):
        """Check that 'driver_info' contains IPMI credentials.

        Validates whether the 'driver_info' property of the supplied
        task's node contains the required credentials information.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required IPMI parameters
            are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        _parse_driver_info(task.node)

    def get_supported_boot_devices(self, task):
        """Get a list of the supported boot devices.

        :param task: a task from TaskManager.
        :returns: A list with the supported boot devices defined
            in :mod:`ironic.common.boot_devices`.
        """
        return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM,
                boot_devices.BIOS, boot_devices.SAFE]

    @task_manager.require_exclusive_lock
    def set_boot_device(self, task, device, persistent=False):
        """Set the boot device for the task's node.

        Set the boot device to use on next reboot of the node.

        :param task: a task from TaskManager.
        :param device: the boot device, one of
            :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
            persist to all future boots, False if not.
            Default: False.
        :raises: InvalidParameterValue if an invalid boot device is specified
        :raises: MissingParameterValue if required ipmi parameters are missing.
        :raises: IPMIFailure on an error from ipmitool.
        """
        if device not in self.get_supported_boot_devices(task):
            raise exception.InvalidParameterValue(_(
                "Invalid boot device %s specified.") % device)
        # note(JayF): IPMI spec indicates unless you send these raw bytes the
        # boot device setting times out after 60s. Since it's possible it
        # could be >60s before a node is rebooted, we should always send them.
        # This mimics pyghmi's current behavior, and the "option=timeout"
        # setting on newer ipmitool binaries.
        timeout_disable = "0x00 0x08 0x03 0x08"
        send_raw(task, timeout_disable)
        cmd = "chassis bootdev %s" % device
        if persistent:
            cmd = cmd + " options=persistent"
        driver_info = _parse_driver_info(task.node)
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.warning(_LW('IPMI set boot device failed for node %(node)s '
                            'when executing "ipmitool %(cmd)s". '
                            'Error: %(error)s'),
                        {'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)

    def get_boot_device(self, task):
        """Get the current boot device for the task's node.

        Returns the current boot device of the node.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required IPMI parameters
            are missing.
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: a dictionary containing:
            :boot_device: the boot device, one of
                :mod:`ironic.common.boot_devices` or None if it is unknown.
            :persistent: Whether the boot device will persist to all
                future boots or not, None if it is unknown.
        """
        cmd = "chassis bootparam get 5"
        driver_info = _parse_driver_info(task.node)
        response = {'boot_device': None, 'persistent': None}
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.warning(_LW('IPMI get boot device failed for node %(node)s '
                            'when executing "ipmitool %(cmd)s". '
                            'Error: %(error)s'),
                        {'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)
        # Map ipmitool's human-readable boot selector back to boot_devices.
        re_obj = re.search('Boot Device Selector : (.+)?\n', out)
        if re_obj:
            boot_selector = re_obj.groups('')[0]
            if 'PXE' in boot_selector:
                response['boot_device'] = boot_devices.PXE
            elif 'Hard-Drive' in boot_selector:
                if 'Safe-Mode' in boot_selector:
                    response['boot_device'] = boot_devices.SAFE
                else:
                    response['boot_device'] = boot_devices.DISK
            elif 'BIOS' in boot_selector:
                response['boot_device'] = boot_devices.BIOS
            elif 'CD/DVD' in boot_selector:
                response['boot_device'] = boot_devices.CDROM
        response['persistent'] = 'Options apply to all future boots' in out
        return response

    def get_sensors_data(self, task):
        """Get sensors data.

        :param task: a TaskManager instance.
        :raises: FailedToGetSensorData when getting the sensor data fails.
        :raises: FailedToParseSensorData when parsing sensor data fails.
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: returns a dict of sensor data group by sensor type.
        """
        driver_info = _parse_driver_info(task.node)
        # with '-v' option, we can get the entire sensor data including the
        # extended sensor informations
        cmd = "sdr -v"
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            raise exception.FailedToGetSensorData(node=task.node.uuid,
                                                  error=e)
        return _parse_ipmi_sensors_data(task.node, out)
class VendorPassthru(base.VendorInterface):
    """Vendor passthru interface exposing raw IPMI commands and BMC reset."""

    def __init__(self):
        # Probe bridging support once; timing is not needed by these calls.
        try:
            _check_option_support(['single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        _check_temp_dir()

    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def send_raw(self, task, http_method, raw_bytes):
        """Send raw bytes to the BMC. Bytes should be a string of bytes.

        :param task: a TaskManager instance.
        :param http_method: the HTTP method used on the request.
        :param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: InvalidParameterValue when an invalid value is specified.
        """
        # Delegates to the module-level send_raw() helper.
        send_raw(task, raw_bytes)

    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def bmc_reset(self, task, http_method, warm=True):
        """Reset BMC with IPMI command 'bmc reset (warm|cold)'.

        :param task: a TaskManager instance.
        :param http_method: the HTTP method used on the request.
        :param warm: boolean parameter to decide on warm or cold reset.
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: InvalidParameterValue when an invalid value is specified
        """
        node_uuid = task.node.uuid
        if warm:
            warm_param = 'warm'
        else:
            warm_param = 'cold'
        LOG.debug('Doing %(warm)s BMC reset on node %(node)s',
                  {'warm': warm_param, 'node': node_uuid})
        driver_info = _parse_driver_info(task.node)
        cmd = 'bmc reset %s' % warm_param
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
            LOG.debug('bmc reset returned stdout: %(stdout)s, stderr:'
                      ' %(stderr)s', {'stdout': out, 'stderr': err})
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.exception(_LE('IPMI "bmc reset" failed for node %(node_id)s '
                              'with error: %(error)s.'),
                          {'node_id': node_uuid, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)

    def get_properties(self):
        return COMMON_PROPERTIES

    def validate(self, task, method, **kwargs):
        """Validate vendor-specific actions.

        If invalid, raises an exception; otherwise returns None.

        Valid methods:
          * send_raw
          * bmc_reset

        :param task: a task from TaskManager.
        :param method: method to be validated
        :param kwargs: info for action.
        :raises: InvalidParameterValue when an invalid parameter value is
            specified.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        if method == 'send_raw':
            if not kwargs.get('raw_bytes'):
                raise exception.MissingParameterValue(_(
                    'Parameter raw_bytes (string of bytes) was not '
                    'specified.'))
        _parse_driver_info(task.node)
class IPMIShellinaboxConsole(base.ConsoleInterface):
    """A ConsoleInterface that uses ipmitool and shellinabox."""

    def __init__(self):
        # Probe the installed ipmitool once so later calls know which
        # optional features (timing, bridging) may be used.
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        _check_temp_dir()

    def get_properties(self):
        # Console drivers additionally require the terminal-port property.
        d = COMMON_PROPERTIES.copy()
        d.update(CONSOLE_PROPERTIES)
        return d

    def validate(self, task):
        """Validate the Node console info.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue
        :raises: MissingParameterValue when a required parameter is missing
        """
        driver_info = _parse_driver_info(task.node)
        if not driver_info['port']:
            raise exception.MissingParameterValue(_(
                "Missing 'ipmi_terminal_port' parameter in node's"
                " driver_info."))
        # Serial-over-LAN is only available with the v2.0 'lanplus' interface.
        if driver_info['protocol_version'] != '2.0':
            raise exception.InvalidParameterValue(_(
                "Serial over lan only works with IPMI protocol version 2.0. "
                "Check the 'ipmi_protocol_version' parameter in "
                "node's driver_info"))

    def start_console(self, task):
        """Start a remote console for the node.

        :param task: a task from TaskManager
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: PasswordFileFailedToCreate if unable to create a file
            containing the password
        :raises: ConsoleError if the directory for the PID file cannot be
            created
        :raises: ConsoleSubprocessFailed when invoking the subprocess failed
        """
        driver_info = _parse_driver_info(task.node)
        # The console outlives this call, so a persistent password file is
        # used here rather than the temporary _make_password_file().
        path = _console_pwfile_path(driver_info['uuid'])
        pw_file = console_utils.make_persistent_password_file(
            path, driver_info['password'])
        ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s"
                    " -I lanplus -U %(user)s -f %(pwfile)s"
                    % {'uid': os.getuid(),
                       'gid': os.getgid(),
                       'address': driver_info['address'],
                       'user': driver_info['username'],
                       'pwfile': pw_file})
        for name, option in BRIDGING_OPTIONS:
            if driver_info[name] is not None:
                ipmi_cmd = " ".join([ipmi_cmd,
                                     option, driver_info[name]])
        if CONF.debug:
            ipmi_cmd += " -v"
        ipmi_cmd += " sol activate"
        try:
            console_utils.start_shellinabox_console(driver_info['uuid'],
                                                    driver_info['port'],
                                                    ipmi_cmd)
        except (exception.ConsoleError, exception.ConsoleSubprocessFailed):
            with excutils.save_and_reraise_exception():
                # Don't leave the password file behind on failure.
                utils.unlink_without_raise(path)

    def stop_console(self, task):
        """Stop the remote console session for the node.

        :param task: a task from TaskManager
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: ConsoleError if unable to stop the console
        """
        driver_info = _parse_driver_info(task.node)
        try:
            console_utils.stop_shellinabox_console(driver_info['uuid'])
        finally:
            # Always remove the persistent password file.
            utils.unlink_without_raise(
                _console_pwfile_path(driver_info['uuid']))

    def get_console(self, task):
        """Get the type and connection information about the console."""
        driver_info = _parse_driver_info(task.node)
        url = console_utils.get_shellinabox_console_url(driver_info['port'])
        return {'type': 'shellinabox', 'url': url}
| Tan0/ironic | ironic/drivers/modules/ipmitool.py | Python | apache-2.0 | 44,249 |
/*
* Copyright 2020 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.dmn.openapi.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.eclipse.microprofile.openapi.OASFactory;
import org.eclipse.microprofile.openapi.models.media.Schema;
import org.eclipse.microprofile.openapi.models.media.Schema.SchemaType;
import org.kie.dmn.api.core.DMNType;
import org.kie.dmn.core.impl.BaseDMNTypeImpl;
import org.kie.dmn.core.impl.CompositeTypeImpl;
import org.kie.dmn.core.impl.SimpleTypeImpl;
import org.kie.dmn.openapi.NamingPolicy;
import org.kie.dmn.openapi.model.DMNModelIOSets;
import org.kie.dmn.openapi.model.DMNModelIOSets.DSIOSets;
import org.kie.dmn.typesafe.DMNTypeUtils;
/**
 * Generates OpenAPI {@link Schema} definitions for the DMN types collected in
 * {@code typesIndex}, resolving cross-references through the supplied
 * {@link NamingPolicy}.
 */
public class DMNTypeSchemas {

    private final List<DMNModelIOSets> ioSets;
    private final Set<DMNType> typesIndex;
    private final NamingPolicy namingPolicy;

    /**
     * @param ioSets model/decision-service input-output sets, used to decide
     *        which composite types are InputSets (whose fields are required).
     * @param typesIndex the DMN types to generate schemas for.
     * @param namingPolicy provides the {@code $ref} value for indexed types.
     */
    public DMNTypeSchemas(List<DMNModelIOSets> ioSets, Set<DMNType> typesIndex, NamingPolicy namingPolicy) {
        // Defensive: keep read-only views of the caller's collections.
        this.ioSets = Collections.unmodifiableList(ioSets);
        this.typesIndex = Collections.unmodifiableSet(typesIndex);
        this.namingPolicy = namingPolicy;
    }

    /**
     * Builds one {@link Schema} for every indexed DMN type.
     *
     * @return a map from each indexed type to its generated schema.
     */
    public Map<DMNType, Schema> generateSchemas() {
        Map<DMNType, Schema> schemas = new HashMap<>();
        for (DMNType t : typesIndex) {
            Schema schema = schemaFromType(t);
            schemas.put(t, schema);
        }
        return schemas;
    }

    /**
     * Returns an inline schema for FEEL built-in types, or a {@code $ref}
     * schema (named via the naming policy) for indexed types.
     *
     * @throws UnsupportedOperationException for any other type.
     */
    private Schema refOrBuiltinSchema(DMNType t) {
        if (DMNTypeUtils.isFEELBuiltInType(t)) {
            return FEELBuiltinTypeSchemas.from(t);
        }
        if (typesIndex.contains(t)) {
            Schema schema = OASFactory.createObject(Schema.class).ref(namingPolicy.getRef(t));
            return schema;
        }
        throw new UnsupportedOperationException();
    }

    /**
     * @return true when the type is a model-level or decision-service-level
     *         InputSet in any of the known IO sets.
     */
    private boolean isIOSet(DMNType t) {
        for (DMNModelIOSets ios : ioSets) {
            if (ios.getInputSet().equals(t)) {
                return true;
            }
            for (DSIOSets ds : ios.getDSIOSets()) {
                if (ds.getDSInputSet().equals(t)) {
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Dispatches on the concrete DMN type implementation.
     *
     * @throws UnsupportedOperationException for unknown implementations.
     */
    private Schema schemaFromType(DMNType t) {
        if (t instanceof CompositeTypeImpl) {
            return schemaFromCompositeType((CompositeTypeImpl) t);
        }
        if (t instanceof SimpleTypeImpl) {
            return schemaFromSimpleType((SimpleTypeImpl) t);
        }
        throw new UnsupportedOperationException();
    }

    /**
     * Builds the schema of a simple (alias/restriction) type from its base
     * type, applying allowed-value enumeration and collection nesting.
     */
    private Schema schemaFromSimpleType(SimpleTypeImpl t) {
        DMNType baseType = t.getBaseType();
        if (baseType == null) {
            // A simple type must always restrict some base type.
            throw new IllegalStateException();
        }
        Schema schema = refOrBuiltinSchema(baseType);
        if (t.getAllowedValues() != null && !t.getAllowedValues().isEmpty()) {
            FEELSchemaEnum.parseAllowedValuesIntoSchema(schema, t.getAllowedValues());
        }
        schema = nestAsItemIfCollection(schema, t);
        schema.addExtension(DMNOASConstants.X_DMN_TYPE, getDMNTypeSchemaXDMNTYPEdescr(t));
        return schema;
    }

    /**
     * Builds the schema of a composite type: either an object schema with one
     * property per field (fields are required for InputSets), or -- for a
     * collection with a base type -- the base type's schema nested as items.
     */
    private Schema schemaFromCompositeType(CompositeTypeImpl ct) {
        Schema schema = OASFactory.createObject(Schema.class).type(SchemaType.OBJECT);
        if (ct.getBaseType() == null) { // main case
            for (Entry<String, DMNType> fkv : ct.getFields().entrySet()) {
                schema.addProperty(fkv.getKey(), refOrBuiltinSchema(fkv.getValue()));
            }
            if (isIOSet(ct) && ct.getFields().size() > 0) {
                schema.required(new ArrayList<>(ct.getFields().keySet()));
            }
        } else if (ct.isCollection()) {
            schema = refOrBuiltinSchema(ct.getBaseType());
        } else {
            throw new IllegalStateException();
        }
        schema = nestAsItemIfCollection(schema, ct);
        schema.addExtension(DMNOASConstants.X_DMN_TYPE, getDMNTypeSchemaXDMNTYPEdescr(ct));
        return schema;
    }

    /**
     * Wraps the schema in an array schema when the DMN type is a collection;
     * otherwise returns it unchanged.
     */
    private Schema nestAsItemIfCollection(Schema original, DMNType t) {
        if (t.isCollection()) {
            return OASFactory.createObject(Schema.class).type(SchemaType.ARRAY).items(original);
        } else {
            return original;
        }
    }

    /**
     * Value for the {@code x-dmn-type} extension: the type's own description,
     * or null for anonymous inner types belonging to another type.
     */
    private String getDMNTypeSchemaXDMNTYPEdescr(DMNType t) {
        if (((BaseDMNTypeImpl) t).getBelongingType() == null) { // internals for anonymous inner types.
            return t.toString();
        } else {
            return null;
        }
    }
}
| droolsjbpm/drools | kie-dmn/kie-dmn-openapi/src/main/java/org/kie/dmn/openapi/impl/DMNTypeSchemas.java | Java | apache-2.0 | 5,225 |
<html>
<head>
<title>Inserir Veiculos</title>
<link rel="stylesheet" href="estilo.css" type="text/css">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<script language="JavaScript" type="text/javascript" src="..\MascaraValidacao.js"></script>
<script>
function busca(){
    document.form.submit();
    return true;
}
</script>
</head>
<body onload="document.form.PLACA.focus();">
<form name="form" method="post" action="InserirVeiculo2.php" onsubmit="return busca();">
<table width="100%" border="1" align="center" cellpadding="0" cellspacing="0" bordercolor="#CCCCCC">
<tr><td bgcolor="#CCCCCC" align="center" class="titulo">Inserir Veículo</td></tr>
<tr>
<table>
<tr>
<td>Placa</td>
<td>Tipo</td>
<td>Modelo</td>
<td>Ano</td>
<td>Cor</td>
<td>Cliente</td>
</tr>
<tr>
<td><input name="PLACA" type="text" id="PLACA" maxlength="8" onKeyPress="ColocaMascaraPlaca(document.form.PLACA);"/></td>
<td><select name=TIPO>
<option value="Carro">Carro</option>
<option value="Moto">Moto</option>
<option value="Caminhao">Caminhão</option>
<option value="Outro">Outro</option>
</select></td>
<td><input name="MODELO" type="text" id="MODELO" maxlength="30"/></td>
<td><input name="ANO" type="text" id="ANO" maxlength="4"/></td>
<td><input name="COR" type="text" id="COR" maxlength="15"/></td>
<td><?php
// Populate the client drop-down from the database.
require_once('funcoes.php');
conectar('localhost', 'root', '', 'bd_estacionamento');
$sql = mysql_query("SELECT nome_cliente,id_cliente FROM clientes order by nome_cliente");
echo "<select name=CLIENTES>";
while ($rowl = mysql_fetch_assoc($sql)) {
    // Cast the id and escape the name so stored data cannot break the markup.
    echo "<option value=\"" . (int) $rowl['id_cliente'] . "\">" . htmlspecialchars($rowl['nome_cliente']) . "</option>";
}
echo "</select>"; // Closing of list box
?></td>
<!-- BUG FIX: the submit input tag was previously unclosed -->
<td><input type="submit" name="Submit" value="Incluir"/></td>
</tr>
</table>
</tr>
</table>
</form>
<?php
// BUG FIX: only run the INSERT when the form was actually submitted;
// previously the INSERT executed (with empty values) on every page load.
if ($_SERVER['REQUEST_METHOD'] === 'POST' && isset($_POST['PLACA'])) {
    require_once('funcoes.php');
    conectar('localhost', 'root', '', 'bd_estacionamento');
    // SECURITY FIX: escape all user-supplied values before building the SQL
    // statement (raw $_POST concatenation allowed SQL injection).
    $PLACA = mysql_real_escape_string($_POST["PLACA"]);
    $TIPO = mysql_real_escape_string($_POST["TIPO"]);
    $MODELO = mysql_real_escape_string($_POST["MODELO"]);
    $ANO = mysql_real_escape_string($_POST["ANO"]);
    $COR = mysql_real_escape_string($_POST["COR"]);
    $CLIENTES = mysql_real_escape_string($_POST["CLIENTES"]);
    $query = "INSERT INTO veiculos VALUES('', '$TIPO', '$ANO', '$PLACA', '$MODELO', '$COR', '$CLIENTES')";
    mysql_query($query) or die ('Falha ao executar query no banco de dados');
    mysql_close() or die ('Falha ao fechar o banco de dados');
}
?>
<?php
// List all vehicles together with the owning client's name.
require_once('funcoes.php');
conectar('localhost', 'root', '', 'bd_estacionamento');
$result = mysql_query("SELECT * FROM veiculos");
echo "<table border=5 style=3 width=100%><tr><td>ID</td><td>Placa</td><td>Tipo</td><td>Modelo</td><td>Ano</td><td>Cor</td><td>Cliente</td></tr>";
while ($row = mysql_fetch_assoc($result)) {
    echo "<tr><td>" . $row['id_veiculo'] . "</td>" . "<td>" . $row['placa_veiculo'] . "</td>" . "<td>" . $row['tipo_veiculo'] . "</td>" . "<td>" . $row['modelo_veiculo'] . "</td>" . "<td>" . $row['ano_veiculo'] . "</td>" . "<td>" . $row['cor_veiculo'] . "</td>";
    // The FK value comes from our own table, but escape it anyway.
    $fk = mysql_fetch_assoc(mysql_query("SELECT * FROM clientes where id_cliente='" . mysql_real_escape_string($row['fk_cliente_veiculo']) . "'"));
    echo "<td>" . $fk['nome_cliente'] . "</td></tr>";
}
echo "</table><br><br>";
echo "";
?>
</body>
</html>
| danilohenriqueandrade/AtividadeFinal | home/Veiculos/InserirVeiculo2.php | PHP | apache-2.0 | 3,283 |
/*
* Copyright 2018 OPS4J Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ops4j.kaiserkai.rest;
import com.spotify.docker.client.auth.RegistryAuthSupplier;
import com.spotify.docker.client.exceptions.DockerException;
import com.spotify.docker.client.messages.RegistryAuth;
import com.spotify.docker.client.messages.RegistryConfigs;
/**
 * Supplies fixed admin/admin credentials for the local registry at
 * 127.0.0.1; any other image name gets no credentials.
 *
 * @author Harald Wellmann
 */
public class LocalAuthSupplier implements RegistryAuthSupplier {

    @Override
    public RegistryAuth authFor(String imageName) throws DockerException {
        if (!imageName.startsWith("127.0.0.1")) {
            // not the local registry: supply no credentials
            return null;
        }
        return RegistryAuth.builder().username("admin").password("admin").build();
    }

    @Override
    public RegistryAuth authForSwarm() throws DockerException {
        // swarm authentication is not supplied here
        return null;
    }

    @Override
    public RegistryConfigs authForBuild() throws DockerException {
        // build authentication is not supplied here
        return null;
    }
}
| hwellmann/org.ops4j.kaiserkai | kaiserkai-itest/src/test/java/org/ops4j/kaiserkai/rest/LocalAuthSupplier.java | Java | apache-2.0 | 1,497 |
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/cloud/dialogflow/cx/v3beta1/advanced_settings.proto
package com.google.cloud.dialogflow.cx.v3beta1;
/**
 * Generated holder for the file descriptor of
 * google/cloud/dialogflow/cx/v3beta1/advanced_settings.proto.
 * Generated code — do not edit by hand; regenerate from the .proto instead.
 */
public final class AdvancedSettingsProto {
  // Static-only holder: never instantiated.
  private AdvancedSettingsProto() {}

  public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {}

  public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) {
    registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry);
  }

  // Descriptor/accessor tables for the AdvancedSettings message and its
  // nested LoggingSettings message; initialized in the static block below.
  static final com.google.protobuf.Descriptors.Descriptor
      internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_descriptor;
  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_fieldAccessorTable;
  static final com.google.protobuf.Descriptors.Descriptor
      internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_LoggingSettings_descriptor;
  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_LoggingSettings_fieldAccessorTable;

  public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
    return descriptor;
  }

  private static com.google.protobuf.Descriptors.FileDescriptor descriptor;

  static {
    // Serialized FileDescriptorProto of the .proto file, split into string
    // chunks by protoc; must not be modified by hand.
    java.lang.String[] descriptorData = {
      "\n:google/cloud/dialogflow/cx/v3beta1/adv"
          + "anced_settings.proto\022\"google.cloud.dialo"
          + "gflow.cx.v3beta1\032\037google/api/field_behav"
          + "ior.proto\"\315\001\n\020AdvancedSettings\022^\n\020loggin"
          + "g_settings\030\006 \001(\0132D.google.cloud.dialogfl"
          + "ow.cx.v3beta1.AdvancedSettings.LoggingSe"
          + "ttings\032Y\n\017LoggingSettings\022\"\n\032enable_stac"
          + "kdriver_logging\030\002 \001(\010\022\"\n\032enable_interact"
          + "ion_logging\030\003 \001(\010B\335\001\n&com.google.cloud.d"
          + "ialogflow.cx.v3beta1B\025AdvancedSettingsPr"
          + "otoP\001ZDgoogle.golang.org/genproto/google"
          + "apis/cloud/dialogflow/cx/v3beta1;cx\370\001\001\242\002"
          + "\002DF\252\002\"Google.Cloud.Dialogflow.Cx.V3Beta1"
          + "\352\002&Google::Cloud::Dialogflow::CX::V3beta"
          + "1b\006proto3"
    };
    descriptor =
        com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
            descriptorData,
            new com.google.protobuf.Descriptors.FileDescriptor[] {
              com.google.api.FieldBehaviorProto.getDescriptor(),
            });
    internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_descriptor =
        getDescriptor().getMessageTypes().get(0);
    internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_fieldAccessorTable =
        new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
            internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_descriptor,
            new java.lang.String[] {
              "LoggingSettings",
            });
    internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_LoggingSettings_descriptor =
        internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_descriptor
            .getNestedTypes()
            .get(0);
    internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_LoggingSettings_fieldAccessorTable =
        new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
            internal_static_google_cloud_dialogflow_cx_v3beta1_AdvancedSettings_LoggingSettings_descriptor,
            new java.lang.String[] {
              "EnableStackdriverLogging", "EnableInteractionLogging",
            });
    com.google.api.FieldBehaviorProto.getDescriptor();
  }

  // @@protoc_insertion_point(outer_class_scope)
}
| googleapis/java-dialogflow-cx | proto-google-cloud-dialogflow-cx-v3beta1/src/main/java/com/google/cloud/dialogflow/cx/v3beta1/AdvancedSettingsProto.java | Java | apache-2.0 | 4,476 |
// Copyright 2018 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Unit tests for SuggestionThreadObjectFactory.
*/
// TODO(#7222): Remove the following block of unnecessary imports once
// SuggestionThreadObjectFactory.ts is upgraded to Angular 8.
import { SuggestionObjectFactory } from
'domain/suggestion/SuggestionObjectFactory.ts';
// ^^^ This block is to be removed.
require('domain/suggestion/SuggestionThreadObjectFactory.ts');
// Verifies that SuggestionThreadObjectFactory correctly builds a thread
// (plus its attached suggestion) from the two backend dicts.
describe('Suggestion thread object factory', function() {
  beforeEach(function() {
    angular.mock.module('oppia');
  });
  // Provide the upgraded SuggestionObjectFactory to the AngularJS injector.
  beforeEach(angular.mock.module('oppia', function($provide) {
    $provide.value('SuggestionObjectFactory', new SuggestionObjectFactory());
  }));

  var SuggestionThreadObjectFactory = null;
  // NOTE(review): suggestionObjectFactory is injected but never used below;
  // confirm it can be removed.
  var suggestionObjectFactory = null;

  beforeEach(angular.mock.inject(function($injector) {
    SuggestionThreadObjectFactory = $injector.get(
      'SuggestionThreadObjectFactory');
    suggestionObjectFactory = $injector.get('SuggestionObjectFactory');
  }));

  it('should create a new suggestion thread from a backend dict.', function() {
    // Thread metadata as returned by the backend.
    var suggestionThreadBackendDict = {
      last_updated: 1000,
      original_author_username: 'author',
      status: 'accepted',
      subject: 'sample subject',
      summary: 'sample summary',
      message_count: 10,
      state_name: 'state 1',
      thread_id: 'exploration.exp1.thread1'
    };
    // The suggestion attached to the thread, including its change dict.
    var suggestionBackendDict = {
      suggestion_id: 'exploration.exp1.thread1',
      suggestion_type: 'edit_exploration_state_content',
      target_type: 'exploration',
      target_id: 'exp1',
      target_version_at_submission: 1,
      status: 'accepted',
      author_name: 'author',
      change: {
        cmd: 'edit_state_property',
        property_name: 'content',
        state_name: 'state_1',
        new_value: {
          html: 'new suggestion content'
        },
        old_value: {
          html: 'old suggestion content'
        }
      },
      last_updated: 1000
    };
    var suggestionThread = SuggestionThreadObjectFactory.createFromBackendDicts(
      suggestionThreadBackendDict, suggestionBackendDict);

    // Thread-level fields come from suggestionThreadBackendDict.
    expect(suggestionThread.status).toEqual('accepted');
    expect(suggestionThread.subject).toEqual('sample subject');
    expect(suggestionThread.summary).toEqual('sample summary');
    expect(suggestionThread.originalAuthorName).toEqual('author');
    expect(suggestionThread.lastUpdated).toEqual(1000);
    expect(suggestionThread.messageCount).toEqual(10);
    expect(suggestionThread.threadId).toEqual('exploration.exp1.thread1');

    // Suggestion-level fields come from suggestionBackendDict.
    expect(suggestionThread.suggestion.suggestionType).toEqual(
      'edit_exploration_state_content');
    expect(suggestionThread.suggestion.targetType).toEqual('exploration');
    expect(suggestionThread.suggestion.targetId).toEqual('exp1');
    expect(suggestionThread.suggestion.suggestionId).toEqual(
      'exploration.exp1.thread1');
    expect(suggestionThread.suggestion.status).toEqual('accepted');
    expect(suggestionThread.suggestion.authorName).toEqual('author');
    expect(suggestionThread.suggestion.newValue.html).toEqual(
      'new suggestion content');
    expect(suggestionThread.suggestion.oldValue.html).toEqual(
      'old suggestion content');
    expect(suggestionThread.suggestion.lastUpdated).toEqual(1000);
    expect(suggestionThread.suggestion.getThreadId()).toEqual(
      'exploration.exp1.thread1');
    expect(suggestionThread.isSuggestionThread()).toEqual(true);
    expect(suggestionThread.isSuggestionHandled()).toEqual(true);

    // Flipping the status back to 'review' makes the thread unhandled.
    suggestionThread.suggestion.status = 'review';
    expect(suggestionThread.isSuggestionHandled()).toEqual(false);
    expect(suggestionThread.getSuggestionStatus()).toEqual('review');
    expect(suggestionThread.getSuggestionStateName()).toEqual('state_1');
    expect(suggestionThread.getReplacementHtmlFromSuggestion()).toEqual(
      'new suggestion content');

    // Messages can be attached after construction.
    var messages = [{
      text: 'message1'
    }, {
      text: 'message2'
    }];
    suggestionThread.setMessages(messages);
    expect(suggestionThread.messages).toEqual(messages);
  });
});
#!/usr/bin/env python
from random import choice
from python.decorators import euler_timer
# The 40 Monopoly board squares in play order. CC = Community Chest,
# CH = Chance, R = railway, U = utility, T = tax, G2J = Go To Jail,
# FP = Free Parking.
SQUARES = ["GO",
           "A1", "CC1", "A2", "T1", "R1", "B1", "CH1", "B2", "B3",
           "JAIL",
           "C1", "U1", "C2", "C3", "R2", "D1", "CC2", "D2", "D3",
           "FP",
           "E1", "CH2", "E2", "E3", "R3", "F1", "F2", "U2", "F3",
           "G2J",
           "G1", "G2", "CC3", "G3", "R4", "CH3", "H1", "T2", "H2"]
def roll_die(size):
    """Roll two fair dice with `size` faces each.

    Returns:
        (total, is_double): the sum of both dice and whether they matched.
    """
    die_one = choice(range(1, size + 1))
    die_two = choice(range(1, size + 1))
    return (die_one + die_two, die_one == die_two)
def back(square, step):
    """Return the name of the square `step` positions behind `square`,
    wrapping around the board when the move crosses GO."""
    return SQUARES[(SQUARES.index(square) - step) % len(SQUARES)]
def next_specific(square, next_type):
    """Return the next railway ("R") or utility ("U") square reached when
    moving forward from `square`, wrapping past GO.

    Args:
        square: name of the current square (a chance square in practice).
        next_type: "R" for the next railway, "U" for the next utility.

    Raises:
        Exception: if `next_type` is not "R"/"U", or `square` is itself one
            of the target squares (callers only pass chance squares).
    """
    if next_type not in ["R", "U"]:
        raise Exception("next_specific only intended for R and U")
    # R1=5, R2=15, R3=25, R4=35
    index = SQUARES.index(square)
    if next_type == "R":
        if 0 <= index < 5 or 35 < index:
            return "R1"
        elif 5 < index < 15:
            return "R2"
        elif 15 < index < 25:
            return "R3"
        elif 25 < index < 35:
            return "R4"
        else:
            raise Exception("Case should not occur")
    # U1=12, U2=28
    elif next_type == "U":
        if 0 <= index < 12 or index > 28:
            return "U1"
        elif 12 < index < 28:
            return "U2"
        else:
            # BUG FIX: this branch previously *returned* an Exception object
            # instead of raising it, silently handing the caller an exception
            # instance as if it were a square name.
            raise Exception("Case should not occur")
    else:
        raise Exception("Case should not occur")
def next_square(landing_square, chance_card, chest_card):
    """Resolve special-square effects after landing on `landing_square`.

    `chance_card` and `chest_card` are positions in the two 16-card decks,
    modeled as cyclic counters (cards drawn in a fixed order via `% 16`).

    Returns:
        (final_square, chance_card, chest_card) after applying Go-To-Jail,
        Community Chest, or Chance effects; non-special squares pass through.
    """
    if landing_square not in ["CC1", "CC2", "CC3", "CH1", "CH2", "CH3", "G2J"]:
        return (landing_square, chance_card, chest_card)

    if landing_square == "G2J":
        return ("JAIL", chance_card, chest_card)
    elif landing_square in ["CC1", "CC2", "CC3"]:
        # 1/16 Go, Jail
        # 14/16 Stay
        chest_card = (chest_card + 1) % 16
        if chest_card == 0:
            return ("GO", chance_card, chest_card)
        elif chest_card == 1:
            return ("JAIL", chance_card, chest_card)
        else:
            return (landing_square, chance_card, chest_card)
    elif landing_square in ["CH1", "CH2", "CH3"]:
        # 1/16 Go, Jail, C1, E3, H2, R1, next U, back 3
        # 1/8 Next R
        chance_card = (chance_card + 1) % 16
        if chance_card == 0:
            return ("GO", chance_card, chest_card)
        elif chance_card == 1:
            return ("JAIL", chance_card, chest_card)
        elif chance_card == 2:
            return ("C1", chance_card, chest_card)
        elif chance_card == 3:
            return ("E3", chance_card, chest_card)
        elif chance_card == 4:
            return ("H2", chance_card, chest_card)
        elif chance_card == 5:
            return ("R1", chance_card, chest_card)
        elif chance_card == 6:
            return (next_specific(landing_square, "U"),
                    chance_card, chest_card)
        elif chance_card == 7:
            # "Go back 3 squares" may land on another special square, so
            # resolve recursively.
            return next_square(back(landing_square, 3),
                               chance_card, chest_card)
        elif chance_card in [8, 9]:
            return (next_specific(landing_square, "R"),
                    chance_card, chest_card)
        else:
            return (landing_square, chance_card, chest_card)
    else:
        raise Exception("Case should not occur")
def main(verbose=False):
    """Simulate one million Monopoly turns with two 4-sided dice and return
    the concatenated, zero-padded board indices of the three most-visited
    squares (the Project Euler 84 answer format).

    # NOTE(review): `verbose` is accepted for the euler_timer call signature
    # below but is unused inside this function.
    """
    GAME_PLAY = 10 ** 6
    dice_size = 4
    visited = {"GO": 1}
    current = "GO"
    chance_card = 0
    chest_card = 0
    doubles = 0
    for place in xrange(GAME_PLAY):
        total, double = roll_die(dice_size)
        if double:
            doubles += 1
        else:
            doubles = 0
        if doubles == 3:
            # Three consecutive doubles sends the player straight to jail.
            doubles = 0
            current = "JAIL"
        else:
            index = SQUARES.index(current)
            landing_square = SQUARES[(index + total) % len(SQUARES)]
            (current, chance_card,
             chest_card) = next_square(landing_square, chance_card, chest_card)
        # if current is not in visited, sets to 1
        # (default 0 returned by get)
        visited[current] = visited.get(current, 0) + 1
    top_visited = sorted(visited.items(),
                         key=lambda pair: pair[1],
                         reverse=True)
    top_visited = [SQUARES.index(square[0]) for square in top_visited[:3]]
    return ''.join(str(index).zfill(2) for index in top_visited)
if __name__ == '__main__':
    # Python 2 entry point: euler_timer wraps main() to time and print the result.
    print euler_timer(84)(main)(verbose=True)
| dhermes/project-euler | python/complete/no084.py | Python | apache-2.0 | 4,565 |
package rds
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// DescribeSQLLogFiles invokes the rds.DescribeSQLLogFiles API synchronously
// api document: https://help.aliyun.com/api/rds/describesqllogfiles.html
// Note: generated SDK code (see file header); do not edit by hand.
func (client *Client) DescribeSQLLogFiles(request *DescribeSQLLogFilesRequest) (response *DescribeSQLLogFilesResponse, err error) {
	response = CreateDescribeSQLLogFilesResponse()
	err = client.DoAction(request, response)
	return
}
// DescribeSQLLogFilesWithChan invokes the rds.DescribeSQLLogFiles API asynchronously
// api document: https://help.aliyun.com/api/rds/describesqllogfiles.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) DescribeSQLLogFilesWithChan(request *DescribeSQLLogFilesRequest) (<-chan *DescribeSQLLogFilesResponse, <-chan error) {
	// Buffered (size 1) so the async task never blocks on send.
	responseChan := make(chan *DescribeSQLLogFilesResponse, 1)
	errChan := make(chan error, 1)
	err := client.AddAsyncTask(func() {
		defer close(responseChan)
		defer close(errChan)
		response, err := client.DescribeSQLLogFiles(request)
		if err != nil {
			errChan <- err
		} else {
			responseChan <- response
		}
	})
	if err != nil {
		// Scheduling the async task failed: report the error and close both channels.
		errChan <- err
		close(responseChan)
		close(errChan)
	}
	return responseChan, errChan
}
// DescribeSQLLogFilesWithCallback invokes the rds.DescribeSQLLogFiles API asynchronously
// api document: https://help.aliyun.com/api/rds/describesqllogfiles.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) DescribeSQLLogFilesWithCallback(request *DescribeSQLLogFilesRequest, callback func(response *DescribeSQLLogFilesResponse, err error)) <-chan int {
	// result signals completion: 1 when the task ran, 0 when scheduling failed.
	result := make(chan int, 1)
	err := client.AddAsyncTask(func() {
		var response *DescribeSQLLogFilesResponse
		var err error
		defer close(result)
		response, err = client.DescribeSQLLogFiles(request)
		callback(response, err)
		result <- 1
	})
	if err != nil {
		// Scheduling failed: deliver the error through the callback.
		defer close(result)
		callback(nil, err)
		result <- 0
	}
	return result
}
// DescribeSQLLogFilesRequest is the request struct for api DescribeSQLLogFiles
// (fields are marshalled as query parameters per the position tags).
type DescribeSQLLogFilesRequest struct {
	*requests.RpcRequest
	OwnerId              requests.Integer `position:"Query" name:"OwnerId"`
	ResourceOwnerAccount string           `position:"Query" name:"ResourceOwnerAccount"`
	ResourceOwnerId      requests.Integer `position:"Query" name:"ResourceOwnerId"`
	DBInstanceId         string           `position:"Query" name:"DBInstanceId"`
	FileName             string           `position:"Query" name:"FileName"`
	PageSize             requests.Integer `position:"Query" name:"PageSize"`
	PageNumber           requests.Integer `position:"Query" name:"PageNumber"`
	OwnerAccount         string           `position:"Query" name:"OwnerAccount"`
}
// DescribeSQLLogFilesResponse is the response struct for api DescribeSQLLogFiles
// (paginated: TotalRecordCount/PageNumber/PageRecordCount describe the page).
type DescribeSQLLogFilesResponse struct {
	*responses.BaseResponse
	RequestId        string                     `json:"RequestId" xml:"RequestId"`
	TotalRecordCount int                        `json:"TotalRecordCount" xml:"TotalRecordCount"`
	PageNumber       int                        `json:"PageNumber" xml:"PageNumber"`
	PageRecordCount  int                        `json:"PageRecordCount" xml:"PageRecordCount"`
	Items            ItemsInDescribeSQLLogFiles `json:"Items" xml:"Items"`
}
// CreateDescribeSQLLogFilesRequest creates a request to invoke DescribeSQLLogFiles API
// pre-populated with the Rds product, API version and endpoint metadata.
func CreateDescribeSQLLogFilesRequest() (request *DescribeSQLLogFilesRequest) {
	request = &DescribeSQLLogFilesRequest{
		RpcRequest: &requests.RpcRequest{},
	}
	request.InitWithApiInfo("Rds", "2014-08-15", "DescribeSQLLogFiles", "rds", "openAPI")
	return
}
// CreateDescribeSQLLogFilesResponse creates a response to parse from DescribeSQLLogFiles response
// (an empty shell that DoAction unmarshals into).
func CreateDescribeSQLLogFilesResponse() (response *DescribeSQLLogFilesResponse) {
	response = &DescribeSQLLogFilesResponse{
		BaseResponse: &responses.BaseResponse{},
	}
	return
}
| xiaozhu36/terraform-provider | vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/rds/describe_sql_log_files.go | GO | apache-2.0 | 4,727 |
#
# Cookbook Name:: bcpc
# Recipe:: diamond
#
# Copyright 2013, Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# All resources below apply only when the metrics feature flag is enabled.
if node['bcpc']['enabled']['metrics'] then
  include_recipe "bcpc::default"

  # Diamond is shipped as a .deb bundled with the cookbook and installed via dpkg.
  cookbook_file "/tmp/diamond.deb" do
    source "bins/diamond.deb"
    owner "root"
    mode 00444
  end

  # Python prerequisites for Diamond and its collectors.
  %w{python-support python-configobj python-pip python-httplib2}.each do |pkg|
    package pkg do
      action :upgrade
    end
  end

  package "diamond" do
    provider Chef::Provider::Package::Dpkg
    source "/tmp/diamond.deb"
    action :upgrade
    # keep existing config files on upgrade
    options "--force-confold --force-confdef"
  end

  # Extra packages installed only on nodes whose virt_type is "kvm".
  if node['bcpc']['virt_type'] == "kvm"
    package "ipmitool" do
      action :upgrade
    end

    package "smartmontools" do
      action :upgrade
    end
  end

  cookbook_file "/tmp/pyrabbit-1.0.1.tar.gz" do
    source "bins/pyrabbit-1.0.1.tar.gz"
    owner "root"
    mode 00444
  end

  # pip-install pyrabbit from the bundled tarball; the guard keeps it idempotent.
  bash "install-pyrabbit" do
    code <<-EOH
      pip install /tmp/pyrabbit-1.0.1.tar.gz
    EOH
    not_if "pip freeze|grep pyrabbit"
  end

  # Force Diamond to run as root by rewriting DIAMOND_USER in /etc/default/diamond.
  bash "diamond-set-user" do
    user "root"
    code <<-EOH
      sed --in-place '/^DIAMOND_USER=/d' /etc/default/diamond
      echo 'DIAMOND_USER="root"' >> /etc/default/diamond
    EOH
    not_if "grep -e '^DIAMOND_USER=\"root\"' /etc/default/diamond"
    notifies :restart, "service[diamond]", :delayed
  end

  # Render the Diamond config, pointing it at the head nodes.
  template "/etc/diamond/diamond.conf" do
    source "diamond.conf.erb"
    owner "diamond"
    group "root"
    mode 00600
    variables(:servers => get_head_nodes)
    notifies :restart, "service[diamond]", :delayed
  end

  service "diamond" do
    action [:enable, :start]
  end
end
| agilemobiledev/chef-bcpc | cookbooks/bcpc/recipes/diamond.rb | Ruby | apache-2.0 | 2,355 |
import { promises as fsPromises } from 'fs';
import { expect } from 'chai';
describe('Verify stryker runs with mocha < 6', () => {
  // Expected deprecation warning emitted by Stryker for old mocha versions.
  const expectedWarning =
    'DEPRECATED: Mocha < 6 detected. Please upgrade to at least Mocha version 6. Stryker will drop support for Mocha < 6 in V5';
  let logContent: string;

  before(async () => {
    // Read the full stryker log once for the assertions in this suite.
    logContent = await fsPromises.readFile('./stryker.log', 'utf8');
  });

  it('should warn about old mocha version', async () => {
    expect(logContent).contains(expectedWarning);
  });
});
| stryker-mutator/stryker | e2e/test/mocha-old-version/verify/verify.ts | TypeScript | apache-2.0 | 486 |
/**
* Copyright (C) 2010-2013 Alibaba Group Holding Limited
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.rocketmq.client.impl.consumer;
import com.alibaba.rocketmq.client.log.ClientLogger;
import com.alibaba.rocketmq.common.message.MessageConst;
import com.alibaba.rocketmq.common.message.MessageExt;
import com.alibaba.rocketmq.common.protocol.body.ProcessQueueInfo;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* Queue consumption snapshot
*
* @author shijia.wxr<vintage.wang@gmail.com>
* @since 2013-7-24
*/
public class ProcessQueue {
// Max lifetime (ms) of a rebalance lock without refresh; overridable via
// the rocketmq.client.rebalance.lockMaxLiveTime system property.
public final static long RebalanceLockMaxLiveTime = Long.parseLong(System.getProperty(
    "rocketmq.client.rebalance.lockMaxLiveTime", "30000"));
// Rebalance lock interval (ms); overridable via rocketmq.client.rebalance.lockInterval.
public final static long RebalanceLockInterval = Long.parseLong(System.getProperty(
    "rocketmq.client.rebalance.lockInterval", "20000"));
// Idle time (ms) since the last pull after which this queue counts as pull-expired.
private final static long PullMaxIdleTime = Long.parseLong(System.getProperty(
    "rocketmq.client.pull.pullMaxIdleTime", "120000"));
private final Logger log = ClientLogger.getLog();
// Guards msgTreeMap/msgTreeMapTemp and the offset bookkeeping below.
private final ReadWriteLock lockTreeMap = new ReentrantReadWriteLock();
// Pending messages keyed (and sorted) by queue offset.
private final TreeMap<Long, MessageExt> msgTreeMap = new TreeMap<Long, MessageExt>();
// Number of messages currently cached in this process queue.
private final AtomicLong msgCount = new AtomicLong();
private final Lock lockConsume = new ReentrantLock();
// Messages handed out via takeMessags but not yet committed/rolled back.
private final TreeMap<Long, MessageExt> msgTreeMapTemp = new TreeMap<Long, MessageExt>();
private final AtomicLong tryUnlockTimes = new AtomicLong(0);
// Highest queue offset ever put into this process queue.
private volatile long queueOffsetMax = 0L;
private volatile boolean dropped = false;
private volatile long lastPullTimestamp = System.currentTimeMillis();
private volatile long lastConsumeTimestamp = System.currentTimeMillis();
private volatile boolean locked = false;
private volatile long lastLockTimestamp = System.currentTimeMillis();
// True while cached messages are being (or waiting to be) consumed; see putMessage/takeMessags.
private volatile boolean consuming = false;
// Lag estimate: broker max offset minus newest pulled offset (set in putMessage).
private volatile long msgAccCnt = 0;
/**
 * @return true when the rebalance lock has not been refreshed within
 *         {@code RebalanceLockMaxLiveTime} milliseconds.
 */
public boolean isLockExpired() {
    return (System.currentTimeMillis() - this.lastLockTimestamp) > RebalanceLockMaxLiveTime;
}
/**
 * @return true when no pull has updated this queue within
 *         {@code PullMaxIdleTime} milliseconds.
 */
public boolean isPullExpired() {
    return (System.currentTimeMillis() - this.lastPullTimestamp) > PullMaxIdleTime;
}
/**
 * Adds freshly pulled messages to the cached tree map.
 *
 * @param msgs messages pulled from the broker, in arrival order.
 * @return true when a consume task should be dispatched (cache went from
 *         idle to holding messages); false otherwise.
 */
public boolean putMessage(final List<MessageExt> msgs) {
    boolean dispatchToConsume = false;
    try {
        this.lockTreeMap.writeLock().lockInterruptibly();
        try {
            int validMsgCnt = 0;
            for (MessageExt msg : msgs) {
                MessageExt old = msgTreeMap.put(msg.getQueueOffset(), msg);
                if (null == old) {
                    // count only offsets not already cached (dedupes re-pulls)
                    validMsgCnt++;
                    this.queueOffsetMax = msg.getQueueOffset();
                }
            }
            msgCount.addAndGet(validMsgCnt);

            if (!msgTreeMap.isEmpty() && !this.consuming) {
                dispatchToConsume = true;
                this.consuming = true;
            }

            if (!msgs.isEmpty()) {
                MessageExt messageExt = msgs.get(msgs.size() - 1);
                String property = messageExt.getProperty(MessageConst.PROPERTY_MAX_OFFSET);
                if (property != null) {
                    // lag = broker-reported max offset - offset of the newest pulled message
                    long accTotal = Long.parseLong(property) - messageExt.getQueueOffset();
                    if (accTotal > 0) {
                        this.msgAccCnt = accTotal;
                    }
                }
            }
        } finally {
            this.lockTreeMap.writeLock().unlock();
        }
    } catch (InterruptedException e) {
        log.error("putMessage exception", e);
    }

    return dispatchToConsume;
}
/**
 * @return the offset span (last - first) of currently cached messages, or
 *         0 when the cache is empty or lock acquisition is interrupted.
 */
public long getMaxSpan() {
    try {
        this.lockTreeMap.readLock().lockInterruptibly();
        try {
            if (!this.msgTreeMap.isEmpty()) {
                return this.msgTreeMap.lastKey() - this.msgTreeMap.firstKey();
            }
        } finally {
            this.lockTreeMap.readLock().unlock();
        }
    } catch (InterruptedException e) {
        log.error("getMaxSpan exception", e);
    }

    return 0;
}
/**
 * Removes consumed messages from the cache and reports the next offset.
 *
 * @param msgs the messages that have been consumed.
 * @return the smallest offset still cached after removal; queueOffsetMax + 1
 *         when the cache becomes empty; -1 when the cache was already empty
 *         or an error occurred.
 */
public long removeMessage(final List<MessageExt> msgs) {
    long result = -1;
    final long now = System.currentTimeMillis();
    try {
        this.lockTreeMap.writeLock().lockInterruptibly();
        this.lastConsumeTimestamp = now;
        try {
            if (!msgTreeMap.isEmpty()) {
                result = this.queueOffsetMax + 1;
                int removedCnt = 0;
                for (MessageExt msg : msgs) {
                    MessageExt prev = msgTreeMap.remove(msg.getQueueOffset());
                    if (prev != null) {
                        // accumulated as a negative delta, applied to msgCount below
                        removedCnt--;
                    }
                }
                msgCount.addAndGet(removedCnt);

                if (!msgTreeMap.isEmpty()) {
                    result = msgTreeMap.firstKey();
                }
            }
        } finally {
            this.lockTreeMap.writeLock().unlock();
        }
    } catch (Throwable t) {
        log.error("removeMessage exception", t);
    }

    return result;
}
/** @return the sorted cache of pending messages (offset -> message). */
public TreeMap<Long, MessageExt> getMsgTreeMap() {
    return msgTreeMap;
}

/** @return live counter of messages cached in this queue. */
public AtomicLong getMsgCount() {
    return msgCount;
}

/** @return whether this process queue has been marked dropped. */
public boolean isDropped() {
    return dropped;
}

public void setDropped(boolean dropped) {
    this.dropped = dropped;
}

/** @return whether this queue is currently marked locked. */
public boolean isLocked() {
    return locked;
}

public void setLocked(boolean locked) {
    this.locked = locked;
}
/**
 * Returns all taken-but-uncommitted messages (msgTreeMapTemp) to the
 * pending cache so they can be consumed again.
 */
public void rollback() {
    try {
        this.lockTreeMap.writeLock().lockInterruptibly();
        try {
            this.msgTreeMap.putAll(this.msgTreeMapTemp);
            this.msgTreeMapTemp.clear();
        } finally {
            this.lockTreeMap.writeLock().unlock();
        }
    } catch (InterruptedException e) {
        log.error("rollback exception", e);
    }
}
/**
 * Commits all taken messages: drops them from the temporary map and
 * decrements the cached-message counter.
 *
 * @return the next offset to consume (highest committed offset + 1), or
 *         -1 when there was nothing to commit or the lock was interrupted.
 */
public long commit() {
    try {
        this.lockTreeMap.writeLock().lockInterruptibly();
        try {
            // BUG FIX: TreeMap.lastKey() throws NoSuchElementException on an
            // empty map, so the pre-existing null check never protected the
            // empty case; return -1 ("nothing to commit") instead of letting
            // the exception escape to the caller.
            if (this.msgTreeMapTemp.isEmpty()) {
                return -1;
            }
            Long offset = this.msgTreeMapTemp.lastKey();
            msgCount.addAndGet(this.msgTreeMapTemp.size() * (-1));
            this.msgTreeMapTemp.clear();
            if (offset != null) {
                return offset + 1;
            }
        } finally {
            this.lockTreeMap.writeLock().unlock();
        }
    } catch (InterruptedException e) {
        log.error("commit exception", e);
    }

    return -1;
}
/**
 * Moves the given messages from the taken (temporary) map back into the
 * pending cache so they will be delivered to the consumer again.
 * NOTE(review): method name has a long-standing typo ("Cosume"); renaming
 * would break callers, so it is kept.
 */
public void makeMessageToCosumeAgain(List<MessageExt> msgs) {
    try {
        this.lockTreeMap.writeLock().lockInterruptibly();
        try {
            for (MessageExt msg : msgs) {
                this.msgTreeMapTemp.remove(msg.getQueueOffset());
                this.msgTreeMap.put(msg.getQueueOffset(), msg);
            }
        } finally {
            this.lockTreeMap.writeLock().unlock();
        }
    } catch (InterruptedException e) {
        log.error("makeMessageToCosumeAgain exception", e);
    }
}
/**
 * Removes up to {@code batchSize} messages with the smallest offsets from
 * the main map, stages them in {@code msgTreeMapTemp} (for a later commit
 * or rollback) and returns them in ascending offset order. Sets
 * {@code consuming} to false when nothing could be taken.
 * NOTE(review): "Messags" is a typo for "Messages"; kept for caller
 * compatibility.
 *
 * @param batchSize maximum number of messages to take
 * @return the taken messages; empty when the queue has none or the lock
 *         acquisition was interrupted
 */
public List<MessageExt> takeMessags(final int batchSize) {
    List<MessageExt> result = new ArrayList<MessageExt>(batchSize);
    final long now = System.currentTimeMillis();
    try {
        this.lockTreeMap.writeLock().lockInterruptibly();
        this.lastConsumeTimestamp = now;
        try {
            if (!this.msgTreeMap.isEmpty()) {
                for (int i = 0; i < batchSize; i++) {
                    // pollFirstEntry removes and returns the smallest offset,
                    // or null once the map is exhausted.
                    Map.Entry<Long, MessageExt> entry = this.msgTreeMap.pollFirstEntry();
                    if (entry != null) {
                        result.add(entry.getValue());
                        msgTreeMapTemp.put(entry.getKey(), entry.getValue());
                    } else {
                        break;
                    }
                }
            }
            if (result.isEmpty()) {
                consuming = false;
            }
        } finally {
            this.lockTreeMap.writeLock().unlock();
        }
    } catch (InterruptedException e) {
        log.error("take Messages exception", e);
    }
    return result;
}
/**
 * Discards all cached and staged messages and resets the message counter
 * and the maximum seen queue offset.
 */
public void clear() {
    try {
        this.lockTreeMap.writeLock().lockInterruptibly();
        try {
            this.msgTreeMap.clear();
            this.msgTreeMapTemp.clear();
            this.msgCount.set(0);
            this.queueOffsetMax = 0L;
        } finally {
            this.lockTreeMap.writeLock().unlock();
        }
    } catch (InterruptedException e) {
        // BUGFIX: previously logged "rollback exception" (copy/paste from
        // rollback()), which mislabeled failures in this method.
        log.error("clear exception", e);
    }
}
/** Returns the timestamp of the most recent broker lock refresh. */
public long getLastLockTimestamp() {
    return lastLockTimestamp;
}

/** Records the timestamp of the most recent broker lock refresh. */
public void setLastLockTimestamp(long lastLockTimestamp) {
    this.lastLockTimestamp = lastLockTimestamp;
}

/** Returns the lock guarding consumption from this queue. */
public Lock getLockConsume() {
    return lockConsume;
}

/** Returns the timestamp of the most recent pull from this queue. */
public long getLastPullTimestamp() {
    return lastPullTimestamp;
}

/** Records the timestamp of the most recent pull from this queue. */
public void setLastPullTimestamp(long lastPullTimestamp) {
    this.lastPullTimestamp = lastPullTimestamp;
}

/** Returns the accumulated-message count used for flow statistics. */
public long getMsgAccCnt() {
    return msgAccCnt;
}

/** Sets the accumulated-message count used for flow statistics. */
public void setMsgAccCnt(long msgAccCnt) {
    this.msgAccCnt = msgAccCnt;
}

/** Returns how many times an unlock of this queue has been attempted. */
public long getTryUnlockTimes() {
    return this.tryUnlockTimes.get();
}

/** Atomically increments the unlock-attempt counter. */
public void incTryUnlockTimes() {
    this.tryUnlockTimes.incrementAndGet();
}
/**
 * Copies a snapshot of this queue's state (cached offset range and count,
 * staged/transaction message range and count, lock and timestamp fields)
 * into the given {@link ProcessQueueInfo}. This is diagnostic-only code,
 * so any failure is silently ignored.
 *
 * @param info the info bean to populate
 */
public void fillProcessQueueInfo(final ProcessQueueInfo info) {
    try {
        this.lockTreeMap.readLock().lockInterruptibly();
        // BUGFIX: the unlock now runs only after the lock was actually
        // acquired. Previously a single try/catch/finally wrapped the
        // acquisition too, so a failed lockInterruptibly() still reached
        // unlock() and raised IllegalMonitorStateException from finally.
        try {
            if (!this.msgTreeMap.isEmpty()) {
                info.setCachedMsgMinOffset(this.msgTreeMap.firstKey());
                info.setCachedMsgMaxOffset(this.msgTreeMap.lastKey());
                info.setCachedMsgCount(this.msgTreeMap.size());
            }
            if (!this.msgTreeMapTemp.isEmpty()) {
                info.setTransactionMsgMinOffset(this.msgTreeMapTemp.firstKey());
                info.setTransactionMsgMaxOffset(this.msgTreeMapTemp.lastKey());
                info.setTransactionMsgCount(this.msgTreeMapTemp.size());
            }
            info.setLocked(this.locked);
            info.setTryUnlockTimes(this.tryUnlockTimes.get());
            info.setLastLockTimestamp(this.lastLockTimestamp);
            info.setDroped(this.dropped);
            info.setLastPullTimestamp(this.lastPullTimestamp);
            info.setLastConsumeTimestamp(this.lastConsumeTimestamp);
        } finally {
            this.lockTreeMap.readLock().unlock();
        }
    } catch (Exception e) {
        // Best-effort: status reporting must never break the consumer.
    }
}
/** Returns the timestamp of the most recent consume/take operation. */
public long getLastConsumeTimestamp() {
    return lastConsumeTimestamp;
}

/** Records the timestamp of the most recent consume/take operation. */
public void setLastConsumeTimestamp(long lastConsumeTimestamp) {
    this.lastConsumeTimestamp = lastConsumeTimestamp;
}
}
| tgou/RocketMQ | rocketmq-client/src/main/java/com/alibaba/rocketmq/client/impl/consumer/ProcessQueue.java | Java | apache-2.0 | 12,629 |
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Net;
using System.Runtime.Serialization.Json;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Xml.Linq;
using FeedProcessor.Contracts;
using FeedProcessor.Enums;
using FeedProcessor.FeedItems;
using FeedProcessor.Net;
namespace FeedProcessor.Feeds
{
/// <summary>
/// A feed which loads tweets from the twitter streaming API.
/// </summary>
internal class TwitterStreamingFeed : Feed
{
    /// <summary>
    /// A mapping of twitter user ids to user names.
    /// </summary>
    private static Dictionary<string, string> _twitterUserNameCache = new Dictionary<string, string>();

    /// <summary>
    /// The username to use for the twitter streaming API.
    /// </summary>
    private string _twitterUsername;

    /// <summary>
    /// The password to use for the twitter streaming API.
    /// </summary>
    private string _twitterPassword;

    /// <summary>
    /// A JSON deserializer for individual status payloads.
    /// </summary>
    private DataContractJsonSerializer _json = new DataContractJsonSerializer(typeof(TwitterJsonStatus));

    /// <summary>
    /// The background task which is connected to the streaming API.
    /// </summary>
    private Task _task;

    /// <summary>
    /// Initializes a new instance of the <see cref="TwitterStreamingFeed"/> class.
    /// </summary>
    /// <param name="username">The twitter account user name.</param>
    /// <param name="password">The twitter account password.</param>
    internal TwitterStreamingFeed(string username, string password)
        : base(TimeSpan.FromDays(1), DateTime.MinValue)
    {
        _twitterUsername = username;
        _twitterPassword = password;
        SourceType = SourceType.Twitter;
    }

    /// <summary>
    /// Builds the query that is passed to the feed service.
    /// </summary>
    /// <returns>The query URI.</returns>
    internal override Uri BuildQuery()
    {
        return new Uri(string.Format(CultureInfo.InvariantCulture, "http://stream.twitter.com/1/statuses/filter.json?{0}", Query));
    }

    /// <summary>
    /// Initiates a long-lived connection to the streaming API on a background
    /// task. Reconnects follow Twitter's documented policy: linear backoff
    /// (250ms steps, capped at 16s) for network errors, exponential backoff
    /// (10s doubling, capped at 240s) for HTTP protocol errors.
    /// </summary>
    [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes", Justification = "Really do want all exceptions."), System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Globalization", "CA1303:Do not pass literals as localized parameters", MessageId = "System.Debug.WriteLine(System.String)", Justification = "It's just a log message.")]
    protected override void Poll()
    {
        // Only one streaming connection per feed instance.
        if (_task != null)
        {
            return;
        }
        _task = Task.Factory.StartNew(new Action(() =>
        {
            HttpWebResponse webResponse = null;
            StreamReader responseStream = null;
            HttpWebRequest webRequest = null;
            int wait = 250;
            try
            {
                while (true)
                {
                    try
                    {
                        // Connect
                        webRequest = (HttpWebRequest)WebRequest.Create(BuildQuery());
                        webRequest.Credentials = new NetworkCredential(_twitterUsername, _twitterPassword);
                        webRequest.Timeout = -1;
                        webResponse = (HttpWebResponse)webRequest.GetResponse();
                        responseStream = new StreamReader(webResponse.GetResponseStream(), Encoding.GetEncoding("utf-8"));

                        // Read the stream; each line is one JSON message.
                        while (true)
                        {
                            // Successful read resets the backoff delay.
                            wait = 250;
                            ProcessResponse(responseStream.ReadLine());
                            RetryTime(HttpStatusCode.OK);
                        }
                    }
                    catch (WebException ex)
                    {
                        Debug.WriteLine(ex.Message);
                        if (ex.Status == WebExceptionStatus.ProtocolError)
                        {
                            // -- From Twitter Docs --
                            // When a HTTP error (> 200) is returned, back off exponentially.
                            // Perhaps start with a 10 second wait, double on each subsequent failure,
                            // and finally cap the wait at 240 seconds.

                            // Exponential Backoff
                            if (wait < 10000)
                            {
                                wait = 10000;
                            }
                            else
                            {
                                if (wait < 240000)
                                {
                                    wait = wait * 2;
                                }
                            }
                        }
                        else
                        {
                            // -- From Twitter Docs --
                            // When a network error (TCP/IP level) is encountered, back off linearly.
                            // Perhaps start at 250 milliseconds and cap at 16 seconds.

                            // Linear Backoff
                            if (wait < 16000)
                            {
                                wait += 250;
                            }
                        }
                    }
                    catch (Exception ex)
                    {
                        Debug.WriteLine(ex.Message);
                    }
                    finally
                    {
                        // Tear down the connection before sleeping and retrying.
                        if (webRequest != null)
                        {
                            webRequest.Abort();
                        }

                        if (responseStream != null)
                        {
                            responseStream.Close();
                            responseStream = null;
                        }

                        if (webResponse != null)
                        {
                            webResponse.Close();
                            webResponse = null;
                        }

                        Debug.WriteLine("Waiting: " + wait);
                        RetryTime(HttpStatusCode.NotAcceptable);
                        Thread.Sleep(wait);
                    }
                }
            }
            catch (Exception ex)
            {
                RetryTime(HttpStatusCode.NotAcceptable);
                Debug.WriteLine(ex.Message);
                Debug.WriteLine("Waiting: " + wait);
                Thread.Sleep(wait);
            }
        }));
    }

    /// <summary>
    /// Processes one line received from the streaming connection and raises
    /// a feed item for it.
    /// </summary>
    /// <param name="responseObject">A single JSON line from the stream, or null at end of stream.</param>
    internal override void ProcessResponse(object responseObject)
    {
        // BUGFIX: ReadLine() returns null at end of stream and the streaming
        // API emits blank keep-alive lines; previously both crashed here,
        // killing the connection and forcing a reconnect/backoff cycle.
        if (responseObject == null)
        {
            return;
        }

        string response = responseObject.ToString();
        if (string.IsNullOrWhiteSpace(response))
        {
            return;
        }

        byte[] byteArray = Encoding.UTF8.GetBytes(response);
        TwitterJsonStatus status = null;
        using (MemoryStream stream = new MemoryStream(byteArray))
        {
            status = _json.ReadObject(stream) as TwitterJsonStatus;
        }

        // Skip payloads that did not deserialize into a user-bearing status.
        // (assumes user is only absent on control messages such as delete
        // notices — TODO confirm against the stream format)
        if (status == null || status.user == null)
        {
            return;
        }

        StatusFeedItem feedItem = new StatusFeedItem
        {
            Author = status.user.screen_name,
            AvatarUri = new Uri(status.user.profile_image_url),
            Date = DateTimeOffset.ParseExact(status.created_at, "ddd MMM dd HH:mm:ss zzz yyyy", CultureInfo.InvariantCulture).DateTime,
            ServiceId = status.id,
            Uri = new Uri(string.Format(CultureInfo.InvariantCulture, "http://twitter.com/#!/{0}/status/{1}", status.user.screen_name, status.id)),
            Status = status.text
        };

        RaiseGotNewFeedItem(feedItem);
    }

    /// <summary>
    /// Returns a time after which it's ok to make another query. The
    /// streaming feed never throttles here (always DateTime.MinValue); the
    /// call is used to publish source up/down status.
    /// </summary>
    /// <param name="httpStatusCode">The HTTP status code returned from the last attempt.</param>
    /// <returns>
    /// The time after which it's ok to make another query.
    /// </returns>
    internal override DateTime RetryTime(HttpStatusCode httpStatusCode)
    {
        RaiseSourceStatusUpdated(httpStatusCode == HttpStatusCode.OK);
        return DateTime.MinValue;
    }

    /// <summary>
    /// Releases unmanaged and - optionally - managed resources
    /// </summary>
    /// <param name="disposing"><c>true</c> to release both managed and unmanaged resources; <c>false</c> to release only unmanaged resources.</param>
    protected override void Dispose(bool disposing)
    {
        base.Dispose(disposing);
    }

    #region Utility Methods

    #region GetTwitterUserIdFromUserName

    /// <summary>
    /// Callback for GetTwitterUserIdFromUserName.
    /// </summary>
    /// <param name="userId">The userId returned by the twitter API, or null on failure.</param>
    internal delegate void GetTwitterUserIdFromUserNameCallback(string userId);

    /// <summary>
    /// Gets the twitter user id from a username, consulting a process-wide
    /// cache before calling the twitter REST API.
    /// </summary>
    /// <param name="username">The username.</param>
    /// <param name="callback">The callback; invoked exactly once with the id or null.</param>
    [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes", Justification = "Really do want all exceptions.")]
    internal static void GetTwitterUserIdFromUserName(string username, GetTwitterUserIdFromUserNameCallback callback)
    {
        if (_twitterUserNameCache.ContainsValue(username))
        {
            callback((from kvp in _twitterUserNameCache where kvp.Value == username select kvp.Key).FirstOrDefault());
            return;
        }

        string query = string.Format(CultureInfo.InvariantCulture, "http://api.twitter.com/1/users/show.xml?screen_name={0}", username);
        AsyncWebRequest request = new AsyncWebRequest();
        request.Request(new Uri(query));
        request.Result += (sender, e) =>
        {
            if (e.Status != HttpStatusCode.OK)
            {
                callback(null);
                // BUGFIX: without this return, the handler fell through and
                // invoked the callback a second time after the failure path.
                return;
            }

            try
            {
                string userid = XDocument.Parse(e.Response).Element("user").Element("id").Value;
                _twitterUserNameCache[userid] = username;
                callback(userid);
            }
            catch
            {
                callback(null);
            }
        };
    }

    #endregion

    #endregion
}
}
| stimulant/SocialStream | FeedProcessor/Feeds/TwitterStreamingFeed.cs | C# | apache-2.0 | 11,420 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author Alexander Y. Kleymenov
* @version $Revision$
*/
package javax.crypto.spec;
import java.io.Serializable;
import java.security.spec.KeySpec;
import java.util.Arrays;
import javax.crypto.SecretKey;
/**
 * A provider-independent secret key: wraps raw key bytes together with an
 * algorithm name, serving both as a {@code KeySpec} and as a usable
 * {@code SecretKey} whose encoding format is always "RAW".
 */
public class SecretKeySpec implements SecretKey, KeySpec, Serializable {

    // The 5.0 spec. doesn't declare this serialVersionUID field
    // In order to be compatible it is explicitly declared here
    // for details see HARMONY-233
    private static final long serialVersionUID = 6577238317307289933L;

    /** Defensive copy of the raw key material. */
    private final byte[] key;

    /** Algorithm name supplied at construction; compared case-insensitively. */
    private final String algorithm;

    /** Encoding format; always "RAW" for this class. */
    private final String format = "RAW";

    /**
     * Creates a key from the full contents of {@code key}.
     *
     * @param key the key bytes (copied, must be non-null and non-empty).
     * @param algorithm the algorithm name (must be non-null).
     * @throws IllegalArgumentException on a null/empty key or null algorithm.
     */
    public SecretKeySpec(byte[] key, String algorithm) {
        if (key == null) {
            throw new IllegalArgumentException("key == null");
        }
        if (key.length == 0) {
            throw new IllegalArgumentException("key.length == 0");
        }
        if (algorithm == null) {
            throw new IllegalArgumentException("algorithm == null");
        }
        this.algorithm = algorithm;
        this.key = key.clone();
    }

    /**
     * Creates a key from {@code len} bytes of {@code key} starting at
     * {@code offset}.
     *
     * @param key the source buffer (copied, must be non-null and non-empty).
     * @param offset start index within the buffer.
     * @param len number of bytes to use.
     * @param algorithm the algorithm name (must be non-null).
     * @throws IllegalArgumentException when the key is null/empty, the
     *         requested range does not fit in the buffer, or the algorithm
     *         is null.
     * @throws ArrayIndexOutOfBoundsException when offset or len is negative.
     */
    public SecretKeySpec(byte[] key, int offset, int len, String algorithm) {
        if (key == null) {
            throw new IllegalArgumentException("key == null");
        }
        if (key.length == 0) {
            throw new IllegalArgumentException("key.length == 0");
        }
        // BEGIN android-changed
        if (len < 0 || offset < 0) {
            throw new ArrayIndexOutOfBoundsException("len < 0 || offset < 0");
        }
        // END android-changed
        if (key.length - offset < len) {
            throw new IllegalArgumentException("key too short");
        }
        if (algorithm == null) {
            throw new IllegalArgumentException("algorithm == null");
        }
        this.algorithm = algorithm;
        this.key = Arrays.copyOfRange(key, offset, offset + len);
    }

    /** Returns the algorithm name. */
    public String getAlgorithm() {
        return algorithm;
    }

    /** Returns the encoding format name, which is always "RAW". */
    public String getFormat() {
        return format;
    }

    /** Returns a fresh copy of the raw key bytes. */
    public byte[] getEncoded() {
        return key.clone();
    }

    /**
     * Hash code: algorithm name length plus the (signed) sum of the key
     * bytes. Kept identical to the historical formula for compatibility.
     */
    @Override
    public int hashCode() {
        int result = algorithm.length();
        for (int i = 0; i < key.length; i++) {
            result += key[i];
        }
        return result;
    }

    /**
     * Two specs are equal when their algorithm names match ignoring case
     * and their key bytes are identical.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof SecretKeySpec)) {
            return false;
        }
        SecretKeySpec other = (SecretKeySpec) obj;
        if (!algorithm.equalsIgnoreCase(other.algorithm)) {
            return false;
        }
        return Arrays.equals(key, other.key);
    }
}
| webos21/xi | java/jcl/src/java/javax/crypto/spec/SecretKeySpec.java | Java | apache-2.0 | 5,286 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
/**
 *
 * CreateEditsLog Synopsis: CreateEditsLog -f numFiles StartingBlockId
 * numBlocksPerFile [-r replicafactor] [-d editsLogDirectory] Default
 * replication factor is 1 Default edits log directory is /tmp/EditsLogOut
 *
 * Create a name node's edits log in /tmp/EditsLogOut. The file
 * /tmp/EditsLogOut/current/edits can be copied to a name node's
 * dfs.name.dir/current directory and the name node can be started as usual.
 *
 * The files are created in /createdViaInjectingInEditsLog The file names
 * contain the starting and ending blockIds; hence one can create multiple
 * edits logs using this command using non overlapping block ids and feed the
 * files to a single name node.
 *
 * See Also @link #DataNodeCluster for injecting a set of matching blocks
 * created with this command into a set of simulated data nodes.
 *
 */
public class CreateEditsLog {
    static final String BASE_PATH = "/createdViaInjectingInEditsLog";
    static final String EDITS_DIR = "/tmp/EditsLogOut";
    static String edits_dir = EDITS_DIR;
    public static final long BLOCK_GENERATION_STAMP = GenerationStamp.FIRST_VALID_STAMP;

    /**
     * Writes {@code numFiles} file-create/close transaction pairs into the
     * given edits log, each file holding {@code blocksPerFile} blocks with
     * consecutive block ids starting at {@code startingBlockId}. The log is
     * synced every 2000 block ids.
     */
    static void addFiles(FSEditLog editLog, int numFiles, short replication,
            int blocksPerFile, long startingBlockId,
            FileNameGenerator nameGenerator) {
        PermissionStatus p = new PermissionStatus("joeDoe", "people",
                new FsPermission((short) 0777));
        INodeDirectory dirInode = new INodeDirectory(p, 0L);
        editLog.logMkDir(BASE_PATH, dirInode);
        long blockSize = 10;
        // The same BlockInfo array is reused for every file; only the block
        // ids are rewritten per file.
        BlockInfo[] blocks = new BlockInfo[blocksPerFile];
        for (int iB = 0; iB < blocksPerFile; ++iB) {
            blocks[iB] = new BlockInfo(new Block(0, blockSize,
                    BLOCK_GENERATION_STAMP), replication);
        }

        long currentBlockId = startingBlockId;
        long bidAtSync = startingBlockId;

        for (int iF = 0; iF < numFiles; iF++) {
            for (int iB = 0; iB < blocksPerFile; ++iB) {
                blocks[iB].setBlockId(currentBlockId++);
            }

            try {
                INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
                        null, replication, 0, blockSize, blocks, p, "", "",
                        null);

                // Append path to filename with information about blockIDs
                String path = "_" + iF + "_B" + blocks[0].getBlockId()
                        + "_to_B" + blocks[blocksPerFile - 1].getBlockId()
                        + "_";
                String filePath = nameGenerator.getNextFileName("");
                filePath = filePath + path;

                // Log the new sub directory in edits
                if ((iF % nameGenerator.getFilesPerDirectory()) == 0) {
                    String currentDir = nameGenerator.getCurrentDir();
                    dirInode = new INodeDirectory(p, 0L);
                    editLog.logMkDir(currentDir, dirInode);
                }
                editLog.logOpenFile(filePath, inode);
                editLog.logCloseFile(filePath, inode);

                if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks
                    editLog.logSync();
                    bidAtSync = currentBlockId;
                }
            } catch (IOException e) {
                // FIX: message previously read "trascation".
                System.out.println("Creating transaction for file " + iF
                        + " encountered exception " + e);
            }
        }
        System.out.println("Created edits log in directory " + edits_dir);
        System.out.println(" containing " + numFiles
                + " File-Creates, each file with " + blocksPerFile + " blocks");
        System.out.println(" blocks range: " + startingBlockId + " to "
                + (currentBlockId - 1));
    }

    static String usage = "Usage: createditlogs "
            + " -f numFiles startingBlockIds NumBlocksPerFile [-r replicafactor] "
            + "[-d editsLogDirectory]\n"
            + "      Default replication factor is 1\n"
            + "      Default edits log directory is " + EDITS_DIR + "\n";

    /** Prints usage and exits with a non-zero status. */
    static void printUsageExit() {
        System.out.println(usage);
        System.exit(-1);
    }

    /** Prints an error message followed by usage, then exits. */
    static void printUsageExit(String err) {
        System.out.println(err);
        printUsageExit();
    }

    /**
     * Parses the command line, then generates and closes the edits log.
     *
     * @param args see {@link #usage}
     * @throws IOException if the edits log cannot be created or written
     */
    public static void main(String[] args) throws IOException {
        long startingBlockId = 1;
        int numFiles = 0;
        short replication = 1;
        int numBlocksPerFile = 0;

        if (args.length == 0) {
            printUsageExit();
        }

        for (int i = 0; i < args.length; i++) { // parse command line
            if (args[i].equals("-h"))
                printUsageExit();
            if (args[i].equals("-f")) {
                if (i + 3 >= args.length || args[i + 1].startsWith("-")
                        || args[i + 2].startsWith("-")
                        || args[i + 3].startsWith("-")) {
                    printUsageExit("Missing num files, starting block and/or number of blocks");
                }
                numFiles = Integer.parseInt(args[++i]);
                // BUGFIX: the starting block id is a long; parsing it with
                // Integer.parseInt rejected ids beyond Integer.MAX_VALUE.
                startingBlockId = Long.parseLong(args[++i]);
                numBlocksPerFile = Integer.parseInt(args[++i]);
                if (numFiles <= 0 || numBlocksPerFile <= 0) {
                    printUsageExit("numFiles and numBlocksPerFile must be greater than 0");
                }
            } else if (args[i].equals("-r")) {
                // BUGFIX: the condition previously was
                // args[i].equals("-r") || args[i + 1].startsWith("-"),
                // which read args[i + 1] before the bounds check (possible
                // ArrayIndexOutOfBoundsException on the last argument) and
                // mis-routed unrelated flags into the -r branch.
                if (i + 1 >= args.length || args[i + 1].startsWith("-")) {
                    printUsageExit("Missing replication factor");
                }
                replication = Short.parseShort(args[++i]);
            } else if (args[i].equals("-d")) {
                if (i + 1 >= args.length || args[i + 1].startsWith("-")) {
                    printUsageExit("Missing edits logs directory");
                }
                edits_dir = args[++i];
            } else {
                printUsageExit();
            }
        }

        File editsLogDir = new File(edits_dir);
        File subStructureDir = new File(edits_dir + "/"
                + Storage.STORAGE_DIR_CURRENT);
        if (!editsLogDir.exists()) {
            if (!editsLogDir.mkdir()) {
                System.out.println("cannot create " + edits_dir);
                System.exit(-1);
            }
        }
        if (!subStructureDir.exists()) {
            if (!subStructureDir.mkdir()) {
                System.out.println("cannot create subdirs of " + edits_dir);
                System.exit(-1);
            }
        }

        FSImage fsImage = new FSImage(new File(edits_dir));
        FileNameGenerator nameGenerator = new FileNameGenerator(BASE_PATH, 100);

        FSEditLog editLog = fsImage.getEditLog();
        editLog.createEditLogFile(fsImage.getFsEditName());
        editLog.open();
        addFiles(editLog, numFiles, replication, numBlocksPerFile,
                startingBlockId, nameGenerator);
        editLog.logSync();
        editLog.close();
    }
}
| shot/hadoop-source-reading | src/test/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java | Java | apache-2.0 | 7,159 |
// Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)
package log
import (
"encoding/json"
"fmt"
"os"
"reflect"
"unicode/utf8"
"golang.org/x/net/context"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/util/caller"
)
// AddStructured creates a structured log entry to be written to the
// specified facility of the logger. depth is the number of stack frames
// to skip when resolving the caller's file and line.
func AddStructured(ctx context.Context, s Severity, depth int, format string, args []interface{}) {
	// Skip one extra frame so the reported call site is our caller's.
	file, line, _ := caller.Lookup(depth + 1)
	entry := LogEntry{}
	entry.set(ctx, format, args)
	logging.outputLogEntry(s, file, line, false, &entry)
}
// getJSON returns a JSON representation of the specified argument.
// Returns nil if the type is simple and does not require a separate
// JSON representation.
func getJSON(arg interface{}) []byte {
	// Not much point in storing strings and byte slices twice, as
	// they're nearly always exactly specified in the format string.
	switch arg.(type) {
	case string, []byte, roachpb.Key, roachpb.EncodedKey:
		return nil
	}
	jsonBytes, err := json.Marshal(arg)
	if err != nil {
		// Encode the marshaling failure itself so the entry still
		// carries parseable JSON instead of being dropped.
		return []byte(fmt.Sprintf("{\"error\": %q}", err.Error()))
	}
	return jsonBytes
}
// set populates the entry's Format/Args pair from the given format string
// and arguments and, when a context is supplied, copies any typed log
// Field values attached to the context (node, store, range, method, key)
// into the corresponding entry fields.
func (entry *LogEntry) set(ctx context.Context, format string, args []interface{}) {
	entry.Format, entry.Args = parseFormatWithArgs(format, args)
	if ctx != nil {
		// Probe every known field key; each present value lands in its
		// typed slot on the entry.
		for i := Field(0); i < maxField; i++ {
			if v := ctx.Value(i); v != nil {
				switch vTyp := v.(type) {
				case roachpb.NodeID:
					entry.NodeID = &vTyp
				case roachpb.StoreID:
					entry.StoreID = &vTyp
				case roachpb.RangeID:
					entry.RangeID = &vTyp
				case roachpb.Method:
					entry.Method = &vTyp
				case roachpb.Key:
					entry.Key = vTyp
				}
			}
		}
	}
}
// parseFormatWithArgs parses the format string, matching each
// format specifier with an argument from the args array. It returns a
// normalized format string in which every verb has been replaced by %s,
// together with one LogEntry_Arg per consumed argument (each rendered
// with its original verb). Surplus arguments are appended using %v.
func parseFormatWithArgs(format string, args []interface{}) (string, []LogEntry_Arg) {
	// Process format string.
	var logArgs []LogEntry_Arg
	var buf []byte
	var idx int
	end := len(format)
	for i := 0; i < end; {
		// Copy the literal run up to the next '%' verbatim.
		lasti := i
		for i < end && format[i] != '%' {
			i++
		}
		if i > lasti {
			buf = append(buf, format[lasti:i]...)
		}
		if i >= end {
			break
		}
		start := i
		// Process one verb.
		i++
	F:
		// Skip any flag characters following the '%'.
		for ; i < end; i++ {
			switch format[i] {
			case '#', '0', '+', '-', ' ':
			default:
				break F
			}
		}
		// TODO(spencer): should arg numbers dynamic precision be
		// supported? They're so rare, better to just panic here for now.
		if i < end && (format[i] == '[' || format[i] == '*') {
			panic(fmt.Sprintf("arg numbers in format not supported by logger: %s", format))
		}
		// Read optional width.
		for ; i < end && format[i] >= '0' && format[i] <= '9'; i++ {
		}
		// Read optional precision.
		if i < end && format[i] == '.' {
			for i = i + 1; i < end && format[i] >= '0' && format[i] <= '9'; i++ {
			}
		}
		if i >= end {
			break
		}
		// The verb character itself may be multi-byte.
		c, w := utf8.DecodeRuneInString(format[i:])
		i += w
		// Escape and add percent directly to format buf.
		if c == '%' {
			buf = append(buf, '%', '%')
			continue
		}
		buf = append(buf, "%s"...)
		// New format string always gets %s, though we use the actual
		// format to generate the string here for the log argument.
		if idx >= len(args) {
			// More verbs than arguments: emit the remainder literally.
			fmt.Fprintf(os.Stderr, "ERROR: insufficient parameters specified for format string %s", format)
			return string(append(buf, format[i:]...)), logArgs
		}
		logArgs = append(logArgs, makeLogArg(format[start:i], args[idx]))
		idx++ // advance to next arg index
	}
	// Add arguments which were not processed via format specifiers.
	for ; idx < len(args); idx++ {
		logArgs = append(logArgs, makeLogArg("%v", args[idx]))
	}
	return string(buf), logArgs
}
// makeLogArg renders a single argument using its original format verb and
// records its dynamic type name plus an optional JSON encoding.
func makeLogArg(format string, arg interface{}) LogEntry_Arg {
	var tstr string
	// reflect.TypeOf returns nil for a nil interface value; leave the
	// type name empty in that case.
	if t := reflect.TypeOf(arg); t != nil {
		tstr = t.String()
	}
	return LogEntry_Arg{
		Type: tstr,
		Str:  fmt.Sprintf(format, arg),
		Json: getJSON(arg),
	}
}
| jamesgraves/cockroach | util/log/structured.go | GO | apache-2.0 | 4,615 |
using System.Collections.Generic;
using System.Net;
using System.Text;
namespace JFrog.Artifactory.Model
{
/// <summary>
/// Artifactory MSBuild build-info model: the root object serialized and
/// published to Artifactory for a single build run.
/// </summary>
public class Build
{
    // Format placeholder for the build start time string.
    public readonly static string STARTED_FORMAT = "{0}";//.000+0000";

    // Date format Artifactory expects for timestamps.
    public readonly static string ARTIFACTORY_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.ssszzzz";

    /// <summary>
    /// build/assembly version (fixed for this plugin release)
    /// </summary>
    public string version { get { return "1.0.1"; } }

    /// <summary>
    /// project name
    /// </summary>
    public string name { get; set; }

    /// <summary>
    /// build number
    /// </summary>
    public string number { get; set; }

    /// <summary>
    /// build type identifier
    /// </summary>
    public string type { get; set; }

    /// <summary>
    /// build tool name and version (e.g. MSBuild)
    /// </summary>
    public BuildAgent buildAgent { get; set; }

    /// <summary>
    /// CI server agent information
    /// </summary>
    public Agent agent { get; set; }

    /// <summary>
    /// Build start time
    /// </summary>
    public string started { get; set; }

    /// <summary>
    /// Build start time in milliseconds since the epoch
    /// </summary>
    public string startedDateMillis { get; set; }

    /// <summary>
    /// Build duration in millis
    /// </summary>
    public long durationMillis { get; set; }

    /// <summary>
    /// The user who executed the build (TFS user or system user)
    /// </summary>
    public string principal { get; set; }

    /// <summary>
    /// The artifactory user used to deploy the build's artifacts
    /// </summary>
    public string artifactoryPrincipal { get; set; }

    /// <summary>
    /// build url in the build server
    /// </summary>
    public string url { get; set; }

    /// <summary>
    /// system variables
    /// </summary>
    public IDictionary<string,string> properties { get; set; }

    /// <summary>
    /// Version control revision (Changeset number in TFS)
    /// </summary>
    public string vcsRevision { get; set; }

    /// <summary>
    /// license-control settings for this build
    /// </summary>
    public LicenseControl licenseControl { get; set; }

    /// <summary>
    /// Black Duck governance settings for this build
    /// </summary>
    public BlackDuckGovernance blackDuckGovernance { get; set; }

    /// <summary>
    /// build retention policy
    /// </summary>
    public BuildRetention buildRetention { get; set; }

    /// <summary>
    /// A list of one or more modules produced by this build
    /// </summary>
    public List<Module> modules { get; set; }

    /// <summary>
    /// deploy client settings (timeout/proxy)
    /// </summary>
    public DeployClient deployClient { set; get; }

    /// <summary>
    /// Returns the default build coordinates (name, number, timestamp,
    /// VCS revision) as a property map.
    /// </summary>
    public Dictionary<string, string> getDefaultProperties()
    {
        Dictionary<string, string> result = new Dictionary<string, string>();
        result.Add("build.name", name);
        result.Add("build.number", number);
        result.Add("build.timestamp", startedDateMillis);
        result.Add("vcs.revision", vcsRevision);
        return result;
    }

    /// <summary>
    /// Preparing the properties (Matrix params) to a suitable URL query:
    /// each pair is appended as ";key=value" with both parts URL-encoded.
    /// </summary>
    /// <param name="matrixParam">pairs to encode; may be null</param>
    /// <returns>the encoded matrix-parameter suffix (empty for null input)</returns>
    public static string buildMatrixParamsString(List<KeyValuePair<string, string>> matrixParam)
    {
        StringBuilder matrix = new StringBuilder();
        if (matrixParam != null)
        {
            matrixParam.ForEach(
                pair => matrix.Append(";").Append(WebUtility.UrlEncode(pair.Key)).Append("=").
                               Append(WebUtility.UrlEncode(pair.Value))
            );
        }
        return matrix.ToString();
    }
}
/// <summary>
/// Build agent name and version, for example MSBuild 12.0
/// </summary>
public class BuildAgent
{
    public string name { get; set; }
    public string version { get; set; }
}
/// <summary>
/// CI server name and version, for example TFS 2013
/// </summary>
//public class Agent
//{
// public string name { get; set; }
// public string version { get; set; }
//}
/// <summary>
/// License-control section of the build info: whether checks run, which
/// scopes are covered and who is notified about violations.
/// </summary>
public class LicenseControl
{
    public string runChecks { get; set; }
    public string includePublishedArtifacts { get; set; }
    public string autoDiscover { get; set; }
    public List<string> licenseViolationsRecipients { get; set; }
    public List<string> scopes { get; set; }
}

/// <summary>
/// Black Duck Code Center governance section of the build info.
/// </summary>
public class BlackDuckGovernance
{
    public string runChecks { get; set; }
    public string appName { get; set; }
    public string appVersion { get; set; }
    public string autoCreateMissingComponentRequests { get; set; }
    public string autoDiscardStaleComponentRequests { get; set; }
    public string includePublishedArtifacts { get; set; }
    public List<string> reportRecipients { get; set; }
    public List<string> scopes { get; set; }
}

/// <summary>
/// Build retention policy: how many builds to keep, whether to delete
/// their artifacts, and build numbers exempt from discarding.
/// </summary>
public class BuildRetention
{
    public int count { get; set; }
    public bool deleteBuildArtifacts { get; set; }
    public List<string> buildNumbersNotToBeDiscarded { get; set; }
}
/// <summary>
/// build module data
/// </summary>
public class Module
{
    /// <summary>
    /// Creates a module for the given project. The artifact set uses an
    /// Artifact instance as its own equality comparer so duplicates
    /// (same type/sha1/md5/name) collapse to one entry.
    /// </summary>
    /// <param name="projectName">used as the module id</param>
    public Module(string projectName)
    {
        Artifacts = new HashSet<Artifact>(new Artifact());
        Dependencies = new List<Dependency>();
        id = projectName;
    }

    /// <summary>
    /// module identifier
    /// </summary>
    public string id { get; set; }

    /// <summary>
    /// A list of artifacts deployed for this module
    /// </summary>
    public HashSet<Artifact> Artifacts { get; set; }

    /// <summary>
    /// A list of dependencies used when building this module
    /// </summary>
    public List<Dependency> Dependencies { get; set; }
}
/// <summary>
/// A deployed build artifact. The class also acts as its own
/// <see cref="IEqualityComparer{T}"/> (see Module's constructor), comparing
/// by type, sha1, md5 and name.
/// </summary>
public class Artifact : IEqualityComparer<Artifact>
{
    public string type { get; set; }
    public string sha1 { get; set; }
    public string md5 { get; set; }
    public string name { get; set; }

    /// <summary>
    /// Field-by-field equality over type/sha1/md5/name.
    /// BUGFIX: all four properties are settable and may legitimately be
    /// null; the previous implementation dereferenced them directly and
    /// threw NullReferenceException (and also crashed on null arguments,
    /// which the IEqualityComparer contract requires handling).
    /// </summary>
    public bool Equals(Artifact a, Artifact b)
    {
        if (ReferenceEquals(a, b))
        {
            return true;
        }

        if (a == null || b == null)
        {
            return false;
        }

        return string.Equals(a.type, b.type)
            && string.Equals(a.sha1, b.sha1)
            && string.Equals(a.md5, b.md5)
            && string.Equals(a.name, b.name);
    }

    /// <summary>
    /// Hash over the same four fields, treating null fields (and a null
    /// object) as hash 0. For fully non-null artifacts the formula is
    /// unchanged from the original (17/31 accumulation).
    /// </summary>
    public int GetHashCode(Artifact obj)
    {
        if (obj == null)
        {
            return 0;
        }

        unchecked
        {
            int hash = 17;
            hash = hash * 31 + (obj.type == null ? 0 : obj.type.GetHashCode());
            hash = hash * 31 + (obj.sha1 == null ? 0 : obj.sha1.GetHashCode());
            hash = hash * 31 + (obj.md5 == null ? 0 : obj.md5.GetHashCode());
            hash = hash * 31 + (obj.name == null ? 0 : obj.name.GetHashCode());
            return hash;
        }
    }
}
/// <summary>
/// A dependency consumed while building a module, identified by id and
/// checksums, optionally tagged with the scopes it applies to.
/// </summary>
public class Dependency
{
    public string type { get; set; }
    public string sha1 { get; set; }
    public string md5 { get; set; }
    public string id { get; set; }
    public List<string> scopes { get; set; }
}

/// <summary>
/// Settings for the HTTP client used to deploy to Artifactory:
/// request timeout and optional proxy.
/// </summary>
public class DeployClient
{
    public int timeout { get; set; }
    public Proxy proxy { set; get; }
}
/// <summary>
/// HTTP proxy configuration for the deploy client, with optional credentials.
/// </summary>
public class Proxy
{
    /// <summary>Empty configuration (no host, port 0, no credentials).</summary>
    public Proxy() { }

    /// <summary>Anonymous proxy at the given endpoint.</summary>
    public Proxy(string host, int port)
    {
        Host = host;
        Port = port;
        IsCredentialsExists = false;
    }

    /// <summary>Authenticated proxy at the given endpoint.</summary>
    public Proxy(string host, int port, string username, string password)
    {
        Host = host;
        Port = port;
        Username = username;
        Password = password;
        IsCredentialsExists = true;
    }

    /// <summary>Proxy host name; null when constructed with the default ctor.</summary>
    public string Host { get; private set; }

    /// <summary>Proxy port; 0 when constructed with the default ctor.</summary>
    public int Port { get; private set; }

    /// <summary>Proxy user name; null unless the credentialed ctor was used.</summary>
    public string Username { get; private set; }

    /// <summary>Proxy password; null unless the credentialed ctor was used.</summary>
    public string Password { get; private set; }

    /// <summary>True only when the credentialed constructor was used.</summary>
    public bool IsCredentialsExists { get; private set; }

    /// <summary>Whether the proxy should be bypassed; settable by callers.</summary>
    public bool IsBypass { get; set; }
}
//public static class Json
//{
// public static const string version = "version";
// public static const string name = "name";
// public static const string number = "number";
// public static const string buildAgent = "buildAgent";
// public static const string agent = "agent";
// public static const string started = "started";
// public static const string durationMillis = "durationMillis";
// public static const string principal = "principal";
// public static const string artifactoryPrincipal = "artifactoryPrincipal";
// public static const string url = "url";
// public static const string vcsRevision = "vcsRevision";
// public static const string licenseControl = "licenseControl";
// public static const string buildRetention = "buildRetention";
// public static const string properties = "properties";
// public static const string modules = "modules";
// public static const string dependencies = "dependencies";
// public static const string artifacts = "artifacts";
// public static const string scopes = "scopes";
//}
}
| nilleb/msbuild-artifactory-plugin | msbuild-artifactory-plugin/Model/Build.cs | C# | apache-2.0 | 9,387 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.datapipeline.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.datapipeline.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * DeactivatePipelineResult JSON Unmarshaller.
 *
 * <p>The DeactivatePipeline response carries no fields to read, so
 * unmarshalling simply returns a fresh, empty result object; the
 * {@code context} is not consumed. Generated code — do not hand-edit logic.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DeactivatePipelineResultJsonUnmarshaller implements Unmarshaller<DeactivatePipelineResult, JsonUnmarshallerContext> {

    /** Returns a new, empty {@link DeactivatePipelineResult}; no JSON tokens are read. */
    public DeactivatePipelineResult unmarshall(JsonUnmarshallerContext context) throws Exception {
        DeactivatePipelineResult deactivatePipelineResult = new DeactivatePipelineResult();
        return deactivatePipelineResult;
    }

    /** Cached shared instance, lazily created. */
    private static DeactivatePipelineResultJsonUnmarshaller instance;

    /**
     * Returns the shared unmarshaller instance.
     *
     * <p>Lazy init is unsynchronized — a benign race, since the instance is
     * stateless and a duplicate construction is harmless (standard pattern
     * throughout the generated SDK unmarshallers).
     */
    public static DeactivatePipelineResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new DeactivatePipelineResultJsonUnmarshaller();
        return instance;
    }
}
| aws/aws-sdk-java | aws-java-sdk-datapipeline/src/main/java/com/amazonaws/services/datapipeline/model/transform/DeactivatePipelineResultJsonUnmarshaller.java | Java | apache-2.0 | 1,666 |
/**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.placeholders;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Tag;
import java.util.Map;
/**
 * An extension of the {@link Id} interface that allows the list of tag names attached
 * to the Id to be declared in advance of the use of the metric. This can be used to
 * provide a default value for a tag or to use a TagFactory implementation that uses
 * context available in the execution environment to compute the value of the tag.
 */
public interface PlaceholderId {

    /**
     * Description of the measurement that is being collected.
     *
     * @return the name of this id
     */
    String name();

    /**
     * New id with an additional tag value.
     *
     * @param k tag key
     * @param v tag value
     * @return a new id with the tag appended
     */
    PlaceholderId withTag(String k, String v);

    /**
     * New id with an additional tag value.
     *
     * @param t the tag to append
     * @return a new id with the tag appended
     */
    PlaceholderId withTag(Tag t);

    /**
     * New id with additional tag values.
     *
     * @param tags the tags to append
     * @return a new id with all of the tags appended
     */
    PlaceholderId withTags(Iterable<Tag> tags);

    /**
     * New id with additional tag values.
     *
     * @param tags key/value pairs to append as tags
     * @return a new id with all of the entries appended as tags
     */
    PlaceholderId withTags(Map<String, String> tags);

    /**
     * New id with an additional tag factory.
     *
     * @param factory
     *     the factory to use to generate the values for the tag
     * @return a new id with the factory attached
     */
    PlaceholderId withTagFactory(TagFactory factory);

    /**
     * New id with additional tag factories.
     *
     * @param factories
     *     a collection of factories for producing values for the tags
     * @return a new id with all of the factories attached
     */
    PlaceholderId withTagFactories(Iterable<TagFactory> factories);

    /**
     * Invokes each of the associated tag factories to produce a Id based on the
     * runtime context available when this method is invoked. If an associated
     * TagFactory produces a non-null Tag, then the returned Id will have that
     * Tag associated with it.
     *
     * @return an Id that has the same name as this id and the resolved tag values attached
     */
    Id resolveToId();
}
| pstout/spectator | spectator-ext-placeholders/src/main/java/com/netflix/spectator/placeholders/PlaceholderId.java | Java | apache-2.0 | 2,369 |
# Copyright 2022 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add tenant_id to lcm_subscriptions and lcm_op_occs
Revision ID: d6ae359ab0d6
Revises: 3ff50553e9d3
Create Date: 2022-01-06 13:35:53.868106
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd6ae359ab0d6'  # this migration's id
down_revision = '3ff50553e9d3'  # parent revision this migration applies on top of
def upgrade(active_plugins=None, options=None):
    """Add a mandatory ``tenant_id`` column (VARCHAR(64), NOT NULL) to the
    VNF LCM subscription and operation-occurrence tables.

    ``active_plugins`` and ``options`` are accepted for the migration-hook
    signature and are unused here.
    """
    # Both tables get an identical definition; build a fresh Column per
    # iteration since Column objects should not be shared between tables.
    for table_name in ('vnf_lcm_subscriptions', 'vnf_lcm_op_occs'):
        op.add_column(
            table_name,
            sa.Column('tenant_id', sa.String(length=64), nullable=False))
| stackforge/tacker | tacker/db/migration/alembic_migrations/versions/d6ae359ab0d6_add_tenant_id_to_lcm_subscriptions_and_.py | Python | apache-2.0 | 1,237 |