code
stringlengths 3
1.01M
| repo_name
stringlengths 5
116
| path
stringlengths 3
311
| language
stringclasses 30
values | license
stringclasses 15
values | size
int64 3
1.01M
|
|---|---|---|---|---|---|
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
import java.io._
import sbt.internal._
import sbt.internal.util.{
AttributeEntry,
AttributeMap,
ConsoleOut,
GlobalLogging,
MainAppender,
Settings,
Terminal => ITerminal,
}
// Test fixture: an AutoPlugin declared directly in package sbt.
// `requires = empty` means it has no prerequisite plugins.
object PluginCommandTestPlugin0 extends AutoPlugin { override def requires = empty }
// Same kind of fixture, but nested one package deeper (sbt.subpackage) so the
// tests can exercise plugin-name resolution across nested packages.
package subpackage {
object PluginCommandTestPlugin1 extends AutoPlugin { override def requires = empty }
}
/**
 * Tests for the `plugin` command: it must resolve an AutoPlugin by its
 * fully-qualified name (at any package nesting depth) and, on a miss,
 * suggest similarly-named plugins. Command output is captured through
 * FakeState.processCommand.
 */
object PluginCommandTest extends verify.BasicTestSuite {
  import subpackage._
  import FakeState._

  // Runs the given command with both fixture plugins enabled and returns the console output.
  private def run(command: String): String =
    processCommand(command, PluginCommandTestPlugin0, PluginCommandTestPlugin1)

  test("`plugin` command should work for plugins within nested in one package") {
    val out = run("plugin sbt.PluginCommandTestPlugin0")
    assert(out.contains("sbt.PluginCommandTestPlugin0 is activated."))
  }

  test("it should work for plugins nested more than one package") {
    val out = run("plugin sbt.subpackage.PluginCommandTestPlugin1")
    assert(out.contains("sbt.subpackage.PluginCommandTestPlugin1 is activated."))
  }

  test("it should suggest a plugin when given an incorrect plugin with a similar name") {
    val out = run("plugin PluginCommandTestPlugin0")
    assert(
      out.contains(
        "Not a valid plugin: PluginCommandTestPlugin0 (similar: sbt.PluginCommandTestPlugin0, sbt.subpackage.PluginCommandTestPlugin1)"
      )
    )
  }
}
/**
 * Builds a minimal-but-complete sbt State so a single command string can be
 * run through MainLoop.processCommand with its console output captured.
 */
object FakeState {
/**
 * Executes `input` against a fake State whose build enables `enabledPlugins`,
 * returning everything the command printed to the terminal.
 * The temporary log file is always deleted, even if the command throws.
 */
def processCommand(input: String, enabledPlugins: AutoPlugin*): String = {
val outBuffer = new ByteArrayOutputStream
val logFile = File.createTempFile("sbt", ".log")
try {
val state = FakeState(logFile, enabledPlugins: _*)
// Redirect sbt's terminal output into the in-memory buffer for the duration of the command.
ITerminal.withOut(new PrintStream(outBuffer, true)) {
MainLoop.processCommand(Exec(input, None), state)
}
new String(outBuffer.toByteArray)
} finally {
logFile.delete()
()
}
}
/**
 * Constructs a State containing just enough build structure (one project in the
 * current working directory with the given plugins) for the `plugin` command to run.
 * NOTE(review): construction mirrors the real Load pipeline — settings/session/
 * structure wiring below is order-sensitive; do not reorder casually.
 */
def apply(logFile: File, plugins: AutoPlugin*) = {
val base = new File("").getAbsoluteFile
val testProject = Project("test-project", base).setAutoPlugins(plugins)
val settings: Seq[Def.Setting[_]] = Nil
val currentProject = Map(testProject.base.toURI -> testProject.id)
val currentEval: () => sbt.compiler.Eval = () => Load.mkEval(Nil, base, Nil)
val sessionSettings =
SessionSettings(base.toURI, currentProject, Nil, Map.empty, Nil, currentEval)
// No scope delegation and no scope-local settings are needed for these tests.
val delegates: (Scope) => Seq[Scope] = _ => Nil
val scopeLocal: Def.ScopeLocal = _ => Nil
val (cMap, data: Settings[Scope]) =
Def.makeWithCompiledMap(settings)(delegates, scopeLocal, Def.showFullKey)
val extra: KeyIndex => BuildUtil[_] = (keyIndex) =>
BuildUtil(base.toURI, Map.empty, keyIndex, data)
val structureIndex: StructureIndex =
Load.structureIndex(data, settings, extra, Map.empty)
// Streams are never touched by the `plugin` command, so null is tolerated here.
val streams: (State) => BuildStreams.Streams = null
val loadedDefinitions: LoadedDefinitions = new LoadedDefinitions(
base,
Nil,
ClassLoader.getSystemClassLoader,
Nil,
Seq(testProject),
Nil
)
val pluginData = PluginData(Nil, Nil, None, None, Nil, Nil, Nil, Nil, Nil, None)
val builds: DetectedModules[BuildDef] = new DetectedModules[BuildDef](Nil)
// Register the supplied plugins as if they had been detected on the classpath.
val detectedAutoPlugins: Seq[DetectedAutoPlugin] =
plugins.map(p => DetectedAutoPlugin(p.label, p, hasAutoImport = false))
val detectedPlugins = new DetectedPlugins(detectedAutoPlugins, builds)
val loadedPlugins =
new LoadedPlugins(base, pluginData, ClassLoader.getSystemClassLoader, detectedPlugins)
val buildUnit = new BuildUnit(base.toURI, base, loadedDefinitions, loadedPlugins)
val (partBuildUnit: PartBuildUnit, _) = Load.loaded(buildUnit)
val loadedBuildUnit = Load.resolveProjects(base.toURI, partBuildUnit, _ => testProject.id)
val units = Map(base.toURI -> loadedBuildUnit)
val buildStructure = new BuildStructure(
units,
base.toURI,
settings,
data,
structureIndex,
streams,
delegates,
scopeLocal,
cMap,
)
// The `plugin` command reads the session settings and build structure from state attributes.
val attributes = AttributeMap.empty ++ AttributeMap(
AttributeEntry(Keys.sessionSettings, sessionSettings),
AttributeEntry(Keys.stateBuildStructure, buildStructure)
)
State(
null,
Seq(BuiltinCommands.plugin),
Set.empty,
None,
List(),
State.newHistory,
attributes,
GlobalLogging.initial(
MainAppender.globalDefault(ConsoleOut.globalProxy),
logFile,
ConsoleOut.globalProxy
),
None,
State.Continue
)
}
}
|
xuwei-k/xsbt
|
main/src/test/scala/PluginCommandTest.scala
|
Scala
|
apache-2.0
| 4,851
|
from .forms import SetupForm
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from splunkdj.decorators.render import render_to
from splunkdj.setup import create_setup_view_context
@login_required
def home(request):
    """App landing view: forwards the user to the default view.

    The default happens to be a non-framework (plain Splunk) view, so a
    hard-coded path is used rather than a Django URL name.
    """
    default_view = '/en-us/app/twitter2/twitter_general'
    return redirect(default_view)
@render_to('twitter2:setup.html')
@login_required
def setup(request):
    """Renders the app setup page via splunkdj's setup-view helper.

    After building the context, the current app's state is reloaded so that
    non-framework views notice once the app becomes configured.
    """
    context = create_setup_view_context(
        request,
        SetupForm,
        reverse('twitter2:home'))
    # HACK: Workaround DVPL-4647 (Splunk 6.1 and below):
    # Refresh current app's state so that non-framework views
    # observe when the app becomes configured.
    splunk_service = request.service
    current_app = splunk_service.namespace['app']
    splunk_service.apps[current_app].post('_reload')
    return context
|
dakiri/splunk-app-twitter
|
twitter2/django/twitter2/views.py
|
Python
|
apache-2.0
| 944
|
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/gamelift/model/DeleteVpcPeeringConnectionRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::GameLift::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
// Default-construct with both optional fields marked unset; SerializePayload()
// only emits fields whose "HasBeenSet" flag is true.
DeleteVpcPeeringConnectionRequest::DeleteVpcPeeringConnectionRequest()
    : m_fleetIdHasBeenSet(false),
      m_vpcPeeringConnectionIdHasBeenSet(false)
{
}
// Builds the JSON request body, including only the fields that were explicitly set.
Aws::String DeleteVpcPeeringConnectionRequest::SerializePayload() const
{
  JsonValue body;

  if(m_fleetIdHasBeenSet)
  {
    body.WithString("FleetId", m_fleetId);
  }

  if(m_vpcPeeringConnectionIdHasBeenSet)
  {
    body.WithString("VpcPeeringConnectionId", m_vpcPeeringConnectionId);
  }

  return body.View().WriteReadable();
}
// Adds the X-Amz-Target header that routes this request to the
// GameLift.DeleteVpcPeeringConnection operation.
Aws::Http::HeaderValueCollection DeleteVpcPeeringConnectionRequest::GetRequestSpecificHeaders() const
{
  Aws::Http::HeaderValueCollection headers;
  headers.emplace("X-Amz-Target", "GameLift.DeleteVpcPeeringConnection");
  return headers;
}
|
cedral/aws-sdk-cpp
|
aws-cpp-sdk-gamelift/source/model/DeleteVpcPeeringConnectionRequest.cpp
|
C++
|
apache-2.0
| 1,590
|
<?php
declare(strict_types=1);
namespace DoctrineMigrations;
use Doctrine\DBAL\Schema\Schema;
use Doctrine\Migrations\AbstractMigration;
use App\Migration\PostgresMigration;
/**
* Auto-generated Migration: Please modify to your needs!
*/
final class Version20210611212326 extends PostgresMigration
{
public function getDescription() : string
{
    // Auto-generated migration: no human-readable description was supplied.
    return '';
}
/**
 * Schema upgrade (auto-generated by Doctrine).
 *
 * For each many-to-many join table: drops the implicit "primary" index and
 * recreates it as an explicit composite PRIMARY KEY, then renames
 * Doctrine-generated indexes to their newly hashed identifiers. Also adds
 * one column: transres_adminreview.reviewProjectType.
 *
 * NOTE(review): every statement funnels through $this->processSql(), a
 * PostgresMigration helper defined outside this file — confirm its semantics
 * (presumably addSql with site-specific handling) before editing.
 * Statement order is significant: each DROP INDEX "primary" applies to the
 * table altered on the immediately following line.
 */
public function up(Schema $schema) : void
{
// this up() migration is auto-generated, please modify it to your needs
$this->abortIf($this->connection->getDatabasePlatform()->getName() !== 'postgresql', 'Migration can only be executed safely on \'postgresql\'.');
// --- fellapp_* join tables ---
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE fellapp_fellapp_coverletter ADD PRIMARY KEY (fellApp_id, coverLetter_id)');
$this->processSql('ALTER INDEX idx_a95c4269b3e07c1d RENAME TO IDX_263579C13111FEBE');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE fellapp_fellapp_cv ADD PRIMARY KEY (fellApp_id, cv_id)');
$this->processSql('ALTER INDEX idx_3429326acfe419e2 RENAME TO IDX_3385375CCFE419E2');
$this->processSql('ALTER INDEX idx_14f0c7a72a90cb41 RENAME TO IDX_7A4E80292D3CCE77');
$this->processSql('ALTER INDEX idx_2e24ab552a90cb41 RENAME TO IDX_F1D6BF172D3CCE77');
$this->processSql('ALTER INDEX idx_badd2c672a90cb41 RENAME TO IDX_F5802FB72D3CCE77');
$this->processSql('ALTER INDEX idx_f844cf582a90cb41 RENAME TO IDX_328CB2B62D3CCE77');
$this->processSql('ALTER INDEX idx_49db28d62a90cb41 RENAME TO IDX_FD0966152D3CCE77');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE fellapp_fellapp_document ADD PRIMARY KEY (fellApp_id, document_id)');
$this->processSql('ALTER INDEX idx_5183dcd7c33f7837 RENAME TO IDX_49B2072FC33F7837');
$this->processSql('ALTER INDEX idx_5e7a41de2a90cb41 RENAME TO IDX_ED040A2B2D3CCE77');
$this->processSql('ALTER INDEX idx_128512462a90cb41 RENAME TO IDX_AB4C9BE2D3CCE77');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE fellapp_fellapp_avatar ADD PRIMARY KEY (fellApp_id, avatar_id)');
$this->processSql('ALTER INDEX idx_688529cc86383b10 RENAME TO IDX_27D82A1C86383B10');
$this->processSql('ALTER INDEX idx_544c3f747e4972b4 RENAME TO IDX_4EB06F7FB32D0A6C');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE fellapp_fellowshipapplication_examination ADD PRIMARY KEY (fellowshipApplication_id, examination_id)');
$this->processSql('ALTER INDEX idx_602a117bdad0cfbf RENAME TO IDX_62CF1DE3DAD0CFBF');
$this->processSql('ALTER INDEX idx_dfc03c307e4972b4 RENAME TO IDX_C53C6C3BB32D0A6C');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE fellapp_fellowshipapplication_citizenship ADD PRIMARY KEY (fellowshipApplication_id, citizenship_id)');
$this->processSql('ALTER INDEX idx_ac0f1100c9709c85 RENAME TO IDX_AEEA1D98C9709C85');
$this->processSql('ALTER INDEX idx_8beaa2047e4972b4 RENAME TO IDX_BA8165E2B32D0A6C');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE fellapp_fellowshipapplication_boardcertification ADD PRIMARY KEY (fellowshipApplication_id, boardCertification_id)');
$this->processSql('ALTER INDEX idx_7b3ba6a5c2b1f452 RENAME TO IDX_299D9554D58E8F2F');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE fellapp_googleformconfig_fellowshipsubspecialty ADD PRIMARY KEY (googleformconfig_id, fellowshipsubspecialty_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE fellapp_reference_document ADD PRIMARY KEY (reference_id, document_id)');
// --- calllog_* and scan_* join tables ---
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE calllog_calllogentrymessage_document ADD PRIMARY KEY (message_id, document_id)');
$this->processSql('ALTER INDEX idx_15b668721aca1422 RENAME TO IDX_5AFC0F4BCD46F646');
$this->processSql('ALTER INDEX idx_156240fb3d3c30d3 RENAME TO IDX_DADF79673D3C30D3');
$this->processSql('ALTER INDEX encounter_unique00000 RENAME TO encounter_unique');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_message_encounter ADD PRIMARY KEY (message_id, encounter_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_message_accession ADD PRIMARY KEY (message_id, accession_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_message_block ADD PRIMARY KEY (message_id, block_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_message_imaging ADD PRIMARY KEY (message_id, imaging_id)');
$this->processSql('ALTER INDEX idx_6700c13e537a1329 RENAME TO IDX_E5F1439D537A1329');
$this->processSql('ALTER INDEX idx_79d11d14537a1329 RENAME TO IDX_FB209FB7537A1329');
$this->processSql('ALTER INDEX idx_3ec324c3537a1329 RENAME TO IDX_BC32A660537A1329');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_message_editors ADD PRIMARY KEY (message_id, editorInfo_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_message_input ADD PRIMARY KEY (message_id, input_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_message_associations ADD PRIMARY KEY (message_id, association_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_message_destination ADD PRIMARY KEY (message_id, destination_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_messagecategory_formnode ADD PRIMARY KEY (messageCategory_id, formNode_id)');
$this->processSql('ALTER INDEX idx_4aae944a88dbad51 RENAME TO IDX_9160929476694CD');
$this->processSql('ALTER INDEX idx_8e61413f88dbad51 RENAME TO IDX_9FD813AB476694CD');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_partpaper_document ADD PRIMARY KEY (partpaper_id, document_id)');
$this->processSql('ALTER INDEX part_unique00000 RENAME TO part_unique');
$this->processSql('ALTER INDEX idx_f6616091373182ea RENAME TO IDX_70FDEF46F88CBB76');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE scan_persitesettings_institution ADD PRIMARY KEY (perSiteSettings_id, institution_id)');
$this->processSql('ALTER INDEX idx_d956bfe410405986 RENAME TO IDX_3D7C119510405986');
$this->processSql('ALTER INDEX idx_d58d7e02bcfb922f RENAME TO IDX_D84E5574B2A22366');
$this->processSql('ALTER INDEX idx_a24210c9bcfb922f RENAME TO IDX_B57D6BB4B2A22366');
$this->processSql('ALTER INDEX procedure_unique00000 RENAME TO procedure_unique');
$this->processSql('ALTER INDEX idx_b7ab567ba66bd30d RENAME TO IDX_F8E1314271E73169');
$this->processSql('ALTER INDEX idx_f64015367909e1ed RENAME TO IDX_39FD2CAA7909E1ED');
// --- transres_* tables (includes the single column addition) ---
$this->processSql('ALTER TABLE transres_adminreview ADD reviewProjectType VARCHAR(255) DEFAULT NULL');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_antibody_document ADD PRIMARY KEY (request_id, document_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_invoice_document ADD PRIMARY KEY (invoice_id, document_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_project_principalinvestigator ADD PRIMARY KEY (project_id, principalinvestigator_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_project_coinvestigator ADD PRIMARY KEY (project_id, coinvestigator_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_project_pathologist ADD PRIMARY KEY (project_id, pathologist_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_project_contact ADD PRIMARY KEY (project_id, contact_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_project_document ADD PRIMARY KEY (project_id, document_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_project_irbapprovalletter ADD PRIMARY KEY (project_id, irbApprovalLetters_id)');
$this->processSql('ALTER INDEX idx_f11d06226072379a RENAME TO IDX_3BD57BCC72698C7A');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_project_humantissueform ADD PRIMARY KEY (project_id, humanTissueForm_id)');
$this->processSql('ALTER INDEX idx_117b33b4debe2636 RENAME TO IDX_FE149F5AA27D545F');
$this->processSql('ALTER INDEX idx_22b6422d166d1f9c RENAME TO IDX_C3A8494B166D1F9C');
$this->processSql('ALTER INDEX idx_c4c1047e166d1f9c RENAME TO IDX_B7C3DE2166D1F9C');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_request_principalinvestigator ADD PRIMARY KEY (request_id, principalinvestigator_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_request_document ADD PRIMARY KEY (request_id, document_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_request_packingslippdf ADD PRIMARY KEY (request_id, packingSlipPdf_id)');
$this->processSql('ALTER INDEX idx_f2120a63ff061bbc RENAME TO IDX_5E2751FB7F71D5A');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_request_oldpackingslippdf ADD PRIMARY KEY (request_id, oldPackingSlipPdf_id)');
$this->processSql('ALTER INDEX idx_2a7da8f1d8164bd RENAME TO IDX_10E9AF556BD350A1');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_request_antibody ADD PRIMARY KEY (request_id, antibody_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_request_businesspurpose ADD PRIMARY KEY (request_id, businessPurpose_id)');
$this->processSql('ALTER INDEX idx_45e86e982b3ab653 RENAME TO IDX_8A5557046467B583');
$this->processSql('ALTER INDEX siteparameters_unique00000 RENAME TO siteParameters_unique');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_transressiteparameters_transreslogo ADD PRIMARY KEY (transResSiteParameter_id, transresLogo_id)');
$this->processSql('ALTER INDEX idx_77e35b187408ff6 RENAME TO IDX_F898B8B948FDB66A');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE transres_transressiteparameters_transrespackingsliplogo ADD PRIMARY KEY (transResSiteParameter_id, transresPackingSlipLogo_id)');
$this->processSql('ALTER INDEX idx_e1d041ec7a315316 RENAME TO IDX_7ECB11F76E565D6D');
// --- user_* join tables ---
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_accountrequest_institution ADD PRIMARY KEY (request_id, institution_id)');
$this->processSql('ALTER INDEX idx_465c3939834995b1 RENAME TO IDX_BF2A5B6F834995B1');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_documentcontainer_document ADD PRIMARY KEY (documentcontainer_id, document_id)');
$this->processSql('ALTER INDEX idx_fb465c67c5b7a34a RENAME TO IDX_F172E05AB974D123');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_fellowshipsubspecialty_coordinator ADD PRIMARY KEY (fellowshipSubspecialty_id, coordinator_id)');
$this->processSql('ALTER INDEX idx_8e5bd5b6e7877946 RENAME TO IDX_708D2BCCE7877946');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_fellowshipsubspecialty_director ADD PRIMARY KEY (fellowshipSubspecialty_id, director_id)');
$this->processSql('ALTER INDEX idx_68324fb8899fb366 RENAME TO IDX_FFFA6A60899FB366');
$this->processSql('ALTER INDEX idx_e8b806bc8e87796 RENAME TO IDX_166EF8C6802D4908');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_users_researchlabs ADD PRIMARY KEY (user_id, researchlab_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_users_grants ADD PRIMARY KEY (user_id, grant_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_users_publications ADD PRIMARY KEY (user_id, publication_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_users_books ADD PRIMARY KEY (user_id, book_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_collaborationinstitution_collaboration ADD PRIMARY KEY (collaborationInstitution_id, collaboration_id)');
$this->processSql('ALTER INDEX idx_33047118ef1544ce RENAME TO IDX_832A3FC4EF1544CE');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_location_assistant ADD PRIMARY KEY (location_id, assistant_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_logger_institutions ADD PRIMARY KEY (logger_id, institution_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_medicaltitle_medicalspeciality ADD PRIMARY KEY (medicaltitle_id, medicalspeciality_id)');
$this->processSql('ALTER INDEX idx_759d120b38d5860e RENAME TO IDX_D1ADC86DC1A3E458');
$this->processSql('ALTER INDEX idx_3681a952ca5ecd96 RENAME TO IDX_52955BA8DC6F0D8');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_organizationalgroupdefault_permittedinstitutionalphiscope ADD PRIMARY KEY (permittedInstitutionalPHIScope_id, institution_id)');
$this->processSql('ALTER INDEX idx_205b297510405986 RENAME TO IDX_78626FE310405986');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_organizationalgroupdefault_language ADD PRIMARY KEY (organizationalgroupdefault_id, languagelist_id)');
$this->processSql('ALTER INDEX idx_a4cc6e35d88ec86e RENAME TO IDX_243B3090D88EC86E');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_organizationalgroupdefault_locationtype ADD PRIMARY KEY (organizationalgroupdefault_id, locationtypelist_id)');
$this->processSql('ALTER INDEX idx_fc2629a3d296b97 RENAME TO IDX_A5107C4D3D296B97');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_permission_institution ADD PRIMARY KEY (permission_id, institution_id)');
$this->processSql('ALTER INDEX idx_d4bb041eb9556f54 RENAME TO IDX_8CD97CB3C5961D3D');
$this->processSql('ALTER INDEX platformlist_unique00000 RENAME TO platformlist_unique');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_userpreferences_languages ADD PRIMARY KEY (userpreferences_id, languagelist_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_preferences_institutions ADD PRIMARY KEY (preferences_id, institution_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_roles_attributes ADD PRIMARY KEY (roles_id, roleattributelist_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_rooms_floors ADD PRIMARY KEY (roomlist_id, floorlist_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_rooms_buildings ADD PRIMARY KEY (roomlist_id, buildinglist_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_sites_lowestroles ADD PRIMARY KEY (site_id, role_id)');
$this->processSql('ALTER INDEX idx_64afd0fed60322ac RENAME TO IDX_A56EFFFAD60322AC');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_site_document ADD PRIMARY KEY (site_id, document_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_siteparameter_platformlogo ADD PRIMARY KEY (siteParameter_id, platformLogo_id)');
$this->processSql('ALTER INDEX idx_89f2aef6ea5be894 RENAME TO IDX_29C001C825E6D108');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_siteparameter_emailcriticalerrorexceptionuser ADD PRIMARY KEY (siteparameter_id, exceptionuser_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_suites_floors ADD PRIMARY KEY (suitelist_id, floorlist_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_suites_buildings ADD PRIMARY KEY (suitelist_id, buildinglist_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_trainings_majors ADD PRIMARY KEY (training_id, majortraininglist_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_trainings_minors ADD PRIMARY KEY (training_id, minortraininglist_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_trainings_honors ADD PRIMARY KEY (training_id, honortraininglist_id)');
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE user_userpositions_positiontypes ADD PRIMARY KEY (userposition_id, positiontypelist_id)');
$this->processSql('ALTER INDEX idx_a29abe84aae046c8 RENAME TO IDX_61BE9D18AAE046C8');
// --- vacreq_* join table ---
$this->processSql('DROP INDEX "primary"');
$this->processSql('ALTER TABLE vacreq_settings_user ADD PRIMARY KEY (settings_id, emailuser_id)');
}
public function down(Schema $schema) : void
{
// this down() migration is auto-generated, please modify it to your needs
$this->abortIf($this->connection->getDatabasePlatform()->getName() !== 'postgresql', 'Migration can only be executed safely on \'postgresql\'.');
$this->processSql('CREATE SCHEMA public');
$this->processSql('ALTER INDEX idx_7a4e80292d3cce77 RENAME TO IDX_14F0C7A72A90CB41');
$this->processSql('DROP INDEX pk__fellapp___8efd92876c0d4efa');
$this->processSql('ALTER TABLE fellapp_fellApp_document ADD PRIMARY KEY (document_id, fellapp_id)');
$this->processSql('ALTER INDEX idx_49b2072fc33f7837 RENAME TO IDX_5183DCD7C33F7837');
$this->processSql('DROP INDEX pk__fellapp___09b4914b61e7e62e');
$this->processSql('ALTER TABLE fellapp_fellApp_avatar ADD PRIMARY KEY (avatar_id, fellapp_id)');
$this->processSql('ALTER INDEX idx_27d82a1c86383b10 RENAME TO IDX_688529CC86383B10');
$this->processSql('DROP INDEX pk__fellapp___4ab74df40c4b8cc9');
$this->processSql('ALTER TABLE fellapp_fellowshipApplication_boardCertification ADD PRIMARY KEY (boardcertification_id, fellowshipapplication_id)');
$this->processSql('ALTER INDEX idx_299d9554d58e8f2f RENAME TO IDX_7B3BA6A5C2B1F452');
$this->processSql('ALTER INDEX idx_328cb2b62d3cce77 RENAME TO IDX_F844CF582A90CB41');
$this->processSql('DROP INDEX pk__fellapp___2bad743366caf130');
$this->processSql('ALTER TABLE fellapp_fellApp_cv ADD PRIMARY KEY (cv_id, fellapp_id)');
$this->processSql('ALTER INDEX idx_3385375ccfe419e2 RENAME TO IDX_3429326ACFE419E2');
$this->processSql('ALTER INDEX idx_4eb06f7fb32d0a6c RENAME TO IDX_544C3F747E4972B4');
$this->processSql('DROP INDEX pk__fellapp___88a87edecd56af33');
$this->processSql('ALTER TABLE fellapp_fellowshipApplication_examination ADD PRIMARY KEY (examination_id, fellowshipapplication_id)');
$this->processSql('ALTER INDEX idx_62cf1de3dad0cfbf RENAME TO IDX_602A117BDAD0CFBF');
$this->processSql('ALTER INDEX idx_ab4c9be2d3cce77 RENAME TO IDX_128512462A90CB41');
$this->processSql('DROP INDEX pk__fellapp___a7ce995aacd4854f');
$this->processSql('ALTER TABLE fellapp_fellowshipApplication_citizenship ADD PRIMARY KEY (citizenship_id, fellowshipapplication_id)');
$this->processSql('ALTER INDEX idx_aeea1d98c9709c85 RENAME TO IDX_AC0F1100C9709C85');
$this->processSql('ALTER INDEX idx_ba8165e2b32d0a6c RENAME TO IDX_8BEAA2047E4972B4');
$this->processSql('ALTER INDEX idx_c53c6c3bb32d0a6c RENAME TO IDX_DFC03C307E4972B4');
$this->processSql('DROP INDEX pk__calllog___d2d9006cd26843c2');
$this->processSql('ALTER TABLE calllog_calllogentrymessage_document ADD PRIMARY KEY (document_id, message_id)');
$this->processSql('ALTER INDEX idx_ed040a2b2d3cce77 RENAME TO IDX_5E7A41DE2A90CB41');
$this->processSql('ALTER INDEX idx_f1d6bf172d3cce77 RENAME TO IDX_2E24AB552A90CB41');
$this->processSql('ALTER INDEX idx_f5802fb72d3cce77 RENAME TO IDX_BADD2C672A90CB41');
$this->processSql('ALTER INDEX idx_fd0966152d3cce77 RENAME TO IDX_49DB28D62A90CB41');
$this->processSql('ALTER INDEX idx_9160929476694cd RENAME TO IDX_4AAE944A88DBAD51');
$this->processSql('ALTER INDEX idx_9fd813ab476694cd RENAME TO IDX_8E61413F88DBAD51');
$this->processSql('ALTER INDEX idx_b57d6bb4b2a22366 RENAME TO IDX_A24210C9BCFB922F');
$this->processSql('ALTER INDEX encounter_unique RENAME TO encounter_unique00000');
$this->processSql('ALTER INDEX idx_dadf79673d3c30d3 RENAME TO IDX_156240FB3D3C30D3');
$this->processSql('DROP INDEX pk__scan_mes__e7607da6e5168bbc');
$this->processSql('ALTER TABLE scan_message_encounter ADD PRIMARY KEY (encounter_id, message_id)');
$this->processSql('DROP INDEX pk__scan_mes__159b6203a22c0690');
$this->processSql('ALTER TABLE scan_message_accession ADD PRIMARY KEY (accession_id, message_id)');
$this->processSql('DROP INDEX pk__scan_mes__c1d888a170878e78');
$this->processSql('ALTER TABLE scan_message_block ADD PRIMARY KEY (block_id, message_id)');
$this->processSql('DROP INDEX pk__scan_mes__40cebaf52dc1a81e');
$this->processSql('ALTER TABLE scan_message_associations ADD PRIMARY KEY (association_id, message_id)');
$this->processSql('DROP INDEX pk__scan_mes__338f038ae84631cf');
$this->processSql('ALTER TABLE scan_message_editors ADD PRIMARY KEY (editorinfo_id, message_id)');
$this->processSql('DROP INDEX pk__scan_mes__0eef7bdf3ae540b7');
$this->processSql('ALTER TABLE scan_message_destination ADD PRIMARY KEY (destination_id, message_id)');
$this->processSql('DROP INDEX pk__scan_mes__4e177218167dde59');
$this->processSql('ALTER TABLE scan_messageCategory_formNode ADD PRIMARY KEY (formnode_id, messagecategory_id)');
$this->processSql('DROP INDEX pk__scan_mes__44f76bebfb8170ef');
$this->processSql('ALTER TABLE scan_message_imaging ADD PRIMARY KEY (imaging_id, message_id)');
$this->processSql('DROP INDEX pk__scan_mes__6b87801903e45e2f');
$this->processSql('ALTER TABLE scan_message_input ADD PRIMARY KEY (input_id, message_id)');
$this->processSql('ALTER INDEX idx_e5f1439d537a1329 RENAME TO IDX_6700C13E537A1329');
$this->processSql('ALTER INDEX idx_fb209fb7537a1329 RENAME TO IDX_79D11D14537A1329');
$this->processSql('DROP INDEX pk__scan_par__557c9e9ef2622a88');
$this->processSql('ALTER TABLE scan_partpaper_document ADD PRIMARY KEY (document_id, partpaper_id)');
$this->processSql('ALTER INDEX part_unique RENAME TO part_unique00000');
$this->processSql('ALTER INDEX idx_70fdef46f88cbb76 RENAME TO IDX_F6616091373182EA');
$this->processSql('DROP INDEX pk__scan_per__666b204c33f2b2f0');
$this->processSql('ALTER TABLE scan_perSiteSettings_institution ADD PRIMARY KEY (institution_id, persitesettings_id)');
$this->processSql('ALTER INDEX idx_3d7c119510405986 RENAME TO IDX_D956BFE410405986');
$this->processSql('ALTER INDEX idx_f8e1314271e73169 RENAME TO IDX_B7AB567BA66BD30D');
$this->processSql('ALTER INDEX procedure_unique RENAME TO procedure_unique00000');
$this->processSql('ALTER INDEX idx_d84e5574b2a22366 RENAME TO IDX_D58D7E02BCFB922F');
$this->processSql('ALTER INDEX idx_39fd2caa7909e1ed RENAME TO IDX_F64015367909E1ED');
$this->processSql('ALTER TABLE transres_adminReview DROP reviewProjectType');
$this->processSql('DROP INDEX pk__transres__0b11e36ce7ca5d70');
$this->processSql('ALTER TABLE transres_project_coinvestigator ADD PRIMARY KEY (coinvestigator_id, project_id)');
$this->processSql('DROP INDEX pk__transres__19312a38df2593d2');
$this->processSql('ALTER TABLE transres_project_irbApprovalLetter ADD PRIMARY KEY (irbapprovalletters_id, project_id)');
$this->processSql('ALTER INDEX idx_3bd57bcc72698c7a RENAME TO IDX_F11D06226072379A');
$this->processSql('DROP INDEX pk__transres__cc5d79b7365852fa');
$this->processSql('ALTER TABLE transres_project_contact ADD PRIMARY KEY (contact_id, project_id)');
$this->processSql('DROP INDEX pk__transres__651ff0956882d397');
$this->processSql('ALTER TABLE transres_project_document ADD PRIMARY KEY (document_id, project_id)');
$this->processSql('DROP INDEX pk__transres__2ceb93c3334224d5');
$this->processSql('ALTER TABLE transres_invoice_document ADD PRIMARY KEY (document_id, invoice_id)');
$this->processSql('DROP INDEX pk__transres__63a44e26c1580c92');
$this->processSql('ALTER TABLE transres_project_pathologist ADD PRIMARY KEY (pathologist_id, project_id)');
$this->processSql('DROP INDEX pk__transres__03e9258de8f3c1ee');
$this->processSql('ALTER TABLE transres_project_humanTissueForm ADD PRIMARY KEY (humantissueform_id, project_id)');
$this->processSql('ALTER INDEX idx_fe149f5aa27d545f RENAME TO IDX_117B33B4DEBE2636');
$this->processSql('DROP INDEX pk__transres__2b00b9da0588eb04');
$this->processSql('ALTER TABLE transres_request_oldPackingSlipPdf ADD PRIMARY KEY (oldpackingslippdf_id, request_id)');
$this->processSql('ALTER INDEX idx_10e9af556bd350a1 RENAME TO IDX_2A7DA8F1D8164BD');
$this->processSql('DROP INDEX pk__user_acc__f69d38a6ebc64f4d');
$this->processSql('ALTER TABLE user_accountrequest_institution ADD PRIMARY KEY (institution_id, request_id)');
$this->processSql('DROP INDEX pk__transres__c1b5d78589dcf2e9');
$this->processSql('ALTER TABLE transres_request_document ADD PRIMARY KEY (document_id, request_id)');
$this->processSql('DROP INDEX pk__transres__88f7f6574d8245ca');
$this->processSql('ALTER TABLE transres_transResSiteParameters_transresLogo ADD PRIMARY KEY (transreslogo_id, transressiteparameter_id)');
$this->processSql('ALTER INDEX idx_f898b8b948fdb66a RENAME TO IDX_77E35B187408FF6');
$this->processSql('DROP INDEX pk__transres__29a021d4af61d6c6');
$this->processSql('ALTER TABLE transres_transResSiteParameters_transresPackingSlipLogo ADD PRIMARY KEY (transrespackingsliplogo_id, transressiteparameter_id)');
$this->processSql('ALTER INDEX idx_7ecb11f76e565d6d RENAME TO IDX_E1D041EC7A315316');
$this->processSql('DROP INDEX pk__transres__87e3c6fcd2c2530c');
$this->processSql('ALTER TABLE transres_request_packingSlipPdf ADD PRIMARY KEY (packingslippdf_id, request_id)');
$this->processSql('ALTER INDEX idx_5e2751fb7f71d5a RENAME TO IDX_F2120A63FF061BBC');
$this->processSql('DROP INDEX pk__transres__b8a8b8ef0400bab2');
$this->processSql('ALTER TABLE transres_request_businessPurpose ADD PRIMARY KEY (businesspurpose_id, request_id)');
$this->processSql('ALTER INDEX idx_8a5557046467b583 RENAME TO IDX_45E86E982B3AB653');
$this->processSql('DROP INDEX pk__transres__565ed6eabd7c780d');
$this->processSql('ALTER TABLE transres_request_principalinvestigator ADD PRIMARY KEY (principalinvestigator_id, request_id)');
$this->processSql('ALTER INDEX idx_b7c3de2166d1f9c RENAME TO IDX_C4C1047E166D1F9C');
$this->processSql('DROP INDEX pk__transres__b1b06a8356f708f9');
$this->processSql('ALTER TABLE transres_request_antibody ADD PRIMARY KEY (antibody_id, request_id)');
$this->processSql('ALTER INDEX idx_c3a8494b166d1f9c RENAME TO IDX_22B6422D166D1F9C');
$this->processSql('ALTER INDEX idx_bf2a5b6f834995b1 RENAME TO IDX_465C3939834995B1');
$this->processSql('DROP INDEX pk__user_col__c48da81b7b714316');
$this->processSql('ALTER TABLE user_collaborationInstitution_collaboration ADD PRIMARY KEY (collaboration_id, collaborationinstitution_id)');
$this->processSql('ALTER INDEX idx_832a3fc4ef1544ce RENAME TO IDX_33047118EF1544CE');
$this->processSql('DROP INDEX pk__user_doc__2253520cf77fb24d');
$this->processSql('ALTER TABLE user_documentcontainer_document ADD PRIMARY KEY (document_id, documentcontainer_id)');
$this->processSql('ALTER INDEX idx_f172e05ab974d123 RENAME TO IDX_FB465C67C5B7A34A');
$this->processSql('DROP INDEX pk__user_fel__07eb659e4a39ae1b');
$this->processSql('ALTER TABLE user_fellowshipSubspecialty_director ADD PRIMARY KEY (director_id, fellowshipsubspecialty_id)');
$this->processSql('ALTER INDEX idx_fffa6a60899fb366 RENAME TO IDX_68324FB8899FB366');
$this->processSql('ALTER INDEX idx_166ef8c6802d4908 RENAME TO IDX_E8B806BC8E87796');
$this->processSql('DROP INDEX pk__user_fel__28db425da1707d96');
$this->processSql('ALTER TABLE user_fellowshipSubspecialty_coordinator ADD PRIMARY KEY (coordinator_id, fellowshipsubspecialty_id)');
$this->processSql('ALTER INDEX idx_708d2bcce7877946 RENAME TO IDX_8E5BD5B6E7877946');
$this->processSql('DROP INDEX pk__user_loc__35bcd52c208294ed');
$this->processSql('ALTER TABLE user_location_assistant ADD PRIMARY KEY (assistant_id, location_id)');
$this->processSql('ALTER INDEX idx_d1adc86dc1a3e458 RENAME TO IDX_759D120B38D5860E');
$this->processSql('DROP INDEX pk__user_log__97ffb833a182e135');
$this->processSql('ALTER TABLE user_logger_institutions ADD PRIMARY KEY (institution_id, logger_id)');
$this->processSql('DROP INDEX pk__user_med__689a97da1484ea19');
$this->processSql('ALTER TABLE user_medicaltitle_medicalspeciality ADD PRIMARY KEY (medicalspeciality_id, medicaltitle_id)');
$this->processSql('DROP INDEX pk__user_org__2798b6cee2fa03dc');
$this->processSql('ALTER TABLE user_organizationalGroupDefault_permittedInstitutionalPHIScope ADD PRIMARY KEY (institution_id, permittedinstitutionalphiscope_id)');
$this->processSql('ALTER INDEX idx_78626fe310405986 RENAME TO IDX_205B297510405986');
$this->processSql('ALTER INDEX idx_52955ba8dc6f0d8 RENAME TO IDX_3681A952CA5ECD96');
$this->processSql('DROP INDEX pk__user_org__5fe4fc345e1141a6');
$this->processSql('ALTER TABLE user_organizationalGroupDefault_locationtype ADD PRIMARY KEY (locationtypelist_id, organizationalgroupdefault_id)');
$this->processSql('ALTER INDEX idx_a5107c4d3d296b97 RENAME TO IDX_FC2629A3D296B97');
$this->processSql('DROP INDEX pk__user_per__0b7d9b53a84a7d17');
$this->processSql('ALTER TABLE user_permission_institution ADD PRIMARY KEY (institution_id, permission_id)');
$this->processSql('ALTER INDEX platformlist_unique RENAME TO platformlist_unique00000');
$this->processSql('DROP INDEX pk__user_pre__4536d4e84049c021');
$this->processSql('ALTER TABLE user_preferences_institutions ADD PRIMARY KEY (institution_id, preferences_id)');
$this->processSql('ALTER INDEX idx_8cd97cb3c5961d3d RENAME TO IDX_D4BB041EB9556F54');
$this->processSql('DROP INDEX pk__user_rol__5f8f6d8eb114b79e');
$this->processSql('ALTER TABLE user_roles_attributes ADD PRIMARY KEY (roleattributelist_id, roles_id)');
$this->processSql('DROP INDEX pk__user_sit__6b49b54038d4a002');
$this->processSql('ALTER TABLE user_site_document ADD PRIMARY KEY (document_id, site_id)');
$this->processSql('DROP INDEX pk__user_sit__db00a856bd763053');
$this->processSql('ALTER TABLE user_siteparameter_platformLogo ADD PRIMARY KEY (platformlogo_id, siteparameter_id)');
$this->processSql('ALTER INDEX idx_29c001c825e6d108 RENAME TO IDX_89F2AEF6EA5BE894');
$this->processSql('DROP INDEX pk__user_sit__f5766ba66a7a5b55');
$this->processSql('ALTER TABLE user_siteparameter_emailcriticalerrorexceptionuser ADD PRIMARY KEY (exceptionuser_id, siteparameter_id)');
$this->processSql('DROP INDEX pk__user_sit__654f4d96ed048b52');
$this->processSql('ALTER TABLE user_sites_lowestRoles ADD PRIMARY KEY (role_id, site_id)');
$this->processSql('ALTER INDEX idx_a56efffad60322ac RENAME TO IDX_64AFD0FED60322AC');
$this->processSql('DROP INDEX pk__user_roo__ac9af705947b0756');
$this->processSql('ALTER TABLE user_rooms_floors ADD PRIMARY KEY (floorlist_id, roomlist_id)');
$this->processSql('DROP INDEX pk__user_tra__5c60edaced31c275');
$this->processSql('ALTER TABLE user_trainings_minors ADD PRIMARY KEY (minortraininglist_id, training_id)');
$this->processSql('DROP INDEX pk__user_sui__49566268fd665a3c');
$this->processSql('ALTER TABLE user_suites_buildings ADD PRIMARY KEY (buildinglist_id, suitelist_id)');
$this->processSql('DROP INDEX pk__user_tra__ae3b80e435df1f7b');
$this->processSql('ALTER TABLE user_trainings_majors ADD PRIMARY KEY (majortraininglist_id, training_id)');
$this->processSql('DROP INDEX pk__user_tra__813e488d7f13a8d3');
$this->processSql('ALTER TABLE user_trainings_honors ADD PRIMARY KEY (honortraininglist_id, training_id)');
$this->processSql('DROP INDEX pk__user_use__946aa66016ca2554');
$this->processSql('ALTER TABLE user_userpreferences_languages ADD PRIMARY KEY (languagelist_id, userpreferences_id)');
$this->processSql('DROP INDEX pk__user_use__31f366e3bcc33919');
$this->processSql('ALTER TABLE user_users_publications ADD PRIMARY KEY (publication_id, user_id)');
$this->processSql('DROP INDEX pk__user_use__bd2ee6a18eb4edb3');
$this->processSql('ALTER TABLE user_users_books ADD PRIMARY KEY (book_id, user_id)');
$this->processSql('DROP INDEX pk__user_use__ce4d3a9f97fedef6');
$this->processSql('ALTER TABLE user_users_researchlabs ADD PRIMARY KEY (researchlab_id, user_id)');
$this->processSql('DROP INDEX pk__user_use__3327d6c1bcbc2e61');
$this->processSql('ALTER TABLE user_users_grants ADD PRIMARY KEY (grant_id, user_id)');
$this->processSql('DROP INDEX pk__vacreq_s__17bd0de8d9078ca0');
$this->processSql('ALTER TABLE vacreq_settings_user ADD PRIMARY KEY (emailuser_id, settings_id)');
$this->processSql('ALTER INDEX siteparameters_unique RENAME TO siteparameters_unique00000');
$this->processSql('DROP INDEX pk__transres__f2f4f1fa47f2f505');
$this->processSql('ALTER TABLE transres_project_principalinvestigator ADD PRIMARY KEY (principalinvestigator_id, project_id)');
$this->processSql('DROP INDEX pk__user_sui__2d7f55ed9f48fa8b');
$this->processSql('ALTER TABLE user_suites_floors ADD PRIMARY KEY (floorlist_id, suitelist_id)');
$this->processSql('DROP INDEX pk__fellapp___57e065a2d50023cc');
$this->processSql('ALTER TABLE fellapp_reference_document ADD PRIMARY KEY (document_id, reference_id)');
$this->processSql('DROP INDEX pk__user_org__9da457f1e95d5a3d');
$this->processSql('ALTER TABLE user_organizationalGroupDefault_language ADD PRIMARY KEY (languagelist_id, organizationalgroupdefault_id)');
$this->processSql('ALTER INDEX idx_243b3090d88ec86e RENAME TO IDX_A4CC6E35D88EC86E');
$this->processSql('DROP INDEX pk__fellapp___1e656710b5f511b5');
$this->processSql('ALTER TABLE fellapp_fellApp_coverLetter ADD PRIMARY KEY (coverletter_id, fellapp_id)');
$this->processSql('ALTER INDEX idx_263579c13111febe RENAME TO IDX_A95C4269B3E07C1D');
$this->processSql('DROP INDEX pk__transres__c1b5d785d13af751');
$this->processSql('ALTER TABLE transres_antibody_document ADD PRIMARY KEY (document_id, request_id)');
$this->processSql('ALTER INDEX idx_5afc0f4bcd46f646 RENAME TO IDX_15B668721ACA1422');
$this->processSql('DROP INDEX pk__user_use__56c84e47dcdd4347');
$this->processSql('ALTER TABLE user_userPositions_positionTypes ADD PRIMARY KEY (positiontypelist_id, userposition_id)');
$this->processSql('ALTER INDEX idx_61be9d18aae046c8 RENAME TO IDX_A29ABE84AAE046C8');
$this->processSql('DROP INDEX pk__fellapp___256da0502a31d670');
$this->processSql('ALTER TABLE fellapp_googleformconfig_fellowshipsubspecialty ADD PRIMARY KEY (fellowshipsubspecialty_id, googleformconfig_id)');
$this->processSql('DROP INDEX pk__user_roo__c8b3c08035703210');
$this->processSql('ALTER TABLE user_rooms_buildings ADD PRIMARY KEY (buildinglist_id, roomlist_id)');
$this->processSql('ALTER INDEX idx_bc32a660537a1329 RENAME TO IDX_3EC324C3537A1329');
}
}
|
victorbrodsky/order-lab
|
orderflex/src/Migrations/Version20210611212326.php
|
PHP
|
apache-2.0
| 37,814
|
//
// Last.Backend LLC CONFIDENTIAL
// __________________
//
// [2014] - [2019] Last.Backend LLC
// All Rights Reserved.
//
// NOTICE: All information contained herein is, and remains
// the property of Last.Backend LLC and its suppliers,
// if any. The intellectual and technical concepts contained
// herein are proprietary to Last.Backend LLC
// and its suppliers and may be covered by Russian Federation and Foreign Patents,
// patents in process, and are protected by trade secret or copyright law.
// Dissemination of this information or reproduction of this material
// is strictly forbidden unless prior written permission is obtained
// from Last.Backend LLC.
//
// +build linux
package cni
import (
"github.com/lastbackend/lastbackend/pkg/runtime/cni"
"github.com/lastbackend/lastbackend/pkg/runtime/cni/local"
"github.com/lastbackend/lastbackend/pkg/runtime/cni/vxlan"
"github.com/spf13/viper"
)
func New(v *viper.Viper) (cni.CNI, error) {
switch v.GetString("network.cni.type") {
case "vxlan":
return vxlan.New(v.GetString("network.interface"))
default:
return local.New()
}
}
|
deployithq/deployit
|
pkg/runtime/cni/cni/cni_linux.go
|
GO
|
apache-2.0
| 1,106
|
Bridge.merge(new System.Globalization.CultureInfo("en-ZM", true), {
englishName: "English (Zambia)",
nativeName: "English (Zambia)",
numberFormat: Bridge.merge(new System.Globalization.NumberFormatInfo(), {
nanSymbol: "NaN",
negativeSign: "-",
positiveSign: "+",
negativeInfinitySymbol: "-∞",
positiveInfinitySymbol: "∞",
percentSymbol: "%",
percentGroupSizes: [3],
percentDecimalDigits: 2,
percentDecimalSeparator: ".",
percentGroupSeparator: ",",
percentPositivePattern: 1,
percentNegativePattern: 1,
currencySymbol: "K",
currencyGroupSizes: [3],
currencyDecimalDigits: 2,
currencyDecimalSeparator: ".",
currencyGroupSeparator: ",",
currencyNegativePattern: 1,
currencyPositivePattern: 0,
numberGroupSizes: [3],
numberDecimalDigits: 2,
numberDecimalSeparator: ".",
numberGroupSeparator: ",",
numberNegativePattern: 1
}),
dateTimeFormat: Bridge.merge(new System.Globalization.DateTimeFormatInfo(), {
abbreviatedDayNames: ["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],
abbreviatedMonthGenitiveNames: ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec",""],
abbreviatedMonthNames: ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec",""],
amDesignator: "AM",
dateSeparator: "/",
dayNames: ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],
firstDayOfWeek: 1,
fullDateTimePattern: "dddd, d MMMM yyyy h:mm:ss tt",
longDatePattern: "dddd, d MMMM yyyy",
longTimePattern: "h:mm:ss tt",
monthDayPattern: "d MMMM",
monthGenitiveNames: ["January","February","March","April","May","June","July","August","September","October","November","December",""],
monthNames: ["January","February","March","April","May","June","July","August","September","October","November","December",""],
pmDesignator: "PM",
rfc1123: "ddd, dd MMM yyyy HH':'mm':'ss 'GMT'",
shortDatePattern: "dd/MM/yyyy",
shortestDayNames: ["Su","Mo","Tu","We","Th","Fr","Sa"],
shortTimePattern: "h:mm tt",
sortableDateTimePattern: "yyyy'-'MM'-'dd'T'HH':'mm':'ss",
sortableDateTimePattern1: "yyyy'-'MM'-'dd",
timeSeparator: ":",
universalSortableDateTimePattern: "yyyy'-'MM'-'dd HH':'mm':'ss'Z'",
yearMonthPattern: "MMMM yyyy",
roundtripFormat: "yyyy'-'MM'-'dd'T'HH':'mm':'ss.fffffffzzz"
}),
TextInfo: Bridge.merge(new System.Globalization.TextInfo(), {
ANSICodePage: 1252,
CultureName: "en-ZM",
EBCDICCodePage: 500,
IsRightToLeft: false,
LCID: 4096,
listSeparator: ",",
MacCodePage: 10000,
OEMCodePage: 850,
IsReadOnly: true
})
});
|
AndreyZM/Bridge
|
Bridge/Resources/Locales/en-ZM.js
|
JavaScript
|
apache-2.0
| 2,939
|
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.Orchestration.Airflow.Service.V1.Snippets
{
// [START composer_v1_generated_Environments_ListEnvironments_async_flattened]
using Google.Api.Gax;
using System;
using System.Linq;
using System.Threading.Tasks;
using gcoasv = Google.Cloud.Orchestration.Airflow.Service.V1;
public sealed partial class GeneratedEnvironmentsClientSnippets
{
/// <summary>Snippet for ListEnvironmentsAsync</summary>
/// <remarks>
/// This snippet has been automatically generated for illustrative purposes only.
/// It may require modifications to work in your environment.
/// </remarks>
public async Task ListEnvironmentsAsync()
{
// Create client
EnvironmentsClient environmentsClient = await EnvironmentsClient.CreateAsync();
// Initialize request argument(s)
string parent = "";
// Make the request
PagedAsyncEnumerable<ListEnvironmentsResponse, gcoasv::Environment> response = environmentsClient.ListEnvironmentsAsync(parent);
// Iterate over all response items, lazily performing RPCs as required
await response.ForEachAsync((gcoasv::Environment item) =>
{
// Do something with each item
Console.WriteLine(item);
});
// Or iterate over pages (of server-defined size), performing one RPC per page
await response.AsRawResponses().ForEachAsync((ListEnvironmentsResponse page) =>
{
// Do something with each page of items
Console.WriteLine("A page of results:");
foreach (gcoasv::Environment item in page)
{
// Do something with each item
Console.WriteLine(item);
}
});
// Or retrieve a single page of known size (unless it's the final page), performing as many RPCs as required
int pageSize = 10;
Page<gcoasv::Environment> singlePage = await response.ReadPageAsync(pageSize);
// Do something with the page of items
Console.WriteLine($"A page of {pageSize} results (unless it's the final page):");
foreach (gcoasv::Environment item in singlePage)
{
// Do something with each item
Console.WriteLine(item);
}
// Store the pageToken, for when the next page is required.
string nextPageToken = singlePage.NextPageToken;
}
}
// [END composer_v1_generated_Environments_ListEnvironments_async_flattened]
}
|
jskeet/google-cloud-dotnet
|
apis/Google.Cloud.Orchestration.Airflow.Service.V1/Google.Cloud.Orchestration.Airflow.Service.V1.GeneratedSnippets/EnvironmentsClient.ListEnvironmentsAsyncSnippet.g.cs
|
C#
|
apache-2.0
| 3,304
|
1.0
* Initial commit
|
serilog/serilog-sinks-datadog
|
CHANGES.md
|
Markdown
|
apache-2.0
| 21
|
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 The Go Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcloud
import (
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/coreos/pkg/capnslog"
"google.golang.org/api/compute/v1"
"github.com/coreos/mantle/auth"
"github.com/coreos/mantle/platform"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/mantle", "platform/api/gcloud")
)
type Options struct {
Image string
Project string
Zone string
MachineType string
DiskType string
Network string
JSONKeyFile string
ServiceAuth bool
*platform.Options
}
type API struct {
client *http.Client
compute *compute.Service
options *Options
}
func New(opts *Options) (*API, error) {
const endpointPrefix = "https://www.googleapis.com/compute/v1/"
// If the image name isn't a full api endpoint accept a name beginning
// with "projects/" to specify a different project from the instance.
// Also accept a short name and use instance project.
if strings.HasPrefix(opts.Image, "projects/") {
opts.Image = endpointPrefix + opts.Image
} else if !strings.Contains(opts.Image, "/") {
opts.Image = fmt.Sprintf("%sprojects/%s/global/images/%s", endpointPrefix, opts.Project, opts.Image)
} else if !strings.HasPrefix(opts.Image, endpointPrefix) {
return nil, fmt.Errorf("GCE Image argument must be the full api endpoint, begin with 'projects/', or use the short name")
}
var (
client *http.Client
err error
)
if opts.ServiceAuth {
client = auth.GoogleServiceClient()
} else if opts.JSONKeyFile != "" {
b, err := ioutil.ReadFile(opts.JSONKeyFile)
if err != nil {
plog.Fatal(err)
}
client, err = auth.GoogleClientFromJSONKey(b)
} else {
client, err = auth.GoogleClient()
}
if err != nil {
return nil, err
}
capi, err := compute.New(client)
if err != nil {
return nil, err
}
api := &API{
client: client,
compute: capi,
options: opts,
}
return api, nil
}
func (a *API) Client() *http.Client {
return a.client
}
func (a *API) GC(gracePeriod time.Duration) error {
return a.gcInstances(gracePeriod)
}
|
glevand/coreos--mantle
|
platform/api/gcloud/api.go
|
GO
|
apache-2.0
| 2,643
|
Tutorial
--------
Images in HTML are inline elements that can be placed within a paragraph. To add an image, use the `<img>` tag
along with the `src` attribute to specify the location of the image.
<img src="/static/img/code.jpg">
You may use JavaScript to trigger an event when an image finished loading.
<img src="/static/img/code.jpg" onload="alert('image loaded')">
Resizing the image can be done using the width and height attributes of an image, or alternatively by using CSS:
<img src="/static/img/code.jpg" width="100">
<img src="/static/img/code.jpg" style="width: 100px">
Having an "alt" attribute set for the image is useful for when an image could not load or when you want to
add a tooltip description that will be displayed when hovering on top of an image.
<img src="/static/img/code.jpg" style="width: 100px" alt="A picture of some code">
### Image Types
There are three main types of image formats which you should be using.
* Lossless formats - useful for when you need pixel-perfect graphics, for example for logos.
The most common format is PNG. PNG also supports alpha transparency, meaning that you can use any background you want
and overlay the image on top of that background.
* Lossy formats - useful for displaying rich images. Using a lossless format such as PNG would be an order
of magnitude larger if used in such images. The most common format used in this category is JPG.
* Animated formats - useful for showing short animated images. The most common format is GIF, although it is a very old
yet widely supported format, with many inherent drawbacks, such as a 256 color limit on each frame, and bad compression.
### Using the CSS float attribute with images
Images can be set to float nearby text so they would blend with the text better. Notice the use of the `clear` CSS
attribute - which directs the browser to break the floating effect after the first paragraph.
<img src="/static/img/lab.png" style="float: left;">
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore
magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>
<p class="clear: both">Second paragraph</p>
Exercise
--------
This page does not have an exercise yet. You are welcome to contribute one by sending me a pull request:
[[https://github.com/ronreiter/interactive-tutorials]]
Tutorial Code
-------------
<!DOCTYPE html>
<html>
<head>
</head>
<body>
</body>
</html>
Expected Output
---------------
<!DOCTYPE html>
<html>
<head>
<title>Hello, World!</title>
</head>
<body>
<p>Hello, World!</p>
</body>
</html>
Solution
--------
<!DOCTYPE html>
<html>
<head>
<title>Hello, World!</title>
</head>
<body>
<p>Hello, World!</p>
</body>
</html>
|
ronreiter/interactive-tutorials
|
tutorials/learn-html.org/nl/Images.md
|
Markdown
|
apache-2.0
| 3,265
|
package com.carrotsearch.randomizedtesting;
import static org.junit.Assert.*;
import java.util.Arrays;
import java.util.Collections;
import org.assertj.core.api.Assertions;
import org.junit.Assert;
import org.junit.Test;
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import com.carrotsearch.randomizedtesting.annotations.Repeat;
import com.carrotsearch.randomizedtesting.annotations.Seed;
import com.carrotsearch.randomizedtesting.annotations.Seeds;
/**
 * Tests of {@link ParametersFactory}-based (parameterized) randomized suites:
 * interaction with {@link Repeat} and fixed {@link Seeds}, {@link Name} values
 * in test descriptions, empty parameter lists and non-Object[] factory rows.
 */
public class TestParameterized extends WithNestedTestClass {
  /** Parameterized suite combining @Repeat and fixed @Seeds. */
  public static class Nested extends RandomizedTest {
    public Nested(@Name("value") int value, @Name("string") String v) {
    }

    @Test
    @Repeat(iterations = 3)
    public void testOne() {
    }

    @Test
    public void testTwo() {
    }

    // Runs once per fixed seed, each seed repeated twice with a constant seed.
    @Seeds({@Seed("deadbeef"), @Seed("cafebabe")})
    @Test
    @Repeat(iterations = 2, useConstantSeed = true)
    public void testThree() {
    }

    // Two parameter sets; every test method above executes once per set.
    @ParametersFactory
    public static Iterable<Object[]> parameters() {
      return Arrays.asList($$(
          $(1, "abc"),
          $(2, "def")));
    }
  }

  @Test
  public void testWithRepeatsAndSeeds() {
    // 2 parameter sets * (3 repeats + 1 + 2 seeds * 2 repeats) = 16 executions.
    checkTestsOutput(16, 0, 0, 0, Nested.class);
  }

  /** Suite whose single test fails; used to inspect the failure description. */
  public static class Nested2 extends RandomizedTest {
    public Nested2(@Name("paramName") int value) {
    }

    @Test
    public void failing() {
      assumeRunningNested();
      fail();
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() {
      assumeRunningNested();
      return Arrays.asList($$($("xyz")));
    }
  }

  @Test
  public void testNameAnnotation() {
    FullResult r = checkTestsOutput(1, 0, 1, 0, Nested2.class);
    Assertions.assertThat(r.getFailures()).hasSize(1);
    // The @Name("paramName") value must appear in the description's method name...
    Assertions.assertThat(r.getFailures().get(0).getDescription().getMethodName())
        .contains("paramName=xyz");
    // ...while RandomizedRunner.methodName() strips the parameter decoration.
    Assert.assertEquals("failing", RandomizedRunner.methodName(r.getFailures().get(0).getDescription()));
  }

  /** Factory returns an empty list: expect zero executed tests. */
  public static class Nested3 extends Nested2 {
    public Nested3(@Name("paramName") int value) {
      super(value);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() {
      return Collections.emptyList();
    }
  }

  /** Factory fails an assumption before throwing: expect zero executed tests. */
  public static class Nested4 extends Nested3 {
    public Nested4(@Name("paramName") int value) {
      super(value);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() {
      assumeTrue(false);
      throw new RuntimeException();
    }
  }

  /** Factory mixing an Object[] row and an Integer[] row. */
  public static class Nested5 extends RandomizedTest {
    public Nested5() {}

    @Test
    public void testMe() {}

    @ParametersFactory
    public static Iterable<Object[]> parameters() {
      return Arrays.asList(new Object[] {},
          new Integer[] {});
    }
  }

  @Test
  public void testEmptyParamsList() {
    checkTestsOutput(0, 0, 0, 0, Nested3.class);
    checkTestsOutput(0, 0, 0, 0, Nested4.class);
  }

  @Test
  public void testNonObjectArray() {
    // Both (empty) rows are accepted, yielding one testMe execution each.
    checkTestsOutput(2, 0, 0, 0, Nested5.class);
  }
}
|
randomizedtesting/randomizedtesting
|
randomized-runner/src/test/java/com/carrotsearch/randomizedtesting/TestParameterized.java
|
Java
|
apache-2.0
| 3,109
|
module BABYLON {
    /**
     * Post process applying a 3x3 convolution kernel (flat 9-element array)
     * to the rendered image via the "convolution" fragment shader. The screen
     * size and the kernel are re-uploaded as uniforms on every apply.
     */
    export class ConvolutionPostProcess extends PostProcess{
        constructor(name: string, public kernel: number[], options: number | PostProcessOptions, camera: Camera, samplingMode?: number, engine?: Engine, reusable?: boolean) {
            super(name, "convolution", ["kernel", "screenSize"], null, options, camera, samplingMode, engine, reusable);
            this.onApply = (effect: Effect) => {
                // Refresh each frame; width/height may change when the target resizes.
                effect.setFloat2("screenSize", this.width, this.height);
                effect.setArray("kernel", this.kernel);
            };
        }
        // Statics: ready-made kernels.
        // Based on http://en.wikipedia.org/wiki/Kernel_(image_processing)
        public static EdgeDetect0Kernel = [1, 0, -1, 0, 0, 0, -1, 0, 1];
        public static EdgeDetect1Kernel = [0, 1, 0, 1, -4, 1, 0, 1, 0];
        public static EdgeDetect2Kernel = [-1, -1, -1, -1, 8, -1, -1, -1, -1];
        public static SharpenKernel = [0, -1, 0, -1, 5, -1, 0, -1, 0];
        public static EmbossKernel = [-2, -1, 0, -1, 1, 1, 0, 1, 2];
        // NOTE(review): these weights are an unnormalized cross-shaped blur, not a
        // true 3x3 Gaussian ([1,2,1,2,4,2,1,2,1]/16) -- kept as-is for compatibility.
        public static GaussianKernel = [0, 1, 0, 1, 1, 1, 0, 1, 0];
    }
}
|
Temechon/Babylon.js
|
src/PostProcess/babylon.convolutionPostProcess.ts
|
TypeScript
|
apache-2.0
| 1,105
|
{template 'common/_mobile'}
手机端
|
snailto/wq
|
addons/yc_expressage/template/mobile/test/test.html
|
HTML
|
apache-2.0
| 38
|
---
layout: vakit_dashboard
title: MONDSEE, AVUSTURYA için iftar, namaz vakitleri ve hava durumu - ilçe/eyalet seç
permalink: /AVUSTURYA/MONDSEE/
---
<script type="text/javascript">
var GLOBAL_COUNTRY = 'AVUSTURYA';
var GLOBAL_CITY = 'MONDSEE';
var GLOBAL_STATE = '';
var lat = 72;
var lon = 21;
</script>
|
hakanu/iftar
|
_posts_/vakit/AVUSTURYA/MONDSEE/2017-02-01-.markdown
|
Markdown
|
apache-2.0
| 320
|
#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
import datetime
import sys
import unittest
import test_env
test_env.setup_test_env()
# From components/third_party/
import webtest
import webapp2
import stats
from components import stats_framework
from support import stats_framework_mock
from support import test_case
# pylint: disable=R0201
class Store(webapp2.RequestHandler):
  def get(self):
    """Generates fake stats."""
    # One fake upload entry: 2048 bytes via 'GS; inline' (see test_store).
    stats.add_entry(stats.STORE, 2048, 'GS; inline')
    self.response.write('Yay')
class Return(webapp2.RequestHandler):
  def get(self):
    """Generates fake stats."""
    # One fake download entry: 4096 bytes from 'memcache' (see test_return).
    stats.add_entry(stats.RETURN, 4096, 'memcache')
    self.response.write('Yay')
class Lookup(webapp2.RequestHandler):
  def get(self):
    """Generates fake stats."""
    # One fake contains-request with 200 lookups (see test_lookup).
    stats.add_entry(stats.LOOKUP, 200, 103)
    self.response.write('Yay')
class Dupe(webapp2.RequestHandler):
  def get(self):
    """Generates fake stats."""
    # One fake entry counted under 'other_requests' (see test_dupe).
    stats.add_entry(stats.DUPE, 1024, 'inline')
    self.response.write('Yay')
def to_str(now, delta):
  """Returns |now| shifted forward by |delta| seconds as a formatted unicode
  string, using the stats module's canonical datetime format."""
  shifted = now + datetime.timedelta(seconds=delta)
  return unicode(shifted.strftime(stats.utils.DATETIME_FORMAT))
class StatsTest(test_case.TestCase, stats_framework_mock.MockMixIn):
  def setUp(self):
    """Wires the fake handlers into a test WSGI app with mocked time/stats."""
    super(StatsTest, self).setUp()
    fake_routes = [
        ('/store', Store),
        ('/return', Return),
        ('/lookup', Lookup),
        ('/dupe', Dupe),
    ]
    self.app = webtest.TestApp(
        webapp2.WSGIApplication(fake_routes, debug=True),
        extra_environ={'REMOTE_ADDR': 'fake-ip'})
    stats_framework_mock.configure(self)
    # Freeze "now" so the generated entry lands in a known minute bucket.
    self.now = datetime.datetime(2010, 1, 2, 3, 4, 5, 6)
    self.mock_now(self.now, 0)

  def _test_handler(self, url, added_data):
    """Hits |url| once, aggregates stats, and asserts the per-minute snapshot.

    |added_data| contains only the fields expected to differ from the
    all-zeroes baseline below.
    """
    stats_framework_mock.reset_timestamp(stats.STATS_HANDLER, self.now)
    self.assertEqual('Yay', self.app.get(url).body)
    # Exactly one raw entry should have been produced by the handler.
    self.assertEqual(1, len(list(stats_framework.yield_entries(None, None))))
    # Advance one minute so the entry becomes eligible for aggregation.
    self.mock_now(self.now, 60)
    self.assertEqual(10, stats.generate_stats())
    actual = stats_framework.get_stats(
        stats.STATS_HANDLER, 'minutes', self.now, 1, True)
    # Baseline: one request in the 03:04 minute bucket, all counters zero.
    expected = [
        {
          'contains_lookups': 0,
          'contains_requests': 0,
          'downloads': 0,
          'downloads_bytes': 0,
          'failures': 0,
          'key': datetime.datetime(2010, 1, 2, 3, 4),
          'other_requests': 0,
          'requests': 1,
          'uploads': 0,
          'uploads_bytes': 0,
        },
    ]
    expected[0].update(added_data)
    self.assertEqual(expected, actual)

  def test_store(self):
    expected = {
      'uploads': 1,
      'uploads_bytes': 2048,
    }
    self._test_handler('/store', expected)

  def test_return(self):
    expected = {
      'downloads': 1,
      'downloads_bytes': 4096,
    }
    self._test_handler('/return', expected)

  def test_lookup(self):
    expected = {
      'contains_lookups': 200,
      'contains_requests': 1,
    }
    self._test_handler('/lookup', expected)

  def test_dupe(self):
    expected = {
      'other_requests': 1,
    }
    self._test_handler('/dupe', expected)
if __name__ == '__main__':
  if '-v' in sys.argv:
    # Show full diffs on assertion failures when running verbosely.
    unittest.TestCase.maxDiff = None
  unittest.main()
|
madecoste/swarming
|
appengine/isolate/tests/stats_test.py
|
Python
|
apache-2.0
| 3,366
|
#
# Cookbook Name:: delivery-cluster
# Library:: helpers
#
# Author:: Salim Afiune (<afiune@chef.io>)
#
# Copyright:: Copyright (c) 2015 Chef Software, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'openssl'
require 'fileutils'
require 'securerandom'
module DeliveryCluster
  #
  # Helpers Module for general purposes
  #
  module Helpers
    module_function

    # Retrieve the common cluster recipes
    #
    # This helper will return the common cluster recipes that customers specify in the
    # attribute `['delivery-cluster']['common_cluster_recipes']` plus the ones that
    # Chef considered as default/needed. Those recipes will be included to the run_list
    # of all the servers of the delivery-cluster.
    def common_cluster_recipes(node)
      default_cluster_recipes + node['delivery-cluster']['common_cluster_recipes']
    end

    # Retrieve the default cluster recipes
    #
    # Return the default cluster recipes from Chef. These recipes are the ones we
    # internally need to let delivery-cluster work properly.
    #
    # To add more recipes, simply include them to the array.
    def default_cluster_recipes
      ['delivery-cluster::pkg_repo_management']
    end

    # Provisioning Driver Instance
    #
    # NOTE(review): memoized in a module-level ivar -- the first driver resolved
    # wins for the remainder of the Chef run; confirm this is intended.
    #
    # @param node [Chef::Node] Chef Node object
    # @return [DeliveryCluster::Provisioning::Base] provisioning driver instance
    def provisioning(node)
      check_attribute?(node['delivery-cluster']['driver'], "node['delivery-cluster']['driver']")
      @provisioning ||= DeliveryCluster::Provisioning.for_driver(node['delivery-cluster']['driver'], node)
    end

    # The current directory PATH
    # This is coming from the .chef/knife.rb
    #
    # @return [String] current directory path
    def current_dir
      Chef::Config.chef_repo_path
    end

    # Cluster Data directory link
    #
    # @return [Bool] True if cluster directory is a link, False if not
    def cluster_data_dir_link?
      File.symlink?(File.join(current_dir, '.chef', 'delivery-cluster-data'))
    end

    # Delivery Cluster data directory
    #
    # @param node [Chef::Node] Chef Node object
    # @return [String] PATH of the Delivery cluster data directory
    def cluster_data_dir(node)
      File.join(current_dir, '.chef', "delivery-cluster-data-#{delivery_cluster_id(node)}")
    end

    # Use the Private IP for SSH
    #
    # @param node [Chef::Node] Chef Node object
    # @return [Bool] True if we need to use the private ip for ssh, False if not
    def use_private_ip_for_ssh(node)
      check_attribute?(node['delivery-cluster']['driver'], "node['delivery-cluster']['driver']")
      node['delivery-cluster'][node['delivery-cluster']['driver']]['use_private_ip_for_ssh']
    end

    # Get the IP address from the Provisioning Abstraction
    #
    # @param node [Chef::Node] Chef Node object
    # @param machine_node [Chef::Node] Chef Node object of the machine we would like to get the ip
    # @return [String] ip address
    def get_ip(node, machine_node)
      provisioning(node).ipaddress(machine_node)
    end

    # Extracting the username from the provisioning abstraction
    #
    # @param node [Chef::Node] Chef Node object
    # @return [String] username
    def username(node)
      provisioning(node).username
    end

    # Delivery Cluster ID
    # If a cluster id was not provided (via the attribute) we'll generate
    # a unique cluster id and immediately save it in case the CCR fails.
    #
    # @param node [Chef::Node] Chef Node object
    # @return [String] cluster id
    def delivery_cluster_id(node)
      unless node['delivery-cluster']['id']
        node.set['delivery-cluster']['id'] = "test-#{SecureRandom.hex(3)}"
        # Saved eagerly so a failed converge still keeps the generated id.
        node.save
      end
      node['delivery-cluster']['id']
    end

    # Encrypted Data Bag Secret
    # Generate or load an existing encrypted data bag secret
    #
    # NOTE(review): also memoized module-wide; the file on disk wins over a
    # freshly generated secret.
    #
    # @param node [Chef::Node] Chef Node object
    # @return [String] encrypted data bag secret
    def encrypted_data_bag_secret(node)
      @encrypted_data_bag_secret ||= begin
        if File.exist?("#{cluster_data_dir(node)}/encrypted_data_bag_secret")
          File.read("#{cluster_data_dir(node)}/encrypted_data_bag_secret")
        else
          # Ruby's `SecureRandom` module uses OpenSSL under the covers
          SecureRandom.base64(512)
        end
      end
    end

    # Generate Knife Variables
    # to use them to create a new knife config file that will point at the new
    # delivery cluster to facilitate its management within the `cluster_data_dir`
    #
    # @param node [Chef::Node] Chef Node object
    # @return [Hash] knife variables to render a customized knife.rb
    def knife_variables(node)
      {
        chef_server_url: DeliveryCluster::Helpers::ChefServer.chef_server_url(node),
        client_key: "#{cluster_data_dir(node)}/delivery.pem",
        analytics_server_url: if DeliveryCluster::Helpers::Analytics.analytics_enabled?(node)
                                "https://#{DeliveryCluster::Helpers::Analytics.analytics_server_fqdn(node)}/organizations" \
                                "/#{node['delivery-cluster']['chef-server']['organization']}"
                              else
                                ''
                              end,
        supermarket_site: if DeliveryCluster::Helpers::Supermarket.supermarket_enabled?(node)
                            "https://#{DeliveryCluster::Helpers::Supermarket.supermarket_server_fqdn(node)}"
                          else
                            ''
                          end
      }
    end

    # Validate License File
    # Because Delivery requires a license, we want to make sure that the
    # user has the necessary license file on the provisioning node before we begin.
    # This method will check for the license file in the compile phase to prevent
    # any work being done if the user doesn't even have a license.
    #
    # @param node [Chef::Node] Chef Node object
    def validate_license_file(node)
      return unless node['delivery-cluster']['delivery']['license_file'].nil?
      fail DeliveryCluster::Exceptions::LicenseNotFound, "node['delivery-cluster']['delivery']['license_file']"
    end

    # Validate Attribute
    # As we depend on many attributes for multiple components we need a
    # quick way to validate when they have been set or not.
    #
    # @param attr_value [NotNilValue] The value of the attribute we want to check
    # @param attr_name [String] The name of the attribute
    def check_attribute?(attr_value, attr_name)
      fail DeliveryCluster::Exceptions::AttributeNotFound, attr_name if attr_value.nil?
    end
  end

  # Module that exposes multiple helpers
  #
  # Thin DSL layer: every method simply delegates to the equivalent
  # DeliveryCluster::Helpers function, passing the recipe's `node`.
  module DSL
    # Retrieve the common cluster recipes
    def common_cluster_recipes
      DeliveryCluster::Helpers.common_cluster_recipes(node)
    end

    # Provisioning Driver Instance
    def provisioning
      DeliveryCluster::Helpers.provisioning(node)
    end

    # The current directory PATH
    def current_dir
      DeliveryCluster::Helpers.current_dir
    end

    # Cluster Data directory link
    def cluster_data_dir_link?
      DeliveryCluster::Helpers.cluster_data_dir_link?
    end

    # Delivery Cluster data directory
    def cluster_data_dir
      DeliveryCluster::Helpers.cluster_data_dir(node)
    end

    # Use the Private IP for SSH
    def use_private_ip_for_ssh
      DeliveryCluster::Helpers.use_private_ip_for_ssh(node)
    end

    # Get the IP address from the Provisioning Abstraction
    def get_ip(machine_node)
      DeliveryCluster::Helpers.get_ip(node, machine_node)
    end

    # Extracting the username from the provisioning abstraction
    def username
      DeliveryCluster::Helpers.username(node)
    end

    # Delivery Cluster ID
    def delivery_cluster_id
      DeliveryCluster::Helpers.delivery_cluster_id(node)
    end

    # Encrypted Data Bag Secret
    def encrypted_data_bag_secret
      DeliveryCluster::Helpers.encrypted_data_bag_secret(node)
    end

    # Generate Knife Variables
    def knife_variables
      DeliveryCluster::Helpers.knife_variables(node)
    end

    # Validate License File
    def validate_license_file
      DeliveryCluster::Helpers.validate_license_file(node)
    end
  end
end
|
jonathanmorley/delivery-cluster
|
libraries/helpers.rb
|
Ruby
|
apache-2.0
| 8,848
|
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper.ip;
import com.google.common.net.InetAddresses;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Numbers;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.NumericAnalyzer;
import org.elasticsearch.index.analysis.NumericTokenizer;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.LongFieldMapper.CustomLongNumericField;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.query.QueryParseContext;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import static org.elasticsearch.index.mapper.MapperBuilders.ipField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
/**
 * Field mapper for the "ip" type: IPv4 addresses are indexed as their numeric
 * (long) representation so they can participate in range and fuzzy queries,
 * and are rendered back to dotted-quad form at search time.
 */
public class IpFieldMapper extends NumberFieldMapper {

    public static final String CONTENT_TYPE = "ip";

    /** Converts the numeric form of an IPv4 address back to dotted-quad notation. */
    public static String longToIp(long longIp) {
        int octet3 = (int) ((longIp >> 24) % 256);
        int octet2 = (int) ((longIp >> 16) % 256);
        int octet1 = (int) ((longIp >> 8) % 256);
        int octet0 = (int) ((longIp) % 256);
        return octet3 + "." + octet2 + "." + octet1 + "." + octet0;
    }

    private static final Pattern pattern = Pattern.compile("\\.");

    /**
     * Parses a dotted-quad IPv4 address into its numeric (long) form.
     *
     * @throws IllegalArgumentException if the value is not a valid IPv4 address
     */
    public static long ipToLong(String ip) {
        try {
            if (!InetAddresses.isInetAddress(ip)) {
                throw new IllegalArgumentException("failed to parse ip [" + ip + "], not a valid ip address");
            }
            // isInetAddress accepts IPv4 and IPv6 literals; the octet-count check
            // below rejects IPv6.
            // NOTE(review): the message says "4 dots" but the check is for 4 octets
            // (3 dots) -- confirm before rewording, callers may match this string.
            String[] octets = pattern.split(ip);
            if (octets.length != 4) {
                throw new IllegalArgumentException("failed to parse ip [" + ip + "], not a valid ipv4 address (4 dots)");
            }
            return (Long.parseLong(octets[0]) << 24) + (Integer.parseInt(octets[1]) << 16) +
                    (Integer.parseInt(octets[2]) << 8) + Integer.parseInt(octets[3]);
        } catch (Exception e) {
            // Preserve IllegalArgumentException as-is; wrap anything else.
            if (e instanceof IllegalArgumentException) {
                throw (IllegalArgumentException) e;
            }
            throw new IllegalArgumentException("failed to parse ip [" + ip + "]", e);
        }
    }

    /** Default field type: a frozen {@link IpFieldType}. */
    public static class Defaults extends NumberFieldMapper.Defaults {
        public static final String NULL_VALUE = null;

        public static final MappedFieldType FIELD_TYPE = new IpFieldType();

        static {
            FIELD_TYPE.freeze();
        }
    }

    /** Builder for the mapper; uses 64-bit precision steps like long fields. */
    public static class Builder extends NumberFieldMapper.Builder<Builder, IpFieldMapper> {

        protected String nullValue = Defaults.NULL_VALUE;

        public Builder(String name) {
            super(name, Defaults.FIELD_TYPE, Defaults.PRECISION_STEP_64_BIT);
            builder = this;
        }

        @Override
        public IpFieldMapper build(BuilderContext context) {
            setupFieldType(context);
            IpFieldMapper fieldMapper = new IpFieldMapper(fieldType, docValues, ignoreMalformed(context), coerce(context),
                fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
            fieldMapper.includeInAll(includeInAll);
            return fieldMapper;
        }

        @Override
        protected NamedAnalyzer makeNumberAnalyzer(int precisionStep) {
            // MAX_VALUE means "no extra precision terms"; name encodes the step.
            String name = precisionStep == Integer.MAX_VALUE ? "_ip/max" : ("_ip/" + precisionStep);
            return new NamedAnalyzer(name, new NumericIpAnalyzer(precisionStep));
        }

        @Override
        protected int maxPrecisionStep() {
            return 64;
        }
    }

    /** Parses the mapping definition, honoring the "null_value" property. */
    public static class TypeParser implements Mapper.TypeParser {
        @Override
        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            IpFieldMapper.Builder builder = ipField(name);
            parseNumberField(builder, name, node, parserContext);
            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                Map.Entry<String, Object> entry = iterator.next();
                String propName = Strings.toUnderscoreCase(entry.getKey());
                Object propNode = entry.getValue();
                if (propName.equals("null_value")) {
                    if (propNode == null) {
                        throw new MapperParsingException("Property [null_value] cannot be null.");
                    }
                    builder.nullValue(propNode.toString());
                    // Consumed properties are removed so leftovers can be flagged.
                    iterator.remove();
                }
            }
            return builder;
        }
    }

    /** Field type converting between string, numeric, and indexed IP representations. */
    public static final class IpFieldType extends NumberFieldType {

        public IpFieldType() {}

        protected IpFieldType(IpFieldType ref) {
            super(ref);
        }

        @Override
        public NumberFieldType clone() {
            return new IpFieldType(this);
        }

        /** Normalizes any stored representation (Number, BytesRef, String) to a long. */
        @Override
        public Long value(Object value) {
            if (value == null) {
                return null;
            }
            if (value instanceof Number) {
                return ((Number) value).longValue();
            }
            if (value instanceof BytesRef) {
                return Numbers.bytesToLong((BytesRef) value);
            }
            return ipToLong(value.toString());
        }

        /**
         * IPs should return as a string.
         */
        @Override
        public Object valueForSearch(Object value) {
            Long val = value(value);
            if (val == null) {
                return null;
            }
            return longToIp(val);
        }

        @Override
        public BytesRef indexedValueForSearch(Object value) {
            BytesRefBuilder bytesRef = new BytesRefBuilder();
            NumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
            return bytesRef.get();
        }

        @Override
        public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
            return NumericRangeQuery.newLongRange(names().indexName(), numericPrecisionStep(),
                lowerTerm == null ? null : parseValue(lowerTerm),
                upperTerm == null ? null : parseValue(upperTerm),
                includeLower, includeUpper);
        }

        /**
         * Fuzziness is interpreted numerically: either as another IP (distance is
         * the difference of the numeric forms) or as a plain long offset.
         */
        @Override
        public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            long iValue = ipToLong(value);
            long iSim;
            try {
                iSim = ipToLong(fuzziness.asString());
            } catch (IllegalArgumentException e) {
                iSim = fuzziness.asLong();
            }
            return NumericRangeQuery.newLongRange(names().indexName(), numericPrecisionStep(),
                iValue - iSim,
                iValue + iSim,
                true, true);
        }
    }

    protected IpFieldMapper(MappedFieldType fieldType, Boolean docValues,
                            Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
                            @Nullable Settings fieldDataSettings,
                            Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
        super(fieldType, docValues, ignoreMalformed, coerce,
            fieldDataSettings, indexSettings, multiFields, copyTo);
    }

    @Override
    public MappedFieldType defaultFieldType() {
        return Defaults.FIELD_TYPE;
    }

    @Override
    public FieldDataType defaultFieldDataType() {
        // Field data is handled exactly like a long field.
        return new FieldDataType("long");
    }

    private static long parseValue(Object value) {
        if (value instanceof Number) {
            return ((Number) value).longValue();
        }
        if (value instanceof BytesRef) {
            return ipToLong(((BytesRef) value).utf8ToString());
        }
        return ipToLong(value.toString());
    }

    /**
     * Extracts the IP from the document (or the configured null_value), then
     * indexes/stores it as a long and adds doc values when enabled.
     */
    @Override
    protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
        String ipAsString;
        if (context.externalValueSet()) {
            ipAsString = (String) context.externalValue();
            if (ipAsString == null) {
                ipAsString = fieldType().nullValueAsString();
            }
        } else {
            if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) {
                ipAsString = fieldType().nullValueAsString();
            } else {
                ipAsString = context.parser().text();
            }
        }
        if (ipAsString == null) {
            return;
        }
        if (context.includeInAll(includeInAll, this)) {
            // _all receives the original string form, not the numeric one.
            context.allEntries().addText(fieldType.names().fullName(), ipAsString, fieldType.boost());
        }
        final long value = ipToLong(ipAsString);
        if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) {
            CustomLongNumericField field = new CustomLongNumericField(this, value, fieldType);
            field.setBoost(fieldType.boost());
            fields.add(field);
        }
        if (fieldType().hasDocValues()) {
            addDocValue(context, fields, value);
        }
    }

    @Override
    protected String contentType() {
        return CONTENT_TYPE;
    }

    /** Merges only the null_value setting from a compatible mapper of the same class. */
    @Override
    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
        super.merge(mergeWith, mergeResult);
        if (!this.getClass().equals(mergeWith.getClass())) {
            return;
        }
        if (!mergeResult.simulate()) {
            this.fieldType = this.fieldType.clone();
            this.fieldType.setNullValue(((IpFieldMapper) mergeWith).fieldType().nullValue());
        }
    }

    /** Serializes the non-default ip-specific mapping properties. */
    @Override
    protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
        super.doXContentBody(builder, includeDefaults, params);
        if (includeDefaults || fieldType.numericPrecisionStep() != Defaults.PRECISION_STEP_64_BIT) {
            builder.field("precision_step", fieldType.numericPrecisionStep());
        }
        if (includeDefaults || fieldType().nullValueAsString() != null) {
            builder.field("null_value", fieldType().nullValueAsString());
        }
        if (includeInAll != null) {
            builder.field("include_in_all", includeInAll);
        } else if (includeDefaults) {
            builder.field("include_in_all", false);
        }
    }

    /** Analyzer producing numeric trie tokens for IP values. */
    public static class NumericIpAnalyzer extends NumericAnalyzer<NumericIpTokenizer> {

        private final int precisionStep;

        public NumericIpAnalyzer(int precisionStep) {
            this.precisionStep = precisionStep;
        }

        @Override
        protected NumericIpTokenizer createNumericTokenizer(char[] buffer) throws IOException {
            return new NumericIpTokenizer(precisionStep, buffer);
        }
    }

    /** Tokenizer converting an IP string to its long value before tokenizing. */
    public static class NumericIpTokenizer extends NumericTokenizer {

        public NumericIpTokenizer(int precisionStep, char[] buffer) throws IOException {
            super(new NumericTokenStream(precisionStep), buffer, null);
        }

        @Override
        protected void setValue(NumericTokenStream tokenStream, String value) {
            tokenStream.setLongValue(ipToLong(value));
        }
    }
}
|
jw0201/elastic
|
core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java
|
Java
|
apache-2.0
| 13,346
|
/*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.device.mgt.output.adapter.mqtt;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.wso2.carbon.context.PrivilegedCarbonContext;
import org.wso2.carbon.device.mgt.output.adapter.mqtt.util.MQTTAdapterPublisher;
import org.wso2.carbon.device.mgt.output.adapter.mqtt.util.MQTTEventAdapterConstants;
import org.wso2.carbon.device.mgt.output.adapter.mqtt.util.MQTTBrokerConnectionConfiguration;
import org.wso2.carbon.event.output.adapter.core.EventAdapterUtil;
import org.wso2.carbon.event.output.adapter.core.OutputEventAdapter;
import org.wso2.carbon.event.output.adapter.core.OutputEventAdapterConfiguration;
import org.wso2.carbon.event.output.adapter.core.exception.OutputEventAdapterException;
import org.wso2.carbon.event.output.adapter.core.exception.TestConnectionNotSupportedException;
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
* Output MQTTEventAdapter will be used to publish events with MQTT protocol to specified broker and topic.
*/
public class MQTTEventAdapter implements OutputEventAdapter {

    private OutputEventAdapterConfiguration eventAdapterConfiguration;
    private Map<String, String> globalProperties;
    private MQTTAdapterPublisher mqttAdapterPublisher;
    // NOTE(review): parsed from config but never read in this class -- presumably
    // consumed by the connection configuration elsewhere; verify or remove.
    private int connectionKeepAliveInterval;
    private String qos;
    // Shared across all adapter instances in the JVM (static, lazily built in init()).
    private static ThreadPoolExecutor threadPoolExecutor;
    private static final Log log = LogFactory.getLog(MQTTEventAdapter.class);
    private int tenantId;

    /**
     * Builds the adapter, resolving the connection keep-alive interval from
     * global properties (falling back to the default on a parse failure).
     */
    public MQTTEventAdapter(OutputEventAdapterConfiguration eventAdapterConfiguration,
                            Map<String, String> globalProperties) {
        this.eventAdapterConfiguration = eventAdapterConfiguration;
        this.globalProperties = globalProperties;
        Object keeAliveInternal = globalProperties.get(MQTTEventAdapterConstants.CONNECTION_KEEP_ALIVE_INTERVAL);
        if (keeAliveInternal != null) {
            try {
                connectionKeepAliveInterval = Integer.parseInt(keeAliveInternal.toString());
            } catch (NumberFormatException e) {
                log.error("Error when configuring user specified connection keep alive time, using default value", e);
                connectionKeepAliveInterval = MQTTEventAdapterConstants.DEFAULT_CONNECTION_KEEP_ALIVE_INTERVAL;
            }
        } else {
            connectionKeepAliveInterval = MQTTEventAdapterConstants.DEFAULT_CONNECTION_KEEP_ALIVE_INTERVAL;
        }
    }

    /**
     * Captures the tenant and lazily creates the shared publishing thread pool,
     * sized from global properties with constant fallbacks.
     *
     * NOTE(review): the null-check on the static pool is not synchronized --
     * concurrent init() calls could build two pools; confirm the container
     * serializes adapter initialization.
     */
    @Override
    public void init() throws OutputEventAdapterException {
        tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId(true);
        //ThreadPoolExecutor will be assigned if it is null
        if (threadPoolExecutor == null) {
            int minThread;
            int maxThread;
            int jobQueSize;
            long defaultKeepAliveTime;
            //If global properties are available those will be assigned else constant values will be assigned
            if (globalProperties.get(MQTTEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME) != null) {
                minThread = Integer.parseInt(globalProperties.get(
                        MQTTEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME));
            } else {
                minThread = MQTTEventAdapterConstants.DEFAULT_MIN_THREAD_POOL_SIZE;
            }
            if (globalProperties.get(MQTTEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME) != null) {
                maxThread = Integer.parseInt(globalProperties.get(
                        MQTTEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME));
            } else {
                maxThread = MQTTEventAdapterConstants.DEFAULT_MAX_THREAD_POOL_SIZE;
            }
            if (globalProperties.get(MQTTEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME) != null) {
                defaultKeepAliveTime = Integer.parseInt(globalProperties.get(
                        MQTTEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME));
            } else {
                defaultKeepAliveTime = MQTTEventAdapterConstants.DEFAULT_KEEP_ALIVE_TIME_IN_MILLIS;
            }
            if (globalProperties.get(MQTTEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME) != null) {
                jobQueSize = Integer.parseInt(globalProperties.get(
                        MQTTEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME));
            } else {
                jobQueSize = MQTTEventAdapterConstants.DEFAULT_EXECUTOR_JOB_QUEUE_SIZE;
            }
            threadPoolExecutor = new ThreadPoolExecutor(minThread, maxThread, defaultKeepAliveTime,
                    TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(
                    jobQueSize));
        }
    }

    @Override
    public void testConnect() throws TestConnectionNotSupportedException {
        throw new TestConnectionNotSupportedException("Test connection is not available");
    }

    /** Creates the MQTT publisher from static adapter properties (clientId, QoS). */
    @Override
    public void connect() {
        MQTTBrokerConnectionConfiguration mqttBrokerConnectionConfiguration =
                new MQTTBrokerConnectionConfiguration(eventAdapterConfiguration, globalProperties);
        String clientId = eventAdapterConfiguration.getStaticProperties().get(
                MQTTEventAdapterConstants.ADAPTER_CONF_CLIENTID);
        qos = eventAdapterConfiguration.getStaticProperties().get(MQTTEventAdapterConstants.ADAPTER_MESSAGE_QOS);
        mqttAdapterPublisher = new MQTTAdapterPublisher(mqttBrokerConnectionConfiguration, clientId);
    }

    /**
     * Queues the message for asynchronous publication to the topic taken from
     * the dynamic properties; drops (and logs) the event when the queue is full.
     */
    @Override
    public void publish(Object message, Map<String, String> dynamicProperties) {
        String topic = dynamicProperties.get(MQTTEventAdapterConstants.ADAPTER_MESSAGE_TOPIC);
        try {
            threadPoolExecutor.submit(new MQTTSender(topic, message));
        } catch (RejectedExecutionException e) {
            EventAdapterUtil.logAndDrop(eventAdapterConfiguration.getName(), message, "Job queue is full", e, log,
                    tenantId);
        }
    }

    /** Closes and clears the publisher; errors are logged, not propagated. */
    @Override
    public void disconnect() {
        try {
            if (mqttAdapterPublisher != null) {
                mqttAdapterPublisher.close();
                mqttAdapterPublisher = null;
            }
        } catch (OutputEventAdapterException e) {
            log.error("Exception when closing the mqtt publisher connection on Output MQTT Adapter '" +
                    eventAdapterConfiguration.getName() + "'", e);
        }
    }

    @Override
    public void destroy() {
        //not required
    }

    @Override
    public boolean isPolled() {
        return false;
    }

    /**
     * Pool task: lazily (re)connects the publisher using double-checked locking
     * on the adapter class, then publishes with the configured QoS (or the
     * publisher default when QoS is unset). Any failure logs-and-drops the event.
     */
    class MQTTSender implements Runnable {

        String topic;
        Object message;

        MQTTSender(String topic, Object message) {
            this.topic = topic;
            this.message = message;
        }

        @Override
        public void run() {
            try {
                if (!mqttAdapterPublisher.isConnected()) {
                    synchronized (MQTTEventAdapter.class) {
                        if (!mqttAdapterPublisher.isConnected()) {
                            mqttAdapterPublisher.connect();
                        }
                    }
                }
                if (qos == null || qos.trim().isEmpty()) {
                    mqttAdapterPublisher.publish(message.toString(), topic);
                } else {
                    mqttAdapterPublisher.publish(Integer.parseInt(qos), message.toString(), topic);
                }
            } catch (Throwable t) {
                EventAdapterUtil.logAndDrop(eventAdapterConfiguration.getName(), message, null, t, log, tenantId);
            }
        }
    }
}
|
lakshani/carbon-device-mgt-plugins
|
components/extensions/cdmf-transport-adapters/output/org.wso2.carbon.device.mgt.output.adapter.mqtt/src/main/java/org/wso2/carbon/device/mgt/output/adapter/mqtt/MQTTEventAdapter.java
|
Java
|
apache-2.0
| 8,572
|
package cyclops.function;
import java.util.function.Consumer;
import java.util.function.Function;
/**
* A FunctionalInterface for side-effecting statements that accept 5 inputs (with no result).
* The five-arity specialization of {@link Consumer}.
*
* @author johnmcclean
*
* @param <T1> Type of first input parameter
* @param <T2> Type of second input parameter
* @param <T3> Type of third input parameter
* @param <T4> Type of fourth input parameter
* @param <T5> Type of fifth input parameter
*/
@FunctionalInterface
public interface Consumer5<T1, T2, T3, T4, T5> {

    /**
     * Performs this side-effecting operation on the given five arguments.
     *
     * @param a the first input parameter
     * @param b the second input parameter
     * @param c the third input parameter
     * @param d the fourth input parameter
     * @param e the fifth input parameter
     */
    void accept(T1 a, T2 b, T3 c, T4 d, T5 e);

    /**
     * Partially applies the first input parameter.
     *
     * @param a the first input parameter
     * @return a curried function that eventually resolves to a Consumer of the last parameter
     */
    default Function<T2, Function<T3, Function<T4, Consumer<T5>>>> apply(final T1 a) {
        return b -> c -> d -> e -> accept(a, b, c, d, e);
    }

    /**
     * Partially applies the first and second input parameters.
     *
     * @param a the first input parameter
     * @param b the second input parameter
     * @return a curried function that eventually resolves to a Consumer of the last parameter
     */
    default Function<T3, Function<T4, Consumer<T5>>> apply(final T1 a, final T2 b) {
        return c -> d -> e -> accept(a, b, c, d, e);
    }

    /**
     * Partially applies the first, second and third input parameters.
     *
     * @param a the first input parameter
     * @param b the second input parameter
     * @param c the third input parameter
     * @return a curried function that eventually resolves to a Consumer of the last parameter
     */
    default Function<T4, Consumer<T5>> apply(final T1 a, final T2 b, final T3 c) {
        return d -> e -> accept(a, b, c, d, e);
    }

    /**
     * Partially applies the first four input parameters.
     *
     * @param a the first input parameter
     * @param b the second input parameter
     * @param c the third input parameter
     * @param d the fourth input parameter
     * @return a Consumer accepting the final value
     */
    default Consumer<T5> apply(final T1 a, final T2 b, final T3 c, final T4 d) {
        return e -> accept(a, b, c, d, e);
    }
}
|
aol/cyclops-react
|
cyclops/src/main/java/cyclops/function/Consumer5.java
|
Java
|
apache-2.0
| 2,963
|
package alien4cloud.it.cloud;
import java.util.List;
import java.util.Set;
import org.apache.http.NameValuePair;
import org.apache.http.message.BasicNameValuePair;
import org.junit.Assert;
import alien4cloud.it.Context;
import alien4cloud.model.cloud.MatchedNetworkTemplate;
import alien4cloud.model.cloud.NetworkTemplate;
import alien4cloud.rest.cloud.CloudDTO;
import alien4cloud.rest.utils.JsonUtil;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import cucumber.api.DataTable;
import cucumber.api.java.en.And;
import cucumber.api.java.en.Then;
import cucumber.api.java.en.When;
/**
 * Cucumber step definitions for the cloud network REST endpoints: adding/removing networks,
 * matching them to PaaS resources, and asserting the resulting cloud state.
 */
public class CloudNetworkStepDefinitions {

    /**
     * Builds a {@link NetworkTemplate} from its four attributes. Centralizes construction that
     * was previously duplicated in three separate steps.
     *
     * @param id network name/id
     * @param cidr network CIDR
     * @param ipVersion IP version (4 or 6)
     * @param gatewayIp gateway IP address
     * @return the populated template
     */
    private static NetworkTemplate buildNetwork(String id, String cidr, int ipVersion, String gatewayIp) {
        NetworkTemplate network = new NetworkTemplate();
        network.setId(id);
        network.setIpVersion(ipVersion);
        network.setCidr(cidr);
        network.setGatewayIp(gatewayIp);
        return network;
    }

    @When("^I add the network with name \"([^\"]*)\" and CIDR \"([^\"]*)\" and IP version (\\d+) and gateway \"([^\"]*)\" to the cloud \"([^\"]*)\"$")
    public void I_add_the_network_with_name_and_CIDR_and_IP_version_and_gateway_to_the_cloud(String name, String cidr, int ipVersion, String gateWay,
            String cloudName) throws Throwable {
        String cloudId = Context.getInstance().getCloudId(cloudName);
        NetworkTemplate network = buildNetwork(name, cidr, ipVersion, gateWay);
        // POST the new network definition to the cloud's networks endpoint.
        Context.getInstance().registerRestResponse(
                Context.getRestClientInstance().postJSon("/rest/clouds/" + cloudId + "/networks", JsonUtil.toString(network)));
    }

    @And("^The cloud with name \"([^\"]*)\" should have (\\d+) networks as resources:$")
    public void The_cloud_with_name_should_have_networks_as_resources(String cloudName, int numberOfNetworks, DataTable expectedNetworksTable) throws Throwable {
        String cloudId = Context.getInstance().getCloudId(cloudName);
        CloudDTO cloudDTO = JsonUtil.read(Context.getRestClientInstance().get("/rest/clouds/" + cloudId), CloudDTO.class).getData();
        assertNetworks(numberOfNetworks, cloudDTO.getCloud().getNetworks(), expectedNetworksTable);
    }

    /**
     * Asserts that {@code networks} has exactly {@code numberOfNetworks} entries and equals the
     * set described by {@code expectedNetworksTable} (columns: name, CIDR, IP version, gateway).
     * A null table asserts against an empty expected set.
     */
    public static void assertNetworks(int numberOfNetworks, Set<NetworkTemplate> networks, DataTable expectedNetworksTable) {
        Assert.assertEquals(numberOfNetworks, networks.size());
        Set<NetworkTemplate> expectedNetworks = Sets.newHashSet();
        if (expectedNetworksTable != null) {
            for (List<String> rows : expectedNetworksTable.raw()) {
                expectedNetworks.add(buildNetwork(rows.get(0), rows.get(1), Integer.parseInt(rows.get(2)), rows.get(3)));
            }
        }
        Assert.assertEquals(expectedNetworks, networks);
    }

    @When("^I remove the network with name \"([^\"]*)\" from the cloud \"([^\"]*)\"$")
    public void I_remove_the_network_with_name_from_the_cloud(String networkName, String cloudName) throws Throwable {
        String cloudId = Context.getInstance().getCloudId(cloudName);
        Context.getInstance().registerRestResponse(Context.getRestClientInstance().delete("/rest/clouds/" + cloudId + "/networks/" + networkName));
    }

    @Then("^The cloud with name \"([^\"]*)\" should not have any network as resources$")
    public void The_cloud_with_name_should_not_have_any_network_as_resources(String cloudName) throws Throwable {
        String cloudId = Context.getInstance().getCloudId(cloudName);
        CloudDTO cloudDTO = JsonUtil.read(Context.getRestClientInstance().get("/rest/clouds/" + cloudId), CloudDTO.class).getData();
        Assert.assertTrue(cloudDTO.getNetworks() == null || cloudDTO.getNetworks().isEmpty());
    }

    @When("^I match the network with name \"([^\"]*)\" of the cloud \"([^\"]*)\" to the PaaS resource \"([^\"]*)\"$")
    public void I_match_the_network_with_name_of_the_cloud_to_the_PaaS_resource(String networkName, String cloudName, String paaSResourceId) throws Throwable {
        String cloudId = Context.getInstance().getCloudId(cloudName);
        // NOTE(review): the form-parameter name "pasSResourceId" looks like a typo, but it must
        // match the server-side endpoint, so it is kept byte-identical.
        Context.getInstance().registerRestResponse(
                Context.getRestClientInstance().postUrlEncoded("/rest/clouds/" + cloudId + "/networks/" + networkName + "/resource",
                        Lists.<NameValuePair> newArrayList(new BasicNameValuePair("pasSResourceId", paaSResourceId))));
    }

    @And("^The cloud \"([^\"]*)\" should have network mapping configuration as below:$")
    public void The_cloud_should_have_network_mapping_configuration_as_below(String cloudName, DataTable expectedMappings) throws Throwable {
        new CloudDefinitionsSteps().I_get_the_cloud_by_id(cloudName);
        CloudDTO cloudDTO = JsonUtil.read(Context.getInstance().getRestResponse(), CloudDTO.class).getData();
        Set<MatchedNetworkTemplate> actualNetworks = Sets.newHashSet(cloudDTO.getNetworks().values());
        Set<MatchedNetworkTemplate> expectedNetworks = Sets.newHashSet();
        // Table columns: name, CIDR, IP version, gateway, expected PaaS resource id (may be empty).
        for (List<String> rows : expectedMappings.raw()) {
            NetworkTemplate network = buildNetwork(rows.get(0), rows.get(1), Integer.parseInt(rows.get(2)), rows.get(3));
            String pasSResourceId = rows.get(4);
            if (pasSResourceId.isEmpty()) {
                pasSResourceId = null;
            }
            expectedNetworks.add(new MatchedNetworkTemplate(network, pasSResourceId));
        }
        Assert.assertEquals(expectedNetworks, actualNetworks);
    }

    @When("^I delete the mapping for the network \"([^\"]*)\" of the cloud \"([^\"]*)\"$")
    public void I_delete_the_mapping_for_the_network_of_the_cloud(String networkName, String cloudName) throws Throwable {
        String cloudId = Context.getInstance().getCloudId(cloudName);
        // Posting an empty parameter list clears the mapping.
        Context.getInstance().registerRestResponse(
                Context.getRestClientInstance().postUrlEncoded("/rest/clouds/" + cloudId + "/networks/" + networkName + "/resource",
                        Lists.<NameValuePair> newArrayList()));
    }

    @Then("^The cloud \"([^\"]*)\" should have empty network mapping configuration$")
    public void The_cloud_should_have_empty_network_mapping_configuration(String cloudName) throws Throwable {
        new CloudDefinitionsSteps().I_get_the_cloud_by_id(cloudName);
        CloudDTO cloudDTO = JsonUtil.read(Context.getInstance().getRestResponse(), CloudDTO.class).getData();
        Assert.assertTrue(cloudDTO.getNetworks().isEmpty());
    }
}
|
igorng/alien4cloud
|
alien4cloud-rest-it/src/test/java/alien4cloud/it/cloud/CloudNetworkStepDefinitions.java
|
Java
|
apache-2.0
| 6,925
|
/*
* ModSecurity, http://www.modsecurity.org/
* Copyright (c) 2015 - 2021 Trustwave Holdings, Inc. (http://www.trustwave.com/)
*
* You may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* If any of the files related to licensing are missing or if you have any
* other questions related to licensing please contact Trustwave Holdings, Inc.
* directly using the email address security@modsecurity.org.
*
*/
#ifndef SRC_VARIABLES_REQBODY_ERROR_MSG_H_
#define SRC_VARIABLES_REQBODY_ERROR_MSG_H_

// The include guard now wraps the whole header (previously it was opened after the
// #include directives, so repeated inclusion re-processed the includes).
#include <iostream>
#include <string>
#include <vector>
#include <list>
#include <utility>

#include "src/variables/variable.h"

namespace modsecurity {

class Transaction;

namespace variables {

// Declares the REQBODY_ERROR_MSG variable; the macro (see src/variables/variable.h)
// presumably expands to a variable class bound to m_variableReqbodyErrorMsg.
DEFINE_VARIABLE(ReqbodyErrorMsg, REQBODY_ERROR_MSG, m_variableReqbodyErrorMsg)

}  // namespace variables
}  // namespace modsecurity

#endif  // SRC_VARIABLES_REQBODY_ERROR_MSG_H_
|
SpiderLabs/ModSecurity
|
src/variables/reqbody_error_msg.h
|
C
|
apache-2.0
| 991
|
// Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.skyframe;
import com.google.common.base.Function;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.devtools.build.lib.actions.Action;
import com.google.devtools.build.lib.actions.ActionCacheChecker.Token;
import com.google.devtools.build.lib.actions.ActionExecutionContext;
import com.google.devtools.build.lib.actions.ActionExecutionException;
import com.google.devtools.build.lib.actions.ActionLookupData;
import com.google.devtools.build.lib.actions.ActionLookupValue;
import com.google.devtools.build.lib.actions.AlreadyReportedActionExecutionException;
import com.google.devtools.build.lib.actions.Artifact;
import com.google.devtools.build.lib.actions.MissingInputFileException;
import com.google.devtools.build.lib.actions.NotifyOnActionCacheHit;
import com.google.devtools.build.lib.actions.PackageRootResolver;
import com.google.devtools.build.lib.actions.Root;
import com.google.devtools.build.lib.causes.Cause;
import com.google.devtools.build.lib.causes.LabelCause;
import com.google.devtools.build.lib.cmdline.LabelSyntaxException;
import com.google.devtools.build.lib.cmdline.PackageIdentifier;
import com.google.devtools.build.lib.collect.nestedset.NestedSetBuilder;
import com.google.devtools.build.lib.events.Event;
import com.google.devtools.build.lib.util.BlazeClock;
import com.google.devtools.build.lib.util.Pair;
import com.google.devtools.build.lib.util.Preconditions;
import com.google.devtools.build.lib.util.io.TimestampGranularityMonitor;
import com.google.devtools.build.lib.vfs.PathFragment;
import com.google.devtools.build.skyframe.LegacySkyKey;
import com.google.devtools.build.skyframe.SkyFunction;
import com.google.devtools.build.skyframe.SkyFunctionException;
import com.google.devtools.build.skyframe.SkyKey;
import com.google.devtools.build.skyframe.SkyValue;
import com.google.devtools.build.skyframe.ValueOrException2;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.Nullable;
/**
* A {@link SkyFunction} that creates {@link ActionExecutionValue}s. There are four points where
* this function can abort due to missing values in the graph:
* <ol>
* <li>For actions that discover inputs, if missing metadata needed to resolve an artifact from a
* string input in the action cache.</li>
* <li>If missing metadata for artifacts in inputs (including the artifacts above).</li>
* <li>For actions that discover inputs, if missing metadata for inputs discovered prior to
* execution.</li>
* <li>For actions that discover inputs, but do so during execution, if missing metadata for
* inputs discovered during execution.</li>
* </ol>
*/
public class ActionExecutionFunction implements SkyFunction, CompletionReceiver {
  // Shared executor that performs action-cache checks and runs actions for this function.
  private final SkyframeActionExecutor skyframeActionExecutor;
  // Source of the timestamp granularity monitor handed to ActionMetadataHandler instances.
  private final AtomicReference<TimestampGranularityMonitor> tsgm;
  // Per-action state carried across Skyframe restarts for input-discovering actions; entries
  // are removed when the action completes or fails (see compute()).
  private ConcurrentMap<Action, ContinuationState> stateMap;
  /**
   * Creates the function.
   *
   * @param skyframeActionExecutor executor used for cache checks and action execution
   * @param tsgm timestamp granularity monitor reference used when building metadata handlers
   */
  public ActionExecutionFunction(SkyframeActionExecutor skyframeActionExecutor,
      AtomicReference<TimestampGranularityMonitor> tsgm) {
    this.skyframeActionExecutor = skyframeActionExecutor;
    this.tsgm = tsgm;
    stateMap = Maps.newConcurrentMap();
  }
  // Maps a client environment variable name to the SkyKey of its CLIENT_ENVIRONMENT_VARIABLE
  // node, so that the variables an action reads become Skyframe dependencies (see compute()).
  private static final Function<String, SkyKey> VAR_TO_SKYKEY =
      new Function<String, SkyKey>() {
        @Override
        public SkyKey apply(String var) {
          return LegacySkyKey.create(SkyFunctions.CLIENT_ENVIRONMENT_VARIABLE, var);
        }
      };
  /**
   * Evaluates one action: declares Skyframe deps on its inputs and the client environment
   * variables it reads, checks the action cache, executes the action when needed, and returns
   * the resulting ActionExecutionValue. Returns null whenever required Skyframe values are
   * missing, in which case Skyframe restarts this function later; ContinuationState (via
   * stateMap) preserves progress across such restarts for input-discovering actions.
   */
  @Override
  public SkyValue compute(SkyKey skyKey, Environment env) throws ActionExecutionFunctionException,
      InterruptedException {
    ActionLookupData actionLookupData = (ActionLookupData) skyKey.argument();
    // NOTE(review): no valuesMissing() check after this lookup — presumably the owning analysis
    // node is always already evaluated when an action node is requested; confirm.
    ActionLookupValue actionLookupValue =
        (ActionLookupValue) env.getValue(actionLookupData.getActionLookupNode());
    int actionIndex = actionLookupData.getActionIndex();
    Action action = actionLookupValue.getAction(actionIndex);
    skyframeActionExecutor.noteActionEvaluationStarted(actionLookupData, action);
    // TODO(bazel-team): Non-volatile NotifyOnActionCacheHit actions perform worse in Skyframe than
    // legacy when they are not at the top of the action graph. In legacy, they are stored
    // separately, so notifying non-dirty actions is cheap. In Skyframe, they depend on the
    // BUILD_ID, forcing invalidation of upward transitive closure on each build.
    if ((action.isVolatile() && !(action instanceof SkyframeAwareAction))
        || action instanceof NotifyOnActionCacheHit) {
      // Volatile build actions may need to execute even if none of their known inputs have changed.
      // Depending on the buildID ensure that these actions have a chance to execute.
      PrecomputedValue.BUILD_ID.get(env);
    }
    // Look up the parts of the environment that influence the action.
    Map<SkyKey, SkyValue> clientEnvLookup =
        env.getValues(Iterables.transform(action.getClientEnvironmentVariables(), VAR_TO_SKYKEY));
    if (env.valuesMissing()) {
      return null;
    }
    // Unset variables (null values) are simply omitted from the map handed to the executor.
    Map<String, String> clientEnv = new HashMap<>();
    for (Entry<SkyKey, SkyValue> entry : clientEnvLookup.entrySet()) {
      ClientEnvironmentValue envValue = (ClientEnvironmentValue) entry.getValue();
      if (envValue.getValue() != null) {
        clientEnv.put((String) entry.getKey().argument(), envValue.getValue());
      }
    }
    // For restarts of this ActionExecutionFunction we use a ContinuationState variable, below, to
    // avoid redoing work. However, if two actions are shared and the first one executes, when the
    // second one goes to execute, we should detect that and short-circuit, even without taking
    // ContinuationState into account.
    boolean sharedActionAlreadyRan = skyframeActionExecutor.probeActionExecution(action);
    ContinuationState state;
    if (action.discoversInputs()) {
      state = getState(action);
    } else {
      // Because this is a new state, all conditionals below about whether state has already done
      // something will return false, and so we will execute all necessary steps.
      state = new ContinuationState();
    }
    if (!state.hasCollectedInputs()) {
      state.allInputs = collectInputs(action, env);
      if (state.allInputs == null) {
        // Missing deps.
        return null;
      }
    } else if (state.allInputs.keysRequested != null) {
      // Preserve the invariant that we ask for the same deps each build.
      env.getValues(state.allInputs.keysRequested);
      Preconditions.checkState(!env.valuesMissing(), "%s %s", action, state);
    }
    Pair<Map<Artifact, FileArtifactValue>, Map<Artifact, Collection<Artifact>>> checkedInputs =
        null;
    try {
      // Declare deps on known inputs to action. We do this unconditionally to maintain our
      // invariant of asking for the same deps each build.
      Map<SkyKey, ValueOrException2<MissingInputFileException, ActionExecutionException>> inputDeps
          = env.getValuesOrThrow(toKeys(state.allInputs.getAllInputs(),
              action.discoversInputs() ? action.getMandatoryInputs() : null),
              MissingInputFileException.class, ActionExecutionException.class);
      if (!sharedActionAlreadyRan && !state.hasArtifactData()) {
        // Do we actually need to find our metadata?
        checkedInputs = checkInputs(env, action, inputDeps);
      }
    } catch (ActionExecutionException e) {
      // Remove action from state map in case it's there (won't be unless it discovers inputs).
      stateMap.remove(action);
      throw new ActionExecutionFunctionException(e);
    }
    if (env.valuesMissing()) {
      // There was missing artifact metadata in the graph. Wait for it to be present.
      // We must check this and return here before attempting to establish any Skyframe dependencies
      // of the action; see establishSkyframeDependencies why.
      return null;
    }
    try {
      establishSkyframeDependencies(env, action);
    } catch (ActionExecutionException e) {
      throw new ActionExecutionFunctionException(e);
    }
    if (env.valuesMissing()) {
      return null;
    }
    if (checkedInputs != null) {
      Preconditions.checkState(!state.hasArtifactData(), "%s %s", state, action);
      state.inputArtifactData = checkedInputs.first;
      state.expandedArtifacts = checkedInputs.second;
    }
    ActionExecutionValue result;
    try {
      result = checkCacheAndExecuteIfNeeded(action, state, env, clientEnv, actionLookupData);
    } catch (ActionExecutionException e) {
      // Remove action from state map in case it's there (won't be unless it discovers inputs).
      stateMap.remove(action);
      // In this case we do not report the error to the action reporter because we have already
      // done it in SkyframeExecutor.reportErrorIfNotAbortingMode() method. That method
      // prints the error in the top-level reporter and also dumps the recorded StdErr for the
      // action. Label can be null in the case of, e.g., the SystemActionOwner (for build-info.txt).
      throw new ActionExecutionFunctionException(new AlreadyReportedActionExecutionException(e));
    }
    if (env.valuesMissing()) {
      // Restart pending: state must still be around so the next evaluation can resume.
      Preconditions.checkState(stateMap.containsKey(action), action);
      return null;
    }
    // Remove action from state map in case it's there (won't be unless it discovers inputs).
    stateMap.remove(action);
    actionLookupValue.actionEvaluated(actionIndex, action);
    return result;
  }
  /**
   * An action's inputs needed for execution. May not just be the result of Action#getInputs(): if
   * the action cache's view of this action contains additional inputs, it will request metadata
   * for them, so we consider those inputs as dependencies of this action as well.
   *
   * @param action the action whose inputs are collected
   * @param env the Skyframe environment used for any dependency requests
   * @return the combined inputs, or null if some dependencies were missing and this
   *     ActionExecutionFunction needs to restart
   * @throws ActionExecutionFunctionException
   */
  @Nullable
  private AllInputs collectInputs(Action action, Environment env)
      throws ActionExecutionFunctionException, InterruptedException {
    Iterable<Artifact> allKnownInputs = Iterables.concat(
        action.getInputs(), action.getRunfilesSupplier().getArtifacts());
    if (action.inputsDiscovered()) {
      // Inputs already discovered: no action-cache consultation needed.
      return new AllInputs(allKnownInputs);
    }
    Preconditions.checkState(action.discoversInputs(), action);
    PackageRootResolverWithEnvironment resolver = new PackageRootResolverWithEnvironment(env);
    Iterable<Artifact> actionCacheInputs =
        skyframeActionExecutor.getActionCachedInputs(action, resolver);
    if (actionCacheInputs == null) {
      // The resolver requested Skyframe values that are not yet present; restart.
      Preconditions.checkState(env.valuesMissing(), action);
      return null;
    }
    return new AllInputs(allKnownInputs, actionCacheInputs, resolver.keysRequested);
  }
  /**
   * The full set of inputs to declare for an action: its known inputs plus any extra inputs the
   * action cache recorded from a previous build (see collectInputs()).
   */
  private static class AllInputs {
    // Inputs known directly from the action (getInputs() plus runfiles artifacts).
    final Iterable<Artifact> defaultInputs;
    // Extra inputs recorded in the action cache; null when inputs were already discovered.
    @Nullable
    final Iterable<Artifact> actionCacheInputs;
    // SkyKeys requested while resolving package roots; replayed on restarts to keep the
    // requested dependency set stable (see compute()).
    @Nullable
    final List<SkyKey> keysRequested;
    AllInputs(Iterable<Artifact> defaultInputs) {
      this.defaultInputs = Preconditions.checkNotNull(defaultInputs);
      this.actionCacheInputs = null;
      this.keysRequested = null;
    }
    AllInputs(Iterable<Artifact> defaultInputs, Iterable<Artifact> actionCacheInputs,
        List<SkyKey> keysRequested) {
      this.defaultInputs = Preconditions.checkNotNull(defaultInputs);
      this.actionCacheInputs = Preconditions.checkNotNull(actionCacheInputs);
      this.keysRequested = keysRequested;
    }
    /** Returns default inputs concatenated with any action-cache inputs. */
    Iterable<Artifact> getAllInputs() {
      return actionCacheInputs == null
          ? defaultInputs
          : Iterables.concat(defaultInputs, actionCacheInputs);
    }
  }
  /**
   * Skyframe implementation of {@link PackageRootResolver}. Should be used only from SkyFunctions,
   * because it uses SkyFunction.Environment for evaluation of ContainingPackageLookupValue.
   */
  private static class PackageRootResolverWithEnvironment implements PackageRootResolver {
    // Every SkyKey requested through this resolver, recorded so the caller can re-request the
    // exact same deps on later evaluations (see AllInputs.keysRequested).
    final List<SkyKey> keysRequested = new ArrayList<>();
    private final Environment env;
    public PackageRootResolverWithEnvironment(Environment env) {
      this.env = env;
    }
    /**
     * Resolves the package root for each exec path. Returns null when required Skyframe values
     * are missing (the caller must then restart); paths whose containing package cannot be found
     * map to null entries in the result.
     */
    @Override
    public Map<PathFragment, Root> findPackageRootsForFiles(Iterable<PathFragment> execPaths)
        throws InterruptedException {
      // Single-use object: a second call would double-record requested keys.
      Preconditions.checkState(keysRequested.isEmpty(),
          "resolver should only be called once: %s %s", keysRequested, execPaths);
      // Create SkyKeys list based on execPaths.
      Map<PathFragment, SkyKey> depKeys = new HashMap<>();
      for (PathFragment path : execPaths) {
        PathFragment parent = Preconditions.checkNotNull(
            path.getParentDirectory(), "Must pass in files, not root directory");
        Preconditions.checkArgument(!parent.isAbsolute(), path);
        try {
          SkyKey depKey =
              ContainingPackageLookupValue.key(PackageIdentifier.discoverFromExecPath(path, true));
          depKeys.put(path, depKey);
          keysRequested.add(depKey);
        } catch (LabelSyntaxException e) {
          // This code is only used to do action cache checks. If one of the file names we got from
          // the action cache is corrupted, or if the action cache is from a different Bazel
          // binary, then the path may not be valid for this Bazel binary, and trigger this
          // exception. In that case, it's acceptable for us to ignore the exception - we'll get an
          // action cache miss and re-execute the action, which is what we should do.
          continue;
        }
      }
      Map<SkyKey, SkyValue> values = env.getValues(depKeys.values());
      if (env.valuesMissing()) {
        return null;
      }
      Map<PathFragment, Root> result = new HashMap<>();
      for (PathFragment path : execPaths) {
        if (!depKeys.containsKey(path)) {
          // Path was skipped above due to a LabelSyntaxException.
          continue;
        }
        ContainingPackageLookupValue value =
            (ContainingPackageLookupValue) values.get(depKeys.get(path));
        if (value.hasContainingPackage()) {
          // We have found corresponding root for current execPath.
          result.put(path,
              Root.computeSourceRoot(
                  value.getContainingPackageRoot(),
                  value.getContainingPackageName().getRepository()));
        } else {
          // We haven't found corresponding root for current execPath.
          result.put(path, null);
        }
      }
      return result;
    }
    @Override
    @Nullable
    public Map<PathFragment, Root> findPackageRoots(Iterable<PathFragment> execPaths)
        throws InterruptedException {
      // call sites for this implementation of PackageRootResolver shouldn't be passing in
      // directories.
      return findPackageRootsForFiles(execPaths);
    }
  }
  /**
   * Checks the action cache and, on a miss, runs the action — discovering inputs first when the
   * action requires it. Returns null when Skyframe values are still missing (the caller then
   * restarts); note the method is not annotated @Nullable even though it can return null.
   * State recorded into {@code state} survives restarts for input-discovering actions.
   */
  private ActionExecutionValue checkCacheAndExecuteIfNeeded(
      Action action,
      ContinuationState state,
      Environment env,
      Map<String, String> clientEnv,
      ActionLookupData actionLookupData)
      throws ActionExecutionException, InterruptedException {
    // If this is a shared action and the other action is the one that executed, we must use that
    // other action's value, provided here, since it is populated with metadata for the outputs.
    if (!state.hasArtifactData()) {
      return skyframeActionExecutor.executeAction(action, null, -1, null, actionLookupData);
    }
    // This may be recreated if we discover inputs.
    ActionMetadataHandler metadataHandler = new ActionMetadataHandler(state.inputArtifactData,
        action.getOutputs(), tsgm.get());
    long actionStartTime = BlazeClock.nanoTime();
    // We only need to check the action cache if we haven't done it on a previous run.
    if (!state.hasCheckedActionCache()) {
      state.token =
          skyframeActionExecutor.checkActionCache(
              action,
              metadataHandler,
              actionStartTime,
              state.allInputs.actionCacheInputs,
              clientEnv);
    }
    if (state.token == null) {
      // We got a hit from the action cache -- no need to execute.
      return new ActionExecutionValue(
          metadataHandler.getOutputArtifactData(),
          metadataHandler.getOutputTreeArtifactData(),
          metadataHandler.getAdditionalOutputData());
    }
    // Delete the metadataHandler's cache of the action's outputs, since they are being deleted.
    metadataHandler.discardOutputMetadata();
    // This may be recreated if we discover inputs.
    PerActionFileCache perActionFileCache = new PerActionFileCache(state.inputArtifactData);
    if (action.discoversInputs()) {
      if (state.discoveredInputs == null) {
        try {
          state.discoveredInputs = skyframeActionExecutor.discoverInputs(action,
              perActionFileCache, metadataHandler, env);
          Preconditions.checkState(state.discoveredInputs != null,
              "discoverInputs() returned null on action %s", action);
        } catch (MissingDepException e) {
          Preconditions.checkState(env.valuesMissing(), action);
          return null;
        }
      }
      addDiscoveredInputs(
          state.inputArtifactData, state.expandedArtifacts, state.discoveredInputs, env);
      if (env.valuesMissing()) {
        return null;
      }
      // Rebuild the file cache so it sees the metadata of the newly discovered inputs.
      perActionFileCache = new PerActionFileCache(state.inputArtifactData);
      // Stage 1 finished, let's do stage 2. The stage 1 of input discovery will have added some
      // files with addDiscoveredInputs() and then have waited for those files to be available
      // by returning null if env.valuesMissing() returned true. So stage 2 can now access those
      // inputs to discover even more inputs and then potentially also wait for those to be
      // available.
      if (state.discoveredInputsStage2 == null) {
        state.discoveredInputsStage2 = action.discoverInputsStage2(env);
      }
      if (state.discoveredInputsStage2 != null) {
        addDiscoveredInputs(
            state.inputArtifactData, state.expandedArtifacts, state.discoveredInputsStage2, env);
        if (env.valuesMissing()) {
          return null;
        }
        perActionFileCache = new PerActionFileCache(state.inputArtifactData);
      }
      metadataHandler =
          new ActionMetadataHandler(state.inputArtifactData, action.getOutputs(), tsgm.get());
      // Set the MetadataHandler to accept output information.
      metadataHandler.discardOutputMetadata();
    }
    try (ActionExecutionContext actionExecutionContext =
        skyframeActionExecutor.getContext(
            perActionFileCache,
            metadataHandler,
            Collections.unmodifiableMap(state.expandedArtifacts))) {
      if (!state.hasExecutedAction()) {
        state.value =
            skyframeActionExecutor.executeAction(
                action, metadataHandler, actionStartTime, actionExecutionContext, actionLookupData);
      }
    } catch (IOException e) {
      throw new ActionExecutionException(
          "Failed to close action output", e, action, /*catastrophe=*/ false);
    }
    if (action.discoversInputs()) {
      Iterable<Artifact> newInputs =
          filterKnownInputs(action.getInputs(), state.inputArtifactData.keySet());
      Map<SkyKey, SkyValue> metadataFoundDuringActionExecution =
          env.getValues(toKeys(newInputs, action.getMandatoryInputs()));
      state.discoveredInputs = newInputs;
      if (env.valuesMissing()) {
        return null;
      }
      if (!Iterables.isEmpty(newInputs)) {
        // We are in the interesting case of an action that discovered its inputs during
        // execution, and found some new ones, but the new ones were already present in the graph.
        // We must therefore cache the metadata for those new ones.
        Map<Artifact, FileArtifactValue> inputArtifactData = new HashMap<>();
        inputArtifactData.putAll(state.inputArtifactData);
        for (Map.Entry<SkyKey, SkyValue> entry : metadataFoundDuringActionExecution.entrySet()) {
          inputArtifactData.put(
              ArtifactSkyKey.artifact(entry.getKey()), (FileArtifactValue) entry.getValue());
        }
        state.inputArtifactData = inputArtifactData;
        // TODO(ulfjack): This causes information loss about omitted and injected outputs. Also see
        // the documentation on MetadataHandler.artifactOmitted. This works by accident because
        // markOmitted is only called for remote execution, and this code only gets executed for
        // local execution.
        metadataHandler =
            new ActionMetadataHandler(state.inputArtifactData, action.getOutputs(), tsgm.get());
      }
    }
    Preconditions.checkState(!env.valuesMissing(), action);
    skyframeActionExecutor.afterExecution(
        action, metadataHandler, state.token, clientEnv, actionLookupData);
    return state.value;
  }
  // Wraps an artifact in a non-mandatory artifact SkyKey; used for discovered inputs, for which
  // no exceptions are expected to propagate (see the comment in addDiscoveredInputs()).
  private static final Function<Artifact, SkyKey> TO_NONMANDATORY_SKYKEY =
      new Function<Artifact, SkyKey>() {
        @Nullable
        @Override
        public SkyKey apply(@Nullable Artifact artifact) {
          return ArtifactSkyKey.key(artifact, /*mandatory=*/ false);
        }
      };
  // Converts the discovered inputs that are not already known into non-mandatory artifact
  // SkyKeys (lazy view; no keys are built until iteration).
  private static Iterable<SkyKey> newlyDiscoveredInputsToSkyKeys(
      Iterable<Artifact> discoveredInputs, Set<Artifact> knownInputs) {
    return Iterables.transform(
        filterKnownInputs(discoveredInputs, knownInputs), TO_NONMANDATORY_SKYKEY);
  }
  /**
   * Declares Skyframe deps on newly discovered (non-mandatory) inputs and, once their values are
   * all present, records their metadata into {@code inputData}, expanding tree artifacts into
   * {@code expandedArtifacts}. While values are still missing, nothing is recorded and the caller
   * is expected to restart.
   */
  private static void addDiscoveredInputs(
      Map<Artifact, FileArtifactValue> inputData,
      Map<Artifact, Collection<Artifact>> expandedArtifacts,
      Iterable<Artifact> discoveredInputs,
      Environment env)
      throws InterruptedException {
    // We do not do a getValuesOrThrow() call for the following reasons:
    // 1. No exceptions can be thrown for non-mandatory inputs;
    // 2. Any derived inputs must be in the transitive closure of this action's inputs. Therefore,
    // if there was an error building one of them, then that exception would have percolated up to
    // this action already, through one of its declared inputs, and we would not have reached input
    // discovery.
    // Therefore there is no need to catch and rethrow exceptions as there is with #checkInputs.
    Map<SkyKey, SkyValue> nonMandatoryDiscovered =
        env.getValues(newlyDiscoveredInputsToSkyKeys(discoveredInputs, inputData.keySet()));
    if (!env.valuesMissing()) {
      for (Entry<SkyKey, SkyValue> entry : nonMandatoryDiscovered.entrySet()) {
        Artifact input = ArtifactSkyKey.artifact(entry.getKey());
        if (entry.getValue() instanceof TreeArtifactValue) {
          // Tree artifacts contribute their children's metadata plus their own aggregate value.
          TreeArtifactValue treeValue = (TreeArtifactValue) entry.getValue();
          expandedArtifacts.put(input, ImmutableSet.<Artifact>copyOf(treeValue.getChildren()));
          inputData.putAll(treeValue.getChildValues());
          inputData.put(input, treeValue.getSelfData());
        } else {
          inputData.put(input, (FileArtifactValue) entry.getValue());
        }
      }
    }
  }
  /**
   * Gives a SkyframeAwareAction the chance to declare extra Skyframe dependencies before it
   * executes; non-Skyframe-aware actions pass through unchanged.
   */
  private static void establishSkyframeDependencies(Environment env, Action action)
      throws ActionExecutionException, InterruptedException {
    // Before we may safely establish Skyframe dependencies, we must build all action inputs by
    // requesting their ArtifactValues.
    // This is very important to do, because the establishSkyframeDependencies method may request
    // FileValues for input files of this action (directly requesting them, or requesting some other
    // SkyValue whose builder requests FileValues), which may not yet exist if their generating
    // actions have not yet run.
    // See SkyframeAwareActionTest.testRaceConditionBetweenInputAcquisitionAndSkyframeDeps
    Preconditions.checkState(!env.valuesMissing(), action);
    if (action instanceof SkyframeAwareAction) {
      // Skyframe-aware actions should be executed unconditionally, i.e. bypass action cache
      // checking. See documentation of SkyframeAwareAction.
      Preconditions.checkState(action.executeUnconditionally(), action);
      try {
        ((SkyframeAwareAction) action).establishSkyframeDependencies(env);
      } catch (SkyframeAwareAction.ExceptionBase e) {
        throw new ActionExecutionException(e, action, false);
      }
    }
  }
  /**
   * Converts the given input artifacts into the SkyKeys used to request their metadata.
   *
   * @param inputs all inputs whose keys are wanted
   * @param mandatoryInputs the mandatory subset, or null when the action does not discover
   *     inputs (in which case every input is keyed as mandatory)
   */
  private static Iterable<SkyKey> toKeys(Iterable<Artifact> inputs,
      Iterable<Artifact> mandatoryInputs) {
    if (mandatoryInputs == null) {
      // This is a non inputs-discovering action, so no need to distinguish mandatory from regular
      // inputs.
      // Note: this returns a lazy Guava view, not a materialized collection.
      return Iterables.transform(
          inputs,
          new Function<Artifact, SkyKey>() {
            @Override
            public SkyKey apply(Artifact artifact) {
              return ArtifactSkyKey.key(artifact, true);
            }
          });
    } else {
      Collection<SkyKey> discoveredArtifacts = new HashSet<>();
      Set<Artifact> mandatory = Sets.newHashSet(mandatoryInputs);
      for (Artifact artifact : inputs) {
        // Inputs absent from the mandatory set are keyed with mandatory=false —
        // presumably so missing discovered inputs are tolerated; TODO confirm.
        discoveredArtifacts.add(ArtifactSkyKey.key(artifact, mandatory.contains(artifact)));
      }
      return discoveredArtifacts;
    }
  }
  /**
   * Declare dependency on all known inputs of action. Throws exception if any are known to be
   * missing. Some inputs may not yet be in the graph, in which case the builder should abort.
   *
   * <p>Returns a pair of (input artifact -> metadata) and (aggregate artifact -> expanded
   * members); both maps are populated only when no deps were missing.
   */
  private Pair<Map<Artifact, FileArtifactValue>, Map<Artifact, Collection<Artifact>>>
  checkInputs(Environment env, Action action,
      Map<SkyKey, ValueOrException2<MissingInputFileException, ActionExecutionException>> inputDeps)
      throws ActionExecutionException {
    int missingCount = 0;
    int actionFailures = 0;
    boolean catastrophe = false;
    // Only populate input data if we have the input values, otherwise they'll just go unused.
    // We still want to loop through the inputs to collect missing deps errors. During the
    // evaluator "error bubbling", we may get one last chance at reporting errors even though
    // some deps are still missing.
    boolean populateInputData = !env.valuesMissing();
    NestedSetBuilder<Cause> rootCauses = NestedSetBuilder.stableOrder();
    Map<Artifact, FileArtifactValue> inputArtifactData =
        new HashMap<>(populateInputData ? inputDeps.size() : 0);
    Map<Artifact, Collection<Artifact>> expandedArtifacts =
        new HashMap<>(populateInputData ? 128 : 0);
    ActionExecutionException firstActionExecutionException = null;
    for (Map.Entry<SkyKey, ValueOrException2<MissingInputFileException,
        ActionExecutionException>> depsEntry : inputDeps.entrySet()) {
      Artifact input = ArtifactSkyKey.artifact(depsEntry.getKey());
      try {
        // get() rethrows the dep's MissingInputFileException/ActionExecutionException, if any.
        SkyValue value = depsEntry.getValue().get();
        if (populateInputData) {
          if (value instanceof AggregatingArtifactValue) {
            // Aggregating artifact: record each constituent's metadata plus the aggregate itself.
            AggregatingArtifactValue aggregatingValue = (AggregatingArtifactValue) value;
            for (Pair<Artifact, FileArtifactValue> entry : aggregatingValue.getInputs()) {
              inputArtifactData.put(entry.first, entry.second);
            }
            // We have to cache the "digest" of the aggregating value itself,
            // because the action cache checker may want it.
            inputArtifactData.put(input, aggregatingValue.getSelfData());
            ImmutableList.Builder<Artifact> expansionBuilder = ImmutableList.builder();
            for (Pair<Artifact, FileArtifactValue> pair : aggregatingValue.getInputs()) {
              expansionBuilder.add(pair.first);
            }
            expandedArtifacts.put(input, expansionBuilder.build());
          } else if (value instanceof TreeArtifactValue) {
            // Tree artifact: expand to its children and record per-child metadata.
            TreeArtifactValue treeValue = (TreeArtifactValue) value;
            expandedArtifacts.put(input, ImmutableSet.<Artifact>copyOf(treeValue.getChildren()));
            inputArtifactData.putAll(treeValue.getChildValues());
            // Again, we cache the "digest" of the value for cache checking.
            inputArtifactData.put(input, treeValue.getSelfData());
          } else {
            Preconditions.checkState(value instanceof FileArtifactValue, depsEntry);
            inputArtifactData.put(input, (FileArtifactValue) value);
          }
        }
      } catch (MissingInputFileException e) {
        // Count the miss; the owning label becomes a root cause for the error report below.
        missingCount++;
        if (input.getOwner() != null) {
          rootCauses.add(new LabelCause(input.getOwner()));
        }
      } catch (ActionExecutionException e) {
        actionFailures++;
        if (firstActionExecutionException == null) {
          firstActionExecutionException = e;
        }
        catastrophe = catastrophe || e.isCatastrophe();
        rootCauses.addTransitive(e.getRootCauses());
      }
    }
    // We need to rethrow first exception because it can contain useful error message
    if (firstActionExecutionException != null) {
      if (missingCount == 0 && actionFailures == 1) {
        // In the case a single action failed, just propagate the exception upward. This avoids
        // having to copy the root causes to the upwards transitive closure.
        throw firstActionExecutionException;
      }
      throw new ActionExecutionException(firstActionExecutionException.getMessage(),
          firstActionExecutionException.getCause(), action, rootCauses.build(), catastrophe,
          firstActionExecutionException.getExitCode());
    }
    if (missingCount > 0) {
      // Emit one error event per missing input before failing the whole action.
      for (Cause missingInput : rootCauses.build()) {
        env.getListener()
            .handle(
                Event.error(
                    action.getOwner().getLocation(),
                    String.format(
                        "%s: missing input file '%s'",
                        action.getOwner().getLabel(), missingInput.getLabel())));
      }
      throw new ActionExecutionException(missingCount + " input file(s) do not exist", action,
          rootCauses.build(), /*catastrophe=*/false);
    }
    return Pair.of(inputArtifactData, expandedArtifacts);
  }
private static Iterable<Artifact> filterKnownInputs(
Iterable<Artifact> newInputs, Set<Artifact> knownInputs) {
return Iterables.filter(newInputs, Predicates.not(Predicates.in(knownInputs)));
}
  /**
   * All info/warning messages associated with actions should be always displayed.
   */
  @Override
  public String extractTag(SkyKey skyKey) {
    // A null tag means events from this function are never filtered by tag.
    return null;
  }
  /**
   * Exception to be thrown if an action is missing Skyframe dependencies that it finds are missing
   * during execution/input discovery.
   *
   * <p>Used as a control-flow signal; it intentionally carries no message or state.
   */
  public static class MissingDepException extends RuntimeException {}
  /**
   * Should be called once execution is over, and the intra-build cache of in-progress computations
   * should be discarded. If the cache is non-empty (due to an interrupted/failed build), failure to
   * call complete() can both cause a memory leak and incorrect results on the subsequent build.
   */
  @Override
  public void complete() {
    // Discard all remaining state (there should be none after a successful execution).
    // Replacing the map drops every entry at once rather than clearing in place.
    stateMap = Maps.newConcurrentMap();
  }
private ContinuationState getState(Action action) {
ContinuationState state = stateMap.get(action);
if (state == null) {
state = new ContinuationState();
Preconditions.checkState(stateMap.put(action, state) == null, action);
}
return state;
}
  /**
   * State to save work across restarts of ActionExecutionFunction due to missing values in the
   * graph for actions that discover inputs. There are three places where we save work, all for
   * actions that discover inputs:
   * <ol>
   * <li>If not all known input metadata (coming from Action#getInputs) is available yet, then the
   * calculated set of inputs (including the inputs resolved from the action cache) is saved.</li>
   * <li>If not all discovered inputs' metadata is available yet, then the known input metadata
   * together with the set of discovered inputs is saved, as well as the Token used to identify
   * this action to the action cache.</li>
   * <li>If, after execution, new inputs are discovered whose metadata is not yet available, then
   * the same data as in the previous case is saved, along with the actual result of execution.
   * </li>
   * </ol>
   */
  private static class ContinuationState {
    // Set once the full input set (including action-cache-resolved inputs) is known.
    AllInputs allInputs;
    /** Mutable map containing metadata for known artifacts. */
    Map<Artifact, FileArtifactValue> inputArtifactData = null;
    // Expansion of aggregate/tree artifacts; non-null exactly when inputArtifactData is.
    Map<Artifact, Collection<Artifact>> expandedArtifacts = null;
    // Action-cache token; non-null once the cache was checked and missed.
    Token token = null;
    Iterable<Artifact> discoveredInputs = null;
    Iterable<Artifact> discoveredInputsStage2 = null;
    // Result of execution; non-null once the action actually ran.
    ActionExecutionValue value = null;

    boolean hasCollectedInputs() {
      return allInputs != null;
    }

    boolean hasArtifactData() {
      boolean result = inputArtifactData != null;
      // Invariant: inputArtifactData and expandedArtifacts are set together.
      Preconditions.checkState(result == (expandedArtifacts != null), this);
      return result;
    }

    boolean hasCheckedActionCache() {
      // If token is null because there was an action cache hit, this method is never called again
      // because we return immediately.
      return token != null;
    }

    boolean hasExecutedAction() {
      return value != null;
    }

    @Override
    public String toString() {
      return token + ", " + value + ", " + allInputs + ", " + inputArtifactData + ", "
          + discoveredInputs;
    }
  }
  /**
   * Used to declare all the exception types that can be wrapped in the exception thrown by
   * {@link ActionExecutionFunction#compute}.
   */
  private static final class ActionExecutionFunctionException extends SkyFunctionException {

    // Kept so isCatastrophic() can delegate to the wrapped exception.
    private final ActionExecutionException actionException;

    public ActionExecutionFunctionException(ActionExecutionException e) {
      // We conservatively assume that the error is transient. We don't have enough information to
      // distinguish non-transient errors (e.g. compilation error from a deterministic compiler)
      // from transient ones (e.g. IO error).
      // TODO(bazel-team): Have ActionExecutionExceptions declare their transience.
      super(e, Transience.TRANSIENT);
      this.actionException = e;
    }

    @Override
    public boolean isCatastrophic() {
      return actionException.isCatastrophe();
    }
  }
}
|
variac/bazel
|
src/main/java/com/google/devtools/build/lib/skyframe/ActionExecutionFunction.java
|
Java
|
apache-2.0
| 35,383
|
package kubeapiserver
import (
"io/ioutil"
"path"
"k8s.io/apimachinery/pkg/runtime"
"github.com/golang/glog"
configapi "github.com/openshift/origin/pkg/cmd/server/apis/config"
configapilatest "github.com/openshift/origin/pkg/cmd/server/apis/config/latest"
"github.com/openshift/origin/pkg/oc/clusterup/coreinstall/tmpformac"
)
// MakeOpenShiftControllerConfig copies the existing master config into a directory under
// basedir for the openshift controller manager, rewrites its serving bind address, and
// returns the path of the new config directory.
func MakeOpenShiftControllerConfig(existingMasterConfig string, basedir string) (string, error) {
	configDir := path.Join(basedir, OpenShiftControllerManagerDirName)
	// FIX: the log line previously said "kube-apiserver" — a copy-paste from the
	// kubeapiserver package this file lives in; this function handles the
	// openshift-controller-manager config.
	glog.V(1).Infof("Copying openshift-controller-manager config to local directory %s", configDir)
	if err := tmpformac.CopyDirectory(existingMasterConfig, configDir); err != nil {
		return "", err
	}

	// Rewrite the bind address so this component listens on its own port (8444).
	// NOTE(review): the original comment claimed this was about "starting the DNS server",
	// but the code only changes ServingInfo.BindAddress — confirm intent.
	masterconfigFilename := path.Join(configDir, "master-config.yaml")
	originalBytes, err := ioutil.ReadFile(masterconfigFilename)
	if err != nil {
		return "", err
	}
	configObj, err := runtime.Decode(configapilatest.Codec, originalBytes)
	if err != nil {
		return "", err
	}
	// Panics if the decoded object is not a MasterConfig; acceptable here because the
	// codec is the master-config codec.
	masterconfig := configObj.(*configapi.MasterConfig)
	masterconfig.ServingInfo.BindAddress = "0.0.0.0:8444"
	configBytes, err := configapilatest.WriteYAML(masterconfig)
	if err != nil {
		return "", err
	}
	if err := ioutil.WriteFile(masterconfigFilename, configBytes, 0644); err != nil {
		return "", err
	}
	return configDir, nil
}
|
legionus/origin
|
pkg/oc/clusterup/coreinstall/kubeapiserver/openshift_controller.go
|
GO
|
apache-2.0
| 1,396
|
<!-- German localization page for the Blockly graphing-calculator demo.
     Loads shared app scripts plus locale-specific messages (de/_messages.js). -->
<html lang="de">
<head>
  <meta charset="utf-8">
  <title>Blockly Graphing Calculator</title>
  <link rel="stylesheet" type="text/css" href="style.css">
  <script type="text/javascript" src="/storage.js"></script>
  <script type="text/javascript" src="../_soy/soyutils.js"></script>
  <script type="text/javascript" src="template.js"></script>
  <!-- NOTE(review): loaded over plain http; on an https page this is blocked as
       mixed content — consider a protocol-relative or https URL. -->
  <script type="text/javascript" src="http://www.google.com/jsapi"></script>
  <script>
    // Locale strings consumed by the app scripts (values are user-visible German text).
    var MSG = {
      // Tooltips.
      linkTooltip: 'Blöcke abspeichern und Link erzeugen.',
      // Toolbox categories.
      catLogic: 'Logik',
      catMath: 'Mathe',
      catVariables: 'Variablen',
      // Misc text.
      unloadWarning: 'Ihre Arbeit geht verloren, wenn Sie diese Seite verlassen.',
      title: 'Graph'
    };
    // Override cloud-storage messages only when /storage.js actually loaded.
    if ('BlocklyStorage' in window) {
      BlocklyStorage.HTTPREQUEST_ERROR = 'Es gab ein Problem während der HTTP Anfrage.\n';
      BlocklyStorage.LINK_ALERT = 'Blöcke mit diesem Link teilen:\n\n';
      BlocklyStorage.HASH_ERROR = 'Entschuldigung, konnte die gespeicherten Blöcke für "%1" nicht finden.';
      BlocklyStorage.XML_ERROR = 'Kann Sicherungsdatei nicht laden.\n'+
          'Diese wurde vermutlich mit einer veralteten Version von Blockly erstellt?\nXML: ';
    }
    // Scripts the Blockly frame loads (locale messages first, then block definitions).
    var frameSrc = ['de/_messages.js',
        'common/logic.js',
        'common/math.js'];
  </script>
</head>
<body>
  <script type="text/javascript" src="graph.js"></script>
</body>
</html>
|
velniukas/blockly.cascading.jruby
|
apps/graph/de.html
|
HTML
|
apache-2.0
| 1,449
|
---
title: Managing access tokens
description: Learn how to create and manage your personal Docker Hub access tokens to securely push and pull images programmatically
keywords: docker hub, hub, security, PAT, personal access token
redirect_to: /docker-hub/access-tokens/
---
|
docker/docker.github.io
|
go/access-tokens.md
|
Markdown
|
apache-2.0
| 274
|
/*
Copyright (c) 2011 Stanislav Vitvitskiy
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.mp4parser.muxer.tracks.h264.parsing.model;
import java.io.IOException;
import java.io.OutputStream;
/**
 * Base class for H.264 bitstream syntax elements that can serialize themselves to a stream.
 */
public abstract class BitstreamElement {

    /**
     * Writes this element's binary representation to the given output stream.
     *
     * @param out destination stream
     * @throws IOException if writing fails
     */
    public abstract void write(OutputStream out) throws IOException;
}
|
olegloa/mp4parser
|
muxer/src/main/java/org/mp4parser/muxer/tracks/h264/parsing/model/BitstreamElement.java
|
Java
|
apache-2.0
| 1,297
|
///////////////////////////////////////////////////////////////
// Copyright 2011 John Maddock. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt
#include "setup.hpp"
#include "table_type.hpp"
#define TEST_UDT
#define TEST_DATA 4
#include <boost/math/special_functions/math_fwd.hpp>
#include "libs/math/test/test_ibeta.hpp"
// Registers the maximum / mean error tolerances the test harness accepts for each
// data group; the wildcards match any compiler, stdlib, platform, and test type.
void expected_results()
{
   //
   // Define the max and mean errors expected for
   // various compilers and platforms.
   //
   // NOTE(review): two entries below match the same "(?i).*small.*" group with very
   // different tolerances (4000/1000 vs 90/25); whichever one the harness applies
   // makes the other redundant — confirm against the lookup order of
   // add_expected_result in the test framework.
   add_expected_result(
      "[^|]*",                          // compiler
      "[^|]*",                          // stdlib
      "[^|]*",                          // platform
      ".*",                             // test type(s)
      "(?i).*small.*",                  // test data group
      ".*", 4000, 1000);                // test function
   add_expected_result(
      "[^|]*",                          // compiler
      "[^|]*",                          // stdlib
      "[^|]*",                          // platform
      ".*",                             // test type(s)
      "(?i).*small.*",                  // test data group
      ".*", 90, 25);                    // test function
   add_expected_result(
      "[^|]*",                          // compiler
      "[^|]*",                          // stdlib
      "[^|]*",                          // platform
      ".*",                             // test type(s)
      "(?i).*medium.*",                 // test data group
      ".*", 200, 50);                   // test function
   add_expected_result(
      "[^|]*",                          // compiler
      "[^|]*",                          // stdlib
      "[^|]*",                          // platform
      ".*",                             // test type(s)
      "(?i).*large.*",                  // test data group
      ".*", 5000, 500);                 // test function
   //
   // Finish off by printing out the compiler/stdlib/platform names,
   // we do this to make it easier to mark up expected error rates.
   //
   std::cout << "Tests run with " << BOOST_COMPILER << ", "
      << BOOST_STDLIB << ", " << BOOST_PLATFORM << std::endl;
}
// Runs the incomplete-beta test battery (from test_ibeta.hpp, where it is named
// test_beta) for the numeric type T; p labels the type in test output.
template <class T>
void test(T t, const char* p)
{
   test_beta(t, p);
}
// Entry point: registers tolerances, then ALL_TESTS (defined by setup.hpp / the
// TEST_* macros above) instantiates test() for each configured precision.
BOOST_AUTO_TEST_CASE( test_main )
{
   using namespace boost::multiprecision;
   expected_results();
   //
   // Test at:
   // 18 decimal digits: tests 80-bit long double approximations
   // 30 decimal digits: tests 128-bit long double approximations
   // 35 decimal digits: tests arbitrary precision code
   //
   ALL_TESTS
}
|
graetzer/arangodb
|
3rdParty/boost/1.71.0/libs/multiprecision/test/math/test_ibeta_4.cpp
|
C++
|
apache-2.0
| 2,606
|
// Decompiled by Jad v1.5.8e. Copyright 2001 Pavel Kouznetsov.
// Jad home page: http://www.geocities.com/kpdus/jad.html
// Decompiler options: braces fieldsfirst space lnc
package com.tencent.connect.common;
import android.content.Context;
import android.os.Handler;
import android.os.Message;
import com.tencent.tauth.IRequestListener;
import com.tencent.tauth.IUiListener;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.SocketTimeoutException;
import org.apache.http.conn.ConnectTimeoutException;
import org.json.JSONException;
import org.json.JSONObject;
// Referenced classes of package com.tencent.connect.common:
// a, BaseApi
public class c
implements IRequestListener
{
final BaseApi a;
private IUiListener b;
private Handler c;
static IUiListener a(c c1)
{
return c1.b;
}
public void onComplete(JSONObject jsonobject)
{
Message message = c.obtainMessage();
message.obj = jsonobject;
message.what = 0;
c.sendMessage(message);
}
public void onConnectTimeoutException(ConnectTimeoutException connecttimeoutexception)
{
Message message = c.obtainMessage();
message.obj = connecttimeoutexception.getMessage();
message.what = -7;
c.sendMessage(message);
}
public void onHttpStatusException(com.tencent.utils.n n)
{
Message message = c.obtainMessage();
message.obj = n.getMessage();
message.what = -9;
c.sendMessage(message);
}
public void onIOException(IOException ioexception)
{
Message message = c.obtainMessage();
message.obj = ioexception.getMessage();
message.what = -2;
c.sendMessage(message);
}
public void onJSONException(JSONException jsonexception)
{
Message message = c.obtainMessage();
message.obj = jsonexception.getMessage();
message.what = -4;
c.sendMessage(message);
}
public void onMalformedURLException(MalformedURLException malformedurlexception)
{
Message message = c.obtainMessage();
message.obj = malformedurlexception.getMessage();
message.what = -3;
c.sendMessage(message);
}
public void onNetworkUnavailableException(com.tencent.utils.Exception exception)
{
Message message = c.obtainMessage();
message.obj = exception.getMessage();
message.what = -10;
c.sendMessage(message);
}
public void onSocketTimeoutException(SocketTimeoutException sockettimeoutexception)
{
Message message = c.obtainMessage();
message.obj = sockettimeoutexception.getMessage();
message.what = -8;
c.sendMessage(message);
}
public void onUnknowException(Exception exception)
{
Message message = c.obtainMessage();
message.obj = exception.getMessage();
message.what = -6;
c.sendMessage(message);
}
public (BaseApi baseapi, IUiListener iuilistener)
{
a = baseapi;
super();
b = iuilistener;
c = new a(this, baseapi.mContext.getMainLooper(), baseapi);
}
}
|
vishnudevk/MiBandDecompiled
|
Original Files/source/src/com/tencent/connect/common/BaseApi$TempRequestListener.java
|
Java
|
apache-2.0
| 3,195
|
using System;
using DotVVM.Framework.Compilation.ControlTree;
using DotVVM.Framework.Compilation.ControlTree.Resolved;
namespace DotVVM.Framework.Compilation
{
    /// <summary>
    /// Runtime description of a DotVVM control: its CLR type, the builder that constructs it,
    /// an optional markup virtual path, and an optional required data-context type.
    /// Equality and hashing are based on <see cref="Type"/> and <see cref="ControlBuilderType"/>
    /// only; <see cref="VirtualPath"/> and <see cref="DataContextRequirement"/> do not participate.
    /// </summary>
    public class ControlType : IControlType
    {
        public Type Type { get; private set; }

        public Type ControlBuilderType { get; private set; }

        public string VirtualPath { get; private set; }

        public Type DataContextRequirement { get; private set; }

        // Explicit interface members expose the CLR types wrapped in descriptor objects.
        ITypeDescriptor IControlType.Type => new ResolvedTypeDescriptor(Type);

        ITypeDescriptor IControlType.DataContextRequirement => DataContextRequirement != null ? new ResolvedTypeDescriptor(DataContextRequirement) : null;

        /// <summary>
        /// Initializes a new instance of the <see cref="ControlType"/> class.
        /// </summary>
        public ControlType(Type type, Type controlBuilderType = null, string virtualPath = null, Type dataContextRequirement = null)
        {
            Type = type;
            ControlBuilderType = controlBuilderType;
            VirtualPath = virtualPath;
            DataContextRequirement = dataContextRequirement;
        }

        public override bool Equals(object obj)
        {
            if (ReferenceEquals(null, obj))
            {
                return false;
            }
            if (ReferenceEquals(this, obj))
            {
                return true;
            }
            // Exact type match required; subclasses are never equal to a ControlType.
            if (obj.GetType() != this.GetType())
            {
                return false;
            }
            return Equals((ControlType)obj);
        }

        protected bool Equals(ControlType other)
        {
            return Equals(Type, other.Type) && Equals(ControlBuilderType, other.ControlBuilderType);
        }

        public override int GetHashCode()
        {
            unchecked
            {
                // Combines the same two members used by Equals, so the contract holds.
                return ((Type != null ? Type.GetHashCode() : 0) * 397) ^ (ControlBuilderType != null ? ControlBuilderType.GetHashCode() : 0);
            }
        }
    }
}
|
kiraacorsac/dotvvm
|
src/DotVVM.Framework/Compilation/ControlType.cs
|
C#
|
apache-2.0
| 2,002
|
package com.redhat.ceylon.common.tool;
import java.io.File;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.regex.Pattern;
/**
 * Standard {@link ArgumentParser} implementations for common option types (strings, booleans,
 * integers, URIs, paths, enums), plus {@link #forClass} which selects a parser for a setter type.
 */
public class StandardArgumentParsers {

    /** Identity parser for plain string arguments. */
    public static final ArgumentParser<String> CHAR_SEQUENCE_PARSER = new ArgumentParser<String>() {
        @Override
        public String parse(String argument, Tool tool) {
            // FIX: was `argument != null ? argument : null`, a no-op ternary; this is
            // the same identity behavior without the misleading null check.
            return argument;
        }
    };

    /** Parses "1", "yes" or "true" as true; anything else (case-sensitive) as false. */
    public static final ArgumentParser<Boolean> BOOLEAN_PARSER = new ArgumentParser<Boolean>() {
        @Override
        public Boolean parse(String argument, Tool tool) {
            return argument.matches("1|yes|true");
        }
    };

    /** Parses a decimal integer; throws NumberFormatException on malformed input. */
    public static final ArgumentParser<Integer> INTEGER_PARSER = new ArgumentParser<Integer>() {
        @Override
        public Integer parse(String argument, Tool tool) {
            return Integer.valueOf(argument);
        }
    };

    /**
     * Parses a URI, falling back to backslash-normalization and finally to interpreting the
     * argument as a local file path (useful for Windows-style inputs).
     */
    public static final ArgumentParser<URI> URI_PARSER = new ArgumentParser<URI>() {
        @Override
        public URI parse(String argument, Tool tool) {
            try {
                return new URI(argument);
            } catch (URISyntaxException e) {
                try {
                    return new URI(argument.replace('\\', '/'));
                } catch (URISyntaxException e2) {
                    File f = new File(argument);
                    return f.toURI();
                }
            }
        }
    };

    /** Splits a platform path list (File.pathSeparator-delimited) into File entries. */
    public static class PathArgumentParser implements ArgumentParser<List<File>> {
        @Override
        public List<File> parse(String argument, Tool tool) {
            String[] dirs = argument.split(Pattern.quote(File.pathSeparator));
            ArrayList<File> result = new ArrayList<File>(dirs.length);
            for (String dir : dirs) {
                result.add(new File(dir));
            }
            return result;
        }
    };

    public static final PathArgumentParser PATH_PARSER = new PathArgumentParser();

    /**
     * Parses by invoking the target type's single-String constructor, unwrapping
     * InvocationTargetException so the original RuntimeException/Error propagates.
     */
    public static class ConstructorArgumentParser<T> implements ArgumentParser<T> {
        private Constructor<T> ctor;
        public ConstructorArgumentParser(Class<T> clazz) {
            try {
                this.ctor = clazz.getConstructor(String.class);
            } catch (NoSuchMethodException e) {
                throw new RuntimeException(e);
            }
        }
        @Override
        public T parse(String argument, Tool tool) {
            try {
                return ctor.newInstance(argument);
            } catch (InvocationTargetException e) {
                // Rethrow the constructor's own unchecked failure rather than the reflection wrapper.
                Throwable cause = e.getCause();
                if (cause instanceof RuntimeException) {
                    throw (RuntimeException)cause;
                } else if (cause instanceof Error) {
                    throw (Error)cause;
                } else {
                    throw new RuntimeException(e);
                }
            } catch (InstantiationException e) {
                throw new RuntimeException(e);
            } catch (IllegalAccessException e) {
                throw new RuntimeException(e);
            } catch (IllegalArgumentException e) {
                throw new RuntimeException(e);
            }
        }
    }

    /**
     * Shared base for enum parsers: optional "denormalization" maps '-' to '_' and matches
     * enum constant names case-insensitively.
     */
    public abstract static class EnumParserBase<A, E extends Enum<E>> implements EnumerableParser<A> {
        protected final boolean denormalize;
        protected final Class<E> enumClass;
        public EnumParserBase(Class<E> enumClass, boolean denormalize) {
            this.enumClass = enumClass;
            this.denormalize = denormalize;
        }
        protected String denormalize(String argument) {
            argument = argument.replace('-', '_');
            for (String nm : possibilities()) {
                if (nm.equalsIgnoreCase(argument)) {
                    argument = nm;
                }
            }
            return argument;
        }
        protected E valueOf(String name) {
            return EnumUtil.valueOf(enumClass, name);
        }
        @Override
        public Iterable<String> possibilities() {
            return EnumUtil.possibilities(enumClass);
        }
    }

    /** Parses a single enum constant. */
    public static class EnumArgumentParser<E extends Enum<E>> extends EnumParserBase<E, E> {
        public EnumArgumentParser(Class<E> enumClass, boolean denormalize) {
            super(enumClass, denormalize);
        }
        @Override
        public E parse(String argument, Tool tool) {
            if (denormalize) {
                argument = denormalize(argument);
            }
            return valueOf(argument);
        }
    }

    /** Parses a comma-separated enum list; an empty argument yields ALL constants. */
    public static class EnumArgumentsParser<E extends Enum<E>> extends EnumParserBase<List<E>, E> {
        public EnumArgumentsParser(Class<E> enumClass, boolean denormalize) {
            super(enumClass, denormalize);
        }
        @Override
        public List<E> parse(String argument, Tool tool) {
            if (!argument.isEmpty()) {
                String[] elems = argument.split(",");
                ArrayList<E> result = new ArrayList<E>(elems.length);
                for (String elem : elems) {
                    elem = elem.trim();
                    if (denormalize) {
                        elem = denormalize(elem);
                    }
                    result.add(valueOf(elem));
                }
                return result;
            } else {
                return new ArrayList<>(EnumSet.allOf(enumClass));
            }
        }
    }

    /**
     * Selects a parser appropriate for the given setter type, or null if none applies.
     *
     * @param isSimpleType true for a single-valued setter (one enum) vs. a multi-valued one
     */
    public static ArgumentParser<?> forClass(Class<?> setterType, ToolLoader toolLoader, boolean isSimpleType) {
        if (CharSequence.class.isAssignableFrom(setterType)) {
            return CHAR_SEQUENCE_PARSER;
        } else if (Integer.class.isAssignableFrom(setterType)
                || Integer.TYPE.isAssignableFrom(setterType)) {
            return INTEGER_PARSER;
        } else if (Boolean.class.isAssignableFrom(setterType)
                || Boolean.TYPE.isAssignableFrom(setterType)) {
            return BOOLEAN_PARSER;
        } else if (File.class.isAssignableFrom(setterType)) {
            return new ConstructorArgumentParser<>(File.class);
        } else if (URI.class.isAssignableFrom(setterType)) {
            return URI_PARSER;
        } else if (URL.class.isAssignableFrom(setterType)) {
            return new ConstructorArgumentParser<>(URL.class);
        } else if (Enum.class.isAssignableFrom(setterType)) {
            // Raw-typed construction (setterType is Class<?>); unchecked but unavoidable here.
            if (isSimpleType) {
                return new EnumArgumentParser(setterType, true);
            } else {
                return new EnumArgumentsParser(setterType, true);
            }
        } else if (ToolModel.class.isAssignableFrom(setterType)) {
            return new ToolModelArgumentParser(toolLoader);
        } /*else if (Tool.class.isAssignableFrom(setterType)) {
            return new ToolArgumentParser(toolLoader);
        }*/
        return null;
    }
}
|
ceylon/ceylon-common
|
src/com/redhat/ceylon/common/tool/StandardArgumentParsers.java
|
Java
|
apache-2.0
| 7,170
|
package com.kuxhausen.huemore.net;
import com.kuxhausen.huemore.state.BulbState;
import java.util.List;
/**
 * Manages brightness for a group of bulbs under one of two policies:
 * DIRECT_BRI applies brightness values to bulbs as-is; VOLUME_BRI scales every bulb's
 * brightness by a group-wide "volume" percentage (1-100).
 */
public class BrightnessManager {

  private BrightnessPolicy mPolicy;
  /**
   * 1 - 100
   */
  private Integer mVolumeBri;
  private List<NetworkBulb> mBulbs;

  public BrightnessManager(List<NetworkBulb> bulbs) {
    if (bulbs == null) {
      throw new IllegalArgumentException();
    }
    mBulbs = bulbs;
    mPolicy = BrightnessPolicy.DIRECT_BRI;
  }

  public void setPolicy(BrightnessPolicy policy) {
    if (policy == null) {
      throw new IllegalArgumentException();
    }
    mPolicy = policy;
    // Volume is meaningless in direct mode; drop it so a later switch back recomputes it.
    if (mPolicy == BrightnessPolicy.DIRECT_BRI) {
      mVolumeBri = null;
    }
  }

  public BrightnessPolicy getPolicy() {
    return mPolicy;
  }

  /**
   * Returns the bulb's state; in volume mode the reported brightness is un-scaled back into
   * the 0-100 logical range (inverse of the scaling applied in {@link #setState}).
   */
  public BulbState getState(NetworkBulb netBulb, NetworkBulb.GetStateConfidence confidence) {
    if (netBulb == null || !mBulbs.contains(netBulb) || confidence == null) {
      throw new IllegalArgumentException();
    }
    BulbState adjusted = netBulb.getState(confidence).clone();
    if (mPolicy == BrightnessPolicy.VOLUME_BRI && adjusted.get255Bri() != null) {
      Integer volume = mVolumeBri;
      if (volume == null) {
        volume = getAveragePercentBrightness(mBulbs, NetworkBulb.GetStateConfidence.KNOWN);
      }
      // FIX: was `((int) Math.round(bri * 100.0) / volume)`, which rounded BEFORE the
      // integer division and so truncated the quotient. Rounding the quotient makes this
      // the proper inverse of setState's `round((volume * bri) / 100.0)`.
      adjusted.set255Bri((int) Math.round(adjusted.get255Bri() * 100.0 / volume));
    }
    return adjusted;
  }

  /** Applies a target state to a bulb, scaling brightness by the group volume in volume mode. */
  public void setState(NetworkBulb netBulb, BulbState targetState) {
    if (netBulb == null || targetState == null || !mBulbs.contains(netBulb)) {
      throw new IllegalArgumentException(
          (netBulb == null) + "," + (targetState == null) + "," + (!mBulbs.contains(netBulb)));
    }
    BulbState adjusted = targetState.clone();
    //in volume bri mode, new color with unspecified bri is assumed to at 100% of current volume
    if (mPolicy == BrightnessPolicy.VOLUME_BRI && adjusted.get255Bri() == null && (
        adjusted.getMiredCT() != null || adjusted.hasXY())) {
      adjusted.set255Bri(255);
    }
    if (mPolicy == BrightnessPolicy.VOLUME_BRI && adjusted.get255Bri() != null) {
      if (mVolumeBri == null) {
        //calculate existing volume bri as average bri
        mVolumeBri = getAveragePercentBrightness(mBulbs, NetworkBulb.GetStateConfidence.KNOWN);
      }
      adjusted.set255Bri((int) Math.round((mVolumeBri * adjusted.get255Bri()) / 100.0));
    }
    netBulb.setState(adjusted);
  }

  /**
   * Does not update lights, only valid in volume mode
   * @param newVolume ranges from 1 to 100
   */
  public void setVolumeWithoutUpdate(int newVolume) {
    if (mPolicy != BrightnessPolicy.VOLUME_BRI) {
      throw new IllegalStateException();
    }
    mVolumeBri = newVolume;
  }

  /**
   * Sets group brightness (clamped to 1-100). In volume mode each bulb is rescaled
   * proportionally from the old volume to the new one; otherwise all bulbs get the value.
   *
   * @param brightness ranges from 1 to 100
   */
  public void setBrightness(int brightness) {
    int newBrightness = Math.max(1, Math.min(100, brightness));
    if (mPolicy == BrightnessPolicy.VOLUME_BRI) {
      if (mVolumeBri == null) {
        //calculate existing volume bri as average brightness
        mVolumeBri = getAveragePercentBrightness(mBulbs, NetworkBulb.GetStateConfidence.KNOWN);
      }
      int oldVolume = mVolumeBri;
      int newVolume = newBrightness;
      for (NetworkBulb bulb : mBulbs) {
        Integer
            oldPhysicalBri =
            bulb.getState(NetworkBulb.GetStateConfidence.KNOWN).getPercentBri();
        if (oldPhysicalBri == null) {
          oldPhysicalBri = oldVolume;
        }
        // Cap at the old volume so the rescale never pushes a bulb past 100%.
        oldPhysicalBri = Math.min(oldPhysicalBri, oldVolume);
        int newPhysicalBri = (oldPhysicalBri * newVolume) / oldVolume;
        BulbState bs = new BulbState();
        bs.setPercentBri(newPhysicalBri);
        bs.setTransitionTime(BulbState.TRANSITION_TIME_BRIGHTNESS_BAR);
        bulb.setState(bs);
      }
      mVolumeBri = newVolume;
    } else {
      BulbState bs = new BulbState();
      bs.setPercentBri(newBrightness);
      bs.setTransitionTime(BulbState.TRANSITION_TIME_BRIGHTNESS_BAR);
      for (NetworkBulb bulb : mBulbs) {
        bulb.setState(bs);
      }
    }
  }

  /**
   * @return brightness ranging from 1 to 100
   */
  public int getBrightness() {
    if (mPolicy == BrightnessPolicy.VOLUME_BRI) {
      if (mVolumeBri == null) {
        //calculate existing volume bri as average brightness
        return getAveragePercentBrightness(mBulbs, NetworkBulb.GetStateConfidence.KNOWN);
      } else {
        return mVolumeBri;
      }
    } else {
      return getAveragePercentBrightness(mBulbs, NetworkBulb.GetStateConfidence.GUESS);
    }
  }

  //calculate the largest brightness among the group, returning 1 if no bulbs are sufficiently confident of brightness
  // NOTE(review): currently unused within this class; kept for API/behavior stability.
  private static int getLargestPercentBrightness(List<NetworkBulb> list) {
    if (list == null) {
      throw new IllegalArgumentException();
    }
    int briMax = 1;
    for (NetworkBulb bulb : list) {
      Integer physicalBri = bulb.getState(NetworkBulb.GetStateConfidence.KNOWN).getPercentBri();
      if (physicalBri != null && physicalBri > briMax) {
        briMax = physicalBri;
      }
    }
    return briMax;
  }

  //calculate the average brightness among the group, returning 1 if no bulbs are sufficiently confident of brightness
  private static int getAveragePercentBrightness(List<NetworkBulb> list,
                                                 NetworkBulb.GetStateConfidence confidence) {
    if (list == null) {
      throw new IllegalArgumentException();
    }
    int briSum = 0;
    int briNum = 0;
    for (NetworkBulb bulb : list) {
      Integer physicalBri = bulb.getState(confidence).getPercentBri();
      if (physicalBri != null) {
        briSum += physicalBri;
        briNum++;
      }
    }
    if (briNum == 0) {
      return 1;
    }
    return briSum / briNum;
  }

  public enum BrightnessPolicy {
    DIRECT_BRI, VOLUME_BRI
  }
}
|
ekux44/LampShade
|
mobile/src/main/java/com/kuxhausen/huemore/net/BrightnessManager.java
|
Java
|
apache-2.0
| 5,862
|
//
// Autogenerated by Thrift Compiler (0.9.0)
//
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
//
var Thrift = require('thrift').Thrift;
var ttypes = require('./recommendations_types');
//HELPER FUNCTIONS AND STRUCTURES
// Thrift-generated argument struct for LasService.listUserVisitRecords(start, limit).
// Autogenerated (Thrift 0.9.0) — regenerate from the IDL rather than hand-editing.
// Note: assigned without `var`, so these names become globals (generated style).
LasService_listUserVisitRecords_args = function(args) {
  this.start = null;
  this.limit = null;
  if (args) {
    if (args.start !== undefined) {
      this.start = args.start;
    }
    if (args.limit !== undefined) {
      this.limit = args.limit;
    }
  }
};
LasService_listUserVisitRecords_args.prototype = {};
// Deserializes the struct from a Thrift protocol, skipping unknown/mistyped fields.
LasService_listUserVisitRecords_args.prototype.read = function(input) {
  input.readStructBegin();
  while (true)
  {
    var ret = input.readFieldBegin();
    var fname = ret.fname;
    var ftype = ret.ftype;
    var fid = ret.fid;
    if (ftype == Thrift.Type.STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
      if (ftype == Thrift.Type.I32) {
        this.start = input.readI32();
      } else {
        input.skip(ftype);
      }
      break;
      case 2:
      if (ftype == Thrift.Type.I32) {
        this.limit = input.readI32();
      } else {
        input.skip(ftype);
      }
      break;
      default:
        input.skip(ftype);
    }
    input.readFieldEnd();
  }
  input.readStructEnd();
  return;
};
// Serializes only fields that are set (non-null, non-undefined).
LasService_listUserVisitRecords_args.prototype.write = function(output) {
  output.writeStructBegin('LasService_listUserVisitRecords_args');
  if (this.start !== null && this.start !== undefined) {
    output.writeFieldBegin('start', Thrift.Type.I32, 1);
    output.writeI32(this.start);
    output.writeFieldEnd();
  }
  if (this.limit !== null && this.limit !== undefined) {
    output.writeFieldBegin('limit', Thrift.Type.I32, 2);
    output.writeI32(this.limit);
    output.writeFieldEnd();
  }
  output.writeFieldStop();
  output.writeStructEnd();
  return;
};
// Thrift-generated result struct for listUserVisitRecords: field 0 ("success")
// is a list of UserVisitRecord. Autogenerated — do not hand-edit.
LasService_listUserVisitRecords_result = function(args) {
  this.success = null;
  if (args) {
    if (args.success !== undefined) {
      this.success = args.success;
    }
  }
};
LasService_listUserVisitRecords_result.prototype = {};
LasService_listUserVisitRecords_result.prototype.read = function(input) {
  input.readStructBegin();
  while (true)
  {
    var ret = input.readFieldBegin();
    var fname = ret.fname;
    var ftype = ret.ftype;
    var fid = ret.fid;
    if (ftype == Thrift.Type.STOP) {
      break;
    }
    switch (fid)
    {
      case 0:
      if (ftype == Thrift.Type.LIST) {
        var _size16 = 0;
        var _rtmp320;
        this.success = [];
        var _etype19 = 0;
        _rtmp320 = input.readListBegin();
        _etype19 = _rtmp320.etype;
        _size16 = _rtmp320.size;
        for (var _i21 = 0; _i21 < _size16; ++_i21)
        {
          var elem22 = null;
          elem22 = new ttypes.UserVisitRecord();
          elem22.read(input);
          this.success.push(elem22);
        }
        input.readListEnd();
      } else {
        input.skip(ftype);
      }
      break;
      // NOTE(review): this second `case 0` is unreachable — the first `case 0` above
      // always matches in a JS switch. Harmless artifact of the Thrift generator.
      case 0:
        input.skip(ftype);
        break;
      default:
        input.skip(ftype);
    }
    input.readFieldEnd();
  }
  input.readStructEnd();
  return;
};
LasService_listUserVisitRecords_result.prototype.write = function(output) {
  output.writeStructBegin('LasService_listUserVisitRecords_result');
  if (this.success !== null && this.success !== undefined) {
    output.writeFieldBegin('success', Thrift.Type.LIST, 0);
    output.writeListBegin(Thrift.Type.STRUCT, this.success.length);
    // for-in over array indices; hasOwnProperty guards inherited keys (generated style).
    for (var iter23 in this.success)
    {
      if (this.success.hasOwnProperty(iter23))
      {
        iter23 = this.success[iter23];
        iter23.write(output);
      }
    }
    output.writeListEnd();
    output.writeFieldEnd();
  }
  output.writeFieldStop();
  output.writeStructEnd();
  return;
};
// Thrift-generated argument struct for LasService.findVisitRecords.
// Wire schema: field 1 = userCode (STRING), field 2 = houseType (BYTE).
LasService_findVisitRecords_args = function(args) {
  this.userCode = null;
  this.houseType = null;
  if (args) {
    if (args.userCode !== undefined) {
      this.userCode = args.userCode;
    }
    if (args.houseType !== undefined) {
      this.houseType = args.houseType;
    }
  }
};
LasService_findVisitRecords_args.prototype = {};
// Deserializes this struct; unknown/mismatched fields are skipped.
LasService_findVisitRecords_args.prototype.read = function(input) {
  input.readStructBegin();
  while (true)
  {
    var ret = input.readFieldBegin();
    var fname = ret.fname;
    var ftype = ret.ftype;
    var fid = ret.fid;
    if (ftype == Thrift.Type.STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
      if (ftype == Thrift.Type.STRING) {
        this.userCode = input.readString();
      } else {
        input.skip(ftype);
      }
      break;
      case 2:
      if (ftype == Thrift.Type.BYTE) {
        this.houseType = input.readByte();
      } else {
        input.skip(ftype);
      }
      break;
      default:
        input.skip(ftype);
    }
    input.readFieldEnd();
  }
  input.readStructEnd();
  return;
};
// Serializes this struct; null/undefined fields are omitted from the wire.
LasService_findVisitRecords_args.prototype.write = function(output) {
  output.writeStructBegin('LasService_findVisitRecords_args');
  if (this.userCode !== null && this.userCode !== undefined) {
    output.writeFieldBegin('userCode', Thrift.Type.STRING, 1);
    output.writeString(this.userCode);
    output.writeFieldEnd();
  }
  if (this.houseType !== null && this.houseType !== undefined) {
    output.writeFieldBegin('houseType', Thrift.Type.BYTE, 2);
    output.writeByte(this.houseType);
    output.writeFieldEnd();
  }
  output.writeFieldStop();
  output.writeStructEnd();
  return;
};
LasService_findVisitRecords_result = function(args) {
this.success = null;
if (args) {
if (args.success !== undefined) {
this.success = args.success;
}
}
};
LasService_findVisitRecords_result.prototype = {};
LasService_findVisitRecords_result.prototype.read = function(input) {
input.readStructBegin();
while (true)
{
var ret = input.readFieldBegin();
var fname = ret.fname;
var ftype = ret.ftype;
var fid = ret.fid;
if (ftype == Thrift.Type.STOP) {
break;
}
switch (fid)
{
case 0:
if (ftype == Thrift.Type.LIST) {
var _size24 = 0;
var _rtmp328;
this.success = [];
var _etype27 = 0;
_rtmp328 = input.readListBegin();
_etype27 = _rtmp328.etype;
_size24 = _rtmp328.size;
for (var _i29 = 0; _i29 < _size24; ++_i29)
{
var elem30 = null;
elem30 = new ttypes.VisitRecord();
elem30.read(input);
this.success.push(elem30);
}
input.readListEnd();
} else {
input.skip(ftype);
}
break;
case 0:
input.skip(ftype);
break;
default:
input.skip(ftype);
}
input.readFieldEnd();
}
input.readStructEnd();
return;
};
LasService_findVisitRecords_result.prototype.write = function(output) {
output.writeStructBegin('LasService_findVisitRecords_result');
if (this.success !== null && this.success !== undefined) {
output.writeFieldBegin('success', Thrift.Type.LIST, 0);
output.writeListBegin(Thrift.Type.STRUCT, this.success.length);
for (var iter31 in this.success)
{
if (this.success.hasOwnProperty(iter31))
{
iter31 = this.success[iter31];
iter31.write(output);
}
}
output.writeListEnd();
output.writeFieldEnd();
}
output.writeFieldStop();
output.writeStructEnd();
return;
};
// Thrift-generated argument struct for LasService.listItemBasedRecommendations.
// Wire schema: field 1 = start (I32), field 2 = limit (I32) — a paging window.
LasService_listItemBasedRecommendations_args = function(args) {
  this.start = null;
  this.limit = null;
  if (args) {
    if (args.start !== undefined) {
      this.start = args.start;
    }
    if (args.limit !== undefined) {
      this.limit = args.limit;
    }
  }
};
LasService_listItemBasedRecommendations_args.prototype = {};
// Deserializes this struct; unknown/mismatched fields are skipped.
LasService_listItemBasedRecommendations_args.prototype.read = function(input) {
  input.readStructBegin();
  while (true)
  {
    var ret = input.readFieldBegin();
    var fname = ret.fname;
    var ftype = ret.ftype;
    var fid = ret.fid;
    if (ftype == Thrift.Type.STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
      if (ftype == Thrift.Type.I32) {
        this.start = input.readI32();
      } else {
        input.skip(ftype);
      }
      break;
      case 2:
      if (ftype == Thrift.Type.I32) {
        this.limit = input.readI32();
      } else {
        input.skip(ftype);
      }
      break;
      default:
        input.skip(ftype);
    }
    input.readFieldEnd();
  }
  input.readStructEnd();
  return;
};
// Serializes this struct; null/undefined fields are omitted from the wire.
LasService_listItemBasedRecommendations_args.prototype.write = function(output) {
  output.writeStructBegin('LasService_listItemBasedRecommendations_args');
  if (this.start !== null && this.start !== undefined) {
    output.writeFieldBegin('start', Thrift.Type.I32, 1);
    output.writeI32(this.start);
    output.writeFieldEnd();
  }
  if (this.limit !== null && this.limit !== undefined) {
    output.writeFieldBegin('limit', Thrift.Type.I32, 2);
    output.writeI32(this.limit);
    output.writeFieldEnd();
  }
  output.writeFieldStop();
  output.writeStructEnd();
  return;
};
LasService_listItemBasedRecommendations_result = function(args) {
this.success = null;
if (args) {
if (args.success !== undefined) {
this.success = args.success;
}
}
};
LasService_listItemBasedRecommendations_result.prototype = {};
LasService_listItemBasedRecommendations_result.prototype.read = function(input) {
input.readStructBegin();
while (true)
{
var ret = input.readFieldBegin();
var fname = ret.fname;
var ftype = ret.ftype;
var fid = ret.fid;
if (ftype == Thrift.Type.STOP) {
break;
}
switch (fid)
{
case 0:
if (ftype == Thrift.Type.LIST) {
var _size32 = 0;
var _rtmp336;
this.success = [];
var _etype35 = 0;
_rtmp336 = input.readListBegin();
_etype35 = _rtmp336.etype;
_size32 = _rtmp336.size;
for (var _i37 = 0; _i37 < _size32; ++_i37)
{
var elem38 = null;
elem38 = new ttypes.UserRecommendations();
elem38.read(input);
this.success.push(elem38);
}
input.readListEnd();
} else {
input.skip(ftype);
}
break;
case 0:
input.skip(ftype);
break;
default:
input.skip(ftype);
}
input.readFieldEnd();
}
input.readStructEnd();
return;
};
LasService_listItemBasedRecommendations_result.prototype.write = function(output) {
output.writeStructBegin('LasService_listItemBasedRecommendations_result');
if (this.success !== null && this.success !== undefined) {
output.writeFieldBegin('success', Thrift.Type.LIST, 0);
output.writeListBegin(Thrift.Type.STRUCT, this.success.length);
for (var iter39 in this.success)
{
if (this.success.hasOwnProperty(iter39))
{
iter39 = this.success[iter39];
iter39.write(output);
}
}
output.writeListEnd();
output.writeFieldEnd();
}
output.writeFieldStop();
output.writeStructEnd();
return;
};
// Thrift-generated argument struct for LasService.findItemBasedRecommendations.
// Wire schema: field 1 = userCode (STRING).
LasService_findItemBasedRecommendations_args = function(args) {
  this.userCode = null;
  if (args) {
    if (args.userCode !== undefined) {
      this.userCode = args.userCode;
    }
  }
};
LasService_findItemBasedRecommendations_args.prototype = {};
// Deserializes this struct; unknown/mismatched fields are skipped.
LasService_findItemBasedRecommendations_args.prototype.read = function(input) {
  input.readStructBegin();
  while (true)
  {
    var ret = input.readFieldBegin();
    var fname = ret.fname;
    var ftype = ret.ftype;
    var fid = ret.fid;
    if (ftype == Thrift.Type.STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
      if (ftype == Thrift.Type.STRING) {
        this.userCode = input.readString();
      } else {
        input.skip(ftype);
      }
      break;
      // Generator emits an explicit skip for field id 0 (no such field in
      // this struct); redundant with default but harmless.
      case 0:
        input.skip(ftype);
        break;
      default:
        input.skip(ftype);
    }
    input.readFieldEnd();
  }
  input.readStructEnd();
  return;
};
// Serializes this struct; a null/undefined userCode is omitted from the wire.
LasService_findItemBasedRecommendations_args.prototype.write = function(output) {
  output.writeStructBegin('LasService_findItemBasedRecommendations_args');
  if (this.userCode !== null && this.userCode !== undefined) {
    output.writeFieldBegin('userCode', Thrift.Type.STRING, 1);
    output.writeString(this.userCode);
    output.writeFieldEnd();
  }
  output.writeFieldStop();
  output.writeStructEnd();
  return;
};
LasService_findItemBasedRecommendations_result = function(args) {
this.success = null;
if (args) {
if (args.success !== undefined) {
this.success = args.success;
}
}
};
LasService_findItemBasedRecommendations_result.prototype = {};
LasService_findItemBasedRecommendations_result.prototype.read = function(input) {
input.readStructBegin();
while (true)
{
var ret = input.readFieldBegin();
var fname = ret.fname;
var ftype = ret.ftype;
var fid = ret.fid;
if (ftype == Thrift.Type.STOP) {
break;
}
switch (fid)
{
case 0:
if (ftype == Thrift.Type.LIST) {
var _size40 = 0;
var _rtmp344;
this.success = [];
var _etype43 = 0;
_rtmp344 = input.readListBegin();
_etype43 = _rtmp344.etype;
_size40 = _rtmp344.size;
for (var _i45 = 0; _i45 < _size40; ++_i45)
{
var elem46 = null;
elem46 = new ttypes.Recommendation();
elem46.read(input);
this.success.push(elem46);
}
input.readListEnd();
} else {
input.skip(ftype);
}
break;
case 0:
input.skip(ftype);
break;
default:
input.skip(ftype);
}
input.readFieldEnd();
}
input.readStructEnd();
return;
};
LasService_findItemBasedRecommendations_result.prototype.write = function(output) {
output.writeStructBegin('LasService_findItemBasedRecommendations_result');
if (this.success !== null && this.success !== undefined) {
output.writeFieldBegin('success', Thrift.Type.LIST, 0);
output.writeListBegin(Thrift.Type.STRUCT, this.success.length);
for (var iter47 in this.success)
{
if (this.success.hasOwnProperty(iter47))
{
iter47 = this.success[iter47];
iter47.write(output);
}
}
output.writeListEnd();
output.writeFieldEnd();
}
output.writeFieldStop();
output.writeStructEnd();
return;
};
// Client stub for LasService. Wraps a transport (`output`) and a protocol
// class (`pClass`); pending callbacks are correlated with replies via a
// monotonically increasing sequence id stored in `_reqs`.
LasServiceClient = exports.Client = function(output, pClass) {
  this.output = output;
  this.pClass = pClass;
  this.seqid = 0;
  this._reqs = {};
};
LasServiceClient.prototype = {};
// Fetch a page of user visit records. callback(err, records).
LasServiceClient.prototype.listUserVisitRecords = function(start, limit, callback) {
  this.seqid += 1;
  this._reqs[this.seqid] = callback;
  this.send_listUserVisitRecords(start, limit);
};
// Frames and flushes the listUserVisitRecords CALL message.
LasServiceClient.prototype.send_listUserVisitRecords = function(start, limit) {
  var output = new this.pClass(this.output);
  output.writeMessageBegin('listUserVisitRecords', Thrift.MessageType.CALL, this.seqid);
  var args = new LasService_listUserVisitRecords_args();
  args.start = start;
  args.limit = limit;
  args.write(output);
  output.writeMessageEnd();
  return this.output.flush();
};
// Decodes a listUserVisitRecords reply and dispatches the stored callback.
// An unset success field is reported as an error string.
LasServiceClient.prototype.recv_listUserVisitRecords = function(input,mtype,rseqid) {
  var callback = this._reqs[rseqid] || function() {};
  delete this._reqs[rseqid];
  if (mtype == Thrift.MessageType.EXCEPTION) {
    var x = new Thrift.TApplicationException();
    x.read(input);
    input.readMessageEnd();
    return callback(x);
  }
  var result = new LasService_listUserVisitRecords_result();
  result.read(input);
  input.readMessageEnd();
  if (null !== result.success) {
    return callback(null, result.success);
  }
  return callback('listUserVisitRecords failed: unknown result');
};
// Find visit records for a user/house-type pair. callback(err, records).
LasServiceClient.prototype.findVisitRecords = function(userCode, houseType, callback) {
  this.seqid += 1;
  this._reqs[this.seqid] = callback;
  this.send_findVisitRecords(userCode, houseType);
};
// Frames and flushes the findVisitRecords CALL message.
LasServiceClient.prototype.send_findVisitRecords = function(userCode, houseType) {
  var output = new this.pClass(this.output);
  output.writeMessageBegin('findVisitRecords', Thrift.MessageType.CALL, this.seqid);
  var args = new LasService_findVisitRecords_args();
  args.userCode = userCode;
  args.houseType = houseType;
  args.write(output);
  output.writeMessageEnd();
  return this.output.flush();
};
// Decodes a findVisitRecords reply and dispatches the stored callback.
LasServiceClient.prototype.recv_findVisitRecords = function(input,mtype,rseqid) {
  var callback = this._reqs[rseqid] || function() {};
  delete this._reqs[rseqid];
  if (mtype == Thrift.MessageType.EXCEPTION) {
    var x = new Thrift.TApplicationException();
    x.read(input);
    input.readMessageEnd();
    return callback(x);
  }
  var result = new LasService_findVisitRecords_result();
  result.read(input);
  input.readMessageEnd();
  if (null !== result.success) {
    return callback(null, result.success);
  }
  return callback('findVisitRecords failed: unknown result');
};
// Fetch a page of item-based recommendations. callback(err, recs).
LasServiceClient.prototype.listItemBasedRecommendations = function(start, limit, callback) {
  this.seqid += 1;
  this._reqs[this.seqid] = callback;
  this.send_listItemBasedRecommendations(start, limit);
};
// Frames and flushes the listItemBasedRecommendations CALL message.
LasServiceClient.prototype.send_listItemBasedRecommendations = function(start, limit) {
  var output = new this.pClass(this.output);
  output.writeMessageBegin('listItemBasedRecommendations', Thrift.MessageType.CALL, this.seqid);
  var args = new LasService_listItemBasedRecommendations_args();
  args.start = start;
  args.limit = limit;
  args.write(output);
  output.writeMessageEnd();
  return this.output.flush();
};
// Decodes a listItemBasedRecommendations reply and dispatches the callback.
LasServiceClient.prototype.recv_listItemBasedRecommendations = function(input,mtype,rseqid) {
  var callback = this._reqs[rseqid] || function() {};
  delete this._reqs[rseqid];
  if (mtype == Thrift.MessageType.EXCEPTION) {
    var x = new Thrift.TApplicationException();
    x.read(input);
    input.readMessageEnd();
    return callback(x);
  }
  var result = new LasService_listItemBasedRecommendations_result();
  result.read(input);
  input.readMessageEnd();
  if (null !== result.success) {
    return callback(null, result.success);
  }
  return callback('listItemBasedRecommendations failed: unknown result');
};
// Find item-based recommendations for a single user. callback(err, recs).
LasServiceClient.prototype.findItemBasedRecommendations = function(userCode, callback) {
  this.seqid += 1;
  this._reqs[this.seqid] = callback;
  this.send_findItemBasedRecommendations(userCode);
};
// Frames and flushes the findItemBasedRecommendations CALL message.
LasServiceClient.prototype.send_findItemBasedRecommendations = function(userCode) {
  var output = new this.pClass(this.output);
  output.writeMessageBegin('findItemBasedRecommendations', Thrift.MessageType.CALL, this.seqid);
  var args = new LasService_findItemBasedRecommendations_args();
  args.userCode = userCode;
  args.write(output);
  output.writeMessageEnd();
  return this.output.flush();
};
// Decodes a findItemBasedRecommendations reply and dispatches the callback.
LasServiceClient.prototype.recv_findItemBasedRecommendations = function(input,mtype,rseqid) {
  var callback = this._reqs[rseqid] || function() {};
  delete this._reqs[rseqid];
  if (mtype == Thrift.MessageType.EXCEPTION) {
    var x = new Thrift.TApplicationException();
    x.read(input);
    input.readMessageEnd();
    return callback(x);
  }
  var result = new LasService_findItemBasedRecommendations_result();
  result.read(input);
  input.readMessageEnd();
  if (null !== result.success) {
    return callback(null, result.success);
  }
  return callback('findItemBasedRecommendations failed: unknown result');
};
// Server-side dispatcher: decodes incoming LasService calls and routes them
// to the user-supplied handler object by method name.
LasServiceProcessor = exports.Processor = function(handler) {
  this._handler = handler;
};
// Reads one message from `input` and dispatches it. Unknown methods get a
// TApplicationException reply.
LasServiceProcessor.prototype.process = function(input, output) {
  var r = input.readMessageBegin();
  if (this['process_' + r.fname]) {
    return this['process_' + r.fname].call(this, r.rseqid, input, output);
  } else {
    input.skip(Thrift.Type.STRUCT);
    input.readMessageEnd();
    var x = new Thrift.TApplicationException(Thrift.TApplicationExceptionType.UNKNOWN_METHOD, 'Unknown function ' + r.fname);
    // Fixed: generated code wrote Thrift.MessageType.Exception, which is
    // undefined — the constant is EXCEPTION, so unknown-method replies were
    // framed with an undefined message type.
    output.writeMessageBegin(r.fname, Thrift.MessageType.EXCEPTION, r.rseqid);
    x.write(output);
    output.writeMessageEnd();
    output.flush();
  }
};
// Handles listUserVisitRecords: decode args, invoke handler, frame REPLY.
// Note the generated protocol has no exception fields, so a handler error
// yields an empty result (client sees "unknown result").
LasServiceProcessor.prototype.process_listUserVisitRecords = function(seqid, input, output) {
  var args = new LasService_listUserVisitRecords_args();
  args.read(input);
  input.readMessageEnd();
  this._handler.listUserVisitRecords(args.start, args.limit, function (err, result) {
    // Renamed from `var result`, which shadowed the callback parameter.
    var reply = new LasService_listUserVisitRecords_result((err != null ? err : {success: result}));
    output.writeMessageBegin("listUserVisitRecords", Thrift.MessageType.REPLY, seqid);
    reply.write(output);
    output.writeMessageEnd();
    output.flush();
  });
};
// Handles findVisitRecords: decode args, invoke handler, frame REPLY.
LasServiceProcessor.prototype.process_findVisitRecords = function(seqid, input, output) {
  var args = new LasService_findVisitRecords_args();
  args.read(input);
  input.readMessageEnd();
  this._handler.findVisitRecords(args.userCode, args.houseType, function (err, result) {
    var reply = new LasService_findVisitRecords_result((err != null ? err : {success: result}));
    output.writeMessageBegin("findVisitRecords", Thrift.MessageType.REPLY, seqid);
    reply.write(output);
    output.writeMessageEnd();
    output.flush();
  });
};
// Handles listItemBasedRecommendations: decode args, invoke handler, REPLY.
LasServiceProcessor.prototype.process_listItemBasedRecommendations = function(seqid, input, output) {
  var args = new LasService_listItemBasedRecommendations_args();
  args.read(input);
  input.readMessageEnd();
  this._handler.listItemBasedRecommendations(args.start, args.limit, function (err, result) {
    var reply = new LasService_listItemBasedRecommendations_result((err != null ? err : {success: result}));
    output.writeMessageBegin("listItemBasedRecommendations", Thrift.MessageType.REPLY, seqid);
    reply.write(output);
    output.writeMessageEnd();
    output.flush();
  });
};
// Handles findItemBasedRecommendations: decode args, invoke handler, REPLY.
LasServiceProcessor.prototype.process_findItemBasedRecommendations = function(seqid, input, output) {
  var args = new LasService_findItemBasedRecommendations_args();
  args.read(input);
  input.readMessageEnd();
  this._handler.findItemBasedRecommendations(args.userCode, function (err, result) {
    var reply = new LasService_findItemBasedRecommendations_result((err != null ? err : {success: result}));
    output.writeMessageBegin("findItemBasedRecommendations", Thrift.MessageType.REPLY, seqid);
    reply.write(output);
    output.writeMessageEnd();
    output.flush();
  });
};
|
mengke/las
|
las-ui/lib/gen-nodejs/LasService.js
|
JavaScript
|
apache-2.0
| 22,334
|
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .. import mlog
from .. import build
from ..mesonlib import MesonException, Popen_safe
from ..dependencies import Qt4Dependency
from . import ExtensionModule
import xml.etree.ElementTree as ET
from . import ModuleReturnValue
class Qt4Module(ExtensionModule):
    # Tool detection is expensive, so it is done at most once per module
    # instance and cached via this flag.
    tools_detected = False

    def _detect_tools(self, env, method):
        """Locate moc/uic/rcc via the Qt4 dependency and verify that each
        tool found actually belongs to Qt 4 (not Qt 5). Results are cached
        on self.moc/self.uic/self.rcc; subsequent calls are no-ops.
        """
        if self.tools_detected:
            return
        mlog.log('Detecting Qt4 tools')
        # FIXME: We currently require Qt4 to exist while importing the module.
        # We should make it gracefully degrade and not create any targets if
        # the import is marked as 'optional' (not implemented yet)
        kwargs = {'required': 'true', 'modules': 'Core', 'silent': 'true', 'method': method}
        qt4 = Qt4Dependency(env, kwargs)
        # Get all tools and then make sure that they are the right version
        self.moc, self.uic, self.rcc = qt4.compilers_detect()
        # Moc, uic and rcc write their version strings to stderr.
        # Moc and rcc return a non-zero result when doing so.
        # What kind of an idiot thought that was a good idea?
        if self.moc.found():
            stdout, stderr = Popen_safe(self.moc.get_command() + ['-v'])[1:3]
            stdout = stdout.strip()
            stderr = stderr.strip()
            # Qt4 moc identifies itself as "Qt Meta Object Compiler" on stderr.
            if 'Qt Meta' in stderr:
                moc_ver = stderr
            else:
                raise MesonException('Moc preprocessor is not for Qt 4. Output:\n%s\n%s' %
                                     (stdout, stderr))
            mlog.log(' moc:', mlog.green('YES'), '(%s, %s)' %
                     (self.moc.get_path(), moc_ver.split()[-1]))
        else:
            mlog.log(' moc:', mlog.red('NO'))
        if self.uic.found():
            stdout, stderr = Popen_safe(self.uic.get_command() + ['-v'])[1:3]
            stdout = stdout.strip()
            stderr = stderr.strip()
            if 'version 4.' in stderr:
                uic_ver = stderr
            else:
                raise MesonException('Uic compiler is not for Qt4. Output:\n%s\n%s' %
                                     (stdout, stderr))
            mlog.log(' uic:', mlog.green('YES'), '(%s, %s)' %
                     (self.uic.get_path(), uic_ver.split()[-1]))
        else:
            mlog.log(' uic:', mlog.red('NO'))
        if self.rcc.found():
            stdout, stderr = Popen_safe(self.rcc.get_command() + ['-v'])[1:3]
            stdout = stdout.strip()
            stderr = stderr.strip()
            if 'version 4.' in stderr:
                rcc_ver = stderr
            else:
                raise MesonException('Rcc compiler is not for Qt 4. Output:\n%s\n%s' %
                                     (stdout, stderr))
            mlog.log(' rcc:', mlog.green('YES'), '(%s, %s)'
                     % (self.rcc.get_path(), rcc_ver.split()[-1]))
        else:
            mlog.log(' rcc:', mlog.red('NO'))
        self.tools_detected = True

    def parse_qrc(self, state, fname):
        """Parse a .qrc resource file and return the source-relative paths of
        the files it references (used as depend_files for the rcc target).
        Returns [] on any parse error — best-effort by design; a bad qrc only
        weakens dependency tracking, it does not fail the build.
        """
        abspath = os.path.join(state.environment.source_dir, state.subdir, fname)
        relative_part = os.path.split(fname)[0]
        try:
            tree = ET.parse(abspath)
            root = tree.getroot()
            result = []
            # Only <file> children of the first <qresource> element are read;
            # anything else is treated as malformed.
            for child in root[0]:
                if child.tag != 'file':
                    mlog.warning("malformed rcc file: ", os.path.join(state.subdir, fname))
                    break
                else:
                    result.append(os.path.join(state.subdir, relative_part, child.text))
            return result
        except Exception:
            return []

    def preprocess(self, state, args, kwargs):
        """Entry point for qt4.preprocess(): turn qresources / ui_files /
        moc_headers / moc_sources into rcc custom targets and uic/moc
        generators, returning them (appended to any passed-in sources).
        """
        # Each keyword accepts a single item or a list; normalize to lists.
        rcc_files = kwargs.pop('qresources', [])
        if not isinstance(rcc_files, list):
            rcc_files = [rcc_files]
        ui_files = kwargs.pop('ui_files', [])
        if not isinstance(ui_files, list):
            ui_files = [ui_files]
        moc_headers = kwargs.pop('moc_headers', [])
        if not isinstance(moc_headers, list):
            moc_headers = [moc_headers]
        moc_sources = kwargs.pop('moc_sources', [])
        if not isinstance(moc_sources, list):
            moc_sources = [moc_sources]
        sources = kwargs.pop('sources', [])
        if not isinstance(sources, list):
            sources = [sources]
        # First positional arg (if any) names the rcc target; the rest are
        # extra sources.
        sources += args[1:]
        method = kwargs.get('method', 'auto')
        self._detect_tools(state.environment, method)
        err_msg = "{0} sources specified and couldn't find {1}, " \
                  "please check your qt4 installation"
        if len(moc_headers) + len(moc_sources) > 0 and not self.moc.found():
            raise MesonException(err_msg.format('MOC', 'moc-qt4'))
        if len(rcc_files) > 0:
            if not self.rcc.found():
                raise MesonException(err_msg.format('RCC', 'rcc-qt4'))
            qrc_deps = []
            for i in rcc_files:
                qrc_deps += self.parse_qrc(state, i)
            if len(args) > 0:
                name = args[0]
            else:
                # Derive a target name from the first qrc file.
                basename = os.path.split(rcc_files[0])[1]
                name = 'qt4-' + basename.replace('.', '_')
            rcc_kwargs = {'input': rcc_files,
                          'output': name + '.cpp',
                          'command': [self.rcc, '-o', '@OUTPUT@', '@INPUT@'],
                          'depend_files': qrc_deps}
            res_target = build.CustomTarget(name, state.subdir, rcc_kwargs)
            sources.append(res_target)
        if len(ui_files) > 0:
            if not self.uic.found():
                raise MesonException(err_msg.format('UIC', 'uic-qt4'))
            ui_kwargs = {'output': 'ui_@BASENAME@.h',
                         'arguments': ['-o', '@OUTPUT@', '@INPUT@']}
            ui_gen = build.Generator([self.uic], ui_kwargs)
            ui_output = ui_gen.process_files('Qt4 ui', ui_files, state)
            sources.append(ui_output)
        if len(moc_headers) > 0:
            moc_kwargs = {'output': 'moc_@BASENAME@.cpp',
                          'arguments': ['@INPUT@', '-o', '@OUTPUT@']}
            moc_gen = build.Generator([self.moc], moc_kwargs)
            moc_output = moc_gen.process_files('Qt4 moc header', moc_headers, state)
            sources.append(moc_output)
        if len(moc_sources) > 0:
            # moc run on a .cpp produces a .moc include, not a standalone .cpp.
            moc_kwargs = {'output': '@BASENAME@.moc',
                          'arguments': ['@INPUT@', '-o', '@OUTPUT@']}
            moc_gen = build.Generator([self.moc], moc_kwargs)
            moc_output = moc_gen.process_files('Qt4 moc source', moc_sources, state)
            sources.append(moc_output)
        return ModuleReturnValue(sources, sources)
def initialize():
    """Module entry point called by Meson to instantiate the qt4 module."""
    mlog.warning('rcc dependencies will not work properly until this upstream issue is fixed:',
                 mlog.bold('https://bugreports.qt.io/browse/QTBUG-45460'))
    return Qt4Module()
|
rhd/meson
|
mesonbuild/modules/qt4.py
|
Python
|
apache-2.0
| 7,453
|
//go:build linux
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2
import (
"context"
"encoding/json"
"io"
"os"
"path/filepath"
goruntime "runtime"
"sync"
"syscall"
"time"
"github.com/containerd/cgroups"
cgroupsv2 "github.com/containerd/cgroups/v2"
eventstypes "github.com/containerd/containerd/api/events"
"github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/pkg/oom"
oomv1 "github.com/containerd/containerd/pkg/oom/v1"
oomv2 "github.com/containerd/containerd/pkg/oom/v2"
"github.com/containerd/containerd/pkg/process"
"github.com/containerd/containerd/pkg/schedcore"
"github.com/containerd/containerd/pkg/stdio"
"github.com/containerd/containerd/pkg/userns"
"github.com/containerd/containerd/runtime/v2/runc"
"github.com/containerd/containerd/runtime/v2/runc/options"
"github.com/containerd/containerd/runtime/v2/shim"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
"github.com/containerd/containerd/sys/reaper"
runcC "github.com/containerd/go-runc"
"github.com/containerd/typeurl"
"github.com/gogo/protobuf/proto"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
exec "golang.org/x/sys/execabs"
"golang.org/x/sys/unix"
)
var (
	// Compile-time assertion that *service satisfies the task service API.
	_ = (taskAPI.TaskService)(&service{})
	// Shared empty protobuf response.
	empty = &ptypes.Empty{}
)

// group labels specifies how the shim groups services.
// currently supports a runc.v2 specific .group label and the
// standard k8s pod label. Order matters in this list
var groupLabels = []string{
	"io.containerd.runc.v2.group",
	"io.kubernetes.cri.sandbox-id",
}

// spec is a minimal view of the OCI runtime spec: only annotations are
// decoded (used by StartShim to pick a shim grouping key).
type spec struct {
	Annotations map[string]string `json:"annotations,omitempty"`
}
// New returns a new shim service that can be used via GRPC
func New(ctx context.Context, id string, publisher shim.Publisher, shutdown func()) (shim.Shim, error) {
	var (
		ep  oom.Watcher
		err error
	)
	// Pick the OOM watcher matching the host's cgroup version.
	if cgroups.Mode() == cgroups.Unified {
		ep, err = oomv2.New(publisher)
	} else {
		ep, err = oomv1.New(publisher)
	}
	if err != nil {
		return nil, err
	}
	go ep.Run(ctx)
	s := &service{
		id:         id,
		context:    ctx,
		events:     make(chan interface{}, 128),
		ec:         reaper.Default.Subscribe(),
		ep:         ep,
		cancel:     shutdown,
		containers: make(map[string]*runc.Container),
	}
	go s.processExits()
	runcC.Monitor = reaper.Default
	if err := s.initPlatform(); err != nil {
		// Platform init failed: trigger shutdown so the shim exits cleanly.
		shutdown()
		return nil, errors.Wrap(err, "failed to initialized platform behavior")
	}
	go s.forward(ctx, publisher)
	// The address file is optional; a read failure just leaves shimAddress empty.
	if address, err := shim.ReadAddress("address"); err == nil {
		s.shimAddress = address
	}
	return s, nil
}
// service is the shim implementation of a remote shim over GRPC
type service struct {
	// mu guards the containers map.
	mu sync.Mutex
	// eventSendMu serializes event publishing; Start holds it so start
	// events are always sent before any exit events for the same process.
	eventSendMu sync.Mutex

	context  context.Context
	events   chan interface{}
	platform stdio.Platform
	// ec receives process exit notifications from the reaper.
	ec chan runcC.Exit
	// ep is the OOM event watcher (cgroup v1 or v2).
	ep oom.Watcher

	// id only used in cleanup case
	id string

	containers map[string]*runc.Container

	shimAddress string
	cancel      func()
}
// newCommand builds the exec.Cmd used to re-exec this binary as a detached
// shim process for the given container id.
// NOTE(review): containerdBinary and containerdTTRPCAddress are accepted but
// unused here — confirm against callers whether they should be forwarded.
func newCommand(ctx context.Context, id, containerdBinary, containerdAddress, containerdTTRPCAddress string) (*exec.Cmd, error) {
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return nil, err
	}
	self, err := os.Executable()
	if err != nil {
		return nil, err
	}
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	args := []string{
		"-namespace", ns,
		"-id", id,
		"-address", containerdAddress,
	}
	cmd := exec.Command(self, args...)
	cmd.Dir = cwd
	// Cap the shim's Go scheduler parallelism.
	cmd.Env = append(os.Environ(), "GOMAXPROCS=4")
	// Run the shim in its own process group so terminal signals aimed at
	// containerd do not propagate to it.
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Setpgid: true,
	}
	return cmd, nil
}
// readSpec loads the OCI runtime spec from config.json in the current
// working directory (the bundle dir); only the annotations are decoded.
func readSpec() (*spec, error) {
	file, err := os.Open("config.json")
	if err != nil {
		return nil, err
	}
	defer file.Close()
	parsed := new(spec)
	if decodeErr := json.NewDecoder(file).Decode(parsed); decodeErr != nil {
		return nil, decodeErr
	}
	return parsed, nil
}
// StartShim launches (or reuses) a shim process for the task described by
// opts and returns the abstract socket address clients should connect to.
// Shims may be shared across containers via the grouping annotation labels.
func (s *service) StartShim(ctx context.Context, opts shim.StartOpts) (_ string, retErr error) {
	cmd, err := newCommand(ctx, opts.ID, opts.ContainerdBinary, opts.Address, opts.TTRPCAddress)
	if err != nil {
		return "", err
	}
	// Default grouping is the task id; a group annotation overrides it so
	// several containers can share one shim.
	grouping := opts.ID
	spec, err := readSpec()
	if err != nil {
		return "", err
	}
	for _, group := range groupLabels {
		if groupID, ok := spec.Annotations[group]; ok {
			grouping = groupID
			break
		}
	}
	address, err := shim.SocketAddress(ctx, opts.Address, grouping)
	if err != nil {
		return "", err
	}
	socket, err := shim.NewSocket(address)
	if err != nil {
		// the only time where this would happen is if there is a bug and the socket
		// was not cleaned up in the cleanup method of the shim or we are using the
		// grouping functionality where the new process should be run with the same
		// shim as an existing container
		if !shim.SocketEaddrinuse(err) {
			return "", errors.Wrap(err, "create new shim socket")
		}
		if shim.CanConnect(address) {
			// A live shim already serves this group: record and reuse it.
			if err := shim.WriteAddress("address", address); err != nil {
				return "", errors.Wrap(err, "write existing socket for shim")
			}
			return address, nil
		}
		// Stale socket from a dead shim: remove it and retry once.
		if err := shim.RemoveSocket(address); err != nil {
			return "", errors.Wrap(err, "remove pre-existing socket")
		}
		if socket, err = shim.NewSocket(address); err != nil {
			return "", errors.Wrap(err, "try create new shim socket 2x")
		}
	}
	defer func() {
		if retErr != nil {
			socket.Close()
			_ = shim.RemoveSocket(address)
		}
	}()
	// make sure that reexec shim-v2 binary use the value if need
	if err := shim.WriteAddress("address", address); err != nil {
		return "", err
	}

	// Pass the listening socket to the child shim as an inherited fd.
	f, err := socket.File()
	if err != nil {
		return "", err
	}

	cmd.ExtraFiles = append(cmd.ExtraFiles, f)

	// Lock the OS thread so the optional sched-core domain is created on the
	// same thread that forks the child.
	goruntime.LockOSThread()
	if os.Getenv("SCHED_CORE") != "" {
		if err := schedcore.Create(schedcore.ProcessGroup); err != nil {
			return "", errors.Wrap(err, "enable sched core support")
		}
	}

	if err := cmd.Start(); err != nil {
		f.Close()
		return "", err
	}

	goruntime.UnlockOSThread()

	defer func() {
		if retErr != nil {
			cmd.Process.Kill()
		}
	}()
	// make sure to wait after start
	go cmd.Wait()
	// Runtime options may arrive on stdin; honor ShimCgroup if set.
	if data, err := io.ReadAll(os.Stdin); err == nil {
		if len(data) > 0 {
			var any ptypes.Any
			if err := proto.Unmarshal(data, &any); err != nil {
				return "", err
			}
			v, err := typeurl.UnmarshalAny(&any)
			if err != nil {
				return "", err
			}
			if opts, ok := v.(*options.Options); ok {
				if opts.ShimCgroup != "" {
					if cgroups.Mode() == cgroups.Unified {
						cg, err := cgroupsv2.LoadManager("/sys/fs/cgroup", opts.ShimCgroup)
						if err != nil {
							return "", errors.Wrapf(err, "failed to load cgroup %s", opts.ShimCgroup)
						}
						if err := cg.AddProc(uint64(cmd.Process.Pid)); err != nil {
							return "", errors.Wrapf(err, "failed to join cgroup %s", opts.ShimCgroup)
						}
					} else {
						cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(opts.ShimCgroup))
						if err != nil {
							return "", errors.Wrapf(err, "failed to load cgroup %s", opts.ShimCgroup)
						}
						if err := cg.Add(cgroups.Process{
							Pid: cmd.Process.Pid,
						}); err != nil {
							return "", errors.Wrapf(err, "failed to join cgroup %s", opts.ShimCgroup)
						}
					}
				}
			}
		}
	}
	// Protect the shim from the OOM killer relative to its workload.
	if err := shim.AdjustOOMScore(cmd.Process.Pid); err != nil {
		return "", errors.Wrap(err, "failed to adjust OOM score for shim")
	}
	return address, nil
}
// Cleanup force-deletes the runc container and unmounts its rootfs. It is
// invoked when containerd tears down a task without a live task service; the
// response reports a SIGKILL-style exit status.
func (s *service) Cleanup(ctx context.Context) (*taskAPI.DeleteResponse, error) {
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	// The bundle dir is a sibling of the cwd named after the task id.
	path := filepath.Join(filepath.Dir(cwd), s.id)
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return nil, err
	}
	runtime, err := runc.ReadRuntime(path)
	if err != nil {
		return nil, err
	}
	opts, err := runc.ReadOptions(path)
	if err != nil {
		return nil, err
	}
	root := process.RuncRoot
	if opts != nil && opts.Root != "" {
		root = opts.Root
	}
	r := process.NewRunc(root, path, ns, runtime, "", false)
	// Best-effort: log (not fail) if runc delete or unmount do not succeed.
	if err := r.Delete(ctx, s.id, &runcC.DeleteOpts{
		Force: true,
	}); err != nil {
		logrus.WithError(err).Warn("failed to remove runc container")
	}
	if err := mount.UnmountAll(filepath.Join(path, "rootfs"), 0); err != nil {
		logrus.WithError(err).Warn("failed to cleanup rootfs mount")
	}
	return &taskAPI.DeleteResponse{
		ExitedAt:   time.Now(),
		ExitStatus: 128 + uint32(unix.SIGKILL),
	}, nil
}
// Create a new initial process and container with the underlying OCI runtime
func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *taskAPI.CreateTaskResponse, err error) {
	// Lock so registration in s.containers and the create event are atomic
	// with respect to other task operations.
	s.mu.Lock()
	defer s.mu.Unlock()

	container, err := runc.NewContainer(ctx, s.platform, r)
	if err != nil {
		return nil, err
	}

	s.containers[r.ID] = container

	s.send(&eventstypes.TaskCreate{
		ContainerID: r.ID,
		Bundle:      r.Bundle,
		Rootfs:      r.Rootfs,
		IO: &eventstypes.TaskIO{
			Stdin:    r.Stdin,
			Stdout:   r.Stdout,
			Stderr:   r.Stderr,
			Terminal: r.Terminal,
		},
		Checkpoint: r.Checkpoint,
		Pid:        uint32(container.Pid()),
	})

	return &taskAPI.CreateTaskResponse{
		Pid: uint32(container.Pid()),
	}, nil
}
// Start a process
func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (*taskAPI.StartResponse, error) {
	container, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	// hold the send lock so that the start events are sent before any exit events in the error case
	s.eventSendMu.Lock()
	p, err := container.Start(ctx, r)
	if err != nil {
		s.eventSendMu.Unlock()
		return nil, errdefs.ToGRPC(err)
	}
	// An empty ExecID means the init process was started: register its
	// cgroup with the OOM monitor and emit TaskStart. Any other ExecID
	// belongs to an exec'd process and only emits TaskExecStarted.
	switch r.ExecID {
	case "":
		switch cg := container.Cgroup().(type) {
		case cgroups.Cgroup:
			if err := s.ep.Add(container.ID, cg); err != nil {
				logrus.WithError(err).Error("add cg to OOM monitor")
			}
		case *cgroupsv2.Manager:
			allControllers, err := cg.RootControllers()
			if err != nil {
				logrus.WithError(err).Error("failed to get root controllers")
			} else {
				if err := cg.ToggleControllers(allControllers, cgroupsv2.Enable); err != nil {
					// Inside a user namespace some controllers are
					// expected to be unavailable, so only log at debug.
					if userns.RunningInUserNS() {
						logrus.WithError(err).Debugf("failed to enable controllers (%v)", allControllers)
					} else {
						logrus.WithError(err).Errorf("failed to enable controllers (%v)", allControllers)
					}
				}
			}
			if err := s.ep.Add(container.ID, cg); err != nil {
				logrus.WithError(err).Error("add cg to OOM monitor")
			}
		}
		s.send(&eventstypes.TaskStart{
			ContainerID: container.ID,
			Pid:         uint32(p.Pid()),
		})
	default:
		s.send(&eventstypes.TaskExecStarted{
			ContainerID: container.ID,
			ExecID:      r.ExecID,
			Pid:         uint32(p.Pid()),
		})
	}
	s.eventSendMu.Unlock()
	return &taskAPI.StartResponse{
		Pid: uint32(p.Pid()),
	}, nil
}
// Delete the initial process and container
func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (*taskAPI.DeleteResponse, error) {
	container, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	p, err := container.Delete(ctx, r)
	if err != nil {
		return nil, errdefs.ToGRPC(err)
	}
	// if we deleted an init task, send the task delete event
	if r.ExecID == "" {
		// Deregister under the service lock; exec deletes leave the
		// container registered.
		s.mu.Lock()
		delete(s.containers, r.ID)
		s.mu.Unlock()
		s.send(&eventstypes.TaskDelete{
			ContainerID: container.ID,
			Pid:         uint32(p.Pid()),
			ExitStatus:  uint32(p.ExitStatus()),
			ExitedAt:    p.ExitedAt(),
		})
	}
	return &taskAPI.DeleteResponse{
		ExitStatus: uint32(p.ExitStatus()),
		ExitedAt:   p.ExitedAt(),
		Pid:        uint32(p.Pid()),
	}, nil
}
// Exec an additional process inside the container
func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*ptypes.Empty, error) {
	container, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	// Reserve the exec ID up front so a concurrent Exec with the same
	// ID fails fast; cancel releases the reservation if Exec fails.
	ok, cancel := container.ReserveProcess(r.ExecID)
	if !ok {
		return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s", r.ExecID)
	}
	// NOTE: this local shadows the imported `process` package in this scope.
	process, err := container.Exec(ctx, r)
	if err != nil {
		cancel()
		return nil, errdefs.ToGRPC(err)
	}
	s.send(&eventstypes.TaskExecAdded{
		ContainerID: container.ID,
		ExecID:      process.ID(),
	})
	return empty, nil
}
// ResizePty of a process
func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (*ptypes.Empty, error) {
	c, lookupErr := s.getContainer(r.ID)
	if lookupErr != nil {
		return nil, lookupErr
	}
	resizeErr := c.ResizePty(ctx, r)
	if resizeErr != nil {
		return nil, errdefs.ToGRPC(resizeErr)
	}
	return empty, nil
}
// State returns runtime state information for a process
func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (*taskAPI.StateResponse, error) {
	c, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	proc, err := c.Process(r.ExecID)
	if err != nil {
		return nil, errdefs.ToGRPC(err)
	}
	st, err := proc.Status(ctx)
	if err != nil {
		return nil, err
	}
	// Translate the runtime status string into the task API enum,
	// defaulting to unknown for unrecognized values.
	status := task.StatusUnknown
	if mapped, known := map[string]task.Status{
		"created": task.StatusCreated,
		"running": task.StatusRunning,
		"stopped": task.StatusStopped,
		"paused":  task.StatusPaused,
		"pausing": task.StatusPausing,
	}[st]; known {
		status = mapped
	}
	stdio := proc.Stdio()
	return &taskAPI.StateResponse{
		ID:         proc.ID(),
		Bundle:     c.Bundle,
		Pid:        uint32(proc.Pid()),
		Status:     status,
		Stdin:      stdio.Stdin,
		Stdout:     stdio.Stdout,
		Stderr:     stdio.Stderr,
		Terminal:   stdio.Terminal,
		ExitStatus: uint32(proc.ExitStatus()),
		ExitedAt:   proc.ExitedAt(),
	}, nil
}
// Pause the container
func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (*ptypes.Empty, error) {
	c, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	if pauseErr := c.Pause(ctx); pauseErr != nil {
		return nil, errdefs.ToGRPC(pauseErr)
	}
	evt := &eventstypes.TaskPaused{ContainerID: c.ID}
	s.send(evt)
	return empty, nil
}
// Resume the container
func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (*ptypes.Empty, error) {
	c, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	if resumeErr := c.Resume(ctx); resumeErr != nil {
		return nil, errdefs.ToGRPC(resumeErr)
	}
	evt := &eventstypes.TaskResumed{ContainerID: c.ID}
	s.send(evt)
	return empty, nil
}
// Kill a process with the provided signal
func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*ptypes.Empty, error) {
	c, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	if killErr := c.Kill(ctx, r); killErr != nil {
		return nil, errdefs.ToGRPC(killErr)
	}
	return empty, nil
}
// Pids returns all pids inside the container
func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.PidsResponse, error) {
	container, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	pids, err := s.getContainerPids(ctx, r.ID)
	if err != nil {
		return nil, errdefs.ToGRPC(err)
	}
	var processes []*task.ProcessInfo
	for _, pid := range pids {
		pInfo := task.ProcessInfo{
			Pid: pid,
		}
		// When the pid belongs to one of the container's exec'd
		// processes, attach its exec ID as marshaled detail info.
		for _, p := range container.ExecdProcesses() {
			if p.Pid() == int(pid) {
				d := &options.ProcessDetails{
					ExecID: p.ID(),
				}
				a, err := typeurl.MarshalAny(d)
				if err != nil {
					return nil, errors.Wrapf(err, "failed to marshal process %d info", pid)
				}
				pInfo.Info = a
				break
			}
		}
		processes = append(processes, &pInfo)
	}
	return &taskAPI.PidsResponse{
		Processes: processes,
	}, nil
}
// CloseIO of a process
func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (*ptypes.Empty, error) {
	c, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	// NOTE: the close error is returned as-is (not converted to gRPC),
	// matching the historical behavior of this endpoint.
	if closeErr := c.CloseIO(ctx, r); closeErr != nil {
		return nil, closeErr
	}
	return empty, nil
}
// Checkpoint the container
func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (*ptypes.Empty, error) {
	c, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	if cpErr := c.Checkpoint(ctx, r); cpErr != nil {
		return nil, errdefs.ToGRPC(cpErr)
	}
	return empty, nil
}
// Update a running container
func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (*ptypes.Empty, error) {
	c, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	if updateErr := c.Update(ctx, r); updateErr != nil {
		return nil, errdefs.ToGRPC(updateErr)
	}
	return empty, nil
}
// Wait for a process to exit
func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (*taskAPI.WaitResponse, error) {
	c, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	proc, procErr := c.Process(r.ExecID)
	if procErr != nil {
		return nil, errdefs.ToGRPC(procErr)
	}
	// Block until the process exits, then report its final status.
	proc.Wait()
	resp := &taskAPI.WaitResponse{
		ExitStatus: uint32(proc.ExitStatus()),
		ExitedAt:   proc.ExitedAt(),
	}
	return resp, nil
}
// Connect returns shim information such as the shim's pid
func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (*taskAPI.ConnectResponse, error) {
	taskPid := 0
	// A missing container is not an error here; the task pid stays 0.
	if c, err := s.getContainer(r.ID); err == nil {
		taskPid = c.Pid()
	}
	return &taskAPI.ConnectResponse{
		ShimPid: uint32(os.Getpid()),
		TaskPid: uint32(taskPid),
	}, nil
}
// Shutdown tears the shim down when it no longer services any container:
// closes the console platform, removes the shim socket, cancels the
// service context, and closes the event channel so forward() can exit.
func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (*ptypes.Empty, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	// return out if the shim is still servicing containers
	if len(s.containers) > 0 {
		return empty, nil
	}
	if s.platform != nil {
		s.platform.Close()
	}
	// Remove the shim's socket if one was bound.
	if s.shimAddress != "" {
		_ = shim.RemoveSocket(s.shimAddress)
	}
	// please make sure that temporary resource has been cleanup
	// before shutdown service.
	s.cancel()
	close(s.events)
	return empty, nil
}
// Stats returns cgroup metrics for the container's cgroup, handling both
// the v1 (cgroups.Cgroup) and v2 (*cgroupsv2.Manager) backends, and
// marshals the result into an Any for the response.
func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI.StatsResponse, error) {
	container, err := s.getContainer(r.ID)
	if err != nil {
		return nil, err
	}
	cgx := container.Cgroup()
	if cgx == nil {
		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "cgroup does not exist")
	}
	var statsx interface{}
	switch cg := cgx.(type) {
	case cgroups.Cgroup:
		// v1: tolerate partially-missing controllers.
		stats, err := cg.Stat(cgroups.IgnoreNotExist)
		if err != nil {
			return nil, err
		}
		statsx = stats
	case *cgroupsv2.Manager:
		stats, err := cg.Stat()
		if err != nil {
			return nil, err
		}
		statsx = stats
	default:
		return nil, errdefs.ToGRPCf(errdefs.ErrNotImplemented, "unsupported cgroup type %T", cg)
	}
	data, err := typeurl.MarshalAny(statsx)
	if err != nil {
		return nil, err
	}
	return &taskAPI.StatsResponse{
		Stats: data,
	}, nil
}
// processExits drains exit notifications until the channel is closed.
func (s *service) processExits() {
	for exit := range s.ec {
		s.checkProcesses(exit)
	}
}
// send queues evt on the service's event channel (drained by forward)
// without taking eventSendMu; callers that need ordering against exit
// events hold the lock themselves (as Start does) or use sendL.
func (s *service) send(evt interface{}) {
	s.events <- evt
}
// sendL queues evt on the event channel while holding the event-send
// lock, serializing it against events published under the same lock.
func (s *service) sendL(evt interface{}) {
	s.eventSendMu.Lock()
	defer s.eventSendMu.Unlock()
	s.events <- evt
}
// checkProcesses routes a runc exit notification to the process owning
// the exited pid, marks it exited, and publishes a TaskExit event.
func (s *service) checkProcesses(e runcC.Exit) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, container := range s.containers {
		if !container.HasPid(e.Pid) {
			continue
		}
		for _, p := range container.All() {
			if p.Pid() != e.Pid {
				continue
			}
			if ip, ok := p.(*process.Init); ok {
				// Ensure all children are killed
				if runc.ShouldKillAllOnExit(s.context, container.Bundle) {
					if err := ip.KillAll(s.context); err != nil {
						logrus.WithError(err).WithField("id", ip.ID()).
							Error("failed to kill init's children")
					}
				}
			}
			p.SetExited(e.Status)
			// sendL holds eventSendMu so this exit event cannot overtake
			// a start event published under the same lock (see Start).
			s.sendL(&eventstypes.TaskExit{
				ContainerID: container.ID,
				ID:          p.ID(),
				Pid:         uint32(e.Pid),
				ExitStatus:  uint32(e.Status),
				ExitedAt:    p.ExitedAt(),
			})
			return
		}
		// The pid belonged to this container but no matching process was
		// found; the search stops here rather than scanning other containers.
		return
	}
}
// getContainerPids asks the runtime for the live pids of the container's
// init process and converts them to the wire type.
func (s *service) getContainerPids(ctx context.Context, id string) ([]uint32, error) {
	c, err := s.getContainer(id)
	if err != nil {
		return nil, err
	}
	initProc, err := c.Process("")
	if err != nil {
		return nil, errdefs.ToGRPC(err)
	}
	rawPids, err := initProc.(*process.Init).Runtime().Ps(ctx, id)
	if err != nil {
		return nil, err
	}
	out := make([]uint32, 0, len(rawPids))
	for _, p := range rawPids {
		out = append(out, uint32(p))
	}
	return out, nil
}
// forward republishes queued shim events to the containerd publisher
// until the events channel is closed (see Shutdown), then closes the
// publisher.
func (s *service) forward(ctx context.Context, publisher shim.Publisher) {
	ns, _ := namespaces.Namespace(ctx)
	// Re-root on a background context carrying only the namespace so
	// publishing is not tied to the caller's context lifetime.
	ctx = namespaces.WithNamespace(context.Background(), ns)
	for e := range s.events {
		err := publisher.Publish(ctx, runc.GetTopic(e), e)
		if err != nil {
			// Publish failures are logged and the loop keeps draining.
			logrus.WithError(err).Error("post event")
		}
	}
	publisher.Close()
}
// getContainer looks up a registered container by ID under the service
// lock, returning a gRPC NotFound error when it does not exist.
func (s *service) getContainer(id string) (*runc.Container, error) {
	s.mu.Lock()
	c, ok := s.containers[id]
	s.mu.Unlock()
	if !ok || c == nil {
		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "container not created")
	}
	return c, nil
}
// initialize a single epoll fd to manage our consoles. `initPlatform` should
// only be called once.
func (s *service) initPlatform() error {
	// Idempotent: once the platform exists, further calls are no-ops.
	if s.platform == nil {
		p, err := runc.NewPlatform()
		if err != nil {
			return err
		}
		s.platform = p
	}
	return nil
}
|
mikebrow/containerd
|
runtime/v2/runc/v2/service.go
|
GO
|
apache-2.0
| 21,424
|
/*
* Copyright © 2012-2014 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package co.cask.tephra.distributed;
import co.cask.tephra.InvalidTruncateTimeException;
import co.cask.tephra.Transaction;
import co.cask.tephra.TransactionCouldNotTakeSnapshotException;
import co.cask.tephra.TransactionNotInProgressException;
import co.cask.tephra.distributed.thrift.TInvalidTruncateTimeException;
import co.cask.tephra.distributed.thrift.TTransactionCouldNotTakeSnapshotException;
import co.cask.tephra.distributed.thrift.TTransactionNotInProgressException;
import co.cask.tephra.distributed.thrift.TTransactionServer;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TTransport;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * This class is a wrapper around the thrift tx service client, it takes
 * Operations, converts them into thrift objects, calls the thrift
 * client, and converts the results back to data fabric classes.
 * This class also instruments the thrift calls with metrics.
 */
public class TransactionServiceThriftClient {
  // Adapts raw change-set byte arrays to the ByteBuffers the thrift API expects.
  private static final Function<byte[], ByteBuffer> BYTES_WRAPPER = new Function<byte[], ByteBuffer>() {
    @Override
    public ByteBuffer apply(byte[] input) {
      return ByteBuffer.wrap(input);
    }
  };
  /**
   * The thrift transport layer. We need this when we close the connection.
   */
  TTransport transport;
  /**
   * The actual thrift client.
   */
  TTransactionServer.Client client;
  /**
   * Whether this client is valid for use.
   */
  private final AtomicBoolean isValid = new AtomicBoolean(true);
  /**
   * Constructor from an existing, connected thrift transport.
   *
   * @param transport the thrift transport layer. It must already be connected
   */
  public TransactionServiceThriftClient(TTransport transport) {
    this.transport = transport;
    // thrift protocol layer, we use binary because so does the service
    TProtocol protocol = new TBinaryProtocol(transport);
    // and create a thrift client
    this.client = new TTransactionServer.Client(protocol);
  }
  /**
   * close this client. may be called multiple times
   */
  public void close() {
    if (this.transport.isOpen()) {
      this.transport.close();
    }
  }
  /** Starts a long-running transaction. */
  public Transaction startLong() throws TException {
    try {
      return TransactionConverterUtils.unwrap(client.startLong());
    } catch (TException e) {
      // A TException indicates a transport-level failure; flag this
      // client so callers can discard it (see isValid()). This pattern
      // repeats in every method below.
      isValid.set(false);
      throw e;
    }
  }
  /** Starts a short transaction with the default timeout. */
  public Transaction startShort() throws TException {
    try {
      return TransactionConverterUtils.unwrap(client.startShort());
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Starts a short transaction with an explicit timeout. */
  public Transaction startShort(int timeout) throws TException {
    try {
      return TransactionConverterUtils.unwrap(client.startShortTimeout(timeout));
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /**
   * Checks whether the given change set can be committed under the
   * transaction, wrapping each change id in a ByteBuffer for thrift.
   */
  public boolean canCommit(Transaction tx, Collection<byte[]> changeIds)
    throws TException, TransactionNotInProgressException {
    try {
      return client.canCommitTx(TransactionConverterUtils.wrap(tx),
                                ImmutableSet.copyOf(Iterables.transform(changeIds, BYTES_WRAPPER))).isValue();
    } catch (TTransactionNotInProgressException e) {
      // Translate the thrift-specific exception into the public API one.
      throw new TransactionNotInProgressException(e.getMessage());
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Commits the transaction, returning whether the commit succeeded. */
  public boolean commit(Transaction tx) throws TException, TransactionNotInProgressException {
    try {
      return client.commitTx(TransactionConverterUtils.wrap(tx)).isValue();
    } catch (TTransactionNotInProgressException e) {
      throw new TransactionNotInProgressException(e.getMessage());
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Aborts the transaction. */
  public void abort(Transaction tx) throws TException {
    try {
      client.abortTx(TransactionConverterUtils.wrap(tx));
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Invalidates the transaction with the given write pointer. */
  public boolean invalidate(long tx) throws TException {
    try {
      return client.invalidateTx(tx);
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Creates a checkpoint of the given transaction. */
  public Transaction checkpoint(Transaction tx) throws TException {
    try {
      return TransactionConverterUtils.unwrap(client.checkpoint(TransactionConverterUtils.wrap(tx)));
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /**
   * Fetches a snapshot of the transaction state as an InputStream,
   * avoiding a copy when the returned ByteBuffer is array-backed.
   */
  public InputStream getSnapshotStream() throws TException, TransactionCouldNotTakeSnapshotException {
    try {
      ByteBuffer buffer = client.getSnapshot();
      if (buffer.hasArray()) {
        return new ByteArrayInputStream(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
      }
      // The ByteBuffer is not backed by array. Read the content to a new byte array and return an InputStream of that.
      byte[] snapshot = new byte[buffer.remaining()];
      buffer.get(snapshot);
      return new ByteArrayInputStream(snapshot);
    } catch (TTransactionCouldNotTakeSnapshotException e) {
      throw new TransactionCouldNotTakeSnapshotException(e.getMessage());
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Returns the server's status string. */
  public String status() throws TException {
    try {
      return client.status();
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Resets the transaction manager's state on the server. */
  public void resetState() throws TException {
    try {
      client.resetState();
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Removes the given transaction ids from the invalid list. */
  public boolean truncateInvalidTx(Set<Long> invalidTxIds) throws TException {
    try {
      return client.truncateInvalidTx(invalidTxIds).isValue();
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Removes invalid transactions older than the given time. */
  public boolean truncateInvalidTxBefore(long time) throws TException, InvalidTruncateTimeException {
    try {
      return client.truncateInvalidTxBefore(time).isValue();
    } catch (TInvalidTruncateTimeException e) {
      throw new InvalidTruncateTimeException(e.getMessage());
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Returns the size of the server's invalid transaction list. */
  public int getInvalidSize() throws TException {
    try {
      return client.invalidTxSize();
    } catch (TException e) {
      isValid.set(false);
      throw e;
    }
  }
  /** Returns false once any call has observed a transport failure. */
  public boolean isValid() {
    return isValid.get();
  }
}
|
cdapio/tephra
|
tephra-core/src/main/java/co/cask/tephra/distributed/TransactionServiceThriftClient.java
|
Java
|
apache-2.0
| 7,323
|
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using Microsoft.VisualStudio.Shell.Interop;
using Microsoft.VisualStudio.TextManager.Interop;
namespace Microsoft.VisualStudio.LanguageServices.Implementation.ProjectSystem.CPS
{
    // This partial implements the VS external-error reporting interfaces
    // by delegating every member to the lazily-created external error
    // reporter (_externalErrorReporterOpt).
    internal sealed partial class CPSProject : IVsReportExternalErrors, IVsLanguageServiceBuildErrorReporter2
    {
        // Clears all reported external errors.
        public int ClearAllErrors()
        {
            return _externalErrorReporterOpt.Value.ClearAllErrors();
        }
        // Adds a batch of new external errors.
        public int AddNewErrors(IVsEnumExternalErrors pErrors)
        {
            return _externalErrorReporterOpt.Value.AddNewErrors(pErrors);
        }
        // Retrieves the currently reported external errors.
        public int GetErrors(out IVsEnumExternalErrors pErrors)
        {
            return _externalErrorReporterOpt.Value.GetErrors(out pErrors);
        }
        // Reports a single build error at a point location.
        public int ReportError(string bstrErrorMessage, string bstrErrorId, VSTASKPRIORITY nPriority, int iLine, int iColumn, string bstrFileName)
        {
            return _externalErrorReporterOpt.Value.ReportError(bstrErrorMessage, bstrErrorId, nPriority, iLine, iColumn, bstrFileName);
        }
        // Clears errors reported through this reporter.
        public int ClearErrors()
        {
            return _externalErrorReporterOpt.Value.ClearErrors();
        }
        // Reports a single build error with a full start/end span.
        public void ReportError2(string bstrErrorMessage, string bstrErrorId, VSTASKPRIORITY nPriority, int iStartLine, int iStartColumn, int iEndLine, int iEndColumn, string bstrFileName)
        {
            _externalErrorReporterOpt.Value.ReportError2(bstrErrorMessage, bstrErrorId, nPriority, iStartLine, iStartColumn, iEndLine, iEndColumn, bstrFileName);
        }
    }
}
|
aelij/roslyn
|
src/VisualStudio/Core/Impl/ProjectSystem/CPS/CPSProject_ExternalErrorReporting.cs
|
C#
|
apache-2.0
| 1,708
|
-- @description function_in_subqry_constant_withfunc2_79.sql
-- @db_name functionproperty
-- @author tungs1
-- @modified 2013-04-03 12:00:00
-- @created 2013-04-03 12:00:00
-- @tags functionProperties
-- Exercises a nested function call (func1_sql_setint_stb over
-- func2_mod_int_stb) evaluated inside a subquery with a constant
-- argument; the ORDER BY keeps the output deterministic for diffing.
SELECT * FROM foo, (SELECT func1_sql_setint_stb(func2_mod_int_stb(5)) from foo) r order by 1,2,3;
|
lintzc/gpdb
|
src/test/tinc/tincrepo/functions/functionProperty/sql/function_in_subqry_constant_withfunc2_79.sql
|
SQL
|
apache-2.0
| 301
|
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed
with this work for additional information regarding copyright
ownership. The ASF licenses this file to you under the Apache
License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
Copyright 1999-2007 Rogue Wave Software, Inc.
-->
<HTML>
<HEAD>
<TITLE><stdexcept></TITLE>
<LINK REL=StyleSheet HREF="../rw.css" TYPE="text/css" TITLE="Apache stdcxx Stylesheet"></HEAD>
<BODY BGCOLOR=#FFFFFF>
<A HREF="stack.html"><IMG SRC="images/bprev.gif" WIDTH=20 HEIGHT=21 ALT="Previous file" BORDER=O></A><A HREF="noframes.html"><IMG SRC="images/btop.gif" WIDTH=56 HEIGHT=21 ALT="Top of Document" BORDER=O></A><A HREF="booktoc.html"><IMG SRC="images/btoc.gif" WIDTH=56 HEIGHT=21 ALT="Contents" BORDER=O></A><A HREF="tindex.html"><IMG SRC="images/bindex.gif" WIDTH=56 HEIGHT=21 ALT="Index page" BORDER=O></A><A HREF="streamiterators.html"><IMG SRC="images/bnext.gif" WIDTH=25 HEIGHT=21 ALT="Next file" BORDER=O></A><DIV CLASS="DOCUMENTNAME"><B>Apache C++ Standard Library Reference Guide</B></DIV>
<H2><stdexcept></H2>
<P><B>Library:</B> <A HREF="2-3.html">Diagnostics</A></P>
<PRE><HR><B><I>Header</I></B><HR></PRE>
<UL>
<LI><A HREF="#sec1">Local Index</A></LI>
<LI><A HREF="#sec2">Summary</A></LI>
<LI><A HREF="#sec3">Synopsis</A></LI>
<LI><A HREF="#sec4">See Also</A></LI>
<LI><A HREF="#sec5">Standards Conformance</A></LI>
</UL>
<A NAME="sec1"><H3>Local Index</H3></A>
No Entries
<A NAME="sec2"><H3>Summary</H3></A>
<P>The header <SAMP><stdexcept></SAMP> is part of the Diagnostics library of the C++ Standard Library. It defines the classes that the C++ Standard Library itself and C++ programs may use to report certain errors. These classes include: <B><I><A HREF="logic-error.html">logic_error</A></I></B>, <B><I><A HREF="domain-error.html">domain_error</A></I></B>, <B><I><A HREF="invalid-argument.html">invalid_argument</A></I></B>, <B><I><A HREF="length-error.html">length_error</A></I></B>, <B><I><A HREF="out-of-range.html">out_of_range</A></I></B>, <B><I><A HREF="runtime-error.html">runtime_error</A></I></B>, <B><I><A HREF="range-error.html">range_error</A></I></B>, <B><I><A HREF="overflow-error.html">overflow_error</A></I></B>, and <B><I><A HREF="underflow-error.html">underflow_error</A></I></B>.</P>
<A NAME="sec3"><H3>Synopsis</H3></A>
<PRE>namespace std {
class <SAMP><A HREF="logic-error.html">logic_error</A></SAMP>;
class <SAMP><A HREF="domain-error.html">domain_error</A></SAMP>;
class <SAMP><A HREF="invalid-argument.html">invalid_argument</A></SAMP>;
class <SAMP><A HREF="length-error.html">length_error</A></SAMP>;
class <SAMP><A HREF="out-of-range.html">out_of_range</A></SAMP>;
class <SAMP><A HREF="runtime-error.html">runtime_error</A></SAMP>;
class <SAMP><A HREF="range-error.html">range_error</A></SAMP>;
class <SAMP><A HREF="overflow-error.html">overflow_error</A></SAMP>;
class <SAMP><A HREF="underflow-error.html">underflow_error</A></SAMP>;
}
</PRE>
<A NAME="sec4"><H3>See Also</H3></A>
<P><B><I><A HREF="logic-error.html">logic_error</A></I></B>, <B><I><A HREF="domain-error.html">domain_error</A></I></B>, <B><I><A HREF="invalid-argument.html">invalid_argument</A></I></B>, <B><I><A HREF="length-error.html">length_error</A></I></B>, <B><I><A HREF="out-of-range.html">out_of_range</A></I></B>, <B><I><A HREF="runtime-error.html">runtime_error</A></I></B>, <B><I><A HREF="range-error.html">range_error</A></I></B>, <B><I><A HREF="overflow-error.html">overflow_error</A></I></B>, <B><I><A HREF="underflow-error.html">underflow_error</A></I></B>, <A HREF="exceptions.html">Exceptions</A></P>
<A NAME="sec5"><H3>Standards Conformance</H3></A>
<P><I>ISO/IEC 14882:1998 -- International Standard for Information Systems --Programming Language C++, Section 19.1</I></P>
<BR>
<HR>
<A HREF="stack.html"><IMG SRC="images/bprev.gif" WIDTH=20 HEIGHT=21 ALT="Previous file" BORDER=O></A><A HREF="noframes.html"><IMG SRC="images/btop.gif" WIDTH=56 HEIGHT=21 ALT="Top of Document" BORDER=O></A><A HREF="booktoc.html"><IMG SRC="images/btoc.gif" WIDTH=56 HEIGHT=21 ALT="Contents" BORDER=O></A><A HREF="tindex.html"><IMG SRC="images/bindex.gif" WIDTH=56 HEIGHT=21 ALT="Index page" BORDER=O></A><A HREF="streamiterators.html"><IMG SRC="images/bnext.gif" WIDTH=20 HEIGHT=21 ALT="Next file" BORDER=O></A>
<!-- Google Analytics tracking code -->
<script src="http://www.google-analytics.com/urchin.js" type="text/javascript">
</script>
<script type="text/javascript">
_uacct = "UA-1775151-1";
urchinTracker();
</script>
<!-- end of Google Analytics tracking code -->
</BODY>
</HTML>
|
pathscale/stdcxx
|
doc/stdlibref/stdexcept-h.html
|
HTML
|
apache-2.0
| 5,229
|
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package groundstation
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/restjson"
)
// GroundStation provides the API operation methods for making requests to
// AWS Ground Station. See this package's package overview docs
// for details on the service.
//
// GroundStation methods are safe to use concurrently. It is not safe to
// modify or mutate any of the struct's properties though.
type GroundStation struct {
	*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)

// Used for custom request initialization logic
var initRequest func(*request.Request)

// Service information constants
const (
	ServiceName = "GroundStation" // Name of service.
	EndpointsID = "groundstation" // ID to lookup a service endpoint with.
	ServiceID   = "GroundStation" // ServiceID is a unique identifier of a specific service.
)
// New creates a new instance of the GroundStation client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     // Create a GroundStation client from just a session.
//     svc := groundstation.New(mySession)
//
//     // Create a GroundStation client with additional configuration
//     svc := groundstation.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *GroundStation {
	c := p.ClientConfig(EndpointsID, cfgs...)
	// Fall back to the service's canonical signing name unless the
	// caller configured one explicitly.
	if c.SigningNameDerived || len(c.SigningName) == 0 {
		c.SigningName = "groundstation"
	}
	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GroundStation {
	svc := &GroundStation{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   ServiceName,
				ServiceID:     ServiceID,
				SigningName:   signingName,
				SigningRegion: signingRegion,
				Endpoint:      endpoint,
				APIVersion:    "2019-05-23",
			},
			handlers,
		),
	}

	// Handlers: sign with SigV4 and (un)marshal via the REST-JSON protocol.
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	// Run custom client initialization if present
	if initClient != nil {
		initClient(svc.Client)
	}

	return svc
}
// newRequest creates a new request for a GroundStation operation and runs any
// custom request initialization.
func (c *GroundStation) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	// Run custom request initialization if present
	if initRequest != nil {
		initRequest(req)
	}

	return req
}
|
HotelsDotCom/kube-aws
|
vendor/github.com/aws/aws-sdk-go/service/groundstation/service.go
|
GO
|
apache-2.0
| 3,235
|
#!/bin/bash
# Cross-compile FFmpeg 3.3.3 as small shared libraries for Android
# (armv7, API level 14) using the NDK's GCC 4.9 standalone toolchain.
export TMPDIR=/home/joe/workspace/ffmpeg-3.3.3/temp
NDK=/home/joe/Android/Sdk/ndk-bundle
PLATFORM=$NDK/platforms/android-14/arch-arm/
TOOLCHAIN=$NDK/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64
CPU=arm
PREFIX=/home/joe/workspace/ffmpeg-3.3.3/out/arm/
ADDI_CFLAGS="-marm"
function build_one
{
# Configure a minimal shared-library build: no command-line programs,
# no docs, small code size. (Fixed: --disable-doc was passed twice.)
./configure \
    --prefix="$PREFIX" \
    --enable-shared \
    --disable-static \
    --disable-doc \
    --disable-ffmpeg \
    --disable-ffplay \
    --disable-ffprobe \
    --disable-ffserver \
    --disable-symver \
    --enable-small \
    --cross-prefix="$TOOLCHAIN"/bin/arm-linux-androideabi- \
    --target-os=linux \
    --arch=arm \
    --enable-cross-compile \
    --sysroot="$PLATFORM" \
    --extra-cflags="-Os -fpic $ADDI_CFLAGS" \
    --extra-ldflags="$ADDI_LDFLAGS" \
    $ADDITIONAL_CONFIGURE_FLAG
make clean
make
make install
}
build_one
|
joetang1989/Android-Universal-Image-Loader-Study
|
note/11.FFmpeg编译/build-scripts-of-ffmpeg-x264-for-android-ndk/参考脚本/buildffmpeg.sh
|
Shell
|
apache-2.0
| 814
|
package org.elasticsearch.cloud.rackspace;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.jclouds.ContextBuilder;
import org.jclouds.domain.Location;
import org.jclouds.domain.LocationBuilder;
import org.jclouds.domain.LocationScope;
import org.jclouds.openstack.swift.v1.blobstore.RegionScopedBlobStoreContext;
/**
 * Lifecycle component exposing a jclouds blob store context and region
 * location for Rackspace Cloud Files.
 *
 * User: Joe Linn
 * Date: 3/4/14
 * Time: 10:09 AM
 */
public class CloudFilesService extends AbstractLifecycleComponent<CloudFilesService>{
    // Lazily-initialized jclouds blob store context (built in context()).
    private RegionScopedBlobStoreContext context;
    // Lazily-initialized data center location (built in location()).
    private Location location;
    @Inject
    protected CloudFilesService(Settings settings, SettingsFilter settingsFilter) {
        super(settings);
        // NOTE(review): presumably this filters Rackspace credentials out
        // of exposed settings — confirm against RackspaceSettingsFilter.
        settingsFilter.addFilter(new RackspaceSettingsFilter());
    }
    /**
     * Returns the configured region ("region" setting, default "ORD") as a
     * jclouds Location. Synchronized so the location is built only once.
     */
    public synchronized Location location(){
        if(location != null){
            return location;
        }
        final String dataCenter = componentSettings.get("region", "ORD");
        location = new LocationBuilder().scope(LocationScope.REGION).id(dataCenter).description("A Rackspace data center.").build();
        return location;
    }
    /**
     * Builds (once) and returns the blob store context using the
     * rackspace.account / rackspace.key credentials from settings.
     */
    public synchronized RegionScopedBlobStoreContext context(){
        if(context != null){
            return context;
        }
        String account = settings.get("rackspace.account");
        String key = settings.get("rackspace.key");
        context = ContextBuilder.newBuilder("rackspace-cloudfiles-us").credentials(account, key).buildView(RegionScopedBlobStoreContext.class);
        return context;
    }
    @Override
    protected void doStart() throws ElasticsearchException {
        // No eager startup work; the context is created on first use.
    }
    @Override
    protected void doStop() throws ElasticsearchException {
        // Nothing to stop; resource cleanup happens in doClose().
    }
    @Override
    protected void doClose() throws ElasticsearchException {
        if(context != null){
            context.close();
        }
    }
}
|
jlinn/elasticsearch-cloud-rackspace
|
src/main/java/org/elasticsearch/cloud/rackspace/CloudFilesService.java
|
Java
|
apache-2.0
| 2,067
|
// Initialize Bootstrap popovers once the DOM is ready.
$(document).ready(function () {
    // popover for app list table
    $('[data-toggle="popover"]').popover();
});
|
Intera/urlaubsverwaltung
|
src/main/resources/static/js/popover.js
|
JavaScript
|
apache-2.0
| 94
|
<form class="publish-modal-form" role="form" ng-submit="publish()">
<div class="modal-header">
<button type="button" class="close" ng-click="cancel()">
      <span aria-hidden="true">&times;</span>
<span class="sr-only">Close</span>
</button>
<h3 class="modal-title"
ng-if="!isList"
translate="home.publish.headerLabel">Publish Pipeline</h3>
<h3 class="modal-title"
ng-if="isList"
translate="home.publish.listHeaderLabel">Publish Pipelines</h3>
</div>
<div class="modal-body">
<ng-include src="'app/home/alerts/error/errorAlert.tpl.html'"></ng-include>
<div ng-if="!isList" class="form-group" show-errors="{ trigger: 'keypress' }">
<label class="control-label" translate="global.form.name">Name</label>
<i class="fa fa-info-circle help-icon"
popover-placement="right"
popover-append-to-body="false"
popover="{{'home.library.nameValidation' | translate}}"
popover-trigger="mouseenter"></i>
<input type="text"
disabled
name="name"
class="form-control"
ng-pattern="$root.common.namePattern"
ng-required
pattern="{{$root.common.namePattern}}"
title="{{'home.library.nameValidation' | translate}}"
placeholder="{{'home.library.namePlaceholder' | translate}}"
ng-model="commitPipelineModel.name"
focus-me="true">
<p class="help-block">{{'home.library.nameValidation' | translate}}</p>
</div>
<div class="form-group">
<label class="control-label" translate="home.library.commitMessage">Commit Message</label>
<textarea autofocus class="form-control" rows="2"
required
placeholder="{{'home.library.commitMessagePlaceholder' | translate}}"
ng-model="commitPipelineModel.commitMessage"></textarea>
</div>
<ul class="properties clearfix">
<li class="pull-right">
<span class="properties-label">{{'home.publish.pipelineRepository' | translate}}: </span>
<span class="properties-value">
<a href="{{remoteBaseUrl}}" target="_blank">{{remoteBaseUrl}}</a>
</span>
</li>
</ul>
</div>
<div class="modal-footer">
<button type="reset" class="btn btn-default" ng-disabled="publishing" ng-click="cancel()"
translate="global.form.cancel">Cancel</button>
<button type="submit" class="btn btn-primary" ng-disabled="publishing"
translate="global.form.publish">Publish</button>
</div>
</form>
|
studanshu/datacollector
|
datacollector-ui/src/main/webapp/app/home/library/publish/publishModal.tpl.html
|
HTML
|
apache-2.0
| 2,593
|
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import optparse
import re
import socket
from swift.common import exceptions
from swift.common.utils import expand_ipv6, is_valid_ip, is_valid_ipv4, \
is_valid_ipv6
def tiers_for_dev(dev):
    """
    Returns a tuple of tiers for a given device in ascending order by
    length.

    :returns: tuple of tiers
    """
    # The device's full path through the topology: region, zone, ip,
    # then device id. Each tier is a non-empty prefix of that path.
    full_path = (dev['region'], dev['zone'], dev['ip'], dev['id'])
    return tuple(full_path[:depth] for depth in range(1, len(full_path) + 1))
def build_tier_tree(devices):
    """
    Construct the tier tree from the zone layout.

    The tier tree is a dictionary mapping each tier tuple to the set of
    its child tiers. A synthetic root node of () is generated so that
    there's one tree, not a forest. For example, a device in region 1,
    zone 1 with ip 10.0.0.1 and id 0 contributes the edges::

        ()                 -> (1,)
        (1,)               -> (1, 1)
        (1, 1)             -> (1, 1, '10.0.0.1')
        (1, 1, '10.0.0.1') -> (1, 1, '10.0.0.1', 0)

    :devices: device dicts from which to generate the tree
    :returns: tier tree as a defaultdict of sets
    """
    tier2children = defaultdict(set)
    for dev in devices:
        for tier in tiers_for_dev(dev):
            # tier[:-1] of a 1-tuple is (), so length-1 tiers hang off
            # the synthetic root automatically.
            tier2children[tier[:-1]].add(tier)
    return tier2children
def validate_and_normalize_ip(ip):
    """
    Return normalized ip if the ip is a valid ip.
    Otherwise raise ValueError Exception. The hostname is
    normalized to all lower case. IPv6-addresses are converted to
    lowercase and fully expanded.
    """
    lowered = ip.lower()
    if is_valid_ipv4(lowered):
        return lowered
    if is_valid_ipv6(lowered):
        # Canonicalize to the fully spelled-out IPv6 representation.
        return expand_ipv6(lowered)
    raise ValueError('Invalid ip %s' % ip)
def validate_and_normalize_address(address):
    """
    Return normalized address if the address is a valid ip or hostname.
    Otherwise raise ValueError Exception. The hostname is
    normalized to all lower case. IPv6-addresses are converted to
    lowercase and fully expanded.

    RFC1123 2.1 Host Names and Numbers
    DISCUSSION
        This last requirement is not intended to specify the complete
        syntactic form for entering a dotted-decimal host number;
        that is considered to be a user-interface issue.  For
        example, a dotted-decimal number must be enclosed within
        "[ ]" brackets for SMTP mail (see Section 5.2.17).  This
        notation could be made universal within a host system,
        simplifying the syntactic checking for a dotted-decimal
        number.
        If a dotted-decimal number can be entered without such
        identifying delimiters, then a full syntactic check must be
        made, because a segment of a host domain name is now allowed
        to begin with a digit and could legally be entirely numeric
        (see Section 6.1.2.4).  However, a valid host name can never
        have the dotted-decimal form #.#.#.#, since at least the
        highest-level component label will be alphabetic.
    """
    # NOTE(review): lstrip/rstrip remove *all* leading '[' / trailing ']'
    # characters, not a single pair — presumably bracketed values never
    # contain nested brackets; confirm if that matters.
    new_address = address.lstrip('[').rstrip(']')
    # A fully bracketed value (e.g. "[::1]") must be an IP literal.
    if address.startswith('[') and address.endswith(']'):
        return validate_and_normalize_ip(new_address)
    new_address = new_address.lower()
    if is_valid_ipv4(new_address):
        return new_address
    elif is_valid_ipv6(new_address):
        # Expand to the canonical, fully spelled-out IPv6 form.
        return expand_ipv6(new_address)
    elif is_valid_hostname(new_address):
        return new_address
    else:
        raise ValueError('Invalid address %s' % address)
def is_valid_hostname(hostname):
    """
    Return True if the provided hostname is a valid hostname
    """
    # RFC 1123: total hostname length must be 1..255 characters.
    if len(hostname) < 1 or len(hostname) > 255:
        return False
    if hostname.endswith('.'):
        # strip exactly one dot from the right, if present
        hostname = hostname[:-1]
    # Each dot-separated label: 1-63 alphanumeric/hyphen characters, and
    # may not start or end with a hyphen. Raw string fixes the invalid
    # '\d' escape sequence (a DeprecationWarning in Python 3, and a
    # SyntaxError in future versions).
    allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(x) for x in hostname.split("."))
def is_local_device(my_ips, my_port, dev_ip, dev_port):
    """
    Return True if the provided dev_ip and dev_port are among the IP
    addresses specified in my_ips and my_port respectively.

    To support accurate locality determination in the server-per-port
    deployment, when my_port is None, only IP addresses are used for
    determining locality (dev_port is ignored).

    If dev_ip is a hostname then it is first translated to an IP
    address before checking it against my_ips.
    """
    candidate_ips = []
    if not is_valid_ip(dev_ip) and is_valid_hostname(dev_ip):
        try:
            # get the ip for this host; use getaddrinfo so that
            # it works for both ipv4 and ipv6 addresses
            addrinfo = socket.getaddrinfo(dev_ip, dev_port)
            for addr in addrinfo:
                family = addr[0]
                dev_ip = addr[4][0]  # get the ip-address
                if family == socket.AF_INET6:
                    # Normalize so comparison against my_ips is canonical.
                    dev_ip = expand_ipv6(dev_ip)
                candidate_ips.append(dev_ip)
        except socket.gaierror:
            # An unresolvable hostname can never match a local IP.
            return False
    else:
        if is_valid_ipv6(dev_ip):
            dev_ip = expand_ipv6(dev_ip)
        candidate_ips = [dev_ip]
    for dev_ip in candidate_ips:
        if dev_ip in my_ips and (my_port is None or dev_port == my_port):
            return True
    return False
def parse_search_value(search_value):
    """The <search-value> can be of the form::

        d<device_id>r<region>z<zone>-<ip>:<port>R<r_ip>:<r_port>/
         <device_name>_<meta>

    Where <r_ip> and <r_port> are replication ip and port.

    Any part is optional, but you must include at least one part.

    Examples::

        d74              Matches the device id 74
        r4               Matches devices in region 4
        z1               Matches devices in zone 1
        z1-1.2.3.4       Matches devices in zone 1 with the ip 1.2.3.4
        1.2.3.4          Matches devices in any zone with the ip 1.2.3.4
        z1:5678          Matches devices in zone 1 using port 5678
        :5678            Matches devices that use port 5678
        R5.6.7.8         Matches devices that use replication ip 5.6.7.8
        R:5678           Matches devices that use replication port 5678
        1.2.3.4R5.6.7.8  Matches devices that use ip 1.2.3.4 and replication ip
                         5.6.7.8
        /sdb1            Matches devices with the device name sdb1
        _shiny           Matches devices with shiny in the meta data
        _"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
        [::1]            Matches devices in any zone with the ip ::1
        z1-[::1]:5678    Matches devices in zone 1 with ip ::1 and port 5678

    Most specific example::

        d74r4z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"

    Nerd explanation:

        All items require their single character prefix except the ip, in which
        case the - is optional unless the device id or zone is also included.
    """
    orig_search_value = search_value
    match = {}
    # Each clause below consumes its prefix character plus value from the
    # front of search_value; anything left unconsumed at the end is an error.
    if search_value.startswith('d'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['id'] = int(search_value[1:i])
        search_value = search_value[i:]
    if search_value.startswith('r'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['region'] = int(search_value[1:i])
        search_value = search_value[i:]
    if search_value.startswith('z'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['zone'] = int(search_value[1:i])
        search_value = search_value[i:]
    # The '-' separating the zone from the ip is optional.
    if search_value.startswith('-'):
        search_value = search_value[1:]
    if search_value and search_value[0].isdigit():
        # Bare IPv4 address: digits and dots.
        i = 1
        while i < len(search_value) and search_value[i] in '0123456789.':
            i += 1
        match['ip'] = search_value[:i]
        search_value = search_value[i:]
    elif search_value and search_value.startswith('['):
        # Bracketed IPv6 address, e.g. "[::1]".
        i = 1
        while i < len(search_value) and search_value[i] != ']':
            i += 1
        i += 1
        match['ip'] = search_value[:i].lstrip('[').rstrip(']')
        search_value = search_value[i:]
    if 'ip' in match:
        # ipv6 addresses are converted to all lowercase
        # and use the fully expanded representation
        match['ip'] = validate_and_normalize_ip(match['ip'])
    if search_value.startswith(':'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['port'] = int(search_value[1:i])
        search_value = search_value[i:]
    # replication parameters
    if search_value.startswith('R'):
        search_value = search_value[1:]
        if search_value and search_value[0].isdigit():
            i = 1
            while (i < len(search_value) and
                   search_value[i] in '0123456789.'):
                i += 1
            match['replication_ip'] = search_value[:i]
            search_value = search_value[i:]
        elif search_value and search_value.startswith('['):
            i = 1
            while i < len(search_value) and search_value[i] != ']':
                i += 1
            i += 1
            match['replication_ip'] = search_value[:i].lstrip('[').rstrip(']')
            search_value = search_value[i:]
        if 'replication_ip' in match:
            # ipv6 addresses are converted to all lowercase
            # and use the fully expanded representation
            match['replication_ip'] = \
                validate_and_normalize_ip(match['replication_ip'])
        if search_value.startswith(':'):
            i = 1
            while i < len(search_value) and search_value[i].isdigit():
                i += 1
            match['replication_port'] = int(search_value[1:i])
            search_value = search_value[i:]
    # '/<device>' runs until an '_' or the end of the string.
    if search_value.startswith('/'):
        i = 1
        while i < len(search_value) and search_value[i] != '_':
            i += 1
        match['device'] = search_value[1:i]
        search_value = search_value[i:]
    # '_<meta>' consumes everything that remains.
    if search_value.startswith('_'):
        match['meta'] = search_value[1:]
        search_value = ''
    if search_value:
        raise ValueError('Invalid <search-value>: %s' %
                         repr(orig_search_value))
    return match
def parse_search_values_from_opts(opts):
    """
    Convert optparse style options into a dictionary for searching.

    :param opts: optparse style options
    :returns: a dictionary with search values to filter devices,
              supported parameters are id, region, zone, ip, port,
              replication_ip, replication_port, device, weight, meta
    """
    supported_keys = ('id', 'region', 'zone', 'ip', 'port', 'replication_ip',
                      'replication_port', 'device', 'weight', 'meta')
    search_values = {}
    for key in supported_keys:
        value = getattr(opts, key, None)
        if not value:
            continue
        if key in ('ip', 'replication_ip'):
            # Canonicalize addresses so lookups match stored values.
            value = validate_and_normalize_address(value)
        search_values[key] = value
    return search_values
def parse_change_values_from_opts(opts):
    """
    Convert optparse style options into a dictionary for changing.

    :param opts: optparse style options
    :returns: a dictionary with change values to filter devices,
              supported parameters are ip, port, replication_ip,
              replication_port
    """
    change_values = {}
    prefix = 'change_'
    for key in ('change_ip', 'change_port', 'change_replication_ip',
                'change_replication_port', 'change_device', 'change_meta'):
        value = getattr(opts, key, None)
        if not value:
            continue
        if key in ('change_ip', 'change_replication_ip'):
            # Canonicalize addresses so updates store normalized values.
            value = validate_and_normalize_address(value)
        change_values[key[len(prefix):]] = value
    return change_values
def parse_add_value(add_value):
    """
    Convert an add value, like 'r1z2-10.1.2.3:7878/sdf', to a dictionary.

    If the string does not start with 'r<N>', then the value of 'region' in
    the returned dictionary will be None. Callers should check for this and
    set a reasonable default. This is done so callers can emit errors or
    warnings if desired.

    Similarly, 'replication_ip' and 'replication_port' will be None if not
    specified.

    :returns: dictionary with keys 'region', 'zone', 'ip', 'port', 'device',
              'replication_ip', 'replication_port', 'meta'
    :raises ValueError: if add_value is malformed
    """
    region = None
    rest = add_value
    # Optional leading 'r<region>'.
    if add_value.startswith('r'):
        i = 1
        while i < len(add_value) and add_value[i].isdigit():
            i += 1
        region = int(add_value[1:i])
        rest = add_value[i:]
    # Mandatory 'z<zone>'.
    if not rest.startswith('z'):
        raise ValueError('Invalid add value: %s' % add_value)
    i = 1
    while i < len(rest) and rest[i].isdigit():
        i += 1
    zone = int(rest[1:i])
    rest = rest[i:]
    # Mandatory '-<ip>:<port>'.
    if not rest.startswith('-'):
        raise ValueError('Invalid add value: %s' % add_value)
    ip, port, rest = parse_address(rest[1:])
    # Optional 'R<replication_ip>:<replication_port>'.
    replication_ip = replication_port = None
    if rest.startswith('R'):
        replication_ip, replication_port, rest = \
            parse_address(rest[1:])
    # Mandatory '/<device>', optionally followed by '_<meta>'.
    if not rest.startswith('/'):
        raise ValueError(
            'Invalid add value: %s' % add_value)
    i = 1
    while i < len(rest) and rest[i] != '_':
        i += 1
    device_name = rest[1:i]
    if not validate_device_name(device_name):
        raise ValueError('Invalid device name')
    rest = rest[i:]
    meta = ''
    if rest.startswith('_'):
        meta = rest[1:]
    return {'region': region, 'zone': zone, 'ip': ip, 'port': port,
            'device': device_name, 'replication_ip': replication_ip,
            'replication_port': replication_port, 'meta': meta}
def parse_address(rest):
    """
    Split a leading '<ip>:<port>' off of ``rest``.

    The ip may be a bracketed IPv6 literal; it is returned in its
    normalized (lowercase, fully expanded) form.

    :returns: (ip, port, remainder) tuple
    :raises ValueError: if the port is missing or not an integer, or the
                        ip is invalid
    """
    if rest.startswith('['):
        # remove first [] for ip
        rest = rest.replace('[', '', 1).replace(']', '', 1)
    # The address portion runs until an 'R' (replication spec) or a
    # '/' (device spec) terminator, or the end of the string.
    pos = 0
    while (pos < len(rest) and
           not (rest[pos] == 'R' or rest[pos] == '/')):
        pos += 1
    address = rest[:pos]
    rest = rest[pos:]
    # Split on the *last* ':' so IPv6 addresses keep their inner colons.
    port_start = address.rfind(':')
    if port_start == -1:
        raise ValueError('Invalid port in add value')
    ip = address[:port_start]
    try:
        port = int(address[(port_start + 1):])
    except (TypeError, ValueError):
        raise ValueError(
            'Invalid port %s in add value' % address[port_start:])
    # if this is an ipv6 address then we want to convert it
    # to all lowercase and use its fully expanded representation
    # to make searches easier
    ip = validate_and_normalize_ip(ip)
    return (ip, port, rest)
def validate_args(argvish):
    """
    Build OptionParser and validate whether the arguments are in the new
    command-line format.

    :param argvish: command line arguments to parse
    :returns: (new_cmd_format, opts, args); new_cmd_format is True when any
              search/new-format option was supplied
    """
    opts, args = parse_args(argvish)
    # id can be 0 (swift starts generating id from 0),
    # also zone, region and weight can be set to zero — so those four are
    # compared against None rather than tested for truthiness.
    # bool() normalizes the result: the previous or-chain could return the
    # last falsy operand (e.g. '' from opts.meta) instead of False.
    new_cmd_format = bool(
        opts.id is not None or opts.region is not None or
        opts.zone is not None or opts.ip or opts.port or
        opts.replication_ip or opts.replication_port or
        opts.device or opts.weight is not None or opts.meta)
    return (new_cmd_format, opts, args)
def parse_args(argvish):
    """
    Build OptionParser and evaluate command line arguments.
    """
    # (short, long, kwargs) spec for each supported option; lowercase
    # letters select search values, uppercase letters select change values.
    option_specs = (
        ('-u', '--id', dict(type='int', help='Device ID')),
        ('-r', '--region', dict(type='int', help='Region')),
        ('-z', '--zone', dict(type='int', help='Zone')),
        ('-i', '--ip', dict(type='string', help='IP address')),
        ('-p', '--port', dict(type='int', help='Port number')),
        ('-j', '--replication-ip',
         dict(type='string', help='Replication IP address')),
        ('-q', '--replication-port',
         dict(type='int', help='Replication port number')),
        ('-d', '--device',
         dict(type='string', help='Device name (e.g. md0, sdb1)')),
        ('-w', '--weight', dict(type='float', help='Device weight')),
        ('-m', '--meta',
         dict(type='string', default='',
              help='Extra device info (just a string)')),
        ('-I', '--change-ip',
         dict(type='string', help='IP address for change')),
        ('-P', '--change-port',
         dict(type='int', help='Port number for change')),
        ('-J', '--change-replication-ip',
         dict(type='string', help='Replication IP address for change')),
        ('-Q', '--change-replication-port',
         dict(type='int', help='Replication port number for change')),
        ('-D', '--change-device',
         dict(type='string', help='Device name (e.g. md0, sdb1) for change')),
        ('-M', '--change-meta',
         dict(type='string', default='',
              help='Extra device info (just a string) for change')),
        ('-y', '--yes',
         dict(default=False, action='store_true',
              help='Assume a yes response to all questions')),
    )
    parser = optparse.OptionParser()
    for short_opt, long_opt, kwargs in option_specs:
        parser.add_option(short_opt, long_opt, **kwargs)
    return parser.parse_args(argvish)
def parse_builder_ring_filename_args(argvish):
    """
    Derive the (builder_file, ring_file) pair from the first CLI argument.

    The argument may name either file; the sibling filename is produced by
    swapping the '.builder' / '.ring.gz' suffix. A name with neither
    suffix is used as the builder file as-is.
    """
    first_arg = argvish[1]
    ring_suffix, builder_suffix = '.ring.gz', '.builder'
    if first_arg.endswith(ring_suffix):
        stem = first_arg[:-len(ring_suffix)]
        return stem + builder_suffix, first_arg
    if first_arg.endswith(builder_suffix):
        stem = first_arg[:-len(builder_suffix)]
    else:
        stem = first_arg
    return first_arg, stem + ring_suffix
def build_dev_from_opts(opts):
    """
    Convert optparse style options into a device dictionary.

    :raises ValueError: if a required option is missing or the device
                        name is invalid
    """
    required = (('region', '-r', '--region'),
                ('zone', '-z', '--zone'),
                ('ip', '-i', '--ip'),
                ('port', '-p', '--port'),
                ('device', '-d', '--device'),
                ('weight', '-w', '--weight'))
    for attribute, shortopt, longopt in required:
        if getattr(opts, attribute, None) is None:
            raise ValueError('Required argument %s/%s not specified.' %
                             (shortopt, longopt))
    ip = validate_and_normalize_address(opts.ip)
    # Replication address/port fall back to the primary ones when unset.
    replication_ip = validate_and_normalize_address(
        opts.replication_ip or opts.ip)
    replication_port = opts.replication_port or opts.port
    if not validate_device_name(opts.device):
        raise ValueError('Invalid device name')
    return {'region': opts.region, 'zone': opts.zone, 'ip': ip,
            'port': opts.port, 'device': opts.device, 'meta': opts.meta,
            'replication_ip': replication_ip,
            'replication_port': replication_port, 'weight': opts.weight}
def dispersion_report(builder, search_filter=None,
                      verbose=False, recalculate=False):
    """
    Compute a dispersion summary for ``builder``.

    :param builder: ring builder whose ``_dispersion_graph`` is inspected;
        the graph is (re)built when missing or when ``recalculate`` is True
    :param search_filter: optional regex; only tiers whose name matches
        (via ``re.match``) are considered
    :param verbose: when True, include a per-tier report under 'graph'
    :param recalculate: force a rebuild of the dispersion graph
    :returns: dict with keys 'max_dispersion', 'worst_tier' and 'graph'
    """
    if recalculate or not builder._dispersion_graph:
        builder._build_dispersion_graph()
    max_allowed_replicas = builder._build_max_replicas_by_tier()
    worst_tier = None
    max_dispersion = 0.0
    sorted_graph = []
    for tier, replica_counts in sorted(builder._dispersion_graph.items()):
        tier_name = get_tier_name(tier, builder)
        if search_filter and not re.match(search_filter, tier_name):
            continue
        max_replicas = int(max_allowed_replicas[tier])
        # Replicas beyond max_replicas in this tier are "at risk"; each
        # extra replica of a part counts once.
        at_risk_parts = sum(replica_counts[i] * (i - max_replicas)
                            for i in range(max_replicas + 1,
                                           len(replica_counts)))
        placed_parts = sum(replica_counts[i] * i for i in range(
            1, len(replica_counts)))
        # NOTE(review): divides by placed_parts — presumably a tier in the
        # dispersion graph always has at least one placed part; confirm.
        tier_dispersion = 100.0 * at_risk_parts / placed_parts
        if tier_dispersion > max_dispersion:
            max_dispersion = tier_dispersion
            worst_tier = tier_name
        if not verbose:
            continue
        tier_report = {
            'max_replicas': max_replicas,
            'placed_parts': placed_parts,
            'dispersion': tier_dispersion,
            'replicas': replica_counts,
        }
        sorted_graph.append((tier_name, tier_report))
    return {
        'max_dispersion': max_dispersion,
        'worst_tier': worst_tier,
        'graph': sorted_graph,
    }
def validate_replicas_by_tier(replicas, replicas_by_tier):
    """
    Validate the sum of the replicas at each tier.

    The sum of the replicas at each tier should be less than or very close
    to the upper limit indicated by replicas.

    :param replicas: float, the upper limit of replicas
    :param replicas_by_tier: defaultdict, the replicas by tier
    :raises exceptions.RingValidationError: when a tier's total differs
        from ``replicas`` by more than a tiny float tolerance
    """
    tier_names = ('cluster', 'regions', 'zones', 'servers', 'devices')
    for depth, tier_name in enumerate(tier_names):
        # Tier tuples of length `depth` all belong to the same level.
        replicas_at_tier = sum(count for tier, count in
                               replicas_by_tier.items() if len(tier) == depth)
        if abs(replicas - replicas_at_tier) > 1e-10:
            raise exceptions.RingValidationError(
                '%s != %s at tier %s' % (
                    replicas_at_tier, replicas, tier_name))
def format_device(region=None, zone=None, ip=None, device=None, **kwargs):
    """
    Convert device dict or tier attributes to a representative string.

    Extra keyword arguments (e.g. other device dict keys) are ignored.

    :returns: a string, the normalized format of a device tier
    """
    return 'r{0}z{1}-{2}/{3}'.format(region, zone, ip, device)
def get_tier_name(tier, builder):
    """
    Render a tier tuple as its display name.

    Depth 1-3 tiers (region / zone / ip) are formatted directly; depth-4
    tiers look the device up in ``builder.devs`` to include its name,
    falling back to "IDd<id>" when the dev has no 'device' key.
    """
    depth = len(tier)
    if depth == 1:
        return 'r%s' % (tier[0],)
    if depth == 2:
        return 'r%sz%s' % tier
    if depth == 3:
        return 'r%sz%s-%s' % tier
    if depth == 4:
        device = builder.devs[tier[3]] or {}
        name = device.get('device', 'IDd%s' % tier[3])
        return format_device(tier[0], tier[1], tier[2], name)
def validate_device_name(device_name):
    """
    Return True when ``device_name`` is non-empty and carries no leading
    or trailing space character.
    """
    if len(device_name) == 0:
        return False
    # Reject names padded with a space (only ' ' is checked, matching
    # startswith/endswith-on-space semantics).
    return not (device_name[0] == ' ' or device_name[-1] == ' ')
def pretty_dev(device):
    # Render a device dict as its canonical "r<region>z<zone>-<ip>/<device>"
    # string; extra dict keys are absorbed by format_device's **kwargs.
    return format_device(**device)
|
smerritt/swift
|
swift/common/ring/utils.py
|
Python
|
apache-2.0
| 26,082
|
/*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.execution.console;
import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.actionSystem.DataContext;
import com.intellij.openapi.fileTypes.PlainTextLanguage;
import com.intellij.openapi.util.Disposer;
import com.intellij.psi.PsiDocumentManager;
import com.intellij.testFramework.EditorTestUtil;
import com.intellij.testFramework.LightPlatformCodeInsightTestCase;
import com.intellij.testFramework.TestActionEvent;
/**
* @author Yuli Fiterman
*/
public class ConsoleHistoryControllerTest extends LightPlatformCodeInsightTestCase {
  private LanguageConsoleImpl myConsole;
  private ConsoleHistoryController myHistoryController;
  private ConsoleExecuteAction myExecAction;
  @Override
  public void setUp() throws Exception {
    super.setUp();
    // Build a plain-text console with an execute action and a history
    // controller wired up, then point the LightPlatform fixture fields
    // (myEditor/myVFile/myFile) at the console so checkResultByText works.
    myConsole = new LanguageConsoleImpl(getProject(), "Test console", PlainTextLanguage.INSTANCE);
    myConsole.setConsoleEditorEnabled(true);
    myExecAction = new ConsoleExecuteAction(myConsole, new MockExecutionActionHandler());
    myExecAction.registerCustomShortcutSet(myExecAction.getShortcutSet(), myConsole.getConsoleEditor().getComponent());
    myHistoryController = new ConsoleHistoryController(new ConsoleRootType("test console", null) {
    }, null, myConsole);
    myHistoryController.setModel(PrefixHistoryModelKt.createModel("default", myConsole));
    myHistoryController.install();
    myConsole.setConsoleEditorEnabled(true);
    myEditor = myConsole.getConsoleEditor();
    myVFile = myConsole.getVirtualFile();
    myFile = PsiDocumentManager.getInstance(getProject()).getPsiFile(myEditor.getDocument());
  }
  // Replaces the console input with markedText and applies the embedded
  // <caret>/<selection> markers to the console editor.
  private void setCaretWithText(String markedText) {
    myConsole.setInputText(markedText);
    EditorTestUtil.CaretAndSelectionState state = EditorTestUtil.extractCaretAndSelectionMarkers(myConsole.getEditorDocument());
    EditorTestUtil.setCaretsAndSelection(myConsole.getConsoleEditor(), state);
    PsiDocumentManager.getInstance(getProject()).commitAllDocuments();
  }
  // Commits pending PSI changes, then fires the console's execute action.
  private void executeCommand() {
    PsiDocumentManager.getInstance(getProject()).commitAllDocuments();
    myExecAction.actionPerformed(new TestActionEvent());
  }
  // Seeds history with "Statement 1..3" plus one differently-prefixed
  // entry, leaving the console input empty afterwards.
  private void execStatementList1() {
    myConsole.setInputText("Statement 1");
    executeCommand();
    myConsole.setInputText("Statement 2");
    executeCommand();
    myConsole.setInputText("Statement 3");
    executeCommand();
    myConsole.setInputText("Different Prefix");
    executeCommand();
    assertEquals("", myConsole.getEditorDocument().getText());
  }
  // Fires the controller's history-next action (moves toward older
  // entries, per the assertions in the tests below).
  private void consoleNext() {
    myHistoryController.getHistoryNext().actionPerformed(AnActionEvent.createFromDataContext("test", null, DataContext.EMPTY_CONTEXT));
  }
  // Fires the controller's history-prev action (moves back toward the
  // original, newer input).
  private void consolePrev() {
    myHistoryController.getHistoryPrev().actionPerformed(AnActionEvent.createFromDataContext("test", null, DataContext.EMPTY_CONTEXT));
  }
  public void testNavigateUp() {
    execStatementList1();
    setCaretWithText("Statement<caret> 4");
    consoleNext();
    checkResultByText("Statement 3<caret>");
  }
  public void testNavigateDown() {
    execStatementList1();
    setCaretWithText("Statement<caret> 4");
    consoleNext();
    consolePrev();
    checkResultByText("Statement 4<caret>");
  }
  public void testRepeatNavigateDown() {
    execStatementList1();
    setCaretWithText("Statement<caret> 4");
    consoleNext();
    consoleNext();
    consolePrev();
    consolePrev();
    checkResultByText("Statement 4<caret>");
  }
  //PY-26413
  public void testNavigateDown2() {
    execStatementList1();
    setCaretWithText("<caret>Statement 4");
    consoleNext();
    consoleNext();
    setCaretWithText("Statement<caret> 3");
    consolePrev();
    checkResultByText("Different Prefix<caret>");
  }
  public void testNavigateUpNoPrefix() {
    execStatementList1();
    setCaretWithText("<caret>Statement 4");
    consoleNext();
    checkResultByText("Different Prefix");
  }
  @Override
  public void tearDown() throws Exception {
    try {
      Disposer.dispose(myConsole);
      myVFile = null;
    }
    catch (Throwable e) {
      // Record any dispose failure without preventing super.tearDown().
      addSuppressedException(e);
    }
    finally {
      super.tearDown();
    }
  }
  private static class MockExecutionActionHandler extends BaseConsoleExecuteActionHandler {
    MockExecutionActionHandler() {
      // NOTE(review): super(true) — meaning of the boolean is defined by
      // BaseConsoleExecuteActionHandler; confirm before relying on it.
      super(true);
    }
  }
}
|
msebire/intellij-community
|
platform/lang-impl/testSources/com/intellij/execution/console/ConsoleHistoryControllerTest.java
|
Java
|
apache-2.0
| 4,959
|
//===--- UnknownSyntax.h - Swift Unknown Syntax Interface -------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_SYNTAX_UNKNOWNSYNTAX_H
#define SWIFT_SYNTAX_UNKNOWNSYNTAX_H
#include "swift/Syntax/SyntaxData.h"
#include "swift/Syntax/Syntax.h"
#include <vector>
namespace swift {
namespace syntax {
#pragma mark unknown-syntax API
/// A chunk of "unknown" syntax.
///
/// Effectively wraps a tree of RawSyntax.
///
/// This should not be vended by SyntaxFactory.
class UnknownSyntax : public Syntax {
  friend struct SyntaxFactory;
  friend class Syntax;
  // Validation hook inherited from Syntax; the definition lives in the
  // corresponding .cpp file.
  virtual void validate() const override;
public:
  UnknownSyntax(const RC<SyntaxData> Root, const SyntaxData *Data)
    : Syntax(Root, Data) {}
  /// Get the number of child nodes in this piece of syntax, not including
  /// tokens.
  size_t getNumChildren() const;
  /// Get the Nth child of this piece of syntax.
  Syntax getChild(const size_t N) const;
  // LLVM-style RTTI: any node reporting isUnknown() is an UnknownSyntax.
  static bool classof(const Syntax *S) {
    return S->isUnknown();
  }
};
} // end namespace syntax
} // end namespace swift
#endif // SWIFT_SYNTAX_UNKNOWNSYNTAX_H
|
djwbrown/swift
|
include/swift/Syntax/UnknownSyntax.h
|
C
|
apache-2.0
| 1,508
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.io.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import java.io.IOException;
public class OrcFile {
  // Magic byte sequence identifying ORC files.
  public static final String MAGIC = "ORC";
  // Configuration key selecting the compression codec (default ZLIB).
  public static final String COMPRESSION = "orc.compress";
  static final String DEFAULT_COMPRESSION = "ZLIB";
  // Configuration key for the number of bytes compressed at once.
  public static final String COMPRESSION_BLOCK_SIZE = "orc.compress.size";
  static final String DEFAULT_COMPRESSION_BLOCK_SIZE = "262144";
  // Configuration key for the stripe size in bytes (default 256 MiB).
  public static final String STRIPE_SIZE = "orc.stripe.size";
  static final String DEFAULT_STRIPE_SIZE = "268435456";
  // Configuration key for rows between row-index entries; 0 suppresses
  // all indexes (see createWriter's rowIndexStride parameter).
  public static final String ROW_INDEX_STRIDE = "orc.row.index.stride";
  static final String DEFAULT_ROW_INDEX_STRIDE = "10000";
  // Configuration key toggling index creation.
  public static final String ENABLE_INDEXES = "orc.create.index";
  /**
   * Create an ORC file reader.
   * @param fs file system
   * @param path file name to read from
   * @return a new ORC file reader.
   * @throws IOException
   */
  public static Reader createReader(FileSystem fs, Path path
                                    ) throws IOException {
    return new ReaderImpl(fs, path);
  }
  /**
   * Create an ORC file streamFactory.
   * @param fs file system
   * @param path filename to write to
   * @param inspector the ObjectInspector that inspects the rows
   * @param stripeSize the number of bytes in a stripe
   * @param compress how to compress the file
   * @param bufferSize the number of bytes to compress at once
   * @param rowIndexStride the number of rows between row index entries or
   *                       0 to suppress all indexes
   * @return a new ORC file streamFactory
   * @throws IOException
   */
  public static Writer createWriter(FileSystem fs,
                                    Path path,
                                    ObjectInspector inspector,
                                    long stripeSize,
                                    CompressionKind compress,
                                    int bufferSize,
                                    int rowIndexStride) throws IOException {
    return new WriterImpl(fs, path, inspector, stripeSize, compress,
                          bufferSize, rowIndexStride);
  }
}
|
jorgemarsal/orc
|
src/main/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
|
Java
|
apache-2.0
| 3,094
|
<!DOCTYPE html>
<html>
<head>
<script></script>
<script>
function performTest(api)
{
    // Build the document outline, then enable outline numbering only when the
    // fixture already uses numbered sections (this input file is unnumbered,
    // so setupOutlineNumbering is expected to be skipped — confirm in output).
    api.Outline.init();
    if (api.Outline.detectSectionNumbering())
        api.tests.TestLib.setupOutlineNumbering();
    // NOTE(review): presumably flushes queued deferred DOM updates so the
    // serialized test output is stable — confirm against the editor API.
    api.PostponedActions.perform();
}
</script>
</head>
<body>
<h1 id="item1">First heading</h1>
<p>Default <a href="#item1"></a></p>
<p>corinthia-ref-num <a class="corinthia-ref-num" href="#item1"></a></p>
<p>corinthia-ref-text <a class="corinthia-ref-text" href="#item1"></a></p>
<p>corinthia-ref-caption-text <a class="corinthia-ref-caption-text" href="#item1"></a></p>
<p>corinthia-ref-label-num <a class="corinthia-ref-label-num" href="#item1"></a></p>
</body>
</html>
|
corinthia/corinthia-editorlib
|
tests/outline/refType-section-unnumbered-input.html
|
HTML
|
apache-2.0
| 680
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
"fmt"
"sort"
"strconv"
"strings"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/util/version"
)
// Feature gate names understood by kubeadm; each comment records the release
// in which the gate was introduced and its maturity at that point.
const (
	// HighAvailability is alpha in v1.9
	HighAvailability = "HighAvailability"
	// CoreDNS is alpha in v1.9
	CoreDNS = "CoreDNS"
	// SelfHosting is beta in v1.8
	SelfHosting = "SelfHosting"
	// StoreCertsInSecrets is alpha in v1.8
	StoreCertsInSecrets = "StoreCertsInSecrets"
	// SupportIPVSProxyMode is alpha in v1.8
	SupportIPVSProxyMode = "SupportIPVSProxyMode"
)

// v190 marks the first v1.9 pre-release; used below as the minimum version
// for gates introduced in v1.9.
var v190 = version.MustParseSemantic("v1.9.0-alpha.1")

// InitFeatureGates are the default feature gates for the init command
var InitFeatureGates = FeatureList{
	SelfHosting:          {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Beta}},
	StoreCertsInSecrets:  {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}},
	HighAvailability:     {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
	SupportIPVSProxyMode: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
	CoreDNS:              {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
}

// Feature represents a feature being gated, optionally constrained to a
// minimum Kubernetes version.
type Feature struct {
	utilfeature.FeatureSpec
	// MinimumVersion is nil when the feature has no version constraint.
	MinimumVersion *version.Version
}

// FeatureList represents a list of feature gates, keyed by feature name.
type FeatureList map[string]Feature
// ValidateVersion ensures that a feature gate list is compatible with the
// chosen Kubernetes version. An empty requestedVersion means "no constraint"
// and always validates. Returns an error when requestedVersion cannot be
// parsed or when a requested feature's MinimumVersion exceeds it.
func ValidateVersion(allFeatures FeatureList, requestedFeatures map[string]bool, requestedVersion string) error {
	if requestedVersion == "" {
		return nil
	}
	parsedExpVersion, err := version.ParseSemantic(requestedVersion)
	if err != nil {
		// Go convention: error strings are not capitalized (staticcheck ST1005).
		return fmt.Errorf("error parsing version %s: %v", requestedVersion, err)
	}
	for k := range requestedFeatures {
		// Unknown keys yield the zero Feature whose MinimumVersion is nil,
		// so they are skipped here rather than rejected.
		if minVersion := allFeatures[k].MinimumVersion; minVersion != nil {
			if !parsedExpVersion.AtLeast(minVersion) {
				return fmt.Errorf(
					"the requested kubernetes version (%s) is incompatible with the %s feature gate, which needs %s as a minimum",
					requestedVersion, k, minVersion)
			}
		}
	}
	return nil
}
// Enabled indicates whether a feature name has been enabled.
// A nil map or missing key yields the zero value, false.
func Enabled(featureList map[string]bool, featureName string) bool {
	// featureName is already a string; the original string(featureName)
	// conversion was redundant.
	return featureList[featureName]
}
// Supports indicates whether a feature name is supported on the given
// feature set.
func Supports(featureList FeatureList, featureName string) bool {
	// Direct comma-ok lookup replaces the original linear scan over keys
	// (which also performed a redundant string(k) conversion); semantics
	// are identical, cost drops from O(n) to O(1).
	_, ok := featureList[featureName]
	return ok
}
// Keys returns a slice of feature names for a given feature set.
// Order is unspecified (Go map iteration order).
func Keys(featureList FeatureList) []string {
	// Preallocate to the map size; the original appended to a nil slice and
	// applied a redundant string(k) conversion.
	list := make([]string, 0, len(featureList))
	for k := range featureList {
		list = append(list, k)
	}
	return list
}
// KnownFeatures returns a sorted slice of strings describing the FeatureList
// features, e.g. "SelfHosting=true|false (BETA - default=false)". Non-GA
// features are prefixed with their pre-release stage.
func KnownFeatures(f *FeatureList) []string {
	var known []string
	for name, feature := range *f {
		prefix := ""
		if feature.PreRelease != utilfeature.GA {
			prefix = fmt.Sprintf("%s - ", feature.PreRelease)
		}
		known = append(known, fmt.Sprintf("%s=true|false (%sdefault=%t)", name, prefix, feature.Default))
	}
	sort.Strings(known)
	return known
}
// NewFeatureGate parses a string of the form "key1=value1,key2=value2,..."
// into a map[string]bool of known keys, or returns an error for a malformed
// pair, an unknown key, or a non-boolean value. Empty segments are skipped.
func NewFeatureGate(f *FeatureList, value string) (map[string]bool, error) {
	featureGate := map[string]bool{}
	for _, entry := range strings.Split(value, ",") {
		if entry == "" {
			continue
		}
		kv := strings.SplitN(entry, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("missing bool value for feature-gate key:%s", entry)
		}
		key := strings.TrimSpace(kv[0])
		rawValue := strings.TrimSpace(kv[1])
		if !Supports(*f, key) {
			return nil, fmt.Errorf("unrecognized feature-gate key: %s", key)
		}
		parsed, err := strconv.ParseBool(rawValue)
		if err != nil {
			return nil, fmt.Errorf("invalid value %v for feature-gate key: %s, use true|false instead", rawValue, key)
		}
		featureGate[key] = parsed
	}
	return featureGate, nil
}
|
zjj2wry/kubernetes
|
cmd/kubeadm/app/features/features.go
|
GO
|
apache-2.0
| 4,765
|
---
title: Push Messaging plugin from mBlox
date: 2012-10-25 00:00:00 Z
tags:
- Guest Post
- Plugin
- Android
- iOS
- Guide
author: mBlox
---
Push messaging can help keep users active and engaged. This is especially true of consumer apps where usage drops off rapidly after the initial install. Push messaging can also be used to inform users of any updates on the server. For enterprise apps, this prevents unnecessary queries that burden the server and saves battery life.
The mBlox push messaging plugin allows you to take advantage of the following capabilities:
* Send push messages to iOS and Android devices
* Send geo-fenced messages to users
* View real-time analytics
* Add users into groups for easy targeting
* Send rich messages (videos, pictures, html) and control how they are rendered in the app
Integrating the Push Messaging plugin from [mBlox](https://developer.mblox.com/) into your PhoneGap app is easy – you simply add two more plugins to your application’s plugin list, extend the declarations for your target platform, and you’re done. This adds push messaging capabilities to your app.
To push messages using the mBlox platform, you can either use the REST API to send messages or use the web-based dashboard called the Engagement Manager. The Engagement Manager makes it easy for non-technical users to send messages.
The developer [pricing](https://developer.mblox.com/pricing) is pay as you go. There are no monthly fees; if you don’t use the service, you don’t have to pay. You get access to the Engagement Manager right away. To make it easy for you to get started, we will give you a starting credit of $10. You can use the coupon code **PHONEGAP** by November 15, 2012 to get an additional $30 credit.
You can find the plugin on mBlox developer site:
[https://developer.mblox.com/](https://developer.mblox.com/)
The documentation is available here:
[https://developer.mblox.com/docs](https://developer.mblox.com/docs)
## About mBlox
mBlox, the leader in mobile engagement, helps brands, agencies and enterprises create meaningful connections with their customers on mobile devices anytime and nearly anywhere. Our network of more than 800 mobile operators around the world enables businesses to reach nearly 5 billion consumers. We make it easy to use interactive text message campaigns, push notifications and geolocation in order to drive revenue, lifetime customer value and ROI.
|
phonegap/blog
|
_posts/blog/2012/10/2012-10-25-mblox-plugin.md
|
Markdown
|
apache-2.0
| 2,436
|
#include <memory>
#include <vector>
#include "common/event/dispatcher_impl.h"
#include "common/network/utility.h"
#include "common/tcp/conn_pool.h"
#include "common/upstream/upstream_impl.h"
#include "test/common/upstream/utility.h"
#include "test/mocks/event/mocks.h"
#include "test/mocks/network/mocks.h"
#include "test/mocks/runtime/mocks.h"
#include "test/mocks/upstream/mocks.h"
#include "test/test_common/printers.h"
#include "test/test_common/utility.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
using testing::_;
using testing::InSequence;
using testing::Invoke;
using testing::NiceMock;
using testing::Property;
using testing::Return;
namespace Envoy {
namespace Tcp {
namespace {
// ConnectionState implementation whose destructor invokes a caller-supplied
// callback, letting tests observe exactly when the pool destroys the state
// attached to a connection.
struct TestConnectionState : public ConnectionPool::ConnectionState {
  TestConnectionState(int id, std::function<void()> on_destructor)
      : id_(id), on_destructor_(on_destructor) {}
  ~TestConnectionState() override { on_destructor_(); }
  // Identifier tests use to tell state instances apart.
  int id_;
  std::function<void()> on_destructor_;
};
} // namespace
/**
 * Mock callbacks used for conn pool testing. Records the outcome of a
 * connection request and signals a ReadyWatcher so tests can EXPECT_CALL on
 * success or failure.
 */
struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks {
  // Called when the pool assigns a connection: keep the connection data and
  // host, then fire pool_ready_.
  void onPoolReady(ConnectionPool::ConnectionDataPtr&& conn,
                   Upstream::HostDescriptionConstSharedPtr host) override {
    conn_data_ = std::move(conn);
    host_ = host;
    pool_ready_.ready();
  }
  // Called when no connection can be supplied: record the failure reason and
  // host, then fire pool_failure_.
  void onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason,
                     Upstream::HostDescriptionConstSharedPtr host) override {
    reason_ = reason;
    host_ = host;
    pool_failure_.ready();
  }
  ReadyWatcher pool_failure_;
  ReadyWatcher pool_ready_;
  ConnectionPool::ConnectionDataPtr conn_data_{};
  // Populated only on failure.
  absl::optional<ConnectionPool::PoolFailureReason> reason_;
  Upstream::HostDescriptionConstSharedPtr host_;
};
/**
 * A test version of ConnPoolImpl that allows for mocking: upstream
 * connections are mock client connections, and connection release/destroy
 * events are surfaced as mockable hooks so tests can set expectations on
 * pool internals.
 */
class ConnPoolImplForTest : public ConnPoolImpl {
public:
  ConnPoolImplForTest(Event::MockDispatcher& dispatcher,
                      Upstream::ClusterInfoConstSharedPtr cluster,
                      NiceMock<Event::MockTimer>* upstream_ready_timer)
      : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"),
                     Upstream::ResourcePriority::Default, nullptr, nullptr),
        mock_dispatcher_(dispatcher), mock_upstream_ready_timer_(upstream_ready_timer) {}
  // Every test must fully drain the pool before the fixture is torn down.
  ~ConnPoolImplForTest() override {
    EXPECT_EQ(0U, ready_conns_.size());
    EXPECT_EQ(0U, busy_conns_.size());
    EXPECT_EQ(0U, pending_requests_.size());
  }
  // Hooks invoked from the overridden callbacks below; tests EXPECT_CALL on
  // these to track the connection lifecycle.
  MOCK_METHOD(void, onConnReleasedForTest, ());
  MOCK_METHOD(void, onConnDestroyedForTest, ());
  // Bundles the mocks backing one upstream connection created by the pool.
  struct TestConnection {
    Network::MockClientConnection* connection_;
    Event::MockTimer* connect_timer_;
    Network::ReadFilterSharedPtr filter_;
  };
  // Arms expectations for the next connection the pool creates: the mock
  // dispatcher returns a fresh mock connection, the pool's read filter is
  // captured, and connect() plus the connect timer are expected to be used.
  void expectConnCreate() {
    test_conns_.emplace_back();
    TestConnection& test_conn = test_conns_.back();
    test_conn.connection_ = new NiceMock<Network::MockClientConnection>();
    test_conn.connect_timer_ = new NiceMock<Event::MockTimer>(&mock_dispatcher_);
    EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _))
        .WillOnce(Return(test_conn.connection_));
    EXPECT_CALL(*test_conn.connection_, addReadFilter(_))
        .WillOnce(Invoke(
            [&](Network::ReadFilterSharedPtr filter) -> void { test_conn.filter_ = filter; }));
    EXPECT_CALL(*test_conn.connection_, connect());
    EXPECT_CALL(*test_conn.connect_timer_, enableTimer(_, _));
  }
  // Expects the pool to arm the upstream-ready timer exactly once.
  void expectEnableUpstreamReady() {
    EXPECT_FALSE(upstream_ready_enabled_);
    EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation();
  }
  // Fires the upstream-ready timer and verifies the enabled flag toggles off.
  void expectAndRunUpstreamReady() {
    EXPECT_TRUE(upstream_ready_enabled_);
    mock_upstream_ready_timer_->invokeCallback();
    EXPECT_FALSE(upstream_ready_enabled_);
  }
  Event::MockDispatcher& mock_dispatcher_;
  NiceMock<Event::MockTimer>* mock_upstream_ready_timer_;
  std::vector<TestConnection> test_conns_;
protected:
  // Notify the test hook for the matching tracked connection, then defer to
  // the real implementation.
  void onConnReleased(ConnPoolImpl::ActiveConn& conn) override {
    for (auto& test_conn : test_conns_) {
      if (conn.conn_.get() == test_conn.connection_) {
        onConnReleasedForTest();
        break;
      }
    }
    ConnPoolImpl::onConnReleased(conn);
  }
  // Same as above for destruction; also drops the tracked entry so indices
  // into test_conns_ shift (tests account for this).
  void onConnDestroyed(ConnPoolImpl::ActiveConn& conn) override {
    for (auto i = test_conns_.begin(); i != test_conns_.end(); i++) {
      if (conn.conn_.get() == i->connection_) {
        onConnDestroyedForTest();
        test_conns_.erase(i);
        break;
      }
    }
    ConnPoolImpl::onConnDestroyed(conn);
  }
};
/**
 * Test fixture for connection pool tests. Owns the mock dispatcher, a mock
 * cluster, and the instrumented pool under test.
 */
class TcpConnPoolImplTest : public testing::Test {
public:
  TcpConnPoolImplTest()
      : upstream_ready_timer_(new NiceMock<Event::MockTimer>(&dispatcher_)),
        conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {}
  // All cluster gauges must have returned to zero once a test completes.
  ~TcpConnPoolImplTest() override {
    EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges()));
  }
  NiceMock<Event::MockDispatcher> dispatcher_;
  std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};
  NiceMock<Event::MockTimer>* upstream_ready_timer_;
  ConnPoolImplForTest conn_pool_;
  NiceMock<Runtime::MockLoader> runtime_;
};
/**
 * Test fixture for connection pool destructor tests. Uses the real
 * ConnPoolImpl (not the instrumented subclass) so destruction paths are
 * exercised directly.
 */
class TcpConnPoolImplDestructorTest : public testing::Test {
public:
  TcpConnPoolImplDestructorTest()
      : upstream_ready_timer_(new NiceMock<Event::MockTimer>(&dispatcher_)),
        conn_pool_{new ConnPoolImpl(dispatcher_,
                                    Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000"),
                                    Upstream::ResourcePriority::Default, nullptr, nullptr)} {}
  ~TcpConnPoolImplDestructorTest() override = default;
  // Creates one connection, raises Connected, and waits for the pool to hand
  // it to callbacks_ (pool_ready_ fires).
  void prepareConn() {
    connection_ = new NiceMock<Network::MockClientConnection>();
    connect_timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);
    EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillOnce(Return(connection_));
    EXPECT_CALL(*connect_timer_, enableTimer(_, _));
    callbacks_ = std::make_unique<ConnPoolCallbacks>();
    ConnectionPool::Cancellable* handle = conn_pool_->newConnection(*callbacks_);
    EXPECT_NE(nullptr, handle);
    EXPECT_CALL(*connect_timer_, disableTimer());
    EXPECT_CALL(callbacks_->pool_ready_, ready());
    connection_->raiseEvent(Network::ConnectionEvent::Connected);
  }
  NiceMock<Event::MockDispatcher> dispatcher_;
  std::shared_ptr<Upstream::MockClusterInfo> cluster_{new NiceMock<Upstream::MockClusterInfo>()};
  NiceMock<Event::MockTimer>* upstream_ready_timer_;
  NiceMock<Event::MockTimer>* connect_timer_;
  NiceMock<Network::MockClientConnection>* connection_;
  std::unique_ptr<ConnPoolImpl> conn_pool_;
  std::unique_ptr<ConnPoolCallbacks> callbacks_;
};
/**
 * Helper for dealing with an active test connection. Drives a single
 * newConnection() call through the pool according to the requested Type and
 * provides helpers to complete, verify, and release the connection.
 */
struct ActiveTestConn {
  enum class Type {
    Pending,          // pending request, waiting for free connection
    InProgress,       // connection created, no callback
    CreateConnection, // connection callback occurs after newConnection
    Immediate,        // connection callback occurs during newConnection
  };
  ActiveTestConn(TcpConnPoolImplTest& parent, size_t conn_index, Type type)
      : parent_(parent), conn_index_(conn_index) {
    if (type == Type::CreateConnection || type == Type::InProgress) {
      parent.conn_pool_.expectConnCreate();
    }
    if (type == Type::Immediate) {
      expectNewConn();
    }
    handle_ = parent.conn_pool_.newConnection(callbacks_);
    if (type == Type::Immediate) {
      // A reused connection is handed out synchronously, so no cancel handle.
      EXPECT_EQ(nullptr, handle_);
      verifyConn();
    } else {
      EXPECT_NE(nullptr, handle_);
    }
    if (type == Type::CreateConnection) {
      completeConnection();
    }
  }
  // Raises Connected on the underlying mock and verifies the pool binds this
  // request to it; may only run once per instance.
  void completeConnection() {
    ASSERT_FALSE(completed_);
    EXPECT_CALL(*parent_.conn_pool_.test_conns_[conn_index_].connect_timer_, disableTimer());
    expectNewConn();
    parent_.conn_pool_.test_conns_[conn_index_].connection_->raiseEvent(
        Network::ConnectionEvent::Connected);
    verifyConn();
    completed_ = true;
  }
  // Expect the pool to signal readiness for this request.
  void expectNewConn() { EXPECT_CALL(callbacks_.pool_ready_, ready()); }
  // Return the connection to the pool.
  void releaseConn() { callbacks_.conn_data_.reset(); }
  // Assert the bound connection is the one tracked at conn_index_.
  void verifyConn() {
    EXPECT_EQ(&callbacks_.conn_data_->connection(),
              parent_.conn_pool_.test_conns_[conn_index_].connection_);
  }
  TcpConnPoolImplTest& parent_;
  size_t conn_index_;
  Tcp::ConnectionPool::Cancellable* handle_{};
  ConnPoolCallbacks callbacks_;
  bool completed_{};
};
/**
 * Verify that connections are drained when requested: the ready connection is
 * destroyed immediately, while busy and pending connections are destroyed as
 * soon as their in-flight request completes.
 */
TEST_F(TcpConnPoolImplTest, DrainConnections) {
  cluster_->resetResourceManager(3, 1024, 1024, 1, 1);
  InSequence s;
  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);
  ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection);
  ActiveTestConn c3(*this, 2, ActiveTestConn::Type::InProgress);
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  c1.releaseConn();
  // This will destroy the ready connection and set requests remaining to 1 on the busy and pending
  // connections.
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.drainConnections();
  dispatcher_.clearDeferredDeleteList();
  // This will destroy the busy connection when the response finishes.
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  c2.releaseConn();
  dispatcher_.clearDeferredDeleteList();
  // This will destroy the pending connection when the response finishes.
  c3.conn_index_ = 0; // c1/c2 have been deleted from test_conns_.
  c3.completeConnection();
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  c3.releaseConn();
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test all timing stats are set: both the upstream_cx_connect_ms and
 * upstream_cx_length_ms histograms must be delivered to sinks over one
 * connect/release/close cycle.
 */
TEST_F(TcpConnPoolImplTest, VerifyTimingStats) {
  EXPECT_CALL(cluster_->stats_store_,
              deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _));
  EXPECT_CALL(cluster_->stats_store_,
              deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _));
  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  c1.releaseConn();
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test that buffer limits are set: the cluster's per-connection buffer limit
 * (8192 here) must be applied to each newly created upstream connection.
 */
TEST_F(TcpConnPoolImplTest, VerifyBufferLimits) {
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192));
  EXPECT_CALL(*conn_pool_.test_conns_.back().connection_, setBufferLimits(8192));
  EXPECT_CALL(callbacks.pool_failure_, ready());
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test that upstream callbacks fire for assigned connections: received data
 * and both write-buffer watermark events must be forwarded to the registered
 * UpstreamCallbacks.
 */
TEST_F(TcpConnPoolImplTest, UpstreamCallbacks) {
  Buffer::OwnedImpl buffer;
  InSequence s;
  ConnectionPool::MockUpstreamCallbacks callbacks;
  // Create connection, set UpstreamCallbacks
  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);
  c1.callbacks_.conn_data_->addUpstreamCallbacks(callbacks);
  // Expect invocation when connection's ReadFilter::onData is invoked
  EXPECT_CALL(callbacks, onUpstreamData(_, _));
  EXPECT_EQ(Network::FilterStatus::StopIteration,
            conn_pool_.test_conns_[0].filter_->onData(buffer, false));
  // Watermark events raised on the underlying connection propagate through.
  EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark());
  for (auto* cb : conn_pool_.test_conns_[0].connection_->callbacks_) {
    cb->onAboveWriteBufferHighWatermark();
  }
  EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark());
  for (auto* cb : conn_pool_.test_conns_[0].connection_->callbacks_) {
    cb->onBelowWriteBufferLowWatermark();
  }
  // Shutdown normally.
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  c1.releaseConn();
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test that the upstream callback close event fires for assigned connections:
 * a remote close on a bound connection must be reported via onEvent.
 */
TEST_F(TcpConnPoolImplTest, UpstreamCallbacksCloseEvent) {
  Buffer::OwnedImpl buffer;
  InSequence s;
  ConnectionPool::MockUpstreamCallbacks callbacks;
  // Create connection, set UpstreamCallbacks
  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);
  c1.callbacks_.conn_data_->addUpstreamCallbacks(callbacks);
  EXPECT_CALL(callbacks, onEvent(Network::ConnectionEvent::RemoteClose));
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test that a connection pool functions without upstream callbacks: data
 * arriving on a bound connection with no registered callbacks causes the pool
 * to close that connection.
 */
TEST_F(TcpConnPoolImplTest, NoUpstreamCallbacks) {
  Buffer::OwnedImpl buffer;
  InSequence s;
  // Create connection.
  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);
  // Trigger connection's ReadFilter::onData -- connection pool closes connection.
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  EXPECT_EQ(Network::FilterStatus::StopIteration,
            conn_pool_.test_conns_[0].filter_->onData(buffer, false));
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Tests a request that generates a new connection, completes, and then a
 * second request that uses the same connection: the second request must be
 * served immediately from the pool without creating a new connection.
 */
TEST_F(TcpConnPoolImplTest, MultipleRequestAndResponse) {
  InSequence s;
  // Request 1 should kick off a new connection.
  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  c1.releaseConn();
  // Request 2 should not.
  ActiveTestConn c2(*this, 0, ActiveTestConn::Type::Immediate);
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  c2.releaseConn();
  // Cause the connection to go away.
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Tests ConnectionState assignment, lookup and destruction: state set by one
 * request survives connection reuse and is destroyed only when the connection
 * itself is destroyed.
 */
TEST_F(TcpConnPoolImplTest, ConnectionStateLifecycle) {
  InSequence s;
  bool state_destroyed = false;
  // Request 1 should kick off a new connection.
  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);
  auto* state = new TestConnectionState(1, [&]() -> void { state_destroyed = true; });
  c1.callbacks_.conn_data_->setConnectionState(std::unique_ptr<TestConnectionState>(state));
  EXPECT_EQ(state, c1.callbacks_.conn_data_->connectionStateTyped<TestConnectionState>());
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  c1.releaseConn();
  EXPECT_FALSE(state_destroyed);
  // Request 2 should not.
  ActiveTestConn c2(*this, 0, ActiveTestConn::Type::Immediate);
  // The reused connection still carries the state installed by request 1.
  EXPECT_EQ(state, c2.callbacks_.conn_data_->connectionStateTyped<TestConnectionState>());
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  c2.releaseConn();
  EXPECT_FALSE(state_destroyed);
  // Cause the connection to go away.
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
  EXPECT_TRUE(state_destroyed);
}
/**
 * Test when we overflow max pending requests: the second request must fail
 * immediately with an Overflow reason and bump the pending-overflow counter.
 */
TEST_F(TcpConnPoolImplTest, MaxPendingRequests) {
  cluster_->resetResourceManager(1, 1, 1024, 1, 1);
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);
  ConnPoolCallbacks callbacks2;
  EXPECT_CALL(callbacks2.pool_failure_, ready());
  Tcp::ConnectionPool::Cancellable* handle2 = conn_pool_.newConnection(callbacks2);
  EXPECT_EQ(nullptr, handle2);
  handle->cancel(ConnectionPool::CancelPolicy::Default);
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
  EXPECT_EQ(ConnectionPool::PoolFailureReason::Overflow, callbacks2.reason_);
  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_overflow_.value());
}
/**
 * Tests a remote connection failure before a request is bound, which should
 * result in the pending request getting purged with reason
 * RemoteConnectionFailure.
 */
TEST_F(TcpConnPoolImplTest, RemoteConnectFailure) {
  InSequence s;
  // Request 1 should kick off a new connection.
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);
  EXPECT_CALL(callbacks.pool_failure_, ready());
  EXPECT_CALL(*conn_pool_.test_conns_[0].connect_timer_, disableTimer());
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
  EXPECT_EQ(ConnectionPool::PoolFailureReason::RemoteConnectionFailure, callbacks.reason_);
  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value());
  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value());
}
/**
 * Tests a local connection failure before a request is bound, which should
 * result in the pending request getting purged with reason
 * LocalConnectionFailure.
 */
TEST_F(TcpConnPoolImplTest, LocalConnectFailure) {
  InSequence s;
  // Request 1 should kick off a new connection.
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);
  EXPECT_CALL(callbacks.pool_failure_, ready());
  EXPECT_CALL(*conn_pool_.test_conns_[0].connect_timer_, disableTimer());
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::LocalClose);
  dispatcher_.clearDeferredDeleteList();
  EXPECT_EQ(ConnectionPool::PoolFailureReason::LocalConnectionFailure, callbacks.reason_);
  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value());
  EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value());
}
/**
 * Tests a connect timeout. Also test that we can add a new request during
 * ejection processing: the second request is created from inside the first
 * one's failure callback and must itself time out cleanly.
 */
TEST_F(TcpConnPoolImplTest, ConnectTimeout) {
  InSequence s;
  // Request 1 should kick off a new connection.
  ConnPoolCallbacks callbacks1;
  conn_pool_.expectConnCreate();
  EXPECT_NE(nullptr, conn_pool_.newConnection(callbacks1));
  ConnPoolCallbacks callbacks2;
  // While handling request 1's failure, enqueue request 2.
  EXPECT_CALL(callbacks1.pool_failure_, ready()).WillOnce(Invoke([&]() -> void {
    conn_pool_.expectConnCreate();
    EXPECT_NE(nullptr, conn_pool_.newConnection(callbacks2));
  }));
  conn_pool_.test_conns_[0].connect_timer_->invokeCallback();
  EXPECT_CALL(callbacks2.pool_failure_, ready());
  conn_pool_.test_conns_[1].connect_timer_->invokeCallback();
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest()).Times(2);
  dispatcher_.clearDeferredDeleteList();
  EXPECT_EQ(ConnectionPool::PoolFailureReason::Timeout, callbacks1.reason_);
  EXPECT_EQ(ConnectionPool::PoolFailureReason::Timeout, callbacks2.reason_);
  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_fail_.value());
  EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_timeout_.value());
}
/**
 * Test cancelling before the request is bound to a connection: with the
 * default cancel policy the connection itself survives and is parked ready.
 */
TEST_F(TcpConnPoolImplTest, CancelBeforeBound) {
  InSequence s;
  // Request 1 should kick off a new connection.
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);
  handle->cancel(ConnectionPool::CancelPolicy::Default);
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);
  // Cause the connection to go away.
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test cancelling before the request is bound to a connection, with
 * connection close: the CloseExcess policy must tear down the now-unneeded
 * connection immediately.
 */
TEST_F(TcpConnPoolImplTest, CancelAndCloseBeforeBound) {
  InSequence s;
  // Request 1 should kick off a new connection.
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);
  // Expect the connection is closed.
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  handle->cancel(ConnectionPool::CancelPolicy::CloseExcess);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test an upstream disconnection while there is a bound request: the
 * connection must be destroyed without leaking the active request.
 */
TEST_F(TcpConnPoolImplTest, DisconnectWhileBound) {
  InSequence s;
  // Request 1 should kick off a new connection.
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);
  EXPECT_CALL(callbacks.pool_ready_, ready());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);
  // Kill the connection while it has an active request.
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test upstream disconnection of one request while another is pending: the
 * close must trigger creation of a replacement connection that then serves
 * the pending request.
 */
TEST_F(TcpConnPoolImplTest, DisconnectWhilePending) {
  cluster_->resetResourceManager(1, 1024, 1024, 1, 1);
  InSequence s;
  // First request connected.
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);
  EXPECT_CALL(*conn_pool_.test_conns_[0].connect_timer_, disableTimer());
  EXPECT_CALL(callbacks.pool_ready_, ready());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);
  // Second request pending.
  ConnPoolCallbacks callbacks2;
  ConnectionPool::Cancellable* handle2 = conn_pool_.newConnection(callbacks2);
  EXPECT_NE(nullptr, handle2);
  // Connection closed, triggering new connection for pending request.
  conn_pool_.expectConnCreate();
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::LocalClose);
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  dispatcher_.clearDeferredDeleteList();
  // test_conns_[0] was replaced with a new connection
  EXPECT_CALL(*conn_pool_.test_conns_[0].connect_timer_, disableTimer());
  EXPECT_CALL(callbacks2.pool_ready_, ready());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  callbacks2.conn_data_.reset();
  // Disconnect
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test that we correctly handle reaching max connections: a second request
 * queues rather than creating a connection, increments the overflow counter,
 * and is served via the upstream-ready path once the first request releases.
 */
TEST_F(TcpConnPoolImplTest, MaxConnections) {
  InSequence s;
  // Request 1 should kick off a new connection.
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);
  // Request 2 should not kick off a new connection.
  ConnPoolCallbacks callbacks2;
  handle = conn_pool_.newConnection(callbacks2);
  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value());
  EXPECT_NE(nullptr, handle);
  // Connect event will bind to request 1.
  EXPECT_CALL(callbacks.pool_ready_, ready());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);
  // Finishing request 1 will immediately bind to request 2.
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  conn_pool_.expectEnableUpstreamReady();
  EXPECT_CALL(callbacks2.pool_ready_, ready());
  callbacks.conn_data_.reset();
  conn_pool_.expectAndRunUpstreamReady();
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  callbacks2.conn_data_.reset();
  // Cause the connection to go away.
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test when we reach max requests per connection: with the limit set to 1,
 * releasing the first request must destroy the connection and bump the
 * max-requests counter rather than returning it to the ready list.
 */
TEST_F(TcpConnPoolImplTest, MaxRequestsPerConnection) {
  InSequence s;
  cluster_->max_requests_per_connection_ = 1;
  // Request 1 should kick off a new connection.
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);
  EXPECT_CALL(callbacks.pool_ready_, ready());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  callbacks.conn_data_.reset();
  dispatcher_.clearDeferredDeleteList();
  EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value());
  EXPECT_EQ(1U, cluster_->stats_.upstream_cx_max_requests_.value());
}
/**
 * Test that multiple connections can be assigned at once: two concurrent
 * connections plus one pending request; the pending request is served when
 * the first connection is released.
 */
TEST_F(TcpConnPoolImplTest, ConcurrentConnections) {
  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);
  InSequence s;
  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);
  ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection);
  ActiveTestConn c3(*this, 0, ActiveTestConn::Type::Pending);
  // Finish c1, which gets c3 going.
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  conn_pool_.expectEnableUpstreamReady();
  c3.expectNewConn();
  c1.releaseConn();
  conn_pool_.expectAndRunUpstreamReady();
  EXPECT_CALL(conn_pool_, onConnReleasedForTest()).Times(2);
  c2.releaseConn();
  c3.releaseConn();
  // Disconnect both connections.
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest()).Times(2);
  conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Tests ConnectionState lifecycle with multiple concurrent connections.
 */
TEST_F(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) {
  InSequence s;

  // Each TestConnectionState sets one bit in state_destroyed on destruction, so
  // the accumulated bitmask records exactly which states have been torn down.
  int state_destroyed = 0;
  auto* s1 = new TestConnectionState(1, [&]() -> void { state_destroyed |= 1; });
  auto* s2 = new TestConnectionState(2, [&]() -> void { state_destroyed |= 2; });
  auto* s3 = new TestConnectionState(2, [&]() -> void { state_destroyed |= 4; });

  // Allow two concurrent upstream connections; c3 below queues as pending.
  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);
  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);
  c1.callbacks_.conn_data_->setConnectionState(std::unique_ptr<TestConnectionState>(s1));
  ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection);
  c2.callbacks_.conn_data_->setConnectionState(std::unique_ptr<TestConnectionState>(s2));
  ActiveTestConn c3(*this, 0, ActiveTestConn::Type::Pending);
  EXPECT_EQ(0, state_destroyed);

  // Finish c1, which gets c3 going.
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  conn_pool_.expectEnableUpstreamReady();
  c3.expectNewConn();
  c1.releaseConn();
  conn_pool_.expectAndRunUpstreamReady();

  // c3 now has the state set by c1.
  EXPECT_EQ(s1, c3.callbacks_.conn_data_->connectionStateTyped<TestConnectionState>());
  EXPECT_EQ(s2, c2.callbacks_.conn_data_->connectionStateTyped<TestConnectionState>());

  // replace c3's state
  c3.callbacks_.conn_data_->setConnectionState(std::unique_ptr<TestConnectionState>(s3));
  // Replacing the state destroys s1 (bit 1) and nothing else so far.
  EXPECT_EQ(1, state_destroyed);

  EXPECT_CALL(conn_pool_, onConnReleasedForTest()).Times(2);
  c2.releaseConn();
  c3.releaseConn();
  // Releasing the requests keeps the connection states alive on the connections.
  EXPECT_EQ(1, state_destroyed);

  // Disconnect both connections.
  EXPECT_CALL(conn_pool_, onConnDestroyedForTest()).Times(2);
  conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
  // All three states (bits 1|2|4) must have died with their connections.
  EXPECT_EQ(7, state_destroyed);
}
/**
 * Tests that the DrainCallback is invoked when the number of connections goes to zero.
 */
TEST_F(TcpConnPoolImplTest, DrainCallback) {
  InSequence s;
  ReadyWatcher drained;

  // With nothing in use yet, registering the drained callback fires it at once.
  EXPECT_CALL(drained, ready());
  conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); });

  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);
  ActiveTestConn c2(*this, 0, ActiveTestConn::Type::Pending);
  // Cancel the queued request so only c1 keeps the pool busy.
  c2.handle_->cancel(ConnectionPool::CancelPolicy::Default);

  // Releasing the last busy request drops active use to zero, so the drain
  // callback fires again.
  EXPECT_CALL(conn_pool_, onConnReleasedForTest());
  EXPECT_CALL(drained, ready());
  c1.releaseConn();

  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test draining a connection pool that has a pending connection.
 */
TEST_F(TcpConnPoolImplTest, DrainWhileConnecting) {
  InSequence s;
  ReadyWatcher drained;

  // Start a connection attempt; it has not finished connecting yet.
  ConnPoolCallbacks callbacks;
  conn_pool_.expectConnCreate();
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_.newConnection(callbacks);
  EXPECT_NE(nullptr, handle);

  // Register the drain callback and cancel the request while the upstream
  // connection is still being established.
  conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); });
  handle->cancel(ConnectionPool::CancelPolicy::Default);

  // When the now-unwanted connection finishes connecting, the draining pool
  // closes it immediately (no flush) and signals drained.
  EXPECT_CALL(*conn_pool_.test_conns_[0].connection_, close(Network::ConnectionCloseType::NoFlush));
  EXPECT_CALL(drained, ready());
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected);

  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test that the DrainCallback is invoked when a connection is closed.
 */
TEST_F(TcpConnPoolImplTest, DrainOnClose) {
  ReadyWatcher drained;
  // No connections exist yet, so registration fires the callback immediately.
  EXPECT_CALL(drained, ready());
  conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); });

  InSequence s;
  ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection);

  // Subscribe upstream callbacks so the remote close is observable below.
  ConnectionPool::MockUpstreamCallbacks callbacks;
  c1.callbacks_.conn_data_->addUpstreamCallbacks(callbacks);

  // A remote close while the connection is busy is delivered to the upstream
  // callbacks; releasing the request from inside the event handler brings the
  // pool to zero, so drained fires a second time.
  EXPECT_CALL(drained, ready());
  EXPECT_CALL(callbacks, onEvent(Network::ConnectionEvent::RemoteClose))
      .WillOnce(Invoke([&](Network::ConnectionEvent event) -> void {
        EXPECT_EQ(Network::ConnectionEvent::RemoteClose, event);
        c1.releaseConn();
      }));
  conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose);

  EXPECT_CALL(conn_pool_, onConnDestroyedForTest());
  dispatcher_.clearDeferredDeleteList();
}
/**
 * Test that pending connections are closed when the connection pool is destroyed.
 */
TEST_F(TcpConnPoolImplDestructorTest, TestPendingConnectionsAreClosed) {
  // Wire up a client connection that never completes the connect.
  connection_ = new NiceMock<Network::MockClientConnection>();
  connect_timer_ = new NiceMock<Event::MockTimer>(&dispatcher_);
  EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillOnce(Return(connection_));
  EXPECT_CALL(*connect_timer_, enableTimer(_, _));

  callbacks_ = std::make_unique<ConnPoolCallbacks>();
  ConnectionPool::Cancellable* handle = conn_pool_->newConnection(*callbacks_);
  EXPECT_NE(nullptr, handle);

  // Destroying the pool must fail the pending request and close the half-open
  // connection without flushing.
  EXPECT_CALL(callbacks_->pool_failure_, ready());
  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));
  EXPECT_CALL(dispatcher_, clearDeferredDeleteList());
  conn_pool_.reset();
}
/**
 * Test that busy connections are closed when the connection pool is destroyed.
 */
TEST_F(TcpConnPoolImplDestructorTest, TestBusyConnectionsAreClosed) {
  // prepareConn() presumably leaves a connected upstream with an active request
  // (callbacks_->conn_data_ is populated -- see the sibling "ready" test).
  prepareConn();

  // Pool teardown must close the busy connection without flushing.
  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));
  EXPECT_CALL(dispatcher_, clearDeferredDeleteList());
  conn_pool_.reset();
}
/**
 * Test that ready connections are closed when the connection pool is destroyed.
 */
TEST_F(TcpConnPoolImplDestructorTest, TestReadyConnectionsAreClosed) {
  prepareConn();

  // Transition connection to ready list
  callbacks_->conn_data_.reset();

  // Even idle (ready) connections must be closed when the pool goes away.
  EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush));
  EXPECT_CALL(dispatcher_, clearDeferredDeleteList());
  conn_pool_.reset();
}
} // namespace Tcp
} // namespace Envoy
|
jrajahalme/envoy
|
test/common/tcp/conn_pool_test.cc
|
C++
|
apache-2.0
| 32,925
|
// do NOT import all the stuff from lab 1
// Lab 2
//
// Login screen for the voter app: a help label, username/password fields and a
// Login button stacked vertically. Pressing Login opens the vote window in the
// current tab, carrying the username typed into the field.

// Root container filling the window; children are laid out top-to-bottom.
var view = Ti.UI.createView({
    backgroundColor:'#000',
    top:0,
    left:0,
    width:'100%',
    height:'100%',
    layout:'vertical'
});

// create labels, buttons, text fields

// Horizontal row originally intended to hold a label + field pair; currently
// unused (see the commented-out add() calls below).
var usernameView = Ti.UI.createView({
    backgroundColor:'#000',
    top:0,
    left:0,
    width:'100%',
    height:'100%',
    layout:'horizontal'
});
// username.add(usernameLabel);
// username.add(usernameField);
// view.add(usernameView);

var helpLabel = Ti.UI.createLabel({
    color:'#abcdef',
    highlightedColor:'#0f0',
    backgroundColor:'transparent',
    width:200,
    height:'auto',
    text:'LOGIN INFO'
});

var usernameField = Titanium.UI.createTextField({
    value:'Joe Bloggs',
    color:'#abcdef',
    backgroundColor: '#fff',
    height:35,
    top: 10,
    //left:10,
    width:250,
    borderRadius: 10,
    font:{fontSize:15},
    borderStyle:Titanium.UI.INPUT_BORDERSTYLE_LINE
});

var passwordField = Titanium.UI.createTextField({
    hintText:'Enter password here',
    color:'#abcdef',
    backgroundColor: '#fff',
    height:35,
    top: 10,
    //left:10,
    width:250,
    borderRadius: 10,
    font:{fontSize:15},
    borderStyle:Titanium.UI.INPUT_BORDERSTYLE_LINE
});

var submitButton = Titanium.UI.createButton({
    color:'#abcdef',
    top: 20,
    width:100,
    height:40,
    font:{fontSize:20,fontWeight:'bold',fontFamily:'Helvetica Neue'},
    title:'Login'
});

view.add(helpLabel);
view.add(usernameField);
view.add(passwordField);
view.add(submitButton);
Ti.UI.currentWindow.add(view);

// BUG FIX: the username used to be captured once, at script-load time, so the
// vote window always showed the field's initial value ('Joe Bloggs') no matter
// what the user typed before pressing Login. Re-read the field on every click,
// just before opening the window. (voteWin/usernameLabel are `var`s assigned
// below; the script has fully executed before any click can occur.)
submitButton.addEventListener('click', function() {
    voteWin.username = usernameField.value;
    usernameLabel.text = voteWin.username;
    Titanium.UI.currentTab.open(voteWin, {
        animated : true
    });
});

// Vote window; `username` is a custom property read by the label below and
// refreshed on every Login click.
var voteWin = Titanium.UI.createWindow({
    title : 'Vote',
    backButtonTitle : 'Login',
    navBarHidden : false,
    backgroundColor: '#000',
    username: usernameField.value
});

// Shows the logged-in username inside the vote window.
var usernameLabel = Titanium.UI.createLabel({
    color:'#abcdef',
    highlightedColor:'#0f0',
    backgroundColor:'transparent',
    width:200,
    height:'auto',
    text: voteWin.username
});
voteWin.add(usernameLabel);
|
prpatel/VoterLab2
|
Resources/lab2.js
|
JavaScript
|
apache-2.0
| 2,023
|
//CHECKSTYLE:FileLength:OFF
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2017 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.trans;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.vfs2.FileName;
import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemException;
import org.pentaho.di.base.AbstractMeta;
import org.pentaho.di.cluster.ClusterSchema;
import org.pentaho.di.cluster.SlaveServer;
import org.pentaho.di.core.CheckResult;
import org.pentaho.di.core.CheckResultInterface;
import org.pentaho.di.core.Const;
import org.pentaho.di.core.util.Utils;
import org.pentaho.di.core.Counter;
import org.pentaho.di.core.DBCache;
import org.pentaho.di.core.LastUsedFile;
import org.pentaho.di.core.NotePadMeta;
import org.pentaho.di.core.ProgressMonitorListener;
import org.pentaho.di.core.Props;
import org.pentaho.di.core.Result;
import org.pentaho.di.core.ResultFile;
import org.pentaho.di.core.RowMetaAndData;
import org.pentaho.di.core.SQLStatement;
import org.pentaho.di.core.attributes.AttributesUtil;
import org.pentaho.di.core.database.Database;
import org.pentaho.di.core.database.DatabaseMeta;
import org.pentaho.di.core.exception.KettleDatabaseException;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.exception.KettleFileException;
import org.pentaho.di.core.exception.KettleMissingPluginsException;
import org.pentaho.di.core.exception.KettleRowException;
import org.pentaho.di.core.exception.KettleStepException;
import org.pentaho.di.core.exception.KettleXMLException;
import org.pentaho.di.core.extension.ExtensionPointHandler;
import org.pentaho.di.core.extension.KettleExtensionPoint;
import org.pentaho.di.core.gui.OverwritePrompter;
import org.pentaho.di.core.gui.Point;
import org.pentaho.di.core.logging.ChannelLogTable;
import org.pentaho.di.core.logging.LogChannel;
import org.pentaho.di.core.logging.LogChannelInterface;
import org.pentaho.di.core.logging.LogStatus;
import org.pentaho.di.core.logging.LogTableInterface;
import org.pentaho.di.core.logging.LoggingObjectInterface;
import org.pentaho.di.core.logging.LoggingObjectType;
import org.pentaho.di.core.logging.MetricsLogTable;
import org.pentaho.di.core.logging.PerformanceLogTable;
import org.pentaho.di.core.logging.StepLogTable;
import org.pentaho.di.core.logging.TransLogTable;
import org.pentaho.di.core.parameters.NamedParamsDefault;
import org.pentaho.di.core.reflection.StringSearchResult;
import org.pentaho.di.core.reflection.StringSearcher;
import org.pentaho.di.core.row.RowMeta;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.core.row.ValueMetaInterface;
import org.pentaho.di.core.util.StringUtil;
import org.pentaho.di.core.variables.VariableSpace;
import org.pentaho.di.core.vfs.KettleVFS;
import org.pentaho.di.core.xml.XMLFormatter;
import org.pentaho.di.core.xml.XMLHandler;
import org.pentaho.di.core.xml.XMLInterface;
import org.pentaho.di.i18n.BaseMessages;
import org.pentaho.di.partition.PartitionSchema;
import org.pentaho.di.repository.HasRepositoryInterface;
import org.pentaho.di.repository.Repository;
import org.pentaho.di.repository.RepositoryDirectory;
import org.pentaho.di.repository.RepositoryElementInterface;
import org.pentaho.di.repository.RepositoryObjectType;
import org.pentaho.di.resource.ResourceDefinition;
import org.pentaho.di.resource.ResourceExportInterface;
import org.pentaho.di.resource.ResourceNamingInterface;
import org.pentaho.di.resource.ResourceReference;
import org.pentaho.di.shared.SharedObjectInterface;
import org.pentaho.di.trans.step.BaseStep;
import org.pentaho.di.trans.step.RemoteStep;
import org.pentaho.di.trans.step.StepErrorMeta;
import org.pentaho.di.trans.step.StepIOMetaInterface;
import org.pentaho.di.trans.step.StepMeta;
import org.pentaho.di.trans.step.StepMetaChangeListenerInterface;
import org.pentaho.di.trans.step.StepMetaInterface;
import org.pentaho.di.trans.step.StepPartitioningMeta;
import org.pentaho.di.trans.step.errorhandling.StreamInterface;
import org.pentaho.di.trans.steps.jobexecutor.JobExecutorMeta;
import org.pentaho.di.trans.steps.mapping.MappingMeta;
import org.pentaho.di.trans.steps.missing.MissingTrans;
import org.pentaho.di.trans.steps.named.cluster.NamedClusterEmbedManager;
import org.pentaho.di.trans.steps.singlethreader.SingleThreaderMeta;
import org.pentaho.di.trans.steps.transexecutor.TransExecutorMeta;
import org.pentaho.metastore.api.IMetaStore;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
/**
* This class defines information about a transformation and offers methods to save and load it from XML or a PDI
* database repository, as well as methods to alter a transformation by adding/removing databases, steps, hops, etc.
*
* @since 20-jun-2003
* @author Matt Casters
*/
public class TransMeta extends AbstractMeta
implements XMLInterface, Comparator<TransMeta>, Comparable<TransMeta>, Cloneable, ResourceExportInterface,
RepositoryElementInterface, LoggingObjectInterface {
/** The package name, used for internationalization of messages. */
private static Class<?> PKG = Trans.class; // for i18n purposes, needed by Translator2!!
/** A constant specifying the tag value for the XML node of the transformation. */
public static final String XML_TAG = "transformation";
/**
* A constant used by the logging operations to indicate any logged messages are related to transformation meta-data.
*/
public static final String STRING_TRANSMETA = "Transformation metadata";
/** A constant specifying the repository element type as a Transformation. */
public static final RepositoryObjectType REPOSITORY_ELEMENT_TYPE = RepositoryObjectType.TRANSFORMATION;
public static final int BORDER_INDENT = 20;
/** The list of steps associated with the transformation. */
protected List<StepMeta> steps;
/** The list of hops associated with the transformation. */
protected List<TransHopMeta> hops;
/** The list of dependencies associated with the transformation. */
protected List<TransDependency> dependencies;
/** The list of cluster schemas associated with the transformation. */
protected List<ClusterSchema> clusterSchemas;
/** The list of partition schemas associated with the transformation. */
private List<PartitionSchema> partitionSchemas;
/** The version string for the transformation. */
protected String trans_version;
/** The status of the transformation. */
protected int trans_status;
/** The transformation logging table associated with the transformation. */
protected TransLogTable transLogTable;
/** The performance logging table associated with the transformation. */
protected PerformanceLogTable performanceLogTable;
/** The step logging table associated with the transformation. */
protected StepLogTable stepLogTable;
/** The metricslogging table associated with the transformation. */
protected MetricsLogTable metricsLogTable;
/** The size of the current rowset. */
protected int sizeRowset;
/** The meta-data for the database connection associated with "max date" auditing information. */
protected DatabaseMeta maxDateConnection;
/** The table name associated with "max date" auditing information. */
protected String maxDateTable;
/** The field associated with "max date" auditing information. */
protected String maxDateField;
/** The amount by which to increase the "max date" value. */
protected double maxDateOffset;
/** The maximum date difference used for "max date" auditing and limiting job sizes. */
protected double maxDateDifference;
/**
* The list of arguments to the transformation.
*
* @deprecated Moved to Trans
* */
@Deprecated
protected String[] arguments;
/**
* A table of named counters.
*
* @deprecated Moved to Trans
*/
@Deprecated
protected Hashtable<String, Counter> counters;
/** Indicators for changes in steps, databases, hops, and notes. */
protected boolean changed_steps, changed_hops;
/** The database cache. */
protected DBCache dbCache;
/** The time (in nanoseconds) to wait when the input buffer is empty. */
protected int sleepTimeEmpty;
/** The time (in nanoseconds) to wait when the input buffer is full. */
protected int sleepTimeFull;
/** The previous result. */
protected Result previousResult;
/**
* The result rows.
*
* @deprecated
* */
@Deprecated
protected List<RowMetaAndData> resultRows;
/**
* The result files.
*
* @deprecated
* */
@Deprecated
protected List<ResultFile> resultFiles;
/** Whether the transformation is using unique connections. */
protected boolean usingUniqueConnections;
/** Whether the feedback is shown. */
protected boolean feedbackShown;
/** The feedback size. */
protected int feedbackSize;
/**
* Flag to indicate thread management usage. Set to default to false from version 2.5.0 on. Before that it was enabled
* by default.
*/
protected boolean usingThreadPriorityManagment;
/** The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment. */
protected SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution;
/** Just a flag indicating that this is a slave transformation - internal use only, no GUI option. */
protected boolean slaveTransformation;
/** Whether the transformation is capturing step performance snap shots. */
protected boolean capturingStepPerformanceSnapShots;
/** The step performance capturing delay. */
protected long stepPerformanceCapturingDelay;
/** The step performance capturing size limit. */
protected String stepPerformanceCapturingSizeLimit;
/** The steps fields cache. */
protected Map<String, RowMetaInterface> stepsFieldsCache;
/** The loop cache. */
protected Map<String, Boolean> loopCache;
/** The log channel interface. */
protected LogChannelInterface log;
/** The list of StepChangeListeners */
protected List<StepMetaChangeListenerInterface> stepChangeListeners;
protected byte[] keyForSessionKey;
boolean isKeyPrivate;
private ArrayList<MissingTrans> missingTrans;
/**
 * Describes how a transformation is executed: normally (multi-threaded), serially on a single thread, or
 * single-threaded. Each type carries a stable code (used when persisting the setting) and a localized,
 * human-readable description (used in user interfaces).
 */
public enum TransformationType {

  /** A normal transformation. */
  Normal( "Normal", BaseMessages.getString( PKG, "TransMeta.TransformationType.Normal" ) ),

  /** A serial single-threaded transformation. */
  SerialSingleThreaded( "SerialSingleThreaded", BaseMessages.getString(
    PKG, "TransMeta.TransformationType.SerialSingleThreaded" ) ),

  /** A single-threaded transformation. */
  SingleThreaded( "SingleThreaded", BaseMessages
    .getString( PKG, "TransMeta.TransformationType.SingleThreaded" ) );

  /** Stable identifier of this execution type. */
  private final String code;

  /** Localized description of this execution type. */
  private final String description;

  /**
   * @param code
   *          stable identifier of the type
   * @param description
   *          localized description of the type
   */
  private TransformationType( String code, String description ) {
    this.code = code;
    this.description = description;
  }

  /**
   * @return the stable identifier of this execution type
   */
  public String getCode() {
    return code;
  }

  /**
   * @return the localized description of this execution type
   */
  public String getDescription() {
    return description;
  }

  /**
   * Resolves an execution type from its code, ignoring case. Unknown or null codes resolve to {@link #Normal}.
   *
   * @param transTypeCode
   *          the code to look up; may be null
   * @return the matching type, or {@link #Normal} when there is no match
   */
  public static TransformationType getTransformationTypeByCode( String transTypeCode ) {
    if ( transTypeCode == null ) {
      return Normal;
    }
    for ( TransformationType candidate : values() ) {
      if ( candidate.code.equalsIgnoreCase( transTypeCode ) ) {
        return candidate;
      }
    }
    return Normal;
  }

  /**
   * @return the localized descriptions of all execution types, in declaration order
   */
  public static String[] getTransformationTypesDescriptions() {
    TransformationType[] types = values();
    String[] descriptions = new String[types.length];
    for ( int i = 0; i < types.length; i++ ) {
      descriptions[i] = types[i].getDescription();
    }
    return descriptions;
  }
}
/** The transformation type. */
protected TransformationType transformationType;
// //////////////////////////////////////////////////////////////////////////
/** A list of localized strings corresponding to string descriptions of the undo/redo actions. */
public static final String[] desc_type_undo = {
"",
BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoChange" ),
BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoNew" ),
BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoDelete" ),
BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoPosition" ) };
/** A constant specifying the tag value for the XML node of the transformation information. */
protected static final String XML_TAG_INFO = "info";
/** A constant specifying the tag value for the XML node of the order of steps. */
public static final String XML_TAG_ORDER = "order";
/** A constant specifying the tag value for the XML node of the notes. */
public static final String XML_TAG_NOTEPADS = "notepads";
/** A constant specifying the tag value for the XML node of the transformation parameters. */
public static final String XML_TAG_PARAMETERS = "parameters";
/** A constant specifying the tag value for the XML node of the transformation dependencies. */
protected static final String XML_TAG_DEPENDENCIES = "dependencies";
/** A constant specifying the tag value for the XML node of the transformation's partition schemas. */
public static final String XML_TAG_PARTITIONSCHEMAS = "partitionschemas";
/** A constant specifying the tag value for the XML node of the slave servers. */
public static final String XML_TAG_SLAVESERVERS = "slaveservers";
/** A constant specifying the tag value for the XML node of the cluster schemas. */
public static final String XML_TAG_CLUSTERSCHEMAS = "clusterschemas";
/** A constant specifying the tag value for the XML node of the steps' error-handling information. */
public static final String XML_TAG_STEP_ERROR_HANDLING = "step_error_handling";
/**
 * Builds a new empty transformation. The transformation will have default logging capability and no variables, and
 * all internal meta-data is cleared to defaults.
 *
 * @see #clear()
 */
public TransMeta() {
  // Reset everything to defaults, then set up an empty variable space.
  clear();
  initializeVariablesFrom( null );
}
/**
 * Builds a new empty transformation with a set of variables to inherit from.
 *
 * @param parent
 *          the variable space to inherit from
 */
public TransMeta( VariableSpace parent ) {
  // Reset to defaults first, then seed this meta-data's variables from the parent space.
  clear();
  initializeVariablesFrom( parent );
}
/**
 * Builds a new transformation with the given filename and name. All other meta-data is cleared to defaults and an
 * empty variable space is initialized.
 *
 * @param filename
 *          the filename of the transformation
 * @param name
 *          the name of the transformation
 */
public TransMeta( String filename, String name ) {
  // clear() runs first: the setters below must come after the reset to survive it.
  clear();
  setFilename( filename );
  this.name = name;
  initializeVariablesFrom( null );
}
/**
 * Constructs a new transformation specifying the filename, name and arguments.
 *
 * @param filename
 *          The filename of the transformation
 * @param name
 *          The name of the transformation
 * @param arguments
 *          The arguments as Strings
 * @deprecated passing in arguments (a runtime argument) into the metadata is deprecated, pass it to Trans
 */
@Deprecated
public TransMeta( String filename, String name, String[] arguments ) {
  // Same as TransMeta(filename, name), plus the (deprecated) runtime arguments.
  clear();
  setFilename( filename );
  this.name = name;
  this.arguments = arguments;
  initializeVariablesFrom( null );
}
/**
 * Compares two transformation on name, filename, repository directory, etc.
 * The comparison algorithm is as follows:<br/>
 * <ol>
 * <li>The first transformation's filename is checked first; if it has none, the transformation comes from a
 * repository. If the second transformation does not come from a repository, -1 is returned.</li>
 * <li>If the transformations are both from a repository, the transformations' names are compared. If the first
 * transformation has no name and the second one does, a -1 is returned.
 * If the opposite is true, a 1 is returned.</li>
 * <li>If they both have names they are compared as strings. If the result is non-zero it is returned. Otherwise the
 * repository directories are compared using the same technique of checking empty values and then performing a string
 * comparison, returning any non-zero result.</li>
 * <li>If the names and directories are equal, the object revision strings are compared using the same technique of
 * checking empty values and then performing a string comparison, this time ultimately returning the result of the
 * string compare.</li>
 * <li>If the first transformation does not come from a repository and the second one does, a 1 is returned. Otherwise
 * the transformation names and filenames are subsequently compared using the same technique of checking empty values
 * and then performing a string comparison, ultimately returning the result of the filename string comparison.
 * </ol>
 *
 * @param t1
 *          the first transformation to compare
 * @param t2
 *          the second transformation to compare
 * @return 0 if the two transformations are equal, 1 or -1 depending on the values (see description above)
 *
 */
@Override
public int compare( TransMeta t1, TransMeta t2 ) {
  // The actual algorithm lives in AbstractMeta; this override only narrows the type.
  return super.compare( t1, t2 );
}
/**
 * Compares this transformation's meta-data to the specified transformation's meta-data. This method simply calls
 * compare(this, o)
 *
 * @param o
 *          the transformation to compare this one to
 * @return the comparison result (see {@link #compare(TransMeta, TransMeta)})
 * @see #compare(TransMeta, TransMeta)
 * @see java.lang.Comparable#compareTo(java.lang.Object)
 */
@Override
public int compareTo( TransMeta o ) {
  return compare( this, o );
}
/**
 * Checks whether this transformation's meta-data object is equal to the specified object. If the specified object is
 * not an instance of TransMeta, false is returned. Otherwise the method returns whether a call to compare() indicates
 * equality (i.e. compare(this, (TransMeta)obj)==0).
 *
 * @param obj
 *          the object to compare against
 * @return true, if the object is a TransMeta that compares equal to this one
 * @see #compare(TransMeta, TransMeta)
 * @see java.lang.Object#equals(java.lang.Object)
 */
@Override
public boolean equals( Object obj ) {
  // NOTE(review): no matching hashCode() override is visible in this part of the
  // file -- confirm one exists (e.g. in AbstractMeta) and is consistent with this.
  if ( !( obj instanceof TransMeta ) ) {
    return false;
  }
  return compare( this, (TransMeta) obj ) == 0;
}
/**
 * Clones the transformation meta-data object.
 *
 * @return a clone of the transformation meta-data object, or null if cloning failed
 *         (see {@link #realClone(boolean)})
 * @see java.lang.Object#clone()
 */
@Override
public Object clone() {
  // Deep clone with a full reset of the copy before values are transferred.
  return realClone( true );
}
/**
 * Perform a real clone of the transformation meta-data object, including cloning all lists and copying all values. If
 * the doClear parameter is true, the clone will be cleared of ALL values before the copy. If false, only the copied
 * fields will be cleared.
 *
 * @param doClear
 *          Whether to clear all of the clone's data before copying from the source object
 * @return a real clone of the calling object, or null if cloning failed
 */
public Object realClone( boolean doClear ) {
  try {
    TransMeta transMeta = (TransMeta) super.clone();
    if ( doClear ) {
      transMeta.clear();
    } else {
      // Clear out the things we're replacing below
      transMeta.databases = new ArrayList<>();
      transMeta.steps = new ArrayList<>();
      transMeta.hops = new ArrayList<>();
      transMeta.notes = new ArrayList<>();
      transMeta.dependencies = new ArrayList<>();
      transMeta.partitionSchemas = new ArrayList<>();
      transMeta.slaveServers = new ArrayList<>();
      transMeta.clusterSchemas = new ArrayList<>();
      transMeta.namedParams = new NamedParamsDefault();
      transMeta.stepChangeListeners = new ArrayList<>();
    }
    // Deep-copy every collection element into the clone.
    for ( DatabaseMeta db : databases ) {
      transMeta.addDatabase( (DatabaseMeta) db.clone() );
    }
    for ( StepMeta step : steps ) {
      transMeta.addStep( (StepMeta) step.clone() );
    }
    // PDI-15799: Step references are original yet. Set them to the clones.
    // (The cloned steps' info-streams still point at the ORIGINAL StepMeta
    // objects; re-resolve them by name against the clone's own step list.)
    for ( StepMeta step : transMeta.getSteps() ) {
      final StepMetaInterface stepMetaInterface = step.getStepMetaInterface();
      if ( stepMetaInterface != null ) {
        final StepIOMetaInterface stepIOMeta = stepMetaInterface.getStepIOMeta();
        if ( stepIOMeta != null ) {
          for ( StreamInterface stream : stepIOMeta.getInfoStreams() ) {
            String streamStepName = stream.getStepname();
            if ( streamStepName != null ) {
              StepMeta streamStepMeta = transMeta.findStep( streamStepName );
              stream.setStepMeta( streamStepMeta );
            }
          }
        }
      }
    }
    for ( TransHopMeta hop : hops ) {
      transMeta.addTransHop( (TransHopMeta) hop.clone() );
    }
    for ( NotePadMeta note : notes ) {
      transMeta.addNote( (NotePadMeta) note.clone() );
    }
    for ( TransDependency dep : dependencies ) {
      transMeta.addDependency( (TransDependency) dep.clone() );
    }
    for ( SlaveServer slave : slaveServers ) {
      transMeta.getSlaveServers().add( (SlaveServer) slave.clone() );
    }
    for ( ClusterSchema schema : clusterSchemas ) {
      transMeta.getClusterSchemas().add( schema.clone() );
    }
    for ( PartitionSchema schema : partitionSchemas ) {
      transMeta.getPartitionSchemas().add( (PartitionSchema) schema.clone() );
    }
    // Parameter definitions are copied by re-declaring them on the clone.
    for ( String key : listParameters() ) {
      transMeta.addParameterDefinition( key, getParameterDefault( key ), getParameterDescription( key ) );
    }
    return transMeta;
  } catch ( Exception e ) {
    // NOTE(review): failures are swallowed here -- the stack trace goes to
    // stderr and null is returned, so callers must null-check the result.
    e.printStackTrace();
    return null;
  }
}
/**
 * Clears the transformation's meta-data, including the lists of databases, steps, hops, notes, dependencies,
 * partition schemas, slave servers, and cluster schemas. Logging information and timeouts are reset to defaults, and
 * recent connection info is cleared.
 */
@Override
public void clear() {
  setObjectId( null );
  // Fresh, empty collections for all transformation content.
  steps = new ArrayList<>();
  hops = new ArrayList<>();
  dependencies = new ArrayList<>();
  partitionSchemas = new ArrayList<>();
  clusterSchemas = new ArrayList<>();
  namedParams = new NamedParamsDefault();
  stepChangeListeners = new ArrayList<>();
  slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution();
  trans_status = -1;
  trans_version = null;
  // Recreate the log tables with their defaults (the trans log table tracks the steps list).
  transLogTable = TransLogTable.getDefault( this, this, steps );
  performanceLogTable = PerformanceLogTable.getDefault( this, this );
  stepLogTable = StepLogTable.getDefault( this, this );
  metricsLogTable = MetricsLogTable.getDefault( this, this );
  // Row buffering and polling timeouts back to the global defaults.
  sizeRowset = Const.ROWS_IN_ROWSET;
  sleepTimeEmpty = Const.TIMEOUT_GET_MILLIS;
  sleepTimeFull = Const.TIMEOUT_PUT_MILLIS;
  // "Max date" auditing settings are cleared entirely.
  maxDateConnection = null;
  maxDateTable = null;
  maxDateField = null;
  maxDateOffset = 0.0;
  maxDateDifference = 0.0;
  // Undo history is reset.
  undo = new ArrayList<>();
  max_undo = Const.MAX_UNDO;
  undo_position = -1;
  counters = new Hashtable<>();
  // NOTE(review): resultRows is re-assigned to a fresh list a few lines below;
  // this null assignment appears redundant.
  resultRows = null;
  super.clear();
  // LOAD THE DATABASE CACHE!
  dbCache = DBCache.getInstance();
  resultRows = new ArrayList<>();
  resultFiles = new ArrayList<>();
  feedbackShown = true;
  feedbackSize = Const.ROWS_UPDATE;
  // Thread priority:
  // - set to false in version 2.5.0
  // - re-enabling in version 3.0.1 to prevent excessive locking (PDI-491)
  //
  usingThreadPriorityManagment = true;
  // The performance monitoring options
  //
  capturingStepPerformanceSnapShots = false;
  stepPerformanceCapturingDelay = 1000; // every 1 seconds
  stepPerformanceCapturingSizeLimit = "100"; // maximum 100 data points
  stepsFieldsCache = new HashMap<>();
  loopCache = new HashMap<>();
  transformationType = TransformationType.Normal;
  log = LogChannel.GENERAL;
}
/**
* Add a new step to the transformation. Also marks that the transformation's steps have changed.
*
* @param stepMeta
* The meta-data for the step to be added.
*/
public void addStep( StepMeta stepMeta ) {
steps.add( stepMeta );
stepMeta.setParentTransMeta( this );
StepMetaInterface iface = stepMeta.getStepMetaInterface();
if ( iface instanceof StepMetaChangeListenerInterface ) {
addStepChangeListener( (StepMetaChangeListenerInterface) iface );
}
changed_steps = true;
}
  /**
   * Add a new step to the transformation if that step didn't exist yet. Otherwise, replace the step. This method also
   * marks that the transformation's steps have changed.
   *
   * @param stepMeta
   *          The meta-data for the step to be added.
   */
  public void addOrReplaceStep( StepMeta stepMeta ) {
    int index = steps.indexOf( stepMeta );
    if ( index < 0 ) {
      // NOTE(review): a successful add appends the step at the END of the list, yet index is
      // forced to 0 here, so the change listener below is registered at position 0 rather than
      // at the step's real position — confirm this is intentional (it differs from
      // addStep( int, StepMeta ), which uses the actual insertion index).
      index = steps.add( stepMeta ) ? 0 : index;
    } else {
      // Step already present: merge the new meta-data into the existing entry in place.
      StepMeta previous = getStep( index );
      previous.replaceMeta( stepMeta );
    }
    stepMeta.setParentTransMeta( this );
    StepMetaInterface iface = stepMeta.getStepMetaInterface();
    if ( index != -1 && iface instanceof StepMetaChangeListenerInterface ) {
      addStepChangeListener( index, (StepMetaChangeListenerInterface) iface );
    }
    changed_steps = true;
  }
/**
* Add a new hop to the transformation. The hop information (source and target steps, e.g.) should be configured in
* the TransHopMeta object before calling addTransHop(). Also marks that the transformation's hops have changed.
*
* @param hi
* The hop meta-data to be added.
*/
public void addTransHop( TransHopMeta hi ) {
hops.add( hi );
changed_hops = true;
}
/**
* Add a new dependency to the transformation.
*
* @param td
* The transformation dependency to be added.
*/
public void addDependency( TransDependency td ) {
dependencies.add( td );
}
/**
* Add a new step to the transformation at the specified index. This method sets the step's parent transformation to
* the this transformation, and marks that the transformations' steps have changed.
*
* @param p
* The index into the step list
* @param stepMeta
* The step to be added.
*/
public void addStep( int p, StepMeta stepMeta ) {
steps.add( p, stepMeta );
stepMeta.setParentTransMeta( this );
changed_steps = true;
StepMetaInterface iface = stepMeta.getStepMetaInterface();
if ( iface instanceof StepMetaChangeListenerInterface ) {
addStepChangeListener( p, (StepMetaChangeListenerInterface) stepMeta.getStepMetaInterface() );
}
}
/**
* Add a new hop to the transformation on a certain location (i.e. the specified index). Also marks that the
* transformation's hops have changed.
*
* @param p
* the index into the hop list
* @param hi
* The hop to be added.
*/
public void addTransHop( int p, TransHopMeta hi ) {
try {
hops.add( p, hi );
} catch ( IndexOutOfBoundsException e ) {
hops.add( hi );
}
changed_hops = true;
}
/**
* Add a new dependency to the transformation on a certain location (i.e. the specified index).
*
* @param p
* The index into the dependencies list.
* @param td
* The transformation dependency to be added.
*/
public void addDependency( int p, TransDependency td ) {
dependencies.add( p, td );
}
/**
* Get a list of defined steps in this transformation.
*
* @return an ArrayList of defined steps.
*/
public List<StepMeta> getSteps() {
return steps;
}
/**
* Retrieves a step on a certain location (i.e. the specified index).
*
* @param i
* The index into the steps list.
* @return The desired step's meta-data.
*/
public StepMeta getStep( int i ) {
return steps.get( i );
}
/**
* Retrieves a hop on a certain location (i.e. the specified index).
*
* @param i
* The index into the hops list.
* @return The desired hop's meta-data.
*/
public TransHopMeta getTransHop( int i ) {
return hops.get( i );
}
/**
* Retrieves a dependency on a certain location (i.e. the specified index).
*
* @param i
* The index into the dependencies list.
* @return The dependency object.
*/
public TransDependency getDependency( int i ) {
return dependencies.get( i );
}
/**
* Removes a step from the transformation on a certain location (i.e. the specified index). Also marks that the
* transformation's steps have changed.
*
* @param i
* The index
*/
public void removeStep( int i ) {
if ( i < 0 || i >= steps.size() ) {
return;
}
StepMeta removeStep = steps.get( i );
StepMetaInterface iface = removeStep.getStepMetaInterface();
if ( iface instanceof StepMetaChangeListenerInterface ) {
removeStepChangeListener( (StepMetaChangeListenerInterface) iface );
}
steps.remove( i );
if ( removeStep.getStepMetaInterface() instanceof MissingTrans ) {
removeMissingTrans( (MissingTrans) removeStep.getStepMetaInterface() );
}
changed_steps = true;
}
/**
* Removes a hop from the transformation on a certain location (i.e. the specified index). Also marks that the
* transformation's hops have changed.
*
* @param i
* The index into the hops list
*/
public void removeTransHop( int i ) {
if ( i < 0 || i >= hops.size() ) {
return;
}
hops.remove( i );
changed_hops = true;
}
/**
* Removes a hop from the transformation. Also marks that the
* transformation's hops have changed.
*
* @param hop
* The hop to remove from the list of hops
*/
public void removeTransHop( TransHopMeta hop ) {
hops.remove( hop );
changed_hops = true;
}
/**
* Removes a dependency from the transformation on a certain location (i.e. the specified index).
*
* @param i
* The location
*/
public void removeDependency( int i ) {
if ( i < 0 || i >= dependencies.size() ) {
return;
}
dependencies.remove( i );
}
/**
* Clears all the dependencies from the transformation.
*/
public void removeAllDependencies() {
dependencies.clear();
}
/**
* Gets the number of steps in the transformation.
*
* @return The number of steps in the transformation.
*/
public int nrSteps() {
return steps.size();
}
/**
* Gets the number of hops in the transformation.
*
* @return The number of hops in the transformation.
*/
public int nrTransHops() {
return hops.size();
}
/**
* Gets the number of dependencies in the transformation.
*
* @return The number of dependencies in the transformation.
*/
public int nrDependencies() {
return dependencies.size();
}
/**
* Gets the number of stepChangeListeners in the transformation.
*
* @return The number of stepChangeListeners in the transformation.
*/
public int nrStepChangeListeners() {
return stepChangeListeners.size();
}
/**
* Changes the content of a step on a certain position. This is accomplished by setting the step's metadata at the
* specified index to the specified meta-data object. The new step's parent transformation is updated to be this
* transformation.
*
* @param i
* The index into the steps list
* @param stepMeta
* The step meta-data to set
*/
public void setStep( int i, StepMeta stepMeta ) {
StepMetaInterface iface = stepMeta.getStepMetaInterface();
if ( iface instanceof StepMetaChangeListenerInterface ) {
addStepChangeListener( i, (StepMetaChangeListenerInterface) stepMeta.getStepMetaInterface() );
}
steps.set( i, stepMeta );
stepMeta.setParentTransMeta( this );
}
/**
* Changes the content of a hop on a certain position. This is accomplished by setting the hop's metadata at the
* specified index to the specified meta-data object.
*
* @param i
* The index into the hops list
* @param hi
* The hop meta-data to set
*/
public void setTransHop( int i, TransHopMeta hi ) {
hops.set( i, hi );
}
/**
* Gets the list of used steps, which are the steps that are connected by hops.
*
* @return a list with all the used steps
*/
public List<StepMeta> getUsedSteps() {
List<StepMeta> list = new ArrayList<>();
for ( StepMeta stepMeta : steps ) {
if ( isStepUsedInTransHops( stepMeta ) ) {
list.add( stepMeta );
}
}
return list;
}
/**
* Searches the list of steps for a step with a certain name.
*
* @param name
* The name of the step to look for
* @return The step information or null if no nothing was found.
*/
public StepMeta findStep( String name ) {
return findStep( name, null );
}
/**
* Searches the list of steps for a step with a certain name while excluding one step.
*
* @param name
* The name of the step to look for
* @param exclude
* The step information to exclude.
* @return The step information or null if nothing was found.
*/
public StepMeta findStep( String name, StepMeta exclude ) {
if ( name == null ) {
return null;
}
int excl = -1;
if ( exclude != null ) {
excl = indexOfStep( exclude );
}
for ( int i = 0; i < nrSteps(); i++ ) {
StepMeta stepMeta = getStep( i );
if ( i != excl && stepMeta.getName().equalsIgnoreCase( name ) ) {
return stepMeta;
}
}
return null;
}
/**
* Searches the list of hops for a hop with a certain name.
*
* @param name
* The name of the hop to look for
* @return The hop information or null if nothing was found.
*/
public TransHopMeta findTransHop( String name ) {
int i;
for ( i = 0; i < nrTransHops(); i++ ) {
TransHopMeta hi = getTransHop( i );
if ( hi.toString().equalsIgnoreCase( name ) ) {
return hi;
}
}
return null;
}
/**
* Search all hops for a hop where a certain step is at the start.
*
* @param fromstep
* The step at the start of the hop.
* @return The hop or null if no hop was found.
*/
public TransHopMeta findTransHopFrom( StepMeta fromstep ) {
int i;
for ( i = 0; i < nrTransHops(); i++ ) {
TransHopMeta hi = getTransHop( i );
if ( hi.getFromStep() != null && hi.getFromStep().equals( fromstep ) ) { // return the first
return hi;
}
}
return null;
}
public List<TransHopMeta> findAllTransHopFrom( StepMeta fromstep ) {
return hops.stream()
.filter( hop -> hop.getFromStep() != null && hop.getFromStep().equals( fromstep ) )
.collect( Collectors.toList() );
}
/**
* Find a certain hop in the transformation.
*
* @param hi
* The hop information to look for.
* @return The hop or null if no hop was found.
*/
public TransHopMeta findTransHop( TransHopMeta hi ) {
return findTransHop( hi.getFromStep(), hi.getToStep() );
}
/**
* Search all hops for a hop where a certain step is at the start and another is at the end.
*
* @param from
* The step at the start of the hop.
* @param to
* The step at the end of the hop.
* @return The hop or null if no hop was found.
*/
public TransHopMeta findTransHop( StepMeta from, StepMeta to ) {
return findTransHop( from, to, false );
}
/**
* Search all hops for a hop where a certain step is at the start and another is at the end.
*
* @param from
* The step at the start of the hop.
* @param to
* The step at the end of the hop.
* @param disabledToo
* the disabled too
* @return The hop or null if no hop was found.
*/
public TransHopMeta findTransHop( StepMeta from, StepMeta to, boolean disabledToo ) {
for ( int i = 0; i < nrTransHops(); i++ ) {
TransHopMeta hi = getTransHop( i );
if ( hi.isEnabled() || disabledToo ) {
if ( hi.getFromStep() != null && hi.getToStep() != null && hi.getFromStep().equals( from ) && hi.getToStep()
.equals( to ) ) {
return hi;
}
}
}
return null;
}
/**
* Search all hops for a hop where a certain step is at the end.
*
* @param tostep
* The step at the end of the hop.
* @return The hop or null if no hop was found.
*/
public TransHopMeta findTransHopTo( StepMeta tostep ) {
int i;
for ( i = 0; i < nrTransHops(); i++ ) {
TransHopMeta hi = getTransHop( i );
if ( hi.getToStep() != null && hi.getToStep().equals( tostep ) ) { // Return the first!
return hi;
}
}
return null;
}
/**
* Determines whether or not a certain step is informative. This means that the previous step is sending information
* to this step, but only informative. This means that this step is using the information to process the actual stream
* of data. We use this in StreamLookup, TableInput and other types of steps.
*
* @param this_step
* The step that is receiving information.
* @param prev_step
* The step that is sending information
* @return true if prev_step if informative for this_step.
*/
public boolean isStepInformative( StepMeta this_step, StepMeta prev_step ) {
String[] infoSteps = this_step.getStepMetaInterface().getStepIOMeta().getInfoStepnames();
if ( infoSteps == null ) {
return false;
}
for ( int i = 0; i < infoSteps.length; i++ ) {
if ( prev_step.getName().equalsIgnoreCase( infoSteps[i] ) ) {
return true;
}
}
return false;
}
/**
* Counts the number of previous steps for a step name.
*
* @param stepname
* The name of the step to start from
* @return The number of preceding steps.
* @deprecated
*/
@Deprecated
public int findNrPrevSteps( String stepname ) {
return findNrPrevSteps( findStep( stepname ), false );
}
/**
* Counts the number of previous steps for a step name taking into account whether or not they are informational.
*
* @param stepname
* The name of the step to start from
* @param info
* true if only the informational steps are desired, false otherwise
* @return The number of preceding steps.
* @deprecated
*/
@Deprecated
public int findNrPrevSteps( String stepname, boolean info ) {
return findNrPrevSteps( findStep( stepname ), info );
}
/**
* Find the number of steps that precede the indicated step.
*
* @param stepMeta
* The source step
*
* @return The number of preceding steps found.
*/
public int findNrPrevSteps( StepMeta stepMeta ) {
return findNrPrevSteps( stepMeta, false );
}
/**
* Find the previous step on a certain location (i.e. the specified index).
*
* @param stepname
* The source step name
* @param nr
* the index into the step list
*
* @return The preceding step found.
* @deprecated
*/
@Deprecated
public StepMeta findPrevStep( String stepname, int nr ) {
return findPrevStep( findStep( stepname ), nr );
}
/**
* Find the previous step on a certain location taking into account the steps being informational or not.
*
* @param stepname
* The name of the step
* @param nr
* The index into the step list
* @param info
* true if only the informational steps are desired, false otherwise
* @return The step information
* @deprecated
*/
@Deprecated
public StepMeta findPrevStep( String stepname, int nr, boolean info ) {
return findPrevStep( findStep( stepname ), nr, info );
}
/**
* Find the previous step on a certain location (i.e. the specified index).
*
* @param stepMeta
* The source step information
* @param nr
* the index into the hops list
*
* @return The preceding step found.
*/
public StepMeta findPrevStep( StepMeta stepMeta, int nr ) {
return findPrevStep( stepMeta, nr, false );
}
/**
* Count the number of previous steps on a certain location taking into account the steps being informational or not.
*
* @param stepMeta
* The name of the step
* @param info
* true if only the informational steps are desired, false otherwise
* @return The number of preceding steps
* @deprecated please use method findPreviousSteps
*/
@Deprecated
public int findNrPrevSteps( StepMeta stepMeta, boolean info ) {
int count = 0;
int i;
for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops;
TransHopMeta hi = getTransHop( i );
if ( hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) {
// Check if this previous step isn't informative (StreamValueLookup)
// We don't want fields from this stream to show up!
if ( info || !isStepInformative( stepMeta, hi.getFromStep() ) ) {
count++;
}
}
}
return count;
}
/**
* Find the previous step on a certain location taking into account the steps being informational or not.
*
* @param stepMeta
* The step
* @param nr
* The index into the hops list
* @param info
* true if we only want the informational steps.
* @return The preceding step information
* @deprecated please use method findPreviousSteps
*/
@Deprecated
public StepMeta findPrevStep( StepMeta stepMeta, int nr, boolean info ) {
int count = 0;
int i;
for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops;
TransHopMeta hi = getTransHop( i );
if ( hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) {
if ( info || !isStepInformative( stepMeta, hi.getFromStep() ) ) {
if ( count == nr ) {
return hi.getFromStep();
}
count++;
}
}
}
return null;
}
/**
* Get the list of previous steps for a certain reference step. This includes the info steps.
*
* @param stepMeta
* The reference step
* @return The list of the preceding steps, including the info steps.
*/
public List<StepMeta> findPreviousSteps( StepMeta stepMeta ) {
return findPreviousSteps( stepMeta, true );
}
/**
* Get the previous steps on a certain location taking into account the steps being informational or not.
*
* @param stepMeta
* The name of the step
* @param info
* true if we only want the informational steps.
* @return The list of the preceding steps
*/
public List<StepMeta> findPreviousSteps( StepMeta stepMeta, boolean info ) {
List<StepMeta> previousSteps = new ArrayList<>();
for ( TransHopMeta hi : hops ) {
if ( hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) {
// Check if this previous step isn't informative (StreamValueLookup)
// We don't want fields from this stream to show up!
if ( info || !isStepInformative( stepMeta, hi.getFromStep() ) ) {
previousSteps.add( hi.getFromStep() );
}
}
}
return previousSteps;
}
/**
* Get the informational steps for a certain step. An informational step is a step that provides information for
* lookups, etc.
*
* @param stepMeta
* The name of the step
* @return An array of the informational steps found
*/
public StepMeta[] getInfoStep( StepMeta stepMeta ) {
String[] infoStepName = stepMeta.getStepMetaInterface().getStepIOMeta().getInfoStepnames();
if ( infoStepName == null ) {
return null;
}
StepMeta[] infoStep = new StepMeta[infoStepName.length];
for ( int i = 0; i < infoStep.length; i++ ) {
infoStep[i] = findStep( infoStepName[i] );
}
return infoStep;
}
/**
* Find the the number of informational steps for a certain step.
*
* @param stepMeta
* The step
* @return The number of informational steps found.
*/
public int findNrInfoSteps( StepMeta stepMeta ) {
if ( stepMeta == null ) {
return 0;
}
int count = 0;
for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops;
TransHopMeta hi = getTransHop( i );
if ( hi == null || hi.getToStep() == null ) {
log.logError( BaseMessages.getString( PKG, "TransMeta.Log.DestinationOfHopCannotBeNull" ) );
}
if ( hi != null && hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) {
// Check if this previous step isn't informative (StreamValueLookup)
// We don't want fields from this stream to show up!
if ( isStepInformative( stepMeta, hi.getFromStep() ) ) {
count++;
}
}
}
return count;
}
/**
* Find the informational fields coming from an informational step into the step specified.
*
* @param stepname
* The name of the step
* @return A row containing fields with origin.
* @throws KettleStepException
* the kettle step exception
*/
public RowMetaInterface getPrevInfoFields( String stepname ) throws KettleStepException {
return getPrevInfoFields( findStep( stepname ) );
}
/**
* Find the informational fields coming from an informational step into the step specified.
*
* @param stepMeta
* The receiving step
* @return A row containing fields with origin.
* @throws KettleStepException
* the kettle step exception
*/
public RowMetaInterface getPrevInfoFields( StepMeta stepMeta ) throws KettleStepException {
for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops;
TransHopMeta hi = getTransHop( i );
if ( hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) {
StepMeta infoStep = hi.getFromStep();
if ( isStepInformative( stepMeta, infoStep ) ) {
RowMetaInterface row = getPrevStepFields( infoStep );
return getThisStepFields( infoStep, stepMeta, row );
}
}
}
return new RowMeta();
}
/**
* Find the number of succeeding steps for a certain originating step.
*
* @param stepMeta
* The originating step
* @return The number of succeeding steps.
* @deprecated use {@link #getNextSteps(StepMeta)}
*/
@Deprecated
public int findNrNextSteps( StepMeta stepMeta ) {
int count = 0;
int i;
for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops;
TransHopMeta hi = getTransHop( i );
if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) {
count++;
}
}
return count;
}
/**
* Find the succeeding step at a location for an originating step.
*
* @param stepMeta
* The originating step
* @param nr
* The location
* @return The step found.
* @deprecated use {@link #getNextSteps(StepMeta)}
*/
@Deprecated
public StepMeta findNextStep( StepMeta stepMeta, int nr ) {
int count = 0;
int i;
for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops;
TransHopMeta hi = getTransHop( i );
if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) {
if ( count == nr ) {
return hi.getToStep();
}
count++;
}
}
return null;
}
/**
* Retrieve an array of preceding steps for a certain destination step. This includes the info steps.
*
* @param stepMeta
* The destination step
* @return An array containing the preceding steps.
*/
public StepMeta[] getPrevSteps( StepMeta stepMeta ) {
List<StepMeta> prevSteps = new ArrayList<>();
for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops;
TransHopMeta hopMeta = getTransHop( i );
if ( hopMeta.isEnabled() && hopMeta.getToStep().equals( stepMeta ) ) {
prevSteps.add( hopMeta.getFromStep() );
}
}
return prevSteps.toArray( new StepMeta[prevSteps.size()] );
}
  /**
   * Retrieve an array of preceding step names for a certain destination step name.
   *
   * @param stepname
   *          The destination step name
   * @return An array of preceding step names
   */
  public String[] getPrevStepNames( String stepname ) {
    return getPrevStepNames( findStep( stepname ) );
  }
/**
* Retrieve an array of preceding steps for a certain destination step.
*
* @param stepMeta
* The destination step
* @return an array of preceding step names.
*/
public String[] getPrevStepNames( StepMeta stepMeta ) {
StepMeta[] prevStepMetas = getPrevSteps( stepMeta );
String[] retval = new String[prevStepMetas.length];
for ( int x = 0; x < prevStepMetas.length; x++ ) {
retval[x] = prevStepMetas[x].getName();
}
return retval;
}
/**
* Retrieve an array of succeeding steps for a certain originating step.
*
* @param stepMeta
* The originating step
* @return an array of succeeding steps.
* @deprecated use findNextSteps instead
*/
@Deprecated
public StepMeta[] getNextSteps( StepMeta stepMeta ) {
List<StepMeta> nextSteps = new ArrayList<>();
for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops;
TransHopMeta hi = getTransHop( i );
if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) {
nextSteps.add( hi.getToStep() );
}
}
return nextSteps.toArray( new StepMeta[nextSteps.size()] );
}
/**
* Retrieve a list of succeeding steps for a certain originating step.
*
* @param stepMeta
* The originating step
* @return an array of succeeding steps.
*/
public List<StepMeta> findNextSteps( StepMeta stepMeta ) {
List<StepMeta> nextSteps = new ArrayList<>();
for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops;
TransHopMeta hi = getTransHop( i );
if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) {
nextSteps.add( hi.getToStep() );
}
}
return nextSteps;
}
/**
* Retrieve an array of succeeding step names for a certain originating step.
*
* @param stepMeta
* The originating step
* @return an array of succeeding step names.
*/
public String[] getNextStepNames( StepMeta stepMeta ) {
StepMeta[] nextStepMeta = getNextSteps( stepMeta );
String[] retval = new String[nextStepMeta.length];
for ( int x = 0; x < nextStepMeta.length; x++ ) {
retval[x] = nextStepMeta[x].getName();
}
return retval;
}
/**
* Find the step that is located on a certain point on the canvas, taking into account the icon size.
*
* @param x
* the x-coordinate of the point queried
* @param y
* the y-coordinate of the point queried
* @param iconsize
* the iconsize
* @return The step information if a step is located at the point. Otherwise, if no step was found: null.
*/
public StepMeta getStep( int x, int y, int iconsize ) {
int i, s;
s = steps.size();
for ( i = s - 1; i >= 0; i-- ) { // Back to front because drawing goes from start to end
StepMeta stepMeta = steps.get( i );
if ( partOfTransHop( stepMeta ) || stepMeta.isDrawn() ) { // Only consider steps from active or inactive hops!
Point p = stepMeta.getLocation();
if ( p != null ) {
if ( x >= p.x && x <= p.x + iconsize && y >= p.y && y <= p.y + iconsize + 20 ) {
return stepMeta;
}
}
}
}
return null;
}
/**
* Determines whether or not a certain step is part of a hop.
*
* @param stepMeta
* The step queried
* @return true if the step is part of a hop.
*/
public boolean partOfTransHop( StepMeta stepMeta ) {
int i;
for ( i = 0; i < nrTransHops(); i++ ) {
TransHopMeta hi = getTransHop( i );
if ( hi.getFromStep() == null || hi.getToStep() == null ) {
return false;
}
if ( hi.getFromStep().equals( stepMeta ) || hi.getToStep().equals( stepMeta ) ) {
return true;
}
}
return false;
}
/**
* Returns the fields that are emitted by a certain step name.
*
* @param stepname
* The stepname of the step to be queried.
* @return A row containing the fields emitted.
* @throws KettleStepException
* the kettle step exception
*/
public RowMetaInterface getStepFields( String stepname ) throws KettleStepException {
StepMeta stepMeta = findStep( stepname );
if ( stepMeta != null ) {
return getStepFields( stepMeta );
} else {
return null;
}
}
/**
* Returns the fields that are emitted by a certain step.
*
* @param stepMeta
* The step to be queried.
* @return A row containing the fields emitted.
* @throws KettleStepException
* the kettle step exception
*/
public RowMetaInterface getStepFields( StepMeta stepMeta ) throws KettleStepException {
return getStepFields( stepMeta, null );
}
/**
* Gets the fields for each of the specified steps and merges them into a single set
*
* @param stepMeta
* the step meta
* @return an interface to the step fields
* @throws KettleStepException
* the kettle step exception
*/
public RowMetaInterface getStepFields( StepMeta[] stepMeta ) throws KettleStepException {
RowMetaInterface fields = new RowMeta();
for ( int i = 0; i < stepMeta.length; i++ ) {
RowMetaInterface flds = getStepFields( stepMeta[i] );
if ( flds != null ) {
fields.mergeRowMeta( flds, stepMeta[i].getName() );
}
}
return fields;
}
/**
* Returns the fields that are emitted by a certain step.
*
* @param stepMeta
* The step to be queried.
* @param monitor
* The progress monitor for progress dialog. (null if not used!)
* @return A row containing the fields emitted.
* @throws KettleStepException
* the kettle step exception
*/
public RowMetaInterface getStepFields( StepMeta stepMeta, ProgressMonitorListener monitor ) throws KettleStepException {
clearStepFieldsCachce();
setRepositoryOnMappingSteps();
return getStepFields( stepMeta, null, monitor );
}
  /**
   * Returns the fields that are emitted by a certain step, recursively resolving the layouts of
   * all preceding steps. Results are memoized in {@code stepsFieldsCache}, keyed by
   * "stepName" or "stepName-targetStepName".
   *
   * @param stepMeta
   *          The step to be queried.
   * @param targetStep
   *          the target step
   * @param monitor
   *          The progress monitor for progress dialog. (null if not used!)
   * @return A row containing the fields emitted.
   * @throws KettleStepException
   *           the kettle step exception
   */
  public RowMetaInterface getStepFields( StepMeta stepMeta, StepMeta targetStep, ProgressMonitorListener monitor ) throws KettleStepException {
    RowMetaInterface row = new RowMeta();
    if ( stepMeta == null ) {
      return row;
    }
    // Cache key: source step name, optionally suffixed with the target step name.
    String fromToCacheEntry = stepMeta.getName() + ( targetStep != null ? ( "-" + targetStep.getName() ) : "" );
    RowMetaInterface rowMeta = stepsFieldsCache.get( fromToCacheEntry );
    if ( rowMeta != null ) {
      return rowMeta;
    }
    // See if the step is sending ERROR rows to the specified target step.
    //
    if ( targetStep != null && stepMeta.isSendingErrorRowsToStep( targetStep ) ) {
      // The error rows are the same as the input rows for
      // the step but with the selected error fields added
      //
      row = getPrevStepFields( stepMeta );
      // Add to this the error fields...
      StepErrorMeta stepErrorMeta = stepMeta.getStepErrorMeta();
      row.addRowMeta( stepErrorMeta.getErrorFields() );
      // Store this row in the cache
      //
      stepsFieldsCache.put( fromToCacheEntry, row );
      return row;
    }
    // Resume the regular program...
    if ( log.isDebug() ) {
      log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FromStepALookingAtPreviousStep", stepMeta.getName(),
        String.valueOf( findNrPrevSteps( stepMeta ) ) ) );
    }
    // Recursively merge the output fields of every (non-informational) previous step.
    int nrPrevious = findNrPrevSteps( stepMeta );
    for ( int i = 0; i < nrPrevious; i++ ) {
      StepMeta prevStepMeta = findPrevStep( stepMeta, i );
      if ( monitor != null ) {
        monitor.subTask(
          BaseMessages.getString( PKG, "TransMeta.Monitor.CheckingStepTask.Title", prevStepMeta.getName() ) );
      }
      RowMetaInterface add = getStepFields( prevStepMeta, stepMeta, monitor );
      if ( add == null ) {
        add = new RowMeta();
      }
      if ( log.isDebug() ) {
        log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FoundFieldsToAdd" ) + add.toString() );
      }
      if ( i == 0 ) {
        // First input stream: take its layout wholesale.
        row.addRowMeta( add );
      } else {
        // See if the add fields are not already in the row
        for ( int x = 0; x < add.size(); x++ ) {
          ValueMetaInterface v = add.getValueMeta( x );
          ValueMetaInterface s = row.searchValueMeta( v.getName() );
          if ( s == null ) {
            row.addValueMeta( v );
          }
        }
      }
    }
    if ( nrPrevious == 0 && stepMeta.getRemoteInputSteps().size() > 0 ) {
      // Also check the remote input steps (clustering)
      // Typically, if there are any, row is still empty at this point
      // We'll also be at a starting point in the transformation
      //
      for ( RemoteStep remoteStep : stepMeta.getRemoteInputSteps() ) {
        RowMetaInterface inputFields = remoteStep.getRowMeta();
        for ( ValueMetaInterface inputField : inputFields.getValueMetaList() ) {
          if ( row.searchValueMeta( inputField.getName() ) == null ) {
            row.addValueMeta( inputField );
          }
        }
      }
    }
    // Finally, see if we need to add/modify/delete fields with this step "name"
    rowMeta = getThisStepFields( stepMeta, targetStep, row, monitor );
    // Store this row in the cache
    //
    stepsFieldsCache.put( fromToCacheEntry, rowMeta );
    return rowMeta;
  }
/**
* Find the fields that are entering a step with a certain name.
*
* @param stepname
* The name of the step queried
* @return A row containing the fields (w/ origin) entering the step
* @throws KettleStepException
* the kettle step exception
*/
public RowMetaInterface getPrevStepFields( String stepname ) throws KettleStepException {
clearStepFieldsCachce();
return getPrevStepFields( findStep( stepname ) );
}
/**
* Find the fields that are entering a certain step.
*
* @param stepMeta
* The step queried
* @return A row containing the fields (w/ origin) entering the step
* @throws KettleStepException
* the kettle step exception
*/
public RowMetaInterface getPrevStepFields( StepMeta stepMeta ) throws KettleStepException {
clearStepFieldsCachce();
return getPrevStepFields( stepMeta, null );
}
  /**
   * Finds the fields that are entering a certain step by merging the output layouts of all its
   * previous steps. Clears the step-fields cache first.
   *
   * @param stepMeta
   *          The step queried
   * @param monitor
   *          The progress monitor for progress dialog. (null if not used!)
   * @return A row containing the fields (w/ origin) entering the step, or null when stepMeta is null
   * @throws KettleStepException
   *           the kettle step exception
   */
  public RowMetaInterface getPrevStepFields( StepMeta stepMeta, ProgressMonitorListener monitor ) throws KettleStepException {
    clearStepFieldsCachce();
    RowMetaInterface row = new RowMeta();
    if ( stepMeta == null ) {
      return null;
    }
    if ( log.isDebug() ) {
      log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FromStepALookingAtPreviousStep", stepMeta.getName(),
        String.valueOf( findNrPrevSteps( stepMeta ) ) ) );
    }
    // Merge the output layout of every previous step into one row.
    for ( int i = 0; i < findNrPrevSteps( stepMeta ); i++ ) {
      StepMeta prevStepMeta = findPrevStep( stepMeta, i );
      if ( monitor != null ) {
        monitor.subTask(
          BaseMessages.getString( PKG, "TransMeta.Monitor.CheckingStepTask.Title", prevStepMeta.getName() ) );
      }
      RowMetaInterface add = getStepFields( prevStepMeta, stepMeta, monitor );
      if ( log.isDebug() ) {
        log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FoundFieldsToAdd2" ) + add.toString() );
      }
      if ( i == 0 ) {
        // we expect all input streams to be of the same layout!
        row.addRowMeta( add ); // recursive!
      } else {
        // See if the add fields are not already in the row
        for ( int x = 0; x < add.size(); x++ ) {
          ValueMetaInterface v = add.getValueMeta( x );
          ValueMetaInterface s = row.searchValueMeta( v.getName() );
          if ( s == null ) {
            row.addValueMeta( v );
          }
        }
      }
    }
    return row;
  }
/**
* Return the fields that are emitted by a step with a certain name.
*
* @param stepname
* The name of the step that's being queried.
* @param row
* A row containing the input fields or an empty row if no input is required.
* @return A Row containing the output fields.
* @throws KettleStepException
* the kettle step exception
*/
public RowMetaInterface getThisStepFields( String stepname, RowMetaInterface row ) throws KettleStepException {
return getThisStepFields( findStep( stepname ), null, row );
}
/**
* Returns the fields that are emitted by a step.
*
* @param stepMeta
* : The StepMeta object that's being queried
* @param nextStep
* : if non-null this is the next step that's call back to ask what's being sent
* @param row
* : A row containing the input fields or an empty row if no input is required.
* @return A Row containing the output fields.
* @throws KettleStepException
* the kettle step exception
*/
public RowMetaInterface getThisStepFields( StepMeta stepMeta, StepMeta nextStep, RowMetaInterface row ) throws KettleStepException {
return getThisStepFields( stepMeta, nextStep, row, null );
}
  /**
   * Returns the fields that are emitted by a step.
   *
   * @param stepMeta
   *          : The StepMeta object that's being queried
   * @param nextStep
   *          : if non-null this is the next step that's call back to ask what's being sent
   * @param row
   *          : A row containing the input fields or an empty row if no input is required.
   * @param monitor
   *          the monitor (null if progress reporting is not wanted)
   * @return A Row containing the output fields.
   * @throws KettleStepException
   *           the kettle step exception
   */
  public RowMetaInterface getThisStepFields( StepMeta stepMeta, StepMeta nextStep, RowMetaInterface row,
    ProgressMonitorListener monitor ) throws KettleStepException {
    // Then this one.
    if ( log.isDebug() ) {
      log.logDebug( BaseMessages
        .getString( PKG, "TransMeta.Log.GettingFieldsFromStep", stepMeta.getName(), stepMeta.getStepID() ) );
    }
    String name = stepMeta.getName();
    if ( monitor != null ) {
      monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.GettingFieldsFromStepTask.Title", name ) );
    }
    StepMetaInterface stepint = stepMeta.getStepMetaInterface();
    RowMetaInterface[] inform = null;
    StepMeta[] lu = getInfoStep( stepMeta );
    if ( Utils.isEmpty( lu ) ) {
      // No info-steps feed this step: ask the step itself for its table fields instead.
      inform = new RowMetaInterface[] { stepint.getTableFields(), };
    } else {
      // Collect the output layout of every info-step feeding this one.
      inform = new RowMetaInterface[lu.length];
      for ( int i = 0; i < lu.length; i++ ) {
        inform[i] = getStepFields( lu[i] );
      }
    }
    setRepositoryOnMappingSteps();
    // Go get the fields...
    //
    // First call the deprecated getFields() signature for backwards compatibility with older
    // step plugins; if it left the row untouched, fall through to the modern variant below.
    RowMetaInterface before = row.clone();
    compatibleGetStepFields( stepint, row, name, inform, nextStep, this );
    if ( !isSomethingDifferentInRow( before, row ) ) {
      // The compatibility call changed nothing: run the repository/metastore-aware getFields()
      // on the clone and return that clone instead of the caller's row.
      stepint.getFields( before, name, inform, nextStep, this, repository, metaStore );
      // pass the clone object to prevent from spoiling data by other steps
      row = before;
    }
    return row;
  }
  // Invokes the deprecated getFields() signature (no repository/metastore parameters) so that
  // legacy step plugins that only implement the old variant still report their output fields.
  @SuppressWarnings( "deprecation" )
  private void compatibleGetStepFields( StepMetaInterface stepint, RowMetaInterface row, String name,
    RowMetaInterface[] inform, StepMeta nextStep, VariableSpace space ) throws KettleStepException {
    stepint.getFields( row, name, inform, nextStep, space );
  }
private boolean isSomethingDifferentInRow( RowMetaInterface before, RowMetaInterface after ) {
if ( before.size() != after.size() ) {
return true;
}
for ( int i = 0; i < before.size(); i++ ) {
ValueMetaInterface beforeValueMeta = before.getValueMeta( i );
ValueMetaInterface afterValueMeta = after.getValueMeta( i );
if ( stringsDifferent( beforeValueMeta.getName(), afterValueMeta.getName() ) ) {
return true;
}
if ( beforeValueMeta.getType() != afterValueMeta.getType() ) {
return true;
}
if ( beforeValueMeta.getLength() != afterValueMeta.getLength() ) {
return true;
}
if ( beforeValueMeta.getPrecision() != afterValueMeta.getPrecision() ) {
return true;
}
if ( stringsDifferent( beforeValueMeta.getOrigin(), afterValueMeta.getOrigin() ) ) {
return true;
}
if ( stringsDifferent( beforeValueMeta.getComments(), afterValueMeta.getComments() ) ) {
return true;
}
if ( stringsDifferent( beforeValueMeta.getConversionMask(), afterValueMeta.getConversionMask() ) ) {
return true;
}
if ( stringsDifferent( beforeValueMeta.getStringEncoding(), afterValueMeta.getStringEncoding() ) ) {
return true;
}
if ( stringsDifferent( beforeValueMeta.getDecimalSymbol(), afterValueMeta.getDecimalSymbol() ) ) {
return true;
}
if ( stringsDifferent( beforeValueMeta.getGroupingSymbol(), afterValueMeta.getGroupingSymbol() ) ) {
return true;
}
}
return false;
}
private boolean stringsDifferent( String one, String two ) {
if ( one == null && two == null ) {
return false;
}
if ( one == null && two != null ) {
return true;
}
if ( one != null && two == null ) {
return true;
}
return !one.equals( two );
}
/**
* Set the Repository object on the Mapping step That way the mapping step can determine the output fields for
* repository hosted mappings... This is the exception to the rule so we don't pass this through the getFields()
* method. TODO: figure out a way to make this more generic.
*/
private void setRepositoryOnMappingSteps() {
for ( StepMeta step : steps ) {
if ( step.getStepMetaInterface() instanceof MappingMeta ) {
( (MappingMeta) step.getStepMetaInterface() ).setRepository( repository );
( (MappingMeta) step.getStepMetaInterface() ).setMetaStore( metaStore );
}
if ( step.getStepMetaInterface() instanceof SingleThreaderMeta ) {
( (SingleThreaderMeta) step.getStepMetaInterface() ).setRepository( repository );
( (SingleThreaderMeta) step.getStepMetaInterface() ).setMetaStore( metaStore );
}
if ( step.getStepMetaInterface() instanceof JobExecutorMeta ) {
( (JobExecutorMeta) step.getStepMetaInterface() ).setRepository( repository );
( (JobExecutorMeta) step.getStepMetaInterface() ).setMetaStore( metaStore );
}
if ( step.getStepMetaInterface() instanceof TransExecutorMeta ) {
( (TransExecutorMeta) step.getStepMetaInterface() ).setRepository( repository );
( (TransExecutorMeta) step.getStepMetaInterface() ).setMetaStore( metaStore );
}
}
}
/**
* Checks if the transformation is using the specified partition schema.
*
* @param partitionSchema
* the partition schema
* @return true if the transformation is using the partition schema, false otherwise
*/
public boolean isUsingPartitionSchema( PartitionSchema partitionSchema ) {
// Loop over all steps and see if the partition schema is used.
for ( int i = 0; i < nrSteps(); i++ ) {
StepPartitioningMeta stepPartitioningMeta = getStep( i ).getStepPartitioningMeta();
if ( stepPartitioningMeta != null ) {
PartitionSchema check = stepPartitioningMeta.getPartitionSchema();
if ( check != null && check.equals( partitionSchema ) ) {
return true;
}
}
}
return false;
}
/**
* Checks if the transformation is using a cluster schema.
*
* @return true if a cluster schema is used on one or more steps in this transformation, false otherwise
*/
public boolean isUsingAClusterSchema() {
return isUsingClusterSchema( null );
}
/**
* Checks if the transformation is using the specified cluster schema.
*
* @param clusterSchema
* the cluster schema to check
* @return true if the specified cluster schema is used on one or more steps in this transformation
*/
public boolean isUsingClusterSchema( ClusterSchema clusterSchema ) {
// Loop over all steps and see if the partition schema is used.
for ( int i = 0; i < nrSteps(); i++ ) {
ClusterSchema check = getStep( i ).getClusterSchema();
if ( check != null && ( clusterSchema == null || check.equals( clusterSchema ) ) ) {
return true;
}
}
return false;
}
/**
* Checks if the transformation is using the specified slave server.
*
* @param slaveServer
* the slave server
* @return true if the transformation is using the slave server, false otherwise
* @throws KettleException
* if any errors occur while checking for the slave server
*/
public boolean isUsingSlaveServer( SlaveServer slaveServer ) throws KettleException {
// Loop over all steps and see if the slave server is used.
for ( int i = 0; i < nrSteps(); i++ ) {
ClusterSchema clusterSchema = getStep( i ).getClusterSchema();
if ( clusterSchema != null ) {
for ( SlaveServer check : clusterSchema.getSlaveServers() ) {
if ( check.equals( slaveServer ) ) {
return true;
}
}
return true;
}
}
return false;
}
/**
* Checks if the transformation is referenced by a repository.
*
* @return true if the transformation is referenced by a repository, false otherwise
*/
public boolean isRepReference() {
return isRepReference( getFilename(), this.getName() );
}
/**
* Checks if the transformation is referenced by a file. If the transformation is not referenced by a repository, it
* is assumed to be referenced by a file.
*
* @return true if the transformation is referenced by a file, false otherwise
* @see #isRepReference()
*/
public boolean isFileReference() {
return !isRepReference( getFilename(), this.getName() );
}
/**
* Checks (using the exact filename and transformation name) if the transformation is referenced by a repository. If
* referenced by a repository, the exact filename should be empty and the exact transformation name should be
* non-empty.
*
* @param exactFilename
* the exact filename
* @param exactTransname
* the exact transformation name
* @return true if the transformation is referenced by a repository, false otherwise
*/
public static boolean isRepReference( String exactFilename, String exactTransname ) {
return Utils.isEmpty( exactFilename ) && !Utils.isEmpty( exactTransname );
}
/**
* Checks (using the exact filename and transformation name) if the transformation is referenced by a file. If
* referenced by a repository, the exact filename should be non-empty and the exact transformation name should be
* empty.
*
* @param exactFilename
* the exact filename
* @param exactTransname
* the exact transformation name
* @return true if the transformation is referenced by a file, false otherwise
* @see #isRepReference(String, String)
*/
public static boolean isFileReference( String exactFilename, String exactTransname ) {
return !isRepReference( exactFilename, exactTransname );
}
/**
* Finds the location (index) of the specified hop.
*
* @param hi
* The hop queried
* @return The location of the hop, or -1 if nothing was found.
*/
public int indexOfTransHop( TransHopMeta hi ) {
return hops.indexOf( hi );
}
/**
* Finds the location (index) of the specified step.
*
* @param stepMeta
* The step queried
* @return The location of the step, or -1 if nothing was found.
*/
public int indexOfStep( StepMeta stepMeta ) {
return steps.indexOf( stepMeta );
}
  /**
   * Gets the file type. For TransMeta, this returns a value corresponding to Transformation
   *
   * @return the file type
   * @see org.pentaho.di.core.EngineMetaInterface#getFileType()
   */
  @Override
  public String getFileType() {
    // Constant marker used by the "last used files" bookkeeping.
    return LastUsedFile.FILE_TYPE_TRANSFORMATION;
  }
  /**
   * Gets the transformation filter names.
   *
   * @return the filter names (display labels for the file-open/save dialog filters)
   * @see org.pentaho.di.core.EngineMetaInterface#getFilterNames()
   */
  @Override
  public String[] getFilterNames() {
    return Const.getTransformationFilterNames();
  }
  /**
   * Gets the transformation filter extensions. For TransMeta, this method returns the value of
   * {@link Const#STRING_TRANS_FILTER_EXT}
   *
   * @return the filter extensions (paired with {@link #getFilterNames()})
   * @see org.pentaho.di.core.EngineMetaInterface#getFilterExtensions()
   */
  @Override
  public String[] getFilterExtensions() {
    return Const.STRING_TRANS_FILTER_EXT;
  }
  /**
   * Gets the default extension for a transformation. For TransMeta, this method returns the value of
   * {@link Const#STRING_TRANS_DEFAULT_EXT}
   *
   * @return the default extension
   * @see org.pentaho.di.core.EngineMetaInterface#getDefaultExtension()
   */
  @Override
  public String getDefaultExtension() {
    return Const.STRING_TRANS_DEFAULT_EXT;
  }
  /**
   * Gets the XML representation of this transformation.
   *
   * @return the XML representation of this transformation
   * @throws KettleException
   *           if any errors occur during generation of the XML
   * @see org.pentaho.di.core.xml.XMLInterface#getXML()
   */
  @Override
  public String getXML() throws KettleException {
    // Serialize everything: steps, database connections, slave servers, clusters and partitions.
    return getXML( true, true, true, true, true );
  }
  /**
   * Gets the XML representation of this transformation, including or excluding step, database, slave server, cluster,
   * or partition information as specified by the parameters
   *
   * @param includeSteps
   *          whether to include step data
   * @param includeDatabase
   *          whether to include database data
   * @param includeSlaves
   *          whether to include slave server data
   * @param includeClusters
   *          whether to include cluster data
   * @param includePartitions
   *          whether to include partition data
   * @return the XML representation of this transformation
   * @throws KettleException
   *           if any errors occur during generation of the XML
   */
  public String getXML( boolean includeSteps, boolean includeDatabase, boolean includeSlaves, boolean includeClusters,
    boolean includePartitions ) throws KettleException {
    //Clear the embedded named clusters. We will be repopulating from steps that used named clusters
    getNamedClusterEmbedManager().clear();
    // Props may not be initialized (e.g. headless use); guard before reading preferences.
    Props props = null;
    if ( Props.isInitialized() ) {
      props = Props.getInstance();
    }
    StringBuilder retval = new StringBuilder( 800 );
    retval.append( XMLHandler.openTag( XML_TAG ) ).append( Const.CR );
    // The <info> section: general transformation metadata.
    retval.append( " " ).append( XMLHandler.openTag( XML_TAG_INFO ) ).append( Const.CR );
    retval.append( " " ).append( XMLHandler.addTagValue( "name", name ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "description", description ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "extended_description", extendedDescription ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "trans_version", trans_version ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "trans_type", transformationType.getCode() ) );
    if ( trans_status >= 0 ) {
      retval.append( " " ).append( XMLHandler.addTagValue( "trans_status", trans_status ) );
    }
    retval.append( " " ).append( XMLHandler.addTagValue( "directory",
      directory != null ? directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR ) );
    // Named parameters with their default values and descriptions.
    retval.append( " " ).append( XMLHandler.openTag( XML_TAG_PARAMETERS ) ).append( Const.CR );
    String[] parameters = listParameters();
    for ( int idx = 0; idx < parameters.length; idx++ ) {
      retval.append( " " ).append( XMLHandler.openTag( "parameter" ) ).append( Const.CR );
      retval.append( " " ).append( XMLHandler.addTagValue( "name", parameters[idx] ) );
      retval.append( " " )
        .append( XMLHandler.addTagValue( "default_value", getParameterDefault( parameters[idx] ) ) );
      retval.append( " " )
        .append( XMLHandler.addTagValue( "description", getParameterDescription( parameters[idx] ) ) );
      retval.append( " " ).append( XMLHandler.closeTag( "parameter" ) ).append( Const.CR );
    }
    retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_PARAMETERS ) ).append( Const.CR );
    retval.append( " " ).append( XMLHandler.openTag( "log" ) ).append( Const.CR );
    // Add the metadata for the various logging tables
    //
    retval.append( transLogTable.getXML() );
    retval.append( performanceLogTable.getXML() );
    retval.append( channelLogTable.getXML() );
    retval.append( stepLogTable.getXML() );
    retval.append( metricsLogTable.getXML() );
    retval.append( " " ).append( XMLHandler.closeTag( "log" ) ).append( Const.CR );
    // Max-date settings (incremental date range handling).
    retval.append( " " ).append( XMLHandler.openTag( "maxdate" ) ).append( Const.CR );
    retval.append( " " )
      .append( XMLHandler.addTagValue( "connection", maxDateConnection == null ? "" : maxDateConnection.getName() ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "table", maxDateTable ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "field", maxDateField ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "offset", maxDateOffset ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "maxdiff", maxDateDifference ) );
    retval.append( " " ).append( XMLHandler.closeTag( "maxdate" ) ).append( Const.CR );
    // Runtime tuning options.
    retval.append( " " ).append( XMLHandler.addTagValue( "size_rowset", sizeRowset ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "sleep_time_empty", sleepTimeEmpty ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "sleep_time_full", sleepTimeFull ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "unique_connections", usingUniqueConnections ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "feedback_shown", feedbackShown ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "feedback_size", feedbackSize ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "using_thread_priorities", usingThreadPriorityManagment ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "shared_objects_file", sharedObjectsFile ) );
    // Performance monitoring
    //
    retval.append( " " )
      .append( XMLHandler.addTagValue( "capture_step_performance", capturingStepPerformanceSnapShots ) );
    retval.append( " " )
      .append( XMLHandler.addTagValue( "step_performance_capturing_delay", stepPerformanceCapturingDelay ) );
    retval.append( " " )
      .append( XMLHandler.addTagValue( "step_performance_capturing_size_limit", stepPerformanceCapturingSizeLimit ) );
    retval.append( " " ).append( XMLHandler.openTag( XML_TAG_DEPENDENCIES ) ).append( Const.CR );
    for ( int i = 0; i < nrDependencies(); i++ ) {
      TransDependency td = getDependency( i );
      retval.append( td.getXML() );
    }
    retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_DEPENDENCIES ) ).append( Const.CR );
    // The partitioning schemas...
    //
    if ( includePartitions ) {
      retval.append( " " ).append( XMLHandler.openTag( XML_TAG_PARTITIONSCHEMAS ) ).append( Const.CR );
      for ( int i = 0; i < partitionSchemas.size(); i++ ) {
        PartitionSchema partitionSchema = partitionSchemas.get( i );
        retval.append( partitionSchema.getXML() );
      }
      retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_PARTITIONSCHEMAS ) ).append( Const.CR );
    }
    // The slave servers...
    //
    if ( includeSlaves ) {
      retval.append( " " ).append( XMLHandler.openTag( XML_TAG_SLAVESERVERS ) ).append( Const.CR );
      for ( int i = 0; i < slaveServers.size(); i++ ) {
        SlaveServer slaveServer = slaveServers.get( i );
        retval.append( slaveServer.getXML() );
      }
      retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_SLAVESERVERS ) ).append( Const.CR );
    }
    // The cluster schemas...
    //
    if ( includeClusters ) {
      retval.append( " " ).append( XMLHandler.openTag( XML_TAG_CLUSTERSCHEMAS ) ).append( Const.CR );
      for ( int i = 0; i < clusterSchemas.size(); i++ ) {
        ClusterSchema clusterSchema = clusterSchemas.get( i );
        retval.append( clusterSchema.getXML() );
      }
      retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_CLUSTERSCHEMAS ) ).append( Const.CR );
    }
    // Audit fields and the (possibly encoded) session key.
    retval.append( " " ).append( XMLHandler.addTagValue( "created_user", createdUser ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "created_date", XMLHandler.date2string( createdDate ) ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "modified_user", modifiedUser ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "modified_date", XMLHandler.date2string( modifiedDate ) ) );
    try {
      retval.append( " " ).append( XMLHandler.addTagValue( "key_for_session_key", keyForSessionKey ) );
    } catch ( Exception ex ) {
      // Best-effort: a failed key encoding is logged but must not abort serialization.
      log.logError( "Unable to decode key", ex );
    }
    retval.append( " " ).append( XMLHandler.addTagValue( "is_key_private", isKeyPrivate ) );
    retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_INFO ) ).append( Const.CR );
    // Notepads (canvas annotations).
    retval.append( " " ).append( XMLHandler.openTag( XML_TAG_NOTEPADS ) ).append( Const.CR );
    if ( notes != null ) {
      for ( int i = 0; i < nrNotes(); i++ ) {
        NotePadMeta ni = getNote( i );
        retval.append( ni.getXML() );
      }
    }
    retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_NOTEPADS ) ).append( Const.CR );
    // The database connections...
    if ( includeDatabase ) {
      for ( int i = 0; i < nrDatabases(); i++ ) {
        DatabaseMeta dbMeta = getDatabase( i );
        // User preference may restrict the export to connections actually used by a step.
        if ( props != null && props.areOnlyUsedConnectionsSavedToXML() ) {
          if ( isDatabaseConnectionUsed( dbMeta ) ) {
            retval.append( dbMeta.getXML() );
          }
        } else {
          retval.append( dbMeta.getXML() );
        }
      }
    }
    if ( includeSteps ) {
      // Hop order first, then the steps themselves.
      retval.append( " " ).append( XMLHandler.openTag( XML_TAG_ORDER ) ).append( Const.CR );
      for ( int i = 0; i < nrTransHops(); i++ ) {
        TransHopMeta transHopMeta = getTransHop( i );
        retval.append( transHopMeta.getXML() );
      }
      retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_ORDER ) ).append( Const.CR );
      /* The steps... */
      for ( int i = 0; i < nrSteps(); i++ ) {
        StepMeta stepMeta = getStep( i );
        if ( stepMeta.getStepMetaInterface() instanceof HasRepositoryInterface ) {
          ( (HasRepositoryInterface) stepMeta.getStepMetaInterface() ).setRepository( repository );
        }
        retval.append( stepMeta.getXML() );
      }
      /* The error handling metadata on the steps */
      retval.append( " " ).append( XMLHandler.openTag( XML_TAG_STEP_ERROR_HANDLING ) ).append( Const.CR );
      for ( int i = 0; i < nrSteps(); i++ ) {
        StepMeta stepMeta = getStep( i );
        if ( stepMeta.getStepErrorMeta() != null ) {
          retval.append( stepMeta.getStepErrorMeta().getXML() );
        }
      }
      retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_STEP_ERROR_HANDLING ) ).append( Const.CR );
    }
    // The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment.
    retval.append( slaveStepCopyPartitionDistribution.getXML() );
    // Is this a slave transformation or not?
    retval.append( " " ).append( XMLHandler.addTagValue( "slave_transformation", slaveTransformation ) );
    // Also store the attribute groups
    //
    retval.append( AttributesUtil.getAttributesXml( attributesMap ) );
    retval.append( XMLHandler.closeTag( XML_TAG ) ).append( Const.CR );
    return XMLFormatter.format( retval.toString() );
  }
  /**
   * Parses a file containing the XML that describes the transformation. No default connections are loaded since no
   * repository is available at this time. Since the filename is set, internal variables are being set that relate to
   * this.
   *
   * @param fname
   *          The filename
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public TransMeta( String fname ) throws KettleXMLException, KettleMissingPluginsException {
    // Delegate with internal-variable initialization enabled.
    this( fname, true );
  }
  /**
   * Parses a file containing the XML that describes the transformation. No default connections are loaded since no
   * repository is available at this time. Since the filename is set, variables are set in the specified variable space
   * that relate to this.
   *
   * @param fname
   *          The filename
   * @param parentVariableSpace
   *          the parent variable space
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public TransMeta( String fname, VariableSpace parentVariableSpace ) throws KettleXMLException,
    KettleMissingPluginsException {
    // Delegate: no repository, internal variables enabled, caller-supplied variable space.
    this( fname, null, true, parentVariableSpace );
  }
  /**
   * Parses a file containing the XML that describes the transformation. No default connections are loaded since no
   * repository is available at this time.
   *
   * @param fname
   *          The filename
   * @param setInternalVariables
   *          true if you want to set the internal variables based on this transformation information
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public TransMeta( String fname, boolean setInternalVariables ) throws KettleXMLException,
    KettleMissingPluginsException {
    // Delegate with no repository.
    this( fname, null, setInternalVariables );
  }
  /**
   * Parses a file containing the XML that describes the transformation.
   *
   * @param fname
   *          The filename
   * @param rep
   *          The repository to load the default set of connections from, null if no repository is available
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public TransMeta( String fname, Repository rep ) throws KettleXMLException, KettleMissingPluginsException {
    // Delegate with internal-variable initialization enabled.
    this( fname, rep, true );
  }
  /**
   * Parses a file containing the XML that describes the transformation.
   *
   * @param fname
   *          The filename
   * @param rep
   *          The repository to load the default set of connections from, null if no repository is available
   * @param setInternalVariables
   *          true if you want to set the internal variables based on this transformation information
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public TransMeta( String fname, Repository rep, boolean setInternalVariables ) throws KettleXMLException,
    KettleMissingPluginsException {
    // Delegate with no parent variable space.
    this( fname, rep, setInternalVariables, null );
  }
  /**
   * Parses a file containing the XML that describes the transformation.
   *
   * @param fname
   *          The filename
   * @param rep
   *          The repository to load the default set of connections from, null if no repository is available
   * @param setInternalVariables
   *          true if you want to set the internal variables based on this transformation information
   * @param parentVariableSpace
   *          the parent variable space to use during TransMeta construction
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public TransMeta( String fname, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace ) throws KettleXMLException, KettleMissingPluginsException {
    // Delegate with no overwrite prompter.
    this( fname, rep, setInternalVariables, parentVariableSpace, null );
  }
  /**
   * Parses a file containing the XML that describes the transformation.
   *
   * @param fname
   *          The filename
   * @param rep
   *          The repository to load the default set of connections from, null if no repository is available
   * @param setInternalVariables
   *          true if you want to set the internal variables based on this transformation information
   * @param parentVariableSpace
   *          the parent variable space to use during TransMeta construction
   * @param prompter
   *          the changed/replace listener or null if there is none
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public TransMeta( String fname, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace,
    OverwritePrompter prompter ) throws KettleXMLException, KettleMissingPluginsException {
    // Delegate to the fully-specified constructor with no metastore.
    this( fname, null, rep, setInternalVariables, parentVariableSpace, prompter );
  }
/**
* Parses a file containing the XML that describes the transformation.
*
* @param fname
* The filename
* @param metaStore
* the metadata store to reference (or null if there is none)
* @param rep
* The repository to load the default set of connections from, null if no repository is available
* @param setInternalVariables
* true if you want to set the internal variables based on this transformation information
* @param parentVariableSpace
* the parent variable space to use during TransMeta construction
* @param prompter
* the changed/replace listener or null if there is none
* @throws KettleXMLException
* if any errors occur during parsing of the specified file
* @throws KettleMissingPluginsException
* in case missing plugins were found (details are in the exception in that case)
*/
public TransMeta( String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables,
VariableSpace parentVariableSpace, OverwritePrompter prompter )
throws KettleXMLException, KettleMissingPluginsException {
this.metaStore = metaStore;
this.repository = rep;
// OK, try to load using the VFS stuff...
Document doc = null;
try {
doc = XMLHandler.loadXMLFile( KettleVFS.getFileObject( fname, parentVariableSpace ) );
} catch ( KettleFileException e ) {
throw new KettleXMLException( BaseMessages.getString(
PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", fname ), e );
}
if ( doc != null ) {
// Root node:
Node transnode = XMLHandler.getSubNode( doc, XML_TAG );
if ( transnode == null ) {
throw new KettleXMLException( BaseMessages.getString(
PKG, "TransMeta.Exception.NotValidTransformationXML", fname ) );
}
// Load from this node...
loadXML( transnode, fname, metaStore, rep, setInternalVariables, parentVariableSpace, prompter );
} else {
throw new KettleXMLException( BaseMessages.getString(
PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", fname ) );
}
}
/**
* Instantiates a new transformation meta-data object.
*
* @param xmlStream
* the XML input stream from which to read the transformation definition
* @param rep
* the repository
* @param setInternalVariables
* whether to set internal variables as a result of the creation
* @param parentVariableSpace
* the parent variable space
* @param prompter
* a GUI component that will prompt the user if the new transformation will overwrite an existing one
* @throws KettleXMLException
* if any errors occur during parsing of the specified stream
* @throws KettleMissingPluginsException
* in case missing plugins were found (details are in the exception in that case)
*/
public TransMeta( InputStream xmlStream, Repository rep, boolean setInternalVariables,
VariableSpace parentVariableSpace, OverwritePrompter prompter )
throws KettleXMLException, KettleMissingPluginsException {
Document doc = XMLHandler.loadXMLFile( xmlStream, null, false, false );
Node transnode = XMLHandler.getSubNode( doc, XML_TAG );
loadXML( transnode, rep, setInternalVariables, parentVariableSpace, prompter );
}
  /**
   * Parse a file containing the XML that describes the transformation. Specify a repository to load default list of
   * database connections from and to reference in mappings etc.
   *
   * @param transnode
   *          The XML node to load from
   * @param rep
   *          the repository to reference.
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public TransMeta( Node transnode, Repository rep ) throws KettleXMLException, KettleMissingPluginsException {
    // Internal variables are NOT set for this node-based constructor.
    loadXML( transnode, rep, false );
  }
  /**
   * Parses an XML DOM (starting at the specified Node) that describes the transformation.
   *
   * @param transnode
   *          The XML node to load from
   * @param rep
   *          The repository to load the default list of database connections from (null if no repository is available)
   * @param setInternalVariables
   *          true if you want to set the internal variables based on this transformation information
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public void loadXML( Node transnode, Repository rep, boolean setInternalVariables ) throws KettleXMLException,
    KettleMissingPluginsException {
    // Delegate with no parent variable space.
    loadXML( transnode, rep, setInternalVariables, null );
  }
  /**
   * Parses an XML DOM (starting at the specified Node) that describes the transformation.
   *
   * @param transnode
   *          The XML node to load from
   * @param rep
   *          The repository to load the default list of database connections from (null if no repository is available)
   * @param setInternalVariables
   *          true if you want to set the internal variables based on this transformation information
   * @param parentVariableSpace
   *          the parent variable space to use during TransMeta construction
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public void loadXML( Node transnode, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace )
    throws KettleXMLException, KettleMissingPluginsException {
    // Delegate with no overwrite prompter.
    loadXML( transnode, rep, setInternalVariables, parentVariableSpace, null );
  }
  /**
   * Parses an XML DOM (starting at the specified Node) that describes the transformation.
   *
   * @param transnode
   *          The XML node to load from
   * @param rep
   *          The repository to load the default list of database connections from (null if no repository is available)
   * @param setInternalVariables
   *          true if you want to set the internal variables based on this transformation information
   * @param parentVariableSpace
   *          the parent variable space to use during TransMeta construction
   * @param prompter
   *          the changed/replace listener or null if there is none
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public void loadXML( Node transnode, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace,
    OverwritePrompter prompter ) throws KettleXMLException, KettleMissingPluginsException {
    loadXML( transnode, null, rep, setInternalVariables, parentVariableSpace, prompter ); // no filename available
  }
  /**
   * Parses an XML DOM (starting at the specified Node) that describes the transformation.
   *
   * @param transnode
   *          The XML node to load from
   * @param fname
   *          The filename
   * @param rep
   *          The repository to load the default list of database connections from (null if no repository is available)
   * @param setInternalVariables
   *          true if you want to set the internal variables based on this transformation information
   * @param parentVariableSpace
   *          the parent variable space to use during TransMeta construction
   * @param prompter
   *          the changed/replace listener or null if there is none
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public void loadXML( Node transnode, String fname, Repository rep, boolean setInternalVariables,
    VariableSpace parentVariableSpace, OverwritePrompter prompter )
    throws KettleXMLException, KettleMissingPluginsException {
    // NOTE(review): passing a null metaStore here makes the 7-arg overload overwrite
    // this.metaStore with null — confirm that callers of this overload expect that.
    loadXML( transnode, fname, null, rep, setInternalVariables, parentVariableSpace, prompter );
  }
  /**
   * Parses an XML DOM (starting at the specified Node) that describes the transformation.
   *
   * @param transnode
   *          The XML node to load from
   * @param fname
   *          The filename
   * @param metaStore
   *          the meta store remembered as this transformation's primary meta store (may be null)
   * @param rep
   *          The repository to load the default list of database connections from (null if no repository is available)
   * @param setInternalVariables
   *          true if you want to set the internal variables based on this transformation information
   * @param parentVariableSpace
   *          the parent variable space to use during TransMeta construction
   * @param prompter
   *          the changed/replace listener or null if there is none
   * @throws KettleXMLException
   *           if any errors occur during parsing of the specified file
   * @throws KettleMissingPluginsException
   *           in case missing plugins were found (details are in the exception in that case)
   */
  public void loadXML( Node transnode, String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables,
    VariableSpace parentVariableSpace, OverwritePrompter prompter )
    throws KettleXMLException, KettleMissingPluginsException {
    // Accumulates details about missing step plugins encountered while loading;
    // if non-empty at the end it is thrown from the finally block below.
    KettleMissingPluginsException
      missingPluginsException =
      new KettleMissingPluginsException(
        BaseMessages.getString( PKG, "TransMeta.MissingPluginsFoundWhileLoadingTransformation.Exception" ) );
    this.metaStore = metaStore; // Remember this as the primary meta store.
    try {
      // Props is optional: only consulted for the overwrite prompts further down when it has been initialized.
      Props props = null;
      if ( Props.isInitialized() ) {
        props = Props.getInstance();
      }
      initializeVariablesFrom( parentVariableSpace );
      try {
        // Clear the transformation
        clear();
        // If we are not using a repository, we are getting the transformation from a file
        // Set the filename here so it can be used in variables for ALL aspects of the transformation FIX: PDI-8890
        if ( null == rep ) {
          setFilename( fname );
        }
        // Read all the database connections from the repository to make sure that we don't overwrite any there by
        // loading from XML.
        //
        try {
          sharedObjectsFile = XMLHandler.getTagValue( transnode, "info", "shared_objects_file" );
          sharedObjects = rep != null ? rep.readTransSharedObjects( this ) : readSharedObjects();
        } catch ( Exception e ) {
          // Best-effort: a failure to read shared objects is logged but does not abort the load.
          log
            .logError( BaseMessages.getString( PKG, "TransMeta.ErrorReadingSharedObjects.Message", e.toString() ) );
          log.logError( Const.getStackTracker( e ) );
        }
        // Load the database connections, slave servers, cluster schemas & partition schemas into this object.
        //
        importFromMetaStore();
        // Handle connections
        int n = XMLHandler.countNodes( transnode, DatabaseMeta.XML_TAG );
        Set<String> privateTransformationDatabases = new HashSet<>( n );
        if ( log.isDebug() ) {
          log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.WeHaveConnections", String.valueOf( n ) ) );
        }
        for ( int i = 0; i < n; i++ ) {
          if ( log.isDebug() ) {
            log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.LookingAtConnection" ) + i );
          }
          Node nodecon = XMLHandler.getSubNodeByNr( transnode, DatabaseMeta.XML_TAG, i );
          DatabaseMeta dbcon = new DatabaseMeta( nodecon );
          dbcon.shareVariablesWith( this );
          // Connections that are not shared objects are tracked as private to this transformation.
          if ( !dbcon.isShared() ) {
            privateTransformationDatabases.add( dbcon.getName() );
          }
          DatabaseMeta exist = findDatabase( dbcon.getName() );
          if ( exist == null ) {
            addDatabase( dbcon );
          } else {
            if ( !exist.isShared() ) { // otherwise, we just keep the shared connection.
              if ( shouldOverwrite( prompter, props, BaseMessages.getString( PKG,
                "TransMeta.Message.OverwriteConnectionYN", dbcon.getName() ), BaseMessages.getString( PKG,
                "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) {
                // Replace at the same index to keep the connection ordering stable.
                int idx = indexOfDatabase( exist );
                removeDatabase( idx );
                addDatabase( idx, dbcon );
              }
            }
          }
        }
        setPrivateDatabases( privateTransformationDatabases );
        // Read the notes...
        Node notepadsnode = XMLHandler.getSubNode( transnode, XML_TAG_NOTEPADS );
        int nrnotes = XMLHandler.countNodes( notepadsnode, NotePadMeta.XML_TAG );
        for ( int i = 0; i < nrnotes; i++ ) {
          Node notepadnode = XMLHandler.getSubNodeByNr( notepadsnode, NotePadMeta.XML_TAG, i );
          NotePadMeta ni = new NotePadMeta( notepadnode );
          notes.add( ni );
        }
        // Handle Steps
        int s = XMLHandler.countNodes( transnode, StepMeta.XML_TAG );
        if ( log.isDebug() ) {
          log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.ReadingSteps" ) + s + " steps..." );
        }
        for ( int i = 0; i < s; i++ ) {
          Node stepnode = XMLHandler.getSubNodeByNr( transnode, StepMeta.XML_TAG, i );
          if ( log.isDebug() ) {
            log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.LookingAtStep" ) + i );
          }
          StepMeta stepMeta = new StepMeta( stepnode, databases, metaStore );
          stepMeta.setParentTransMeta( this ); // for tracing, retain hierarchy
          // Steps whose plugin is not installed come back as "missing" placeholders; record them.
          if ( stepMeta.isMissing() ) {
            addMissingTrans( (MissingTrans) stepMeta.getStepMetaInterface() );
          }
          // Check if the step exists and if it's a shared step.
          // If so, then we will keep the shared version, not this one.
          // The stored XML is only for backup purposes.
          //
          StepMeta check = findStep( stepMeta.getName() );
          if ( check != null ) {
            if ( !check.isShared() ) {
              // Don't overwrite shared objects
              addOrReplaceStep( stepMeta );
            } else {
              check.setDraw( stepMeta.isDrawn() ); // Just keep the drawn flag and location
              check.setLocation( stepMeta.getLocation() );
            }
          } else {
            addStep( stepMeta ); // simply add it.
          }
        }
        // Read the error handling code of the steps...
        //
        Node errorHandlingNode = XMLHandler.getSubNode( transnode, XML_TAG_STEP_ERROR_HANDLING );
        int nrErrorHandlers = XMLHandler.countNodes( errorHandlingNode, StepErrorMeta.XML_ERROR_TAG );
        for ( int i = 0; i < nrErrorHandlers; i++ ) {
          Node stepErrorMetaNode = XMLHandler.getSubNodeByNr( errorHandlingNode, StepErrorMeta.XML_ERROR_TAG, i );
          StepErrorMeta stepErrorMeta = new StepErrorMeta( this, stepErrorMetaNode, steps );
          if ( stepErrorMeta.getSourceStep() != null ) {
            stepErrorMeta.getSourceStep().setStepErrorMeta( stepErrorMeta ); // a bit of a trick, I know.
          }
        }
        // Have all StreamValueLookups, etc. reference the correct source steps...
        //
        for ( int i = 0; i < nrSteps(); i++ ) {
          StepMeta stepMeta = getStep( i );
          StepMetaInterface sii = stepMeta.getStepMetaInterface();
          if ( sii != null ) {
            sii.searchInfoAndTargetSteps( steps );
          }
        }
        // Handle Hops
        //
        Node ordernode = XMLHandler.getSubNode( transnode, XML_TAG_ORDER );
        n = XMLHandler.countNodes( ordernode, TransHopMeta.XML_HOP_TAG );
        if ( log.isDebug() ) {
          log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.WeHaveHops" ) + n + " hops..." );
        }
        for ( int i = 0; i < n; i++ ) {
          if ( log.isDebug() ) {
            log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.LookingAtHop" ) + i );
          }
          Node hopnode = XMLHandler.getSubNodeByNr( ordernode, TransHopMeta.XML_HOP_TAG, i );
          TransHopMeta hopinf = new TransHopMeta( hopnode, steps );
          // Mark the hop as an error hop when it matches a source/target pair in the error-handling section.
          hopinf.setErrorHop( isErrorNode( errorHandlingNode, hopnode ) );
          addTransHop( hopinf );
        }
        //
        // get transformation info:
        //
        Node infonode = XMLHandler.getSubNode( transnode, XML_TAG_INFO );
        // Name
        //
        setName( XMLHandler.getTagValue( infonode, "name" ) );
        // description
        //
        description = XMLHandler.getTagValue( infonode, "description" );
        // extended description
        //
        extendedDescription = XMLHandler.getTagValue( infonode, "extended_description" );
        // trans version
        //
        trans_version = XMLHandler.getTagValue( infonode, "trans_version" );
        // trans status
        //
        trans_status = Const.toInt( XMLHandler.getTagValue( infonode, "trans_status" ), -1 );
        String transTypeCode = XMLHandler.getTagValue( infonode, "trans_type" );
        transformationType = TransformationType.getTransformationTypeByCode( transTypeCode );
        // Optionally load the repository directory...
        //
        if ( rep != null ) {
          String directoryPath = XMLHandler.getTagValue( infonode, "directory" );
          if ( directoryPath != null ) {
            directory = rep.findDirectory( directoryPath );
            if ( directory == null ) { // not found
              directory = new RepositoryDirectory(); // The root as default
            }
          }
        }
        // Read logging table information
        //
        Node logNode = XMLHandler.getSubNode( infonode, "log" );
        if ( logNode != null ) {
          // Backward compatibility...
          //
          Node transLogNode = XMLHandler.getSubNode( logNode, TransLogTable.XML_TAG );
          if ( transLogNode == null ) {
            // Old-style <log> block without a nested trans-log-table: map the legacy tags field by field.
            //
            transLogTable.findField( TransLogTable.ID.LINES_READ )
              .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "read" ) ) );
            transLogTable.findField( TransLogTable.ID.LINES_WRITTEN )
              .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "write" ) ) );
            transLogTable.findField( TransLogTable.ID.LINES_INPUT )
              .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "input" ) ) );
            transLogTable.findField( TransLogTable.ID.LINES_OUTPUT )
              .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "output" ) ) );
            transLogTable.findField( TransLogTable.ID.LINES_UPDATED )
              .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "update" ) ) );
            transLogTable.findField( TransLogTable.ID.LINES_REJECTED )
              .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "rejected" ) ) );
            transLogTable.setConnectionName( XMLHandler.getTagValue( infonode, "log", "connection" ) );
            transLogTable.setSchemaName( XMLHandler.getTagValue( infonode, "log", "schema" ) );
            transLogTable.setTableName( XMLHandler.getTagValue( infonode, "log", "table" ) );
            transLogTable.findField( TransLogTable.ID.ID_BATCH )
              .setEnabled( "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "log", "use_batchid" ) ) );
            transLogTable.findField( TransLogTable.ID.LOG_FIELD )
              .setEnabled( "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "log", "USE_LOGFIELD" ) ) );
            transLogTable.setLogSizeLimit( XMLHandler.getTagValue( infonode, "log", "size_limit_lines" ) );
            transLogTable.setLogInterval( XMLHandler.getTagValue( infonode, "log", "interval" ) );
            transLogTable.findField( TransLogTable.ID.CHANNEL_ID ).setEnabled( false );
            transLogTable.findField( TransLogTable.ID.LINES_REJECTED ).setEnabled( false );
            performanceLogTable.setConnectionName( transLogTable.getConnectionName() );
            performanceLogTable.setTableName( XMLHandler.getTagValue( infonode, "log", "step_performance_table" ) );
          } else {
            transLogTable.loadXML( transLogNode, databases, steps );
          }
          Node perfLogNode = XMLHandler.getSubNode( logNode, PerformanceLogTable.XML_TAG );
          if ( perfLogNode != null ) {
            performanceLogTable.loadXML( perfLogNode, databases, steps );
          }
          Node channelLogNode = XMLHandler.getSubNode( logNode, ChannelLogTable.XML_TAG );
          if ( channelLogNode != null ) {
            channelLogTable.loadXML( channelLogNode, databases, steps );
          }
          Node stepLogNode = XMLHandler.getSubNode( logNode, StepLogTable.XML_TAG );
          if ( stepLogNode != null ) {
            stepLogTable.loadXML( stepLogNode, databases, steps );
          }
          Node metricsLogNode = XMLHandler.getSubNode( logNode, MetricsLogTable.XML_TAG );
          if ( metricsLogNode != null ) {
            metricsLogTable.loadXML( metricsLogNode, databases, steps );
          }
        }
        // Maxdate range options...
        String maxdatcon = XMLHandler.getTagValue( infonode, "maxdate", "connection" );
        maxDateConnection = findDatabase( maxdatcon );
        maxDateTable = XMLHandler.getTagValue( infonode, "maxdate", "table" );
        maxDateField = XMLHandler.getTagValue( infonode, "maxdate", "field" );
        String offset = XMLHandler.getTagValue( infonode, "maxdate", "offset" );
        maxDateOffset = Const.toDouble( offset, 0.0 );
        String mdiff = XMLHandler.getTagValue( infonode, "maxdate", "maxdiff" );
        maxDateDifference = Const.toDouble( mdiff, 0.0 );
        // Check the dependencies as far as dates are concerned...
        // We calculate BEFORE we run the MAX of these dates
        // If the date is larger then enddate, startdate is set to MIN_DATE
        //
        Node depsNode = XMLHandler.getSubNode( infonode, XML_TAG_DEPENDENCIES );
        int nrDeps = XMLHandler.countNodes( depsNode, TransDependency.XML_TAG );
        for ( int i = 0; i < nrDeps; i++ ) {
          Node depNode = XMLHandler.getSubNodeByNr( depsNode, TransDependency.XML_TAG, i );
          TransDependency transDependency = new TransDependency( depNode, databases );
          // Dependencies without both a database and a field name are silently dropped.
          if ( transDependency.getDatabase() != null && transDependency.getFieldname() != null ) {
            addDependency( transDependency );
          }
        }
        // Read the named parameters.
        Node paramsNode = XMLHandler.getSubNode( infonode, XML_TAG_PARAMETERS );
        int nrParams = XMLHandler.countNodes( paramsNode, "parameter" );
        for ( int i = 0; i < nrParams; i++ ) {
          Node paramNode = XMLHandler.getSubNodeByNr( paramsNode, "parameter", i );
          String paramName = XMLHandler.getTagValue( paramNode, "name" );
          String defaultValue = XMLHandler.getTagValue( paramNode, "default_value" );
          String descr = XMLHandler.getTagValue( paramNode, "description" );
          addParameterDefinition( paramName, defaultValue, descr );
        }
        // Read the partitioning schemas
        //
        Node partSchemasNode = XMLHandler.getSubNode( infonode, XML_TAG_PARTITIONSCHEMAS );
        int nrPartSchemas = XMLHandler.countNodes( partSchemasNode, PartitionSchema.XML_TAG );
        for ( int i = 0; i < nrPartSchemas; i++ ) {
          Node partSchemaNode = XMLHandler.getSubNodeByNr( partSchemasNode, PartitionSchema.XML_TAG, i );
          PartitionSchema partitionSchema = new PartitionSchema( partSchemaNode );
          // Check if the step exists and if it's a shared step.
          // If so, then we will keep the shared version, not this one.
          // The stored XML is only for backup purposes.
          //
          PartitionSchema check = findPartitionSchema( partitionSchema.getName() );
          if ( check != null ) {
            if ( !check.isShared() ) {
              // we don't overwrite shared objects.
              if ( shouldOverwrite( prompter, props, BaseMessages
                .getString( PKG, "TransMeta.Message.OverwritePartitionSchemaYN", partitionSchema.getName() ),
                BaseMessages.getString( PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) {
                addOrReplacePartitionSchema( partitionSchema );
              }
            }
          } else {
            partitionSchemas.add( partitionSchema );
          }
        }
        // Have all step partitioning meta-data reference the correct schemas that we just loaded
        //
        for ( int i = 0; i < nrSteps(); i++ ) {
          StepPartitioningMeta stepPartitioningMeta = getStep( i ).getStepPartitioningMeta();
          if ( stepPartitioningMeta != null ) {
            stepPartitioningMeta.setPartitionSchemaAfterLoading( partitionSchemas );
          }
          StepPartitioningMeta targetStepPartitioningMeta = getStep( i ).getTargetStepPartitioningMeta();
          if ( targetStepPartitioningMeta != null ) {
            targetStepPartitioningMeta.setPartitionSchemaAfterLoading( partitionSchemas );
          }
        }
        // Read the slave servers...
        //
        Node slaveServersNode = XMLHandler.getSubNode( infonode, XML_TAG_SLAVESERVERS );
        int nrSlaveServers = XMLHandler.countNodes( slaveServersNode, SlaveServer.XML_TAG );
        for ( int i = 0; i < nrSlaveServers; i++ ) {
          Node slaveServerNode = XMLHandler.getSubNodeByNr( slaveServersNode, SlaveServer.XML_TAG, i );
          SlaveServer slaveServer = new SlaveServer( slaveServerNode );
          if ( slaveServer.getName() == null ) {
            // NOTE(review): getName() is null here, so the logged message will contain "null";
            // consider logging something identifying from the XML node instead.
            log.logError( BaseMessages.getString( PKG, "TransMeta.Log.WarningWhileCreationSlaveServer", slaveServer.getName() ) );
            continue;
          }
          slaveServer.shareVariablesWith( this );
          // Check if the object exists and if it's a shared object.
          // If so, then we will keep the shared version, not this one.
          // The stored XML is only for backup purposes.
          SlaveServer check = findSlaveServer( slaveServer.getName() );
          if ( check != null ) {
            if ( !check.isShared() ) {
              // we don't overwrite shared objects.
              if ( shouldOverwrite( prompter, props,
                BaseMessages.getString( PKG, "TransMeta.Message.OverwriteSlaveServerYN", slaveServer.getName() ),
                BaseMessages.getString( PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) {
                addOrReplaceSlaveServer( slaveServer );
              }
            }
          } else {
            slaveServers.add( slaveServer );
          }
        }
        // Read the cluster schemas
        //
        Node clusterSchemasNode = XMLHandler.getSubNode( infonode, XML_TAG_CLUSTERSCHEMAS );
        int nrClusterSchemas = XMLHandler.countNodes( clusterSchemasNode, ClusterSchema.XML_TAG );
        for ( int i = 0; i < nrClusterSchemas; i++ ) {
          Node clusterSchemaNode = XMLHandler.getSubNodeByNr( clusterSchemasNode, ClusterSchema.XML_TAG, i );
          ClusterSchema clusterSchema = new ClusterSchema( clusterSchemaNode, slaveServers );
          clusterSchema.shareVariablesWith( this );
          // Check if the object exists and if it's a shared object.
          // If so, then we will keep the shared version, not this one.
          // The stored XML is only for backup purposes.
          ClusterSchema check = findClusterSchema( clusterSchema.getName() );
          if ( check != null ) {
            if ( !check.isShared() ) {
              // we don't overwrite shared objects.
              if ( shouldOverwrite( prompter, props,
                BaseMessages.getString( PKG, "TransMeta.Message.OverwriteClusterSchemaYN", clusterSchema.getName() ),
                BaseMessages.getString( PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) {
                addOrReplaceClusterSchema( clusterSchema );
              }
            }
          } else {
            clusterSchemas.add( clusterSchema );
          }
        }
        // Have all step clustering schema meta-data reference the correct cluster schemas that we just loaded
        //
        for ( int i = 0; i < nrSteps(); i++ ) {
          getStep( i ).setClusterSchemaAfterLoading( clusterSchemas );
        }
        String srowset = XMLHandler.getTagValue( infonode, "size_rowset" );
        sizeRowset = Const.toInt( srowset, Const.ROWS_IN_ROWSET );
        sleepTimeEmpty =
          Const.toInt( XMLHandler.getTagValue( infonode, "sleep_time_empty" ), Const.TIMEOUT_GET_MILLIS );
        sleepTimeFull = Const.toInt( XMLHandler.getTagValue( infonode, "sleep_time_full" ), Const.TIMEOUT_PUT_MILLIS );
        usingUniqueConnections = "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "unique_connections" ) );
        // Note the asymmetric defaults: feedback and thread priorities default to enabled ("not N"),
        // while performance capture defaults to disabled ("only Y").
        feedbackShown = !"N".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "feedback_shown" ) );
        feedbackSize = Const.toInt( XMLHandler.getTagValue( infonode, "feedback_size" ), Const.ROWS_UPDATE );
        usingThreadPriorityManagment =
          !"N".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "using_thread_priorities" ) );
        // Performance monitoring for steps...
        //
        capturingStepPerformanceSnapShots =
          "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "capture_step_performance" ) );
        stepPerformanceCapturingDelay =
          Const.toLong( XMLHandler.getTagValue( infonode, "step_performance_capturing_delay" ), 1000 );
        stepPerformanceCapturingSizeLimit = XMLHandler.getTagValue( infonode, "step_performance_capturing_size_limit" );
        // Created user/date
        createdUser = XMLHandler.getTagValue( infonode, "created_user" );
        String createDate = XMLHandler.getTagValue( infonode, "created_date" );
        if ( createDate != null ) {
          createdDate = XMLHandler.stringToDate( createDate );
        }
        // Changed user/date
        modifiedUser = XMLHandler.getTagValue( infonode, "modified_user" );
        String modDate = XMLHandler.getTagValue( infonode, "modified_date" );
        if ( modDate != null ) {
          modifiedDate = XMLHandler.stringToDate( modDate );
        }
        Node partitionDistNode = XMLHandler.getSubNode( transnode, SlaveStepCopyPartitionDistribution.XML_TAG );
        if ( partitionDistNode != null ) {
          slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution( partitionDistNode );
        } else {
          slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(); // leave empty
        }
        // Is this a slave transformation?
        //
        slaveTransformation = "Y".equalsIgnoreCase( XMLHandler.getTagValue( transnode, "slave_transformation" ) );
        if ( log.isDebug() ) {
          log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.NumberOfStepsReaded" ) + nrSteps() );
          log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.NumberOfHopsReaded" ) + nrTransHops() );
        }
        sortSteps();
        // Load the attribute groups map
        //
        attributesMap = AttributesUtil.loadAttributes( XMLHandler.getSubNode( transnode, AttributesUtil.XML_TAG ) );
        keyForSessionKey = XMLHandler.stringToBinary( XMLHandler.getTagValue( infonode, "key_for_session_key" ) );
        isKeyPrivate = "Y".equals( XMLHandler.getTagValue( infonode, "is_key_private" ) );
      } catch ( KettleXMLException xe ) {
        throw new KettleXMLException( BaseMessages.getString( PKG, "TransMeta.Exception.ErrorReadingTransformation" ),
          xe );
      } catch ( KettleException e ) {
        throw new KettleXMLException( e );
      } finally {
        // Always detach the parent variable space and fire the loaded extension point,
        // even when parsing failed.
        initializeVariablesFrom( null );
        if ( setInternalVariables ) {
          setInternalKettleVariables();
        }
        ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationMetaLoaded.id, this );
      }
    } catch ( Exception e ) {
      // See if we have missing plugins to report, those take precedence!
      //
      if ( !missingPluginsException.getMissingPluginDetailsList().isEmpty() ) {
        throw missingPluginsException;
      } else {
        throw new KettleXMLException( BaseMessages.getString( PKG, "TransMeta.Exception.ErrorReadingTransformation" ),
          e );
      }
    } finally {
      // Missing-plugin details take precedence over any other outcome, including success:
      // this throw from finally deliberately replaces whatever happened above.
      if ( !missingPluginsException.getMissingPluginDetailsList().isEmpty() ) {
        throw missingPluginsException;
      }
    }
  }
  /**
   * Returns the session key bytes loaded from the transformation XML ("key_for_session_key"), or null if none was set.
   *
   * @return the session key bytes
   */
  public byte[] getKey() {
    return keyForSessionKey;
  }
  /**
   * Sets the session key bytes for this transformation.
   *
   * @param key
   *          the session key bytes to remember
   */
  public void setKey( byte[] key ) {
    this.keyForSessionKey = key;
  }
  /**
   * Returns whether the session key is marked private ("is_key_private" = Y in the transformation XML).
   *
   * @return true if the key is private
   */
  public boolean isPrivateKey() {
    return isKeyPrivate;
  }
  /**
   * Marks the session key as private or public.
   *
   * @param privateKey
   *          true to mark the key as private
   */
  public void setPrivateKey( boolean privateKey ) {
    this.isKeyPrivate = privateKey;
  }
@Override
public boolean loadSharedObject( SharedObjectInterface object ) {
if ( !super.loadSharedObject( object ) ) {
if ( object instanceof StepMeta ) {
StepMeta stepMeta = (StepMeta) object;
addOrReplaceStep( stepMeta );
} else if ( object instanceof PartitionSchema ) {
PartitionSchema partitionSchema = (PartitionSchema) object;
addOrReplacePartitionSchema( partitionSchema );
} else if ( object instanceof ClusterSchema ) {
ClusterSchema clusterSchema = (ClusterSchema) object;
clusterSchema.shareVariablesWith( this );
addOrReplaceClusterSchema( clusterSchema );
} else {
return false;
}
}
return true;
}
/**
* Gets a List of all the steps that are used in at least one active hop. These steps will be used to execute the
* transformation. The others will not be executed.<br/>
* Update 3.0 : we also add those steps that are not linked to another hop, but have at least one remote input or
* output step defined.
*
* @param all
* true if you want to get ALL the steps from the transformation, false otherwise
* @return A List of steps
*/
public List<StepMeta> getTransHopSteps( boolean all ) {
List<StepMeta> st = new ArrayList<>();
int idx;
for ( int x = 0; x < nrTransHops(); x++ ) {
TransHopMeta hi = getTransHop( x );
if ( hi.isEnabled() || all ) {
idx = st.indexOf( hi.getFromStep() ); // FROM
if ( idx < 0 ) {
st.add( hi.getFromStep() );
}
idx = st.indexOf( hi.getToStep() ); // TO
if ( idx < 0 ) {
st.add( hi.getToStep() );
}
}
}
// Also, add the steps that need to be painted, but are not part of a hop
for ( int x = 0; x < nrSteps(); x++ ) {
StepMeta stepMeta = getStep( x );
if ( stepMeta.isDrawn() && !isStepUsedInTransHops( stepMeta ) ) {
st.add( stepMeta );
}
if ( !stepMeta.getRemoteInputSteps().isEmpty() || !stepMeta.getRemoteOutputSteps().isEmpty() ) {
if ( !st.contains( stepMeta ) ) {
st.add( stepMeta );
}
}
}
return st;
}
/**
* Checks if a step has been used in a hop or not.
*
* @param stepMeta
* The step queried.
* @return true if a step is used in a hop (active or not), false otherwise
*/
public boolean isStepUsedInTransHops( StepMeta stepMeta ) {
TransHopMeta fr = findTransHopFrom( stepMeta );
TransHopMeta to = findTransHopTo( stepMeta );
if ( fr != null || to != null ) {
return true;
}
return false;
}
/**
* Checks if any selected step has been used in a hop or not.
*
* @param stepMeta
* The step queried.
* @return true if a step is used in a hop (active or not), false otherwise
*/
public boolean isAnySelectedStepUsedInTransHops() {
List<StepMeta> selectedSteps = getSelectedSteps();
int i = 0;
while ( i < selectedSteps.size() ) {
StepMeta stepMeta = selectedSteps.get( i );
if ( isStepUsedInTransHops( stepMeta ) ) {
return true;
}
i++;
}
return false;
}
/**
* Clears the different changed flags of the transformation.
*
*/
@Override
public void clearChanged() {
changed_steps = false;
changed_hops = false;
for ( int i = 0; i < nrSteps(); i++ ) {
getStep( i ).setChanged( false );
if ( getStep( i ).getStepPartitioningMeta() != null ) {
getStep( i ).getStepPartitioningMeta().hasChanged( false );
}
}
for ( int i = 0; i < nrTransHops(); i++ ) {
getTransHop( i ).setChanged( false );
}
for ( int i = 0; i < partitionSchemas.size(); i++ ) {
partitionSchemas.get( i ).setChanged( false );
}
for ( int i = 0; i < clusterSchemas.size(); i++ ) {
clusterSchemas.get( i ).setChanged( false );
}
super.clearChanged();
}
/**
* Checks whether or not the steps have changed.
*
* @return true if the steps have been changed, false otherwise
*/
public boolean haveStepsChanged() {
if ( changed_steps ) {
return true;
}
for ( int i = 0; i < nrSteps(); i++ ) {
StepMeta stepMeta = getStep( i );
if ( stepMeta.hasChanged() ) {
return true;
}
if ( stepMeta.getStepPartitioningMeta() != null && stepMeta.getStepPartitioningMeta().hasChanged() ) {
return true;
}
}
return false;
}
/**
* Checks whether or not any of the hops have been changed.
*
* @return true if a hop has been changed, false otherwise
*/
public boolean haveHopsChanged() {
if ( changed_hops ) {
return true;
}
for ( int i = 0; i < nrTransHops(); i++ ) {
TransHopMeta hi = getTransHop( i );
if ( hi.hasChanged() ) {
return true;
}
}
return false;
}
/**
* Checks whether or not any of the partitioning schemas have been changed.
*
* @return true if the partitioning schemas have been changed, false otherwise
*/
public boolean havePartitionSchemasChanged() {
for ( int i = 0; i < partitionSchemas.size(); i++ ) {
PartitionSchema ps = partitionSchemas.get( i );
if ( ps.hasChanged() ) {
return true;
}
}
return false;
}
/**
* Checks whether or not any of the clustering schemas have been changed.
*
* @return true if the clustering schemas have been changed, false otherwise
*/
public boolean haveClusterSchemasChanged() {
for ( int i = 0; i < clusterSchemas.size(); i++ ) {
ClusterSchema cs = clusterSchemas.get( i );
if ( cs.hasChanged() ) {
return true;
}
}
return false;
}
/**
* Checks whether or not the transformation has changed.
*
* @return true if the transformation has changed, false otherwise
*/
@Override
public boolean hasChanged() {
if ( super.hasChanged() ) {
return true;
}
if ( haveStepsChanged() ) {
return true;
}
if ( haveHopsChanged() ) {
return true;
}
if ( havePartitionSchemasChanged() ) {
return true;
}
if ( haveClusterSchemasChanged() ) {
return true;
}
return false;
}
private boolean isErrorNode( Node errorHandingNode, Node checkNode ) {
if ( errorHandingNode != null ) {
NodeList errors = errorHandingNode.getChildNodes();
Node nodeHopFrom = XMLHandler.getSubNode( checkNode, TransHopMeta.XML_FROM_TAG );
Node nodeHopTo = XMLHandler.getSubNode( checkNode, TransHopMeta.XML_TO_TAG );
int i = 0;
while ( i < errors.getLength() ) {
Node errorNode = errors.item( i );
if ( !StepErrorMeta.XML_ERROR_TAG.equals( errorNode.getNodeName() ) ) {
i++;
continue;
}
Node errorSourceNode = XMLHandler.getSubNode( errorNode, StepErrorMeta.XML_SOURCE_STEP_TAG );
Node errorTagetNode = XMLHandler.getSubNode( errorNode, StepErrorMeta.XML_TARGET_STEP_TAG );
String sourceContent = errorSourceNode.getTextContent().trim();
String tagetContent = errorTagetNode.getTextContent().trim();
if ( sourceContent.equals( nodeHopFrom.getTextContent().trim() )
&& tagetContent.equals( nodeHopTo.getTextContent().trim() ) ) {
return true;
}
i++;
}
}
return false;
}
/**
* See if there are any loops in the transformation, starting at the indicated step. This works by looking at all the
* previous steps. If you keep going backward and find the step, there is a loop. Both the informational and the
* normal steps need to be checked for loops!
*
* @param stepMeta
* The step position to start looking
*
* @return true if a loop has been found, false if no loop is found.
*/
public boolean hasLoop( StepMeta stepMeta ) {
clearLoopCache();
return hasLoop( stepMeta, null, true ) || hasLoop( stepMeta, null, false );
}
/**
* See if there are any loops in the transformation, starting at the indicated step. This works by looking at all the
* previous steps. If you keep going backward and find the original step again, there is a loop.
*
* @param stepMeta
* The step position to start looking
* @param lookup
* The original step when wandering around the transformation.
* @param info
* Check the informational steps or not.
*
* @return true if a loop has been found, false if no loop is found.
*/
private boolean hasLoop( StepMeta stepMeta, StepMeta lookup, boolean info ) {
String
cacheKey =
stepMeta.getName() + " - " + ( lookup != null ? lookup.getName() : "" ) + " - " + ( info ? "true" : "false" );
Boolean loop = loopCache.get( cacheKey );
if ( loop != null ) {
return loop.booleanValue();
}
boolean hasLoop = false;
int nr = findNrPrevSteps( stepMeta, info );
for ( int i = 0; i < nr && !hasLoop; i++ ) {
StepMeta prevStepMeta = findPrevStep( stepMeta, i, info );
if ( prevStepMeta != null ) {
if ( prevStepMeta.equals( stepMeta ) ) {
hasLoop = true;
break; // no need to check more but caching this one below
} else if ( prevStepMeta.equals( lookup ) ) {
hasLoop = true;
break; // no need to check more but caching this one below
} else if ( hasLoop( prevStepMeta, lookup == null ? stepMeta : lookup, info ) ) {
hasLoop = true;
break; // no need to check more but caching this one below
}
}
}
// Store in the cache...
//
loopCache.put( cacheKey, Boolean.valueOf( hasLoop ) );
return hasLoop;
}
/**
* Mark all steps in the transformation as selected.
*
*/
public void selectAll() {
int i;
for ( i = 0; i < nrSteps(); i++ ) {
StepMeta stepMeta = getStep( i );
stepMeta.setSelected( true );
}
for ( i = 0; i < nrNotes(); i++ ) {
NotePadMeta ni = getNote( i );
ni.setSelected( true );
}
setChanged();
notifyObservers( "refreshGraph" );
}
/**
* Clear the selection of all steps.
*
*/
public void unselectAll() {
int i;
for ( i = 0; i < nrSteps(); i++ ) {
StepMeta stepMeta = getStep( i );
stepMeta.setSelected( false );
}
for ( i = 0; i < nrNotes(); i++ ) {
NotePadMeta ni = getNote( i );
ni.setSelected( false );
}
}
/**
* Get an array of all the selected step locations.
*
* @return The selected step locations.
*/
public Point[] getSelectedStepLocations() {
List<Point> points = new ArrayList<>();
for ( StepMeta stepMeta : getSelectedSteps() ) {
Point p = stepMeta.getLocation();
points.add( new Point( p.x, p.y ) ); // explicit copy of location
}
return points.toArray( new Point[points.size()] );
}
/**
* Get an array of all the selected note locations.
*
* @return The selected note locations.
*/
public Point[] getSelectedNoteLocations() {
List<Point> points = new ArrayList<>();
for ( NotePadMeta ni : getSelectedNotes() ) {
Point p = ni.getLocation();
points.add( new Point( p.x, p.y ) ); // explicit copy of location
}
return points.toArray( new Point[points.size()] );
}
/**
* Gets a list of the selected steps.
*
* @return A list of all the selected steps.
*/
public List<StepMeta> getSelectedSteps() {
List<StepMeta> selection = new ArrayList<>();
for ( StepMeta stepMeta : steps ) {
if ( stepMeta.isSelected() ) {
selection.add( stepMeta );
}
}
return selection;
}
/**
* Gets an array of all the selected step names.
*
* @return An array of all the selected step names.
*/
public String[] getSelectedStepNames() {
List<StepMeta> selection = getSelectedSteps();
String[] retval = new String[selection.size()];
for ( int i = 0; i < retval.length; i++ ) {
StepMeta stepMeta = selection.get( i );
retval[i] = stepMeta.getName();
}
return retval;
}
/**
* Gets an array of the locations of an array of steps.
*
* @param steps
* An array of steps
* @return an array of the locations of an array of steps
*/
public int[] getStepIndexes( List<StepMeta> steps ) {
int[] retval = new int[steps.size()];
for ( int i = 0; i < steps.size(); i++ ) {
retval[i] = indexOfStep( steps.get( i ) );
}
return retval;
}
/**
* Gets the maximum size of the canvas by calculating the maximum location of a step.
*
* @return Maximum coordinate of a step in the transformation + (100,100) for safety.
*/
public Point getMaximum() {
int maxx = 0, maxy = 0;
for ( int i = 0; i < nrSteps(); i++ ) {
StepMeta stepMeta = getStep( i );
Point loc = stepMeta.getLocation();
if ( loc.x > maxx ) {
maxx = loc.x;
}
if ( loc.y > maxy ) {
maxy = loc.y;
}
}
for ( int i = 0; i < nrNotes(); i++ ) {
NotePadMeta notePadMeta = getNote( i );
Point loc = notePadMeta.getLocation();
if ( loc.x + notePadMeta.width > maxx ) {
maxx = loc.x + notePadMeta.width;
}
if ( loc.y + notePadMeta.height > maxy ) {
maxy = loc.y + notePadMeta.height;
}
}
return new Point( maxx + 100, maxy + 100 );
}
/**
* Gets the minimum point on the canvas of a transformation.
*
* @return Minimum coordinate of a step in the transformation
*/
public Point getMinimum() {
int minx = Integer.MAX_VALUE, miny = Integer.MAX_VALUE;
for ( int i = 0; i < nrSteps(); i++ ) {
StepMeta stepMeta = getStep( i );
Point loc = stepMeta.getLocation();
if ( loc.x < minx ) {
minx = loc.x;
}
if ( loc.y < miny ) {
miny = loc.y;
}
}
for ( int i = 0; i < nrNotes(); i++ ) {
NotePadMeta notePadMeta = getNote( i );
Point loc = notePadMeta.getLocation();
if ( loc.x < minx ) {
minx = loc.x;
}
if ( loc.y < miny ) {
miny = loc.y;
}
}
if ( minx > BORDER_INDENT && minx != Integer.MAX_VALUE ) {
minx -= BORDER_INDENT;
} else {
minx = 0;
}
if ( miny > BORDER_INDENT && miny != Integer.MAX_VALUE ) {
miny -= BORDER_INDENT;
} else {
miny = 0;
}
return new Point( minx, miny );
}
/**
* Gets the names of all the steps.
*
* @return An array of step names.
*/
public String[] getStepNames() {
String[] retval = new String[nrSteps()];
for ( int i = 0; i < nrSteps(); i++ ) {
retval[i] = getStep( i ).getName();
}
return retval;
}
/**
* Gets all the steps as an array.
*
* @return An array of all the steps in the transformation.
*/
public StepMeta[] getStepsArray() {
StepMeta[] retval = new StepMeta[nrSteps()];
for ( int i = 0; i < nrSteps(); i++ ) {
retval[i] = getStep( i );
}
return retval;
}
  /**
   * Looks in the transformation to find a step in a previous location starting somewhere.
   *
   * @param startStep
   *          The starting step
   * @param stepToFind
   *          The step to look for backward in the transformation
   * @return true if we can find the step in an earlier location in the transformation.
   */
  public boolean findPrevious( StepMeta startStep, StepMeta stepToFind ) {
    // Results are memoized in loopCache (also used by hasLoop(), whose keys carry an extra
    // " - true/false" suffix to keep the two key spaces apart).
    String key = startStep.getName() + " - " + stepToFind.getName();
    Boolean result = loopCache.get( key );
    if ( result != null ) {
      return result;
    }
    // Normal steps
    //
    List<StepMeta> previousSteps = findPreviousSteps( startStep, false );
    for ( int i = 0; i < previousSteps.size(); i++ ) {
      StepMeta stepMeta = previousSteps.get( i );
      if ( stepMeta.equals( stepToFind ) ) {
        loopCache.put( key, true );
        return true;
      }
      boolean found = findPrevious( stepMeta, stepToFind ); // Look further back in the tree.
      if ( found ) {
        loopCache.put( key, true );
        return true;
      }
    }
    // Info steps: also follow informational (lookup) hops backwards.
    List<StepMeta> infoSteps = findPreviousSteps( startStep, true );
    for ( int i = 0; i < infoSteps.size(); i++ ) {
      StepMeta stepMeta = infoSteps.get( i );
      if ( stepMeta.equals( stepToFind ) ) {
        loopCache.put( key, true );
        return true;
      }
      boolean found = findPrevious( stepMeta, stepToFind ); // Look further back in the tree.
      if ( found ) {
        loopCache.put( key, true );
        return true;
      }
    }
    // NOTE(review): negative results are only cached after the full recursive walk completes, so
    // this recursion terminates only on acyclic hop graphs -- presumably loop detection elsewhere
    // (hasLoop) keeps transformations acyclic; confirm before calling on unvalidated metadata.
    loopCache.put( key, false );
    return false;
  }
  /**
   * Puts the steps in alphabetical order (using StepMeta's natural ordering).
   */
  public void sortSteps() {
    try {
      Collections.sort( steps );
    } catch ( Exception e ) {
      // Best effort: log the failure and leave the current step order untouched rather than
      // propagating the error to the caller.
      log.logError( BaseMessages.getString( PKG, "TransMeta.Exception.ErrorOfSortingSteps" ) + e );
      log.logError( Const.getStackTracker( e ) );
    }
  }
  /**
   * Sorts all the hops in the transformation using TransHopMeta's natural ordering.
   */
  public void sortHops() {
    Collections.sort( hops );
  }
  /** Number of "previous steps" lookups performed during the last {@link #sortStepsNatural()} run; used for the timing log. */
  private long prevCount;
  /**
   * Puts the steps in a more natural order: from start to finish. For the moment, we ignore splits and joins. Splits
   * and joins can't be listed sequentially in any case!
   *
   * @return a map containing all the previous steps per step
   */
  public Map<StepMeta, Map<StepMeta, Boolean>> sortStepsNatural() {
    long startTime = System.currentTimeMillis();
    prevCount = 0;
    // First create a map where all the previous steps of another step are kept...
    //
    final Map<StepMeta, Map<StepMeta, Boolean>> stepMap = new HashMap<>();
    // Also cache the previous steps
    //
    final Map<StepMeta, List<StepMeta>> previousCache = new HashMap<>();
    // Cache calculation of steps before another
    //
    Map<StepMeta, Map<StepMeta, Boolean>> beforeCache = new HashMap<>();
    for ( StepMeta stepMeta : steps ) {
      // What are the previous steps? (cached version for performance)
      //
      List<StepMeta> prevSteps = previousCache.get( stepMeta );
      if ( prevSteps == null ) {
        prevSteps = findPreviousSteps( stepMeta );
        prevCount++;
        previousCache.put( stepMeta, prevSteps );
      }
      // Now get the previous steps recursively, store them in the step map
      //
      for ( StepMeta prev : prevSteps ) {
        Map<StepMeta, Boolean> beforePrevMap = updateFillStepMap( previousCache, beforeCache, stepMeta, prev );
        stepMap.put( stepMeta, beforePrevMap );
        // Store it also in the beforeCache...
        //
        beforeCache.put( prev, beforePrevMap );
      }
    }
    // Sort so that a step comes after everything that (transitively) precedes it.
    // NOTE(review): this comparator returns -1 whenever o2 is absent from o1's before-map, which
    // is not symmetric/transitive in general and can make TimSort throw "Comparison method
    // violates its general contract!" on some graphs -- confirm before relying on the ordering.
    Collections.sort( steps, new Comparator<StepMeta>() {
      @Override
      public int compare( StepMeta o1, StepMeta o2 ) {
        Map<StepMeta, Boolean> beforeMap = stepMap.get( o1 );
        if ( beforeMap != null ) {
          if ( beforeMap.get( o2 ) == null ) {
            return -1;
          } else {
            return 1;
          }
        } else {
          return o1.getName().compareToIgnoreCase( o2.getName() );
        }
      }
    } );
    long endTime = System.currentTimeMillis();
    log.logBasic(
        BaseMessages.getString( PKG, "TransMeta.Log.TimeExecutionStepSort", ( endTime - startTime ), prevCount ) );
    return stepMap;
  }
  /**
   * Fills a map with all steps previous to the given step. This method uses a caching technique, so if a map is
   * provided that contains the specified previous step, it is immediately returned to avoid unnecessary processing.
   * Otherwise, the previous steps are determined and added to the map recursively, and a cache is constructed for later
   * use.
   *
   * @param previousCache
   *          the previous cache, must be non-null
   * @param beforeCache
   *          the before cache, must be non-null
   * @param originStepMeta
   *          the origin step meta
   * @param previousStepMeta
   *          the previous step meta
   * @return the map of every step located before {@code previousStepMeta} (including itself)
   */
  private Map<StepMeta, Boolean> updateFillStepMap( Map<StepMeta, List<StepMeta>> previousCache,
      Map<StepMeta, Map<StepMeta, Boolean>> beforeCache, StepMeta originStepMeta, StepMeta previousStepMeta ) {
    // See if we have a hash map to store step occurrence (located before the step)
    //
    Map<StepMeta, Boolean> beforeMap = beforeCache.get( previousStepMeta );
    if ( beforeMap == null ) {
      beforeMap = new HashMap<>();
    } else {
      return beforeMap; // Nothing left to do here!
    }
    // Store the current previous step in the map
    //
    beforeMap.put( previousStepMeta, Boolean.TRUE );
    // Figure out all the previous steps as well, they all need to go in there...
    //
    List<StepMeta> prevSteps = previousCache.get( previousStepMeta );
    if ( prevSteps == null ) {
      prevSteps = findPreviousSteps( previousStepMeta );
      prevCount++; // bookkeeping for the timing log in sortStepsNatural()
      previousCache.put( previousStepMeta, prevSteps );
    }
    // Now, get the previous steps for stepMeta recursively...
    // We only do this when the beforeMap is not known yet...
    //
    for ( StepMeta prev : prevSteps ) {
      Map<StepMeta, Boolean> beforePrevMap = updateFillStepMap( previousCache, beforeCache, originStepMeta, prev );
      // Keep a copy in the cache...
      //
      beforeCache.put( prev, beforePrevMap );
      // Also add it to the new map for this step...
      //
      beforeMap.putAll( beforePrevMap );
    }
    return beforeMap;
  }
  /**
   * Sorts the hops in a natural way: from beginning to end.
   */
  public void sortHopsNatural() {
    // Loop over the hops...
    for ( int j = 0; j < nrTransHops(); j++ ) {
      // Bubble sort: we need to do this several times...
      for ( int i = 0; i < nrTransHops() - 1; i++ ) {
        TransHopMeta one = getTransHop( i );
        TransHopMeta two = getTransHop( i + 1 );
        StepMeta a = two.getFromStep();
        StepMeta b = one.getToStep();
        // Swap adjacent hops when the later hop's source is not (transitively) downstream of the
        // earlier hop's target, i.e. hop "two" logically belongs before hop "one".
        if ( !findPrevious( a, b ) && !a.equals( b ) ) {
          setTransHop( i + 1, one );
          setTransHop( i, two );
        }
      }
    }
  }
  /**
   * Determines the impact of the different steps in a transformation on databases, tables and field.
   *
   * @param impact
   *          An ArrayList of DatabaseImpact objects (appended to).
   * @param monitor
   *          a progress monitor listener to be updated as the transformation is analyzed, may be null
   * @throws KettleStepException
   *           if any errors occur during analysis
   */
  public void analyseImpact( List<DatabaseImpact> impact, ProgressMonitorListener monitor ) throws KettleStepException {
    if ( monitor != null ) {
      monitor
          .beginTask( BaseMessages.getString( PKG, "TransMeta.Monitor.DeterminingImpactTask.Title" ), nrSteps() );
    }
    boolean stop = false;
    for ( int i = 0; i < nrSteps() && !stop; i++ ) {
      if ( monitor != null ) {
        monitor.subTask(
            BaseMessages.getString( PKG, "TransMeta.Monitor.LookingAtStepTask.Title" ) + ( i + 1 ) + "/" + nrSteps() );
      }
      StepMeta stepMeta = getStep( i );
      RowMetaInterface prev = getPrevStepFields( stepMeta );
      StepMetaInterface stepint = stepMeta.getStepMetaInterface();
      // Informational row layout: taken from the info step(s) when present, otherwise from the
      // step's own table fields.
      RowMetaInterface inform = null;
      StepMeta[] lu = getInfoStep( stepMeta );
      if ( lu != null ) {
        inform = getStepFields( lu );
      } else {
        inform = stepint.getTableFields();
      }
      // Call both the legacy (deprecated) and the current analyseImpact signatures so older step
      // plugins still contribute their impact information.
      compatibleAnalyseImpactStep( impact, stepint, this, stepMeta, prev, inform );
      stepint.analyseImpact( impact, this, stepMeta, prev, null, null, inform, repository, metaStore );
      if ( monitor != null ) {
        monitor.worked( 1 );
        stop = monitor.isCanceled(); // allow the user to abort a long analysis
      }
    }
    if ( monitor != null ) {
      monitor.done();
    }
  }
  /**
   * Calls the deprecated, repository-less {@code analyseImpact} variant so that older step
   * implementations that only override the legacy signature are still analysed.
   */
  @SuppressWarnings( "deprecation" )
  private void compatibleAnalyseImpactStep( List<DatabaseImpact> impact, StepMetaInterface stepint, TransMeta transMeta,
      StepMeta stepMeta, RowMetaInterface prev, RowMetaInterface inform ) throws KettleStepException {
    stepint.analyseImpact( impact, transMeta, stepMeta, prev, null, null, inform );
  }
/**
* Proposes an alternative stepname when the original already exists.
*
* @param stepname
* The stepname to find an alternative for
* @return The suggested alternative stepname.
*/
public String getAlternativeStepname( String stepname ) {
String newname = stepname;
StepMeta stepMeta = findStep( newname );
int nr = 1;
while ( stepMeta != null ) {
nr++;
newname = stepname + " " + nr;
stepMeta = findStep( newname );
}
return newname;
}
  /**
   * Builds a list of all the SQL statements that this transformation needs in order to work properly.
   * Convenience overload of {@link #getSQLStatements(ProgressMonitorListener)} without progress reporting.
   *
   * @return An ArrayList of SQLStatement objects.
   * @throws KettleStepException
   *           if any errors occur during SQL statement generation
   */
  public List<SQLStatement> getSQLStatements() throws KettleStepException {
    return getSQLStatements( null );
  }
/**
* Builds a list of all the SQL statements that this transformation needs in order to work properly.
*
* @param monitor
* a progress monitor listener to be updated as the SQL statements are generated
* @return An ArrayList of SQLStatement objects.
* @throws KettleStepException
* if any errors occur during SQL statement generation
*/
public List<SQLStatement> getSQLStatements( ProgressMonitorListener monitor ) throws KettleStepException {
if ( monitor != null ) {
monitor.beginTask( BaseMessages.getString( PKG, "TransMeta.Monitor.GettingTheSQLForTransformationTask.Title" ), nrSteps() + 1 );
}
List<SQLStatement> stats = new ArrayList<>();
for ( int i = 0; i < nrSteps(); i++ ) {
StepMeta stepMeta = getStep( i );
if ( monitor != null ) {
monitor.subTask(
BaseMessages.getString( PKG, "TransMeta.Monitor.GettingTheSQLForStepTask.Title", "" + stepMeta ) );
}
RowMetaInterface prev = getPrevStepFields( stepMeta );
SQLStatement sqlCompat = compatibleStepMetaGetSQLStatements( stepMeta.getStepMetaInterface(), stepMeta, prev );
if ( sqlCompat.getSQL() != null || sqlCompat.hasError() ) {
stats.add( sqlCompat );
}
SQLStatement
sql =
stepMeta.getStepMetaInterface().getSQLStatements( this, stepMeta, prev, repository, metaStore );
if ( sql.getSQL() != null || sql.hasError() ) {
stats.add( sql );
}
if ( monitor != null ) {
monitor.worked( 1 );
}
}
// Also check the sql for the logtable...
//
if ( monitor != null ) {
monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.GettingTheSQLForTransformationTask.Title2" ) );
}
if ( transLogTable.getDatabaseMeta() != null && ( !Utils.isEmpty( transLogTable.getTableName() ) || !Utils
.isEmpty( performanceLogTable.getTableName() ) ) ) {
try {
for ( LogTableInterface logTable : new LogTableInterface[] { transLogTable, performanceLogTable,
channelLogTable, stepLogTable, } ) {
if ( logTable.getDatabaseMeta() != null && !Utils.isEmpty( logTable.getTableName() ) ) {
Database db = null;
try {
db = new Database( this, transLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
RowMetaInterface fields = logTable.getLogRecord( LogStatus.START, null, null ).getRowMeta();
String
schemaTable =
logTable.getDatabaseMeta()
.getQuotedSchemaTableCombination( logTable.getSchemaName(), logTable.getTableName() );
String sql = db.getDDL( schemaTable, fields );
if ( !Utils.isEmpty( sql ) ) {
SQLStatement stat = new SQLStatement( "<this transformation>", transLogTable.getDatabaseMeta(), sql );
stats.add( stat );
}
} catch ( Exception e ) {
throw new KettleDatabaseException(
"Unable to connect to logging database [" + logTable.getDatabaseMeta() + "]", e );
} finally {
if ( db != null ) {
db.disconnect();
}
}
}
}
} catch ( KettleDatabaseException dbe ) {
SQLStatement stat = new SQLStatement( "<this transformation>", transLogTable.getDatabaseMeta(), null );
stat.setError(
BaseMessages.getString( PKG, "TransMeta.SQLStatement.ErrorDesc.ErrorObtainingTransformationLogTableInfo" )
+ dbe.getMessage() );
stats.add( stat );
}
}
if ( monitor != null ) {
monitor.worked( 1 );
}
if ( monitor != null ) {
monitor.done();
}
return stats;
}
  /**
   * Calls the deprecated three-argument {@code getSQLStatements} variant so that step plugins which
   * only implement the legacy signature still get a chance to contribute SQL.
   */
  @SuppressWarnings( "deprecation" )
  private SQLStatement compatibleStepMetaGetSQLStatements( StepMetaInterface stepMetaInterface, StepMeta stepMeta,
      RowMetaInterface prev ) throws KettleStepException {
    return stepMetaInterface.getSQLStatements( this, stepMeta, prev );
  }
/**
* Get the SQL statements (needed to run this transformation) as a single String.
*
* @return the SQL statements needed to run this transformation
* @throws KettleStepException
* if any errors occur during SQL statement generation
*/
public String getSQLStatementsString() throws KettleStepException {
String sql = "";
List<SQLStatement> stats = getSQLStatements();
for ( int i = 0; i < stats.size(); i++ ) {
SQLStatement stat = stats.get( i );
if ( !stat.hasError() && stat.hasSQL() ) {
sql += stat.getSQL();
}
}
return sql;
}
  /**
   * Checks all the steps and fills a List of (CheckResult) remarks.
   *
   * @param remarks
   *          The remarks list to add to.
   * @param only_selected
   *          true to check only the selected steps, false for all steps
   * @param monitor
   *          a progress monitor listener to be updated as the checks progress, may be null
   * @deprecated use the six-argument {@code checkSteps} overload; this variant passes this TransMeta
   *             as the variable space and no repository/metastore
   */
  @Deprecated
  public void checkSteps( List<CheckResultInterface> remarks, boolean only_selected, ProgressMonitorListener monitor ) {
    checkSteps( remarks, only_selected, monitor, this, null, null );
  }
  /**
   * Checks all the steps and fills a List of (CheckResult) remarks: per-step checks, field-name
   * validity checks, duplicate incoming field names and the state of the transformation log table.
   *
   * @param remarks
   *          The remarks list to add to (cleared first).
   * @param only_selected
   *          true to check only the selected steps, false for all steps
   * @param monitor
   *          a progress monitor listener to be updated as the checks progress, may be null
   * @param space
   *          the variable space used to resolve variable expressions
   * @param repository
   *          the repository to load referenced objects from, may be null
   * @param metaStore
   *          the metastore to load metadata from, may be null
   */
  public void checkSteps( List<CheckResultInterface> remarks, boolean only_selected, ProgressMonitorListener monitor,
      VariableSpace space, Repository repository, IMetaStore metaStore ) {
    try {
      remarks.clear(); // Start with a clean slate...
      // Field-name problems found along the way; reported in one batch near the end.
      Map<ValueMetaInterface, String> values = new Hashtable<>();
      String[] stepnames;
      StepMeta[] steps; // NOTE(review): this local shadows the class field of the same name
      List<StepMeta> selectedSteps = getSelectedSteps();
      if ( !only_selected || selectedSteps.isEmpty() ) {
        stepnames = getStepNames();
        steps = getStepsArray();
      } else {
        stepnames = getSelectedStepNames();
        steps = selectedSteps.toArray( new StepMeta[selectedSteps.size()] );
      }
      ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.BeforeCheckSteps.id,
          new CheckStepsExtension( remarks, space, this, steps, repository, metaStore ) );
      boolean stop_checking = false;
      if ( monitor != null ) {
        monitor.beginTask( BaseMessages.getString( PKG, "TransMeta.Monitor.VerifyingThisTransformationTask.Title" ),
            steps.length + 2 );
      }
      for ( int i = 0; i < steps.length && !stop_checking; i++ ) {
        if ( monitor != null ) {
          monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.VerifyingStepTask.Title", stepnames[i] ) );
        }
        StepMeta stepMeta = steps[i];
        // Collect the informational (lookup) fields entering this step, if any.
        int nrinfo = findNrInfoSteps( stepMeta );
        StepMeta[] infostep = null;
        if ( nrinfo > 0 ) {
          infostep = getInfoStep( stepMeta );
        }
        RowMetaInterface info = null;
        if ( infostep != null ) {
          try {
            info = getStepFields( infostep );
          } catch ( KettleStepException kse ) {
            info = null;
            CheckResult
                cr =
                new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( PKG,
                    "TransMeta.CheckResult.TypeResultError.ErrorOccurredGettingStepInfoFields.Description",
                    "" + stepMeta, Const.CR + kse.getMessage() ), stepMeta );
            remarks.add( cr );
          }
        }
        // The previous fields from non-informative steps:
        RowMetaInterface prev = null;
        try {
          prev = getPrevStepFields( stepMeta );
        } catch ( KettleStepException kse ) {
          CheckResult
              cr =
              new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages
                  .getString( PKG, "TransMeta.CheckResult.TypeResultError.ErrorOccurredGettingInputFields.Description",
                      "" + stepMeta, Const.CR + kse.getMessage() ), stepMeta );
          remarks.add( cr );
          // This is a severe error: stop checking...
          // Otherwise we wind up checking time & time again because nothing gets put in the database
          // cache, the timeout of certain databases is very long... (Oracle)
          stop_checking = true;
        }
        if ( isStepUsedInTransHops( stepMeta ) ) {
          // Get the input & output steps!
          // Copy to arrays:
          String[] input = getPrevStepNames( stepMeta );
          String[] output = getNextStepNames( stepMeta );
          // Check step specific info...
          ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.BeforeCheckStep.id,
              new CheckStepsExtension( remarks, space, this, new StepMeta[] { stepMeta }, repository, metaStore ) );
          stepMeta.check( remarks, this, prev, input, output, info, space, repository, metaStore );
          ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.AfterCheckStep.id,
              new CheckStepsExtension( remarks, space, this, new StepMeta[] { stepMeta }, repository, metaStore ) );
          // See if illegal characters etc. were used in field-names...
          if ( prev != null ) {
            for ( int x = 0; x < prev.size(); x++ ) {
              ValueMetaInterface v = prev.getValueMeta( x );
              String name = v.getName();
              if ( name == null ) {
                values.put( v,
                    BaseMessages.getString( PKG, "TransMeta.Value.CheckingFieldName.FieldNameIsEmpty.Description" ) );
              } else if ( name.indexOf( ' ' ) >= 0 ) {
                values.put( v, BaseMessages
                    .getString( PKG, "TransMeta.Value.CheckingFieldName.FieldNameContainsSpaces.Description" ) );
              } else {
                // Characters considered unfriendly for databases/expressions in field names.
                char[] list =
                    new char[] { '.', ',', '-', '/', '+', '*', '\'', '\t', '"', '|', '@', '(', ')', '{', '}', '!',
                      '^' };
                for ( int c = 0; c < list.length; c++ ) {
                  if ( name.indexOf( list[c] ) >= 0 ) {
                    values.put( v, BaseMessages.getString( PKG,
                        "TransMeta.Value.CheckingFieldName.FieldNameContainsUnfriendlyCodes.Description",
                        String.valueOf( list[c] ) ) );
                  }
                }
              }
            }
            // Check if 2 steps with the same name are entering the step...
            if ( prev.size() > 1 ) {
              String[] fieldNames = prev.getFieldNames();
              String[] sortedNames = Const.sortStrings( fieldNames );
              String prevName = sortedNames[0];
              for ( int x = 1; x < sortedNames.length; x++ ) {
                // Checking for doubles
                if ( prevName.equalsIgnoreCase( sortedNames[x] ) ) {
                  // Give a warning!!
                  // NOTE(review): severity is TYPE_RESULT_ERROR although the message key says
                  // "Warning" -- confirm which is intended before changing either.
                  CheckResult
                      cr =
                      new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages
                          .getString( PKG, "TransMeta.CheckResult.TypeResultWarning.HaveTheSameNameField.Description",
                              prevName ), stepMeta );
                  remarks.add( cr );
                } else {
                  prevName = sortedNames[x];
                }
              }
            }
          } else {
            CheckResult
                cr =
                new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages
                    .getString( PKG, "TransMeta.CheckResult.TypeResultError.CannotFindPreviousFields.Description" )
                    + stepMeta.getName(), stepMeta );
            remarks.add( cr );
          }
        } else {
          // The step is not connected by any hop: only warn about it.
          CheckResult
              cr =
              new CheckResult( CheckResultInterface.TYPE_RESULT_WARNING,
                  BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultWarning.StepIsNotUsed.Description" ),
                  stepMeta );
          remarks.add( cr );
        }
        // Also check for mixing rows...
        try {
          checkRowMixingStatically( stepMeta, null );
        } catch ( KettleRowException e ) {
          CheckResult cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, e.getMessage(), stepMeta );
          remarks.add( cr );
        }
        if ( monitor != null ) {
          monitor.worked( 1 ); // progress bar...
          if ( monitor.isCanceled() ) {
            stop_checking = true;
          }
        }
      }
      // Also, check the logging table of the transformation...
      if ( monitor == null || !monitor.isCanceled() ) {
        if ( monitor != null ) {
          monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.CheckingTheLoggingTableTask.Title" ) );
        }
        if ( transLogTable.getDatabaseMeta() != null ) {
          Database logdb = new Database( this, transLogTable.getDatabaseMeta() );
          logdb.shareVariablesWith( this );
          try {
            logdb.connect();
            CheckResult
                cr =
                new CheckResult( CheckResultInterface.TYPE_RESULT_OK,
                    BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultOK.ConnectingWorks.Description" ),
                    null );
            remarks.add( cr );
            if ( transLogTable.getTableName() != null ) {
              if ( logdb.checkTableExists( transLogTable.getTableName() ) ) {
                cr =
                    new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages
                        .getString( PKG, "TransMeta.CheckResult.TypeResultOK.LoggingTableExists.Description",
                            transLogTable.getTableName() ), null );
                remarks.add( cr );
                // An empty DDL string means the existing table layout already matches what logging needs.
                RowMetaInterface fields = transLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta();
                String sql = logdb.getDDL( transLogTable.getTableName(), fields );
                if ( sql == null || sql.length() == 0 ) {
                  cr =
                      new CheckResult( CheckResultInterface.TYPE_RESULT_OK,
                          BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultOK.CorrectLayout.Description" ),
                          null );
                  remarks.add( cr );
                } else {
                  cr =
                      new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( PKG,
                          "TransMeta.CheckResult.TypeResultError.LoggingTableNeedsAdjustments.Description" ) + Const.CR
                          + sql, null );
                  remarks.add( cr );
                }
              } else {
                cr =
                    new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages
                        .getString( PKG, "TransMeta.CheckResult.TypeResultError.LoggingTableDoesNotExist.Description" ),
                        null );
                remarks.add( cr );
              }
            } else {
              cr =
                  new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages
                      .getString( PKG, "TransMeta.CheckResult.TypeResultError.LogTableNotSpecified.Description" ),
                      null );
              remarks.add( cr );
            }
          } catch ( KettleDatabaseException dbe ) {
            // Ignore errors
          } finally {
            logdb.disconnect();
          }
        }
        if ( monitor != null ) {
          monitor.worked( 1 );
        }
      }
      if ( monitor != null ) {
        monitor.subTask( BaseMessages
            .getString( PKG, "TransMeta.Monitor.CheckingForDatabaseUnfriendlyCharactersInFieldNamesTask.Title" ) );
      }
      // Report the field-name problems collected above, or a single OK remark when there are none.
      if ( values.size() > 0 ) {
        for ( ValueMetaInterface v : values.keySet() ) {
          String message = values.get( v );
          CheckResult
              cr =
              new CheckResult( CheckResultInterface.TYPE_RESULT_WARNING, BaseMessages
                  .getString( PKG, "TransMeta.CheckResult.TypeResultWarning.Description", v.getName(), message,
                      v.getOrigin() ), findStep( v.getOrigin() ) );
          remarks.add( cr );
        }
      } else {
        CheckResult
            cr =
            new CheckResult( CheckResultInterface.TYPE_RESULT_OK,
                BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultOK.Description" ), null );
        remarks.add( cr );
      }
      if ( monitor != null ) {
        monitor.worked( 1 );
      }
      ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.AfterCheckSteps.id,
          new CheckStepsExtension( remarks, space, this, steps, repository, metaStore ) );
    } catch ( Exception e ) {
      log.logError( Const.getStackTracker( e ) );
      throw new RuntimeException( e );
    }
  }
  /**
   * Gets the result rows (runtime state kept on the metadata object for backward compatibility only).
   *
   * @return a list containing the result rows.
   * @deprecated Moved to Trans to make this class stateless
   */
  @Deprecated
  public List<RowMetaAndData> getResultRows() {
    return resultRows;
  }
  /**
   * Sets the list of result rows (runtime state kept on the metadata object for backward compatibility only).
   *
   * @param resultRows
   *          The list of result rows to set.
   * @deprecated Moved to Trans to make this class stateless
   */
  @Deprecated
  public void setResultRows( List<RowMetaAndData> resultRows ) {
    this.resultRows = resultRows;
  }
/**
* Gets the repository directory path and name of the transformation.
*
* @return The repository directory path plus the name of the transformation
*/
public String getPathAndName() {
if ( getRepositoryDirectory().isRoot() ) {
return getRepositoryDirectory().getPath() + getName();
} else {
return getRepositoryDirectory().getPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + getName();
}
}
  /**
   * Gets the arguments used for this transformation.
   *
   * @return an array of String arguments for the transformation, may be null
   * @deprecated moved to Trans; kept only for backward compatibility
   */
  @Deprecated
  public String[] getArguments() {
    return arguments;
  }
  /**
   * Sets the arguments used for this transformation.
   *
   * @param arguments
   *          The arguments to set (stored by reference, not copied).
   * @deprecated moved to Trans; kept only for backward compatibility
   */
  @Deprecated
  public void setArguments( String[] arguments ) {
    this.arguments = arguments;
  }
  /**
   * Gets the counters (database sequence values, e.g.) for the transformation.
   *
   * @return a named table of counters, keyed by counter name.
   * @deprecated moved to Trans; kept only for backward compatibility
   */
  @Deprecated
  public Hashtable<String, Counter> getCounters() {
    return counters;
  }
  /**
   * Sets the counters (database sequence values, e.g.) for the transformation.
   *
   * @param counters
   *          The counters to set (stored by reference, not copied).
   * @deprecated moved to Trans; kept only for backward compatibility
   */
  @Deprecated
  public void setCounters( Hashtable<String, Counter> counters ) {
    this.counters = counters;
  }
  /**
   * Gets a list of dependencies for the transformation.
   *
   * @return the live list of the dependencies for the transformation (not a copy)
   */
  public List<TransDependency> getDependencies() {
    return dependencies;
  }
  /**
   * Sets the dependencies for the transformation.
   *
   * @param dependencies
   *          The dependency list to set (stored by reference, not copied).
   */
  public void setDependencies( List<TransDependency> dependencies ) {
    this.dependencies = dependencies;
  }
  /**
   * Gets the database connection associated with "max date" processing. The connection, along with a specified table
   * and field, allows for the filtering of the number of rows to process in a transformation by time, such as only
   * processing the rows/records since the last time the transformation ran correctly. This can be used for auditing and
   * throttling data during warehousing operations.
   *
   * @return the meta-data of the "max date" database connection, may be null when not configured
   */
  public DatabaseMeta getMaxDateConnection() {
    return maxDateConnection;
  }
  /**
   * Sets the database connection associated with "max date" processing.
   *
   * @param maxDateConnection
   *          the database meta-data to set, may be null to disable
   * @see #getMaxDateConnection()
   */
  public void setMaxDateConnection( DatabaseMeta maxDateConnection ) {
    this.maxDateConnection = maxDateConnection;
  }
  /**
   * Gets the maximum date difference between start and end dates for row/record processing. This can be used for
   * auditing and throttling data during warehousing operations.
   *
   * @return the maximum date difference (in seconds -- TODO confirm unit against callers)
   */
  public double getMaxDateDifference() {
    return maxDateDifference;
  }
  /**
   * Sets the maximum date difference between start and end dates for row/record processing.
   *
   * @param maxDateDifference
   *          The date difference to set.
   * @see #getMaxDateDifference()
   */
  public void setMaxDateDifference( double maxDateDifference ) {
    this.maxDateDifference = maxDateDifference;
  }
  /**
   * Gets the date field associated with "max date" processing. This allows for the filtering of the number of rows to
   * process in a transformation by time, such as only processing the rows/records since the last time the
   * transformation ran correctly. This can be used for auditing and throttling data during warehousing operations.
   *
   * @return the name of the "max date" field on the configured connection/table
   * @see #getMaxDateConnection()
   */
  public String getMaxDateField() {
    return maxDateField;
  }
  /**
   * Sets the date field associated with "max date" processing.
   *
   * @param maxDateField
   *          The date field to set.
   * @see #getMaxDateField()
   */
  public void setMaxDateField( String maxDateField ) {
    this.maxDateField = maxDateField;
  }
  /**
   * Gets the amount by which to increase the "max date" difference. This is used in "max date" processing, and can be
   * used to provide more fine-grained control of the date range. For example, if the end date specifies a minute for
   * which the data is not complete, you can "roll-back" the end date by one minute by setting the offset to -60.
   *
   * @return Returns the maxDateOffset.
   * @see #setMaxDateOffset(double)
   */
  public double getMaxDateOffset() {
    return maxDateOffset;
  }
  /**
   * Sets the amount by which to increase the end date in "max date" processing. This can be used to provide more
   * fine-grained control of the date range. For example, if the end date specifies a minute for which the data is not
   * complete, you can "roll-back" the end date by one minute by setting the offset to -60.
   *
   * @param maxDateOffset
   *          The maxDateOffset to set.
   */
  public void setMaxDateOffset( double maxDateOffset ) {
    this.maxDateOffset = maxDateOffset;
  }
  /**
   * Gets the database table providing a date to be used in "max date" processing. This allows for the filtering of the
   * number of rows to process in a transformation by time, such as only processing the rows/records since the last time
   * the transformation ran correctly.
   *
   * @return the name of the "max date" table
   * @see #getMaxDateConnection()
   */
  public String getMaxDateTable() {
    return maxDateTable;
  }
  /**
   * Sets the table name associated with "max date" processing.
   *
   * @param maxDateTable
   *          The maxDateTable to set.
   * @see #getMaxDateTable()
   */
  public void setMaxDateTable( String maxDateTable ) {
    this.maxDateTable = maxDateTable;
  }
/**
* Gets the size of the rowsets.
*
* @return Returns the size of the rowsets.
*/
public int getSizeRowset() {
String rowSetSize = getVariable( Const.KETTLE_TRANS_ROWSET_SIZE );
int altSize = Const.toInt( rowSetSize, 0 );
if ( altSize > 0 ) {
return altSize;
} else {
return sizeRowset;
}
}
  /**
   * Sets the size of the rowsets. This method allows you to change the size of the buffers between the connected steps
   * in a transformation. <b>NOTE:</b> Do not change this parameter unless you are running low on memory, for example.
   *
   * @param sizeRowset
   *          The sizeRowset to set.
   */
  public void setSizeRowset( int sizeRowset ) {
    this.sizeRowset = sizeRowset;
  }
  /**
   * Gets the database cache object.
   *
   * @return the database cache object.
   */
  public DBCache getDbCache() {
    return dbCache;
  }
  /**
   * Sets the database cache object.
   *
   * @param dbCache
   *          the database cache object to set
   */
  public void setDbCache( DBCache dbCache ) {
    this.dbCache = dbCache;
  }
  /**
   * Gets the version of the transformation.
   *
   * @return The version of the transformation
   */
  public String getTransversion() {
    // Legacy snake_case field name, kept as-is for historical reasons.
    return trans_version;
  }
  /**
   * Sets the version of the transformation.
   *
   * @param n
   *          The new version description of the transformation
   */
  public void setTransversion( String n ) {
    trans_version = n;
  }
  /**
   * Sets the status of the transformation.
   *
   * @param n
   *          The new status of the transformation
   */
  public void setTransstatus( int n ) {
    trans_status = n;
  }
  /**
   * Gets the status of the transformation.
   *
   * @return The status of the transformation
   */
  public int getTransstatus() {
    return trans_status;
  }
/**
* Gets a textual representation of the transformation. If its name has been set, it will be returned, otherwise the
* classname is returned.
*
* @return the textual representation of the transformation.
*/
@Override
public String toString() {
if ( !Utils.isEmpty( filename ) ) {
if ( Utils.isEmpty( name ) ) {
return filename;
} else {
return filename + " : " + name;
}
}
if ( name != null ) {
if ( directory != null ) {
String path = directory.getPath();
if ( path.endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) {
return path + name;
} else {
return path + RepositoryDirectory.DIRECTORY_SEPARATOR + name;
}
} else {
return name;
}
} else {
return TransMeta.class.getName();
}
}
  /**
   * Cancels any database queries that the steps opened for checking and field prediction.
   * <p>
   * Delegates to every step's meta interface so each step can cancel its own outstanding queries.
   *
   * @throws KettleDatabaseException
   *           if any errors occur during query cancellation
   */
  public void cancelQueries() throws KettleDatabaseException {
    for ( int i = 0; i < nrSteps(); i++ ) {
      getStep( i ).getStepMetaInterface().cancelQueries();
    }
  }
/**
* Gets the arguments (and their values) used by this transformation. If argument values are supplied by parameter,
* the values will used for the arguments. If the values are null or empty, the method will attempt to use argument
* values from a previous execution.
*
* @param arguments
* the values for the arguments
* @return A row with the used arguments (and their values) in it.
*/
public Map<String, String> getUsedArguments( String[] arguments ) {
Map<String, String> transArgs = new HashMap<>();
for ( int i = 0; i < nrSteps(); i++ ) {
StepMetaInterface smi = getStep( i ).getStepMetaInterface();
Map<String, String> stepArgs = smi.getUsedArguments(); // Get the command line arguments that this step uses.
if ( stepArgs != null ) {
transArgs.putAll( stepArgs );
}
}
// OK, so perhaps, we can use the arguments from a previous execution?
String[] saved = Props.isInitialized() ? Props.getInstance().getLastArguments() : null;
// Set the default values on it...
// Also change the name to "Argument 1" .. "Argument 10"
//
for ( String argument : transArgs.keySet() ) {
String value = "";
int argNr = Const.toInt( argument, -1 );
if ( arguments != null && argNr > 0 && argNr <= arguments.length ) {
value = Const.NVL( arguments[argNr - 1], "" );
}
if ( value.length() == 0 ) { // try the saved option...
if ( argNr > 0 && argNr < saved.length && saved[argNr] != null ) {
value = saved[argNr - 1];
}
}
transArgs.put( argument, value );
}
return transArgs;
}
  /**
   * Gets the amount of time (in nano-seconds) to wait while the input buffer is empty.
   *
   * @return the number of nano-seconds to wait while the input buffer is empty.
   * @see #setSleepTimeEmpty(int)
   */
  public int getSleepTimeEmpty() {
    return sleepTimeEmpty;
  }
  /**
   * Gets the amount of time (in nano-seconds) to wait while the input buffer is full.
   *
   * @return the number of nano-seconds to wait while the input buffer is full.
   * @see #setSleepTimeFull(int)
   */
  public int getSleepTimeFull() {
    return sleepTimeFull;
  }
  /**
   * Sets the amount of time (in nano-seconds) to wait while the input buffer is empty.
   *
   * @param sleepTimeEmpty
   *          the number of nano-seconds to wait while the input buffer is empty.
   */
  public void setSleepTimeEmpty( int sleepTimeEmpty ) {
    this.sleepTimeEmpty = sleepTimeEmpty;
  }
  /**
   * Sets the amount of time (in nano-seconds) to wait while the input buffer is full.
   *
   * @param sleepTimeFull
   *          the number of nano-seconds to wait while the input buffer is full.
   */
  public void setSleepTimeFull( int sleepTimeFull ) {
    this.sleepTimeFull = sleepTimeFull;
  }
/**
* This method asks all steps in the transformation whether or not the specified database connection is used. The
* connection is used in the transformation if any of the steps uses it or if it is being used to log to.
*
* @param databaseMeta
* The connection to check
* @return true if the connection is used in this transformation.
*/
public boolean isDatabaseConnectionUsed( DatabaseMeta databaseMeta ) {
for ( int i = 0; i < nrSteps(); i++ ) {
StepMeta stepMeta = getStep( i );
DatabaseMeta[] dbs = stepMeta.getStepMetaInterface().getUsedDatabaseConnections();
for ( int d = 0; d < dbs.length; d++ ) {
if ( dbs[d].equals( databaseMeta ) ) {
return true;
}
}
}
if ( transLogTable.getDatabaseMeta() != null && transLogTable.getDatabaseMeta().equals( databaseMeta ) ) {
return true;
}
return false;
}
  /**
   * Gets a list of all the strings used in this transformation. The parameters indicate which collections to search and
   * which to exclude.
   *
   * @param searchSteps
   *          true if steps should be searched, false otherwise
   * @param searchDatabases
   *          true if databases should be searched, false otherwise
   * @param searchNotes
   *          true if notes should be searched, false otherwise
   * @param includePasswords
   *          true if database passwords should be included in the results, false otherwise
   * @return a list of search results for strings used in the transformation.
   */
  public List<StringSearchResult> getStringList( boolean searchSteps, boolean searchDatabases, boolean searchNotes,
    boolean includePasswords ) {
    List<StringSearchResult> stringList = new ArrayList<>();
    if ( searchSteps ) {
      // Loop over all steps in the transformation and see what the used vars are...
      for ( int i = 0; i < nrSteps(); i++ ) {
        StepMeta stepMeta = getStep( i );
        stringList.add( new StringSearchResult( stepMeta.getName(), stepMeta, this,
          BaseMessages.getString( PKG, "TransMeta.SearchMetadata.StepName" ) ) );
        if ( stepMeta.getDescription() != null ) {
          stringList.add( new StringSearchResult( stepMeta.getDescription(), stepMeta, this,
            BaseMessages.getString( PKG, "TransMeta.SearchMetadata.StepDescription" ) ) );
        }
        StepMetaInterface metaInterface = stepMeta.getStepMetaInterface();
        StringSearcher.findMetaData( metaInterface, 1, stringList, stepMeta, this );
      }
    }
    // Loop over all database connections and collect their metadata strings...
    if ( searchDatabases ) {
      for ( int i = 0; i < nrDatabases(); i++ ) {
        DatabaseMeta meta = getDatabase( i );
        stringList.add( new StringSearchResult( meta.getName(), meta, this,
          BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseConnectionName" ) ) );
        if ( meta.getHostname() != null ) {
          stringList.add( new StringSearchResult( meta.getHostname(), meta, this,
            BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseHostName" ) ) );
        }
        if ( meta.getDatabaseName() != null ) {
          stringList.add( new StringSearchResult( meta.getDatabaseName(), meta, this,
            BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseName" ) ) );
        }
        if ( meta.getUsername() != null ) {
          stringList.add( new StringSearchResult( meta.getUsername(), meta, this,
            BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseUsername" ) ) );
        }
        if ( meta.getPluginId() != null ) {
          stringList.add( new StringSearchResult( meta.getPluginId(), meta, this,
            BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseTypeDescription" ) ) );
        }
        if ( meta.getDatabasePortNumberString() != null ) {
          stringList.add( new StringSearchResult( meta.getDatabasePortNumberString(), meta, this,
            BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabasePort" ) ) );
        }
        if ( meta.getServername() != null ) {
          stringList.add( new StringSearchResult( meta.getServername(), meta, this,
            BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseServer" ) ) );
        }
        // Passwords are only exposed when the caller explicitly asks for them.
        if ( includePasswords ) {
          if ( meta.getPassword() != null ) {
            stringList.add( new StringSearchResult( meta.getPassword(), meta, this,
              BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabasePassword" ) ) );
          }
        }
      }
    }
    // Loop over all notes and collect their text...
    if ( searchNotes ) {
      for ( int i = 0; i < nrNotes(); i++ ) {
        NotePadMeta meta = getNote( i );
        if ( meta.getNote() != null ) {
          stringList.add( new StringSearchResult( meta.getNote(), meta, this,
            BaseMessages.getString( PKG, "TransMeta.SearchMetadata.NotepadText" ) ) );
        }
      }
    }
    return stringList;
  }
  /**
   * Get a list of all the strings used in this transformation. The parameters indicate which collections to search and
   * which to exclude. Database passwords are never included by this overload.
   *
   * @param searchSteps
   *          true if steps should be searched, false otherwise
   * @param searchDatabases
   *          true if databases should be searched, false otherwise
   * @param searchNotes
   *          true if notes should be searched, false otherwise
   * @return a list of search results for strings used in the transformation.
   * @see #getStringList(boolean, boolean, boolean, boolean)
   */
  public List<StringSearchResult> getStringList( boolean searchSteps, boolean searchDatabases, boolean searchNotes ) {
    return getStringList( searchSteps, searchDatabases, searchNotes, false );
  }
/**
* Gets a list of the used variables in this transformation.
*
* @return a list of the used variables in this transformation.
*/
public List<String> getUsedVariables() {
// Get the list of Strings.
List<StringSearchResult> stringList = getStringList( true, true, false, true );
List<String> varList = new ArrayList<>();
// Look around in the strings, see what we find...
for ( int i = 0; i < stringList.size(); i++ ) {
StringSearchResult result = stringList.get( i );
StringUtil.getUsedVariables( result.getString(), varList, false );
}
return varList;
}
  /**
   * Gets the previous result.
   *
   * @return the previous Result.
   * @deprecated this was moved to Trans to keep the metadata stateless
   */
  @Deprecated
  public Result getPreviousResult() {
    return previousResult;
  }
  /**
   * Sets the previous result.
   *
   * @param previousResult
   *          The previous Result to set.
   * @deprecated this was moved to Trans to keep the metadata stateless
   */
  @Deprecated
  public void setPreviousResult( Result previousResult ) {
    this.previousResult = previousResult;
  }
  /**
   * Gets a list of the files in the result.
   *
   * @return a list of ResultFiles.
   *
   * @deprecated this was moved to Trans to keep the metadata stateless
   */
  @Deprecated
  public List<ResultFile> getResultFiles() {
    return resultFiles;
  }
  /**
   * Sets the list of the files in the result.
   *
   * @param resultFiles
   *          The list of ResultFiles to set.
   * @deprecated this was moved to Trans to keep the metadata stateless
   */
  @Deprecated
  public void setResultFiles( List<ResultFile> resultFiles ) {
    this.resultFiles = resultFiles;
  }
  /**
   * Gets the list of partition schemas for this transformation. Note that the internal list itself is returned,
   * not a copy.
   *
   * @return a list of PartitionSchemas
   */
  public List<PartitionSchema> getPartitionSchemas() {
    return partitionSchemas;
  }
  /**
   * Sets the list of partition schemas for this transformation.
   *
   * @param partitionSchemas
   *          the list of PartitionSchemas to set
   */
  public void setPartitionSchemas( List<PartitionSchema> partitionSchemas ) {
    this.partitionSchemas = partitionSchemas;
  }
/**
* Gets the partition schemas' names.
*
* @return a String array containing the available partition schema names.
*/
public String[] getPartitionSchemasNames() {
String[] names = new String[partitionSchemas.size()];
for ( int i = 0; i < names.length; i++ ) {
names[i] = partitionSchemas.get( i ).getName();
}
return names;
}
  /**
   * Checks whether feedback is shown.
   *
   * @return true if feedback is shown, false otherwise
   */
  public boolean isFeedbackShown() {
    return feedbackShown;
  }
  /**
   * Sets whether the feedback should be shown.
   *
   * @param feedbackShown
   *          true if feedback should be shown, false otherwise
   */
  public void setFeedbackShown( boolean feedbackShown ) {
    this.feedbackShown = feedbackShown;
  }
  /**
   * Gets the feedback size.
   *
   * @return the feedback size
   */
  public int getFeedbackSize() {
    return feedbackSize;
  }
  /**
   * Sets the feedback size.
   *
   * @param feedbackSize
   *          the feedback size to set
   */
  public void setFeedbackSize( int feedbackSize ) {
    this.feedbackSize = feedbackSize;
  }
  /**
   * Checks if the transformation is using unique database connections.
   *
   * @return true if the transformation is using unique database connections, false otherwise
   */
  public boolean isUsingUniqueConnections() {
    return usingUniqueConnections;
  }
  /**
   * Sets whether the transformation is using unique database connections.
   *
   * @param usingUniqueConnections
   *          true if the transformation is using unique database connections, false otherwise
   */
  public void setUsingUniqueConnections( boolean usingUniqueConnections ) {
    this.usingUniqueConnections = usingUniqueConnections;
  }
  /**
   * Gets the list of cluster schemas used by the transformation. Note that the internal list itself is returned,
   * not a copy.
   *
   * @return a list of ClusterSchemas
   */
  public List<ClusterSchema> getClusterSchemas() {
    return clusterSchemas;
  }
  /**
   * Sets the list of the cluster schemas used by the transformation.
   *
   * @param clusterSchemas
   *          the list of ClusterSchemas to set
   */
  public void setClusterSchemas( List<ClusterSchema> clusterSchemas ) {
    this.clusterSchemas = clusterSchemas;
  }
/**
* Gets the cluster schema names.
*
* @return a String array containing the cluster schemas' names
*/
public String[] getClusterSchemaNames() {
String[] names = new String[clusterSchemas.size()];
for ( int i = 0; i < names.length; i++ ) {
names[i] = clusterSchemas.get( i ).getName();
}
return names;
}
/**
* Find a partition schema using its name.
*
* @param name
* The name of the partition schema to look for.
* @return the partition with the specified name of null if nothing was found
*/
public PartitionSchema findPartitionSchema( String name ) {
for ( int i = 0; i < partitionSchemas.size(); i++ ) {
PartitionSchema schema = partitionSchemas.get( i );
if ( schema.getName().equalsIgnoreCase( name ) ) {
return schema;
}
}
return null;
}
/**
* Find a clustering schema using its name.
*
* @param name
* The name of the clustering schema to look for.
* @return the cluster schema with the specified name of null if nothing was found
*/
public ClusterSchema findClusterSchema( String name ) {
for ( int i = 0; i < clusterSchemas.size(); i++ ) {
ClusterSchema schema = clusterSchemas.get( i );
if ( schema.getName().equalsIgnoreCase( name ) ) {
return schema;
}
}
return null;
}
/**
* Add a new partition schema to the transformation if that didn't exist yet. Otherwise, replace it.
*
* @param partitionSchema
* The partition schema to be added.
*/
public void addOrReplacePartitionSchema( PartitionSchema partitionSchema ) {
int index = partitionSchemas.indexOf( partitionSchema );
if ( index < 0 ) {
partitionSchemas.add( partitionSchema );
} else {
PartitionSchema previous = partitionSchemas.get( index );
previous.replaceMeta( partitionSchema );
}
setChanged();
}
/**
* Add a new cluster schema to the transformation if that didn't exist yet. Otherwise, replace it.
*
* @param clusterSchema
* The cluster schema to be added.
*/
public void addOrReplaceClusterSchema( ClusterSchema clusterSchema ) {
int index = clusterSchemas.indexOf( clusterSchema );
if ( index < 0 ) {
clusterSchemas.add( clusterSchema );
} else {
ClusterSchema previous = clusterSchemas.get( index );
previous.replaceMeta( clusterSchema );
}
setChanged();
}
  /**
   * Collects all shared objects of this transformation: whatever the superclass shares, plus the steps, the
   * partition schemas and the cluster schemas.
   *
   * @return the combined list of shared objects
   */
  protected List<SharedObjectInterface> getAllSharedObjects() {
    List<SharedObjectInterface> shared = super.getAllSharedObjects();
    shared.addAll( steps );
    shared.addAll( partitionSchemas );
    shared.addAll( clusterSchemas );
    return shared;
  }
  /**
   * Checks whether the transformation is using thread priority management.
   * <p>
   * NOTE(review): "Managment" is a misspelling of "Management"; the method is public API, so the name is kept
   * for backward compatibility.
   *
   * @return true if the transformation is using thread priority management, false otherwise
   */
  public boolean isUsingThreadPriorityManagment() {
    return usingThreadPriorityManagment;
  }
  /**
   * Sets whether the transformation is using thread priority management.
   * <p>
   * NOTE(review): "Managment" is a misspelling of "Management"; the method is public API, so the name is kept
   * for backward compatibility.
   *
   * @param usingThreadPriorityManagment
   *          true if the transformation is using thread priority management, false otherwise
   */
  public void setUsingThreadPriorityManagment( boolean usingThreadPriorityManagment ) {
    this.usingThreadPriorityManagment = usingThreadPriorityManagment;
  }
  /**
   * Check a step to see if there are no multiple steps to read from. If so, check to see if the receiving rows are all
   * the same in layout. We only want to ONLY use the DBCache for this to prevent GUI stalls.
   *
   * @param stepMeta
   *          the step to check
   * @param monitor
   *          the monitor
   * @throws KettleRowException
   *           in case we detect a row mixing violation
   */
  public void checkRowMixingStatically( StepMeta stepMeta, ProgressMonitorListener monitor ) throws KettleRowException {
    int nrPrevious = findNrPrevSteps( stepMeta );
    if ( nrPrevious > 1 ) {
      RowMetaInterface referenceRow = null;
      // See if all previous steps send out the same rows...
      // The first resolvable row layout becomes the reference; the rest are compared against it.
      for ( int i = 0; i < nrPrevious; i++ ) {
        StepMeta previousStep = findPrevStep( stepMeta, i );
        try {
          RowMetaInterface row = getStepFields( previousStep, monitor ); // Throws KettleStepException
          if ( referenceRow == null ) {
            referenceRow = row;
          } else if ( !stepMeta.getStepMetaInterface().excludeFromRowLayoutVerification() ) {
            // safeModeChecking throws KettleRowException on a layout mismatch.
            BaseStep.safeModeChecking( referenceRow, row );
          }
        } catch ( KettleStepException e ) {
          // We ignore this one because we are in the process of designing the transformation, anything intermediate can
          // go wrong.
        }
      }
    }
  }
  /**
   * Sets the internal kettle variables for this transformation: filename, name, repository directory, and
   * placeholder values for the parent-job variables when those are not set yet.
   *
   * @param var
   *          the new internal kettle variables
   */
  @Override
  public void setInternalKettleVariables( VariableSpace var ) {
    setInternalFilenameKettleVariables( var );
    setInternalNameKettleVariable( var );
    // The name of the directory in the repository
    //
    variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY,
      directory != null ? directory.getPath() : "" );
    boolean hasRepoDir = getRepositoryDirectory() != null && getRepository() != null;
    if ( hasRepoDir ) {
      variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY,
        variables.getVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY ) );
    } else {
      // NOTE(review): the file-based case mirrors the filename directory into the repository-directory
      // variable, overwriting the value set just above — presumably intentional so both resolve; confirm.
      variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY,
        variables.getVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) );
    }
    // Here we don't remove the job specific parameters, as they may come in handy.
    // Unset parent-job variables get readable placeholder values instead.
    //
    if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) == null ) {
      variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "Parent Job File Directory" );
    }
    if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME ) == null ) {
      variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "Parent Job Filename" );
    }
    if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_NAME ) == null ) {
      variables.setVariable( Const.INTERNAL_VARIABLE_JOB_NAME, "Parent Job Name" );
    }
    if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY ) == null ) {
      variables.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, "Parent Job Repository Directory" );
    }
    // The "current directory" depends on whether we are repository- or file-based.
    variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY,
      variables.getVariable( repository != null ? Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY
        : Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) );
  }
  /**
   * Sets the internal name kettle variable from this transformation's name.
   *
   * @param var
   *          the variable space (unused here: the value is written to the internal {@code variables} field)
   */
  @Override
  protected void setInternalNameKettleVariable( VariableSpace var ) {
    // The name of the transformation
    //
    variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_NAME, Const.NVL( name, "" ) );
  }
  /**
   * Sets the internal filename kettle variables (base name and directory) from this transformation's filename,
   * and derives the "current directory" variable from the repository or filename directory.
   *
   * @param var
   *          the variable space used to resolve the filename
   */
  @Override
  protected void setInternalFilenameKettleVariables( VariableSpace var ) {
    // If we have a filename that's defined, set variables. If not, clear them.
    //
    if ( !Utils.isEmpty( filename ) ) {
      try {
        FileObject fileObject = KettleVFS.getFileObject( filename, var );
        FileName fileName = fileObject.getName();
        // The filename of the transformation
        variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, fileName.getBaseName() );
        // The directory of the transformation
        FileName fileDir = fileName.getParent();
        variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, fileDir.getURI() );
      } catch ( KettleFileException e ) {
        // Resolution failed: log and clear both variables so no stale values remain.
        log.logError( "Unexpected error setting internal filename variables!", e );
        variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" );
        variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" );
      }
    } else {
      variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" );
      variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" );
    }
    // The "current directory" depends on whether we are repository- or file-based.
    variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY,
      variables.getVariable( repository != null ? Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY
        : Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) );
  }
/**
* Finds the mapping input step with the specified name. If no mapping input step is found, null is returned
*
* @param stepname
* the name to search for
* @return the step meta-data corresponding to the desired mapping input step, or null if no step was found
* @throws KettleStepException
* if any errors occur during the search
*/
public StepMeta findMappingInputStep( String stepname ) throws KettleStepException {
if ( !Utils.isEmpty( stepname ) ) {
StepMeta stepMeta = findStep( stepname ); // TODO verify that it's a mapping input!!
if ( stepMeta == null ) {
throw new KettleStepException( BaseMessages.getString(
PKG, "TransMeta.Exception.StepNameNotFound", stepname ) );
}
return stepMeta;
} else {
// Find the first mapping input step that fits the bill.
StepMeta stepMeta = null;
for ( StepMeta mappingStep : steps ) {
if ( mappingStep.getStepID().equals( "MappingInput" ) ) {
if ( stepMeta == null ) {
stepMeta = mappingStep;
} else if ( stepMeta != null ) {
throw new KettleStepException( BaseMessages.getString(
PKG, "TransMeta.Exception.OnlyOneMappingInputStepAllowed", "2" ) );
}
}
}
if ( stepMeta == null ) {
throw new KettleStepException( BaseMessages.getString(
PKG, "TransMeta.Exception.OneMappingInputStepRequired" ) );
}
return stepMeta;
}
}
/**
* Finds the mapping output step with the specified name. If no mapping output step is found, null is returned.
*
* @param stepname
* the name to search for
* @return the step meta-data corresponding to the desired mapping input step, or null if no step was found
* @throws KettleStepException
* if any errors occur during the search
*/
public StepMeta findMappingOutputStep( String stepname ) throws KettleStepException {
if ( !Utils.isEmpty( stepname ) ) {
StepMeta stepMeta = findStep( stepname ); // TODO verify that it's a mapping output step.
if ( stepMeta == null ) {
throw new KettleStepException( BaseMessages.getString(
PKG, "TransMeta.Exception.StepNameNotFound", stepname ) );
}
return stepMeta;
} else {
// Find the first mapping output step that fits the bill.
StepMeta stepMeta = null;
for ( StepMeta mappingStep : steps ) {
if ( mappingStep.getStepID().equals( "MappingOutput" ) ) {
if ( stepMeta == null ) {
stepMeta = mappingStep;
} else if ( stepMeta != null ) {
throw new KettleStepException( BaseMessages.getString(
PKG, "TransMeta.Exception.OnlyOneMappingOutputStepAllowed", "2" ) );
}
}
}
if ( stepMeta == null ) {
throw new KettleStepException( BaseMessages.getString(
PKG, "TransMeta.Exception.OneMappingOutputStepRequired" ) );
}
return stepMeta;
}
}
/**
* Gets a list of the resource dependencies.
*
* @return a list of ResourceReferences
*/
public List<ResourceReference> getResourceDependencies() {
List<ResourceReference> resourceReferences = new ArrayList<>();
for ( StepMeta stepMeta : steps ) {
resourceReferences.addAll( stepMeta.getResourceDependencies( this ) );
}
return resourceReferences;
}
  /**
   * Exports the specified objects to a flat-file system, adding content with filename keys to a set of definitions. The
   * supplied resource naming interface allows the object to name appropriately without worrying about those parts of
   * the implementation specific details.
   *
   * @param space
   *          the variable space to use
   * @param definitions
   *          the map of resource definitions, keyed by resource name, that this export adds to
   * @param resourceNamingInterface
   *          the naming strategy used to generate the export file name
   * @param repository
   *          The repository to optionally load other resources from (to be converted to XML)
   * @param metaStore
   *          the metaStore in which non-kettle metadata could reside.
   *
   * @return the filename of the exported resource
   */
  @Override
  public String exportResources( VariableSpace space, Map<String, ResourceDefinition> definitions,
    ResourceNamingInterface resourceNamingInterface, Repository repository, IMetaStore metaStore ) throws KettleException {
    try {
      // Handle naming for both repository and XML bases resources...
      //
      String baseName;
      String originalPath;
      String fullname;
      String extension = "ktr";
      if ( Utils.isEmpty( getFilename() ) ) {
        // Assume repository...
        //
        originalPath = directory.getPath();
        baseName = getName();
        fullname =
          directory.getPath()
            + ( directory.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR )
            ? "" : RepositoryDirectory.DIRECTORY_SEPARATOR ) + getName() + "." + extension; //
      } else {
        // Assume file
        //
        FileObject fileObject = KettleVFS.getFileObject( space.environmentSubstitute( getFilename() ), space );
        originalPath = fileObject.getParent().getURL().toString();
        baseName = fileObject.getName().getBaseName();
        fullname = fileObject.getURL().toString();
      }
      String
        exportFileName =
          resourceNamingInterface
            .nameResource( baseName, originalPath, extension, ResourceNamingInterface.FileNamingType.TRANSFORMATION );
      ResourceDefinition definition = definitions.get( exportFileName );
      if ( definition == null ) {
        // If we do this once, it will be plenty :-)
        //
        TransMeta transMeta = (TransMeta) this.realClone( false );
        // transMeta.copyVariablesFrom(space);
        // Add used resources, modify transMeta accordingly
        // Go through the list of steps, etc.
        // These critters change the steps in the cloned TransMeta
        // At the end we make a new XML version of it in "exported"
        // format...
        // loop over steps, databases will be exported to XML anyway.
        //
        for ( StepMeta stepMeta : transMeta.getSteps() ) {
          stepMeta.exportResources( space, definitions, resourceNamingInterface, repository, metaStore );
        }
        // Change the filename, calling this sets internal variables
        // inside of the transformation.
        //
        transMeta.setFilename( exportFileName );
        // All objects get re-located to the root folder
        //
        transMeta.setRepositoryDirectory( new RepositoryDirectory() );
        // Set a number of parameters for all the data files referenced so far...
        //
        Map<String, String> directoryMap = resourceNamingInterface.getDirectoryMap();
        if ( directoryMap != null ) {
          for ( String directory : directoryMap.keySet() ) {
            String parameterName = directoryMap.get( directory );
            transMeta.addParameterDefinition( parameterName, directory, "Data file path discovered during export" );
          }
        }
        // At the end, add ourselves to the map...
        //
        String transMetaContent = transMeta.getXML();
        definition = new ResourceDefinition( exportFileName, transMetaContent );
        // Also remember the original filename (if any), including variables etc.
        //
        if ( Utils.isEmpty( this.getFilename() ) ) { // Repository
          definition.setOrigin( fullname );
        } else {
          definition.setOrigin( this.getFilename() );
        }
        // NOTE(review): the definition is stored under "fullname" but looked up above under "exportFileName";
        // when the two differ, repeated calls will re-export. Looks inconsistent — confirm intended before changing.
        definitions.put( fullname, definition );
      }
      return exportFileName;
    } catch ( FileSystemException e ) {
      throw new KettleException( BaseMessages.getString(
        PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", getFilename() ), e );
    } catch ( KettleFileException e ) {
      throw new KettleException( BaseMessages.getString(
        PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", getFilename() ), e );
    }
  }
  /**
   * Gets the slave step copy partition distribution.
   *
   * @return the SlaveStepCopyPartitionDistribution
   */
  public SlaveStepCopyPartitionDistribution getSlaveStepCopyPartitionDistribution() {
    return slaveStepCopyPartitionDistribution;
  }
  /**
   * Sets the slave step copy partition distribution.
   *
   * @param slaveStepCopyPartitionDistribution
   *          the slaveStepCopyPartitionDistribution to set
   */
  public void setSlaveStepCopyPartitionDistribution(
    SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution ) {
    this.slaveStepCopyPartitionDistribution = slaveStepCopyPartitionDistribution;
  }
/**
* Finds the first used cluster schema.
*
* @return the first used cluster schema
*/
public ClusterSchema findFirstUsedClusterSchema() {
for ( StepMeta stepMeta : steps ) {
if ( stepMeta.getClusterSchema() != null ) {
return stepMeta.getClusterSchema();
}
}
return null;
}
  /**
   * Checks whether the transformation is a slave transformation.
   *
   * @return true if the transformation is a slave transformation, false otherwise
   */
  public boolean isSlaveTransformation() {
    return slaveTransformation;
  }
  /**
   * Sets whether the transformation is a slave transformation.
   *
   * @param slaveTransformation
   *          true if the transformation is a slave transformation, false otherwise
   */
  public void setSlaveTransformation( boolean slaveTransformation ) {
    this.slaveTransformation = slaveTransformation;
  }
  /**
   * Checks whether the transformation is capturing step performance snapshots.
   *
   * @return true if the transformation is capturing step performance snapshots, false otherwise
   */
  public boolean isCapturingStepPerformanceSnapShots() {
    return capturingStepPerformanceSnapShots;
  }
  /**
   * Sets whether the transformation is capturing step performance snapshots.
   *
   * @param capturingStepPerformanceSnapShots
   *          true if the transformation is capturing step performance snapshots, false otherwise
   */
  public void setCapturingStepPerformanceSnapShots( boolean capturingStepPerformanceSnapShots ) {
    this.capturingStepPerformanceSnapShots = capturingStepPerformanceSnapShots;
  }
  /**
   * Gets the step performance capturing delay.
   *
   * @return the step performance capturing delay
   */
  public long getStepPerformanceCapturingDelay() {
    return stepPerformanceCapturingDelay;
  }
  /**
   * Sets the step performance capturing delay.
   *
   * @param stepPerformanceCapturingDelay
   *          the stepPerformanceCapturingDelay to set
   */
  public void setStepPerformanceCapturingDelay( long stepPerformanceCapturingDelay ) {
    this.stepPerformanceCapturingDelay = stepPerformanceCapturingDelay;
  }
  /**
   * Gets the step performance capturing size limit.
   *
   * @return the step performance capturing size limit
   */
  public String getStepPerformanceCapturingSizeLimit() {
    return stepPerformanceCapturingSizeLimit;
  }
  /**
   * Sets the step performance capturing size limit.
   *
   * @param stepPerformanceCapturingSizeLimit
   *          the step performance capturing size limit to set
   */
  public void setStepPerformanceCapturingSizeLimit( String stepPerformanceCapturingSizeLimit ) {
    this.stepPerformanceCapturingSizeLimit = stepPerformanceCapturingSizeLimit;
  }
  /**
   * Clears the step fields and loop caches.
   */
  public void clearCaches() {
    clearStepFieldsCachce();
    clearLoopCache();
  }
  /**
   * Clears the cached step fields.
   * <p>
   * NOTE(review): the method name misspells "Cache" as "Cachce"; it is private, so it could be renamed once
   * all call sites in this class are updated together.
   */
  private void clearStepFieldsCachce() {
    stepsFieldsCache.clear();
  }
  /**
   * Clears the loop cache.
   */
  private void clearLoopCache() {
    loopCache.clear();
  }
/**
 * Gets the repository element type for transformations.
 *
 * @return the constant REPOSITORY_ELEMENT_TYPE
 * @see org.pentaho.di.repository.RepositoryElementInterface#getRepositoryElementType()
 */
@Override
public RepositoryObjectType getRepositoryElementType() {
  return REPOSITORY_ELEMENT_TYPE;
}

/**
 * Gets the log channel of this transformation metadata object.
 *
 * @return the log channel
 */
public LogChannelInterface getLogChannel() {
  return log;
}

/**
 * Gets the log channel ID, delegating to the log channel.
 *
 * @return the log channel ID
 * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId()
 */
@Override
public String getLogChannelId() {
  return log.getLogChannelId();
}

/**
 * Gets the logging object type, which is always TRANSMETA for this class.
 *
 * @return LoggingObjectType.TRANSMETA
 * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType()
 */
@Override
public LoggingObjectType getObjectType() {
  return LoggingObjectType.TRANSMETA;
}
/**
 * Gets the log table for the transformation.
 *
 * @return the log table for the transformation
 */
public TransLogTable getTransLogTable() {
  return transLogTable;
}

/**
 * Sets the log table for the transformation.
 *
 * @param transLogTable
 *          the transformation log table to set
 */
public void setTransLogTable( TransLogTable transLogTable ) {
  this.transLogTable = transLogTable;
}

/**
 * Gets the performance log table for the transformation.
 *
 * @return the performance log table for the transformation
 */
public PerformanceLogTable getPerformanceLogTable() {
  return performanceLogTable;
}

/**
 * Sets the performance log table for the transformation.
 *
 * @param performanceLogTable
 *          the performance log table to set
 */
public void setPerformanceLogTable( PerformanceLogTable performanceLogTable ) {
  this.performanceLogTable = performanceLogTable;
}

/**
 * Gets the step log table for the transformation.
 *
 * @return the step log table for the transformation
 */
public StepLogTable getStepLogTable() {
  return stepLogTable;
}

/**
 * Sets the step log table for the transformation.
 *
 * @param stepLogTable
 *          the step log table to set
 */
public void setStepLogTable( StepLogTable stepLogTable ) {
  this.stepLogTable = stepLogTable;
}
/**
 * Gets a list of the log tables (transformation, step, performance, channel, metrics)
 * configured for this transformation.
 *
 * @return a new, mutable list of the five LogTableInterface instances, in the order:
 *         transformation, step, performance, channel, metrics
 */
public List<LogTableInterface> getLogTables() {
  final List<LogTableInterface> tables = new ArrayList<>( 5 );
  tables.add( transLogTable );
  tables.add( stepLogTable );
  tables.add( performanceLogTable );
  tables.add( channelLogTable );
  tables.add( metricsLogTable );
  return tables;
}
/**
 * Gets the transformation type.
 *
 * @return the transformationType
 */
public TransformationType getTransformationType() {
  return transformationType;
}

/**
 * Sets the transformation type.
 *
 * @param transformationType
 *          the transformationType to set
 */
public void setTransformationType( TransformationType transformationType ) {
  this.transformationType = transformationType;
}
/**
 * Utility method to write the XML of this transformation to a file, mostly for testing purposes.
 *
 * @param filename
 *          The filename to save to
 * @throws KettleXMLException
 *           in case something goes wrong (file cannot be written, or cannot be closed)
 */
public void writeXML( String filename ) throws KettleXMLException {
  FileOutputStream fos = null;
  try {
    fos = new FileOutputStream( filename );
    // XML declaration first, then the serialized transformation, both in Kettle's XML encoding.
    fos.write( XMLHandler.getXMLHeader().getBytes( Const.XML_ENCODING ) );
    fos.write( getXML().getBytes( Const.XML_ENCODING ) );
  } catch ( Exception e ) {
    throw new KettleXMLException( "Unable to save to XML file '" + filename + "'", e );
  } finally {
    if ( fos != null ) {
      try {
        fos.close();
      } catch ( IOException e ) {
        // NOTE(review): throwing from finally masks any exception raised in the try block
        // above; consider try-with-resources if the file's Java level allows it.
        throw new KettleXMLException( "Unable to close file '" + filename + "'", e );
      }
    }
  }
}
/**
 * Checks whether any step in this transformation references objects stored in a repository.
 *
 * @return true as soon as one step's meta interface reports repository references,
 *         false when none do
 */
public boolean hasRepositoryReferences() {
  for ( StepMeta step : steps ) {
    if ( step.getStepMetaInterface().hasRepositoryReferences() ) {
      // Short-circuit: one referencing step is enough.
      return true;
    }
  }
  return false;
}
/**
 * Resolves repository references in every step after a repository import.
 *
 * @param repository
 *          the repository to resolve references against
 * @throws KettleException
 *           if any step fails to look up its references
 */
public void lookupRepositoryReferences( Repository repository ) throws KettleException {
  for ( StepMeta step : steps ) {
    step.getStepMetaInterface().lookupRepositoryReferences( repository );
  }
}
/**
 * Gets the metrics log table for the transformation.
 *
 * @return the metricsLogTable
 */
public MetricsLogTable getMetricsLogTable() {
  return metricsLogTable;
}

/**
 * Sets the metrics log table for the transformation.
 *
 * @param metricsLogTable
 *          the metricsLogTable to set
 */
public void setMetricsLogTable( MetricsLogTable metricsLogTable ) {
  this.metricsLogTable = metricsLogTable;
}

/** @return whether metrics are being gathered; delegates to the log channel. */
@Override
public boolean isGatheringMetrics() {
  return log.isGatheringMetrics();
}

/** Delegates the gathering-metrics flag to the log channel. */
@Override
public void setGatheringMetrics( boolean gatheringMetrics ) {
  log.setGatheringMetrics( gatheringMetrics );
}

/** @return whether separate logging is forced; delegates to the log channel. */
@Override
public boolean isForcingSeparateLogging() {
  return log.isForcingSeparateLogging();
}

/** Delegates the forcing-separate-logging flag to the log channel. */
@Override
public void setForcingSeparateLogging( boolean forcingSeparateLogging ) {
  log.setForcingSeparateLogging( forcingSeparateLogging );
}
/**
 * Registers a listener that is notified when a step's metadata changes.
 *
 * @param listener
 *          the listener to append to the step-change listener list
 */
public void addStepChangeListener( StepMetaChangeListenerInterface listener ) {
  stepChangeListeners.add( listener );
}
/**
 * Registers a step-change listener, positioned relative to the step at index p: when the
 * step's meta interface is itself a registered listener, the new listener is inserted at
 * the position of the LAST matching entry; otherwise the listener is appended only when
 * the listener list is empty and p is 0.
 *
 * @param p
 *          index of the step whose meta interface is compared against registered listeners
 * @param list
 *          the listener to register
 */
public void addStepChangeListener( int p, StepMetaChangeListenerInterface list ) {
  StepMetaInterface iface = steps.get( p ).getStepMetaInterface();
  if ( iface instanceof StepMetaChangeListenerInterface ) {
    // Scan for the LAST registered listener equal to this step's meta interface.
    int insertAt = -1;
    for ( int i = 0; i < stepChangeListeners.size(); i++ ) {
      if ( stepChangeListeners.get( i ).equals( iface ) ) {
        insertAt = i;
      }
    }
    if ( insertAt >= 0 ) {
      stepChangeListeners.add( insertAt, list );
    } else if ( stepChangeListeners.size() == 0 && p == 0 ) {
      stepChangeListeners.add( list );
    }
  }
}
/**
 * Unregisters a step-change listener: removes the LAST registered listener equal to the
 * given one, or does nothing when no match is found.
 *
 * @param list
 *          the listener to remove
 */
public void removeStepChangeListener( StepMetaChangeListenerInterface list ) {
  int removeAt = -1;
  for ( int i = 0; i < stepChangeListeners.size(); i++ ) {
    if ( stepChangeListeners.get( i ).equals( list ) ) {
      removeAt = i;
    }
  }
  if ( removeAt >= 0 ) {
    stepChangeListeners.remove( removeAt );
  }
}
/**
 * Notifies every registered step-change listener that a step was replaced.
 *
 * @param oldMeta
 *          the step metadata before the change
 * @param newMeta
 *          the step metadata after the change
 */
public void notifyAllListeners( StepMeta oldMeta, StepMeta newMeta ) {
  for ( StepMetaChangeListenerInterface listener : stepChangeListeners ) {
    listener.onStepChange( this, oldMeta, newMeta );
  }
}

/**
 * Checks whether the given step metadata is part of this transformation.
 *
 * @param stepMeta
 *          the step metadata to look for
 * @return true if the step list contains the given step metadata
 */
public boolean containsStepMeta( StepMeta stepMeta ) {
  return steps.contains( stepMeta );
}

/**
 * Gets the list of missing-transformation placeholders.
 *
 * @return the list of MissingTrans entries, or null when none were ever recorded
 */
public List<MissingTrans> getMissingTrans() {
  return missingTrans;
}

/**
 * Records a missing-transformation placeholder, lazily creating the list on first use.
 *
 * @param trans
 *          the MissingTrans entry to record
 */
public void addMissingTrans( MissingTrans trans ) {
  if ( missingTrans == null ) {
    missingTrans = new ArrayList<>();
  }
  missingTrans.add( trans );
}
/**
 * Removes a previously recorded missing-transformation placeholder, if present.
 * List.remove() is already a no-op when the element is absent, so the former
 * contains() pre-check was redundant and traversed the list twice.
 *
 * @param trans
 *          the MissingTrans entry to remove; ignored when null or when no list exists
 */
public void removeMissingTrans( MissingTrans trans ) {
  if ( missingTrans != null && trans != null ) {
    missingTrans.remove( trans );
  }
}
/** @return true when at least one MissingTrans placeholder was recorded for this transformation. */
@Override
public boolean hasMissingPlugins() {
  return missingTrans != null && !missingTrans.isEmpty();
}

/**
 * Lazily creates and returns the NamedClusterEmbedManager for this transformation.
 * NOTE(review): the lazy initialization is not synchronized; confirm this is only called
 * from a single thread, or two managers could be created under concurrent access.
 *
 * @return the (possibly freshly created) NamedClusterEmbedManager
 */
@Override
public NamedClusterEmbedManager getNamedClusterEmbedManager( ) {
  if ( namedClusterEmbedManager == null ) {
    namedClusterEmbedManager = new NamedClusterEmbedManager( this, getLogChannel() );
  }
  return namedClusterEmbedManager;
}
}
|
TatsianaKasiankova/pentaho-kettle
|
engine/src/main/java/org/pentaho/di/trans/TransMeta.java
|
Java
|
apache-2.0
| 220,790
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_40) on Thu Apr 16 20:30:05 CDT 2015 -->
<title>org.jsimpledb.kv.simple (Java Class Library API)</title>
<meta name="date" content="2015-04-16">
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="org.jsimpledb.kv.simple (Java Class Library API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li class="navBarCell1Rev">Package</li>
<li>Class</li>
<li><a href="package-use.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../org/jsimpledb/kv/mvcc/package-summary.html">Prev Package</a></li>
<li><a href="../../../../org/jsimpledb/kv/sql/package-summary.html">Next Package</a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?org/jsimpledb/kv/simple/package-summary.html" target="_top">Frames</a></li>
<li><a href="package-summary.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h1 title="Package" class="title">Package org.jsimpledb.kv.simple</h1>
<div class="docSummary">
<div class="block">A simple in-memory implementation of the <a href="../../../../org/jsimpledb/kv/KVDatabase.html" title="interface in org.jsimpledb.kv"><code>KVDatabase</code></a> interface.</div>
</div>
<p>See: <a href="#package.description">Description</a></p>
</div>
<div class="contentContainer">
<ul class="blockList">
<li class="blockList">
<table class="typeSummary" border="0" cellpadding="3" cellspacing="0" summary="Class Summary table, listing classes, and an explanation">
<caption><span>Class Summary</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Class</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="../../../../org/jsimpledb/kv/simple/SimpleKVDatabase.html" title="class in org.jsimpledb.kv.simple">SimpleKVDatabase</a></td>
<td class="colLast">
<div class="block">Simple implementation of the <a href="../../../../org/jsimpledb/kv/KVDatabase.html" title="interface in org.jsimpledb.kv"><code>KVDatabase</code></a> interface that provides a concurrent, transactional view
of an underlying <a href="../../../../org/jsimpledb/kv/KVStore.html" title="interface in org.jsimpledb.kv"><code>KVStore</code></a> with strong ACID semantics (<b>D</b>urability must be provided by the <a href="../../../../org/jsimpledb/kv/KVStore.html" title="interface in org.jsimpledb.kv"><code>KVStore</code></a>).</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><a href="../../../../org/jsimpledb/kv/simple/SimpleKVTransaction.html" title="class in org.jsimpledb.kv.simple">SimpleKVTransaction</a></td>
<td class="colLast">
<div class="block"><a href="../../../../org/jsimpledb/kv/KVTransaction.html" title="interface in org.jsimpledb.kv"><code>KVTransaction</code></a> implementation for <a href="../../../../org/jsimpledb/kv/simple/SimpleKVDatabase.html" title="class in org.jsimpledb.kv.simple"><code>SimpleKVDatabase</code></a>.</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><a href="../../../../org/jsimpledb/kv/simple/XMLKVDatabase.html" title="class in org.jsimpledb.kv.simple">XMLKVDatabase</a></td>
<td class="colLast">
<div class="block">Simple persistent <a href="../../../../org/jsimpledb/kv/KVDatabase.html" title="interface in org.jsimpledb.kv"><code>KVDatabase</code></a> backed by an XML file stored in a <a href="http://dellroad-stuff.googlecode.com/svn/trunk/publish/reports/javadoc/org/dellroad/stuff/io/StreamRepository.html?is-external=true" title="class or interface in org.dellroad.stuff.io"><code>StreamRepository</code></a>.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><a href="../../../../org/jsimpledb/kv/simple/XMLKVTransaction.html" title="class in org.jsimpledb.kv.simple">XMLKVTransaction</a></td>
<td class="colLast">
<div class="block">Transaction associated with a <a href="../../../../org/jsimpledb/kv/simple/XMLKVDatabase.html" title="class in org.jsimpledb.kv.simple"><code>XMLKVDatabase</code></a>.</div>
</td>
</tr>
</tbody>
</table>
</li>
</ul>
<a name="package.description">
<!-- -->
</a>
<h2 title="Package org.jsimpledb.kv.simple Description">Package org.jsimpledb.kv.simple Description</h2>
<div class="block">A simple in-memory implementation of the <a href="../../../../org/jsimpledb/kv/KVDatabase.html" title="interface in org.jsimpledb.kv"><code>KVDatabase</code></a> interface.</div>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li class="navBarCell1Rev">Package</li>
<li>Class</li>
<li><a href="package-use.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../org/jsimpledb/kv/mvcc/package-summary.html">Prev Package</a></li>
<li><a href="../../../../org/jsimpledb/kv/sql/package-summary.html">Next Package</a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?org/jsimpledb/kv/simple/package-summary.html" target="_top">Frames</a></li>
<li><a href="package-summary.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
|
mmayorivera/jsimpledb
|
publish/reports/javadoc/org/jsimpledb/kv/simple/package-summary.html
|
HTML
|
apache-2.0
| 7,657
|
---
author: Sophie DeBenedetto
author_link: https://github.com/sophiedebenedetto
categories: general
tags: ['live view']
date: 2019-10-20
layout: post
title: Building a Table Sort UI with Live View's `live_link`
excerpt: >
We'll use LiveView's `live_link/2` together with the `handle_params/3` callback to allow users to sort a table in real-time.
---
LiveView makes it easy to solve for some of the most common UI challenges with little to no front-end code. It allows us to save JavaScript for the hard stuff––for complex and sophisticated UI changes. In building out a recent admin-facing view that included a table of student cohorts at the Flatiron School, I found myself reaching for LiveView. In just a few lines of backend code, my sortable table was up and running. Keep reading to see how you can leverage LiveView's `live_link/2` and `handle_params/3` to build out such a feature.
## The Feature
Our view presents a table of student cohorts that looks like this:

Users need to be able to sort this table by cohort name, campus, start date or status. We'd also like to ensure that the "sort by" attribute is included in the URL's query params, so that users can share links to sorted views.
Here's a look at the behavior we're going for. Note how the URL changes when we click on a given column heading to sort the table.
<iframe width="560" height="315" src="https://www.youtube.com/embed/-4VRaX1uEhk" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
## Using `live_link/2`
LiveView's [`live_link/2`](https://hexdocs.pm/phoenix_live_view/Phoenix.LiveView.html#module-live-navigation) function allows page navigation using the browser's [pushState API](https://developer.mozilla.org/en-US/docs/Web/API/History_API). This will ensure that that URL will change to include whatever parameters we include in a given `live_link/2` call.
One important thing to note before we proceed, however. In order to use the live navigation features, our live view needs to be mounted directly in the router, _not_ rendered from a controller action.
Our router mounts the live view like this:
```elixir
# lib/course_conductor_web/router.ex
scope "/", CourseConductorWeb do
pipe_through([:browser, :auth])
live "/cohorts", CohortsLive
end
```
And we're ready to get started!
We'll start by turning the `"Name"` table header into a live link.
```html
# lib/course_conductor_web/templates/cohorts/index.html.leex
<table>
<th><%= live_link "Name", to: Routes.live_path(@socket, CourseConductorWeb.CohortsLive, %{sort_by: "name"}) %></th>
...
</table>
```
The `live_link/2` function generates a live link for HTML5 pushState based navigation *without* page reloads.
With the help of the `Routes.live_path` helper, we're generating the following live link: `"/cohorts?sort_by=name"`. Since this route belongs to the `CohortsLive` live view that we've already mounted, _and_ since that live view is defined in our router (as opposed to rendered from a controller action), this means we will invoke our existing live view's `handle_params/3` function _without mounting a new LiveView_. Pretty cool!
Let's take a look at how we can implement a `handle_params/3` function now.
## Implementing `handle_params/3`
The `handle_params/3` callback is invoked under two circumstances.
* After `mount/2` is called (i.e. when the live view first renders)
* When a live navigation event, like a live link click, occurs. This second circumstance only triggers this callback when, as described above, the live view we are linking to is the same live view we are currently on _and_ the LiveView is defined in the router.
`handle_params/3` receives three arguments:
* The query parameters
* The requested url
* The socket
We can use `handle_params/3` to update socket state and therefore trigger a server re-render of the template.
Given that `handle_params/3` will be invoked by our live view whenever our `"Name"` live link is clicked, we need to implement this function in our live view to match and act on the `sort_by` params our live link will send.
Assuming we have the following live view that mounts and renders a list of cohorts:
```elixir
# lib/course_conductor_web/live/cohorts_live.ex
defmodule CourseConductorWeb.CohortsLive do
use Phoenix.LiveView
def render(assigns) do
Phoenix.View.render(CourseConductorWeb.CohortView, "index.html", assigns)
end
def mount(_, socket) do
cohorts = Cohort.all_cohorts()
{:ok, assign(socket, cohorts: cohorts)}
end
end
```
We'll implement our `handle_params/3` function like this:
```elixir
# lib/course_conductor_web/live/cohorts_live.ex
def handle_params(%{"sort_by" => sort_by}, _uri, socket) do
case sort_by do
sort_by
when sort_by in ~w(name) ->
{:noreply, assign(socket, cohorts: sort_cohorts(socket.assigns.cohorts, sort_by))}
_ ->
{:noreply, socket}
end
end
def handle_params(_params, _uri, socket) do
{:noreply, socket}
end
def sort_cohorts(cohorts, "name") do
Enum.sort_by(cohorts, fn cohort -> cohort.name end)
end
```
Note that we've included a "catch-all" version of the `handle_params/3` function that will be invoked if someone navigates to `/cohorts` and includes query params that do not match the `"sort_by"` param that we care about. If our live view receives such a request, it will not update state.
Now, when a user clicks the `"Name"` live link, two things will happen:
* The browser's pushState API will be leveraged to change the URL to `/cohorts?sort_by=name`
* Our already-mounted live view's `handle_params/3` function will be invoked with the params `%{"sort_by" => "name"}`
Our `handle_params/3` function will then sort the cohorts stored `socket.assigns` by cohort name and update the socket state with the sorted list. The template will therefore re-render with the sorted list.
Since `handle_params/3` is _also_ called after `mount/2`, we have therefore allowed a user to navigate directly to `/cohorts?sort_by=name` via their browser and see the live view render with a table of cohorts already sorted by name. And just like that we've enabled users to share links to sorted table views with zero additional lines of code!
## More Sorting!
Now that our "sort by name" feature is up and running, let's add the remaining live links to allow users to sort by the other attributes we listed earlier: campus, start date and status.
First, we'll make each of these table headers into a live link:
```html
<table>
<th><%= live_link "Name", to: Routes.live_path(@socket, CourseConductorWeb.CohortsLive, %{sort_by: "name"}) %></th>
<th><%= live_link "Campus", to: Routes.live_path(@socket, CourseConductorWeb.CohortsLive, %{sort_by: "campus"}) %></th>
<th><%= live_link "Start Date", to: Routes.live_path(@socket, CourseConductorWeb.CohortsLive, %{sort_by: "start_date"}) %></th>
<th><%= live_link "Status", to: Routes.live_path(@socket, CourseConductorWeb.CohortsLive, %{sort_by: "status"}) %></th>
</table>
```
And we'll build out our `handle_params/3` function to operate on params describing a sort by any of these attributes:
```elixir
def handle_params(%{"sort_by" => sort_by}, _uri, socket) do
case sort_by do
sort_by
when sort_by in ~w(name campus start_date status) ->
{:noreply, assign(socket, cohorts: sort_cohorts(socket.assigns.cohorts, sort_by))}
_ ->
{:noreply, socket}
end
end
```
Here, we've added a check to see if the `sort_by` attribute is included in our list of sortable attributes.
```elixir
when sort_by in ~w(name campus start_date status)
```
If so, we will proceed to sort cohorts. If not, i.e. if a user pointed their browser to `/cohorts?sort_by=not_a_thing_we_support`, then we will ignore the `sort_by` value and refrain from updating socket state.
Next up, we'll add the necessary version for the `sort_cohorts/2` function that will pattern match against our new "sort by" options:
```elixir
def sort_cohorts(cohorts, "campus") do
Enum.sort_by(cohorts, fn cohort -> cohort.campus.name end)
end
def sort_cohorts(cohorts, "start_date") do
Enum.sort_by(
cohorts,
fn cohort -> {cohort.start_date.year, cohort.start_date.month, cohort.start_date.day} end,
&>=/2
)
end
def sort_cohorts(cohorts, "status") do
Enum.sort_by(cohorts, fn cohort ->
cohort.status
end)
end
```
And that's it!
## Conclusion
Once again LiveView has made it easy to build seamless real-time UIs. So, while LiveView doesn't mean you'll never have to write JavaScript again, it _does_ mean that we don't need to leverage JavaScript for common, everyday challenges like sorting data in a UI. Instead of writing complex vanilla JS, or reaching for a powerful front-end framework, we were able to create a sophisticated real-time UI with mostly back-end code, and back it all with the power of fault-tolerant Elixir processes.
|
nscyclone/elixir-school
|
_posts/2019-10-20-sorting-a-table-with-live-view-live-links.md
|
Markdown
|
apache-2.0
| 9,050
|
# README
## RELEASENOTES
Please review the
**RELEASENOTES.md**
file in this repository for a detailed overview, including installation instructions.
### DOCUMENTATION
Projects are designed to be self documenting, meaning that installation and configuration instructions are found inside the projects themselves (as committed assets). Generally a project overview (including description, sample screenshots, etc.) can be found on the project wiki page at `http://github.com/ca-apm/<repo_name>/wiki`.
Here are some additional links to CA APM content:
* [CA APM 9.7 Documentation](https://wiki.ca.com/display/APMDEVOPS97/CA+Application+Performance+Management)
* [CA Application Performance Management Product Homepage](http://www.ca.com/us/products/application-performance-management.aspx)
## CHANGELOG
Please review the
**CHANGELOG.md**
file in this repository for a detailed record of changes.
## CONTRIBUTING
If you plan to contribute your changes, you need to read the
**CONTRIBUTING.md**
file in this repository
## COMMUNITY
The [CA APM Community](https://communities.ca.com/community/ca-apm) is the primary means of interfacing with other users and with the CA APM product team. The [developer subcommunity](https://communities.ca.com/community/ca-apm/ca-developer-apm) is where you can learn more about building APM-based assets, find code examples, and ask questions of other developers and the CA APM product team.
## LICENSE
Please review the
**LICENSE**
file in this repository. Licenses may vary by repository.
## RELEASES
From time to time, projects may make releases available. While source code is always available for complete build, releases serve as a tag and often contain prepared packages that are prebuilt and ready to use. Visit `http://github.com/ca-apm/<repo_name>/releases` for details.
|
CA-APM/ca-apm-fieldpack-dotnet-useridtracer
|
README.md
|
Markdown
|
apache-2.0
| 1,837
|
/*
* Copyright © 2014 TU Berlin (emma@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package examples.graphs
import api._
import model._
/**
 * Spark-backed integration spec for the connected-components example: inherits the shared
 * test cases from `BaseConnectedComponentsIntegrationSpec` and supplies a Spark runner.
 */
class SparkConnectedComponentsIntegrationSpec extends BaseConnectedComponentsIntegrationSpec with SparkAware {

  /**
   * Computes connected components of the given edge list by executing the quoted Emma
   * program on a default Spark session (the body is rewritten by the `emma.onSpark` macro,
   * so its exact shape matters — do not restructure casually).
   */
  def connectedComponents(edges: Seq[Edge[Int]]): Seq[LVertex[Int, Int]] =
    withDefaultSparkSession(implicit spark => emma.onSpark {
      ConnectedComponents[Int](DataBag(edges)).collect()
    })
}
|
emmalanguage/emma
|
emma-examples-spark/src/test/scala/org/emmalanguage/examples/graphs/SparkConnectedComponentsIntegrationSpec.scala
|
Scala
|
apache-2.0
| 1,014
|
---
layout: doc_page
---
For general Indexing Service information, see [here](../design/indexing-service.html).
## Runtime Configuration
The indexing service uses several of the global configs in [Configuration](../configuration/index.html) and has the following set of configurations as well:
### Must be set on Overlord and Middle Manager
#### Node Configs
|Property|Description|Default|
|--------|-----------|-------|
|`druid.host`|The host for the current node. This is used to advertise the current processes location as reachable from another node and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()|
|`druid.port`|This is the port to actually listen on; unless port mapping is used, this will be the same port as is on `druid.host`|8090|
|`druid.service`|The name of the service. This is used as a dimension when emitting metrics and alerts to differentiate between the various services|druid/overlord|
#### Task Logging
If you are running the indexing service in remote mode, the task logs must be stored in S3, Azure Blob Store or HDFS.
|Property|Description|Default|
|--------|-----------|-------|
|`druid.indexer.logs.type`|Choices:noop, s3, azure, hdfs, file. Where to store task logs|file|
##### File Task Logs
Store task logs in the local filesystem.
|Property|Description|Default|
|--------|-----------|-------|
|`druid.indexer.logs.directory`|Local filesystem path.|log|
##### S3 Task Logs
Store task logs in S3.
|Property|Description|Default|
|--------|-----------|-------|
|`druid.indexer.logs.s3Bucket`|S3 bucket name.|none|
|`druid.indexer.logs.s3Prefix`|S3 key prefix.|none|
#### Azure Blob Store Task Logs
Store task logs in Azure Blob Store.
Note: this uses the same storage account as the deep storage module for azure.
|Property|Description|Default|
|--------|-----------|-------|
|`druid.indexer.logs.container`|The Azure Blob Store container to write logs to|none|
|`druid.indexer.logs.prefix`|The path to prepend to logs|none|
##### HDFS Task Logs
Store task logs in HDFS.
|Property|Description|Default|
|--------|-----------|-------|
|`druid.indexer.logs.directory`|The directory to store logs.|none|
### Overlord Configs
|Property|Description|Default|
|--------|-----------|-------|
|`druid.indexer.runner.type`|Choices "local" or "remote". Indicates whether tasks should be run locally or in a distributed environment.|local|
|`druid.indexer.storage.type`|Choices are "local" or "metadata". Indicates whether incoming tasks should be stored locally (in heap) or in metadata storage. Storing incoming tasks in metadata storage allows for tasks to be resumed if the overlord should fail.|local|
|`druid.indexer.storage.recentlyFinishedThreshold`|A duration of time to store task results.|PT24H|
|`druid.indexer.queue.maxSize`|Maximum number of active tasks at one time.|Integer.MAX_VALUE|
|`druid.indexer.queue.startDelay`|Sleep this long before starting overlord queue management. This can be useful to give a cluster time to re-orient itself after e.g. a widespread network issue.|PT1M|
|`druid.indexer.queue.restartDelay`|Sleep this long when overlord queue management throws an exception before trying again.|PT30S|
|`druid.indexer.queue.storageSyncRate`|Sync overlord state this often with an underlying task persistence mechanism.|PT1M|
The following configs only apply if the overlord is running in remote mode:
|Property|Description|Default|
|--------|-----------|-------|
|`druid.indexer.runner.taskAssignmentTimeout`|How long to wait after a task has been assigned to a middle manager before throwing an error.|PT5M|
|`druid.indexer.runner.minWorkerVersion`|The minimum middle manager version to send tasks to. |"0"|
|`druid.indexer.runner.compressZnodes`|Indicates whether or not the overlord should expect middle managers to compress Znodes.|true|
|`druid.indexer.runner.maxZnodeBytes`|The maximum size Znode in bytes that can be created in Zookeeper.|524288|
|`druid.indexer.runner.taskCleanupTimeout`|How long to wait before failing a task after a middle manager is disconnected from Zookeeper.|PT15M|
|`druid.indexer.runner.taskShutdownLinkTimeout`|How long to wait on a shutdown request to a middle manager before timing out|PT1M|
There are additional configs for autoscaling (if it is enabled):
|Property|Description|Default|
|--------|-----------|-------|
|`druid.indexer.autoscale.strategy`|Choices are "noop" or "ec2". Sets the strategy to run when autoscaling is required.|noop|
|`druid.indexer.autoscale.doAutoscale`|If set to "true" autoscaling will be enabled.|false|
|`druid.indexer.autoscale.provisionPeriod`|How often to check whether or not new middle managers should be added.|PT1M|
|`druid.indexer.autoscale.terminatePeriod`|How often to check when middle managers should be removed.|PT5M|
|`druid.indexer.autoscale.originTime`|The starting reference timestamp that the terminate period increments upon.|2012-01-01T00:55:00.000Z|
|`druid.indexer.autoscale.workerIdleTimeout`|How long a worker can be idle (not running a task) before it can be considered for termination.|PT90M|
|`druid.indexer.autoscale.maxScalingDuration`|How long the overlord will wait around for a middle manager to show up before giving up.|PT15M|
|`druid.indexer.autoscale.numEventsToTrack`|The number of autoscaling related events (node creation and termination) to track.|10|
|`druid.indexer.autoscale.pendingTaskTimeout`|How long a task can be in "pending" state before the overlord tries to scale up.|PT30S|
|`druid.indexer.autoscale.workerVersion`|If set, will only create nodes of set version during autoscaling. Overrides dynamic configuration. |null|
|`druid.indexer.autoscale.workerPort`|The port that middle managers will run on.|8080|
#### Dynamic Configuration
The overlord can dynamically change worker behavior.
The JSON object can be submitted to the overlord via a POST request at:
```
http://<OVERLORD_IP>:<port>/druid/indexer/v1/worker
```
Optional Header Parameters for auditing the config change can also be specified.
|Header Param Name| Description | Default |
|----------|-------------|---------|
|`X-Druid-Author`| author making the config change|""|
|`X-Druid-Comment`| comment describing the change being done|""|
A sample worker config spec is shown below:
```json
{
"selectStrategy": {
"type": "fillCapacityWithAffinity",
"affinityConfig": {
"affinity": {
"datasource1": ["ip1:port", "ip2:port"],
"datasource2": ["ip3:port"]
}
}
},
"autoScaler": {
"type": "ec2",
"minNumWorkers": 2,
"maxNumWorkers": 12,
"envConfig": {
"availabilityZone": "us-east-1a",
"nodeData": {
"amiId": "${AMI}",
"instanceType": "c3.8xlarge",
"minInstances": 1,
"maxInstances": 1,
"securityGroupIds": ["${IDs}"],
"keyName": ${KEY_NAME}
},
"userData": {
"impl": "string",
"data": "${SCRIPT_COMMAND}",
"versionReplacementString": ":VERSION:",
"version": null
}
}
}
}
```
Issuing a GET request at the same URL will return the current worker config spec that is currently in place. The worker config spec list above is just a sample for EC2 and it is possible to extend the code base for other deployment environments. A description of the worker config spec is shown below.
|Property|Description|Default|
|--------|-----------|-------|
|`selectStrategy`|How to assign tasks to middlemanagers. Choices are `fillCapacity`, `fillCapacityWithAffinity`, `equalDistribution` and `javascript`.|fillCapacity|
|`autoScaler`|Only used if autoscaling is enabled. See below.|null|
To view the audit history of worker config issue a GET request to the URL -
```
http://<OVERLORD_IP>:<port>/druid/indexer/v1/worker/history?interval=<interval>
```
default value of interval can be specified by setting `druid.audit.manager.auditHistoryMillis` (1 week if not configured) in overlord runtime.properties.
#### Worker Select Strategy
##### Fill Capacity
Workers are assigned tasks until capacity.
|Property|Description|Default|
|--------|-----------|-------|
|`type`|`fillCapacity`.|fillCapacity|
##### Fill Capacity With Affinity
An affinity config can be provided.
|Property|Description|Default|
|--------|-----------|-------|
|`type`|`fillCapacityWithAffinity`.|fillCapacityWithAffinity|
|`affinity`|A map to String to list of String host names.|{}|
Tasks will try to be assigned to preferred workers. Fill capacity strategy is used if no preference for a datasource specified.
##### Equal Distribution
The worker with the fewest tasks is assigned the task.
|Property|Description|Default|
|--------|-----------|-------|
|`type`|`equalDistribution`.|fillCapacity|
##### Javascript
Allows defining arbitrary logic for selecting workers to run task using a JavaScript function.
The function is passed remoteTaskRunnerConfig, map of workerId to available workers and task to be executed and returns the workerId on which the task should be run or null if the task cannot be run.
It can be used for rapid development of missing features where the worker selection logic is to be changed or tuned often.
If the selection logic is complex and cannot easily be tested in a JavaScript environment,
it is better to write a Druid extension module that extends the existing worker selection strategies written in Java.
|Property|Description|Default|
|--------|-----------|-------|
|`type`|`javascript`.|javascript|
|`function`|String representing javascript function||
Example: a function that sends batch_index_task to workers 10.0.0.1 and 10.0.0.2 and all other tasks to other available workers.
```
{
"type":"javascript",
"function":"function (config, zkWorkers, task) {\nvar batch_workers = new java.util.ArrayList();\nbatch_workers.add(\"10.0.0.1\");\nbatch_workers.add(\"10.0.0.2\");\nworkers = zkWorkers.keySet().toArray();\nvar sortedWorkers = new Array()\n;for(var i = 0; i < workers.length; i++){\n sortedWorkers[i] = workers[i];\n}\nArray.prototype.sort.call(sortedWorkers,function(a, b){return zkWorkers.get(b).getCurrCapacityUsed() - zkWorkers.get(a).getCurrCapacityUsed();});\nvar minWorkerVer = config.getMinWorkerVersion();\nfor (var i = 0; i < sortedWorkers.length; i++) {\n var worker = sortedWorkers[i];\n var zkWorker = zkWorkers.get(worker);\n if(zkWorker.canRunTask(task) && zkWorker.isValidVersion(minWorkerVer)){\n if(task.getType() == 'index_hadoop' && batch_workers.contains(worker)){\n return worker;\n } else {\n if(task.getType() != 'index_hadoop' && !batch_workers.contains(worker)){\n return worker;\n }\n }\n }\n}\nreturn null;\n}"
}
```
#### Autoscaler
Amazon's EC2 is currently the only supported autoscaler.
|Property|Description|Default|
|--------|-----------|-------|
|`minNumWorkers`|The minimum number of workers that can be in the cluster at any given time.|0|
|`maxNumWorkers`|The maximum number of workers that can be in the cluster at any given time.|0|
|`availabilityZone`|What availability zone to run in.|none|
|`nodeData`|A JSON object that describes how to launch new nodes.|none; required|
|`userData`|A JSON object that describes how to configure new nodes. If you have set druid.indexer.autoscale.workerVersion, this must have a versionReplacementString. Otherwise, a versionReplacementString is not necessary.|none; optional|
### MiddleManager Configs
Middle managers pass their configurations down to their child peons. The middle manager requires the following configs:
|Property|Description|Default|
|--------|-----------|-------|
|`druid.indexer.runner.allowedPrefixes`|Whitelist of prefixes for configs that can be passed down to child peons.|"com.metamx", "druid", "io.druid", "user.timezone","file.encoding"|
|`druid.indexer.runner.compressZnodes`|Indicates whether or not the middle managers should compress Znodes.|true|
|`druid.indexer.runner.classpath`|Java classpath for the peon.|System.getProperty("java.class.path")|
|`druid.indexer.runner.javaCommand`|Command required to execute java.|java|
|`druid.indexer.runner.javaOpts`|-X Java options to run the peon in its own JVM.|""|
|`druid.indexer.runner.maxZnodeBytes`|The maximum size Znode in bytes that can be created in Zookeeper.|524288|
|`druid.indexer.runner.startPort`|The port that peons begin running on.|8100|
|`druid.worker.ip`|The IP of the worker.|localhost|
|`druid.worker.version`|Version identifier for the middle manager.|0|
|`druid.worker.capacity`|Maximum number of tasks the middle manager can accept.|Number of available processors - 1|
#### Peon Configs
Although peons inherit the configurations of their parent middle managers, explicit child peon configs in middlemanager can be set by prefixing them with:
```
druid.indexer.fork.property
```
Additional peon configs include:
|Property|Description|Default|
|--------|-----------|-------|
|`druid.peon.mode`|Choices are "local" and "remote". Setting this to local means you intend to run the peon as a standalone node (Not recommended).|remote|
|`druid.indexer.task.baseDir`|Base temporary working directory.|/tmp|
|`druid.indexer.task.baseTaskDir`|Base temporary working directory for tasks.|/tmp/persistent/tasks|
|`druid.indexer.task.hadoopWorkingPath`|Temporary working directory for Hadoop tasks.|/tmp/druid-indexing|
|`druid.indexer.task.defaultRowFlushBoundary`|Highest row count before persisting to disk. Used for indexing generating tasks.|50000|
|`druid.indexer.task.defaultHadoopCoordinates`|Hadoop version to use with HadoopIndexTasks that do not request a particular version.|org.apache.hadoop:hadoop-client:2.3.0|
If the peon is running in remote mode, there must be an overlord up and running. Peons in remote mode can set the following configurations:
|Property|Description|Default|
|--------|-----------|-------|
|`druid.peon.taskActionClient.retry.minWait`|The minimum retry time to communicate with overlord.|PT1M|
|`druid.peon.taskActionClient.retry.maxWait`|The maximum retry time to communicate with overlord.|PT10M|
|`druid.peon.taskActionClient.retry.maxRetryCount`|The maximum number of retries to communicate with overlord.|10|
|
skyportsystems/druid
|
docs/content/configuration/indexing-service.md
|
Markdown
|
apache-2.0
| 14,236
|
/*******************************************************************************
* Copyright 2015 DANS - Data Archiving and Networked Services
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package nl.knaw.dans.common.lang.mail;
import java.io.File;
import java.util.Map;
import javax.activation.DataHandler;
import javax.activation.DataSource;
import javax.activation.FileDataSource;
import javax.mail.BodyPart;
import javax.mail.MessagingException;
import javax.mail.Multipart;
import javax.mail.internet.MimeBodyPart;
import javax.mail.internet.MimeMultipart;
/**
 * Utility methods that wrap mail content (plain text, HTML, files, and
 * attachments) into JavaMail {@link BodyPart} / {@link Multipart}
 * structures for the {@link CommonMailer}.
 *
 * @author joke
 */
class MessageWrapper
{
    private MessageWrapper()
    {
        // all methods static: no instantiation
    }

    /** Wraps plain-text content in a new body part. */
    static BodyPart wrapTextBodyPart(final String textContent) throws MessagingException
    {
        final BodyPart bodyPart = new MimeBodyPart();
        bodyPart.setText(textContent);
        return bodyPart;
    }

    /** Wraps HTML content in a new body part with content type text/html. */
    static BodyPart wrapHtmlBodyPart(final String htmlContent) throws MessagingException
    {
        final BodyPart bodyPart = new MimeBodyPart();
        bodyPart.setContent(htmlContent, "text/html");
        return bodyPart;
    }

    /**
     * Wraps a file in a body part referenced by Content-ID, so it can be
     * embedded from HTML content as {@code cid:<contentId>}.
     *
     * @param contentId the Content-ID header value to assign
     * @param file      the file whose contents and name are used
     */
    static BodyPart wrapBodyPart(final String contentId, final File file) throws MessagingException
    {
        final DataSource dataSource = new FileDataSource(file);
        final BodyPart bodyPart = new MimeBodyPart();
        bodyPart.setDataHandler(new DataHandler(dataSource));
        bodyPart.setHeader("Content-ID", contentId);
        bodyPart.setFileName(file.getName());
        return bodyPart;
    }

    /** Wraps an attachment's data source and file name in a new body part. */
    static BodyPart wrapAttachementPart(Attachement attachement) throws MessagingException
    {
        final BodyPart bodyPart = new MimeBodyPart();
        bodyPart.setDataHandler(new DataHandler(attachement.dataSource));
        bodyPart.setFileName(attachement.fileName);
        return bodyPart;
    }

    /** Wraps a multipart (e.g. "related" or "alternative") in a body part. */
    static BodyPart wrapBodyPart(final Multipart multiParts) throws MessagingException
    {
        final BodyPart bodyPart = new MimeBodyPart();
        bodyPart.setContent(multiParts);
        return bodyPart;
    }

    /**
     * Combines the given body parts into a multipart/alternative container
     * (mail clients render the best alternative they support).
     */
    static Multipart wrapAlternativeParts(final BodyPart... bodyParts) throws MessagingException
    {
        final Multipart multiPart = new MimeMultipart("alternative");
        for (final BodyPart bodyPart : bodyParts)
        {
            multiPart.addBodyPart(bodyPart);
        }
        return multiPart;
    }

    /**
     * Wraps the HTML content and images in a MultiPart as far as the images are
     * mentioned in the HTML content.
     *
     * @param htmlContent will be wrapped in a BodyPart before it is added to the MultiPart
     * @param images for each key mentioned as "<code>cid:<i>key</i></code>" in htmlContent,
     *        the value is added to the MultiPart
     * @return a MultiPart with HTML and zero or more images
     * @throws MessagingException
     */
    static Multipart wrapRelatedParts(final String htmlContent, final Map<String, BodyPart> images) throws MessagingException
    {
        final Multipart multiPart = new MimeMultipart("related");
        multiPart.addBodyPart(wrapHtmlBodyPart(htmlContent));
        // Iterate over entries to avoid a second map lookup per key
        // (previously keySet() followed by get()).
        for (final Map.Entry<String, BodyPart> image : images.entrySet())
        {
            if (htmlContent.contains("cid:" + image.getKey()))
                multiPart.addBodyPart(image.getValue());
        }
        return multiPart;
    }
}
|
DANS-KNAW/dccd-legacy-libs
|
lang/src/main/java/nl/knaw/dans/common/lang/mail/MessageWrapper.java
|
Java
|
apache-2.0
| 4,022
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cocoon.acting;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.Iterator;
import org.apache.avalon.framework.parameters.Parameters;
import org.apache.avalon.framework.thread.ThreadSafe;
import org.apache.cocoon.environment.ObjectModelHelper;
import org.apache.cocoon.environment.Redirector;
import org.apache.cocoon.environment.Request;
import org.apache.cocoon.environment.SourceResolver;
/**
 * This action simply checks to see if a given request parameter
 * exists.
 *
 * <p>It takes an arbitrary number of default parameters to check
 * named 'parameter-name'. Non-default parameters need to be separated
 * by spaces and passed as value of a sitemap parameter named
 * 'parameters'. The action returns a map with all parameters if all
 * of them exist and null otherwise. Parameter names can only be added
 * to this list but no default parameters can be overridden by
 * specific ones.
 *
 * <p>This action is very closely related to @link{RequestParamAction}
 * and {@link FormValidatorAction}. However this action is considerably
 * simpler in that it tests only for existence of a parameter and it
 * doesn't need a descriptor. Besides it doesn't propagate all request
 * parameters to the sitemap but only those that are marked as
 * required.</p> <p> One special feature is, however, that parameters
 * can contain <strong>one</strong> wildcard ("*"). It will be
 * checked, whether all parameters with a wildcard have the same
 * matches. E.g. "id_* name_*" enforces, that if "id_1" exists,
 * "name_1" must also exist and vice versa.</p>
 *
 * @cocoon.sitemap.component.documentation
 * This action simply checks to see if a given request parameter
 * exists.
 *
 * @version $Id$
 */
public class RequestParameterExistsAction extends AbstractConfigurableAction implements ThreadSafe
{
    /**
     * Holds the text before and after the single "*" wildcard of a
     * parameter specification, plus the number of request parameters
     * matched by this specification so far.
     */
    protected class StringParts {
        String prefix = null;
        String pstfix = null;
        // number of request parameters matched against this spec
        int count = 0;

        public StringParts ( String pre, String post ) {
            prefix = pre;
            pstfix = post;
        }
    }

    /**
     * Checks that every configured request parameter exists.
     *
     * <p>Plain parameter names are looked up directly. Wildcard
     * specifications ("pre*post") are collected first and resolved in a
     * second phase: every request parameter matching the first wildcard
     * spec defines the set of admissible wildcard matches; all other
     * wildcard specs must match exactly the same set (verified via the
     * per-spec match counters).</p>
     *
     * @return an unmodifiable map of all required parameter names to their
     *         values, or null if any required parameter is missing or the
     *         wildcard matches are inconsistent.
     */
    public Map act( Redirector redirector,
                    SourceResolver resolver,
                    Map objectModel,
                    String source,
                    Parameters parameters
                    )
        throws Exception {
        Request request = ObjectModelHelper.getRequest(objectModel);
        HashMap results = new HashMap();
        // wildcard specs deferred to phase two, keyed by discovery order
        HashMap items = new HashMap();
        int wildcards = 0;

        // check default parameters for existence
        if (this.getLogger().isDebugEnabled()) {
            getLogger().debug("checking default parameters");
        }
        Iterator reqParams = settings.values().iterator();
        while (reqParams.hasNext()) {
            String paramName = (String) reqParams.next();
            StringParts sp = splitParameter(paramName);
            if (sp != null) {
                // wildcard: special care required (deferred until later)
                // NOTE: Integer.valueOf replaces the deprecated new Integer(...)
                items.put(Integer.valueOf(wildcards++), sp);
                if (this.getLogger().isDebugEnabled()) {
                    getLogger().debug("(default) deferring " + paramName);
                }
            } else {
                String paramValue = request.getParameter(paramName);
                if (paramValue == null) {
                    return null;
                }
                results.put(paramName, paramValue);
            }
        }

        // check parameters for existence
        if (this.getLogger().isDebugEnabled()) {
            getLogger().debug("checking sitemap parameters");
        }
        String params = parameters.getParameter("parameters", null);
        if (params != null) {
            StringTokenizer st = new StringTokenizer(params);
            while (st.hasMoreTokens()) {
                String paramName = st.nextToken();
                StringParts sp = splitParameter(paramName);
                if (sp != null) {
                    // wildcard: special care required (deferred until later)
                    items.put(Integer.valueOf(wildcards++), sp);
                    if (this.getLogger().isDebugEnabled()) {
                        getLogger().debug("deferring " + paramName);
                    }
                } else {
                    String paramValue = request.getParameter(paramName);
                    if (paramValue == null) {
                        return null;
                    }
                    results.put(paramName, paramValue);
                }
            }
        }

        if (wildcards != 0) {
            // special care for parameters with wildcard
            //
            if (this.getLogger().isDebugEnabled()) {
                getLogger().debug("deferred checking for parameters: " + wildcards);
            }

            // first one: its matches define the admissible wildcard values
            //
            if (this.getLogger().isDebugEnabled()) {
                getLogger().debug(" checking first");
            }
            HashMap values = new HashMap();
            StringParts sp1 = (StringParts) items.get(Integer.valueOf(0));
            if (this.getLogger().isDebugEnabled()) {
                getLogger().debug(
                    " Parameter is [" + sp1.prefix + " * " + sp1.pstfix + "] ");
            }
            Enumeration requestParams = request.getParameterNames();
            Boolean dummy = Boolean.TRUE;
            while (requestParams.hasMoreElements()) {
                String paramName = (String) requestParams.nextElement();
                String match = getMatch(paramName, sp1);
                if (match != null) {
                    if (this.getLogger().isDebugEnabled()) {
                        getLogger().debug(
                            " value is >"
                                + match
                                + "< "
                                + sp1.prefix.length()
                                + " "
                                + paramName.length()
                                + " "
                                + sp1.pstfix.length());
                    }
                    values.put(match, dummy);
                    sp1.count++;
                    if (this.getLogger().isDebugEnabled()) {
                        getLogger().debug(
                            " Parameter "
                                + sp1.prefix
                                + "*"
                                + sp1.pstfix
                                + " matches "
                                + paramName
                                + " ("
                                + sp1.count
                                + " so far)");
                    }
                    String paramValue = request.getParameter(paramName);
                    if (paramValue == null) {
                        return null;
                    }
                    results.put(paramName, paramValue);
                }
            }
            if (sp1.count == 0) {
                // the first wildcard spec matched nothing: fail
                if (this.getLogger().isDebugEnabled()) {
                    getLogger().debug(
                        " Parameter "
                            + sp1.prefix
                            + "*"
                            + sp1.pstfix
                            + " matches "
                            + sp1.count);
                }
                return null;
            }

            // all other ones: every match must also be a match of the first
            //
            if (this.getLogger().isDebugEnabled()) {
                getLogger().debug(" checking others");
            }
            requestParams = request.getParameterNames();
            while (requestParams.hasMoreElements()) {
                String paramName = (String) requestParams.nextElement();
                if (this.getLogger().isDebugEnabled()) {
                    getLogger().debug(" checking request parameter " + paramName);
                }
                for (int i = wildcards - 1; i > 0; i--) {
                    if (this.getLogger().isDebugEnabled()) {
                        getLogger().debug(" checking against " + i);
                    }
                    StringParts sp = (StringParts) items.get(Integer.valueOf(i));
                    String match = getMatch(paramName, sp);
                    if (this.getLogger().isDebugEnabled()) {
                        getLogger().debug(
                            " Parameter is ["
                                + sp.prefix
                                + " * "
                                + sp.pstfix
                                + "] ");
                    }
                    if (match != null) {
                        if (this.getLogger().isDebugEnabled()) {
                            getLogger().debug(
                                " Parameter "
                                    + sp.prefix
                                    + "*"
                                    + sp.pstfix
                                    + " matches "
                                    + paramName
                                    + " ("
                                    + sp.count
                                    + " so far)");
                        }
                        if (values.containsKey(match)) {
                            sp.count++;
                            if (this.getLogger().isDebugEnabled()) {
                                getLogger().debug(
                                    " " + paramName + " (verified)");
                            }
                            String paramValue = request.getParameter(paramName);
                            if (paramValue == null) {
                                return null;
                            }
                            results.put(paramName, paramValue);
                        } else {
                            // BUG FIX: added missing space after the match value
                            // in the debug message ("...not found..." ran together)
                            if (this.getLogger().isDebugEnabled()) {
                                getLogger().debug(
                                    "Match "
                                        + match
                                        + " not found for "
                                        + sp1.prefix
                                        + "*"
                                        + sp1.pstfix
                                        + " but for "
                                        + sp.prefix
                                        + "*"
                                        + sp.pstfix);
                            }
                            return null;
                        }
                    }
                }
            }

            // since we enforce that only matches are counted, that exist for
            // the first parameter as well, check if every parameter has an
            // equal number of matches.
            //
            if (this.getLogger().isDebugEnabled()) {
                getLogger().debug("checking number of matches");
            }
            for (int i = wildcards - 1; i > 0; i--) {
                StringParts sp = (StringParts) items.get(Integer.valueOf(i));
                if (sp.count != sp1.count) {
                    if (this.getLogger().isDebugEnabled()) {
                        getLogger().debug(
                            "Found "
                                + sp.count
                                + " matches for "
                                + sp.prefix
                                + "*"
                                + sp.pstfix
                                + " but expected "
                                + sp1.count);
                    }
                    return null;
                } else {
                    if (this.getLogger().isDebugEnabled()) {
                        getLogger().debug(
                            "Found "
                                + sp.count
                                + " matches for "
                                + sp.prefix
                                + "*"
                                + sp.pstfix
                                + " as expected");
                    }
                }
            }
        }

        return Collections.unmodifiableMap(results);
    }

    /**
     * Find first "*" in a String and split it into the substring
     * before and after the "*". Returns null if no "*" is present.
     */
    protected StringParts splitParameter( String paramName )
    {
        int idx = paramName.indexOf("*");
        if ( idx != -1 ) {
            return new StringParts(paramName.substring(0,idx), paramName.substring(idx+1));
        } else {
            return null;
        }
    }

    /**
     * If a String matches a StringPart spec, return the substring
     * between the specified prefix and postfix. Returns null if it
     * doesn't match.
     */
    protected String getMatch( String paramName,
                               StringParts sp
                               )
    {
        if ( paramName.startsWith( sp.prefix ) && paramName.endsWith( sp.pstfix ) ) {
            return paramName.substring( sp.prefix.length(), ( paramName.length() - sp.pstfix.length() ) );
        } else {
            return null;
        }
    }
}
|
apache/cocoon
|
core/cocoon-sitemap/cocoon-sitemap-components/src/main/java/org/apache/cocoon/acting/RequestParameterExistsAction.java
|
Java
|
apache-2.0
| 13,802
|
<?php if (!defined('BASEPATH')) exit('No direct script access allowed');
class Dates_Extension extends ExtensionBase
{
private $minYear = 9999999;
private $maxYear = 0;
private $date_mappings = array(
"dc.created" => "Created",
"dc.published" => "Published",
"dc.available" => "Available",
"dc.dateAccepted" => "Accepted",
"dc.dateSubmitted" => "Submitted",
"dc.issued" => "Issued",
"dc.valid" => "Valid",
);
function __construct($ro_pointer)
{
parent::__construct($ro_pointer);
}
function processTemporal()
{
$this->minYear = 9999999;
$this->maxYear = 0;
$temporalArray = array();
$sxml = $this->ro->getSimpleXML();
//TODO: fix me...
$sxml->registerXPathNamespace("ro", RIFCS_NAMESPACE);
$temporals = $sxml->xpath('//ro:temporal/ro:date');
foreach ($temporals AS $temporal) {
$type = (string)$temporal["type"];
$value = $this->getWTCdate((string)$temporal);
if ($value)
$temporalArray[] = array('type' => $type, 'value' => $value);
}
return $temporalArray;
}
function getEarliestAsYear()
{
//TODO: write the function :-)
return $this->minYear;
}
function getLatestAsYear()
{
//TODO: write the function :-)
return $this->maxYear;
}
function getWTCdate($value)
{
utc_timezone();
// "Year and only year" (i.e. 1960) will be treated as HH SS by default
if (strlen($value) == 4) {
// Assume this is a year:
$value = "Jan 1 " . $value;
} else if (preg_match("/\d{4}\-\d{2}/", $value) === 1) {
$value = $value . "-01";
}
if (($timestamp = strtotime($value)) === false) {
return false;
} else {
$date = getDate($timestamp);
if ($date['year'] > $this->maxYear)
$this->maxYear = $date['year'];
if ($date['year'] < $this->minYear)
$this->minYear = $date['year'];
return date('Y-m-d\TH:i:s\Z', $timestamp);
}
reset_timezone();
}
function extractDatesForDisplay($sxml)
{
/* Attempt to find a "nice" display for dates elements ('whole of collection dates') */
$sxml->registerXPathNamespace("ro", RIFCS_NAMESPACE);
$dates = $sxml->xpath('//ro:dates');
foreach ($dates AS $date) {
// Determine the prefix for this date:
$friendly_date = "";
if (isset($date["type"])) {
if (isset($this->date_mappings[(string)$date["type"]])) {
$friendly_date_prefix = $this->date_mappings[(string)$date["type"]] . ": ";
} else {
$friendly_date_prefix = (string)$date["type"] . ": ";
}
}
$to = array();
$from = array();
$other = array();
/* Witchcraft to determine a nicely displayed date */
$friendly_date = $this->formatDates($date->{'date'});
// Add to the extRif
if ($friendly_date) {
$date->addChild("extRif:friendly_date", $friendly_date_prefix . $friendly_date, EXTRIF_NAMESPACE);
}
}
/* Attempt to find a "nice" display for temporal coverage dates */
$sxml->registerXPathNamespace("ro", RIFCS_NAMESPACE);
$temporals = $sxml->xpath('//ro:coverage/ro:temporal');
foreach ($temporals AS $date) {
// Determine the prefix for this date:
$friendly_date = "";
/* Witchcraft to determine a nicely displayed date */
$friendly_date = $this->formatDates($date->{'date'}, true);
// Add to the extRif
if ($friendly_date) {
$date->addChild("extRif:friendly_date", $friendly_date, EXTRIF_NAMESPACE);
}
}
/* Attempt to find a "nice" display for existenceDates */
$sxml->registerXPathNamespace("ro", RIFCS_NAMESPACE);
$existenceDates = $sxml->xpath('//ro:existenceDates');
foreach ($existenceDates AS $date) {
$start = '';
$end = '';
if ($date->startDate) {
if (!($start = $this->nicifyDate($this->getWTCdate($date->startDate)))) {
$start = $date->startDate;
}
}
if ($date->endDate) {
if (!($end = $this->nicifyDate($this->getWTCdate($date->endDate)))) {
$end = $date->endDate;
}
}
if ($start && $end) {
$friendly_date = $start . " - " . $end;
} else {
$friendly_date = $start . $end;
}
$date->addChild("extRif:friendly_date", $friendly_date, EXTRIF_NAMESPACE);
}
return $sxml;
}
function formatDates($sxml_dates, $capitalise = false)
{
$friendly_date = '';
$to = array();
$from = array();
$other = array();
if ($sxml_dates) {
foreach ($sxml_dates AS $date_entry) {
$formatted_date = $this->getWTCdate((string)$date_entry);
if ($formatted_date) {
if (strpos(strtolower((string)$date_entry['type']), "to") !== FALSE) {
$to[] = $this->nicifyDate($formatted_date);
} elseif (strpos(strtolower((string)$date_entry['type']), "from") !== FALSE) {
$from[] = $this->nicifyDate($formatted_date);
} else {
$other[] = $this->nicifyDate($formatted_date);
}
}
}
}
$from_text = ($capitalise ? "From " : "");
if ($to && $from) {
$friendly_date .= $from_text . implode($from, ", ") . " to " . implode($to, ", ");
} elseif ($from) {
$friendly_date .= $from_text . implode($from, ", ");
} else {
$friendly_date .= implode(array_merge($to, $from, $other), ", ");
}
return $friendly_date;
}
/**
 * Convert a W3CDTF timestamp into the shortest friendly representation:
 * "YYYY" for midnight on Jan 1, "YYYY-mm-dd" for other midnights,
 * otherwise "YYYY-mm-dd HH:ii". Returns false when the string cannot
 * be parsed.
 *
 * @param string $w3cdtf
 * @return string|false
 */
function nicifyDate($w3cdtf)
{
    // Parse in UTC so the midnight checks below are not skewed by the
    // server's local timezone.
    utc_timezone();
    $time = strtotime($w3cdtf);
    if (!$time) {
        // strtotime() yields a falsy result for the epoch itself, so we
        // need to cater for someone legitimately entering 1st Jan 1970.
        $result = ($w3cdtf == "1970-01-01T00:00:00Z") ? "1970" : false;
    } elseif (date("H:i:s", $time) == "00:00:00") {
        if (date("m-d", $time) == "01-01") {
            // Midnight on New Year's Day: assume just the year, i.e. "2001".
            $result = date("Y", $time);
        } else {
            // Other midnights: full date, no time component.
            $result = date("Y-m-d", $time);
        }
    } else {
        // Full date and time.
        $result = date("Y-m-d H:i", $time);
    }
    // Fix: the original called reset_timezone() *after* the return
    // statements, so it never executed and the process stayed in UTC.
    reset_timezone();
    return $result;
}
/**
 * Returns the earliest year found across all existenceDates startDate
 * elements (the previous version returned whichever startDate happened
 * to be iterated last, not the minimum).
 * @param bool|SimpleXMLElement $xml
 * @return bool|string four-digit year, or false when no parsable startDate
 */
function getExistenceDateEarliestYear($xml = false)
{
    $earliestYear = false;
    if (!$xml) $xml = $this->ro->getSimpleXML();
    foreach ($xml->xpath('//ro:existenceDates') AS $date) {
        if ($date->startDate) {
            // Bare 4-digit years must be padded before strtotime() can parse them.
            if (strlen(trim($date->startDate)) == 4)
                $date->startDate = "Jan 1, " . $date->startDate;
            $start = strtotime($date->startDate);
            // Fix: guard against unparsable dates -- date("Y", false) would
            // silently report "1970" -- and track the minimum year so the
            // result really is the earliest.
            if ($start !== false) {
                $year = date("Y", $start);
                if ($earliestYear === false || (int)$year < (int)$earliestYear) {
                    $earliestYear = $year;
                }
            }
        }
    }
    return $earliestYear;
}
/**
 * Returns the latest year found across all existenceDates endDate
 * elements (the previous version returned whichever endDate happened
 * to be iterated last, not the maximum).
 * @param bool|SimpleXMLElement $xml
 * @return bool|string four-digit year, or false when no parsable endDate
 */
function getExistenceDateLatestYear($xml = false)
{
    $latestYear = false;
    if (!$xml) $xml = $this->ro->getSimpleXML();
    foreach ($xml->xpath('//ro:existenceDates') AS $date) {
        if ($date->endDate) {
            // Bare 4-digit years must be padded before strtotime() can parse them.
            if (strlen(trim($date->endDate)) == 4)
                $date->endDate = "Dec 31, " . $date->endDate;
            $end = strtotime($date->endDate);
            // Fix: guard against unparsable dates -- date("Y", false) would
            // silently report "1970" -- and track the maximum year so the
            // result really is the latest.
            if ($end !== false) {
                $year = date("Y", $end);
                if ($latestYear === false || (int)$year > (int)$latestYear) {
                    $latestYear = $year;
                }
            }
        }
    }
    return $latestYear;
}
/**
 * Return the first existenceDate of the given kind, formatted.
 * @param string $point  'startDate' or 'endDate'
 * @param string $format date() format string
 * @param bool|SimpleXMLElement $xml
 * @return bool|string formatted date, or false when none found/parsable
 */
function getExistenceDate($point, $format = 'Y-m-d', $xml = false)
{
    // Fix: $xml previously had no default, making it a required parameter
    // declared after an optional one (deprecated, fatal in PHP 8).
    if (!$xml) $xml = $this->ro->getSimpleXML();
    foreach ($xml->xpath('//ro:existenceDates') AS $date) {
        if ($date->{$point}) {
            if (strlen(trim($date->{$point})) == 4) {
                // Pad bare years: start dates to Jan 1, end dates to Dec 31
                // (the original padded both with Dec 31, which shifted
                // start-of-range dates to the end of the year).
                $prefix = ($point == 'startDate') ? "Jan 1, " : "Dec 31, ";
                $date->{$point} = $prefix . $date->{$point};
            }
            // Fix: the original did `$date = date(strtotime(...))`, which
            // treats the Unix timestamp as a *format string* and garbles the
            // value; format the timestamp directly instead.
            $timestamp = strtotime($date->{$point});
            if ($timestamp === false) {
                return false;
            }
            return date($format, $timestamp);
        }
    }
    return false;
}
}
|
au-research/ANDS-Registry-Core
|
applications/registry/registry_object/models/extensions/dates.php
|
PHP
|
apache-2.0
| 9,200
|
/*!
* jQuery UI Progressbar 1.8.22
*
* Copyright 2012, AUTHORS.txt (http://jqueryui.com/about)
* Dual licensed under the MIT or GPL Version 2 licenses.
* http://jquery.org/license
*
* http://docs.jquery.com/UI/Progressbar#theming
*/
/* Track: fixed 2em height; overflow:hidden clips the value bar as it fills. */
.ui-progressbar { height:2em; text-align: left; overflow: hidden; }
/* Value bar: -1px margin overlaps the track's border; fills the full height. */
.ui-progressbar .ui-progressbar-value {margin: -1px; height:100%; }
|
duwk123456/stone
|
web/PSD/ui/development-bundle/themes/ui-lightness/jquery.ui.progressbar.css
|
CSS
|
apache-2.0
| 386
|
/*
* JBoss, Home of Professional Open Source
* Copyright 2009, Red Hat, Inc. and/or its affiliates, and individual contributors
* by the @authors tag. See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hibernate.validator.test.constraints.impl;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.hibernate.validator.constraints.impl.NullValidator;
/**
* @author Alaa Nassef
*/
public class NullValidatorTest {

	private static NullValidator constraint;

	@BeforeClass
	public static void init() {
		constraint = new NullValidator();
	}

	@Test
	public void testIsValid() {
		// @Null is satisfied only by null; any non-null instance violates it.
		// The validator ignores its constraint context, so null is passed.
		Object nonNullValue = new Object();
		assertTrue( constraint.isValid( null, null ) );
		assertFalse( constraint.isValid( nonNullValue, null ) );
	}
}
|
gastaldi/hibernate-validator
|
hibernate-validator/src/test/java/org/hibernate/validator/test/constraints/impl/NullValidatorTest.java
|
Java
|
apache-2.0
| 1,413
|
using Bridge.Test.NUnit;
using System.Linq;
namespace Bridge.ClientTest.Batch3.BridgeIssues
{
[Category(Constants.MODULE_ISSUES)]
[TestFixture(TestNameFormat = "#1266 - {0}")]
public class Bridge1266
{
    // #1266: converting an array to an enumerable must yield the same
    // elements in the same order.
    [Test]
    public static void TestArrayToEnumerable()
    {
        var source = new[] { 1, 2, 3 };
        var enumerable = source.ToArray().ToEnumerable();

        var position = 0;
        foreach (var item in enumerable)
        {
            Assert.AreEqual(source[position], item);
            position++;
        }
    }
}
}
|
AndreyZM/Bridge
|
Tests/Batch3/BridgeIssues/1200/N1266.cs
|
C#
|
apache-2.0
| 541
|
// Auto-generated Bridge.NET culture definition for "en-MH"
// (English, Marshall Islands). Registers number, date/time and text
// metadata with the System.Globalization emulation layer.
Bridge.merge(new System.Globalization.CultureInfo("en-MH", true), {
    englishName: "English (Marshall Islands)",
    nativeName: "English (Marshall Islands)",
    // Numeric formatting: US-style separators and currency symbol.
    numberFormat: Bridge.merge(new System.Globalization.NumberFormatInfo(), {
        nanSymbol: "NaN",
        negativeSign: "-",
        positiveSign: "+",
        negativeInfinitySymbol: "-∞",
        positiveInfinitySymbol: "∞",
        percentSymbol: "%",
        percentGroupSizes: [3],
        percentDecimalDigits: 2,
        percentDecimalSeparator: ".",
        percentGroupSeparator: ",",
        percentPositivePattern: 1,
        percentNegativePattern: 1,
        currencySymbol: "$",
        currencyGroupSizes: [3],
        currencyDecimalDigits: 2,
        currencyDecimalSeparator: ".",
        currencyGroupSeparator: ",",
        currencyNegativePattern: 1,
        currencyPositivePattern: 0,
        numberGroupSizes: [3],
        numberDecimalDigits: 2,
        numberDecimalSeparator: ".",
        numberGroupSeparator: ",",
        numberNegativePattern: 1
    }),
    // Date/time formatting: English month/day names, M/d/yyyy short dates.
    dateTimeFormat: Bridge.merge(new System.Globalization.DateTimeFormatInfo(), {
        abbreviatedDayNames: ["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],
        abbreviatedMonthGenitiveNames: ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec",""],
        abbreviatedMonthNames: ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec",""],
        amDesignator: "AM",
        dateSeparator: "/",
        dayNames: ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],
        firstDayOfWeek: 0,
        fullDateTimePattern: "dddd, MMMM d, yyyy h:mm:ss tt",
        longDatePattern: "dddd, MMMM d, yyyy",
        longTimePattern: "h:mm:ss tt",
        monthDayPattern: "MMMM d",
        monthGenitiveNames: ["January","February","March","April","May","June","July","August","September","October","November","December",""],
        monthNames: ["January","February","March","April","May","June","July","August","September","October","November","December",""],
        pmDesignator: "PM",
        rfc1123: "ddd, dd MMM yyyy HH':'mm':'ss 'GMT'",
        shortDatePattern: "M/d/yyyy",
        shortestDayNames: ["Su","Mo","Tu","We","Th","Fr","Sa"],
        shortTimePattern: "h:mm tt",
        sortableDateTimePattern: "yyyy'-'MM'-'dd'T'HH':'mm':'ss",
        sortableDateTimePattern1: "yyyy'-'MM'-'dd",
        timeSeparator: ":",
        universalSortableDateTimePattern: "yyyy'-'MM'-'dd HH':'mm':'ss'Z'",
        yearMonthPattern: "MMMM yyyy",
        roundtripFormat: "yyyy'-'MM'-'dd'T'HH':'mm':'ss.fffffffzzz"
    }),
    // Code pages and text metadata for en-MH.
    TextInfo: Bridge.merge(new System.Globalization.TextInfo(), {
        ANSICodePage: 1252,
        CultureName: "en-MH",
        EBCDICCodePage: 500,
        IsRightToLeft: false,
        LCID: 4096,
        listSeparator: ",",
        MacCodePage: 10000,
        OEMCodePage: 850,
        IsReadOnly: true
    })
});
|
AndreyZM/Bridge
|
Bridge/Resources/Locales/en-MH.js
|
JavaScript
|
apache-2.0
| 2,959
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.nd4j.linalg.schedule;
import lombok.Data;
import org.nd4j.shade.jackson.annotation.JsonProperty;
/**
* An exponential schedule, with 2 parameters: initial value, and gamma.<br>
* value(i) = initialValue * gamma^i
* where i is the iteration or epoch (depending on the setting)
*
* @author Alex Black
*/
@Data
public class ExponentialSchedule implements ISchedule {

    private final ScheduleType scheduleType;
    private final double initialValue;
    private final double gamma;

    public ExponentialSchedule(@JsonProperty("scheduleType") ScheduleType scheduleType,
                               @JsonProperty("initialValue") double initialValue,
                               @JsonProperty("gamma") double gamma){
        this.scheduleType = scheduleType;
        this.initialValue = initialValue;
        this.gamma = gamma;
    }

    @Override
    public double valueAt(int iteration, int epoch) {
        // The decay exponent is either the iteration or the epoch counter,
        // depending on how the schedule was configured.
        final int step;
        if (scheduleType == ScheduleType.ITERATION) {
            step = iteration;
        } else {
            step = epoch;
        }
        return initialValue * Math.pow(gamma, step);
    }

    @Override
    public ISchedule clone() {
        // All state is final and immutable, so a field-wise copy suffices.
        return new ExponentialSchedule(scheduleType, initialValue, gamma);
    }
}
|
deeplearning4j/deeplearning4j
|
nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/schedule/ExponentialSchedule.java
|
Java
|
apache-2.0
| 1,955
|
# Copyright 2014 Xinyu, He <legendmohe@foxmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import logging.handlers
import os

# Both handlers below open files inside 'log/'; create the directory up
# front so a fresh checkout does not crash with a missing-path IOError.
# (os.path.isdir + makedirs rather than exist_ok= for Python 2 compat.)
if not os.path.isdir('log'):
    os.makedirs('log')

# Verbose debug log: large rotating file, records file/line/function.
file_name = 'log/home_debug.log'
debug_logger = logging.getLogger('DebugLog')
handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=50*1024*1024)
formatter = logging.Formatter("%(asctime)s - [%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
debug_logger.setLevel(logging.DEBUG)
debug_logger.addHandler(handler)
debug_logger.propagate = False  # now if you use logger it will not log to console.

# Common application log: INFO and above, smaller rotation threshold.
comm_name = 'log/home.log'
comm_logger = logging.getLogger('CommonLog')
handler = logging.handlers.RotatingFileHandler(comm_name, maxBytes=20*1024*1024)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s [%(filename)s - %(funcName)s] ')
handler.setFormatter(formatter)
comm_logger.setLevel(logging.INFO)
comm_logger.addHandler(handler)
# comm_logger.propagate = False # now if you use logger it will not log to console.

# Root logger still echoes INFO+ to the console.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')

# def stack_info_debug(info):
#     stack_info = inspect.currentframe().f_back.f_code.co_name
#     debug_logger.debug("%s: %s" % (stack_info, info))

# Module-level aliases: bare names log to the common log, F-prefixed names
# to the debug file.
DEBUG = debug_logger.debug
# DEBUG = stack_info_debug  # only output to file
INFO = comm_logger.info
WARN = comm_logger.warning
ERROR = comm_logger.error
CRITICAL = comm_logger.critical
FDEBUG = debug_logger.debug
FINFO = debug_logger.info
FWARN = debug_logger.warning
FERROR = debug_logger.error
FCRITICAL = debug_logger.critical
EXCEPTION = comm_logger.exception
|
fangjing828/LEHome
|
util/log.py
|
Python
|
apache-2.0
| 2,186
|
/*
* Copyright 2018 Google
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Firestore/core/src/model/transform_operation.h"
#include "Firestore/core/test/unit/testutil/testutil.h"
#include "gtest/gtest.h"
namespace firebase {
namespace firestore {
namespace model {
using testutil::Value;
using Type = TransformOperation::Type;
// Verifies ServerTimestampTransform identity semantics: its type() is
// Type::ServerTimestamp, two instances compare equal (the transform
// carries no state), and it differs from an unrelated transform kind.
TEST(TransformOperationsTest, ServerTimestamp) {
  ServerTimestampTransform transform;
  EXPECT_EQ(Type::ServerTimestamp, transform.type());
  ServerTimestampTransform another;
  NumericIncrementTransform other(Value(1));
  EXPECT_EQ(transform, another);
  EXPECT_NE(transform, other);
}
// TODO(mikelehen): Add ArrayTransform test once it no longer depends on
// FSTFieldValue and can be exposed to C++ code.
} // namespace model
} // namespace firestore
} // namespace firebase
|
firebase/firebase-ios-sdk
|
Firestore/core/test/unit/model/transform_operation_test.cc
|
C++
|
apache-2.0
| 1,345
|
package com.github.czyzby.lml.vis.parser.impl.tag;
import com.github.czyzby.lml.parser.LmlParser;
import com.github.czyzby.lml.parser.impl.tag.actor.ProgressBarLmlTag;
import com.github.czyzby.lml.parser.impl.tag.builder.FloatRangeLmlActorBuilder;
import com.github.czyzby.lml.parser.tag.LmlTag;
import com.kotcrab.vis.ui.widget.VisProgressBar;
/** Handles {@link VisProgressBar} actor. Expects that the text between its tags is a valid float - it will be set as
* bar's value. Be careful though, as changing the value in such way might trigger registered change listeners. Mapped
* to "progressBar", "visProgressBar".
*
* @author MJ */
public class VisProgressBarLmlTag extends ProgressBarLmlTag {
    /** @param parser parses the LML template containing this tag.
     * @param parentTag direct parent of this tag; may be null for root tags.
     * @param rawTagData unparsed tag declaration, consumed by the superclass. */
    public VisProgressBarLmlTag(final LmlParser parser, final LmlTag parentTag, final StringBuilder rawTagData) {
        super(parser, parentTag, rawTagData);
    }
    /** Overridden to supply VisUI's progress bar widget instead of the
     * standard Scene2D one; range and style come from the tag's attributes. */
    @Override
    protected VisProgressBar getNewInstanceOfProgressBar(final FloatRangeLmlActorBuilder rangeBuilder) {
        return new VisProgressBar(rangeBuilder.getMin(), rangeBuilder.getMax(), rangeBuilder.getStepSize(),
                rangeBuilder.isVertical(), rangeBuilder.getStyleName());
    }
}
|
tommyettinger/SquidSetup
|
src/main/java/com/github/czyzby/lml/vis/parser/impl/tag/VisProgressBarLmlTag.java
|
Java
|
apache-2.0
| 1,180
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Module dedicated functions/classes dealing with rate limiting requests.
This module handles rate liming at a per-user level, so it should not be used
to prevent intentional Denial of Service attacks, as we can assume a DOS can
easily come through multiple user accounts. DOS protection should be done at a
different layer. Instead this module should be used to protect against
unintentional user actions. With that in mind the limits set here should be
high enough as to not rate-limit any intentional actions.
To find good rate-limit values, check how long requests are taking (see logs)
in your environment to assess your capabilities and multiply out to get
figures.
NOTE: As the rate-limiting here is done in memory, this only works per
process (each process will have its own rate limiting counter).
"""
import collections
import copy
import httplib
import math
import re
import time
from oslo.serialization import jsonutils
from oslo.utils import importutils
import webob.dec
import webob.exc
from nova.api.openstack.compute.views import limits as limits_views
from nova.api.openstack import wsgi
from nova.i18n import _
from nova import quota
from nova import utils
from nova import wsgi as base_wsgi
QUOTAS = quota.QUOTAS
LIMITS_PREFIX = "limits."
class LimitsController(object):
    """Controller for accessing limits in the OpenStack API."""

    def index(self, req):
        """Return all global and rate limit information."""
        context = req.environ['nova.context']
        # Callers may inspect another tenant via ?tenant_id=...; otherwise
        # the authenticated project's quotas are reported.
        project_id = req.params.get('tenant_id', context.project_id)
        quotas = QUOTAS.get_project_quotas(context, project_id, usages=False)
        abs_limits = {name: info['limit'] for name, info in quotas.items()}
        rate_limits = req.environ.get("nova.limits", [])
        return self._get_view_builder(req).build(rate_limits, abs_limits)

    def create(self, req, body):
        """Create a new limit."""
        raise webob.exc.HTTPNotImplemented()

    def delete(self, req, id):
        """Delete the limit."""
        raise webob.exc.HTTPNotImplemented()

    def detail(self, req):
        """Return limit details."""
        raise webob.exc.HTTPNotImplemented()

    def show(self, req, id):
        """Show limit information."""
        raise webob.exc.HTTPNotImplemented()

    def update(self, req, id, body):
        """Update existing limit."""
        raise webob.exc.HTTPNotImplemented()

    def _get_view_builder(self, req):
        return limits_views.ViewBuilder()
def create_resource():
    # Wrap the limits controller in a routable WSGI resource.
    return wsgi.Resource(LimitsController())
class Limit(object):
    """Stores information about a limit for HTTP requests.

    Throttling uses a "leaky bucket": each matching request pours
    ``request_value`` units into a water level that drains linearly with
    wall-clock time; when the level would exceed ``capacity`` the request
    must be delayed.
    """
    # Reverse map: seconds-per-unit -> unit name ("MINUTE", "HOUR", ...).
    UNITS = dict([(v, k) for k, v in utils.TIME_UNITS.items()])
    def __init__(self, verb, uri, regex, value, unit):
        """Initialize a new `Limit`.
        @param verb: HTTP verb (POST, PUT, etc.)
        @param uri: Human-readable URI
        @param regex: Regular expression format for this limit
        @param value: Integer number of requests which can be made
        @param unit: Unit of measure for the value parameter
        """
        self.verb = verb
        self.uri = uri
        self.regex = regex
        self.value = int(value)
        self.unit = unit
        self.unit_string = self.display_unit().lower()
        self.remaining = int(value)
        if value <= 0:
            raise ValueError("Limit value must be > 0")
        self.last_request = None
        self.next_request = None
        # Leaky-bucket state: the bucket holds `unit` seconds' worth of
        # water; each request adds unit/value seconds, so exactly `value`
        # requests fit per unit of time.
        self.water_level = 0
        self.capacity = self.unit
        self.request_value = float(self.capacity) / float(self.value)
        msg = (_("Only %(value)s %(verb)s request(s) can be "
                 "made to %(uri)s every %(unit_string)s.") %
               {'value': self.value, 'verb': self.verb, 'uri': self.uri,
                'unit_string': self.unit_string})
        self.error_message = msg
    def __call__(self, verb, url):
        """Represents a call to this limit from a relevant request.
        @param verb: string http verb (POST, GET, etc.)
        @param url: string URL
        @return: seconds to wait when over the limit, else None (implicit)
        """
        # Ignore requests this limit does not cover.
        if self.verb != verb or not re.match(self.regex, url):
            return
        now = self._get_time()
        if self.last_request is None:
            self.last_request = now
        # Drain the bucket by the seconds elapsed since the last request,
        # then add this request's contribution.
        leak_value = now - self.last_request
        self.water_level -= leak_value
        self.water_level = max(self.water_level, 0)
        self.water_level += self.request_value
        difference = self.water_level - self.capacity
        self.last_request = now
        if difference > 0:
            # Overflow: roll back this request and report the wait time.
            self.water_level -= self.request_value
            self.next_request = now + difference
            return difference
        # Under the limit: recompute how many requests remain in this window.
        cap = self.capacity
        water = self.water_level
        val = self.value
        self.remaining = math.floor(((cap - water) / cap) * val)
        self.next_request = now
    def _get_time(self):
        """Retrieve the current time. Broken out for testability."""
        return time.time()
    def display_unit(self):
        """Display the string name of the unit."""
        return self.UNITS.get(self.unit, "UNKNOWN")
    def display(self):
        """Return a useful representation of this class."""
        return {
            "verb": self.verb,
            "URI": self.uri,
            "regex": self.regex,
            "value": self.value,
            "remaining": int(self.remaining),
            "unit": self.display_unit(),
            "resetTime": int(self.next_request or self._get_time()),
        }
# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
# These defaults apply per user; counters live in process memory only.
DEFAULT_LIMITS = [
    Limit("POST", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
    Limit("POST", "*/servers", "^/servers", 120, utils.TIME_UNITS['MINUTE']),
    Limit("PUT", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
    Limit("GET", "*changes-since*", ".*changes-since.*", 120,
          utils.TIME_UNITS['MINUTE']),
    Limit("DELETE", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
    Limit("GET", "*/os-fping", "^/os-fping", 12, utils.TIME_UNITS['MINUTE']),
]
class RateLimitingMiddleware(base_wsgi.Middleware):
    """Rate-limits requests passing through this middleware. All limit
    information is stored in memory for this implementation.
    """
    def __init__(self, application, limits=None, limiter=None, **kwargs):
        """Initialize new `RateLimitingMiddleware`.
        It wraps the given WSGI application and sets up the given limits.
        @param application: WSGI application to wrap
        @param limits: String describing limits
        @param limiter: String identifying class for representing limits
        Other parameters are passed to the constructor for the limiter.
        """
        base_wsgi.Middleware.__init__(self, application)
        # Select the limiter class: the in-memory default, or a dotted-path
        # class name loaded dynamically.
        if limiter is None:
            limiter = Limiter
        else:
            limiter = importutils.import_class(limiter)
        # Parse the limits, if any are provided
        if limits is not None:
            limits = limiter.parse_limits(limits)
        self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Represents a single call through this middleware.
        We should record the request if we have a limit relevant to it.
        If no limit is relevant to the request, ignore it.
        If the request should be rate limited, return a fault telling the user
        they are over the limit and need to retry later.
        """
        verb = req.method
        url = req.url
        # Limits are tracked per authenticated user when a context exists;
        # anonymous requests share the None bucket.
        context = req.environ.get("nova.context")
        if context:
            username = context.user_id
        else:
            username = None
        delay, error = self._limiter.check_for_delay(verb, url, username)
        if delay:
            msg = _("This request was rate-limited.")
            retry = time.time() + delay
            return wsgi.RateLimitFault(msg, error, retry)
        # Expose current limits downstream (e.g. for the /limits resource).
        req.environ["nova.limits"] = self._limiter.get_limits(username)
        # Returning the wrapped app lets webob's wsgify invoke it.
        return self.application
class Limiter(object):
    """Rate-limit checking class which handles limits in memory."""
    def __init__(self, limits, **kwargs):
        """Initialize the new `Limiter`.
        @param limits: List of `Limit` objects
        """
        # Each user gets an independent deep copy of the limits, because
        # Limit instances carry mutable per-user counter state.
        self.limits = copy.deepcopy(limits)
        self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))
        # Pick up any per-user limit information (kwargs like
        # "limits.<username>=<limit string>").
        for key, value in kwargs.items():
            if key.startswith(LIMITS_PREFIX):
                username = key[len(LIMITS_PREFIX):]
                self.levels[username] = self.parse_limits(value)
    def get_limits(self, username=None):
        """Return the limits for a given user."""
        return [limit.display() for limit in self.levels[username]]
    def check_for_delay(self, verb, url, username=None):
        """Check the given verb/url/username triplet for limit.
        @return: Tuple of delay (in seconds) and error message (or None, None)
        """
        delays = []
        for limit in self.levels[username]:
            delay = limit(verb, url)
            if delay:
                delays.append((delay, limit.error_message))
        if delays:
            # Report the longest applicable delay (sort ascending, but the
            # caller only needs the first tuple returned here).
            delays.sort()
            return delays[0]
        return None, None
    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor. We
    # put this in the class so that subclasses can override the
    # default limit parsing.
    @staticmethod
    def parse_limits(limits):
        """Convert a string into a list of Limit instances. This
        implementation expects a semicolon-separated sequence of
        parenthesized groups, where each group contains a
        comma-separated sequence consisting of HTTP method,
        user-readable URI, a URI reg-exp, an integer number of
        requests which can be made, and a unit of measure. Valid
        values for the latter are "SECOND", "MINUTE", "HOUR", and
        "DAY".
        @return: List of Limit instances.
        """
        # Handle empty limit strings
        limits = limits.strip()
        if not limits:
            return []
        # Split up the limits by semicolon
        result = []
        for group in limits.split(';'):
            group = group.strip()
            if group[:1] != '(' or group[-1:] != ')':
                raise ValueError("Limit rules must be surrounded by "
                                 "parentheses")
            group = group[1:-1]
            # Extract the Limit arguments
            args = [a.strip() for a in group.split(',')]
            if len(args) != 5:
                raise ValueError("Limit rules must contain the following "
                                 "arguments: verb, uri, regex, value, unit")
            # Pull out the arguments
            verb, uri, regex, value, unit = args
            # Upper-case the verb
            verb = verb.upper()
            # Convert value--raises ValueError if it's not integer
            value = int(value)
            # Convert unit
            unit = unit.upper()
            if unit not in utils.TIME_UNITS:
                raise ValueError("Invalid units specified")
            unit = utils.TIME_UNITS[unit]
            # Build a limit
            result.append(Limit(verb, uri, regex, value, unit))
        return result
class WsgiLimiter(object):
    """Rate-limit checking from a WSGI application. Uses an in-memory
    `Limiter`.
    To use, POST ``/<username>`` with JSON data such as::
        {
            "verb" : GET,
            "path" : "/servers"
        }
    and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
    header containing the number of seconds to wait before the action would
    succeed.
    """
    def __init__(self, limits=None):
        """Initialize the new `WsgiLimiter`.
        @param limits: List of `Limit` objects
        """
        self._limiter = Limiter(limits or DEFAULT_LIMITS)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, request):
        """Handles a call to this application.
        Returns 204 if the request is acceptable to the limiter, else a 403
        is returned with a relevant header indicating when the request *will*
        succeed.
        """
        # Only POST carries a limit query; everything else is rejected.
        if request.method != "POST":
            raise webob.exc.HTTPMethodNotAllowed()
        try:
            info = dict(jsonutils.loads(request.body))
        except ValueError:
            raise webob.exc.HTTPBadRequest()
        # The first path segment identifies the user whose bucket to check.
        username = request.path_info_pop()
        verb = info.get("verb")
        path = info.get("path")
        delay, error = self._limiter.check_for_delay(verb, path, username)
        if delay:
            headers = {"X-Wait-Seconds": "%.2f" % delay}
            return webob.exc.HTTPForbidden(headers=headers, explanation=error)
        else:
            return webob.exc.HTTPNoContent()
class WsgiLimiterProxy(object):
    """Rate-limit requests based on answers from a remote source."""
    def __init__(self, limiter_address):
        """Initialize the new `WsgiLimiterProxy`.
        @param limiter_address: IP/port combination of where to request limit
        """
        self.limiter_address = limiter_address
    def check_for_delay(self, verb, path, username=None):
        """Ask the remote limiter whether this request must be delayed.
        Mirrors `Limiter.check_for_delay`.
        @return: Tuple of (delay, error message), or (None, None) when the
                 request is allowed.
        """
        body = jsonutils.dumps({"verb": verb, "path": path})
        headers = {"Content-Type": "application/json"}
        conn = httplib.HTTPConnection(self.limiter_address)
        # The remote WsgiLimiter routes on the user in the path.
        if username:
            conn.request("POST", "/%s" % (username), body, headers)
        else:
            conn.request("POST", "/", body, headers)
        resp = conn.getresponse()
        # Fix: the original test was `200 >= resp.status < 300`, which
        # accepted informational (1xx) responses and rejected the 204
        # No Content that WsgiLimiter returns for allowed requests. Any
        # 2xx status means "not limited".
        if 200 <= resp.status < 300:
            return None, None
        return resp.getheader("X-Wait-Seconds"), resp.read() or None
    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor.
    # This implementation returns an empty list, since all limit
    # decisions are made by a remote server.
    @staticmethod
    def parse_limits(limits):
        """Ignore a limits string--simply doesn't apply for the limit
        proxy.
        @return: Empty list.
        """
        return []
|
silenceli/nova
|
nova/api/openstack/compute/limits.py
|
Python
|
apache-2.0
| 15,344
|
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.4-2
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2014.09.17 at 10:18:01 AM IST
//
package com.pacificmetrics.saaif.wordlist;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlID;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.CollapsedStringAdapter;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <extension base="{http://www.w3.org/1999/xhtml}Inline">
* <attGroup ref="{http://www.w3.org/1999/xhtml}attrs"/>
* </extension>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "")
@XmlRootElement(name = "p")
public class P extends Inline {
@XmlAttribute(name = "onclick")
protected String onclick;
@XmlAttribute(name = "ondblclick")
protected String ondblclick;
@XmlAttribute(name = "onmousedown")
protected String onmousedown;
@XmlAttribute(name = "onmouseup")
protected String onmouseup;
@XmlAttribute(name = "onmouseover")
protected String onmouseover;
@XmlAttribute(name = "onmousemove")
protected String onmousemove;
@XmlAttribute(name = "onmouseout")
protected String onmouseout;
@XmlAttribute(name = "onkeypress")
protected String onkeypress;
@XmlAttribute(name = "onkeydown")
protected String onkeydown;
@XmlAttribute(name = "onkeyup")
protected String onkeyup;
@XmlAttribute(name = "lang")
@XmlJavaTypeAdapter(CollapsedStringAdapter.class)
protected String langCode;
@XmlAttribute(name = "lang", namespace = "http://www.w3.org/XML/1998/namespace")
protected String lang;
@XmlAttribute(name = "dir")
@XmlJavaTypeAdapter(CollapsedStringAdapter.class)
protected String dir;
@XmlAttribute(name = "id")
@XmlJavaTypeAdapter(CollapsedStringAdapter.class)
@XmlID
@XmlSchemaType(name = "ID")
protected String id;
@XmlAttribute(name = "class")
@XmlSchemaType(name = "NMTOKENS")
protected List<String> clazz;
@XmlAttribute(name = "style")
protected String style;
@XmlAttribute(name = "title")
protected String title;
/**
* Gets the value of the onclick property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOnclick() {
return onclick;
}
/**
* Sets the value of the onclick property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOnclick(String value) {
this.onclick = value;
}
/**
* Gets the value of the ondblclick property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOndblclick() {
return ondblclick;
}
/**
* Sets the value of the ondblclick property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOndblclick(String value) {
this.ondblclick = value;
}
/**
* Gets the value of the onmousedown property.
*
* @return
* possible object is
* {@link String }
*
*/
// NOTE(review): JAXB-style generated accessors (one getter/setter per XML
// attribute, plus a live-list accessor for 'clazz') — presumably produced by
// xjc from a schema; prefer regenerating over hand-editing. TODO confirm.
public String getOnmousedown() {
return onmousedown;
}
/**
* Sets the value of the onmousedown property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOnmousedown(String value) {
this.onmousedown = value;
}
/**
* Gets the value of the onmouseup property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOnmouseup() {
return onmouseup;
}
/**
* Sets the value of the onmouseup property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOnmouseup(String value) {
this.onmouseup = value;
}
/**
* Gets the value of the onmouseover property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOnmouseover() {
return onmouseover;
}
/**
* Sets the value of the onmouseover property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOnmouseover(String value) {
this.onmouseover = value;
}
/**
* Gets the value of the onmousemove property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOnmousemove() {
return onmousemove;
}
/**
* Sets the value of the onmousemove property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOnmousemove(String value) {
this.onmousemove = value;
}
/**
* Gets the value of the onmouseout property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOnmouseout() {
return onmouseout;
}
/**
* Sets the value of the onmouseout property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOnmouseout(String value) {
this.onmouseout = value;
}
/**
* Gets the value of the onkeypress property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOnkeypress() {
return onkeypress;
}
/**
* Sets the value of the onkeypress property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOnkeypress(String value) {
this.onkeypress = value;
}
/**
* Gets the value of the onkeydown property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOnkeydown() {
return onkeydown;
}
/**
* Sets the value of the onkeydown property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOnkeydown(String value) {
this.onkeydown = value;
}
/**
* Gets the value of the onkeyup property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOnkeyup() {
return onkeyup;
}
/**
* Sets the value of the onkeyup property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOnkeyup(String value) {
this.onkeyup = value;
}
/**
* Gets the value of the langCode property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getLangCode() {
return langCode;
}
/**
* Sets the value of the langCode property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setLangCode(String value) {
this.langCode = value;
}
/**
* Gets the value of the lang property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getLang() {
return lang;
}
/**
* Sets the value of the lang property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setLang(String value) {
this.lang = value;
}
/**
* Gets the value of the dir property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getDir() {
return dir;
}
/**
* Sets the value of the dir property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setDir(String value) {
this.dir = value;
}
/**
* Gets the value of the id property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getId() {
return id;
}
/**
* Sets the value of the id property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setId(String value) {
this.id = value;
}
/**
* Gets the value of the clazz property.
*
* <p>
* This accessor method returns a reference to the live list,
* not a snapshot. Therefore any modification you make to the
* returned list will be present inside the JAXB object.
* This is why there is not a <CODE>set</CODE> method for the clazz property.
*
* <p>
* For example, to add a new item, do as follows:
* <pre>
* getClazz().add(newItem);
* </pre>
*
*
* <p>
* Objects of the following type(s) are allowed in the list
* {@link String }
*
*
*/
public List<String> getClazz() {
// Lazily create the backing list on first access (standard JAXB pattern).
if (clazz == null) {
clazz = new ArrayList<String>();
}
return this.clazz;
}
/**
* Gets the value of the style property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getStyle() {
return style;
}
/**
* Sets the value of the style property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setStyle(String value) {
this.style = value;
}
/**
* Gets the value of the title property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getTitle() {
return title;
}
/**
* Sets the value of the title property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setTitle(String value) {
this.title = value;
}
}
|
SmarterApp/ItemAuthoring
|
sbac-iaip/java/src/main/java/com/pacificmetrics/saaif/wordlist/P.java
|
Java
|
apache-2.0
| 11,037
|
package com.googlecode.kanbanik.commands
import com.googlecode.kanbanik.model.{User, ClassOfService}
import com.googlecode.kanbanik.builders.ClassOfServiceBuilder
import com.googlecode.kanbanik.dtos.{ErrorDto, EmptyDto, ClassOfServiceDto, ListDto}
/** Command returning every class of service visible to the calling user as DTOs. */
class GetAllClassOfServices extends Command[EmptyDto, ListDto[ClassOfServiceDto]] {

  lazy val builder = new ClassOfServiceBuilder

  override def execute(params: EmptyDto, user: User): Either[ListDto[ClassOfServiceDto], ErrorDto] = {
    // Load all classes of service for this user, then map each to its DTO.
    val classesOfService = ClassOfService.all(user)
    val asDtos = classesOfService.map(builder.buildDto)
    Left(ListDto(asDtos))
  }
}
|
kanbanik/kanbanik
|
kanbanik-server/src/main/scala/com/googlecode/kanbanik/commands/GetAllClassOfServices.scala
|
Scala
|
apache-2.0
| 582
|
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package channelconfig
import (
"testing"
cb "github.com/hyperledger/fabric/protos/common"
mspprotos "github.com/hyperledger/fabric/protos/msp"
pb "github.com/hyperledger/fabric/protos/peer"
"github.com/stretchr/testify/assert"
)
// The tests in this file are all relatively pointless, as all of this function is exercised
// in the normal startup path and things will break horribly if they are broken.
// There's additionally really nothing to test without simply re-implementing the function
// in the test, which also provides no value. But, not including these produces an artificially
// low code coverage count, so here they are.
// basicTest asserts the minimal contract shared by every StandardConfigValue
// constructor under test: the value itself is non-nil, its Key() is non-empty,
// and its Value() proto message is non-nil.
func basicTest(t *testing.T, sv *StandardConfigValue) {
assert.NotNil(t, sv)
assert.NotEmpty(t, sv.Key())
assert.NotNil(t, sv.Value())
}
// TestUtilsBasic smoke-tests each StandardConfigValue constructor: every
// returned value must satisfy the shared non-nil/non-empty contract checked
// by basicTest. (See the file comment: these exist mainly for coverage.)
func TestUtilsBasic(t *testing.T) {
basicTest(t, ConsortiumValue("foo"))
basicTest(t, HashingAlgorithmValue())
basicTest(t, BlockDataHashingStructureValue())
basicTest(t, OrdererAddressesValue([]string{"foo:1", "bar:2"}))
basicTest(t, ConsensusTypeValue("foo"))
basicTest(t, BatchSizeValue(1, 2, 3))
basicTest(t, BatchTimeoutValue("1s"))
basicTest(t, ChannelRestrictionsValue(7))
basicTest(t, KafkaBrokersValue([]string{"foo:1", "bar:2"}))
basicTest(t, MSPValue(&mspprotos.MSPConfig{}))
basicTest(t, CapabilitiesValue(map[string]bool{"foo": true, "bar": false}))
// gofmt -s idiom: the element type (&pb.AnchorPeer) is elided inside a
// []*pb.AnchorPeer composite literal; `{}` allocates a *pb.AnchorPeer.
basicTest(t, AnchorPeersValue([]*pb.AnchorPeer{{}, {}}))
basicTest(t, ChannelCreationPolicyValue(&cb.Policy{}))
}
|
mqshen/fabric
|
common/channelconfig/util_test.go
|
GO
|
apache-2.0
| 1,591
|
/*global angular */
'use strict';
// Unit tests for ExampleCtrl: the controller must exist and expose the
// expected `number` and `title` view-model fields.
describe('Unit: ExampleCtrl', function() {
var ctrl;
beforeEach(function() {
// instantiate the app module
angular.mock.module('app');
// mock the controller
angular.mock.inject(function($controller) {
ctrl = $controller('ExampleCtrl');
});
});
it('should exist', function() {
expect(ctrl).toBeDefined();
});
it('should have a number variable equal to 1234', function() {
expect(ctrl.number).toEqual(1234);
});
// Bug fix: the description contains single quotes, so the string literal must
// be double-quoted. The original nested unescaped single quotes inside a
// single-quoted string, which is a JavaScript SyntaxError.
it("should have a title variable equal to 'AngularJS, Gulp, and Browserify!'", function() {
expect(ctrl.title).toEqual('AngularJS, Gulp, and Browserify!');
});
});
|
nomagi/miley
|
miley/static/test/unit/controllers/example_spec.js
|
JavaScript
|
apache-2.0
| 704
|
// View-model for the "split virtual server" wizard. Tracks the wizard Steps,
// per-stage status (Stages), and working Data (origin / target / created VSes),
// and converts between the backend entity shape and this view shape.
// Depends on underscore (`_`), `SlbException`, `constants`, and an event mixin
// providing `this.trigger` — presumably mixed in elsewhere; TODO confirm.
var vsSeperateObj = {
name: '',
id: '',
setId: function (id) {
this.id = id;
},
getId: function () {
return this.id;
},
status: '',
setStatus: function (status) {
this.status = status;
},
'created-time': '',
// Data zone
Steps: {},
Stages: {},
Data: {
originVs: {},
targetVs: {
total: 2
},
createdVs: {}
},
validation: {
'one-domain-error': '当前Virtual Server只有一个domain',
'status-error': '当前Virtual Sever 状态不是激活状态'
},
// initialize
initialize: function () {
this.Steps = {
'select-vs': {
index: 0,
vs: ''
}
};
},
// Steps
newSelectVsStep: function (vsId) {
// Bug fix: the original wrapped the step in an extra 'select-vs' level
// (Steps['select-vs']['select-vs']), which is inconsistent with
// initialize() and made setOriginVs/getOriginVs read/write an
// undefined `vs` property. Store the step object directly, as
// initialize() does.
this.Steps['select-vs'] = {
index: 0,
vs: vsId
};
},
newAddVsStep: function () {
this.Steps['add-vs'] = {
index: 1,
vses: ''
};
},
newCreateVsStep: function () {
this.Steps['create-vs'] = {
index: 2,
vses: ''
};
},
getSteps: function () {
return this.Steps;
},
// First step
setOriginVs: function (vsId) {
this.Steps['select-vs'].vs = vsId;
// trigger select vs
this.trigger('select-vs');
},
getOriginVs: function () {
return this.Steps['select-vs'].vs;
},
setOriginVsData: function (vs, invalidMsg) {
this.Data.originVs = vs;
if (invalidMsg) this.Data.originVs.error = invalidMsg;
},
getOriginVsData: function () {
return this.Data.originVs;
},
// Returns true when the VS may be split; otherwise returns an SlbException
// describing why (note: the error is RETURNED, not thrown — callers check
// the return value).
validateOriginVs: function (vs) {
var errorMsgObj = this.validation;
var status = vs.status;
if (!status || status.toLowerCase() != 'activated') return new SlbException(errorMsgObj['status-error']);
var domains = vs.domains;
if (!domains || domains.length == 1) return new SlbException(errorMsgObj['one-domain-error']);
return true;
},
// Second step
setTargetVs: function (vs) {
if (!this.Data.targetVs.vses) {
this.Data.targetVs.vses = [vs];
} else {
this.Data.targetVs.vses.push(vs);
}
},
replaceTargetVs: function (index, vs) {
this.Data.targetVs.vses[index] = vs;
},
getTargetVsData: function () {
return this.Data.targetVs;
},
resetTargetVs: function () {
// '' (not []) is the deliberate "empty" sentinel used throughout.
this.Data.targetVs.vses = '';
},
// Returns the origin-VS domains not yet assigned to a target VS, or an
// SlbException when every domain is already allocated.
getTargetVsDomains: function () {
var originVs = this.Data.originVs;
var targetVs = this.Data.targetVs.vses;
var originVsDomains = _.pluck(originVs.domains, 'name');
var targetVsDomains = targetVs ? _.flatten(targetVs) : [];
// Not has slot to share?
if (targetVsDomains.length == originVsDomains.length) {
return new SlbException("域名已经被分配完!");
} else {
var domains = _.difference(originVsDomains, targetVsDomains);
return _.map(domains, function (v) {
return {
domain: v
}
});
}
},
// Third step
setCreatedVs: function (vses) {
this.Data.createdVs = vses;
},
// NOTE: the `vses` parameter is unused; kept for signature compatibility.
getCreatedVs: function (vses) {
return this.Data.createdVs;
},
// Stages
setStage: function (key, stage) {
// remove the old
this.Stages[key] = stage;
},
getStage: function (key) {
return this.Stages[key];
},
getFailedStages: function () {
return _.pick(this.Stages, function (v, k, item) {
return v.status && v.status.toLowerCase() == 'fail';
});
},
startRevert: function () {
this.trigger('start-revert');
},
// Parse do entity to view entity
toEntity: function (entity) {
var mapping = constants.vsSplitStatus;
var name = entity.name;
var id = entity.id;
var status = entity.status;
var createdTime = entity['create-time'];
var sourceVsId = entity['source-vs-id'];
var targetVses = entity['domain-groups'];
var newVsIds = entity['new-vs-ids'];
var createdStage = entity['created'];
var createAndBindStage = entity['create-and-bind-new-vs'];
var splitStage = entity['split-vs'];
var rollbackStage = entity['rollback'];
this.name = name;
this.id = id;
// Keep the raw status in statusdo; display the mapped label in status.
this.status = mapping[status] || status;
this.statusdo = status;
this['create-time'] = createdTime;
if (sourceVsId) {
this.newSelectVsStep(sourceVsId);
this.newAddVsStep();
this.setOriginVsData({
id: sourceVsId
});
}
if (targetVses && targetVses.length > 0) {
this.Data.targetVs.total = targetVses.length;
this.Data.targetVs.vses = targetVses;
}
// For each stage: mirror the entity's stage, or drop a stale one.
if (createdStage) {
this.Stages['created'] = createdStage;
}else{
delete this.Stages['created'];
}
if (createAndBindStage) {
this.Stages['create-and-bind-new-vs'] = createAndBindStage;
}else{
delete this.Stages['create-and-bind-new-vs'];
}
if (splitStage) {
this.Stages['split-vs'] = splitStage;
}else{
delete this.Stages['split-vs'];
}
if (rollbackStage) {
this.Stages['rollback'] = rollbackStage;
}else{
delete this.Stages['rollback'];
}
if ((newVsIds && newVsIds.length > 0) || createAndBindStage || splitStage) {
this.newCreateVsStep();
}
return this;
},
// Parse view entity to do entity
toEntityDo: function () {
var result = {};
if (this.name) result.name = this.name;
if (this.id) result.id = this.id;
// Send the raw backend status, not the display label.
if (this.status) result.status = this.statusdo;
if (this['created-time']) result['created-time'] = this['created-time'];
// source vs id
if (this.Data.originVs.id) result['source-vs-id'] = this.Data.originVs.id;
// target domains
var targets = this.Data.targetVs.vses;
if (targets && targets.length > 0) {
var isBlank = _.flatten(targets);
if (isBlank.length > 0) {
result['domain-groups'] = targets;
}
}
// created vs
var createdVs = this.Data.createdVs;
if (createdVs && _.keys(createdVs).length > 0) {
result['new-vs-ids'] = _.keys(createdVs);
}
return result;
}
};
|
sdgdsffdsfff/zeus
|
slb/src/main/web/static-new/viewmodel/vs-seperate.js
|
JavaScript
|
apache-2.0
| 7,084
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.ttl;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.cache.Cache;
import javax.cache.configuration.FactoryBuilder;
import javax.cache.expiry.Duration;
import javax.cache.expiry.TouchedExpiryPolicy;
import javax.cache.integration.CacheLoaderException;
import javax.cache.integration.CacheWriterException;
import javax.cache.integration.CompletionListenerFuture;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.cache.eviction.lru.LruEvictionPolicy;
import org.apache.ignite.cache.query.SqlQuery;
import org.apache.ignite.cache.store.CacheStoreAdapter;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.lang.IgniteBiInClosure;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.ignite.cache.CacheMode.LOCAL;
import static org.apache.ignite.cache.CachePeekMode.BACKUP;
import static org.apache.ignite.cache.CachePeekMode.NEAR;
import static org.apache.ignite.cache.CachePeekMode.OFFHEAP;
import static org.apache.ignite.cache.CachePeekMode.ONHEAP;
import static org.apache.ignite.cache.CachePeekMode.PRIMARY;
import static org.apache.ignite.cache.CacheRebalanceMode.SYNC;
import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
/**
* TTL test.
*/
public abstract class CacheTtlAbstractSelfTest extends GridCommonAbstractTest {
/** Shared in-memory IP finder for all test nodes. */
private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
/** LRU eviction policy capacity (smaller than SIZE so eviction happens). */
private static final int MAX_CACHE_SIZE = 5;
/** Number of entries loaded/put in each test. */
private static final int SIZE = 11;
/** Cache-wide TTL (ms) applied through the default TouchedExpiryPolicy. */
private static final long DEFAULT_TIME_TO_LIVE = 2000;
/** {@inheritDoc} */
@Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
CacheConfiguration ccfg = new CacheConfiguration();
ccfg.setCacheMode(cacheMode());
ccfg.setAtomicityMode(atomicityMode());
LruEvictionPolicy plc = new LruEvictionPolicy();
plc.setMaxSize(MAX_CACHE_SIZE);
ccfg.setEvictionPolicy(plc);
ccfg.setOnheapCacheEnabled(true);
ccfg.setIndexedTypes(Integer.class, Integer.class);
ccfg.setBackups(2);
ccfg.setWriteSynchronizationMode(FULL_SYNC);
ccfg.setRebalanceMode(SYNC);
// Store adapter backs loadCache/loadAll: loads keys 0..SIZE-1, identity load,
// and no-op write/delete (write-through is irrelevant to TTL behavior).
ccfg.setCacheStoreFactory(singletonFactory(new CacheStoreAdapter() {
@Override public void loadCache(IgniteBiInClosure clo, Object... args) {
for (int i = 0; i < SIZE; i++)
clo.apply(i, i);
}
@Override public Object load(Object key) throws CacheLoaderException {
return key;
}
@Override public void write(Cache.Entry entry) throws CacheWriterException {
// No-op.
}
@Override public void delete(Object key) throws CacheWriterException {
// No-op.
}
}));
ccfg.setExpiryPolicyFactory(
FactoryBuilder.factoryOf(new TouchedExpiryPolicy(new Duration(MILLISECONDS, DEFAULT_TIME_TO_LIVE))));
cfg.setCacheConfiguration(ccfg);
((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
return cfg;
}
/**
* @return Atomicity mode.
*/
protected abstract CacheAtomicityMode atomicityMode();
/**
* @return Cache mode.
*/
protected abstract CacheMode cacheMode();
/**
* @return GridCount
*/
protected abstract int gridCount();
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
startGrids(gridCount());
}
/** {@inheritDoc} */
@Override protected void afterTest() throws Exception {
stopAllGrids();
}
/**
* Entries populated via loadCache() must expire after the default TTL.
* (The +500 ms sleep slack throughout this class gives expiry time to run.)
*
* @throws Exception If failed.
*/
public void testDefaultTimeToLiveLoadCache() throws Exception {
IgniteCache<Integer, Integer> cache = jcache(0);
cache.loadCache(null);
checkSizeBeforeLive(SIZE);
Thread.sleep(DEFAULT_TIME_TO_LIVE + 500);
checkSizeAfterLive();
}
/**
* Entries populated via loadAll() must expire, with and without replaceExisting.
*
* @throws Exception If failed.
*/
public void testDefaultTimeToLiveLoadAll() throws Exception {
defaultTimeToLiveLoadAll(false);
defaultTimeToLiveLoadAll(true);
}
/**
* @param replaceExisting Replace existing value flag.
* @throws Exception If failed.
*/
private void defaultTimeToLiveLoadAll(boolean replaceExisting) throws Exception {
IgniteCache<Integer, Integer> cache = jcache(0);
CompletionListenerFuture fut = new CompletionListenerFuture();
Set<Integer> keys = new HashSet<>();
for (int i = 0; i < SIZE; ++i)
keys.add(i);
cache.loadAll(keys, replaceExisting, fut);
fut.get();
checkSizeBeforeLive(SIZE);
Thread.sleep(DEFAULT_TIME_TO_LIVE + 500);
checkSizeAfterLive();
}
/**
* Entries added via IgniteDataStreamer must expire, both with and without
* allowOverwrite.
*
* @throws Exception If failed.
*/
public void testDefaultTimeToLiveStreamerAdd() throws Exception {
try (IgniteDataStreamer<Integer, Integer> streamer = ignite(0).dataStreamer(null)) {
for (int i = 0; i < SIZE; i++)
streamer.addData(i, i);
}
checkSizeBeforeLive(SIZE);
Thread.sleep(DEFAULT_TIME_TO_LIVE + 500);
checkSizeAfterLive();
try (IgniteDataStreamer<Integer, Integer> streamer = ignite(0).dataStreamer(null)) {
streamer.allowOverwrite(true);
for (int i = 0; i < SIZE; i++)
streamer.addData(i, i);
}
checkSizeBeforeLive(SIZE);
Thread.sleep(DEFAULT_TIME_TO_LIVE + 500);
checkSizeAfterLive();
}
/**
* A single put() must expire after the default TTL.
*
* @throws Exception If failed.
*/
public void testDefaultTimeToLivePut() throws Exception {
IgniteCache<Integer, Integer> cache = jcache(0);
Integer key = 0;
cache.put(key, 1);
checkSizeBeforeLive(1);
Thread.sleep(DEFAULT_TIME_TO_LIVE + 500);
checkSizeAfterLive();
}
/**
* putAll() entries must expire after the default TTL.
*
* @throws Exception If failed.
*/
public void testDefaultTimeToLivePutAll() throws Exception {
IgniteCache<Integer, Integer> cache = jcache(0);
Map<Integer, Integer> entries = new HashMap<>();
for (int i = 0; i < SIZE; ++i)
entries.put(i, i);
cache.putAll(entries);
checkSizeBeforeLive(SIZE);
Thread.sleep(DEFAULT_TIME_TO_LIVE + 500);
checkSizeAfterLive();
}
/**
* Entries rebalanced to a newly joined node must still expire there.
* Skipped for LOCAL caches (no rebalancing).
*
* @throws Exception If failed.
*/
public void testDefaultTimeToLivePreload() throws Exception {
if (cacheMode() == LOCAL)
return;
IgniteCache<Integer, Integer> cache = jcache(0);
Map<Integer, Integer> entries = new HashMap<>();
for (int i = 0; i < SIZE; ++i)
entries.put(i, i);
cache.putAll(entries);
startGrid(gridCount());
checkSizeBeforeLive(SIZE, gridCount() + 1);
Thread.sleep(DEFAULT_TIME_TO_LIVE + 500);
checkSizeAfterLive(gridCount() + 1);
}
/**
* withExpiryPolicy() must override the default TTL: entries survive past
* the default TTL but expire after the longer per-operation TTL.
*
* @throws Exception If failed.
*/
public void testTimeToLiveTtl() throws Exception {
long time = DEFAULT_TIME_TO_LIVE + 2000;
IgniteCache<Integer, Integer> cache = this.<Integer, Integer>jcache(0).withExpiryPolicy(
new TouchedExpiryPolicy(new Duration(MILLISECONDS, time)));
for (int i = 0; i < SIZE; i++)
cache.put(i, i);
checkSizeBeforeLive(SIZE);
Thread.sleep(DEFAULT_TIME_TO_LIVE + 500);
checkSizeBeforeLive(SIZE);
Thread.sleep(time - DEFAULT_TIME_TO_LIVE + 500);
checkSizeAfterLive();
}
/**
* @param size Expected size.
* @throws Exception If failed.
*/
private void checkSizeBeforeLive(int size) throws Exception {
checkSizeBeforeLive(size, gridCount());
}
/**
* Asserts every node still holds all entries (primary+backup+near) and the
* SQL index still sees them, before the TTL elapses.
*
* @param size Expected size.
* @param gridCnt Number of nodes.
* @throws Exception If failed.
*/
private void checkSizeBeforeLive(int size, int gridCnt) throws Exception {
for (int i = 0; i < gridCnt; ++i) {
IgniteCache<Integer, Integer> cache = jcache(i);
log.info("Size [node=" + i + ", " + cache.localSize(PRIMARY, BACKUP, NEAR) + ']');
assertEquals("Unexpected size, node: " + i, size, cache.localSize(PRIMARY, BACKUP, NEAR));
for (int key = 0; key < size; key++)
assertNotNull(cache.localPeek(key));
assertFalse(cache.query(new SqlQuery<>(Integer.class, "_val >= 0")).getAll().isEmpty());
}
}
/**
* @throws Exception If failed.
*/
private void checkSizeAfterLive() throws Exception {
checkSizeAfterLive(gridCount());
}
/**
* Asserts every node is fully empty (heap, off-heap, SQL index, localPeek)
* after the TTL has elapsed.
*
* @param gridCnt Number of nodes.
* @throws Exception If failed.
*/
private void checkSizeAfterLive(int gridCnt) throws Exception {
for (int i = 0; i < gridCnt; ++i) {
IgniteCache<Integer, Integer> cache = jcache(i);
log.info("Size [node=" + i +
", heap=" + cache.localSize(ONHEAP) +
", offheap=" + cache.localSize(OFFHEAP) + ']');
assertEquals(0, cache.localSize());
assertEquals(0, cache.localSize(OFFHEAP));
assertEquals(0, cache.query(new SqlQuery<>(Integer.class, "_val >= 0")).getAll().size());
for (int key = 0; key < SIZE; key++)
assertNull(cache.localPeek(key));
}
}
}
|
pperalta/ignite
|
modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/ttl/CacheTtlAbstractSelfTest.java
|
Java
|
apache-2.0
| 11,017
|
package zhy.com.highlight.position;
import android.graphics.RectF;
import zhy.com.highlight.HighLight;
/**
* Created by caizepeng on 16/8/20.
*/
/**
 * Base position callback: subclasses compute the highlight tip position via
 * {@link #getPosition}, and may fine-tune it by overriding {@link #posOffset}.
 */
public abstract class OnBaseCallback implements HighLight.OnPosCallback {
// Extra offset subclasses may apply when computing the tip position.
protected float offset ;
public OnBaseCallback() {
}
public OnBaseCallback(float offset) {
this.offset = offset;
}
/**
 * Override this method if the computed position needs adjusting.
 * (Translated from the original Chinese comment.)
 * @param rightMargin
 * @param bottomMargin
 * @param rectF
 * @param marginInfo
 */
public void posOffset(float rightMargin, float bottomMargin, RectF rectF, HighLight.MarginInfo marginInfo){}
/** Template method: compute the base position, then apply the optional offset hook. */
@Override
public void getPos(float rightMargin, float bottomMargin, RectF rectF, HighLight.MarginInfo marginInfo) {
getPosition(rightMargin,bottomMargin,rectF,marginInfo);
posOffset(rightMargin,bottomMargin,rectF,marginInfo);
}
/** Subclasses fill {@code marginInfo} with the tip position for the highlighted {@code rectF}. */
public abstract void getPosition(float rightMargin, float bottomMargin, RectF rectF, HighLight.MarginInfo marginInfo);
}
|
isanwenyu/Highlight
|
highlight/src/main/java/zhy/com/highlight/position/OnBaseCallback.java
|
Java
|
apache-2.0
| 1,028
|
var watson = require('watson-developer-cloud');
var config = require('../config');
exports.startSession = function(username, passwd, model, callback) {
if (model instanceof Function) {
callback = model;
if (config.lang == 'cht' || config.lang == 'chs') {
model = 'zh-CN_BroadbandModel';
} else if (config.lang == 'en') {
model = 'en-US_BroadbandModel';
}
}
var speech_to_text = watson.speech_to_text({
'username' : username,
'password' : passwd,
version : 'v1',
url : 'https://stream.watsonplatform.net/speech-to-text/api'
});
var rev = speech_to_text.createRecognizeStream({
'content_type' : 'audio/l16;rate=16000',
//{ 'content_type': 'audio/flac;rate=16000',
'interim_results' : true,
'continuous' : true,
'inactivity_timeout' : -1,
'model' : model
});
rev.on('results', function(data) {
var index = data.results.length ? data.results.length - 1 : 0;
if (data.results[index] && data.results[index].final
&& data.results[index].alternatives && callback) {
callback(data.results[index].alternatives[0].transcript);
}
});
rev.on('connection-close', function(code, description) {
console.info('Watson STT WS connection-closed,', code, description);
});
rev.on('connect', function(conn) {
console.info('Watson STT WS connected');
});
return rev;
}
|
project-humix/humix-dialog-module
|
lib/watson.js
|
JavaScript
|
apache-2.0
| 1,383
|
# Devise-backed application user. Has many Roles (HABTM) and belongs to a
# Team via the :players inverse association.
class User < ActiveRecord::Base
# Include default devise modules. Others available are:
# :token_authenticatable, :confirmable, :lockable and :timeoutable
devise :database_authenticatable, :registerable, :recoverable, :rememberable, :trackable, :validatable, :lockable
# Setup accessible (or protected) attributes for your model
#attr_accessible :email, :password, :password_confirmation, :remember_me, :roles, :avatar
has_and_belongs_to_many :roles
belongs_to :team, :inverse_of => :players
#validates_presence_of(:name)
#validates_presence_of(:account)
# Add Paperclip support for avatars
#has_attached_file :avatar, :styles => { :medium => "300x300>", :thumb => "100x100>" }
#attr_accessor :delete_avatar
#before_validation { self.avatar = nil if self.delete_avatar == '1' }
# True when the user has a role with the given name (String or Symbol).
def has_role?(role_in_question)
roles.where(name: role_in_question.to_s).any?
end
# All role names concatenated into one string (note: no separator).
def role_info
roles.map(&:name).join
end
end
|
RuanShan/xenglish
|
app/models/user.rb
|
Ruby
|
apache-2.0
| 953
|
// Protocol_1_8.h
/*
Declares the 1.8 protocol classes:
- cProtocol_1_8_0
- release 1.8 protocol (#47)
*/
#pragma once
#include "Protocol.h"
#include "../ByteBuffer.h"
#include "../mbedTLS++/AesCfb128Decryptor.h"
#include "../mbedTLS++/AesCfb128Encryptor.h"
class cProtocol_1_8_0 :
public cProtocol
{
typedef cProtocol super;
public:
cProtocol_1_8_0(cClientHandle * a_Client, const AString & a_ServerAddress, UInt16 a_ServerPort, UInt32 a_State);
/** Called when client sends some data: */
virtual void DataReceived(const char * a_Data, size_t a_Size) override;
/** Sending stuff to clients (alphabetically sorted): */
virtual void SendAttachEntity (const cEntity & a_Entity, const cEntity & a_Vehicle) override;
virtual void SendBlockAction (int a_BlockX, int a_BlockY, int a_BlockZ, char a_Byte1, char a_Byte2, BLOCKTYPE a_BlockType) override;
virtual void SendBlockBreakAnim (UInt32 a_EntityID, int a_BlockX, int a_BlockY, int a_BlockZ, char a_Stage) override;
virtual void SendBlockChange (int a_BlockX, int a_BlockY, int a_BlockZ, BLOCKTYPE a_BlockType, NIBBLETYPE a_BlockMeta) override;
virtual void SendBlockChanges (int a_ChunkX, int a_ChunkZ, const sSetBlockVector & a_Changes) override;
virtual void SendCameraSetTo (const cEntity & a_Entity) override;
virtual void SendChat (const AString & a_Message, eChatType a_Type) override;
virtual void SendChat (const cCompositeChat & a_Message, eChatType a_Type, bool a_ShouldUseChatPrefixes) override;
virtual void SendChatRaw (const AString & a_MessageRaw, eChatType a_Type) override;
virtual void SendChunkData (int a_ChunkX, int a_ChunkZ, cChunkDataSerializer & a_Serializer) override;
virtual void SendCollectEntity (const cEntity & a_Entity, const cPlayer & a_Player, int a_Count) override;
virtual void SendDestroyEntity (const cEntity & a_Entity) override;
virtual void SendDetachEntity (const cEntity & a_Entity, const cEntity & a_PreviousVehicle) override;
virtual void SendDisconnect (const AString & a_Reason) override;
virtual void SendEditSign (int a_BlockX, int a_BlockY, int a_BlockZ) override; ///< Request the client to open up the sign editor for the sign (1.6+)
virtual void SendEntityEffect (const cEntity & a_Entity, int a_EffectID, int a_Amplifier, int a_Duration) override;
virtual void SendEntityEquipment (const cEntity & a_Entity, short a_SlotNum, const cItem & a_Item) override;
virtual void SendEntityHeadLook (const cEntity & a_Entity) override;
virtual void SendEntityLook (const cEntity & a_Entity) override;
virtual void SendEntityMetadata (const cEntity & a_Entity) override;
virtual void SendEntityProperties (const cEntity & a_Entity) override;
virtual void SendEntityRelMove (const cEntity & a_Entity, char a_RelX, char a_RelY, char a_RelZ) override;
virtual void SendEntityRelMoveLook (const cEntity & a_Entity, char a_RelX, char a_RelY, char a_RelZ) override;
virtual void SendEntityStatus (const cEntity & a_Entity, char a_Status) override;
virtual void SendEntityVelocity (const cEntity & a_Entity) override;
virtual void SendExplosion (double a_BlockX, double a_BlockY, double a_BlockZ, float a_Radius, const cVector3iArray & a_BlocksAffected, const Vector3d & a_PlayerMotion) override;
virtual void SendGameMode (eGameMode a_GameMode) override;
virtual void SendHealth (void) override;
virtual void SendHeldItemChange (int a_ItemIndex) override;
virtual void SendHideTitle (void) override;
virtual void SendInventorySlot (char a_WindowID, short a_SlotNum, const cItem & a_Item) override;
virtual void SendKeepAlive (UInt32 a_PingID) override;
virtual void SendLeashEntity (const cEntity & a_Entity, const cEntity & a_EntityLeashedTo) override;
virtual void SendLogin (const cPlayer & a_Player, const cWorld & a_World) override;
virtual void SendLoginSuccess (void) override;
virtual void SendMapData (const cMap & a_Map, int a_DataStartX, int a_DataStartY) override;
virtual void SendPaintingSpawn (const cPainting & a_Painting) override;
virtual void SendPickupSpawn (const cPickup & a_Pickup) override;
virtual void SendPlayerAbilities (void) override;
virtual void SendEntityAnimation (const cEntity & a_Entity, char a_Animation) override;
virtual void SendParticleEffect (const AString & a_ParticleName, float a_SrcX, float a_SrcY, float a_SrcZ, float a_OffsetX, float a_OffsetY, float a_OffsetZ, float a_ParticleData, int a_ParticleAmount) override;
virtual void SendParticleEffect (const AString & a_ParticleName, Vector3f a_Src, Vector3f a_Offset, float a_ParticleData, int a_ParticleAmount, std::array<int, 2> a_Data) override;
virtual void SendPlayerListAddPlayer (const cPlayer & a_Player) override;
virtual void SendPlayerListRemovePlayer (const cPlayer & a_Player) override;
virtual void SendPlayerListUpdateGameMode (const cPlayer & a_Player) override;
virtual void SendPlayerListUpdatePing (const cPlayer & a_Player) override;
virtual void SendPlayerListUpdateDisplayName(const cPlayer & a_Player, const AString & a_CustomName) override;
virtual void SendPlayerMaxSpeed (void) override;
virtual void SendPlayerMoveLook (void) override;
virtual void SendPlayerPosition (void) override;
virtual void SendPlayerSpawn (const cPlayer & a_Player) override;
virtual void SendPluginMessage (const AString & a_Channel, const AString & a_Message) override;
virtual void SendRemoveEntityEffect (const cEntity & a_Entity, int a_EffectID) override;
virtual void SendResetTitle (void) override;
virtual void SendRespawn (eDimension a_Dimension) override;
virtual void SendSoundEffect (const AString & a_SoundName, double a_X, double a_Y, double a_Z, float a_Volume, float a_Pitch) override;
virtual void SendExperience (void) override;
virtual void SendExperienceOrb (const cExpOrb & a_ExpOrb) override;
virtual void SendScoreboardObjective (const AString & a_Name, const AString & a_DisplayName, Byte a_Mode) override;
virtual void SendScoreUpdate (const AString & a_Objective, const AString & a_Player, cObjective::Score a_Score, Byte a_Mode) override;
virtual void SendDisplayObjective (const AString & a_Objective, cScoreboard::eDisplaySlot a_Display) override;
virtual void SendSetSubTitle (const cCompositeChat & a_SubTitle) override;
virtual void SendSetRawSubTitle (const AString & a_SubTitle) override;
virtual void SendSetTitle (const cCompositeChat & a_Title) override;
virtual void SendSetRawTitle (const AString & a_Title) override;
virtual void SendSoundParticleEffect (const EffectID a_EffectID, int a_SrcX, int a_SrcY, int a_SrcZ, int a_Data) override;
virtual void SendSpawnFallingBlock (const cFallingBlock & a_FallingBlock) override;
virtual void SendSpawnMob (const cMonster & a_Mob) override;
virtual void SendSpawnObject (const cEntity & a_Entity, char a_ObjectType, int a_ObjectData, Byte a_Yaw, Byte a_Pitch) override;
virtual void SendSpawnVehicle (const cEntity & a_Vehicle, char a_VehicleType, char a_VehicleSubType) override;
virtual void SendStatistics (const cStatManager & a_Manager) override;
virtual void SendTabCompletionResults (const AStringVector & a_Results) override;
virtual void SendTeleportEntity (const cEntity & a_Entity) override;
virtual void SendThunderbolt (int a_BlockX, int a_BlockY, int a_BlockZ) override;
virtual void SendTitleTimes (int a_FadeInTicks, int a_DisplayTicks, int a_FadeOutTicks) override;
virtual void SendTimeUpdate (Int64 a_WorldAge, Int64 a_TimeOfDay, bool a_DoDaylightCycle) override;
virtual void SendUnleashEntity (const cEntity & a_Entity) override;
virtual void SendUnloadChunk (int a_ChunkX, int a_ChunkZ) override;
virtual void SendUpdateBlockEntity (cBlockEntity & a_BlockEntity) override;
virtual void SendUpdateSign (int a_BlockX, int a_BlockY, int a_BlockZ, const AString & a_Line1, const AString & a_Line2, const AString & a_Line3, const AString & a_Line4) override;
virtual void SendUseBed (const cEntity & a_Entity, int a_BlockX, int a_BlockY, int a_BlockZ) override;
virtual void SendWeather (eWeather a_Weather) override;
virtual void SendWholeInventory (const cWindow & a_Window) override;
virtual void SendWindowClose (const cWindow & a_Window) override;
virtual void SendWindowOpen (const cWindow & a_Window) override;
virtual void SendWindowProperty (const cWindow & a_Window, short a_Property, short a_Value) override;
virtual AString GetAuthServerID(void) override { return m_AuthServerID; }
/** Compress the packet. a_Packet must be without packet length.
a_Compressed will be set to the compressed packet includes packet length and data length.
If compression fails, the function returns false. */
static bool CompressPacket(const AString & a_Packet, AString & a_Compressed);
/** The 1.8 protocol use a particle id instead of a string. This function converts the name to the id. If the name is incorrect, it returns 0. */
static int GetParticleID(const AString & a_ParticleName);
/** Minecraft 1.8 use other locations to spawn the item frame. This function converts the 1.7 positions to 1.8 positions. */
static void FixItemFramePositions(int a_ObjectData, double & a_PosX, double & a_PosZ, double & a_Yaw);
protected:
AString m_ServerAddress;
UInt16 m_ServerPort;
AString m_AuthServerID;
/** State of the protocol. 1 = status, 2 = login, 3 = game */
UInt32 m_State;
/** Buffer for the received data */
cByteBuffer m_ReceivedData;
bool m_IsEncrypted;
cAesCfb128Decryptor m_Decryptor;
cAesCfb128Encryptor m_Encryptor;
/** The logfile where the comm is logged, when g_ShouldLogComm is true */
cFile m_CommLogFile;
/** Adds the received (unencrypted) data to m_ReceivedData, parses complete packets */
void AddReceivedData(const char * a_Data, size_t a_Size);
	/** Nobody inherits 1.8, so it doesn't use this method */
	virtual UInt32 GetPacketId(eOutgoingPackets a_Packet) override
	{
		// Deliberately unimplemented: this base 1.8 protocol writes packet IDs
		// directly; the ASSERT fires in debug builds if a subclass ever calls it.
		ASSERT(!"GetPacketId for cProtocol_1_8_0 is not implemented.");
		return 0;
	}
/** Reads and handles the packet. The packet length and type have already been read.
Returns true if the packet was understood, false if it was an unknown packet
*/
bool HandlePacket(cByteBuffer & a_ByteBuffer, UInt32 a_PacketType);
// Packet handlers while in the Status state (m_State == 1):
void HandlePacketStatusPing(cByteBuffer & a_ByteBuffer);
void HandlePacketStatusRequest(cByteBuffer & a_ByteBuffer);
// Packet handlers while in the Login state (m_State == 2):
void HandlePacketLoginEncryptionResponse(cByteBuffer & a_ByteBuffer);
void HandlePacketLoginStart(cByteBuffer & a_ByteBuffer);
// Packet handlers while in the Game state (m_State == 3):
void HandlePacketAnimation (cByteBuffer & a_ByteBuffer);
void HandlePacketBlockDig (cByteBuffer & a_ByteBuffer);
void HandlePacketBlockPlace (cByteBuffer & a_ByteBuffer);
void HandlePacketChatMessage (cByteBuffer & a_ByteBuffer);
void HandlePacketClientSettings (cByteBuffer & a_ByteBuffer);
void HandlePacketClientStatus (cByteBuffer & a_ByteBuffer);
void HandlePacketCreativeInventoryAction(cByteBuffer & a_ByteBuffer);
void HandlePacketEntityAction (cByteBuffer & a_ByteBuffer);
void HandlePacketKeepAlive (cByteBuffer & a_ByteBuffer);
void HandlePacketPlayer (cByteBuffer & a_ByteBuffer);
void HandlePacketPlayerAbilities (cByteBuffer & a_ByteBuffer);
void HandlePacketPlayerLook (cByteBuffer & a_ByteBuffer);
void HandlePacketPlayerPos (cByteBuffer & a_ByteBuffer);
void HandlePacketPlayerPosLook (cByteBuffer & a_ByteBuffer);
void HandlePacketPluginMessage (cByteBuffer & a_ByteBuffer);
void HandlePacketSlotSelect (cByteBuffer & a_ByteBuffer);
void HandlePacketSpectate (cByteBuffer & a_ByteBuffer);
void HandlePacketSteerVehicle (cByteBuffer & a_ByteBuffer);
void HandlePacketTabComplete (cByteBuffer & a_ByteBuffer);
void HandlePacketUpdateSign (cByteBuffer & a_ByteBuffer);
void HandlePacketUseEntity (cByteBuffer & a_ByteBuffer);
void HandlePacketEnchantItem (cByteBuffer & a_ByteBuffer);
void HandlePacketWindowClick (cByteBuffer & a_ByteBuffer);
void HandlePacketWindowClose (cByteBuffer & a_ByteBuffer);
/** Parses Vanilla plugin messages into specific ClientHandle calls.
The message payload is still in the bytebuffer, the handler reads it specifically for each handled channel */
void HandleVanillaPluginMessage(cByteBuffer & a_ByteBuffer, const AString & a_Channel);
/** Sends the data to the client, encrypting them if needed. */
virtual void SendData(const char * a_Data, size_t a_Size) override;
/** Sends the packet to the client. Called by the cPacketizer's destructor. */
virtual void SendPacket(cPacketizer & a_Packet) override;
void SendCompass(const cWorld & a_World);
/** Reads an item out of the received data, sets a_Item to the values read.
Returns false if not enough received data.
a_KeepRemainingBytes tells the function to keep that many bytes at the end of the buffer. */
virtual bool ReadItem(cByteBuffer & a_ByteBuffer, cItem & a_Item, size_t a_KeepRemainingBytes = 0);
/** Parses item metadata as read by ReadItem(), into the item enchantments. */
void ParseItemMetadata(cItem & a_Item, const AString & a_Metadata);
void StartEncryption(const Byte * a_Key);
/** Converts the BlockFace received by the protocol into eBlockFace constants.
If the received value doesn't match any of our eBlockFace constants, BLOCK_FACE_NONE is returned. */
eBlockFace FaceIntToBlockFace(Int8 a_FaceInt);
/** Writes the item data into a packet. */
void WriteItem(cPacketizer & a_Pkt, const cItem & a_Item);
/** Writes the metadata for the specified entity, not including the terminating 0x7f. */
void WriteEntityMetadata(cPacketizer & a_Pkt, const cEntity & a_Entity);
/** Writes the mob-specific metadata for the specified mob */
void WriteMobMetadata(cPacketizer & a_Pkt, const cMonster & a_Mob);
/** Writes the entity properties for the specified entity, including the Count field. */
void WriteEntityProperties(cPacketizer & a_Pkt, const cEntity & a_Entity);
/** Writes the block entity data for the specified block entity into the packet. */
void WriteBlockEntity(cPacketizer & a_Pkt, const cBlockEntity & a_BlockEntity);
} ;
|
nounoursheureux/MCServer
|
src/Protocol/Protocol_1_8.h
|
C
|
apache-2.0
| 15,528
|
#
# Author:: Thomas Bishop (<bishop.thomas@gmail.com>)
# Copyright:: Copyright 2011-2016, Thomas Bishop
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "spec_helper"
# Unit tests for `knife cookbook delete`. The knife instance's UI streams and
# HTTP layer are stubbed, so no real requests are made; most specs verify which
# internal helper the command dispatches to, or exercise one helper in isolation.
describe Chef::Knife::CookbookDelete do
  before(:each) do
    @knife = Chef::Knife::CookbookDelete.new
    @knife.name_args = ["foobar"]
    @knife.cookbook_name = "foobar"
    # Capture stdout/stderr so specs can assert on printed output.
    @stdout = StringIO.new
    allow(@knife.ui).to receive(:stdout).and_return(@stdout)
    @stderr = StringIO.new
    allow(@knife.ui).to receive(:stderr).and_return(@stderr)
  end

  # Dispatch behavior of the top-level #run entry point.
  describe "run" do
    it "should print usage and exit when a cookbook name is not provided" do
      @knife.name_args = []
      expect(@knife).to receive(:show_usage)
      expect(@knife.ui).to receive(:fatal)
      expect { @knife.run }.to raise_error(SystemExit)
    end

    describe "when specifying a cookbook name" do
      it "should delete the cookbook without a specific version" do
        expect(@knife).to receive(:delete_without_explicit_version)
        @knife.run
      end

      describe "and a version" do
        it "should delete the specific version of the cookbook" do
          # A second name_arg is interpreted as the version string.
          @knife.name_args << "1.0.0"
          expect(@knife).to receive(:delete_explicit_version)
          @knife.run
        end
      end

      describe "with -a or --all" do
        it "should delete all versions of the cookbook" do
          @knife.config[:all] = true
          expect(@knife).to receive(:delete_all_versions)
          @knife.run
        end
      end

      describe "with -p or --purge" do
        it "should prompt to purge the files" do
          @knife.config[:purge] = true
          expect(@knife).to receive(:confirm)
            .with(/.+Are you sure you want to purge files.+/)
          expect(@knife).to receive(:delete_without_explicit_version)
          @knife.run
        end
      end
    end
  end

  describe "delete_explicit_version" do
    it "should delete the specific cookbook version" do
      @knife.cookbook_name = "foobar"
      @knife.version = "1.0.0"
      # delete_object yields (after user confirmation) so the version-specific
      # DELETE request is issued inside the block.
      expect(@knife).to receive(:delete_object).with(Chef::CookbookVersion,
                                                     "foobar version 1.0.0",
                                                     "cookbook").and_yield
      expect(@knife).to receive(:delete_request).with("cookbooks/foobar/1.0.0")
      @knife.delete_explicit_version
    end
  end

  describe "delete_all_versions" do
    it "should prompt to delete all versions of the cookbook" do
      @knife.cookbook_name = "foobar"
      expect(@knife).to receive(:confirm).with("Do you really want to delete all versions of foobar")
      expect(@knife).to receive(:delete_all_without_confirmation)
      @knife.delete_all_versions
    end
  end

  describe "delete_all_without_confirmation" do
    it "should delete all versions without confirmation" do
      versions = ["1.0.0", "1.1.0"]
      expect(@knife).to receive(:available_versions).and_return(versions)
      # Each available version must be deleted exactly once.
      versions.each do |v|
        expect(@knife).to receive(:delete_version_without_confirmation).with(v)
      end
      @knife.delete_all_without_confirmation
    end
  end

  describe "delete_without_explicit_version" do
    it "should exit if there are no available versions" do
      expect(@knife).to receive(:available_versions).and_return(nil)
      expect { @knife.delete_without_explicit_version }.to raise_error(SystemExit)
    end

    it "should delete the version if only one is found" do
      expect(@knife).to receive(:available_versions).at_least(:once).and_return(["1.0.0"])
      expect(@knife).to receive(:delete_explicit_version)
      @knife.delete_without_explicit_version
    end

    it "should ask which version(s) to delete if multiple are found" do
      expect(@knife).to receive(:available_versions).at_least(:once).and_return(["1.0.0", "1.1.0"])
      expect(@knife).to receive(:ask_which_versions_to_delete).and_return(["1.0.0", "1.1.0"])
      expect(@knife).to receive(:delete_versions_without_confirmation).with(["1.0.0", "1.1.0"])
      @knife.delete_without_explicit_version
    end
  end

  describe "available_versions" do
    before(:each) do
      # Stub the REST client; @cookbook_data mirrors the server's
      # GET /cookbooks/<name> response shape.
      @rest_mock = double("rest")
      expect(@knife).to receive(:rest).and_return(@rest_mock)
      @cookbook_data = { "foobar" => { "versions" => [{ "version" => "1.0.0" },
                                                      { "version" => "1.1.0" },
                                                      { "version" => "2.0.0" } ] },
      }
    end

    it "should return the list of versions of the cookbook" do
      expect(@rest_mock).to receive(:get).with("cookbooks/foobar").and_return(@cookbook_data)
      expect(@knife.available_versions).to eq(["1.0.0", "1.1.0", "2.0.0"])
    end

    it "should raise if an error other than HTTP 404 is returned" do
      exception = Net::HTTPClientException.new("500 Internal Server Error", "500")
      expect(@rest_mock).to receive(:get).and_raise(exception)
      expect { @knife.available_versions }.to raise_error Net::HTTPClientException
    end

    # A 404 is treated as "no such cookbook", not as a hard failure.
    describe "if the cookbook can't be found" do
      before(:each) do
        expect(@rest_mock).to receive(:get)
          .and_raise(Net::HTTPClientException.new("404 Not Found", "404"))
      end

      it "should print an error" do
        @knife.available_versions
        expect(@stderr.string).to match /error.+cannot find a cookbook named foobar/i
      end

      it "should return nil" do
        expect(@knife.available_versions).to eq(nil)
      end
    end
  end

  # NOTE(review): describe label says "version" but the method under test is
  # ask_which_versions_to_delete (plural).
  describe "ask_which_version_to_delete" do
    before(:each) do
      allow(@knife).to receive(:available_versions).and_return(["1.0.0", "1.1.0", "2.0.0"])
    end

    it "should prompt the user to select a version" do
      prompt = /Which version\(s\) do you want to delete\?.+1\. foobar 1\.0\.0.+2\. foobar 1\.1\.0.+3\. foobar 2\.0\.0.+4\. All versions.+/m
      expect(@knife).to receive(:ask_question).with(prompt).and_return("1")
      @knife.ask_which_versions_to_delete
    end

    it "should print an error and exit if a version wasn't specified" do
      expect(@knife).to receive(:ask_question).and_return("")
      expect(@knife.ui).to receive(:error).with(/no versions specified/i)
      expect { @knife.ask_which_versions_to_delete }.to raise_error(SystemExit)
    end

    it "should print an error if an invalid choice was selected" do
      # Choice 100 is out of range; an error is printed but no exit is raised.
      expect(@knife).to receive(:ask_question).and_return("100")
      expect(@knife.ui).to receive(:error).with(/100 is not a valid choice/i)
      @knife.ask_which_versions_to_delete
    end

    it "should return the selected versions" do
      # Comma-separated choices map 1-based menu indices to version strings.
      expect(@knife).to receive(:ask_question).and_return("1, 3")
      expect(@knife.ask_which_versions_to_delete).to eq(["1.0.0", "2.0.0"])
    end

    it "should return all of the versions if 'all' was selected" do
      expect(@knife).to receive(:ask_question).and_return("4")
      expect(@knife.ask_which_versions_to_delete).to eq([:all])
    end
  end

  describe "delete_version_without_confirmation" do
    it "should delete the cookbook version" do
      expect(@knife).to receive(:delete_request).with("cookbooks/foobar/1.0.0")
      @knife.delete_version_without_confirmation("1.0.0")
    end

    it "should output that the cookbook was deleted" do
      allow(@knife).to receive(:delete_request)
      @knife.delete_version_without_confirmation("1.0.0")
      expect(@stderr.string).to match /deleted cookbook\[foobar\]\[1.0.0\]/im
    end

    describe "with --print-after" do
      it "should display the cookbook data" do
        object = ""
        @knife.config[:print_after] = true
        allow(@knife).to receive(:delete_request).and_return(object)
        expect(@knife).to receive(:format_for_display).with(object)
        @knife.delete_version_without_confirmation("1.0.0")
      end
    end
  end

  describe "delete_versions_without_confirmation" do
    it "should delete each version without confirmation" do
      versions = ["1.0.0", "1.1.0"]
      versions.each do |v|
        expect(@knife).to receive(:delete_version_without_confirmation).with(v)
      end
      @knife.delete_versions_without_confirmation(versions)
    end

    describe "with -a or --all" do
      it "should delete all versions without confirmation" do
        # The sentinel :all delegates to delete_all_without_confirmation.
        versions = [:all]
        expect(@knife).to receive(:delete_all_without_confirmation)
        @knife.delete_versions_without_confirmation(versions)
      end
    end
  end
end
|
higanworks/chef
|
spec/unit/knife/cookbook_delete_spec.rb
|
Ruby
|
apache-2.0
| 8,925
|
/*
* Copyright 2017 Netflix, Inc.
*
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spinnaker.clouddriver.aws.deploy.description;
import com.amazonaws.services.elasticloadbalancingv2.model.ActionTypeEnum;
import com.amazonaws.services.elasticloadbalancingv2.model.Certificate;
import com.amazonaws.services.elasticloadbalancingv2.model.ProtocolEnum;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
 * Deploy description for upserting a v2 (ELBv2-style) Amazon load balancer:
 * carries the desired listeners and target groups, and offers compare()
 * helpers to decide whether an existing AWS resource already matches the
 * requested configuration.
 */
public class UpsertAmazonLoadBalancerV2Description extends UpsertAmazonLoadBalancerDescription {
  public List<Listener> listeners = new ArrayList<>();
  public List<TargetGroup> targetGroups = new ArrayList<>();

  /** Desired target group configuration, including health-check settings. */
  public static class TargetGroup {
    private String name;
    private ProtocolEnum protocol;
    private Integer port;
    private Attributes attributes; // TODO: Support target group attributes
    private ProtocolEnum healthCheckProtocol;
    private String healthCheckPath;
    private String healthCheckPort;
    // Health-check defaults below mirror this description's chosen defaults,
    // applied when the caller does not specify values.
    private Integer healthCheckInterval = 10;
    private Integer healthCheckTimeout = 5;
    private Integer unhealthyThreshold = 2;
    private Integer healthyThreshold = 10;
    private String healthCheckMatcher = "200-299"; // string of ranges or individual http status codes, separated by commas

    public String getName() {
      return name;
    }

    public void setName(String name) {
      this.name = name;
    }

    public ProtocolEnum getProtocol() {
      return protocol;
    }

    public void setProtocol(ProtocolEnum protocol) {
      this.protocol = protocol;
    }

    public Integer getPort() {
      return port;
    }

    public void setPort(Integer port) {
      this.port = port;
    }

    public Attributes getAttributes() {
      return attributes;
    }

    public void setAttributes(Attributes attributes) {
      this.attributes = attributes;
    }

    public ProtocolEnum getHealthCheckProtocol() {
      return healthCheckProtocol;
    }

    public void setHealthCheckProtocol(ProtocolEnum healthCheckProtocol) {
      this.healthCheckProtocol = healthCheckProtocol;
    }

    public String getHealthCheckPath() {
      return healthCheckPath;
    }

    public void setHealthCheckPath(String healthCheckPath) {
      this.healthCheckPath = healthCheckPath;
    }

    public String getHealthCheckPort() {
      return healthCheckPort;
    }

    public void setHealthCheckPort(String healthCheckPort) {
      this.healthCheckPort = healthCheckPort;
    }

    public Integer getHealthCheckInterval() {
      return healthCheckInterval;
    }

    public void setHealthCheckInterval(Integer healthCheckInterval) {
      this.healthCheckInterval = healthCheckInterval;
    }

    public Integer getHealthCheckTimeout() {
      return healthCheckTimeout;
    }

    public void setHealthCheckTimeout(Integer healthCheckTimeout) {
      this.healthCheckTimeout = healthCheckTimeout;
    }

    public Integer getUnhealthyThreshold() {
      return unhealthyThreshold;
    }

    public void setUnhealthyThreshold(Integer unhealthyThreshold) {
      this.unhealthyThreshold = unhealthyThreshold;
    }

    public Integer getHealthyThreshold() {
      return healthyThreshold;
    }

    public void setHealthyThreshold(Integer healthyThreshold) {
      this.healthyThreshold = healthyThreshold;
    }

    public String getHealthCheckMatcher() {
      return healthCheckMatcher;
    }

    public void setHealthCheckMatcher(String healthCheckMatcher) {
      this.healthCheckMatcher = healthCheckMatcher;
    }

    /**
     * Returns true when every field of this description equals the
     * corresponding field of the existing AWS target group.
     * NOTE(review): dereferences name/protocol/port/healthCheck* without null
     * checks — if any field was left unset this throws NPE rather than
     * returning false; confirm callers always populate them.
     */
    public Boolean compare(com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup awsTargetGroup) {
      return this.name.equals(awsTargetGroup.getTargetGroupName()) &&
        this.protocol.toString().equals(awsTargetGroup.getProtocol()) &&
        this.port.equals(awsTargetGroup.getPort()) &&
        this.healthCheckProtocol.toString().equals(awsTargetGroup.getHealthCheckProtocol()) &&
        this.healthCheckPath.equals(awsTargetGroup.getHealthCheckPath()) &&
        this.healthCheckPort.equals(awsTargetGroup.getHealthCheckPort()) &&
        this.healthCheckInterval.equals(awsTargetGroup.getHealthCheckIntervalSeconds()) &&
        this.healthCheckTimeout.equals(awsTargetGroup.getHealthCheckTimeoutSeconds()) &&
        this.healthyThreshold.equals(awsTargetGroup.getHealthyThresholdCount()) &&
        this.unhealthyThreshold.equals(awsTargetGroup.getUnhealthyThresholdCount()) &&
        this.healthCheckMatcher.equals(awsTargetGroup.getMatcher().getHttpCode());
    }
  }

  /** Desired listener configuration: certificates, protocol/port, actions and rules. */
  public static class Listener {
    private List<Certificate> certificates;
    private ProtocolEnum protocol;
    private Integer port;
    private String sslPolicy;
    private List<Action> defaultActions;
    private List<Rule> rules = new ArrayList<>();

    public List<Certificate> getCertificates() {
      return certificates;
    }

    public void setCertificates(List<Certificate> certificates) {
      this.certificates = certificates;
    }

    public ProtocolEnum getProtocol() {
      return protocol;
    }

    public void setProtocol(ProtocolEnum protocol) {
      this.protocol = protocol;
    }

    public Integer getPort() {
      return port;
    }

    public void setPort(Integer port) {
      this.port = port;
    }

    public String getSslPolicy() {
      return sslPolicy;
    }

    public void setSslPolicy(String sslPolicy) {
      this.sslPolicy = sslPolicy;
    }

    public List<Action> getDefaultActions() {
      return defaultActions;
    }

    public void setDefaultActions(List<Action> defaultActions) {
      this.defaultActions = defaultActions;
    }

    public List<Rule> getRules() {
      return rules;
    }

    public void setRules(List<Rule> rules) {
      this.rules = rules;
    }

    /**
     * Compares this listener description against an existing AWS listener.
     *
     * @param awsListener   the existing listener
     * @param actions       the default actions this description would create
     * @param existingRules the rules currently attached to awsListener
     *                      (includes the default rule)
     * @param rules         the non-default rules this description would create
     * @return true when protocol, port, default actions, rules and
     *         certificates all match
     */
    public Boolean compare(com.amazonaws.services.elasticloadbalancingv2.model.Listener awsListener,
                           List<com.amazonaws.services.elasticloadbalancingv2.model.Action> actions,
                           List<com.amazonaws.services.elasticloadbalancingv2.model.Rule> existingRules,
                           List<com.amazonaws.services.elasticloadbalancingv2.model.Rule> rules) {
      // Normalize null rule lists so the size/containment checks below are safe.
      if (existingRules == null) {
        existingRules = new ArrayList<>();
      }
      if (rules == null) {
        rules = new ArrayList<>();
      }
      // Certificates match when the counts are equal AND the sets of ARNs are equal.
      int awsCertificateCount = awsListener.getCertificates() != null ? awsListener.getCertificates().size() : 0;
      int certificateCount = certificates != null ? certificates.size() : 0;
      Boolean certificatesSame = awsCertificateCount == certificateCount;
      if (certificatesSame) {
        Set<String> awsListenerArns = new HashSet<>();
        Set<String> thisListenerArns = new HashSet<>();
        if (awsListener.getCertificates() != null) {
          awsListener.getCertificates().forEach(cert -> awsListenerArns.add(cert.getCertificateArn()));
        }
        if (certificates != null) {
          certificates.forEach(cert -> thisListenerArns.add(cert.getCertificateArn()));
        }
        certificatesSame = awsListenerArns.equals(thisListenerArns);
      }
      Boolean rulesSame = existingRules.size() == rules.size() + 1; // existing rules has the default rule, rules does not
      if (rulesSame) {
        // Every existing non-default rule must have an identical counterpart
        // (same actions, conditions and priority) in the desired rules.
        for (com.amazonaws.services.elasticloadbalancingv2.model.Rule existingRule : existingRules) {
          boolean match = true;
          if (!existingRule.isDefault()) {
            match = false;
            for (com.amazonaws.services.elasticloadbalancingv2.model.Rule rule : rules) {
              if (existingRule.getActions().equals(rule.getActions())
                  && existingRule.getConditions().equals(rule.getConditions())
                  && existingRule.getPriority().equals(rule.getPriority())) {
                match = true;
                break;
              }
            }
          }
          rulesSame = match;
          if (!rulesSame) {
            break;
          }
        }
      }
      // Default actions must match in both directions (set equality, ignoring order).
      Boolean actionsSame = awsListener.getDefaultActions().containsAll(actions) &&
        actions.containsAll(awsListener.getDefaultActions());
      return (this.protocol != null && this.protocol.toString().equals(awsListener.getProtocol())) &&
        (this.port != null && this.port.equals(awsListener.getPort())) &&
        actionsSame &&
        rulesSame &&
        certificatesSame;
    }
  }

  /** A listener action; defaults to a forward action targeting a named target group. */
  public static class Action {
    private String type = ActionTypeEnum.Forward.toString();
    private String targetGroupName;

    public String getType() {
      return type;
    }

    public void setType(String type) {
      this.type = type;
    }

    public String getTargetGroupName() {
      return targetGroupName;
    }

    public void setTargetGroupName(String targetGroupName) {
      this.targetGroupName = targetGroupName;
    }
  }

  /** Target group attributes (deregistration delay and stickiness settings). */
  public static class Attributes {
    // Defaults below are this description's chosen defaults for new target groups.
    private Integer deregistrationDelay = 300;
    private Boolean stickinessEnabled = false;
    private String stickinessType = "lb_cookie";
    private Integer stickinessDuration = 86400;

    public Integer getDeregistrationDelay() {
      return deregistrationDelay;
    }

    public void setDeregistrationDelay(Integer deregistrationDelay) {
      this.deregistrationDelay = deregistrationDelay;
    }

    public Boolean getStickinessEnabled() {
      return stickinessEnabled;
    }

    public void setStickinessEnabled(Boolean stickinessEnabled) {
      this.stickinessEnabled = stickinessEnabled;
    }

    public String getStickinessType() {
      return stickinessType;
    }

    public void setStickinessType(String stickinessType) {
      this.stickinessType = stickinessType;
    }

    public Integer getStickinessDuration() {
      return stickinessDuration;
    }

    public void setStickinessDuration(Integer stickinessDuration) {
      this.stickinessDuration = stickinessDuration;
    }
  }

  /** A rule condition: a field name plus the values it must match. */
  public static class RuleCondition {
    private String field;
    private List<String> values;

    public String getField() {
      return field;
    }

    public void setField(String field) {
      this.field = field;
    }

    public List<String> getValues() {
      return values;
    }

    public void setValues(List<String> values) {
      this.values = values;
    }
  }

  /** A non-default listener rule: priority, actions, and match conditions. */
  public static class Rule {
    private Integer priority;
    private List<Action> actions;
    private List<RuleCondition> conditions;

    public Integer getPriority() {
      return priority;
    }

    public void setPriority(Integer priority) {
      this.priority = priority;
    }

    public List<Action> getActions() {
      return actions;
    }

    public void setActions(List<Action> actions) {
      this.actions = actions;
    }

    public List<RuleCondition> getConditions() {
      return conditions;
    }

    public void setConditions(List<RuleCondition> conditions) {
      this.conditions = conditions;
    }
  }
}
|
lookout/clouddriver
|
clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerV2Description.java
|
Java
|
apache-2.0
| 11,449
|
// /sys admin page: lets an authorized user view and edit the app's key/value
// configuration via /api/v1/config/app.
angular.module('classeur.optional.sysPage', [])
  .config(
    function ($routeProvider) {
      $routeProvider
        .when('/sys', {
          template: '<cl-sys-page></cl-sys-page>',
          controller: function (clAnalytics) {
            clAnalytics.trackPage('/sys')
          }
        })
    })
  .directive('clSysPage',
    function ($http, $location, clToast, clSocketSvc) {
      return {
        restrict: 'E',
        templateUrl: 'optional/sysPage/sysPage.html',
        link: link
      }

      function link (scope) {
        // Rows being edited; each is {key, value}.
        scope.properties = []

        // Remove a single row from the editor (identity comparison).
        scope.deleteRow = function (propertyToDelete) {
          scope.properties = scope.properties.cl_filter(function (property) {
            return property !== propertyToDelete
          })
        }

        // Append an empty row for the user to fill in.
        scope.addRow = function () {
          scope.properties.push({})
        }

        // Validate all rows, then POST them as the new app config.
        scope.update = function () {
          var properties = {}
          // scope.properties is an array, so .length is the row count
          // (was Object.keys(...).length — equivalent but indirect).
          if (scope.properties.length > 255) {
            return clToast('Too many properties.')
          }
          if (
            // cl_some returns true (aborting the update) on the first invalid row.
            scope.properties.cl_some(function (property) {
              if (!property.key && !property.value) {
                // Fully empty rows are silently skipped.
                return
              }
              if (!property.key) {
                clToast("Property can't be empty.")
                return true
              }
              if (property.key.length > 255) {
                clToast('Property key is too long.')
                return true
              }
              if (!property.value) {
                clToast("Property can't be empty.")
                return true
              }
              if (property.value.length > 512) {
                clToast('Property value is too long.')
                return true
              }
              if (properties.hasOwnProperty(property.key)) {
                clToast('Duplicate property: ' + property.key + '.')
                return true
              }
              properties[property.key] = property.value
            })
          ) {
            return
          }
          $http.post('/api/v1/config/app', properties, {
            headers: clSocketSvc.makeAuthorizationHeader()
          })
            .success(function () {
              clToast('App config updated.')
            })
            .error(function (err) {
              // Fix: the fallback must bind to the message only. The previous
              // expression `'Error: ' + (err && err.message) || 'unknown'`
              // grouped as `('Error: ' + msg) || 'unknown'`, which is always
              // truthy, so 'unknown' was unreachable and the toast could show
              // "Error: undefined".
              clToast('Error: ' + ((err && err.message) || 'unknown'))
            })
        }

        // Load the current config and populate the rows, sorted by key.
        function retrieveConfig () {
          $http.get('/api/v1/config/app', {
            headers: clSocketSvc.makeAuthorizationHeader()
          })
            .success(function (res) {
              scope.properties = Object.keys(res).sort().cl_map(function (key) {
                return {
                  key: key,
                  value: res[key]
                }
              })
            })
            .error(function (err) {
              // Same precedence fix as in scope.update above.
              clToast('Error: ' + ((err && err.message) || 'unknown'))
            })
        }
        retrieveConfig()
      }
    })
|
MiaoTofu/classeur
|
src/optional/sysPage/sysPage.js
|
JavaScript
|
apache-2.0
| 2,994
|
project_path: /web/_project.yaml
book_path: /web/tools/_book.yaml
description: Siapkan workspace Anda agar dilengkapi dengan editor, alat debug, dan alat versi untuk web multiperangkat.
{# wf_updated_on: 2015-04-13 #}
{# wf_published_on: 2015-04-13 #}
# Memulai {: .page-title }
Siapkan workspace Anda agar dilengkapi dengan editor, alat debug, dan alat versi untuk web multiperangkat.
Waktu adalah faktor yang sangat besar supaya tetap produktif. Lingkungan development Anda adalah tempat Anda menghabiskan sebagian besar waktu. Siapkan diri Anda untuk meraih kesuksesan dengan menyertakan editor yang andal dan bisa diperluas, serta alat debug dan development yang kokoh.
* [Menyiapkan Editor Anda](setup-editor)
* [Menyiapkan Persistensi dengan DevTools Workspace](setup-workflow)
* [Menyiapkan Praprosesor CSS dan JS](setup-preprocessors)
* [Menyiapkan Pintasan Baris Perintah](setup-shortcuts)
* [Menyiapkan Ekstensi Browser](setup-extensions)
* [Menyiapkan Alat Pembangunan Anda](setup-buildtools)
Setelah selesai, lanjutkan dengan mempelajari selengkapnya tentang [Alat Developer Chrome (DevTools)](/web/tools/chrome-devtools), alat bawaan Chrome yang membantu Anda mengembangkan, menguji, dan men-debug laman Anda.
{# wf_devsite_translation #}
|
robdodson/WebFundamentals
|
src/content/id/tools/setup/index.md
|
Markdown
|
apache-2.0
| 1,260
|
/* Copyright 2012 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {
getVisibleElements, scrollIntoView, watchScroll
} from './ui_utils';
import { PDFThumbnailView } from './pdf_thumbnail_view';
var THUMBNAIL_SCROLL_MARGIN = -19;
/**
* @typedef {Object} PDFThumbnailViewerOptions
* @property {HTMLDivElement} container - The container for the thumbnail
* elements.
* @property {IPDFLinkService} linkService - The navigation/linking service.
* @property {PDFRenderingQueue} renderingQueue - The rendering queue object.
*/
/**
* Simple viewer control to display thumbnails for pages.
* @class
* @implements {IRenderableView}
*/
var PDFThumbnailViewer = (function PDFThumbnailViewerClosure() {
  /**
   * @constructs PDFThumbnailViewer
   * @param {PDFThumbnailViewerOptions} options
   */
  function PDFThumbnailViewer(options) {
    this.container = options.container;
    this.renderingQueue = options.renderingQueue;
    this.linkService = options.linkService;
    // Re-prioritize thumbnail rendering whenever the sidebar scrolls.
    this.scroll = watchScroll(this.container, this._scrollUpdated.bind(this));
    this._resetView();
  }

  PDFThumbnailViewer.prototype = {
    /**
     * @private
     */
    _scrollUpdated: function PDFThumbnailViewer_scrollUpdated() {
      this.renderingQueue.renderHighestPriority();
    },

    // Returns the PDFThumbnailView at the given zero-based index.
    getThumbnail: function PDFThumbnailViewer_getThumbnail(index) {
      return this.thumbnails[index];
    },

    /**
     * @private
     */
    _getVisibleThumbs: function PDFThumbnailViewer_getVisibleThumbs() {
      return getVisibleElements(this.container, this.thumbnails);
    },

    // Marks the thumbnail for `page` (1-based) as selected and, if it is not
    // currently visible, scrolls the sidebar to bring it into view.
    scrollThumbnailIntoView:
        function PDFThumbnailViewer_scrollThumbnailIntoView(page) {
      var selected = document.querySelector('.thumbnail.selected');
      if (selected) {
        selected.classList.remove('selected');
      }
      var thumbnail = document.querySelector(
        'div.thumbnail[data-page-number="' + page + '"]');
      if (thumbnail) {
        thumbnail.classList.add('selected');
      }
      var visibleThumbs = this._getVisibleThumbs();
      var numVisibleThumbs = visibleThumbs.views.length;
      // If the thumbnail isn't currently visible, scroll it into view.
      if (numVisibleThumbs > 0) {
        var first = visibleThumbs.first.id;
        // Account for only one thumbnail being visible.
        var last = (numVisibleThumbs > 1 ? visibleThumbs.last.id : first);
        if (page <= first || page >= last) {
          scrollIntoView(thumbnail, { top: THUMBNAIL_SCROLL_MARGIN });
        }
      }
    },

    get pagesRotation() {
      return this._pagesRotation;
    },

    // Setting the rotation propagates it to every thumbnail view.
    set pagesRotation(rotation) {
      this._pagesRotation = rotation;
      for (var i = 0, l = this.thumbnails.length; i < l; i++) {
        var thumb = this.thumbnails[i];
        thumb.update(rotation);
      }
    },

    cleanup: function PDFThumbnailViewer_cleanup() {
      var tempCanvas = PDFThumbnailView.tempImageCache;
      if (tempCanvas) {
        // Zeroing the width and height causes Firefox to release graphics
        // resources immediately, which can greatly reduce memory consumption.
        tempCanvas.width = 0;
        tempCanvas.height = 0;
      }
      PDFThumbnailView.tempImageCache = null;
    },

    /**
     * @private
     */
    _resetView: function PDFThumbnailViewer_resetView() {
      this.thumbnails = [];
      this._pageLabels = null;
      this._pagesRotation = 0;
      // Sparse array of in-flight getPage() promises, indexed by page number.
      this._pagesRequests = [];
      // Remove the thumbnails from the DOM.
      this.container.textContent = '';
    },

    // Swaps in a new document (or null to clear). Creates one PDFThumbnailView
    // per page, all sized from page 1's viewport; returns a promise resolved
    // once the views are created (rendering itself happens later via the queue).
    setDocument: function PDFThumbnailViewer_setDocument(pdfDocument) {
      if (this.pdfDocument) {
        this._cancelRendering();
        this._resetView();
      }
      this.pdfDocument = pdfDocument;
      if (!pdfDocument) {
        return Promise.resolve();
      }
      return pdfDocument.getPage(1).then(function (firstPage) {
        var pagesCount = pdfDocument.numPages;
        var viewport = firstPage.getViewport(1.0);
        for (var pageNum = 1; pageNum <= pagesCount; ++pageNum) {
          var thumbnail = new PDFThumbnailView({
            container: this.container,
            id: pageNum,
            defaultViewport: viewport.clone(),
            linkService: this.linkService,
            renderingQueue: this.renderingQueue,
            disableCanvasToImageConversion: false,
          });
          this.thumbnails.push(thumbnail);
        }
      }.bind(this));
    },

    /**
     * @private
     */
    _cancelRendering: function PDFThumbnailViewer_cancelRendering() {
      for (var i = 0, ii = this.thumbnails.length; i < ii; i++) {
        if (this.thumbnails[i]) {
          this.thumbnails[i].cancelRendering();
        }
      }
    },

    /**
     * @param {Array|null} labels
     */
    setPageLabels: function PDFThumbnailViewer_setPageLabels(labels) {
      if (!this.pdfDocument) {
        return;
      }
      if (!labels) {
        this._pageLabels = null;
      } else if (!(labels instanceof Array &&
                   this.pdfDocument.numPages === labels.length)) {
        // Labels must be one-per-page; otherwise discard them and warn.
        this._pageLabels = null;
        console.error('PDFThumbnailViewer_setPageLabels: Invalid page labels.');
      } else {
        this._pageLabels = labels;
      }
      // Update all the `PDFThumbnailView` instances.
      for (var i = 0, ii = this.thumbnails.length; i < ii; i++) {
        var thumbnailView = this.thumbnails[i];
        var label = this._pageLabels && this._pageLabels[i];
        thumbnailView.setPageLabel(label);
      }
    },

    /**
     * Lazily fetches the PDFPage for a thumbnail, deduplicating concurrent
     * requests for the same page via _pagesRequests.
     * @param {PDFThumbnailView} thumbView
     * @returns {PDFPage}
     * @private
     */
    _ensurePdfPageLoaded:
        function PDFThumbnailViewer_ensurePdfPageLoaded(thumbView) {
      if (thumbView.pdfPage) {
        return Promise.resolve(thumbView.pdfPage);
      }
      var pageNumber = thumbView.id;
      if (this._pagesRequests[pageNumber]) {
        return this._pagesRequests[pageNumber];
      }
      var promise = this.pdfDocument.getPage(pageNumber).then(
        function (pdfPage) {
          thumbView.setPdfPage(pdfPage);
          this._pagesRequests[pageNumber] = null;
          return pdfPage;
        }.bind(this));
      this._pagesRequests[pageNumber] = promise;
      return promise;
    },

    // Asks the rendering queue for the highest-priority visible thumbnail and
    // renders it; returns true if something was scheduled, false when idle.
    forceRendering: function () {
      var visibleThumbs = this._getVisibleThumbs();
      var thumbView = this.renderingQueue.getHighestPriority(visibleThumbs,
                                                             this.thumbnails,
                                                             this.scroll.down);
      if (thumbView) {
        this._ensurePdfPageLoaded(thumbView).then(function () {
          this.renderingQueue.renderView(thumbView);
        }.bind(this));
        return true;
      }
      return false;
    }
  };

  return PDFThumbnailViewer;
})();
export {
PDFThumbnailViewer,
};
|
1and1/pdf.js
|
web/pdf_thumbnail_viewer.js
|
JavaScript
|
apache-2.0
| 7,407
|
package org.gradle.tests16;
import org.junit.Test;
public class Test16_3 {
    /**
     * Simulates a short unit of work by pausing the current thread
     * for a few milliseconds.
     */
    @Test
    public void myTest() throws Exception {
        final long pauseMillis = 5L;
        Thread.sleep(pauseMillis);
    }
}
|
emergentone/10-dependencies
|
src/test/java/org/gradle/tests16/Test16_3.java
|
Java
|
apache-2.0
| 163
|
<html dir="LTR">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=Windows-1252" />
<meta name="vs_targetSchema" content="http://schemas.microsoft.com/intellisense/ie5" />
<title>Lucene.Net.Search.FunctionHierarchy</title>
<xml>
</xml>
<link rel="stylesheet" type="text/css" href="MSDN.css" />
</head>
<body topmargin="0" id="bodyID" class="dtBODY">
<object id="obj_cook" classid="clsid:59CC0C20-679B-11D2-88BD-0800361A1803" style="display:none;">
</object>
<div id="nsbanner">
<div id="bannerrow1">
<table class="bannerparthead" cellspacing="0">
<tr id="hdr">
<td class="runninghead">Apache Lucene.Net 2.4.0 Class Library API</td>
<td class="product">
</td>
</tr>
</table>
</div>
<div id="TitleRow">
<h1 class="dtH1">Lucene.Net.Search.Function Hierarchy</h1>
</div>
</div>
<div id="nstext" valign="bottom">
<div>
<a href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemObjectClassTopic.htm">System.Object</a>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Function.DocValues.html">Lucene.Net.Search.Function.DocValues</a>
</div>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Function.FieldScoreQuery.Type.html">Lucene.Net.Search.Function.FieldScoreQuery.Type</a>
</div>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Function.ValueSource.html">Lucene.Net.Search.Function.ValueSource</a>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Function.FieldCacheSource.html">Lucene.Net.Search.Function.FieldCacheSource</a>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Function.ByteFieldSource.html">Lucene.Net.Search.Function.ByteFieldSource</a>
</div>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Function.FloatFieldSource.html">Lucene.Net.Search.Function.FloatFieldSource</a>
</div>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Function.IntFieldSource.html">Lucene.Net.Search.Function.IntFieldSource</a>
</div>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Function.ShortFieldSource.html">Lucene.Net.Search.Function.ShortFieldSource</a>
</div>
</div>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Function.OrdFieldSource.html">Lucene.Net.Search.Function.OrdFieldSource</a>
</div>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Function.ReverseOrdFieldSource.html">Lucene.Net.Search.Function.ReverseOrdFieldSource</a>
</div>
</div>
<div class="Hierarchy">
<a href="Lucene.Net.Search.Query.html">Lucene.Net.Search.Query</a> ---- <a href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemICloneableClassTopic.htm">System.ICloneable</a><div class="Hierarchy"><a href="Lucene.Net.Search.Function.CustomScoreQuery.html">Lucene.Net.Search.Function.CustomScoreQuery</a></div><div class="Hierarchy"><a href="Lucene.Net.Search.Function.ValueSourceQuery.html">Lucene.Net.Search.Function.ValueSourceQuery</a><div class="Hierarchy"><a href="Lucene.Net.Search.Function.FieldScoreQuery.html">Lucene.Net.Search.Function.FieldScoreQuery</a></div></div></div>
</div>
<h4 class="dtH4">See Also</h4>
<p>
<a href="Lucene.Net.Search.Function.html">Lucene.Net.Search.Function Namespace
</a>
</p>
</div>
</body>
</html>
|
Mpdreamz/lucene.net
|
doc/core/Lucene.Net.Search.FunctionHierarchy.html
|
HTML
|
apache-2.0
| 3,694
|
//// [typesWithSpecializedConstructSignatures.ts]
// basic uses of specialized signatures without errors
class Base { foo: string }
class Derived1 extends Base { bar: string }
class Derived2 extends Base { baz: string }
class C {
constructor(x: 'hi');
constructor(x: 'bye');
constructor(x: string);
constructor(x) {
return x;
}
}
var c = new C('a');
interface I {
new(x: 'hi'): Derived1;
new(x: 'bye'): Derived2;
new(x: string): Base;
}
var i: I;
var a: {
new(x: 'hi'): Derived1;
new(x: 'bye'): Derived2;
new(x: string): Base;
};
c = i;
c = a;
i = a;
a = i;
var r1 = new C('hi');
var r2: Derived2 = new i('bye');
var r3: Base = new a('hm');
//// [typesWithSpecializedConstructSignatures.js]
// basic uses of specialized signatures without errors
var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var Base = /** @class */ (function () {
function Base() {
}
return Base;
}());
var Derived1 = /** @class */ (function (_super) {
__extends(Derived1, _super);
function Derived1() {
return _super !== null && _super.apply(this, arguments) || this;
}
return Derived1;
}(Base));
var Derived2 = /** @class */ (function (_super) {
__extends(Derived2, _super);
function Derived2() {
return _super !== null && _super.apply(this, arguments) || this;
}
return Derived2;
}(Base));
var C = /** @class */ (function () {
function C(x) {
return x;
}
return C;
}());
var c = new C('a');
var i;
var a;
c = i;
c = a;
i = a;
a = i;
var r1 = new C('hi');
var r2 = new i('bye');
var r3 = new a('hm');
|
synaptek/TypeScript
|
tests/baselines/reference/typesWithSpecializedConstructSignatures.js
|
JavaScript
|
apache-2.0
| 2,109
|
/**
* Copyright 2015 The Incremental DOM Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @type {TreeWalker}
*/
var walker_;
/**
* @return {TreeWalker} the current TreeWalker
*/
function getWalker() {
  // Accessor for the module-private walker; undefined until
  // setWalker() has been called.
  return walker_;
}
/**
* Sets the current TreeWalker
* @param {TreeWalker} walker
*/
function setWalker(walker) {
  // Overwrite the module-private walker reference.
  walker_ = walker;
}
/** */
export {
getWalker,
setWalker
};
|
markuskobler/incremental-dom
|
src/walker.js
|
JavaScript
|
apache-2.0
| 962
|
package org.drools.kproject;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
/**
 * Records structural changes to a KProject by listening for
 * {@link PropertyChangeEvent}s fired by the project, its KBases and their
 * KSessions. Additions, removals and renames are tracked by qualified name
 * so that a builder can later apply an incremental update; call
 * {@link #reset()} to clear the log once the changes have been consumed.
 */
public class KProjectChangeLog
        implements
        PropertyChangeListener {

    /** Set when a KProject property other than "kBases" changes. */
    private boolean kProjectDirty;

    /** Qualified names of KBases added since the last reset. */
    private Set<String> addedKBases;
    /** Qualified names of KBases removed since the last reset. */
    private Set<String> removedKBases;
    /** Qualified names of KSessions removed since the last reset. */
    private Set<String> removedKSessions;
    /** Qualified names of KSessions added since the last reset. */
    private Set<String> addedKSessions;

    /** Last observed KSession instances, keyed by qualified name. */
    private Map<String, KSession> kSessions;
    /** Last observed KBase instances, keyed by qualified name. */
    private Map<String, KBase> kBases;

    public KProjectChangeLog() {
        reset();
    }

    public boolean isKProjectDirty() {
        return kProjectDirty;
    }

    public void setKProjectDirty(boolean kProjectDirty) {
        this.kProjectDirty = kProjectDirty;
    }

    // NOTE: the lower-case-'k' accessor pair below duplicates the pair above;
    // both are kept for backward compatibility with existing callers and
    // bean introspection.
    public boolean iskProjectDirty() {
        return kProjectDirty;
    }

    public void setkProjectDirty(boolean kProjectDirty) {
        this.kProjectDirty = kProjectDirty;
    }

    public Set<String> getAddedKBases() {
        return addedKBases;
    }

    public void setAddedKBases(Set<String> addedKBases) {
        this.addedKBases = addedKBases;
    }

    public Set<String> getRemovedKBases() {
        return removedKBases;
    }

    public void setRemovedKBases(Set<String> removedKBases) {
        this.removedKBases = removedKBases;
    }

    public Set<String> getRemovedKSessions() {
        return removedKSessions;
    }

    public void setRemovedKSessions(Set<String> removedKSessions) {
        this.removedKSessions = removedKSessions;
    }

    public Set<String> getAddedKSessions() {
        return addedKSessions;
    }

    public void setAddedKSessions(Set<String> addedKSessions) {
        this.addedKSessions = addedKSessions;
    }

    public Map<String, KSession> getKSessions() {
        return kSessions;
    }

    public void setKSessions(Map<String, KSession> kSessions) {
        this.kSessions = kSessions;
    }

    public Map<String, KBase> getKBases() {
        return kBases;
    }

    public void setKBases(Map<String, KBase> kBases) {
        this.kBases = kBases;
    }

    /**
     * Routes change events from KProjectImpl, KBaseImpl and KSessionImpl into
     * the add/remove logs. Map-valued properties ("kBases", "kSessions") are
     * diffed by size to locate the single added or removed entry; rename
     * events ("name", "namespace") are translated into a removal of the old
     * qualified name plus an addition of the new one.
     */
    @SuppressWarnings("unchecked")
    public void propertyChange(PropertyChangeEvent evt) {
        if ( evt.getSource() instanceof KProjectImpl ) {
            if ( "kBases".equals( evt.getPropertyName() ) ) {
                Map<String, KBaseImpl> oldKBases = (Map<String, KBaseImpl>) evt.getOldValue();
                Map<String, KBaseImpl> newKBases = (Map<String, KBaseImpl>) evt.getNewValue();
                if ( oldKBases.size() < newKBases.size() ) {
                    // kBase added: find the key present only in the new map.
                    for ( Entry<String, KBaseImpl> entry : newKBases.entrySet() ) {
                        if ( !oldKBases.containsKey( entry.getKey() ) ) {
                            removedKBases.remove( entry.getKey() );
                            addedKBases.add( entry.getKey() );
                            kBases.put( entry.getKey(), newKBases.get( entry.getKey() ) );
                            return;
                        }
                    }
                    throw new IllegalStateException( "Maps are different sizes, yet we can't find the new KBase" );
                } else if ( oldKBases.size() > newKBases.size() ) {
                    // kBase removed: find the key present only in the old map.
                    for ( Entry<String, KBaseImpl> entry : oldKBases.entrySet() ) {
                        if ( !newKBases.containsKey( entry.getKey() ) ) {
                            addedKBases.remove( entry.getKey() );
                            removedKBases.add( entry.getKey() );
                            kBases.put( entry.getKey(), oldKBases.get( entry.getKey() ) );
                            return;
                        }
                    }
                    throw new IllegalStateException( "Maps are different sizes, yet we can't find the removed KBase" );
                }
            } else {
                // Any other KProject property change just marks it dirty.
                kProjectDirty = true;
            }
        } else if ( evt.getSource() instanceof KBaseImpl ) {
            KBaseImpl kBase = (KBaseImpl) evt.getSource();
            if ( "kSessions".equals( evt.getPropertyName() ) ) {
                Map<String, KSessionImpl> oldKSession = (Map<String, KSessionImpl>) evt.getOldValue();
                Map<String, KSessionImpl> newKSession = (Map<String, KSessionImpl>) evt.getNewValue();
                if ( oldKSession.size() < newKSession.size() ) {
                    // KSession added: find the key present only in the new map.
                    for ( Entry<String, KSessionImpl> entry : newKSession.entrySet() ) {
                        if ( !oldKSession.containsKey( entry.getKey() ) ) {
                            removedKSessions.remove( entry.getKey() );
                            addedKSessions.add( entry.getKey() );
                            kSessions.put( entry.getKey(), newKSession.get( entry.getKey() ) );
                            return;
                        }
                    }
                    // BUGFIX: message previously said "KBase" in this KSession branch.
                    throw new IllegalStateException( "Maps are different sizes, yet we can't find the new KSession" );
                } else if ( oldKSession.size() > newKSession.size() ) {
                    // KSession removed: find the key present only in the old map.
                    for ( Entry<String, KSessionImpl> entry : oldKSession.entrySet() ) {
                        if ( !newKSession.containsKey( entry.getKey() ) ) {
                            addedKSessions.remove( entry.getKey() );
                            removedKSessions.add( entry.getKey() );
                            kSessions.put( entry.getKey(), oldKSession.get( entry.getKey() ) );
                            return;
                        }
                    }
                    // BUGFIX: message previously said "KBase" in this KSession branch.
                    throw new IllegalStateException( "Maps are different sizes, yet we can't find the removed KSession" );
                }
            } else if ( "namespace".equals( evt.getPropertyName() ) ) {
                // Namespace rename: log old qualified name removed, new one added.
                String oldV = (String) evt.getOldValue();
                String newV = (String) evt.getNewValue();
                String oldQName = oldV + "." + kBase.getName();
                String newQName = newV + "." + kBase.getName();
                kBase.getKProject().moveKBase( oldQName, newQName );
                removedKBases.remove( newQName );
                removedKBases.add( oldQName );
                addedKBases.remove( oldQName );
                addedKBases.add( newQName );
                kBases.put( newQName, kBase );
            } else if ( "name".equals( evt.getPropertyName() ) ) {
                // Name rename: same treatment as a namespace change.
                String oldV = (String) evt.getOldValue();
                String newV = (String) evt.getNewValue();
                String oldQName = kBase.getNamespace() + "." + oldV;
                String newQName = kBase.getNamespace() + "." + newV;
                kBase.getKProject().moveKBase( oldQName, newQName );
                removedKBases.remove( newQName );
                removedKBases.add( oldQName );
                addedKBases.remove( oldQName );
                addedKBases.add( newQName );
                kBases.put( newQName, kBase );
            } else {
                // Any other KBase property change re-marks the KBase as added.
                addedKBases.add( kBase.getQName() );
            }
        } else if ( evt.getSource() instanceof KSessionImpl ) {
            KSessionImpl kSession = (KSessionImpl) evt.getSource();
            if ( "namespace".equals( evt.getPropertyName() ) ) {
                String oldV = (String) evt.getOldValue();
                String newV = (String) evt.getNewValue();
                String oldQName = oldV + "." + kSession.getName();
                String newQName = newV + "." + kSession.getName();
                kSession.getKBase().moveKSession( oldQName, newQName );
                removedKSessions.remove( newQName );
                removedKSessions.add( oldQName );
                addedKSessions.remove( oldQName );
                addedKSessions.add( newQName );
                kSessions.put( newQName, kSession );
            } else if ( "name".equals( evt.getPropertyName() ) ) {
                String oldV = (String) evt.getOldValue();
                String newV = (String) evt.getNewValue();
                String oldQName = kSession.getNamespace() + "." + oldV;
                String newQName = kSession.getNamespace() + "." + newV;
                kSession.getKBase().moveKSession( oldQName, newQName );
                removedKSessions.remove( newQName );
                removedKSessions.add( oldQName );
                addedKSessions.remove( oldQName );
                addedKSessions.add( newQName );
                kSessions.put( newQName, kSession );
            } else {
                addedKSessions.add( kSession.getQName() );
                kSessions.put( kSession.getQName(), kSession );
            }
        }
    }

    /** Clears all recorded changes and marks the project as clean. */
    public void reset() {
        kProjectDirty = false;
        removedKBases = new HashSet<String>();
        addedKBases = new HashSet<String>();
        removedKSessions = new HashSet<String>();
        addedKSessions = new HashSet<String>();
        kBases = new HashMap<String, KBase>();
        kSessions = new HashMap<String, KSession>();
    }
}
|
pperboires/PocDrools
|
drools-compiler/src/main/java/org/drools/kproject/KProjectChangeLog.java
|
Java
|
apache-2.0
| 9,331
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_79) on Sun Jan 10 06:57:37 PST 2016 -->
<title>Uses of Interface org.apache.nutch.indexer.IndexCleaningFilter (apache-nutch 2.3.1 API)</title>
<meta name="date" content="2016-01-10">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Interface org.apache.nutch.indexer.IndexCleaningFilter (apache-nutch 2.3.1 API)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../org/apache/nutch/indexer/IndexCleaningFilter.html" title="interface in org.apache.nutch.indexer">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/apache/nutch/indexer/class-use/IndexCleaningFilter.html" target="_top">Frames</a></li>
<li><a href="IndexCleaningFilter.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Interface org.apache.nutch.indexer.IndexCleaningFilter" class="title">Uses of Interface<br>org.apache.nutch.indexer.IndexCleaningFilter</h2>
</div>
<div class="classUseContainer">No usage of org.apache.nutch.indexer.IndexCleaningFilter</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../org/apache/nutch/indexer/IndexCleaningFilter.html" title="interface in org.apache.nutch.indexer">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/apache/nutch/indexer/class-use/IndexCleaningFilter.html" target="_top">Frames</a></li>
<li><a href="IndexCleaningFilter.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2015 The Apache Software Foundation</small></p>
</body>
</html>
|
supermy/nutch2
|
docs/api/org/apache/nutch/indexer/class-use/IndexCleaningFilter.html
|
HTML
|
apache-2.0
| 4,376
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.core;
import static com.google.common.base.Preconditions.*;
import static org.apache.solr.common.params.CommonParams.*;
import java.io.Closeable;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.lang.reflect.Constructor;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.io.FileUtils;
import org.apache.lucene.analysis.util.ResourceLoader;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexDeletionPolicy;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.solr.client.solrj.impl.BinaryResponseParser;
import org.apache.solr.cloud.CloudDescriptor;
import org.apache.solr.cloud.ZkSolrResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.CommonParams.EchoParamStyle;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.params.UpdateParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.core.DirectoryFactory.DirContext;
import org.apache.solr.handler.IndexFetcher;
import org.apache.solr.handler.ReplicationHandler;
import org.apache.solr.handler.RequestHandlerBase;
import org.apache.solr.handler.admin.ShowFileRequestHandler;
import org.apache.solr.handler.component.HighlightComponent;
import org.apache.solr.handler.component.SearchComponent;
import org.apache.solr.logging.MDCUtils;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.request.SolrRequestHandler;
import org.apache.solr.response.BinaryResponseWriter;
import org.apache.solr.response.CSVResponseWriter;
import org.apache.solr.response.JSONResponseWriter;
import org.apache.solr.response.PHPResponseWriter;
import org.apache.solr.response.PHPSerializedResponseWriter;
import org.apache.solr.response.PythonResponseWriter;
import org.apache.solr.response.QueryResponseWriter;
import org.apache.solr.response.RawResponseWriter;
import org.apache.solr.response.RubyResponseWriter;
import org.apache.solr.response.SchemaXmlResponseWriter;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.response.SortingResponseWriter;
import org.apache.solr.response.XMLResponseWriter;
import org.apache.solr.response.transform.TransformerFactory;
import org.apache.solr.rest.ManagedResourceStorage;
import org.apache.solr.rest.ManagedResourceStorage.StorageIO;
import org.apache.solr.rest.RestManager;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.IndexSchemaFactory;
import org.apache.solr.schema.ManagedIndexSchema;
import org.apache.solr.schema.SimilarityFactory;
import org.apache.solr.search.QParserPlugin;
import org.apache.solr.search.SolrFieldCacheMBean;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.ValueSourceParser;
import org.apache.solr.search.stats.LocalStatsCache;
import org.apache.solr.search.stats.StatsCache;
import org.apache.solr.update.DefaultSolrCoreState;
import org.apache.solr.update.DirectUpdateHandler2;
import org.apache.solr.update.SolrCoreState;
import org.apache.solr.update.SolrCoreState.IndexWriterCloser;
import org.apache.solr.update.SolrIndexWriter;
import org.apache.solr.update.UpdateHandler;
import org.apache.solr.update.VersionInfo;
import org.apache.solr.update.processor.DistributedUpdateProcessorFactory;
import org.apache.solr.update.processor.LogUpdateProcessorFactory;
import org.apache.solr.update.processor.RunUpdateProcessorFactory;
import org.apache.solr.update.processor.UpdateRequestProcessorChain;
import org.apache.solr.update.processor.UpdateRequestProcessorChain.ProcessorInfo;
import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.PropertiesInputStream;
import org.apache.solr.util.RefCounted;
import org.apache.solr.util.plugin.NamedListInitializedPlugin;
import org.apache.solr.util.plugin.PluginInfoInitialized;
import org.apache.solr.util.plugin.SolrCoreAware;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
*
*/
public final class SolrCore implements SolrInfoMBean, Closeable {
public static final String version="1.0";
// These should *only* be used for debugging or monitoring purposes
public static final AtomicLong numOpens = new AtomicLong();
public static final AtomicLong numCloses = new AtomicLong();
public static Map<SolrCore,Exception> openHandles = Collections.synchronizedMap(new IdentityHashMap<SolrCore, Exception>());
public static final Logger log = LoggerFactory.getLogger(SolrCore.class);
public static final Logger requestLog = LoggerFactory.getLogger(SolrCore.class.getName() + ".Request");
private String name;
private String logid; // used to show what name is set
private CoreDescriptor coreDescriptor;
private boolean isReloaded = false;
private StatsCache statsCache;
private final SolrConfig solrConfig;
private final SolrResourceLoader resourceLoader;
private volatile IndexSchema schema;
private final String dataDir;
private final String ulogDir;
private final UpdateHandler updateHandler;
private final SolrCoreState solrCoreState;
private final long startTime;
private final RequestHandlers reqHandlers;
private final PluginBag<SearchComponent> searchComponents = new PluginBag<>(SearchComponent.class, this);
private final PluginBag<UpdateRequestProcessorFactory> updateProcessors = new PluginBag<>(UpdateRequestProcessorFactory.class, this);
private final Map<String,UpdateRequestProcessorChain> updateProcessorChains;
private final Map<String, SolrInfoMBean> infoRegistry;
private final IndexDeletionPolicyWrapper solrDelPolicy;
private final DirectoryFactory directoryFactory;
private IndexReaderFactory indexReaderFactory;
private final Codec codec;
private final MemClassLoader memClassLoader;
private final List<Runnable> confListeners = new CopyOnWriteArrayList<>();
private final ReentrantLock ruleExpiryLock;
public long getStartTime() { return startTime; }
private final RestManager restManager;
public RestManager getRestManager() {
return restManager;
}
static int boolean_query_max_clause_count = Integer.MIN_VALUE;
// only change the BooleanQuery maxClauseCount once for ALL cores...
  // Applies this core's configured maxBooleanClauses to the JVM-wide
  // (static) BooleanQuery limit. Guarded by the SolrCore class lock because
  // the setting is global across every core in this JVM.
  void booleanQueryMaxClauseCount() {
    synchronized(SolrCore.class) {
      if (boolean_query_max_clause_count == Integer.MIN_VALUE) {
        // First core to initialize wins; sentinel MIN_VALUE means "unset".
        boolean_query_max_clause_count = solrConfig.booleanQueryMaxClauseCount;
        BooleanQuery.setMaxClauseCount(boolean_query_max_clause_count);
      } else if (boolean_query_max_clause_count != solrConfig.booleanQueryMaxClauseCount ) {
        // A later core requested a different value: keep the first and log.
        log.debug("BooleanQuery.maxClauseCount={}, ignoring {}", boolean_query_max_clause_count, solrConfig.booleanQueryMaxClauseCount);
      }
    }
  }
/**
* The SolrResourceLoader used to load all resources for this core.
* @since solr 1.3
*/
public SolrResourceLoader getResourceLoader() {
return resourceLoader;
}
/**
* Gets the configuration resource name used by this core instance.
* @since solr 1.3
*/
public String getConfigResource() {
return solrConfig.getResourceName();
}
/**
* Gets the configuration object used by this core instance.
*/
public SolrConfig getSolrConfig() {
return solrConfig;
}
/**
* Gets the schema resource name used by this core instance.
* @since solr 1.3
*/
public String getSchemaResource() {
return getLatestSchema().getResourceName();
}
/** @return the latest snapshot of the schema used by this core instance. */
public IndexSchema getLatestSchema() {
return schema;
}
/** Sets the latest schema snapshot to be used by this core instance. */
public void setLatestSchema(IndexSchema replacementSchema) {
schema = replacementSchema;
}
public String getDataDir() {
return dataDir;
}
public String getUlogDir() {
return ulogDir;
}
  /**
   * Returns the directory of the currently active index, as reported by the
   * open searcher; while no searcher exists yet it falls back to
   * {@link #getNewIndexDir()}, and to {@code dataDir + "index/"} when the
   * searcher reports no path.
   */
  public String getIndexDir() {
    synchronized (searcherLock) {
      // No searcher yet (core still starting up): derive from index.properties.
      if (_searcher == null) return getNewIndexDir();
      SolrIndexSearcher searcher = _searcher.get();
      return searcher.getPath() == null ? dataDir + "index/" : searcher
          .getPath();
    }
  }
/**
* Returns the indexdir as given in index.properties. If index.properties exists in dataDir and
* there is a property <i>index</i> available and it points to a valid directory
* in dataDir that is returned Else dataDir/index is returned. Only called for creating new indexSearchers
* and indexwriters. Use the getIndexDir() method to know the active index directory
*
* @return the indexdir as given in index.properties
*/
  public String getNewIndexDir() {
    // Default index location, used when index.properties is absent/unusable.
    String result = dataDir + "index/";
    Properties p = new Properties();
    Directory dir = null;
    try {
      dir = getDirectoryFactory().get(getDataDir(), DirContext.META_DATA, getSolrConfig().indexConfig.lockType);
      IndexInput input;
      try {
        input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, IOContext.DEFAULT);
      } catch (FileNotFoundException | NoSuchFileException e) {
        // A missing index.properties is the normal case; use the default dir.
        input = null;
      }
      if (input != null) {
        final InputStream is = new PropertiesInputStream(input);
        try {
          p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
          String s = p.getProperty("index");
          if (s != null && s.trim().length() > 0) {
            // index.properties names an alternate index directory under dataDir.
            result = dataDir + s;
          }
        } catch (Exception e) {
          // Best-effort: a corrupt properties file just means the default is used.
          log.error("Unable to load " + IndexFetcher.INDEX_PROPERTIES, e);
        } finally {
          // Closing the stream also releases the underlying IndexInput.
          IOUtils.closeQuietly(is);
        }
      }
    } catch (IOException e) {
      SolrException.log(log, "", e);
    } finally {
      // Always release the Directory handle obtained from the factory.
      if (dir != null) {
        try {
          getDirectoryFactory().release(dir);
        } catch (IOException e) {
          SolrException.log(log, "", e);
        }
      }
    }
    if (!result.equals(lastNewIndexDir)) {
      log.info("New index directory detected: old="+lastNewIndexDir + " new=" + result);
    }
    // Unsynchronized by design -- lastNewIndexDir is used only for the
    // change-detection logging above (see field comment).
    lastNewIndexDir = result;
    return result;
  }
private String lastNewIndexDir; // for debugging purposes only... access not synchronized, but that's ok
public DirectoryFactory getDirectoryFactory() {
return directoryFactory;
}
public IndexReaderFactory getIndexReaderFactory() {
return indexReaderFactory;
}
@Override
public String getName() {
return name;
}
public void setName(String v) {
this.name = v;
this.logid = (v==null)?"":("["+v+"] ");
this.coreDescriptor = new CoreDescriptor(v, this.coreDescriptor);
}
public String getLogId()
{
return this.logid;
}
/**
* Returns a Map of name vs SolrInfoMBean objects. The returned map is an instance of
* a ConcurrentHashMap and therefore no synchronization is needed for putting, removing
* or iterating over it.
*
* @return the Info Registry map which contains SolrInfoMBean objects keyed by name
* @since solr 1.3
*/
public Map<String, SolrInfoMBean> getInfoRegistry() {
return infoRegistry;
}
/**
 * Builds the index-deletion-policy wrapper for this core. A wrapper handed
 * over from a previous core instance (core reload) is reused as-is; otherwise
 * a policy is instantiated from solrconfig.xml — falling back to
 * {@link SolrDeletionPolicy} when none is configured — and wrapped.
 */
private IndexDeletionPolicyWrapper initDeletionPolicy(IndexDeletionPolicyWrapper delPolicyWrapper) {
  if (delPolicyWrapper != null) {
    return delPolicyWrapper;
  }
  final PluginInfo info = solrConfig.getPluginInfo(IndexDeletionPolicy.class.getName());
  final IndexDeletionPolicy policy;
  if (info == null) {
    policy = new SolrDeletionPolicy();
  } else {
    policy = createInstance(info.className, IndexDeletionPolicy.class, "Deletion Policy for SOLR", this, getResourceLoader());
    if (policy instanceof NamedListInitializedPlugin) {
      ((NamedListInitializedPlugin) policy).init(info.initArgs);
    }
  }
  return new IndexDeletionPolicyWrapper(policy);
}
/**
 * Instantiates the SolrEventListener plugins declared in solrconfig.xml and
 * registers each one on the list matching its {@code event} attribute
 * ("firstSearcher" or "newSearcher"). Entries with any other event value are
 * ignored.
 */
private void initListeners() {
  final Class<SolrEventListener> clazz = SolrEventListener.class;
  final String label = "Event Listener";
  for (PluginInfo info : solrConfig.getPluginInfos(SolrEventListener.class.getName())) {
    final String event = info.attributes.get("event");
    final boolean isFirst = "firstSearcher".equals(event);
    final boolean isNew = "newSearcher".equals(event);
    if (!isFirst && !isNew) {
      continue;
    }
    final SolrEventListener listener = createInitInstance(info, clazz, label, null);
    if (isFirst) {
      firstSearcherListeners.add(listener);
      log.info("[{}] Added SolrEventListener for firstSearcher: [{}]", logid, listener);
    } else {
      newSearcherListeners.add(listener);
      log.info("[{}] Added SolrEventListener for newSearcher: [{}]", logid, listener);
    }
  }
}
// Listeners fired for the very first searcher of this core (see initListeners / registerFirstSearcherListener).
final List<SolrEventListener> firstSearcherListeners = new ArrayList<>();
// Listeners fired for every subsequently opened searcher (see initListeners / registerNewSearcherListener).
final List<SolrEventListener> newSearcherListeners = new ArrayList<>();
/**
 * Registers a listener to be invoked for the first searcher opened by this core.
 *
 * NOTE: this function is not thread safe. However, it is safe to call within the
 * <code>inform( SolrCore core )</code> function for <code>SolrCoreAware</code> classes.
 * Outside <code>inform</code>, this could potentially throw a ConcurrentModificationException
 *
 * @see SolrCoreAware
 */
public void registerFirstSearcherListener( SolrEventListener listener )
{
  firstSearcherListeners.add( listener );
}
/**
 * Registers a listener to be invoked each time a new searcher is opened.
 *
 * NOTE: this function is not thread safe. However, it is safe to call within the
 * <code>inform( SolrCore core )</code> function for <code>SolrCoreAware</code> classes.
 * Outside <code>inform</code>, this could potentially throw a ConcurrentModificationException
 *
 * @see SolrCoreAware
 */
public void registerNewSearcherListener( SolrEventListener listener )
{
  newSearcherListeners.add( listener );
}
/**
 * Registers a response writer under the given name, returning the writer
 * previously registered under that name (or null).
 *
 * NOTE: this function is not thread safe. However, it is safe to call within the
 * <code>inform( SolrCore core )</code> function for <code>SolrCoreAware</code> classes.
 * Outside <code>inform</code>, this could potentially throw a ConcurrentModificationException
 *
 * @see SolrCoreAware
 */
public QueryResponseWriter registerResponseWriter( String name, QueryResponseWriter responseWriter ){
  return responseWriters.put(name, responseWriter);
}
/**
 * Creates a replacement SolrCore from a freshly loaded {@link ConfigSet}.
 * The shared SolrCoreState ref-count is incremented up-front so the
 * IndexWriter/DirectoryFactory survive the hand-off to the new core. If the
 * index directory is about to change, no previous-core state is passed along
 * and the new core opens everything fresh.
 *
 * @return the newly constructed core (the caller is responsible for swapping it in)
 * @throws IOException if opening the new index writer or searcher fails
 */
public SolrCore reload(ConfigSet coreConfig) throws IOException {
  solrCoreState.increfSolrCoreState();
  final SolrCore currentCore;
  if (!getNewIndexDir().equals(getIndexDir())) {
    // the directory is changing, don't pass on state
    currentCore = null;
  } else {
    currentCore = this;
  }
  boolean success = false;
  SolrCore core = null;
  try {
    core = new SolrCore(getName(), getDataDir(), coreConfig.getSolrConfig(),
        coreConfig.getIndexSchema(), coreDescriptor, updateHandler, solrDelPolicy, currentCore);
    // we open a new IndexWriter to pick up the latest config
    core.getUpdateHandler().getSolrCoreState().newIndexWriter(core, false);
    // eagerly open (and register) the first searcher of the new core
    core.getSearcher(true, false, null, true);
    success = true;
    return core;
  } finally {
    // close the new core on any errors that have occurred.
    if (!success) {
      IOUtils.closeQuietly(core);
    }
  }
}
/**
 * Creates the {@link DirectoryFactory} for this core: the implementation
 * configured in solrconfig.xml when present (initialized with its plugin
 * args), otherwise the default {@link NRTCachingDirectoryFactory}.
 */
private DirectoryFactory initDirectoryFactory() {
  final PluginInfo info = solrConfig.getPluginInfo(DirectoryFactory.class.getName());
  if (info == null) {
    log.info("solr.NRTCachingDirectoryFactory");
    return new NRTCachingDirectoryFactory();
  }
  log.info(info.className);
  final DirectoryFactory factory = getResourceLoader().newInstance(info.className, DirectoryFactory.class);
  factory.init(info.initArgs);
  return factory;
}
/**
 * Initializes {@code this.indexReaderFactory} from solrconfig.xml, falling
 * back to {@link StandardIndexReaderFactory} when no factory is configured.
 */
private void initIndexReaderFactory() {
  final PluginInfo info = solrConfig.getPluginInfo(IndexReaderFactory.class.getName());
  final IndexReaderFactory factory;
  if (info == null) {
    factory = new StandardIndexReaderFactory();
  } else {
    factory = resourceLoader.newInstance(info.className, IndexReaderFactory.class);
    factory.init(info.initArgs);
  }
  this.indexReaderFactory = factory;
}
// Normalized index dirs seen by any core in this JVM; used by initIndex to
// detect the first open of a directory. Guarded by synchronized(SolrCore.class).
// protect via synchronized(SolrCore.class)
private static Set<String> dirs = new HashSet<>();
/**
 * Prepares the on-disk index for this core: on the first open of a directory
 * (and not during a reload) checks for and optionally clears a stale write
 * lock, and creates a brand-new empty index if none exists yet.
 *
 * @param reload true when called as part of a core reload — lock checking is skipped
 * @throws IOException on directory access failure, or LockObtainFailedException
 *         when the index is locked and unlockOnStartup is false
 */
void initIndex(boolean reload) throws IOException {
  String indexDir = getNewIndexDir();
  boolean indexExists = getDirectoryFactory().exists(indexDir);
  boolean firstTime;
  // dirs is JVM-global; add() returns true only for the first core to open this dir
  synchronized (SolrCore.class) {
    firstTime = dirs.add(getDirectoryFactory().normalize(indexDir));
  }
  boolean removeLocks = solrConfig.unlockOnStartup;
  initIndexReaderFactory();
  if (indexExists && firstTime && !reload) {
    Directory dir = directoryFactory.get(indexDir, DirContext.DEFAULT,
        getSolrConfig().indexConfig.lockType);
    try {
      if (IndexWriter.isLocked(dir)) {
        if (removeLocks) {
          log.warn(
              logid
                  + "WARNING: Solr index directory '{}' is locked. Unlocking...",
              indexDir);
          // acquiring and closing the lock releases a stale write lock
          dir.makeLock(IndexWriter.WRITE_LOCK_NAME).close();
        } else {
          log.error(logid
              + "Solr index directory '{}' is locked. Throwing exception",
              indexDir);
          throw new LockObtainFailedException(
              "Index locked for write for core " + name);
        }
      }
    } finally {
      directoryFactory.release(dir);
    }
  }
  // Create the index if it doesn't exist.
  if(!indexExists) {
    log.warn(logid+"Solr index directory '" + new File(indexDir) + "' doesn't exist."
        + " Creating new index...");
    SolrIndexWriter writer = SolrIndexWriter.create(this, "SolrCore.initIndex", indexDir, getDirectoryFactory(), true,
        getLatestSchema(), solrConfig.indexConfig, solrDelPolicy, codec);
    writer.close();
  }
}
/**
 * Creates an instance by trying a constructor that accepts a SolrCore before
 * trying the default (no arg) constructor.
 *
 * @param className the instance class to create
 * @param cast the class or interface that the instance should extend or implement
 * @param msg a message helping compose the exception error if any occurs.
 * @param core The SolrCore instance for which this object needs to be loaded
 * @return the desired instance
 * @throws SolrException if the object could not be instantiated
 */
public static <T> T createInstance(String className, Class<T> cast, String msg, SolrCore core, ResourceLoader resourceLoader) {
  Class<? extends T> clazz = null;
  if (msg == null) msg = "SolrCore Object";
  try {
    clazz = resourceLoader.findClass(className, cast);
    //most of the classes do not have constructors which takes SolrCore argument. It is recommended to obtain SolrCore by implementing SolrCoreAware.
    // So invariably always it will cause a NoSuchMethodException. So iterate though the list of available constructors
    Constructor<?>[] cons = clazz.getConstructors();
    for (Constructor<?> con : cons) {
      Class<?>[] types = con.getParameterTypes();
      if (types.length == 1 && types[0] == SolrCore.class) {
        return cast.cast(con.newInstance(core));
      }
    }
    return resourceLoader.newInstance(className, cast);//use the empty constructor
  } catch (SolrException e) {
    // already a SolrException — re-throw untouched so the error code survives
    throw e;
  } catch (Exception e) {
    // The JVM likes to wrap our helpful SolrExceptions in things like
    // "InvocationTargetException" that have no useful getMessage
    if (null != e.getCause() && e.getCause() instanceof SolrException) {
      SolrException inner = (SolrException) e.getCause();
      throw inner;
    }
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error Instantiating " + msg + ", " + className + " failed to instantiate " + cast.getName(), e);
  }
}
/**
 * Instantiates an UpdateHandler for a reloaded core via the
 * {@code (SolrCore, UpdateHandler)} constructor, so the new handler can take
 * over state from the previous core's handler. Unlike
 * {@link #createInstance}, there is no fallback to a no-arg constructor: a
 * missing two-arg constructor is an error.
 *
 * @throws SolrException if no suitable constructor exists or instantiation fails
 */
private UpdateHandler createReloadedUpdateHandler(String className, String msg, UpdateHandler updateHandler) {
  Class<? extends UpdateHandler> clazz = null;
  if (msg == null) msg = "SolrCore Object";
  try {
    clazz = getResourceLoader().findClass(className, UpdateHandler.class);
    //most of the classes do not have constructors which takes SolrCore argument. It is recommended to obtain SolrCore by implementing SolrCoreAware.
    // So invariably always it will cause a NoSuchMethodException. So iterate though the list of available constructors
    Constructor<?>[] cons = clazz.getConstructors();
    for (Constructor<?> con : cons) {
      Class<?>[] types = con.getParameterTypes();
      if(types.length == 2 && types[0] == SolrCore.class && types[1] == UpdateHandler.class){
        return UpdateHandler.class.cast(con.newInstance(this, updateHandler));
      }
    }
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,"Error Instantiating "+msg+", "+className+ " could not find proper constructor for " + UpdateHandler.class.getName());
  } catch (SolrException e) {
    throw e;
  } catch (Exception e) {
    // The JVM likes to wrap our helpful SolrExceptions in things like
    // "InvocationTargetException" that have no useful getMessage
    if (null != e.getCause() && e.getCause() instanceof SolrException) {
      SolrException inner = (SolrException) e.getCause();
      throw inner;
    }
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,"Error Instantiating "+msg+", "+className+ " failed to instantiate " + UpdateHandler.class.getName(), e);
  }
}
/**
 * Creates a plugin instance from its {@link PluginInfo} and runs the
 * appropriate init protocol: {@link PluginInfoInitialized} wins over
 * {@link NamedListInitializedPlugin}; a {@link SearchComponent} additionally
 * gets its name set from the plugin info.
 *
 * @param defClassName class to instantiate when the info carries no className
 * @return the initialized instance, or null when {@code info} is null
 */
public <T extends Object> T createInitInstance(PluginInfo info,Class<T> cast, String msg, String defClassName){
  if (info == null) {
    return null;
  }
  final String className = (info.className == null) ? defClassName : info.className;
  final T instance = createInstance(className, cast, msg, this, getResourceLoader());
  if (instance instanceof PluginInfoInitialized) {
    ((PluginInfoInitialized) instance).init(info);
  } else if (instance instanceof NamedListInitializedPlugin) {
    ((NamedListInitializedPlugin) instance).init(info.initArgs);
  }
  if (instance instanceof SearchComponent) {
    ((SearchComponent) instance).setName(info.name);
  }
  return instance;
}
/** Creates a fresh UpdateHandler of the given class (no previous-handler state to inherit). */
private UpdateHandler createUpdateHandler(String className) {
  return createInstance(className, UpdateHandler.class, "Update Handler", this, getResourceLoader());
}
/** Creates an UpdateHandler that inherits state from a previous core's handler (core reload path). */
private UpdateHandler createUpdateHandler(String className, UpdateHandler updateHandler) {
  return createReloadedUpdateHandler(className, "Update Handler", updateHandler);
}
/**
 * Creates a new core and register it in the list of cores.
 * If a core with the same name already exists, it will be stopped and replaced by this one.
 *
 * @param dataDir the index directory
 * @param config a solr config instance
 * @param schema a solr schema instance
 *
 * @since solr 1.3
 * @deprecated will be removed in the next release
 */
@Deprecated
public SolrCore(String name, String dataDir, SolrConfig config, IndexSchema schema, CoreDescriptor cd) {
  this(name, dataDir, config, schema, cd, null, null, null);
}
/** Creates a core from a descriptor and ConfigSet; delegates to the full constructor with no inherited state. */
public SolrCore(CoreDescriptor cd, ConfigSet coreConfig) {
  this(cd.getName(), null, coreConfig.getSolrConfig(), coreConfig.getIndexSchema(), cd, null, null, null);
}
/**
 * Creates a new core that is to be loaded lazily. i.e. lazyLoad="true" in solr.xml
 *
 * Most fields are deliberately left null: this placeholder core is never used
 * to serve requests, only to hold the name/descriptor until the real core is
 * loaded.
 *
 * @since solr 4.1
 * @deprecated will be removed in the next release
 */
@Deprecated
public SolrCore(String name, CoreDescriptor coreDescriptor) {
  this.coreDescriptor = coreDescriptor;
  this.setName(name);
  this.schema = null;
  this.dataDir = null;
  this.ulogDir = null;
  this.solrConfig = null;
  this.startTime = System.currentTimeMillis();
  this.maxWarmingSearchers = 2; // we don't have a config yet, just pick a number.
  this.slowQueryThresholdMillis = -1;
  this.resourceLoader = null;
  this.updateHandler = null;
  this.isReloaded = true;
  this.reqHandlers = null;
  this.updateProcessorChains = null;
  this.infoRegistry = null;
  this.codec = null;
  this.ruleExpiryLock = null;
  this.memClassLoader = null;
  this.directoryFactory = null;
  this.solrCoreState = null;
  this.restManager = null;
  this.solrDelPolicy = null;
}
/**
 * Creates a new core and register it in the list of cores. If a core with the
 * same name already exists, it will be stopped and replaced by this one.
 *
 * @param dataDir
 *          the index directory
 * @param config
 *          a solr config instance
 * @param schema
 *          a solr schema instance
 *
 * @since solr 1.3
 */
public SolrCore(String name, String dataDir, SolrConfig config,
    IndexSchema schema, CoreDescriptor coreDescriptor, UpdateHandler updateHandler,
    IndexDeletionPolicyWrapper delPolicy, SolrCore prev) {
  checkNotNull(coreDescriptor, "coreDescriptor cannot be null");
  this.coreDescriptor = coreDescriptor;
  setName(name);
  MDCUtils.setCore(name); // show the core name in the error logs
  resourceLoader = config.getResourceLoader();
  this.solrConfig = config;
  // A non-null updateHandler means this is a reload: reuse the previous core's
  // SolrCoreState and DirectoryFactory instead of creating new ones.
  if (updateHandler == null) {
    directoryFactory = initDirectoryFactory();
    solrCoreState = new DefaultSolrCoreState(directoryFactory);
  } else {
    solrCoreState = updateHandler.getSolrCoreState();
    directoryFactory = solrCoreState.getDirectoryFactory();
    isReloaded = true;
  }
  this.dataDir = initDataDir(dataDir, config, coreDescriptor);
  this.ulogDir = initUpdateLogDir(coreDescriptor);
  log.info("[{}] Opening new SolrCore at [{}], dataDir=[{}]", logid, resourceLoader.getInstanceDir(), dataDir);
  checkVersionFieldExistsInSchema(schema, coreDescriptor);
  // Initialize JMX
  this.infoRegistry = initInfoRegistry(name, config);
  infoRegistry.put("fieldCache", new SolrFieldCacheMBean());
  this.schema = initSchema(config, schema);
  this.startTime = System.currentTimeMillis();
  this.maxWarmingSearchers = config.maxWarmingSearchers;
  this.slowQueryThresholdMillis = config.slowQueryThresholdMillis;
  booleanQueryMaxClauseCount();
  final CountDownLatch latch = new CountDownLatch(1);
  try {
    // NOTE: initialization order below is significant — e.g. processors before
    // handlers, and the searcherExecutor stall before the update handler.
    initListeners();
    this.solrDelPolicy = initDeletionPolicy(delPolicy);
    this.codec = initCodec(solrConfig, this.schema);
    memClassLoader = new MemClassLoader(PluginBag.RuntimeLib.getLibObjects(this, solrConfig.getPluginInfos(PluginBag.RuntimeLib.class.getName())), getResourceLoader());
    initIndex(prev != null);
    initWriters();
    qParserPlugins.init(createInstances(QParserPlugin.standardPlugins), this);
    valueSourceParsers.init(ValueSourceParser.standardValueSourceParsers, this);
    transformerFactories.init(TransformerFactory.defaultFactories, this);
    loadSearchComponents();
    updateProcessors.init(Collections.<String, UpdateRequestProcessorFactory>emptyMap(), this);
    // Processors initialized before the handlers
    updateProcessorChains = loadUpdateProcessorChains();
    reqHandlers = new RequestHandlers(this);
    reqHandlers.initHandlersFromConfig(solrConfig);
    // Handle things that should eventually go away
    initDeprecatedSupport();
    statsCache = initStatsCache();
    // cause the executor to stall so firstSearcher events won't fire
    // until after inform() has been called for all components.
    // searchExecutor must be single-threaded for this to work
    searcherExecutor.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        latch.await();
        return null;
      }
    });
    this.updateHandler = initUpdateHandler(updateHandler);
    initSearcher(prev);
    // Initialize the RestManager
    restManager = initRestManager();
    // Finally tell anyone who wants to know
    resourceLoader.inform(resourceLoader);
    resourceLoader.inform(this); // last call before the latch is released.
  } catch (Throwable e) {
    // release the latch, otherwise we block trying to do the close. This
    // should be fine, since counting down on a latch of 0 is still fine
    latch.countDown();
    if (e instanceof OutOfMemoryError) {
      throw (OutOfMemoryError)e;
    }
    try {
      // close down the searcher and any other resources, if it exists, as this
      // is not recoverable
      close();
    } catch (Throwable t) {
      if (t instanceof OutOfMemoryError) {
        throw (OutOfMemoryError) t;
      }
      log.error("Error while closing", t);
    }
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e.getMessage(), e);
  } finally {
    // allow firstSearcher events to fire and make sure it is released
    latch.countDown();
  }
  infoRegistry.put("core", this);
  // register any SolrInfoMBeans SolrResourceLoader initialized
  //
  // this must happen after the latch is released, because a JMX server impl may
  // choose to block on registering until properties can be fetched from an MBean,
  // and a SolrCoreAware MBean may have properties that depend on getting a Searcher
  // from the core.
  resourceLoader.inform(infoRegistry);
  // Allow the directory factory to register MBeans as well
  for (SolrInfoMBean bean : directoryFactory.offerMBeans()) {
    log.debug("Registering JMX bean [{}] from directory factory.", bean.getName());
    // Not worried about concurrency, so no reason to use putIfAbsent
    if (infoRegistry.containsKey(bean.getName())){
      log.info("Ignoring JMX bean [{}] due to name conflict.", bean.getName());
    } else {
      infoRegistry.put(bean.getName(), bean);
    }
  }
  // seed version buckets with max from index during core initialization ... requires a searcher!
  seedVersionBuckets();
  bufferUpdatesIfConstructing(coreDescriptor);
  // For debugging
  // numOpens.incrementAndGet();
  // openHandles.put(this, new RuntimeException("unclosed core - name:" + getName() + " refs: " + refCount.get()));
  this.ruleExpiryLock = new ReentrantLock();
  registerConfListener();
}
/**
 * Seeds the update log's version buckets with the highest version found in
 * the index, using the realtime searcher. A no-op when there is no update
 * handler or update log; logs a warning when no searcher is available.
 */
public void seedVersionBuckets() {
  final UpdateHandler uh = getUpdateHandler();
  if (uh == null || uh.getUpdateLog() == null) {
    return;
  }
  final RefCounted<SolrIndexSearcher> newestSearcher = getRealtimeSearcher();
  if (newestSearcher == null) {
    log.warn("No searcher available! Cannot seed version buckets with max from index.");
    return;
  }
  try {
    uh.getUpdateLog().seedBucketsWithHighestVersion(newestSearcher.get());
  } finally {
    // always give the searcher reference back
    newestSearcher.decref();
  }
}
/** Set UpdateLog to buffer updates if the slice is in construction.
 *  Only applies in SolrCloud (ZooKeeper-aware) mode; also warns when the
 *  /get realtime handler is missing, since PeerSync depends on it. */
private void bufferUpdatesIfConstructing(CoreDescriptor coreDescriptor) {
  final CoreContainer cc = coreDescriptor.getCoreContainer();
  if (cc != null && cc.isZooKeeperAware()) {
    if (reqHandlers.get("/get") == null) {
      log.warn("WARNING: RealTimeGetHandler is not registered at /get. " +
          "SolrCloud will always use full index replication instead of the more efficient PeerSync method.");
    }
    // ZK pre-register would have already happened so we read slice properties now
    final ClusterState clusterState = cc.getZkController().getClusterState();
    final Slice slice = clusterState.getSlice(coreDescriptor.getCloudDescriptor().getCollectionName(),
        coreDescriptor.getCloudDescriptor().getShardId());
    if (slice.getState() == Slice.State.CONSTRUCTION) {
      // set update log to buffer before publishing the core
      getUpdateHandler().getUpdateLog().bufferUpdates();
    }
  }
}
/**
 * Opens this core's first searcher. During a core reload, the previous core's
 * IndexWriter is borrowed (via newReaderCreator) so the initial reader comes
 * from the old writer; the callable and writer reference are always cleared /
 * released afterwards.
 */
private void initSearcher(SolrCore prev) throws IOException {
  // use the (old) writer to open the first searcher
  RefCounted<IndexWriter> iwRef = null;
  if (prev != null) {
    iwRef = prev.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
    if (iwRef != null) {
      final IndexWriter iw = iwRef.get();
      final SolrCore core = this;
      newReaderCreator = new Callable<DirectoryReader>() {
        // this is used during a core reload
        @Override
        public DirectoryReader call() throws Exception {
          return indexReaderFactory.newReader(iw, core);
        }
      };
    }
  }
  try {
    getSearcher(false, false, null, true);
  } finally {
    // the creator is only valid for this first open; release the old writer ref
    newReaderCreator = null;
    if (iwRef != null) {
      iwRef.decref();
    }
  }
}
/**
 * Builds this core's UpdateHandler from solrconfig.xml (defaulting to
 * {@link DirectUpdateHandler2}) and registers it in the info registry. When a
 * previous handler is supplied (core reload) its state is inherited via the
 * two-arg creation path.
 */
private UpdateHandler initUpdateHandler(UpdateHandler updateHandler) {
  String className = solrConfig.getUpdateHandlerInfo().className;
  if (className == null) {
    className = DirectUpdateHandler2.class.getName();
  }
  final UpdateHandler handler = (updateHandler == null)
      ? createUpdateHandler(className)
      : createUpdateHandler(className, updateHandler);
  infoRegistry.put("updateHandler", handler);
  return handler;
}
/**
 * Resolves the schema for this core: the one passed in, or a freshly built
 * default schema when null. If the schema's SimilarityFactory is
 * SolrCoreAware, it is informed of this core immediately.
 */
private IndexSchema initSchema(SolrConfig config, IndexSchema schema) {
  IndexSchema resolved = schema;
  if (resolved == null) {
    resolved = IndexSchemaFactory.buildIndexSchema(IndexSchema.DEFAULT_SCHEMA_FILE, config);
  }
  final SimilarityFactory simFactory = resolved.getSimilarityFactory();
  if (simFactory instanceof SolrCoreAware) {
    // Similarity needs SolrCore before inform() is called on all registered SolrCoreAware listeners below
    ((SolrCoreAware) simFactory).inform(this);
  }
  return resolved;
}
/**
 * Creates the info registry: a JMX-backed monitored map when JMX is enabled
 * in the config, otherwise a plain ConcurrentHashMap.
 */
private Map<String,SolrInfoMBean> initInfoRegistry(String name, SolrConfig config) {
  if (!config.jmxConfig.enabled) {
    log.info("JMX monitoring not detected for core: " + name);
    return new ConcurrentHashMap<>();
  }
  return new JmxMonitoredMap<String, SolrInfoMBean>(name, String.valueOf(this.hashCode()), config.jmxConfig);
}
/**
 * In SolrCloud mode, verifies the schema declares a usable version field,
 * rewrapping the failure with a cloud-specific message. No-op outside cloud
 * mode (no CloudDescriptor).
 *
 * @throws SolrException when running in cloud mode and the version field check fails
 */
private void checkVersionFieldExistsInSchema(IndexSchema schema, CoreDescriptor coreDescriptor) {
  if (null != coreDescriptor.getCloudDescriptor()) {
    // we are evidently running in cloud mode.
    //
    // In cloud mode, version field is required for correct consistency
    // ideally this check would be more fine grained, and individual features
    // would assert it when they initialize, but DistributedUpdateProcessor
    // is currently a big ball of wax that does more then just distributing
    // updates (ie: partial document updates), so it needs to work in no cloud
    // mode as well, and can't assert version field support on init.
    try {
      VersionInfo.getAndCheckVersionField(schema);
    } catch (SolrException e) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
          "Schema will not work with SolrCloud mode: " +
          e.getMessage(), e);
    }
  }
}
/**
 * Resolves the data directory for this core. Resolution order when the
 * explicit {@code dataDir} argument is null: the config's dataDir (only when
 * the descriptor uses the default), then the descriptor's dataDir — made
 * absolute via the directory factory's data home when relative. The result is
 * always normalized.
 *
 * @throws SolrException wrapping any IOException from the directory factory
 */
private String initDataDir(String dataDir, SolrConfig config, CoreDescriptor coreDescriptor) {
  if (dataDir == null) {
    if (coreDescriptor.usingDefaultDataDir()) {
      dataDir = config.getDataDir();
    }
    if (dataDir == null) {
      try {
        dataDir = coreDescriptor.getDataDir();
        if (!directoryFactory.isAbsolute(dataDir)) {
          dataDir = directoryFactory.getDataHome(coreDescriptor);
        }
      } catch (IOException e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
      }
    }
  }
  return SolrResourceLoader.normalizeDir(dataDir);
}
/**
 * Resolves the update-log directory: the descriptor's explicit ulogDir when
 * set, otherwise the core's data dir — resolved against the instance
 * directory when it is a relative path.
 */
private String initUpdateLogDir(CoreDescriptor coreDescriptor) {
  String updateLogDir = coreDescriptor.getUlogDir();
  if (updateLogDir == null) {
    updateLogDir = dataDir;
    // idiom fix: was `... == false`
    if (!new File(updateLogDir).isAbsolute()) {
      updateLogDir = SolrResourceLoader.normalizeDir(coreDescriptor.getInstanceDir()) + updateLogDir;
    }
  }
  return updateLogDir;
}
/**
 * Builds the Lucene {@link Codec} for this core. Uses the configured
 * CodecFactory when present (initialized with its plugin args), otherwise a
 * factory returning the Lucene default codec. A non-core-aware default-style
 * factory cannot honor per-field postings/docValues formats, so those schema
 * settings are rejected up-front with a clear error.
 *
 * @throws SolrException if the schema configures a per-field format the codec cannot support
 */
private Codec initCodec(SolrConfig solrConfig, final IndexSchema schema) {
  final PluginInfo info = solrConfig.getPluginInfo(CodecFactory.class.getName());
  final CodecFactory factory;
  if (info != null) {
    factory = schema.getResourceLoader().newInstance(info.className, CodecFactory.class);
    factory.init(info.initArgs);
  } else {
    factory = new CodecFactory() {
      @Override
      public Codec getCodec() {
        return Codec.getDefault();
      }
    };
  }
  if (factory instanceof SolrCoreAware) {
    // CodecFactory needs SolrCore before inform() is called on all registered
    // SolrCoreAware listeners, at the end of the SolrCore constructor
    ((SolrCoreAware)factory).inform(this);
  } else {
    for (FieldType ft : schema.getFieldTypes().values()) {
      if (null != ft.getPostingsFormat()) {
        String msg = "FieldType '" + ft.getTypeName() + "' is configured with a postings format, but the codec does not support it: " + factory.getClass();
        log.error(msg);
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, msg);
      }
      if (null != ft.getDocValuesFormat()) {
        String msg = "FieldType '" + ft.getTypeName() + "' is configured with a docValues format, but the codec does not support it: " + factory.getClass();
        log.error(msg);
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, msg);
      }
    }
  }
  return factory.getCodec();
}
/**
 * Creates the distributed-stats cache: the implementation named in
 * solrconfig.xml when one is configured with a non-empty class name,
 * otherwise the default {@link LocalStatsCache}.
 */
private StatsCache initStatsCache() {
  final PluginInfo pluginInfo = solrConfig.getPluginInfo(StatsCache.class.getName());
  final boolean hasCustomImpl =
      pluginInfo != null && pluginInfo.className != null && pluginInfo.className.length() > 0;
  if (!hasCustomImpl) {
    log.info("Using default statsCache cache: " + LocalStatsCache.class.getName());
    return new LocalStatsCache();
  }
  final StatsCache cache = createInitInstance(pluginInfo, StatsCache.class, null,
      LocalStatsCache.class.getName());
  log.info("Using statsCache impl: " + cache.getClass().getName());
  return cache;
}
/**
 * Get the StatsCache.
 * @return the cache created by {@code initStatsCache} during core construction
 */
public StatsCache getStatsCache() {
  return statsCache;
}
/**
 * Load the request processors
 * Builds the chain map from solrconfig.xml. A default chain is resolved in
 * order: the chain flagged default, then the unnamed chain, then an implicit
 * Log → Distributed → Run chain. The default is registered under both the
 * null key and the empty-string key.
 */
private Map<String,UpdateRequestProcessorChain> loadUpdateProcessorChains() {
  Map<String, UpdateRequestProcessorChain> map = new HashMap<>();
  UpdateRequestProcessorChain def = initPlugins(map,UpdateRequestProcessorChain.class, UpdateRequestProcessorChain.class.getName());
  if(def == null){
    def = map.get(null);
  }
  if (def == null) {
    log.info("no updateRequestProcessorChain defined as default, creating implicit default");
    // construct the default chain
    UpdateRequestProcessorFactory[] factories = new UpdateRequestProcessorFactory[]{
        new LogUpdateProcessorFactory(),
        new DistributedUpdateProcessorFactory(),
        new RunUpdateProcessorFactory()
    };
    def = new UpdateRequestProcessorChain(Arrays.asList(factories), this);
  }
  map.put(null, def);
  map.put("", def);
  return map;
}
/** Returns the shared SolrCoreState (IndexWriter/DirectoryFactory holder) for this core. */
public SolrCoreState getSolrCoreState() {
  return solrCoreState;
}
/**
 * @return an update processor registered to the given name. Throw an exception if this chain is undefined
 * @throws SolrException (BAD_REQUEST) when no chain is registered under {@code name}
 */
public UpdateRequestProcessorChain getUpdateProcessingChain( final String name )
{
  UpdateRequestProcessorChain chain = updateProcessorChains.get( name );
  if( chain == null ) {
    throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,
        "unknown UpdateRequestProcessorChain: "+name );
  }
  return chain;
}
/**
 * Resolves the update chain for a request: the chain named by the
 * {@code update.chain} parameter (or the default chain when absent), possibly
 * augmented per-request from processor params via
 * {@link UpdateRequestProcessorChain#constructChain}.
 */
public UpdateRequestProcessorChain getUpdateProcessorChain(SolrParams params) {
  String chainName = params.get(UpdateParams.UPDATE_CHAIN);
  UpdateRequestProcessorChain defaultUrp = getUpdateProcessingChain(chainName);
  ProcessorInfo processorInfo = new ProcessorInfo(params);
  if (processorInfo.isEmpty()) return defaultUrp;
  return UpdateRequestProcessorChain.constructChain(defaultUrp, processorInfo, this);
}
/** Returns the bag of individually registered update-request-processor factories. */
public PluginBag<UpdateRequestProcessorFactory> getUpdateProcessors() {
  return updateProcessors;
}
// this core current usage count; a core starts life with one reference,
// and close() only tears down resources once this reaches zero.
private final AtomicInteger refCount = new AtomicInteger(1);
/** expert: increments the core reference count */
public void open() {
  refCount.incrementAndGet();
}
/**
 * Close all resources allocated by the core if it is no longer in use...
 * <ul>
 * <li>searcher</li>
 * <li>updateHandler</li>
 * <li>all CloseHooks will be notified</li>
 * <li>All MBeans will be unregistered from MBeanServer if JMX was enabled
 * </li>
 * </ul>
 * <p>
 * The behavior of this method is determined by the result of decrementing
 * the core's reference count (A core is created with a reference count of 1)...
 * </p>
 * <ul>
 * <li>If reference count is > 0, the usage count is decreased by 1 and no
 * resources are released.
 * </li>
 * <li>If reference count is == 0, the resources are released.
 * <li>If reference count is < 0, and error is logged and no further action
 * is taken.
 * </li>
 * </ul>
 * @see #isClosed()
 */
@Override
public void close() {
  int count = refCount.decrementAndGet();
  if (count > 0) return; // close is called often, and only actually closes if nothing is using it.
  if (count < 0) {
    log.error("Too many close [count:{}] on {}. Please report this exception to solr-user@lucene.apache.org", count, this );
    assert false : "Too many closes on SolrCore";
    return;
  }
  log.info(logid+" CLOSING SolrCore " + this);
  // Each teardown stage below catches Throwable so a failure in one stage
  // cannot prevent the remaining resources from being released; only Errors
  // are re-thrown.
  if( closeHooks != null ) {
    for( CloseHook hook : closeHooks ) {
      try {
        hook.preClose( this );
      } catch (Throwable e) {
        SolrException.log(log, e);
        if (e instanceof Error) {
          throw (Error) e;
        }
      }
    }
  }
  if(reqHandlers != null) reqHandlers.close();
  responseWriters.close();
  searchComponents.close();
  qParserPlugins.close();
  valueSourceParsers.close();
  transformerFactories.close();
  if (memClassLoader != null) {
    try {
      memClassLoader.close();
    } catch (Exception e) {
    }
  }
  try {
    if (null != updateHandler) {
      updateHandler.close();
    }
  } catch (Throwable e) {
    SolrException.log(log,e);
    if (e instanceof Error) {
      throw (Error) e;
    }
  }
  // decref the shared core state; coreStateClosed is true only when this core
  // held the last reference, which gates the directoryFactory.close() below
  boolean coreStateClosed = false;
  try {
    if (solrCoreState != null) {
      if (updateHandler instanceof IndexWriterCloser) {
        coreStateClosed = solrCoreState.decrefSolrCoreState((IndexWriterCloser) updateHandler);
      } else {
        coreStateClosed = solrCoreState.decrefSolrCoreState(null);
      }
    }
  } catch (Throwable e) {
    SolrException.log(log, e);
    if (e instanceof Error) {
      throw (Error) e;
    }
  }
  try {
    ExecutorUtil.shutdownAndAwaitTermination(searcherExecutor);
  } catch (Throwable e) {
    SolrException.log(log, e);
    if (e instanceof Error) {
      throw (Error) e;
    }
  }
  try {
    // Since we waited for the searcherExecutor to shut down,
    // there should be no more searchers warming in the background
    // that we need to take care of.
    //
    // For the case that a searcher was registered *before* warming
    // then the searchExecutor will throw an exception when getSearcher()
    // tries to use it, and the exception handling code should close it.
    closeSearcher();
  } catch (Throwable e) {
    SolrException.log(log,e);
    if (e instanceof Error) {
      throw (Error) e;
    }
  }
  try {
    infoRegistry.clear();
  } catch (Throwable e) {
    SolrException.log(log, e);
    if (e instanceof Error) {
      throw (Error) e;
    }
  }
  if (coreStateClosed) {
    try {
      directoryFactory.close();
    } catch (Throwable e) {
      SolrException.log(log, e);
      if (e instanceof Error) {
        throw (Error) e;
      }
    }
  }
  if( closeHooks != null ) {
    for( CloseHook hook : closeHooks ) {
      try {
        hook.postClose( this );
      } catch (Throwable e) {
        SolrException.log(log, e);
        if (e instanceof Error) {
          throw (Error) e;
        }
      }
    }
  }
  // For debugging
  // numCloses.incrementAndGet();
  // openHandles.remove(this);
}
/** Current core usage count. */
public int getOpenCount() {
  return refCount.get();
}
/** Whether this core is closed (reference count has dropped to zero or below). */
public boolean isClosed() {
  return refCount.get() <= 0;
}
/**
 * Last-resort leak detector: logs an error if this core is garbage-collected
 * while its reference count is non-zero (i.e. it was never fully closed).
 */
@Override
protected void finalize() throws Throwable {
  try {
    if (getOpenCount() != 0) {
      log.error("REFCOUNT ERROR: unreferenced " + this + " (" + getName()
          + ") has a reference count of " + getOpenCount());
    }
  } finally {
    super.finalize();
  }
}
// Hooks notified before and after close(); lazily allocated on first registration.
private Collection<CloseHook> closeHooks = null;
/**
 * Add a close callback hook
 */
public void addCloseHook( CloseHook hook )
{
  if (closeHooks == null) {
    closeHooks = new ArrayList<>();
  }
  closeHooks.add(hook);
}
/** @lucene.internal
 * Debugging aid only. No non-test code should be released with uncommented verbose() calls. */
public static boolean VERBOSE = Boolean.parseBoolean(System.getProperty("tests.verbose","false"));
/** Logs the given arguments as one space-separated "VERBOSE:" line; no-op unless {@link #VERBOSE}. */
public static void verbose(Object... args) {
  if (!VERBOSE) return;
  StringBuilder sb = new StringBuilder("VERBOSE:");
  // sb.append(Thread.currentThread().getName());
  // sb.append(':');
  for (Object o : args) {
    sb.append(' ');
    sb.append(o==null ? "(null)" : o.toString());
  }
  // System.out.println(sb.toString());
  log.info(sb.toString());
}
////////////////////////////////////////////////////////////////////////////////
// Request Handler
////////////////////////////////////////////////////////////////////////////////
/**
 * Get the request handler registered to a given name.
 *
 * The name is normalized (see {@code RequestHandlers.normalize}) before lookup.
 * This function is thread safe.
 */
public SolrRequestHandler getRequestHandler(String handlerName) {
  return RequestHandlerBase.getRequestHandler(RequestHandlers.normalize(handlerName), reqHandlers.handlers);
}
/**
 * Returns an unmodifiable Map containing the registered handlers
 */
public PluginBag<SolrRequestHandler> getRequestHandlers() {
  return reqHandlers.handlers;
}
/**
 * Registers a handler at the specified location. If one exists there, it will be replaced.
 * To remove a handler, register <code>null</code> at its path
 *
 * Once registered the handler can be accessed through:
 * <pre>
 *   http://${host}:${port}/${context}/${handlerName}
 * or:
 *   http://${host}:${port}/${context}/select?qt=${handlerName}
 * </pre>
 *
 * Handlers <em>must</em> be initialized before getting registered. Registered
 * handlers can immediately accept requests.
 *
 * This call is thread safe.
 *
 * @return the previous <code>SolrRequestHandler</code> registered to this name <code>null</code> if none.
 */
public SolrRequestHandler registerRequestHandler(String handlerName, SolrRequestHandler handler) {
  return reqHandlers.register(handlerName,handler);
}
/**
 * Register the default search components
 * After registration, if a loaded component is a HighlightComponent (possibly
 * a custom subclass registered under a different name), it is also aliased
 * under the standard highlight component name so lookups by that name work.
 */
private void loadSearchComponents()
{
  Map<String, SearchComponent> instances = createInstances(SearchComponent.standard_components);
  for (Map.Entry<String, SearchComponent> e : instances.entrySet()) e.getValue().setName(e.getKey());
  searchComponents.init(instances, this);
  for (String name : searchComponents.keySet()) {
    if (searchComponents.isLoaded(name) && searchComponents.get(name) instanceof HighlightComponent) {
      if (!HighlightComponent.COMPONENT_NAME.equals(name)) {
        searchComponents.put(HighlightComponent.COMPONENT_NAME, searchComponents.getRegistry().get(name));
      }
      // only the first matching highlight component is aliased
      break;
    }
  }
}
/**
 * @return a Search Component registered to a given name. Throw an exception if the component is undefined
 */
public SearchComponent getSearchComponent(String name) {
return searchComponents.get(name);
}
/**
 * Accessor for all the Search Components
 * @return the live {@link PluginBag} of Search Components
 */
public PluginBag<SearchComponent> getSearchComponents() {
return searchComponents;
}
////////////////////////////////////////////////////////////////////////////////
// Update Handler
////////////////////////////////////////////////////////////////////////////////
/**
 * RequestHandlers need access to the updateHandler so they can all talk to the
 * same RAM indexer.
 */
public UpdateHandler getUpdateHandler() {
return updateHandler;
}
////////////////////////////////////////////////////////////////////////////////
// Searcher Control
////////////////////////////////////////////////////////////////////////////////
// The current searcher used to service queries.
// Don't access this directly!!!! use getSearcher() to
// get it (and it will increment the ref count at the same time).
// This reference is protected by searcherLock.
private RefCounted<SolrIndexSearcher> _searcher;
// All of the normal open searchers. Don't access this directly.
// protected by synchronizing on searcherLock.
private final LinkedList<RefCounted<SolrIndexSearcher>> _searchers = new LinkedList<>();
private final LinkedList<RefCounted<SolrIndexSearcher>> _realtimeSearchers = new LinkedList<>();
// single-threaded executor: warming/registration tasks rely on FIFO completion order
final ExecutorService searcherExecutor = ExecutorUtil.newMDCAwareSingleThreadExecutor(
new DefaultSolrThreadFactory("searcherExecutor"));
private int onDeckSearchers; // number of searchers preparing
// Lock ordering: one can acquire the openSearcherLock and then the searcherLock, but not vice-versa.
// NOTE(review): searcherLock and openSearcherLock appear never reassigned — could be final; confirm.
private Object searcherLock = new Object(); // the sync object for the searcher
private ReentrantLock openSearcherLock = new ReentrantLock(true); // used to serialize opens/reopens for absolute ordering
private final int maxWarmingSearchers; // max number of on-deck searchers allowed
private final int slowQueryThresholdMillis; // threshold above which a query is considered slow
private RefCounted<SolrIndexSearcher> realtimeSearcher;
private Callable<DirectoryReader> newReaderCreator;
/**
 * Return a registered {@link RefCounted}<{@link SolrIndexSearcher}> with
 * the reference count incremented. It <b>must</b> be decremented when no longer needed.
 * This method should not be called from SolrCoreAware.inform() since it can result
 * in a deadlock if useColdSearcher==false.
 * If handling a normal request, the searcher should be obtained from
 * {@link org.apache.solr.request.SolrQueryRequest#getSearcher()} instead.
 */
public RefCounted<SolrIndexSearcher> getSearcher() {
// forceNew=false, returnSearcher=true, no waitSearcher future
return getSearcher(false,true,null);
}
/**
 * Returns the currently registered searcher with its reference count incremented,
 * or null if no searcher is registered. Callers own one reference and must
 * decrement it when done.
 */
public RefCounted<SolrIndexSearcher> getRegisteredSearcher() {
  synchronized (searcherLock) {
    final RefCounted<SolrIndexSearcher> current = _searcher;
    if (current == null) {
      return null;
    }
    current.incref();
    return current;
  }
}
/**
 * Return the newest normal {@link RefCounted}<{@link SolrIndexSearcher}> with
 * the reference count incremented. It <b>must</b> be decremented when no longer needed.
 * If no searcher is currently open, then if openNew==true a new (realtime) searcher
 * will be opened, or null is returned if openNew==false.
 */
public RefCounted<SolrIndexSearcher> getNewestSearcher(boolean openNew) {
  RefCounted<SolrIndexSearcher> newest = null;
  synchronized (searcherLock) {
    if (!_searchers.isEmpty()) {
      newest = _searchers.getLast();
      newest.incref();
    }
  }
  if (newest != null) {
    return newest;
  }
  // nothing open: optionally fall back to (possibly opening) the realtime searcher,
  // which must be done outside searcherLock to respect the lock ordering
  return openNew ? getRealtimeSearcher() : null;
}
/** Gets the latest real-time searcher w/o forcing open a new searcher if one already exists.
 * The reference count will be incremented.
 */
public RefCounted<SolrIndexSearcher> getRealtimeSearcher() {
// fast path: an existing realtime searcher can be handed out under searcherLock alone
synchronized (searcherLock) {
if (realtimeSearcher != null) {
realtimeSearcher.incref();
return realtimeSearcher;
}
}
// use the searcher lock to prevent multiple people from trying to open at once
openSearcherLock.lock();
try {
// try again (double-check: another thread may have opened one while we waited)
synchronized (searcherLock) {
if (realtimeSearcher != null) {
realtimeSearcher.incref();
return realtimeSearcher;
}
}
// force a new searcher open
return openNewSearcher(true, true);
} finally {
openSearcherLock.unlock();
}
}
/** Convenience overload: see {@link #getSearcher(boolean, boolean, Future[], boolean)}
 * with updateHandlerReopens=false. */
public RefCounted<SolrIndexSearcher> getSearcher(boolean forceNew, boolean returnSearcher, final Future[] waitSearcher) {
return getSearcher(forceNew, returnSearcher, waitSearcher, false);
}
/** Opens a new searcher and returns a RefCounted<SolrIndexSearcher> with its reference incremented.
 *
 * "realtime" means that we need to open quickly for a realtime view of the index, hence don't do any
 * autowarming and add to the _realtimeSearchers queue rather than the _searchers queue (so it won't
 * be used for autowarming by a future normal searcher). A "realtime" searcher will currently never
 * become "registered" (since it currently lacks caching).
 *
 * realtimeSearcher is updated to the latest opened searcher, regardless of the value of "realtime".
 *
 * This method acquires openSearcherLock - do not call with searcherLock held!
 */
public RefCounted<SolrIndexSearcher> openNewSearcher(boolean updateHandlerReopens, boolean realtime) {
if (isClosed()) { // catch some errors quicker
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "openNewSearcher called on closed core");
}
SolrIndexSearcher tmp;
RefCounted<SolrIndexSearcher> newestSearcher = null;
openSearcherLock.lock();
try {
String newIndexDir = getNewIndexDir();
String indexDirFile = null;
String newIndexDirFile = null;
// if it's not a normal near-realtime update, check that paths haven't changed.
if (!updateHandlerReopens) {
indexDirFile = getDirectoryFactory().normalize(getIndexDir());
newIndexDirFile = getDirectoryFactory().normalize(newIndexDir);
}
synchronized (searcherLock) {
newestSearcher = realtimeSearcher;
if (newestSearcher != null) {
newestSearcher.incref(); // the matching decref is in the finally block
}
}
// Reopen path: reuse the existing reader when possible (NRT from the writer if present).
if (newestSearcher != null && (updateHandlerReopens || indexDirFile.equals(newIndexDirFile))) {
DirectoryReader newReader;
DirectoryReader currentReader = newestSearcher.get().getRawReader();
// SolrCore.verbose("start reopen from",previousSearcher,"writer=",writer);
RefCounted<IndexWriter> writer = getUpdateHandler().getSolrCoreState()
.getIndexWriter(null);
try {
if (writer != null) {
// if in NRT mode, open from the writer
newReader = DirectoryReader.openIfChanged(currentReader, writer.get(), true);
} else {
// verbose("start reopen without writer, reader=", currentReader);
newReader = DirectoryReader.openIfChanged(currentReader);
// verbose("reopen result", newReader);
}
} finally {
if (writer != null) {
writer.decref();
}
}
if (newReader == null) { // the underlying index has not changed at all
if (realtime) {
// if this is a request for a realtime searcher, just return the same searcher
newestSearcher.incref();
return newestSearcher;
} else if (newestSearcher.get().isCachingEnabled() && newestSearcher.get().getSchema() == getLatestSchema()) {
// absolutely nothing has changed, can use the same searcher
// but log a message about it to minimize confusion
newestSearcher.incref();
log.info("SolrIndexSearcher has not changed - not re-opening: " + newestSearcher.get().getName());
return newestSearcher;
} // ELSE: open a new searcher against the old reader...
currentReader.incRef();
newReader = currentReader;
}
// for now, turn off caches if this is for a realtime reader
// (caches take a little while to instantiate)
final boolean useCaches = !realtime;
final String newName = realtime ? "realtime" : "main";
tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), newName,
newReader, true, useCaches, true, directoryFactory);
} else {
// newestSearcher == null at this point
if (newReaderCreator != null) {
// this is set in the constructor if there is a currently open index writer
// so that we pick up any uncommitted changes and so we don't go backwards
// in time on a core reload
DirectoryReader newReader = newReaderCreator.call();
tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(),
(realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
} else {
RefCounted<IndexWriter> writer = getUpdateHandler().getSolrCoreState().getIndexWriter(this);
DirectoryReader newReader = null;
try {
newReader = indexReaderFactory.newReader(writer.get(), this);
} finally {
writer.decref();
}
tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(),
(realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
}
}
List<RefCounted<SolrIndexSearcher>> searcherList = realtime ? _realtimeSearchers : _searchers;
RefCounted<SolrIndexSearcher> newSearcher = newHolder(tmp, searcherList); // refcount now at 1
// Increment reference again for "realtimeSearcher" variable. It should be at 2 after.
// When it's decremented by both the caller of this method, and by realtimeSearcher being replaced,
// it will be closed.
newSearcher.incref();
synchronized (searcherLock) {
if (realtimeSearcher != null) {
realtimeSearcher.decref();
}
realtimeSearcher = newSearcher;
searcherList.add(realtimeSearcher);
}
return newSearcher;
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error opening new searcher", e);
}
finally {
openSearcherLock.unlock();
if (newestSearcher != null) {
newestSearcher.decref();
}
}
}
/**
 * Get a {@link SolrIndexSearcher} or start the process of creating a new one.
 * <p>
 * The registered searcher is the default searcher used to service queries.
 * A searcher will normally be registered after all of the warming
 * and event handlers (newSearcher or firstSearcher events) have run.
 * In the case where there is no registered searcher, the newly created searcher will
 * be registered before running the event handlers (a slow searcher is better than no searcher).
 *
 * <p>
 * These searchers contain read-only IndexReaders. To access a non read-only IndexReader,
 * see newSearcher(String name, boolean readOnly).
 *
 * <p>
 * If <tt>forceNew==true</tt> then
 * A new searcher will be opened and registered regardless of whether there is already
 * a registered searcher or other searchers in the process of being created.
 * <p>
 * If <tt>forceNew==false</tt> then:<ul>
 *   <li>If a searcher is already registered, that searcher will be returned</li>
 *   <li>If no searcher is currently registered, but at least one is in the process of being created, then
 * this call will block until the first searcher is registered</li>
 *   <li>If no searcher is currently registered, and no searchers in the process of being registered, a new
 * searcher will be created.</li>
 * </ul>
 * <p>
 * If <tt>returnSearcher==true</tt> then a {@link RefCounted}<{@link SolrIndexSearcher}> will be returned with
 * the reference count incremented. It <b>must</b> be decremented when no longer needed.
 * <p>
 * If <tt>waitSearcher!=null</tt> and a new {@link SolrIndexSearcher} was created,
 * then it is filled in with a Future that will return after the searcher is registered. The Future may be set to
 * <tt>null</tt> in which case the SolrIndexSearcher created has already been registered at the time
 * this method returned.
 * <p>
 * @param forceNew if true, force the open of a new index searcher regardless if there is already one open.
 * @param returnSearcher if true, returns a {@link SolrIndexSearcher} holder with the refcount already incremented.
 * @param waitSearcher if non-null, will be filled in with a {@link Future} that will return after the new searcher is registered.
 * @param updateHandlerReopens if true, the UpdateHandler will be used when reopening a {@link SolrIndexSearcher}.
 */
public RefCounted<SolrIndexSearcher> getSearcher(boolean forceNew, boolean returnSearcher, final Future[] waitSearcher, boolean updateHandlerReopens) {
// it may take some time to open an index.... we may need to make
// sure that two threads aren't trying to open one at the same time
// if it isn't necessary.
synchronized (searcherLock) {
// see if we can return the current searcher
if (_searcher!=null && !forceNew) {
if (returnSearcher) {
_searcher.incref();
return _searcher;
} else {
return null;
}
}
// check to see if we can wait for someone else's searcher to be set
if (onDeckSearchers>0 && !forceNew && _searcher==null) {
try {
searcherLock.wait();
} catch (InterruptedException e) {
log.info(SolrException.toStr(e));
}
}
// check again: see if we can return right now
// (wait() may have been ended by a notify from registerSearcher, or spuriously)
if (_searcher!=null && !forceNew) {
if (returnSearcher) {
_searcher.incref();
return _searcher;
} else {
return null;
}
}
// At this point, we know we need to open a new searcher...
// first: increment count to signal other threads that we are
// opening a new searcher.
onDeckSearchers++;
if (onDeckSearchers < 1) {
// should never happen... just a sanity check
log.error(logid+"ERROR!!! onDeckSearchers is " + onDeckSearchers);
onDeckSearchers=1; // reset
} else if (onDeckSearchers > maxWarmingSearchers) {
onDeckSearchers--;
String msg="Error opening new searcher. exceeded limit of maxWarmingSearchers="+maxWarmingSearchers + ", try again later.";
log.warn(logid+""+ msg);
// HTTP 503==service unavailable, or 409==Conflict
throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,msg);
} else if (onDeckSearchers > 1) {
log.warn(logid+"PERFORMANCE WARNING: Overlapping onDeckSearchers=" + onDeckSearchers);
}
}
// a signal to decrement onDeckSearchers if something goes wrong.
final boolean[] decrementOnDeckCount=new boolean[]{true};
RefCounted<SolrIndexSearcher> currSearcherHolder = null; // searcher we are autowarming from
RefCounted<SolrIndexSearcher> searchHolder = null;
boolean success = false;
openSearcherLock.lock();
try {
searchHolder = openNewSearcher(updateHandlerReopens, false);
// the searchHolder will be incremented once already (and it will eventually be assigned to _searcher when registered)
// increment it again if we are going to return it to the caller.
if (returnSearcher) {
searchHolder.incref();
}
final RefCounted<SolrIndexSearcher> newSearchHolder = searchHolder;
final SolrIndexSearcher newSearcher = newSearchHolder.get();
boolean alreadyRegistered = false;
synchronized (searcherLock) {
if (_searcher == null) {
// if there isn't a current searcher then we may
// want to register this one before warming is complete instead of waiting.
if (solrConfig.useColdSearcher) {
registerSearcher(newSearchHolder);
decrementOnDeckCount[0]=false;
alreadyRegistered=true;
}
} else {
// get a reference to the current searcher for purposes of autowarming.
currSearcherHolder=_searcher;
currSearcherHolder.incref();
}
}
final SolrIndexSearcher currSearcher = currSearcherHolder==null ? null : currSearcherHolder.get();
Future future=null;
// if the underlying searcher has not changed, no warming is needed
if (newSearcher != currSearcher) {
// warm the new searcher based on the current searcher.
// should this go before the other event handlers or after?
if (currSearcher != null) {
future = searcherExecutor.submit(new Callable() {
@Override
public Object call() throws Exception {
try {
newSearcher.warm(currSearcher);
} catch (Throwable e) {
SolrException.log(log, e);
if (e instanceof Error) {
throw (Error) e;
}
}
return null;
}
});
}
if (currSearcher == null) {
future = searcherExecutor.submit(new Callable() {
@Override
public Object call() throws Exception {
try {
for (SolrEventListener listener : firstSearcherListeners) {
listener.newSearcher(newSearcher, null);
}
} catch (Throwable e) {
SolrException.log(log, null, e);
if (e instanceof Error) {
throw (Error) e;
}
}
return null;
}
});
}
if (currSearcher != null) {
future = searcherExecutor.submit(new Callable() {
@Override
public Object call() throws Exception {
try {
for (SolrEventListener listener : newSearcherListeners) {
listener.newSearcher(newSearcher, currSearcher);
}
} catch (Throwable e) {
SolrException.log(log, null, e);
if (e instanceof Error) {
throw (Error) e;
}
}
return null;
}
});
}
}
// WARNING: this code assumes a single threaded executor (that all tasks
// queued will finish first).
final RefCounted<SolrIndexSearcher> currSearcherHolderF = currSearcherHolder;
if (!alreadyRegistered) {
future = searcherExecutor.submit(
new Callable() {
@Override
public Object call() throws Exception {
try {
// registerSearcher will decrement onDeckSearchers and
// do a notify, even if it fails.
registerSearcher(newSearchHolder);
} catch (Throwable e) {
SolrException.log(log, e);
if (e instanceof Error) {
throw (Error) e;
}
} finally {
// we are all done with the old searcher we used
// for warming...
if (currSearcherHolderF!=null) currSearcherHolderF.decref();
}
return null;
}
}
);
}
if (waitSearcher != null) {
waitSearcher[0] = future;
}
success = true;
// Return the searcher as the warming tasks run in parallel
// callers may wait on the waitSearcher future returned.
return returnSearcher ? newSearchHolder : null;
} catch (Exception e) {
if (e instanceof SolrException) throw (SolrException)e;
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
} finally {
if (!success) {
// failure path: undo the onDeckSearchers increment and release any refs we took
synchronized (searcherLock) {
onDeckSearchers--;
if (onDeckSearchers < 0) {
// sanity check... should never happen
log.error(logid+"ERROR!!! onDeckSearchers after decrement=" + onDeckSearchers);
onDeckSearchers=0; // try and recover
}
// if we failed, we need to wake up at least one waiter to continue the process
searcherLock.notify();
}
if (currSearcherHolder != null) {
currSearcherHolder.decref();
}
if (searchHolder != null) {
searchHolder.decref(); // decrement 1 for _searcher (searchHolder will never become _searcher now)
if (returnSearcher) {
searchHolder.decref(); // decrement 1 because we won't be returning the searcher to the user
}
}
}
// we want to do this after we decrement onDeckSearchers so another thread
// doesn't increment first and throw a false warning.
openSearcherLock.unlock();
}
}
/**
 * Wraps a newly opened searcher in a {@link RefCounted} holder whose close()
 * removes it from the given searcher list (under searcherLock) and then closes
 * the underlying searcher. The returned holder starts with refcount 1.
 */
private RefCounted<SolrIndexSearcher> newHolder(SolrIndexSearcher newSearcher, final List<RefCounted<SolrIndexSearcher>> searcherList) {
RefCounted<SolrIndexSearcher> holder = new RefCounted<SolrIndexSearcher>(newSearcher) {
@Override
public void close() {
try {
synchronized(searcherLock) {
// it's possible for someone to get a reference via the _searchers queue
// and increment the refcount while RefCounted.close() is being called.
// we check the refcount again to see if this has happened and abort the close.
// This relies on the RefCounted class allowing close() to be called every
// time the counter hits zero.
if (refcount.get() > 0) return;
searcherList.remove(this);
}
resource.close();
} catch (Exception e) {
// do not allow decref() operations to fail since they are typically called in finally blocks
// and throwing another exception would be very unexpected.
SolrException.log(log, "Error closing searcher:" + this, e);
}
}
};
holder.incref(); // set ref count to 1 to account for this._searcher
return holder;
}
/** @return true if this core instance was created by reloading an existing core. */
public boolean isReloaded() {
return isReloaded;
}
// Take control of newSearcherHolder (which should have a reference count of at
// least 1 already. If the caller wishes to use the newSearcherHolder directly
// after registering it, then they should increment the reference count *before*
// calling this method.
//
// onDeckSearchers will also be decremented (it should have been incremented
// as a result of opening a new searcher).
private void registerSearcher(RefCounted<SolrIndexSearcher> newSearcherHolder) {
synchronized (searcherLock) {
try {
if (_searcher == newSearcherHolder) {
// trying to re-register the same searcher... this can now happen when a commit has been done but
// there were no changes to the index.
newSearcherHolder.decref(); // decref since the caller should have still incref'd (since they didn't know the searcher was the same)
return; // still execute the finally block to notify anyone waiting.
}
if (_searcher != null) {
_searcher.decref(); // dec refcount for this._searcher
_searcher=null;
}
_searcher = newSearcherHolder;
SolrIndexSearcher newSearcher = newSearcherHolder.get();
/***
// a searcher may have been warming asynchronously while the core was being closed.
// if this happens, just close the searcher.
if (isClosed()) {
// NOTE: this should not happen now - see close() for details.
// *BUT* if we left it enabled, this could still happen before
// close() stopped the executor - so disable this test for now.
log.error("Ignoring searcher register on closed core:" + newSearcher);
_searcher.decref();
}
***/
newSearcher.register(); // register subitems (caches)
log.info(logid+"Registered new searcher " + newSearcher);
} catch (Exception e) {
// an exception in register() shouldn't be fatal.
log(e);
} finally {
// wake up anyone waiting for a searcher
// even in the face of errors.
// NOTE: this runs on ALL exit paths, including the early return above.
onDeckSearchers--;
searcherLock.notifyAll();
}
}
}
/**
 * Releases the registered searcher and the realtime searcher (dropping this core's
 * references to them) and removes the "currentSearcher" entry from the info registry.
 */
public void closeSearcher() {
log.info(logid+"Closing main searcher on request.");
synchronized (searcherLock) {
if (realtimeSearcher != null) {
realtimeSearcher.decref();
realtimeSearcher = null;
}
if (_searcher != null) {
_searcher.decref(); // dec refcount for this._searcher
_searcher = null; // isClosed() does check this
infoRegistry.remove("currentSearcher");
}
}
}
/**
 * Executes a request against the given handler: decorates the response header,
 * invokes the handler, then logs the request (and flags slow queries when
 * slowQueryThresholdMillis >= 0).
 *
 * @throws SolrException (BAD_REQUEST) if handler is null
 */
public void execute(SolrRequestHandler handler, SolrQueryRequest req, SolrQueryResponse rsp) {
if (handler==null) {
String msg = "Null Request Handler '" +
req.getParams().get(CommonParams.QT) + "'";
if (log.isWarnEnabled()) log.warn(logid + msg + ":" + req);
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, msg);
}
preDecorateResponse(req, rsp);
if (requestLog.isDebugEnabled() && rsp.getToLog().size() > 0) {
// log request at debug in case something goes wrong and we aren't able to log later
requestLog.debug(rsp.getToLogAsString(logid));
}
// TODO: this doesn't seem to be working correctly and causes problems with the example server and distrib (for example /spell)
// if (req.getParams().getBool(ShardParams.IS_SHARD,false) && !(handler instanceof SearchHandler))
//   throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"isShard is only acceptable with search handlers");
handler.handleRequest(req,rsp);
postDecorateResponse(handler, req, rsp);
if (rsp.getToLog().size() > 0) {
if (requestLog.isInfoEnabled()) {
requestLog.info(rsp.getToLogAsString(logid));
}
if (log.isWarnEnabled() && slowQueryThresholdMillis >= 0) {
final long qtime = (long) (req.getRequestTimer().getTime());
if (qtime >= slowQueryThresholdMillis) {
log.warn("slow: " + rsp.getToLogAsString(logid));
}
}
}
}
/**
 * Installs the "responseHeader" NamedList on the response and seeds the response's
 * toLog list (webapp, path, and - subject to CommonParams.LOG_PARAMS_LIST filtering -
 * the request parameters) before the handler runs.
 */
public static void preDecorateResponse(SolrQueryRequest req, SolrQueryResponse rsp) {
// setup response header
final NamedList<Object> responseHeader = new SimpleOrderedMap<>();
rsp.add("responseHeader", responseHeader);
// toLog is a local ref to the same NamedList used by the response
NamedList<Object> toLog = rsp.getToLog();
// for back compat, we set these now just in case other code
// are expecting them during handleRequest
toLog.add("webapp", req.getContext().get("webapp"));
toLog.add(PATH, req.getContext().get(PATH));
final SolrParams params = req.getParams();
// logParamsList: null means log everything; empty string means log no params
final String lpList = params.get(CommonParams.LOG_PARAMS_LIST);
if (lpList == null) {
toLog.add("params", "{" + req.getParamString() + "}");
} else if (lpList.length() > 0) {
toLog.add("params", "{" + params.toFilteredSolrParams(Arrays.asList(lpList.split(","))).toString() + "}");
}
}
/** Put status, QTime, and possibly request handler and params, in the response header */
public static void postDecorateResponse
(SolrRequestHandler handler, SolrQueryRequest req, SolrQueryResponse rsp) {
// TODO should check that responseHeader has not been replaced by handler
NamedList<Object> responseHeader = rsp.getResponseHeader();
final int qtime=(int)(req.getRequestTimer().getTime());
// status 0 = success; SolrException code if thrown; 500 for any other exception
int status = 0;
Exception exception = rsp.getException();
if( exception != null ){
if( exception instanceof SolrException )
status = ((SolrException)exception).code();
else
status = 500;
}
responseHeader.add("status",status);
responseHeader.add("QTime",qtime);
if (rsp.getToLog().size() > 0) {
rsp.getToLog().add("status",status);
rsp.getToLog().add("QTime",qtime);
}
SolrParams params = req.getParams();
if( null != handler && params.getBool(CommonParams.HEADER_ECHO_HANDLER, false) ) {
responseHeader.add("handler", handler.getName() );
}
// Values for echoParams... false/true/all or false/explicit/all ???
String ep = params.get( CommonParams.HEADER_ECHO_PARAMS, null );
if( ep != null ) {
EchoParamStyle echoParams = EchoParamStyle.get( ep );
if( echoParams == null ) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid value '" + ep + "' for " + CommonParams.HEADER_ECHO_PARAMS
+ " parameter, use '" + EchoParamStyle.EXPLICIT + "' or '" + EchoParamStyle.ALL + "'" );
}
if( echoParams == EchoParamStyle.EXPLICIT ) {
responseHeader.add("params", req.getOriginalParams().toNamedList());
} else if( echoParams == EchoParamStyle.ALL ) {
responseHeader.add("params", req.getParams().toNamedList());
}
}
}
/** Logs the given throwable via {@link SolrException#log} with no extra message. */
final public static void log(Throwable e) {
SolrException.log(log,null,e);
}
/** @return the live {@link PluginBag} of registered query response writers. */
public PluginBag<QueryResponseWriter> getResponseWriters() {
return responseWriters;
}
// registry of response writers for this core; populated in initWriters()
private final PluginBag<QueryResponseWriter> responseWriters = new PluginBag<>(QueryResponseWriter.class, this);
// Built-in response writers keyed by their "wt" name; "standard" aliases "xml".
public static final Map<String ,QueryResponseWriter> DEFAULT_RESPONSE_WRITERS ;
static{
HashMap<String, QueryResponseWriter> m= new HashMap<>();
m.put("xml", new XMLResponseWriter());
m.put("standard", m.get("xml"));
m.put(CommonParams.JSON, new JSONResponseWriter());
m.put("python", new PythonResponseWriter());
m.put("php", new PHPResponseWriter());
m.put("phps", new PHPSerializedResponseWriter());
m.put("ruby", new RubyResponseWriter());
m.put("raw", new RawResponseWriter());
m.put(CommonParams.JAVABIN, new BinaryResponseWriter());
m.put("csv", new CSVResponseWriter());
m.put("xsort", new SortingResponseWriter());
m.put("schema.xml", new SchemaXmlResponseWriter());
m.put(ReplicationHandler.FILE_STREAM, getFileStreamWriter());
DEFAULT_RESPONSE_WRITERS = Collections.unmodifiableMap(m);
}
/**
 * Returns a writer for replication file streaming: streams the RawWriter stored
 * under ReplicationHandler.FILE_STREAM in the response values directly to the
 * output stream (no-op if that value is absent), using the binary content type.
 */
private static BinaryResponseWriter getFileStreamWriter() {
return new BinaryResponseWriter(){
@Override
public void write(OutputStream out, SolrQueryRequest req, SolrQueryResponse response) throws IOException {
RawWriter rawWriter = (RawWriter) response.getValues().get(ReplicationHandler.FILE_STREAM);
if(rawWriter!=null) rawWriter.write(out);
}
@Override
public String getContentType(SolrQueryRequest request, SolrQueryResponse response) {
return BinaryResponseParser.BINARY_CONTENT_TYPE;
}
};
}
/** @return this core's in-memory class loader (for runtime-loaded classes). */
public MemClassLoader getMemClassLoader() {
return memClassLoader;
}
/** Callback interface for writing raw bytes directly to a response output stream. */
public interface RawWriter {
public void write(OutputStream os) throws IOException ;
}
/** Configure the query response writers. There will always be a default writer; additional
 * writers may also be configured. */
private void initWriters() {
responseWriters.init(DEFAULT_RESPONSE_WRITERS, this);
// configure the default response writer; this one should never be null
if (responseWriters.getDefault() == null) responseWriters.setDefault("standard");
}
/** Finds a writer by name, or returns the default writer if not found. */
public final QueryResponseWriter getQueryResponseWriter(String writerName) {
// second arg 'true' requests fallback to the default writer
return responseWriters.get(writerName, true);
}
/** Returns the appropriate writer for a request. If the request specifies a writer via the
 * 'wt' parameter, attempts to find that one; otherwise return the default writer.
 */
public final QueryResponseWriter getQueryResponseWriter(SolrQueryRequest request) {
return getQueryResponseWriter(request.getParams().get(CommonParams.WT));
}
// registry of query parser plugins for this core
private final PluginBag<QParserPlugin> qParserPlugins = new PluginBag<>(QParserPlugin.class, this);
/** @return the {@link QParserPlugin} registered under the given name. */
public QParserPlugin getQueryPlugin(String parserName) {
return qParserPlugins.get(parserName);
}
// registries of function-query value source parsers and doc transformer factories
private final PluginBag<ValueSourceParser> valueSourceParsers = new PluginBag<>(ValueSourceParser.class, this);
private final PluginBag<TransformerFactory> transformerFactories = new PluginBag<>(TransformerFactory.class, this);
/**
 * Instantiates one object per entry of the supplied name-to-class map, via this
 * core's resource loader, preserving the map's iteration order.
 *
 * @param map registry of plugin name to implementation class
 * @return a LinkedHashMap of name to newly created instance, in the order of {@code map}
 * @throws SolrException (SERVER_ERROR) if any class cannot be instantiated
 */
@SuppressWarnings("unchecked")
<T> Map<String, T> createInstances(Map<String, Class<? extends T>> map) {
  Map<String, T> result = new LinkedHashMap<>();
  for (Map.Entry<String, Class<? extends T>> e : map.entrySet()) {
    try {
      Object o = getResourceLoader().newInstance(e.getValue().getName(), e.getValue());
      // newInstance returns Object; the cast is safe because the class token bounds T
      result.put(e.getKey(), (T) o);
    } catch (Exception exp) {
      // should never happen: callers pass classes shipped with Solr
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to instantiate class", exp);
    }
  }
  return result;
}
/** @return the {@link TransformerFactory} registered under the given name. */
public TransformerFactory getTransformerFactory(String name) {
return transformerFactories.get(name);
}
/** Registers (or replaces) a transformer factory under the given name. */
public void addTransformerFactory(String name, TransformerFactory factory){
transformerFactories.put(name, factory);
}
/**
 * @param registry The map to which the instance should be added to. The key is the name attribute
 * @param type the class or interface that the instance should extend or implement.
 * @param defClassName If PluginInfo does not have a classname, use this as the classname
 * @return The default instance . The one with (default=true)
 */
private <T> T initPlugins(Map<String, T> registry, Class<T> type, String defClassName) {
// plugin infos are looked up in solrconfig.xml by the type's fully-qualified name
return initPlugins(solrConfig.getPluginInfos(type.getName()), registry, type, defClassName);
}
/**
 * Creates and initializes one instance per {@link PluginInfo}, registering each
 * under its name attribute.
 *
 * @param pluginInfos the plugin configurations to instantiate
 * @param registry receives name -> instance entries (later names overwrite earlier ones)
 * @param type the class or interface instances must extend or implement
 * @param defClassName fallback classname when a PluginInfo has none
 * @return the instance marked default=true, or null if none is marked
 */
public <T> T initPlugins(List<PluginInfo> pluginInfos, Map<String, T> registry, Class<T> type, String defClassName) {
T def = null;
for (PluginInfo info : pluginInfos) {
T o = createInitInstance(info,type, type.getSimpleName(), defClassName);
registry.put(info.name, o);
if(info.isDefault()){
def = o;
}
}
return def;
}
/**
 * For a given List of PluginInfo return the created+initialized instances as a List,
 * in the same order.
 *
 * @param defClassName The default classname if PluginInfo#className == null
 * @return The instances initialized (an immutable empty list when pluginInfos is empty)
 */
public <T> List<T> initPlugins(List<PluginInfo> pluginInfos, Class<T> type, String defClassName) {
  if (pluginInfos.isEmpty()) {
    return Collections.emptyList();
  }
  final String msgName = type.getSimpleName();
  final List<T> instances = new ArrayList<>(pluginInfos.size());
  for (PluginInfo info : pluginInfos) {
    instances.add(createInitInstance(info, type, msgName, defClassName));
  }
  return instances;
}
/**
 *
 * @param registry The map to which the instance should be added to. The key is the name attribute
 * @param type The type of the Plugin. These should be standard ones registered by type.getName() in SolrConfig
 * @return The default if any
 */
public <T> T initPlugins(Map<String, T> registry, Class<T> type) {
// no fallback classname: PluginInfos must carry their own class
return initPlugins(registry, type, null);
}
/** @return the {@link ValueSourceParser} registered under the given name. */
public ValueSourceParser getValueSourceParser(String parserName) {
return valueSourceParsers.get(parserName);
}
/**
 * Manage anything that should be taken care of in case configs change
 */
private void initDeprecatedSupport()
{
// TODO -- this should be removed in deprecation release...
// Legacy <admin/gettableFiles>: emulate it by registering a ShowFileRequestHandler
// at /admin/file that hides every config file except those listed as gettable.
String gettable = solrConfig.get("admin/gettableFiles", null );
if( gettable != null ) {
log.warn(
"solrconfig.xml uses deprecated <admin/gettableFiles>, Please "+
"update your config to use the ShowFileRequestHandler." );
if( getRequestHandler( "/admin/file" ) == null ) {
NamedList<String> invariants = new NamedList<>();
// Hide everything...
Set<String> hide = new HashSet<>();
// filenames are compared upper-cased to make the hidden list case-insensitive
for (String file : solrConfig.getResourceLoader().listConfigDir()) {
hide.add(file.toUpperCase(Locale.ROOT));
}
// except the "gettable" list
StringTokenizer st = new StringTokenizer( gettable );
while( st.hasMoreTokens() ) {
hide.remove( st.nextToken().toUpperCase(Locale.ROOT) );
}
for( String s : hide ) {
invariants.add( ShowFileRequestHandler.HIDDEN, s );
}
NamedList<Object> args = new NamedList<>();
args.add( "invariants", invariants );
ShowFileRequestHandler handler = new ShowFileRequestHandler();
handler.init( args );
reqHandlers.register("/admin/file", handler);
log.warn( "adding ShowFileRequestHandler with hidden files: "+hide );
}
}
// warn (but don't rewrite) the deprecated boolean form of facet.sort
String facetSort = solrConfig.get("//bool[@name='facet.sort']", null);
if (facetSort != null) {
log.warn(
"solrconfig.xml uses deprecated <bool name='facet.sort'>. Please "+
"update your config to use <string name='facet.sort'>.");
}
}
/**
 * Creates and initializes a RestManager based on configuration args in solrconfig.xml.
 * RestManager provides basic storage support for managed resource data, such as to
 * persist stopwords to ZooKeeper if running in SolrCloud mode.
 *
 * Falls back to a plain {@link RestManager} with empty init args when no plugin
 * (or no class name / init args) is configured; storage is resolved per-collection.
 */
@SuppressWarnings("unchecked")
protected RestManager initRestManager() throws SolrException {
  PluginInfo restManagerPluginInfo =
      getSolrConfig().getPluginInfo(RestManager.class.getName());
  NamedList<String> initArgs = null;
  RestManager mgr = null;
  if (restManagerPluginInfo != null) {
    if (restManagerPluginInfo.className != null) {
      mgr = resourceLoader.newInstance(restManagerPluginInfo.className, RestManager.class);
    }
    if (restManagerPluginInfo.initArgs != null) {
      initArgs = (NamedList<String>) restManagerPluginInfo.initArgs;
    }
  }
  // Defaults when not (fully) configured.
  if (mgr == null)
    mgr = new RestManager();
  if (initArgs == null)
    initArgs = new NamedList<>();
  String collection = coreDescriptor.getCollectionName();
  StorageIO storageIO =
      ManagedResourceStorage.newStorageIO(collection, resourceLoader, initArgs);
  mgr.init(resourceLoader, initArgs, storageIO);
  return mgr;
}
/** The descriptor this core was created from. */
public CoreDescriptor getCoreDescriptor() {
  return coreDescriptor;
}
/** The index deletion policy wrapper in use by this core. */
public IndexDeletionPolicyWrapper getDeletionPolicy(){
  return solrDelPolicy;
}
/** Lock guarding rule-expiry work; callers are responsible for lock/unlock discipline. */
public ReentrantLock getRuleExpiryLock() {
  return ruleExpiryLock;
}
/////////////////////////////////////////////////////////////////////
// SolrInfoMBean stuff: Statistics and Module Info
/////////////////////////////////////////////////////////////////////
@Override
public String getVersion() {
  // Version string reported to the SolrInfoMBean registry.
  return SolrCore.version;
}
@Override
public String getDescription() {
  // Human-readable description for the SolrInfoMBean registry.
  return "SolrCore";
}
@Override
public Category getCategory() {
  // This bean belongs to the CORE category of the admin/stats UI.
  return Category.CORE;
}
@Override
public String getSource() {
  // No source-path information is published for this bean.
  return null;
}
@Override
public URL[] getDocs() {
  // No documentation links are published for this bean.
  return null;
}
/**
 * Snapshot of core-level statistics for the SolrInfoMBean registry: core name,
 * start time, reference count, directories and — when running in cloud mode —
 * collection/shard identity.
 */
@Override
public NamedList getStatistics() {
  NamedList<Object> lst = new SimpleOrderedMap<>();
  lst.add("coreName", name == null ? "(null)" : name);
  lst.add("startTime", new Date(startTime));
  lst.add("refCount", getOpenCount());
  lst.add("instanceDir", resourceLoader.getInstanceDir());
  lst.add("indexDir", getIndexDir());
  CoreDescriptor cd = getCoreDescriptor();
  if (cd != null) {
    // Fix: the original re-tested "null != cd" inside this branch (dead code)
    // and re-fetched the descriptor via getCoreDescriptor(); reuse cd instead.
    if (cd.getCoreContainer() != null) {
      lst.add("aliases", cd.getCoreContainer().getCoreNames(this));
    }
    CloudDescriptor cloudDesc = cd.getCloudDescriptor();
    if (cloudDesc != null) {
      String collection = cloudDesc.getCollectionName();
      lst.add("collection", collection == null ? "_notset_" : collection);
      String shard = cloudDesc.getShardId();
      lst.add("shard", shard == null ? "_auto_" : shard);
    }
  }
  return lst;
}
/** The Lucene {@link Codec} in use by this core. */
public Codec getCodec() {
  return codec;
}
/**
 * Flags this core's directories for deletion as part of unloading.
 * Index and data dirs are flagged immediately through the directory factory;
 * the instance dir is removed in a post-close hook because it remains in use
 * until the core has fully closed. Failures are logged, never thrown.
 */
public void unloadOnClose(boolean deleteIndexDir, boolean deleteDataDir, boolean deleteInstanceDir) {
  if (deleteIndexDir) {
    try {
      directoryFactory.remove(getIndexDir());
    } catch (Exception e) {
      SolrException.log(log, "Failed to flag index dir for removal for core:" + name + " dir:" + getIndexDir());
    }
  }
  if (deleteDataDir) {
    try {
      directoryFactory.remove(getDataDir(), true);
    } catch (Exception e) {
      SolrException.log(log, "Failed to flag data dir for removal for core:" + name + " dir:" + getDataDir());
    }
  }
  if (deleteInstanceDir) {
    addCloseHook(new CloseHook() {
      @Override
      public void preClose(SolrCore core) {
        // Nothing to do before close; deletion must wait until the core is closed.
      }
      @Override
      public void postClose(SolrCore core) {
        CoreDescriptor cd = core.getCoreDescriptor();
        if (cd != null) {
          File instanceDir = new File(cd.getInstanceDir());
          try {
            FileUtils.deleteDirectory(instanceDir);
          } catch (IOException e) {
            SolrException.log(log, "Failed to delete instance dir for core:"
                + core.getName() + " dir:" + instanceDir.getAbsolutePath());
          }
        }
      }
    });
  }
}
/**
 * Best-effort removal of an already-unloaded core's data and/or instance
 * directories. Each deletion failure is logged and swallowed so that one
 * failure does not prevent the other directory from being removed.
 */
public static void deleteUnloadedCore(CoreDescriptor cd, boolean deleteDataDir, boolean deleteInstanceDir) {
  if (deleteDataDir) {
    final File dataDirectory = new File(cd.getDataDir());
    try {
      FileUtils.deleteDirectory(dataDirectory);
    } catch (IOException e) {
      SolrException.log(log, "Failed to delete data dir for unloaded core:" + cd.getName()
          + " dir:" + dataDirectory.getAbsolutePath());
    }
  }
  if (deleteInstanceDir) {
    final File instanceDirectory = new File(cd.getInstanceDir());
    try {
      FileUtils.deleteDirectory(instanceDirectory);
    } catch (IOException e) {
      SolrException.log(log, "Failed to delete instance dir for unloaded core:" + cd.getName()
          + " dir:" + instanceDirectory.getAbsolutePath());
    }
  }
}
/**
 * Register to notify for any file change in the conf directory.
 * If the file change results in a core reload, then the listener
 * is not fired.
 */
public void addConfListener(Runnable runnable) {
  confListeners.add(runnable);
}
/**
 * Remove a previously registered conf-directory listener.
 *
 * @return true if the listener was registered and has been removed
 */
public boolean removeConfListener(Runnable runnable) {
  return confListeners.remove(runnable);
}
/**
 * Registers one listener for the entire conf directory in ZooKeeper. ZK fires
 * no event when children are modified, so writers are expected to 'touch' the
 * /conf directory by setting some data so that events are triggered.
 * No-op unless the resource loader is ZK-based.
 */
private void registerConfListener() {
  if (!(resourceLoader instanceof ZkSolrResourceLoader)) return;
  // Fix: the instanceof guard above already guarantees non-null, so the
  // original's follow-up "!= null" re-check was dead code and is removed.
  final ZkSolrResourceLoader zkSolrResourceLoader = (ZkSolrResourceLoader) resourceLoader;
  zkSolrResourceLoader.getZkController().registerConfListenerForCore(
      zkSolrResourceLoader.getConfigSetZkPath(),
      this,
      getConfListener(this, zkSolrResourceLoader));
}
/**
 * Builds the Runnable fired when anything under the core's ZK conf directory
 * changes. It compares the locally-known znode versions of the config overlay,
 * the solrconfig, and (when mutable) the managed schema against ZooKeeper; if
 * any is stale the core is reloaded, otherwise the core's registered conf
 * listeners are invoked for other conf-directory files.
 */
public static Runnable getConfListener(SolrCore core, ZkSolrResourceLoader zkSolrResourceLoader) {
  final String coreName = core.getName();
  final CoreContainer cc = core.getCoreDescriptor().getCoreContainer();
  final String overlayPath = zkSolrResourceLoader.getConfigSetZkPath() + "/" + ConfigOverlay.RESOURCE_NAME;
  final String solrConfigPath = zkSolrResourceLoader.getConfigSetZkPath() + "/" + core.getSolrConfig().getName();
  String schemaRes = null;
  if (core.getLatestSchema().isMutable() && core.getLatestSchema() instanceof ManagedIndexSchema) {
    ManagedIndexSchema mis = (ManagedIndexSchema) core.getLatestSchema();
    schemaRes = mis.getResourceName();
  }
  // NOTE: the "Schma" typo below is part of the existing identifier, kept as-is.
  final String managedSchmaResourcePath = schemaRes == null ? null : zkSolrResourceLoader.getConfigSetZkPath() + "/" + schemaRes;
  return new Runnable() {
    @Override
    public void run() {
      log.info("config update listener called for core {}", coreName);
      SolrZkClient zkClient = cc.getZkController().getZkClient();
      int solrConfigversion, overlayVersion, managedSchemaVersion = 0;
      SolrConfig cfg = null;
      // try-with-resources holds a temporary core reference so the core
      // cannot be closed out from under us while versions are read.
      try (SolrCore core = cc.solrCores.getCoreFromAnyList(coreName, true)) {
        if (core == null || core.isClosed()) return;
        cfg = core.getSolrConfig();
        // NOTE(review): these two variable names look swapped — solrConfigversion
        // is read from the overlay and overlayVersion from the solrconfig — but
        // the checkStale() pairings below are consistent with the values held,
        // so behavior is correct; only the local naming is misleading.
        solrConfigversion = core.getSolrConfig().getOverlay().getZnodeVersion();
        overlayVersion = core.getSolrConfig().getZnodeVersion();
        if (managedSchmaResourcePath != null) {
          managedSchemaVersion = ((ManagedIndexSchema) core.getLatestSchema()).getSchemaZkVersion();
        }
      }
      if (cfg != null) {
        cfg.refreshRequestParams();
      }
      // Any stale resource triggers a full core reload (listeners are NOT fired).
      if (checkStale(zkClient, overlayPath, solrConfigversion) ||
          checkStale(zkClient, solrConfigPath, overlayVersion) ||
          checkStale(zkClient, managedSchmaResourcePath, managedSchemaVersion)) {
        log.info("core reload {}", coreName);
        cc.reload(coreName);
        return;
      }
      //some files in conf directory may have other than managedschema, overlay, params
      try (SolrCore core = cc.solrCores.getCoreFromAnyList(coreName, true)) {
        if (core == null || core.isClosed()) return;
        for (Runnable listener : core.confListeners) {
          try {
            listener.run();
          } catch (Exception e) {
            log.error("Error in listener ", e);
          }
        }
      }
    }
  };
}
/** Registers (or replaces) an info bean in this core's info registry under the given name. */
public void registerInfoBean(String name, SolrInfoMBean solrInfoMBean) {
  infoRegistry.put(name, solrInfoMBean);
}
/**
 * Returns true when the znode at {@code zkPath} is newer than
 * {@code currentVersion} (or has been deleted while a local version exists),
 * i.e. the locally-loaded copy is stale. Returns false for a null path and on
 * ZooKeeper errors (which are logged).
 */
private static boolean checkStale(SolrZkClient zkClient, String zkPath, int currentVersion) {
  if (zkPath == null) return false;
  try {
    Stat stat = zkClient.exists(zkPath, null, true);
    if (stat == null) {
      // Node is gone: stale only if we ever held a real version of it.
      return currentVersion > -1;
    }
    if (stat.getVersion() > currentVersion) {
      // Fix: fully parameterized log call (the original mixed string
      // concatenation with {} placeholders in the same message).
      log.info("{} is stale will need an update from {} to {}", zkPath, currentVersion, stat.getVersion());
      return true;
    }
    return false;
  } catch (KeeperException.NoNodeException nne) {
    // no problem
  } catch (KeeperException e) {
    log.error("error refreshing solrconfig ", e);
  } catch (InterruptedException e) {
    // Fix: restore the thread's interrupt status. The original called
    // isInterrupted(), which only queries the flag and discards the result.
    Thread.currentThread().interrupt();
  }
  return false;
}
}
|
q474818917/solr-5.2.0
|
solr/core/src/java/org/apache/solr/core/SolrCore.java
|
Java
|
apache-2.0
| 98,143
|
<?php
/**
 * Example presents usage of the successful updateProblemTestcase() API method
 */
use SphereEngine\Api\ProblemsClientV4;

// load the library
require_once('../../../../vendor/autoload.php');

// access configuration
$accessToken = '<access_token>';
$endpoint = '<endpoint>';

// build the API client
$client = new ProblemsClientV4($accessToken, $endpoint);

// update testcase 0 of problem 42 with fresh input data
$problemId = 42;
$testcaseNumber = 0;
$newInput = 'New testcase input';

$response = $client->updateProblemTestcase($problemId, $testcaseNumber, $newInput);
|
sphere-engine/php-client
|
Examples/V4/problems/problems/updateProblemTestcase.php
|
PHP
|
apache-2.0
| 539
|
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/elasticmapreduce/EMR_EXPORTS.h>
#include <aws/elasticmapreduce/EMRRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace EMR
{
namespace Model
{
/**
*/
class AWS_EMR_API DescribeReleaseLabelRequest : public EMRRequest
{
public:
DescribeReleaseLabelRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
// Note: this is not true for response, multiple operations may have the same response name,
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DescribeReleaseLabel"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The target release label to be described.</p>
*/
inline const Aws::String& GetReleaseLabel() const{ return m_releaseLabel; }
/**
* <p>The target release label to be described.</p>
*/
inline bool ReleaseLabelHasBeenSet() const { return m_releaseLabelHasBeenSet; }
/**
* <p>The target release label to be described.</p>
*/
inline void SetReleaseLabel(const Aws::String& value) { m_releaseLabelHasBeenSet = true; m_releaseLabel = value; }
/**
* <p>The target release label to be described.</p>
*/
inline void SetReleaseLabel(Aws::String&& value) { m_releaseLabelHasBeenSet = true; m_releaseLabel = std::move(value); }
/**
* <p>The target release label to be described.</p>
*/
inline void SetReleaseLabel(const char* value) { m_releaseLabelHasBeenSet = true; m_releaseLabel.assign(value); }
/**
* <p>The target release label to be described.</p>
*/
inline DescribeReleaseLabelRequest& WithReleaseLabel(const Aws::String& value) { SetReleaseLabel(value); return *this;}
/**
* <p>The target release label to be described.</p>
*/
inline DescribeReleaseLabelRequest& WithReleaseLabel(Aws::String&& value) { SetReleaseLabel(std::move(value)); return *this;}
/**
* <p>The target release label to be described.</p>
*/
inline DescribeReleaseLabelRequest& WithReleaseLabel(const char* value) { SetReleaseLabel(value); return *this;}
/**
* <p>The pagination token. Reserved for future use. Currently set to null.</p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>The pagination token. Reserved for future use. Currently set to null.</p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>The pagination token. Reserved for future use. Currently set to null.</p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>The pagination token. Reserved for future use. Currently set to null.</p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>The pagination token. Reserved for future use. Currently set to null.</p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>The pagination token. Reserved for future use. Currently set to null.</p>
*/
inline DescribeReleaseLabelRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>The pagination token. Reserved for future use. Currently set to null.</p>
*/
inline DescribeReleaseLabelRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>The pagination token. Reserved for future use. Currently set to null.</p>
*/
inline DescribeReleaseLabelRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>Reserved for future use. Currently set to null.</p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>Reserved for future use. Currently set to null.</p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>Reserved for future use. Currently set to null.</p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>Reserved for future use. Currently set to null.</p>
*/
inline DescribeReleaseLabelRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
private:
Aws::String m_releaseLabel;
bool m_releaseLabelHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
};
} // namespace Model
} // namespace EMR
} // namespace Aws
|
awslabs/aws-sdk-cpp
|
aws-cpp-sdk-elasticmapreduce/include/aws/elasticmapreduce/model/DescribeReleaseLabelRequest.h
|
C
|
apache-2.0
| 5,117
|
# encoding: UTF-8
#
# Author:: Xabier de Zuazo (<xabier@zuazo.org>)
# Copyright:: Copyright (c) 2014 Onddo Labs, SL.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copied from https://github.com/burtlo/ark
# Until f6f9650 release.
# ChefSpec custom matchers for the +ark+ resource, one per supported action.
# They are only defined when ChefSpec itself has been loaded.
#
# Copied from https://github.com/burtlo/ark
# Until f6f9650 release.
if defined?(ChefSpec)
  # Each matcher asserts that the corresponding +ark+ action was performed
  # on the named resource.
  def install_ark(res_name)
    ChefSpec::Matchers::ResourceMatcher.new(:ark, :install, res_name)
  end

  def dump_ark(res_name)
    ChefSpec::Matchers::ResourceMatcher.new(:ark, :dump, res_name)
  end

  def cherry_pick_ark(res_name)
    ChefSpec::Matchers::ResourceMatcher.new(:ark, :cherry_pick, res_name)
  end

  def put_ark(res_name)
    ChefSpec::Matchers::ResourceMatcher.new(:ark, :put, res_name)
  end

  def install_with_make_ark(res_name)
    ChefSpec::Matchers::ResourceMatcher.new(:ark, :install_with_make, res_name)
  end

  def configure_ark(res_name)
    ChefSpec::Matchers::ResourceMatcher.new(:ark, :configure, res_name)
  end

  def setup_py_build_ark(res_name)
    ChefSpec::Matchers::ResourceMatcher.new(:ark, :setup_py_build, res_name)
  end

  def setup_py_install_ark(res_name)
    ChefSpec::Matchers::ResourceMatcher.new(:ark, :setup_py_install, res_name)
  end

  def setup_py_ark(res_name)
    ChefSpec::Matchers::ResourceMatcher.new(:ark, :setup_py, res_name)
  end

  def unzip_ark(res_name)
    ChefSpec::Matchers::ResourceMatcher.new(:ark, :unzip, res_name)
  end
end
|
onddo/postfixadmin-cookbook
|
test/unit/support/matchers.rb
|
Ruby
|
apache-2.0
| 2,035
|
{{/* Single-post template: renders one post with title/date meta, its tag list,
     optional Disqus or Google+ comments, back-to-top and social share links.
     Hugo template comments like this one are stripped and never emitted. */}}
{{ partial "head" . }}
<body class="post-template">
    {{ partial "header" . }}
    {{ partial "navigation" . }}
    <main class="content" role="main">
        <article class="post">
            <header>
                <h1 class="post-title">{{ .Title }}</h1>
                <div class="post-meta"><time datetime="{{ .Date.Format "02 January 2006" }}">{{ .Date.Format "02 January 2006" }}</time></div>
            </header>
            <section class="post-content">
                {{ .Content }}
            </section>
            <section class="post-tags" style="padding-bottom:60px;">
                <div class="post-meta tags">
                    <i class="fa fa-fw fa-tag"></i>
                    {{ range .Params.tags }}
                    <a href="{{ $.Site.BaseURL }}tags/{{ . }}">{{ . }} </a>
                    {{ end }}
                </div>
            </section>
            {{/* Comment provider chosen by site config: "disqus" or "googleplus". */}}
            {{ if eq .Site.Params.comments "disqus" }}
            {{ template "_internal/disqus.html" . }}
            {{ end }}
            {{ if eq .Site.Params.comments "googleplus" }}
            {{ partial "googleplus-comments" . }}
            {{ end }}
            <section class="share">
                <p class="backtotop"><a data-scroll href="#site-head"><i class="fa fa-lg fa-fw fa-angle-double-up"></i></a><a data-scroll class="backtotoptext" href="#site-head"> {{ .Site.Params.backtotopStr }}</a></p>
                <p class="info prompt">{{ .Site.Params.shareStr }}</p>
                <a href="http://twitter.com/share?text={{ .Title }}&url={{ .Permalink }}" title="Share on Twitter"
                    onclick="window.open(this.href, 'twitter-share', 'width=550,height=235');return false;">
                    <i class="fa fa-2x fa-fw fa-twitter-square"></i> <span class="hidden">Twitter</span>
                </a>
                <a href="https://www.facebook.com/sharer/sharer.php?u={{ .Permalink }}" title="Share on Facebook"
                    onclick="window.open(this.href, 'facebook-share','width=580,height=296');return false;">
                    <i class="fa fa-2x fa-fw fa-facebook-square" style="margin-left: -8px"></i> <span class="hidden">Facebook</span>
                </a>
                <a href="https://plus.google.com/share?url={{ .Permalink }}" title="Share on Google+"
                    onclick="window.open(this.href, 'google-plus-share', 'width=490,height=530');return false;">
                    <i class="fa fa-2x fa-fw fa-google-plus-square" style="margin-left: -8px"></i> <span class="hidden">Google+</span>
                </a>
            </section>
            <footer class="post-footer">
                {{ partial "author" . }}
            </footer>
        </article>
    </main>
    {{ partial "footer" . }}
    {{ partial "js" . }}
</body>
</html>
|
qiansen1386/qiansen1386.github.io
|
themes/steam/layouts/post/single.html
|
HTML
|
apache-2.0
| 2,639
|
#!/bin/bash
# Builds the zero-os 0-orchestrator REST API as a static binary and packages it
# into /tmp/archives/rest-api-<branch>.tar.gz.
# Usage: builder-orchestrator.sh [branch]   (defaults to "master")
set -e

source $(dirname $0)/tools.sh
ensure_go

echo $1
# Use the first argument as branch when non-empty, otherwise master.
branch="${1:-master}"

apt-get update
apt-get install -y curl git

git clone -b "${branch}" https://github.com/zero-os/0-orchestrator.git $GOPATH/src/github.com/zero-os/0-orchestrator
cd $GOPATH/src/github.com/zero-os/0-orchestrator/api
go get -v
# Static build so the binary has no libc/runtime dependencies.
CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' .

mkdir -p /tmp/archives/
tar -czf "/tmp/archives/rest-api-${branch}.tar.gz" api
|
zero-os/0-orchestrator
|
buildscripts/builder-orchestrator.sh
|
Shell
|
apache-2.0
| 507
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.usecases;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.leveldb.LevelDBStore;
import java.io.File;
import java.io.IOException;
/**
 * Runs the {@code SingleBrokerVirtualDestinationsWithWildcardTest} suite
 * against the LevelDB persistence adapter instead of the default store.
 */
public class SingleBrokerVirtualDestinationsWithWildcardLevelDBTest extends SingleBrokerVirtualDestinationsWithWildcardTest {

    @Override
    protected void configurePersistenceAdapter(BrokerService broker) throws IOException {
        // Give each broker its own store directory so concurrent tests don't collide.
        File storeDir = new File("target/test-amq-data/leveldb/" + broker.getBrokerName());
        LevelDBStore store = new LevelDBStore();
        store.setDirectory(storeDir);
        broker.setPersistenceAdapter(store);
    }
}
|
lburgazzoli/apache-activemq-artemis
|
tests/activemq5-unit-tests/src/test/java/org/apache/activemq/usecases/SingleBrokerVirtualDestinationsWithWildcardLevelDBTest.java
|
Java
|
apache-2.0
| 1,446
|
/* ----------------------------------------------------------------------------
* This file was automatically generated by SWIG (http://www.swig.org).
* Version 2.0.10
*
* Do not make changes to this file unless you know what you are doing--modify
* the SWIG interface file instead.
* ----------------------------------------------------------------------------- */
package com.badlogic.gdx.physics.bullet.linearmath;
import com.badlogic.gdx.physics.bullet.BulletBase;
import com.badlogic.gdx.math.Vector3;
import com.badlogic.gdx.math.Quaternion;
import com.badlogic.gdx.math.Matrix3;
import com.badlogic.gdx.math.Matrix4;
/**
 * SWIG-generated JNI wrapper around Bullet's native {@code int4} struct
 * (four int components x/y/z/w). All accessors delegate to native code via
 * {@code LinearMathJNI}; {@code swigCPtr} holds the native object's address.
 * Do not edit by hand — regenerate from the SWIG interface instead.
 */
public class int4 extends BulletBase {
	private long swigCPtr;
	
	protected int4(final String className, long cPtr, boolean cMemoryOwn) {
		super(className, cPtr, cMemoryOwn);
		swigCPtr = cPtr;
	}
	
	/** Construct a new int4, normally you should not need this constructor it's intended for low-level usage. */ 
	public int4(long cPtr, boolean cMemoryOwn) {
		this("int4", cPtr, cMemoryOwn);
		construct();
	}
	
	@Override
	protected void reset(long cPtr, boolean cMemoryOwn) {
		// Release the current native object (if any) before pointing at a new one.
		if (!destroyed)
			destroy();
		super.reset(swigCPtr = cPtr, cMemoryOwn);
	}
	
	/** @return the native pointer of the given wrapper, or 0 for null. */
	public static long getCPtr(int4 obj) {
		return (obj == null) ? 0 : obj.swigCPtr;
	}

	@Override
	protected void finalize() throws Throwable {
		// Safety net: free the native object if destroy() was never called.
		if (!destroyed)
			destroy();
		super.finalize();
	}

  @Override protected synchronized void delete() {
		// Free the native memory only if this wrapper owns it.
		if (swigCPtr != 0) {
			if (swigCMemOwn) {
				swigCMemOwn = false;
				LinearMathJNI.delete_int4(swigCPtr);
			}
			swigCPtr = 0;
		}
		super.delete();
	}

  public void setX(int value) {
    LinearMathJNI.int4_x_set(swigCPtr, this, value);
  }

  public int getX() {
    return LinearMathJNI.int4_x_get(swigCPtr, this);
  }

  public void setY(int value) {
    LinearMathJNI.int4_y_set(swigCPtr, this, value);
  }

  public int getY() {
    return LinearMathJNI.int4_y_get(swigCPtr, this);
  }

  public void setZ(int value) {
    LinearMathJNI.int4_z_set(swigCPtr, this, value);
  }

  public int getZ() {
    return LinearMathJNI.int4_z_get(swigCPtr, this);
  }

  public void setW(int value) {
    LinearMathJNI.int4_w_set(swigCPtr, this, value);
  }

  public int getW() {
    return LinearMathJNI.int4_w_get(swigCPtr, this);
  }

  /** Allocates a zero-initialized native int4 owned by this wrapper. */
  public int4() {
    this(LinearMathJNI.new_int4__SWIG_0(), true);
  }

  /** Allocates a native int4 with the given components, owned by this wrapper. */
  public int4(int _x, int _y, int _z, int _w) {
    this(LinearMathJNI.new_int4__SWIG_1(_x, _y, _z, _w), true);
  }

}
|
kzganesan/libgdx
|
extensions/gdx-bullet/jni/swig-src/linearmath/com/badlogic/gdx/physics/bullet/linearmath/int4.java
|
Java
|
apache-2.0
| 2,474
|
//---------------------------------------------------------------------------
#ifndef ConfigProgStationUnitH
#define ConfigProgStationUnitH
//---------------------------------------------------------------------------
#include <Classes.hpp>
#include <Controls.hpp>
#include <StdCtrls.hpp>
#include <Forms.hpp>
#include <Buttons.hpp>
//---------------------------------------------------------------------------
// C++Builder VCL form for programming-station configuration: duplicate-tag
// options, host-ID reassignment, temperature calibration, and up to six
// user-definable tag types with abbreviations.
// NOTE: the __published section is IDE-managed — member names/order must match
// the .dfm form description; do not edit by hand.
class TConfigProgStationForm : public TForm
{
__published:	// IDE-managed Components
	TGroupBox *GroupBox1;
	TCheckBox *MultiDisplayTagCheckBox;
	TCheckBox *DuplicateTagFGenCheckBox;
	TCheckBox *DuplicateTagGIDCheckBox;
	TGroupBox *GroupBox2;
	TEdit *OldHostIDEdit;
	TLabel *Label1;
	TLabel *Label2;
	TEdit *NewHostIDEdit;
	TCheckBox *AllHostCheckBox;
	TBitBtn *BitBtn1;
	TBitBtn *SaveBitBtn;
	TGroupBox *GroupBox3;
	TEdit *TempCalibEdit;
	TLabel *Label3;
	TRadioButton *TempCalibCRadioButton;
	TRadioButton *TempCalibFRadioButton;
	TGroupBox *GroupBox4;
	TCheckBox *Type01CheckBox;
	TCheckBox *Type02CheckBox;
	TCheckBox *Type03CheckBox;
	TCheckBox *Type04CheckBox;
	TCheckBox *Type05CheckBox;
	TCheckBox *Type06CheckBox;
	TEdit *Type01Edit;
	TEdit *Type02Edit;
	TEdit *Type03Edit;
	TEdit *Type04Edit;
	TEdit *Type05Edit;
	TEdit *Type06Edit;
	TEdit *Type01AbrEdit;
	TEdit *Type02AbrEdit;
	TEdit *Type03AbrEdit;
	TEdit *Type04AbrEdit;
	TEdit *Type05AbrEdit;
	TEdit *Type06AbrEdit;
	TLabel *Label4;
	TLabel *Label5;
	TBitBtn *Reset;
	TLabel *Label6;
	TLabel *Label7;
	void __fastcall FormActivate(TObject *Sender);
	void __fastcall SaveBitBtnClick(TObject *Sender);
	void __fastcall TempCalibCRadioButtonClick(TObject *Sender);
	void __fastcall TempCalibFRadioButtonClick(TObject *Sender);
	void __fastcall Type01CheckBoxClick(TObject *Sender);
	void __fastcall Type02CheckBoxClick(TObject *Sender);
	void __fastcall Type03CheckBoxClick(TObject *Sender);
	void __fastcall Type04CheckBoxClick(TObject *Sender);
	void __fastcall Type05CheckBoxClick(TObject *Sender);
	void __fastcall Type06CheckBoxClick(TObject *Sender);
	void __fastcall ResetClick(TObject *Sender);
private:	// User declarations
public:		// User declarations
	__fastcall TConfigProgStationForm(TComponent* Owner);
	bool dGIdFlag;                 // duplicate global-ID flag
	bool dFgenFlag;                // duplicate FGen flag
	float curTagTempCalibC;        // current tag temperature calibration (Celsius)
	// Index 0 unused; slots 1..6 mirror the Type01..Type06 edit fields.
	AnsiString localTagTypes[7];
	AnsiString localTagTypesAbr[7];
	bool __fastcall CheckDuplicatTagType();
	bool __fastcall CheckDuplicatTagTypeAbr();
};
//---------------------------------------------------------------------------
extern PACKAGE TConfigProgStationForm *ConfigProgStationForm;
//---------------------------------------------------------------------------
#endif
|
tourajghaffari/tigerfid
|
software/programming-station/Programming Station 190/Programming Station 190/ConfigProgStationUnit.h
|
C
|
apache-2.0
| 3,046
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.