text
stringlengths
1
22.8M
```python import demistomock as demisto # noqa: F401 import pytest def test_canonicalize(): from InferWhetherServiceIsDev import _canonicalize_string assert _canonicalize_string("BLAH") == "blah" assert _canonicalize_string("'BLAH'") == "blah" assert _canonicalize_string('" BLAH" ') == "blah" @pytest.mark.parametrize('raw,matches,list_type', [([{"key": "ENV", "value": "non-prd"}], [{"key": "ENV", "value": "non-prd"}], "dictionary"), ([{"key": "ENV", "value": "prd"}], [], "dictionary"), ([{"key": "ENV", "value": "dv"}, {"key": "stage", "value": "sbx"}], [{"key": "ENV", "value": "dv"}, {"key": "stage", "value": "sbx"}], "dictionary"), (["eng-dev", "rando"], ["eng-dev"], "string"), (["eng-dev"], [], "break")]) def test_get_indicators_from_list(raw, matches, list_type): from InferWhetherServiceIsDev import get_indicators_from_list from InferWhetherServiceIsDev import is_dev_indicator assert get_indicators_from_list(raw, is_dev_indicator, list_type) == matches def test_is_dev_indicator(): from InferWhetherServiceIsDev import is_dev_indicator # Test Dev Matches assert is_dev_indicator('dev') assert is_dev_indicator('uat') assert is_dev_indicator('non-prod') assert is_dev_indicator('noprod') # Test no match assert not is_dev_indicator('devops') assert not is_dev_indicator('prod') assert not is_dev_indicator('pr') def test_is_prod_indicator(): from InferWhetherServiceIsDev import is_prod_indicator # Test Dev Matches assert is_prod_indicator('pr') assert is_prod_indicator('prod') # Test no Matches assert not is_prod_indicator('non-prod') assert not is_prod_indicator('staging') @pytest.mark.parametrize('classifications, matches', [(["SshServer", "DevelopmentEnvironment"], ["DevelopmentEnvironment"]), (["SshServer"], [])]) def test_get_indicators_from_external_classification(classifications, matches): from InferWhetherServiceIsDev import get_indicators_from_external_classification assert get_indicators_from_external_classification(classifications) == matches 
@pytest.mark.parametrize('external, internal, reason', [(["DevelopmentEnvironment"], [], "match on external classification of DevelopmentEnvironment"), (["DevelopmentEnvironment"], [{"key": "env", "value": "non-prod", "source": "AWS"}], "match on external classification of DevelopmentEnvironment and tag {env: non-prod} from AWS"), ([], [{"key": "env", "value": "non-prod", "source": "AWS"}], "match on tag {env: non-prod} from AWS"), ([], [{"key": "env", "value": "non-prod", "source": "AWS"}, {"key": "stage", "value": "sbx", "source": "GCP"}], "match on tag {env: non-prod} from AWS and tag {stage: sbx} from GCP")]) def test_determine_reason(external, internal, reason): from InferWhetherServiceIsDev import determine_reason assert determine_reason(external, internal, [], "") == reason def test_full_truth_table(): sample_dev_tag = [{"key": "stage", "value": "non-prod", "source": "AWS"}] sample_prod_tag = [{"key": "tier", "value": "prod", "source": "Tenable.io"}] # Blank list means no external classification or tag matches. 
sample_no_match = [] sample_dev_classification = ["DevelopmentEnvironment"] sample_dev_hierarchy = ["ENG-DEV"] sample_prod_hierarchy = ["ENG-PROD"] from InferWhetherServiceIsDev import final_decision # dev == True, all else is False # kv pair contains no indicators # DevEnv is set (--> dev) assert final_decision(sample_dev_classification, sample_no_match, sample_no_match, sample_no_match, sample_no_match, "")["result"] # DevEnv is not set (--> can't tell) assert not final_decision(sample_no_match, sample_no_match, sample_no_match, sample_no_match, sample_no_match, "")["result"] # kv pair contains dev indicators only # DevEnv is set (--> dev) # Dev Tags only assert final_decision(sample_dev_classification, sample_dev_tag, sample_no_match, sample_no_match, sample_no_match, "")["result"] # Dev Hierachy only assert final_decision(sample_dev_classification, sample_no_match, sample_no_match, sample_dev_hierarchy, sample_no_match, "")["result"] # Both Dev Tags and Hierarchy assert final_decision(sample_dev_classification, sample_dev_tag, sample_no_match, sample_dev_hierarchy, sample_no_match, "")["result"] # # DevEnv is not set (--> dev) # Dev Tag only assert final_decision(sample_no_match, sample_dev_tag, sample_no_match, sample_no_match, sample_no_match, "")["result"] # Dev Hierachy only assert final_decision(sample_no_match, sample_no_match, sample_no_match, sample_dev_hierarchy, sample_no_match, "")["result"] # Both Dev Tags and Hierarchy assert final_decision(sample_no_match, sample_dev_tag, sample_no_match, sample_dev_hierarchy, sample_no_match, "")["result"] # kv pair contains prod indicators only # DevEnv is set (--> conflicting) # PROD Tag only assert not final_decision(sample_dev_classification, sample_no_match, sample_prod_tag, sample_no_match, sample_no_match, "")["result"] # PROD Hierachy only assert not final_decision(sample_dev_classification, sample_no_match, sample_no_match, sample_no_match, sample_prod_hierarchy, "")["result"] # Both PROD Tags and 
Hierarchy assert not final_decision(sample_dev_classification, sample_no_match, sample_prod_tag, sample_no_match, sample_prod_hierarchy, "")["result"] # # DevEnv is not set (--> prod) # PROD Tag only assert not final_decision(sample_no_match, sample_no_match, sample_prod_tag, sample_no_match, sample_no_match, "")["result"] # PROD Hierachy only assert not final_decision(sample_no_match, sample_no_match, sample_no_match, sample_no_match, sample_prod_hierarchy, "")["result"] # Both PROD Tags and Hierarchy assert not final_decision(sample_no_match, sample_no_match, sample_prod_tag, sample_no_match, sample_prod_hierarchy, "")["result"] # kv pair contains conflicting indicators # DevEnv is set (--> conflicting) # Conflicting tags only assert not final_decision(sample_dev_classification, sample_dev_tag, sample_prod_tag, sample_no_match, sample_no_match, "")["result"] # Conflicting hierarchy only assert not final_decision(sample_dev_classification, sample_no_match, sample_no_match, sample_dev_hierarchy, sample_prod_hierarchy, "")["result"] # Conflicting hiearchy and tags (would need other combinations to do full truth table) assert not final_decision(sample_dev_classification, sample_dev_tag, sample_prod_tag, sample_dev_hierarchy, sample_prod_hierarchy, "")["result"] # # DevEnv is not set (--> conflicting) assert not final_decision(sample_no_match, sample_dev_tag, sample_prod_tag, sample_no_match, sample_no_match, "")["result"] # Conflicting hierarchy only assert not final_decision(sample_no_match, sample_no_match, sample_no_match, sample_dev_hierarchy, sample_prod_hierarchy, "")["result"] # Conflicting hiearchy and tags (would need other combinations to do full truth table) assert not final_decision(sample_no_match, sample_dev_tag, sample_prod_tag, sample_dev_hierarchy, sample_prod_hierarchy, "")["result"] @pytest.mark.parametrize('in_classifications,in_tags,expected_out_boolean', [([], [{"key": "ENV", "value": "non-prod", "source": "AWS"}], [{'result': True, 
'result_readable': 'The service is development', 'confidence': 'Likely Development', 'reason': 'match on tag {ENV: non-prod} from AWS'}])]) def test_main(mocker, in_classifications, in_tags, expected_out_boolean): import InferWhetherServiceIsDev import unittest # Construct payload arg_payload = {} if in_classifications: arg_payload["active_classifications"] = in_classifications if in_tags: arg_payload["asm_tags"] = in_tags mocker.patch.object(demisto, 'args', return_value=arg_payload) # Execute main using a mock that we can inspect for `executeCommand` demisto_execution_mock = mocker.patch.object(demisto, 'executeCommand') InferWhetherServiceIsDev.main() # Verify the output value was set expected_calls_to_mock_object = [unittest.mock.call('setAlert', {'asmdevcheckdetails': expected_out_boolean})] assert demisto_execution_mock.call_args_list == expected_calls_to_mock_object ```
```java /* * one or more contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright ownership. */ package io.camunda.optimize.dto.optimize.rest.export.report; import static io.camunda.optimize.dto.optimize.rest.export.ExportEntityType.SINGLE_DECISION_REPORT; import io.camunda.optimize.dto.optimize.query.report.single.decision.DecisionReportDataDto; import io.camunda.optimize.dto.optimize.query.report.single.decision.SingleDecisionReportDefinitionRequestDto; import io.camunda.optimize.dto.optimize.rest.export.ExportEntityType; import io.camunda.optimize.service.db.schema.index.report.SingleDecisionReportIndex; import jakarta.validation.constraints.NotNull; import lombok.AllArgsConstructor; import lombok.Data; import lombok.EqualsAndHashCode; import lombok.NoArgsConstructor; @AllArgsConstructor @NoArgsConstructor @EqualsAndHashCode(callSuper = true) @Data public class SingleDecisionReportDefinitionExportDto extends ReportDefinitionExportDto { @NotNull private DecisionReportDataDto data; public SingleDecisionReportDefinitionExportDto( final SingleDecisionReportDefinitionRequestDto reportDefinition) { super( reportDefinition.getId(), SINGLE_DECISION_REPORT, SingleDecisionReportIndex.VERSION, reportDefinition.getName(), reportDefinition.getDescription(), reportDefinition.getCollectionId()); this.data = reportDefinition.getData(); } @Override public ExportEntityType getExportEntityType() { return SINGLE_DECISION_REPORT; } } ```
```javascript --x ```
```php <?php declare(strict_types=1); use PhpOffice\PhpSpreadsheet\Cell\DataType; return [ /*[ 'ABCDEFGHIJ', 'ABCDE', 'FGHIJ', ], [ '123', 1, 2, 3, ], [ 'Boolean-TRUE', 'Boolean', '-', true, ], 'no arguments' => ['exception'], 'result just fits' => [ // Note use Armenian character below to make sure chars, not bytes str_repeat('', DataType::MAX_STRING_LENGTH - 5) . 'ABCDE', 'A3', 'ABCDE', ], 'result too long' => [ '#CALC!', 'A3', 'abc', 'def', ],*/ 'propagate DIV0' => ['#DIV/0!', '1', 'A2', '3'], ]; ```
For the Good Times is a 1971 studio album by Dean Martin arranged by Ernie Freeman and produced by Jimmy Bowen. The album peaked at 113 on the Billboard 200 and 41 on the Billboard top Country Albums chart. It was reissued on CD by Capitol Records in 2006 and Hip-O Records in 2009. Though Martin was recording infrequently at this stage of his career, this was the second album he recorded in 1970. Reception The initial Billboard review from 6 February 1971 commented that "Dean Martin is an old timer who knows how to make time with the new crop of writers". William Ruhlmann on Allmusic.com gave the album two and a half stars out of five. Ruhlmann said that "Martin handled the material with his usual careless aplomb, but the result was just another record, no better or worse than its immediate predecessors". Track listing Side One: "For the Good Times" (Kris Kristofferson) - 3:50 "Marry Me" (Barry Mason, Les Reed) - 2:34 "Georgia Sunshine" (Jerry Reed Hubbard) - 2:58 "Invisible Tears" (Ned Miller, Sue Miller) - 2:10 "Raindrops Keep Fallin' on My Head" (Burt Bacharach, Hal David) - 2:43 Side Two: "A Perfect Mountain" (Gene Thomas) - 2:46 "Raining in My Heart" (Felice and Boudleaux Bryant) - 2:37 "She's a Little Bit of Country" (Harlan Howard) - 2:34 "For Once in My Life" (Ron Miller, Orlando Murden) - 2:19 "Sweetheart" (Barry Gibb, Maurice Gibb) - 2:40 Personnel Dean Martin – vocals Ernie Freeman - arranger Ed Thrasher - art direction Eddie Brackett - engineer Jimmy Bowen - record producer References 1971 albums Dean Martin albums Albums arranged by Ernie Freeman Albums produced by Jimmy Bowen Reprise Records albums
```java /* * one or more contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright ownership. */ package io.camunda.zeebe.broker.system.configuration; import static io.camunda.zeebe.broker.system.configuration.BrokerCfg.ENV_DEBUG_EXPORTER; import static io.camunda.zeebe.broker.system.configuration.DataCfg.DEFAULT_DIRECTORY; import static io.camunda.zeebe.broker.system.configuration.NetworkCfg.DEFAULT_COMMAND_API_PORT; import static io.camunda.zeebe.broker.system.configuration.NetworkCfg.DEFAULT_INTERNAL_API_PORT; import static io.camunda.zeebe.protocol.Protocol.START_PARTITION_ID; import static org.assertj.core.api.Assertions.assertThat; import io.camunda.zeebe.broker.exporter.debug.DebugLogExporter; import io.camunda.zeebe.broker.exporter.metrics.MetricsExporter; import io.camunda.zeebe.broker.system.configuration.backpressure.LimitCfg; import io.camunda.zeebe.broker.system.configuration.backpressure.LimitCfg.LimitAlgorithm; import java.nio.file.Paths; import java.time.Duration; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.stream.Stream; import org.assertj.core.api.Assertions; import org.assertj.core.api.Condition; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.springframework.boot.context.properties.bind.BindException; public final class BrokerCfgTest { public static final String BROKER_BASE = "test"; private static final String ZEEBE_BROKER_EXPERIMENTAL_MAX_APPENDS_PER_FOLLOWER = "zeebe.broker.experimental.maxAppendsPerFollower"; private static final String ZEEBE_BROKER_EXPERIMENTAL_MAX_APPEND_BATCH_SIZE = "zeebe.broker.experimental.maxAppendBatchSize"; private static final String ZEEBE_BROKER_EXPERIMENTAL_DISABLEEXPLICITRAFTFLUSH = "zeebe.broker.experimental.disableExplicitRaftFlush"; private static final String ZEEBE_BROKER_CLUSTER_RAFT_ENABLEPRIORITYELECTION = 
"zeebe.broker.cluster.raft.enablePriorityElection"; private static final String ZEEBE_BROKER_EXPERIMENTAL_QUERYAPI_ENABLED = "zeebe.broker.experimental.queryapi.enabled"; private static final String ZEEBE_BROKER_DATA_DIRECTORY = "zeebe.broker.data.directory"; private static final String ZEEBE_BROKER_NETWORK_HOST = "zeebe.broker.network.host"; private static final String ZEEBE_BROKER_NETWORK_ADVERTISED_HOST = "zeebe.broker.network.advertisedHost"; private static final String ZEEBE_BROKER_NETWORK_PORT_OFFSET = "zeebe.broker.network.portOffset"; private static final String ZEEBE_BROKER_EXECUTION_METRICS_EXPORTER_ENABLED = "zeebe.broker.executionMetricsExporterEnabled"; @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); public final Map<String, String> environment = new HashMap<>(); @Test public void shouldUseDefaultPorts() { assertDefaultPorts(DEFAULT_COMMAND_API_PORT, DEFAULT_INTERNAL_API_PORT); } @Test public void shouldUseSpecifiedPorts() { assertPorts("specific-ports", 1, 5); } @Test public void shouldUsePortOffset() { final int offset = 50; assertPorts( "port-offset", DEFAULT_COMMAND_API_PORT + offset, DEFAULT_INTERNAL_API_PORT + offset); } @Test public void shouldUsePortOffsetWithSpecifiedPorts() { final int offset = 30; assertPorts("specific-ports-offset", 1 + offset, 5 + offset); } @Test public void shouldUsePortOffsetFromEnvironment() { environment.put(ZEEBE_BROKER_NETWORK_PORT_OFFSET, "5"); final int offset = 50; assertDefaultPorts(DEFAULT_COMMAND_API_PORT + offset, DEFAULT_INTERNAL_API_PORT + offset); } @Test public void shouldUsePortOffsetFromEnvironmentWithSpecifiedPorts() { environment.put(ZEEBE_BROKER_NETWORK_PORT_OFFSET, "3"); final int offset = 30; assertPorts("specific-ports", 1 + offset, 5 + offset); } @Test public void shouldRejectInvalidPortOffsetFromEnvironment() { // given environment.put(ZEEBE_BROKER_NETWORK_PORT_OFFSET, "a"); // when + then Assertions.assertThatThrownBy( () -> 
assertDefaultPorts(DEFAULT_COMMAND_API_PORT, DEFAULT_INTERNAL_API_PORT)) .isInstanceOf(BindException.class); } @Test public void shouldOverridePortOffsetFromEnvironment() { environment.put(ZEEBE_BROKER_NETWORK_PORT_OFFSET, "7"); final int offset = 70; assertPorts( "port-offset", DEFAULT_COMMAND_API_PORT + offset, DEFAULT_INTERNAL_API_PORT + offset); } @Test public void shouldExpandExporterJarPathRelativeToBrokerBaseIffPresent() { // given final ExporterCfg exporterCfgExternal = new ExporterCfg(); exporterCfgExternal.setJarPath("exporters/exporter.jar"); final ExporterCfg exporterCfgInternal1 = new ExporterCfg(); exporterCfgInternal1.setJarPath(""); final ExporterCfg exporterCfgInternal2 = new ExporterCfg(); final BrokerCfg config = new BrokerCfg(); config.getExporters().put("external", exporterCfgExternal); config.getExporters().put("internal-1", exporterCfgInternal1); config.getExporters().put("internal-2", exporterCfgInternal2); final String base = temporaryFolder.getRoot().getAbsolutePath(); final String jarFile = Paths.get(base, "exporters", "exporter.jar").toAbsolutePath().toString(); // when config.init(base); // then assertThat(config.getExporters()).hasSize(3); assertThat(config.getExporters().get("external")) .hasFieldOrPropertyWithValue("jarPath", jarFile) .is(new Condition<>(ExporterCfg::isExternal, "is external")); assertThat(config.getExporters().get("internal-1").isExternal()).isFalse(); assertThat(config.getExporters().get("internal-2").isExternal()).isFalse(); } @Test public void shouldEnableDebugLogExporter() { // given final var expectedId = DebugLogExporter.defaultExporterId(); final var expectedConfig = DebugLogExporter.defaultConfig(); environment.put(ENV_DEBUG_EXPORTER, "true"); // then assertWithDefaultConfigurations( cfg -> assertThat(cfg.getExporters()).containsEntry(expectedId, expectedConfig)); } @Test public void shouldNotRegisterDebugLogExporter() { // given environment.put(ENV_DEBUG_EXPORTER, "false"); // when final String exporterId = 
DebugLogExporter.defaultExporterId(); final BrokerCfg brokerCfg = TestConfigReader.readConfig("empty", environment); // then assertThat(brokerCfg.getExporters()).doesNotContainKey(exporterId); } @Test public void shouldHaveNoExportersByDefault() { assertWithDefaultConfigurations(cfg -> assertThat(cfg.getExporters()).isEmpty()); } @Test public void shouldEnableMetricsExporter() { // given environment.put(ZEEBE_BROKER_EXECUTION_METRICS_EXPORTER_ENABLED, "true"); // then assertMetricsExporter(); } @Test public void shouldUseDefaultHost() { assertDefaultHost("0.0.0.0"); } @Test public void shouldUseSpecifiedHosts() { assertHost( "specific-hosts", "0.0.0.0", "gatewayHost", "commandHost", "internalHost", "monitoringHost"); } @Test public void shouldUseGlobalHost() { assertHost("host", "1.1.1.1"); } @Test public void shouldUseHostFromEnvironment() { environment.put(ZEEBE_BROKER_NETWORK_HOST, "2.2.2.2"); assertDefaultHost("2.2.2.2"); } @Test public void shouldUseHostFromEnvironmentWithGlobalHost() { environment.put(ZEEBE_BROKER_NETWORK_HOST, "myHost"); assertHost("host", "myHost"); } @Test public void shouldNotOverrideSpecifiedHostsFromEnvironment() { environment.put(ZEEBE_BROKER_NETWORK_HOST, "myHost"); assertHost( "specific-hosts", "myHost", "gatewayHost", "commandHost", "internalHost", "monitoringHost"); } @Test public void shouldUseDefaultDirectory() { // given final String expectedDataDirectory = Paths.get(BROKER_BASE, DEFAULT_DIRECTORY).toString(); // then assertWithDefaultConfigurations( config -> assertThat(config.getData().getDirectory()).isEqualTo(expectedDataDirectory)); } @Test public void shouldUseSpecifiedDirectory() { // given final BrokerCfg config = TestConfigReader.readConfig("directory", environment); final String expectedDataDirectory = Paths.get(BROKER_BASE, "foo").toString(); // then assertThat(config.getData().getDirectory()).isEqualTo(expectedDataDirectory); } @Test public void shouldUseDirectoryFromEnvironment() { // given final String 
expectedDataDirectory = Paths.get(BROKER_BASE, "foo").toString(); environment.put(ZEEBE_BROKER_DATA_DIRECTORY, "foo"); // then assertWithDefaultConfigurations( config -> assertThat(config.getData().getDirectory()).isEqualTo(expectedDataDirectory)); } @Test public void shouldReadSpecificSystemClusterConfiguration() { // given final BrokerCfg cfg = TestConfigReader.readConfig("cluster-cfg", environment); final ClusterCfg cfgCluster = cfg.getCluster(); // when - then assertThat(cfgCluster.getInitialContactPoints()).isEmpty(); assertThat(cfgCluster.getNodeId()).isEqualTo(2); assertThat(cfgCluster.getPartitionsCount()).isEqualTo(3); assertThat(cfgCluster.getReplicationFactor()).isEqualTo(4); assertThat(cfgCluster.getClusterSize()).isEqualTo(5); } @Test public void shouldCreatePartitionIds() { // given final BrokerCfg cfg = TestConfigReader.readConfig("cluster-cfg", environment); final ClusterCfg cfgCluster = cfg.getCluster(); // when - then assertThat(cfgCluster.getPartitionsCount()).isEqualTo(3); final List<Integer> partitionIds = cfgCluster.getPartitionIds(); final int startId = START_PARTITION_ID; assertThat(partitionIds).contains(startId, startId + 1, startId + 2); } @Test public void shouldOverrideMaxAppendsViaEnvironment() { // given environment.put(ZEEBE_BROKER_EXPERIMENTAL_MAX_APPENDS_PER_FOLLOWER, "8"); // when final BrokerCfg cfg = TestConfigReader.readConfig("cluster-cfg", environment); final ExperimentalCfg experimentalCfg = cfg.getExperimental(); // then assertThat(experimentalCfg.getMaxAppendsPerFollower()).isEqualTo(8); } @Test public void shouldOverrideMaxAppendBatchSizeViaEnvironment() { // given environment.put(ZEEBE_BROKER_EXPERIMENTAL_MAX_APPEND_BATCH_SIZE, "256KB"); // when final BrokerCfg cfg = TestConfigReader.readConfig("cluster-cfg", environment); final ExperimentalCfg experimentalCfg = cfg.getExperimental(); // then assertThat(experimentalCfg.getMaxAppendBatchSizeInBytes()).isEqualTo(256 * 1024); } @Test public void 
shouldOverrideDisableExplicitRaftFlushViaEnvironment() { // given environment.put(ZEEBE_BROKER_EXPERIMENTAL_DISABLEEXPLICITRAFTFLUSH, "true"); // when final BrokerCfg cfg = TestConfigReader.readConfig("cluster-cfg", environment); final ExperimentalCfg experimentalCfg = cfg.getExperimental(); // then assertThat(experimentalCfg.isDisableExplicitRaftFlush()).isTrue(); } @Test public void shouldOverrideEnablePriorityElectionViaEnvironment() { // given environment.put(ZEEBE_BROKER_CLUSTER_RAFT_ENABLEPRIORITYELECTION, "true"); // when final BrokerCfg cfg = TestConfigReader.readConfig("cluster-cfg", environment); final ClusterCfg clusterCfg = cfg.getCluster(); // then assertThat(clusterCfg.getRaft().isEnablePriorityElection()).isTrue(); } @Test public void shouldEnablePriorityElectionByDefault() { // given final BrokerCfg cfg = TestConfigReader.readConfig("cluster-cfg", environment); // when final ClusterCfg clusterCfg = cfg.getCluster(); // then assertThat(clusterCfg.getRaft().isEnablePriorityElection()).isTrue(); } @Test public void shouldSetEnablePriorityElectionFromConfig() { // given final BrokerCfg cfg = TestConfigReader.readConfig("experimental-cfg", environment); // when final ClusterCfg clusterCfg = cfg.getCluster(); // then assertThat(clusterCfg.getRaft().isEnablePriorityElection()).isTrue(); } @Test public void shouldDisableQueryApiByDefault() { // given final BrokerCfg cfg = TestConfigReader.readConfig("cluster-cfg", environment); // when final ExperimentalCfg experimentalCfg = cfg.getExperimental(); // then assertThat(experimentalCfg.getQueryApi().isEnabled()).isFalse(); } @Test public void shouldSetEnableQueryApiFromConfig() { // given final BrokerCfg cfg = TestConfigReader.readConfig("experimental-cfg", environment); // when final ExperimentalCfg experimentalCfg = cfg.getExperimental(); // then assertThat(experimentalCfg.getQueryApi().isEnabled()).isTrue(); } @Test public void shouldOverrideSetEnableQueryApiViaEnvironment() { // given 
environment.put(ZEEBE_BROKER_EXPERIMENTAL_QUERYAPI_ENABLED, "true"); // when final BrokerCfg cfg = TestConfigReader.readConfig("cluster-cfg", environment); final ExperimentalCfg experimentalCfg = cfg.getExperimental(); // then assertThat(experimentalCfg.getQueryApi().isEnabled()).isTrue(); } @Test public void shouldReadDefaultEmbedGateway() { assertDefaultEmbeddedGatewayEnabled(true); } @Test public void shouldReadEmbedGateway() { assertEmbeddedGatewayEnabled("disabled-gateway", false); } @Test public void shouldSetEmbedGatewayViaEnvironment() { // given environment.put("zeebe.broker.gateway.enable", "true"); // then assertEmbeddedGatewayEnabled("disabled-gateway", true); } @Test public void shouldSetBackpressureConfig() { // when final BrokerCfg cfg = TestConfigReader.readConfig("backpressure-cfg", environment); final LimitCfg backpressure = cfg.getBackpressure(); // then assertThat(backpressure.isEnabled()).isTrue(); assertThat(backpressure.useWindowed()).isFalse(); assertThat(backpressure.getAlgorithm()).isEqualTo(LimitAlgorithm.GRADIENT); } @Test public void shouldUseConfiguredBackpressureAlgorithms() { final LimitCfg backpressure = new LimitCfg(); // when backpressure.setAlgorithm("gradient"); // then; assertThat(backpressure.getAlgorithm()).isEqualTo(LimitAlgorithm.GRADIENT); // when backpressure.setAlgorithm("gradient"); // then; assertThat(backpressure.getAlgorithm()).isEqualTo(LimitAlgorithm.GRADIENT); // when backpressure.setAlgorithm("gradient2"); // then; assertThat(backpressure.getAlgorithm()).isEqualTo(LimitAlgorithm.GRADIENT2); // when backpressure.setAlgorithm("vegas"); // then; assertThat(backpressure.getAlgorithm()).isEqualTo(LimitAlgorithm.VEGAS); // when backpressure.setAlgorithm("fixed"); // then; assertThat(backpressure.getAlgorithm()).isEqualTo(LimitAlgorithm.FIXED); // when backpressure.setAlgorithm("aimd"); // then; assertThat(backpressure.getAlgorithm()).isEqualTo(LimitAlgorithm.AIMD); } @Test public void shouldUseDefaultAdvertisedHost() { 
// when - then assertAdvertisedAddress( "default-advertised-host-cfg", "zeebe.io", NetworkCfg.DEFAULT_COMMAND_API_PORT); assertHost("default-advertised-host-cfg", "0.0.0.0"); } @Test public void shouldUseAdvertisedHost() { // when - then assertAdvertisedAddress("advertised-host-cfg", "zeebe.io", NetworkCfg.DEFAULT_COMMAND_API_PORT); assertHost("advertised-host-cfg", "0.0.0.0"); } @Test public void shouldUseAdvertisedAddress() { // when - then assertAdvertisedAddress("advertised-address-cfg", "zeebe.io", 8080); } @Test public void shouldUseDefaultAdvertisedHostFromEnv() { // given environment.put(ZEEBE_BROKER_NETWORK_ADVERTISED_HOST, "zeebe.io"); // then assertAdvertisedAddress("default", "zeebe.io", NetworkCfg.DEFAULT_COMMAND_API_PORT); assertAdvertisedAddress("empty", "zeebe.io", NetworkCfg.DEFAULT_COMMAND_API_PORT); } @Test public void shouldReadExporterConfigWithMinimalInfo() { // given final ExporterCfg expected = new ExporterCfg(); expected.setClassName("io.camunda.zeebe.exporter.ElasticsearchExporter"); final BrokerCfg actual = TestConfigReader.readConfig("exporters", environment); // then assertThat(actual.getExporters()) .hasSize(1) .containsKey("elasticsearch") .containsEntry("elasticsearch", expected); } @Test public void shouldSetCustomMembershipConfig() { // when final BrokerCfg brokerCfg = TestConfigReader.readConfig("membership-cfg", environment); // then final var membershipCfg = brokerCfg.getCluster().getMembership(); assertThat(membershipCfg.isBroadcastDisputes()).isFalse(); assertThat(membershipCfg.isBroadcastUpdates()).isTrue(); assertThat(membershipCfg.isNotifySuspect()).isTrue(); assertThat(membershipCfg.getGossipInterval()).isEqualTo(Duration.ofSeconds(2)); assertThat(membershipCfg.getGossipFanout()).isEqualTo(3); assertThat(membershipCfg.getProbeInterval()).isEqualTo(Duration.ofSeconds(3)); assertThat(membershipCfg.getProbeTimeout()).isEqualTo(Duration.ofSeconds(5)); assertThat(membershipCfg.getSuspectProbes()).isEqualTo(5); 
assertThat(membershipCfg.getFailureTimeout()).isEqualTo(Duration.ofSeconds(20)); assertThat(membershipCfg.getSyncInterval()).isEqualTo(Duration.ofSeconds(25)); } private void assertDefaultPorts(final int command, final int internal) { assertPorts("default", command, internal); assertPorts("empty", command, internal); } private void assertPorts(final String configFileName, final int command, final int internal) { final BrokerCfg brokerCfg = TestConfigReader.readConfig(configFileName, environment); final NetworkCfg network = brokerCfg.getNetwork(); assertThat(network.getCommandApi().getAddress().getPort()).isEqualTo(command); assertThat(network.getCommandApi().getAdvertisedAddress().getPort()).isEqualTo(command); assertThat(network.getInternalApi().getPort()).isEqualTo(internal); } private void assertDefaultHost(final String host) { assertHost("default", host); assertHost("empty", host); } private void assertHost(final String configFileName, final String host) { assertHost(configFileName, host, host, host, host, host); } private void assertHost( final String configFileName, final String host, final String gateway, final String command, final String internal, final String monitoring) { final BrokerCfg brokerCfg = TestConfigReader.readConfig(configFileName, environment); final NetworkCfg networkCfg = brokerCfg.getNetwork(); assertThat(networkCfg.getHost()).isEqualTo(host); assertThat(brokerCfg.getGateway().getNetwork().getHost()).isEqualTo(gateway); assertThat(networkCfg.getCommandApi().getAddress().getHostString()).isEqualTo(command); assertThat(networkCfg.getInternalApi().getHost()).isEqualTo(internal); } private void assertAdvertisedHost(final String configFileName, final String host) { final BrokerCfg brokerCfg = TestConfigReader.readConfig(configFileName, environment); final NetworkCfg networkCfg = brokerCfg.getNetwork(); assertThat(networkCfg.getCommandApi().getAdvertisedAddress().getHostName()).isEqualTo(host); } private void assertAdvertisedAddress( final 
String configFileName, final String host, final int port) { final BrokerCfg brokerCfg = TestConfigReader.readConfig(configFileName, environment); final NetworkCfg networkCfg = brokerCfg.getNetwork(); assertThat(networkCfg.getCommandApi().getAdvertisedAddress().getHostName()).isEqualTo(host); assertThat(networkCfg.getCommandApi().getAdvertisedAddress().getPort()).isEqualTo(port); } private void assertDefaultEmbeddedGatewayEnabled(final boolean enabled) { assertEmbeddedGatewayEnabled("default", enabled); assertEmbeddedGatewayEnabled("empty", enabled); } private void assertEmbeddedGatewayEnabled(final String configFileName, final boolean enabled) { final EmbeddedGatewayCfg gatewayCfg = TestConfigReader.readConfig(configFileName, environment).getGateway(); assertThat(gatewayCfg.isEnable()).isEqualTo(enabled); } private void assertMetricsExporter() { assertMetricsExporter("default"); assertMetricsExporter("empty"); } private void assertMetricsExporter(final String configFileName) { final ExporterCfg exporterCfg = MetricsExporter.defaultConfig(); final BrokerCfg brokerCfg = TestConfigReader.readConfig(configFileName, environment); assertThat(brokerCfg.getExporters().values()) .usingRecursiveFieldByFieldElementComparator() .contains(exporterCfg); } private void assertWithDefaultConfigurations(final Consumer<BrokerCfg> assertions) { Stream.of("default", "empty") .forEach(configFileName -> assertWithConfiguration(assertions, configFileName)); } private void assertWithConfiguration( final Consumer<BrokerCfg> assertions, final String configFileName) { final BrokerCfg cfg = TestConfigReader.readConfig(configFileName, environment); assertions.accept(cfg); } } ```
METIS is a software package for graph partitioning that implements various multilevel algorithms. METIS' multilevel approach has three phases and comes with several algorithms for each phase: Coarsen the graph by generating a sequence of graphs G0, G1, ..., GN, where G0 is the original graph and for each 0 ≀ i ≀ j ≀ N, the number of vertices in Gi is greater than the number of vertices in Gj. Compute a partition of GN Project the partition back through the sequence in the order of GN, ..., G0, refining it with respect to each graph. The final partition computed during the third phase (the refined partition projected onto G0) is a partition of the original graph. References External links METIS website Graph algorithms Mathematical software
```java
package com.codeest.geeknews.model.bean;

import java.util.List;

/**
 * Comment list response model.
 *
 * Plain data bean deserialized from the comments API; holds a list of
 * {@link CommentsBean}, each of which may carry a nested {@link CommentsBean.ReplyToBean}.
 *
 * Created by codeest on 16/8/18.
 */
public class CommentBean {
    /**
     * Sample payload for one comment:
     * author : xiaowei
     * content : ??
     * avatar : path_to_url
     * time : 1413603692
     * reply_to : {"content":" \u201c\u201d \u2026\u2026 \u2026\u2026 ","status":0,"id":545589,"author":"Samuelback"}
     * id : 545838
     * likes : 2
     */
    private List<CommentsBean> comments;

    public List<CommentsBean> getComments() {
        return comments;
    }

    public void setComments(List<CommentsBean> comments) {
        this.comments = comments;
    }

    /** A single comment: author metadata, body text, and an optional quoted reply. */
    public static class CommentsBean {
        private String author;
        private String content;
        private String avatar;
        // Unix timestamp (seconds) — see the sample value 1413603692 above.
        private int time;
        /**
         * Sample payload for the quoted comment:
         * content :
         * status : 0
         * id : 545589
         * author : Samuelback
         */
        private ReplyToBean reply_to;
        private int id;
        private int likes;

        public String getAuthor() {
            return author;
        }

        public void setAuthor(String author) {
            this.author = author;
        }

        public String getContent() {
            return content;
        }

        public void setContent(String content) {
            this.content = content;
        }

        public String getAvatar() {
            return avatar;
        }

        public void setAvatar(String avatar) {
            this.avatar = avatar;
        }

        public int getTime() {
            return time;
        }

        public void setTime(int time) {
            this.time = time;
        }

        public ReplyToBean getReply_to() {
            return reply_to;
        }

        public void setReply_to(ReplyToBean reply_to) {
            this.reply_to = reply_to;
        }

        public int getId() {
            return id;
        }

        public void setId(int id) {
            this.id = id;
        }

        public int getLikes() {
            return likes;
        }

        public void setLikes(int likes) {
            this.likes = likes;
        }

        /** The comment being replied to (a quoted parent comment). */
        public static class ReplyToBean {
            private String content;
            private int status;
            private int id;
            private String author;
            // UI-only state (not part of the API payload): whether the quoted
            // text is expanded in the list view. 0 = collapsed.
            private int expandState = 0;

            public int getExpandState() {
                return expandState;
            }

            public void setExpandState(int expandState) {
                this.expandState = expandState;
            }

            public String getContent() {
                return content;
            }

            public void setContent(String content) {
                this.content = content;
            }

            public int getStatus() {
                return status;
            }

            public void setStatus(int status) {
                this.status = status;
            }

            public int getId() {
                return id;
            }

            public void setId(int id) {
                this.id = id;
            }

            public String getAuthor() {
                return author;
            }

            public void setAuthor(String author) {
                this.author = author;
            }
        }
    }
}
```
```objective-c
/*
 *
 */

#pragma once

#include "esp_bit_defs.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Physical/virtual memory map of the SoC.
 * NOTE(review): the address values match the ESP32-C3 layout — confirm the
 * target chip before reusing these constants.
 */

/*IRAM0 is connected with Cache IBUS0*/
#define SOC_IRAM0_ADDRESS_LOW           0x4037C000
#define SOC_IRAM0_ADDRESS_HIGH          0x403E0000
#define SOC_IRAM0_CACHE_ADDRESS_LOW     0x42000000
#define SOC_IRAM0_CACHE_ADDRESS_HIGH    0x42800000

/*DRAM0 is connected with Cache DBUS0*/
#define SOC_DRAM0_ADDRESS_LOW           0x3FC80000
#define SOC_DRAM0_ADDRESS_HIGH          0x3FCE0000
#define SOC_DRAM0_CACHE_ADDRESS_LOW     0x3C000000
#define SOC_DRAM0_CACHE_ADDRESS_HIGH    0x3C800000

/* Flash-mapped regions are simply aliases for the cached bus windows above. */
#define SOC_IRAM_FLASH_ADDRESS_LOW      SOC_IRAM0_CACHE_ADDRESS_LOW
#define SOC_IRAM_FLASH_ADDRESS_HIGH     SOC_IRAM0_CACHE_ADDRESS_HIGH

#define SOC_DRAM_FLASH_ADDRESS_LOW      SOC_DRAM0_CACHE_ADDRESS_LOW
#define SOC_DRAM_FLASH_ADDRESS_HIGH     SOC_DRAM0_CACHE_ADDRESS_HIGH

/* Size of a bus window, derived from its _ADDRESS_LOW/_ADDRESS_HIGH pair. */
#define SOC_BUS_SIZE(bus_name)            (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
/* True when vaddr lies inside the half-open range [LOW, HIGH) of the bus. */
#define SOC_ADDRESS_IN_BUS(bus_name, vaddr)    ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)

#define SOC_ADDRESS_IN_IRAM0(vaddr)        SOC_ADDRESS_IN_BUS(SOC_IRAM0, vaddr)
#define SOC_ADDRESS_IN_IRAM0_CACHE(vaddr)  SOC_ADDRESS_IN_BUS(SOC_IRAM0_CACHE, vaddr)
#define SOC_ADDRESS_IN_DRAM0(vaddr)        SOC_ADDRESS_IN_BUS(SOC_DRAM0, vaddr)
#define SOC_ADDRESS_IN_DRAM0_CACHE(vaddr)  SOC_ADDRESS_IN_BUS(SOC_DRAM0_CACHE, vaddr)

/* MMU entry flag bits. */
#define SOC_MMU_INVALID                 BIT(8)
#define SOC_MMU_VALID                   0
#define SOC_MMU_TYPE                    0
#define SOC_MMU_ACCESS_FLASH            0

/**
 * MMU entry valid bit mask for mapping value. For an entry:
 * valid bit + value bits
 * valid bit is BIT(8), so value bits are 0xff
 */
#define SOC_MMU_VALID_VAL_MASK          0xff

/**
 * Max MMU available paddr page num.
 * `SOC_MMU_MAX_PADDR_PAGE_NUM * SOC_MMU_PAGE_SIZE` means the max paddr address supported by the MMU. e.g.:
 * 256 * 64KB, means MMU can support 16MB paddr at most
 */
#define SOC_MMU_MAX_PADDR_PAGE_NUM      256

/**
 * This is the mask used for mapping. e.g.:
 * 0x4200_0000 & SOC_MMU_VADDR_MASK
 */
#define SOC_MMU_VADDR_MASK              0x7FFFFF

//MMU entry num
#define SOC_MMU_ENTRY_NUM               128

#define SOC_MMU_DBUS_VADDR_BASE         0x3C000000
#define SOC_MMU_IBUS_VADDR_BASE         0x42000000

/*------------------------------------------------------------------
 * MMU Linear Address
 *------------------------------------------------------------------*/
/**
 * - 64KB MMU page size: the last 0xFFFF, which is the offset
 * - 128 MMU entries, needs 0x7F to hold it.
 *
 * Therefore, 0x7F,FFFF
 */
#define SOC_MMU_LINEAR_ADDR_MASK        0x7FFFFF

/**
 * - If high linear address isn't 0, this means MMU can recognize these addresses
 * - If high linear address is 0, this means MMU linear address range is equal or smaller than vaddr range.
 *   Under this condition, we use the max linear space.
 */
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW    (SOC_IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((SOC_IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH   (SOC_IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH   (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif

#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW    (SOC_DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((SOC_DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH   (SOC_DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH   (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif

/**
 * I/D share the MMU linear address range
 */
#ifndef __cplusplus
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW,
               "IRAM0 and DRAM0 linear address should be same");
#endif

/**
 * ROM flash mmap driver needs below definitions
 */
#define BUS_IRAM0_CACHE_SIZE            SOC_BUS_SIZE(SOC_IRAM0_CACHE)
#define BUS_DRAM0_CACHE_SIZE            SOC_BUS_SIZE(SOC_DRAM0_CACHE)

#define CACHE_IBUS                      0
#define CACHE_IBUS_MMU_START            0
#define CACHE_IBUS_MMU_END              0x200

#define CACHE_DBUS                      1
#define CACHE_DBUS_MMU_START            0
#define CACHE_DBUS_MMU_END              0x200

#define CACHE_IROM_MMU_START            0
#define CACHE_IROM_MMU_END              Cache_Get_IROM_MMU_End()
#define CACHE_IROM_MMU_SIZE             (CACHE_IROM_MMU_END - CACHE_IROM_MMU_START)

#define CACHE_DROM_MMU_START            CACHE_IROM_MMU_END
#define CACHE_DROM_MMU_END              Cache_Get_DROM_MMU_End()
#define CACHE_DROM_MMU_SIZE             (CACHE_DROM_MMU_END - CACHE_DROM_MMU_START)

#define CACHE_DROM_MMU_MAX_END          0x200

#define ICACHE_MMU_SIZE                 0x200
#define DCACHE_MMU_SIZE                 0x200

#define MMU_BUS_START(i)                0
#define MMU_BUS_SIZE(i)                 0x200

#ifdef __cplusplus
}
#endif
```
The Apple A17 Pro is a 64-bit ARM-based system on a chip (SoC) designed by Apple Inc. and manufactured by TSMC. It is used in the iPhone 15 Pro and iPhone 15 Pro Max models only. Design The Apple A17 Pro features an Apple-designed 64-bit six-core CPU with two high-performance cores running at 3.78 GHz, and four energy-efficient cores running at 2.11 GHz. Apple claims the new high-performance cores are 10% faster due to its improved branch prediction, and wider decode & execution engines, and the new energy-efficient cores are faster and 3x more efficient than the competition. The A17 Pro integrates a new Apple-designed six-core GPU, which Apple claims is 20% faster and their biggest redesign in the history of Apple GPUs, with added hardware accelerated ray tracing and mesh shading support. The 16-core Neural Engine is now capable of 35 trillion operations per second. The A17 Pro also added support for AV1 decoding and USB 3.2 Gen 2 (est. up to 10Gbps / 1.25GBps). The A17 Pro contains 19 billion transistors, a 19% increase from the A16's transistor count of 16 billion, and is fabricated by TSMC on their 3 nm N3B process. Products that include the Apple A17 Pro iPhone 15 Pro & 15 Pro Max A15, A16 and A17 See also Apple silicon, range of ARM-based processors designed by Apple for their products Comparison of Armv8-A processors References Computer-related introductions in 2023 Apple silicon
```objective-c
/*
 *
 */

#ifndef ZEPHYR_INCLUDE_SYS_HASH_FUNCTION_H_
#define ZEPHYR_INCLUDE_SYS_HASH_FUNCTION_H_

#include <stddef.h>
#include <stdint.h>

#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util_macro.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @ingroup hashmap_apis
 * @defgroup hash_functions Hash Functions
 * @{
 */

/**
 * @brief 32-bit Hash function interface
 *
 * Hash functions are used to map data from an arbitrarily large space to a
 * (typically smaller) fixed-size space. For a given input, a hash function will
 * consistently generate the same, semi-unique numerical value. Even for
 * marginally different data, a good hash function will distribute the entropy
 * almost evenly over all bits in the hashed value when combined with modulo
 * arithmetic over a finite-sized numeric field.
 *
 * @param str a string of input data
 * @param n the number of bytes in @p str
 *
 * @return the numeric hash associated with @p str
 */
typedef uint32_t (*sys_hash_func32_t)(const void *str, size_t n);

/**
 * @brief The naive identity hash function
 *
 * This hash function requires that @p n is equal to the size of a primitive
 * type, such as `[u]int8_t`, `[u]int16_t`, `[u]int32_t`, `[u]int64_t`,
 * `float`, `double`, or `void *`, and that the alignment of @p str agrees
 * with that of the respective native type.
 *
 * @note The identity hash function is used for testing @ref sys_hashmap.
 *
 * @param str a string of input data
 * @param n the number of bytes in @p str
 *
 * @return the numeric hash associated with @p str
 */
static inline uint32_t sys_hash32_identity(const void *str, size_t n)
{
	/* Reinterpret the input as the primitive type of matching size.
	 * A 64-bit input is truncated to its low 32 bits.
	 */
	switch (n) {
	case sizeof(uint8_t):
		return *(uint8_t *)str;
	case sizeof(uint16_t):
		return *(uint16_t *)str;
	case sizeof(uint32_t):
		return *(uint32_t *)str;
	case sizeof(uint64_t):
		return (uint32_t)(*(uint64_t *)str);
	default:
		break;
	}

	/* Any other length is a caller error; returns 0 when asserts are off. */
	__ASSERT(false, "invalid str length %zu", n);

	return 0;
}

/**
 * @brief Daniel J.\ Bernstein's hash function
 *
 * Some notes:
 * - normally, this hash function is used on NUL-terminated strings
 * - it has been modified to support arbitrary sequences of bytes
 * - it has been modified to use XOR rather than addition
 *
 * @param str a string of input data
 * @param n the number of bytes in @p str
 *
 * @return the numeric hash associated with @p str
 *
 * @note enable with @kconfig{CONFIG_SYS_HASH_FUNC32_DJB2}
 *
 * @see path_to_url
 */
uint32_t sys_hash32_djb2(const void *str, size_t n);

/**
 * @brief Murmur3 hash function
 *
 * @param str a string of input data
 * @param n the number of bytes in @p str
 *
 * @return the numeric hash associated with @p str
 *
 * @note enable with @kconfig{CONFIG_SYS_HASH_FUNC32_MURMUR3}
 *
 * @see path_to_url
 */
uint32_t sys_hash32_murmur3(const void *str, size_t n);

/**
 * @brief System default 32-bit hash function
 *
 * Dispatches to exactly one implementation, selected at build time by the
 * `CONFIG_SYS_HASH_FUNC32_CHOICE_*` Kconfig choice; the untaken branches
 * are compiled out via IS_ENABLED().
 *
 * @param str a string of input data
 * @param n the number of bytes in @p str
 *
 * @return the numeric hash associated with @p str
 */
static inline uint32_t sys_hash32(const void *str, size_t n)
{
	if (IS_ENABLED(CONFIG_SYS_HASH_FUNC32_CHOICE_IDENTITY)) {
		return sys_hash32_identity(str, n);
	}

	if (IS_ENABLED(CONFIG_SYS_HASH_FUNC32_CHOICE_DJB2)) {
		return sys_hash32_djb2(str, n);
	}

	if (IS_ENABLED(CONFIG_SYS_HASH_FUNC32_CHOICE_MURMUR3)) {
		return sys_hash32_murmur3(str, n);
	}

	/* No choice selected: misconfigured build. */
	__ASSERT(0, "No default 32-bit hash. See CONFIG_SYS_HASH_FUNC32_CHOICE");

	return 0;
}

/**
 * @}
 */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SYS_HASH_FUNCTION_H_ */
```
```c++ // 2016 and later: Unicode, Inc. and others. /* ****************************************************************************** * * Corporation and others. All Rights Reserved. * ****************************************************************************** */ #include "unicode/utypes.h" #include "unicode/icudataver.h" #include "unicode/ures.h" #include "uresimp.h" /* for ures_getVersionByKey */ U_CAPI void U_EXPORT2 u_getDataVersion(UVersionInfo dataVersionFillin, UErrorCode *status) { UResourceBundle *icudatares = NULL; if (U_FAILURE(*status)) { return; } if (dataVersionFillin != NULL) { icudatares = ures_openDirect(NULL, U_ICU_VERSION_BUNDLE , status); if (U_SUCCESS(*status)) { ures_getVersionByKey(icudatares, U_ICU_DATA_KEY, dataVersionFillin, status); } ures_close(icudatares); } } ```
Alajõe is a village in Peipsiääre Parish, Tartu County in Estonia. References Villages in Tartu County
```objective-c
/*
 *
 * in the file LICENSE in the source distribution or at
 * path_to_url
 */

#include <openssl/provider.h>
#include <openssl/types.h>

/* Provider-side wrapper bundling a cipher with its fetch/engine state. */
typedef struct {
    /*
     * References to the underlying cipher implementation.  |cipher| caches
     * the cipher, always.  |alloc_cipher| only holds a reference to an
     * explicitly fetched cipher.
     */
    const EVP_CIPHER *cipher;   /* cipher */
    EVP_CIPHER *alloc_cipher;   /* fetched cipher */

    /* Conditions for legacy EVP_CIPHER uses */
    ENGINE *engine;             /* cipher engine */
} PROV_CIPHER;

/* Provider-side wrapper bundling a digest with its fetch/engine state. */
typedef struct {
    /*
     * References to the underlying digest implementation.  |md| caches
     * the digest, always.  |alloc_md| only holds a reference to an explicitly
     * fetched digest.
     */
    const EVP_MD *md;           /* digest */
    EVP_MD *alloc_md;           /* fetched digest */

    /* Conditions for legacy EVP_MD uses */
    ENGINE *engine;             /* digest engine */
} PROV_DIGEST;

/* Cipher functions */
/*
 * Load a cipher from the specified parameters with the specified context.
 * The params "properties", "engine" and "cipher" are used to determine the
 * implementation used.  If a provider cannot be found, it falls back to trying
 * non-provider based implementations.
 */
int ossl_prov_cipher_load_from_params(PROV_CIPHER *pc,
                                      const OSSL_PARAM params[],
                                      OSSL_LIB_CTX *ctx);

/* Reset the PROV_CIPHER fields and free any allocated cipher reference */
void ossl_prov_cipher_reset(PROV_CIPHER *pc);

/* Clone a PROV_CIPHER structure into a second */
int ossl_prov_cipher_copy(PROV_CIPHER *dst, const PROV_CIPHER *src);

/* Query the cipher and associated engine (if any) */
const EVP_CIPHER *ossl_prov_cipher_cipher(const PROV_CIPHER *pc);
ENGINE *ossl_prov_cipher_engine(const PROV_CIPHER *pc);

/* Digest functions */

/*
 * Fetch a digest from the specified libctx using the provided mdname and
 * propquery. Store the result in the PROV_DIGEST and return the fetched md.
 */
const EVP_MD *ossl_prov_digest_fetch(PROV_DIGEST *pd, OSSL_LIB_CTX *libctx,
                                     const char *mdname, const char *propquery);

/*
 * Load a digest from the specified parameters with the specified context.
 * The params "properties", "engine" and "digest" are used to determine the
 * implementation used.  If a provider cannot be found, it falls back to trying
 * non-provider based implementations.
 */
int ossl_prov_digest_load_from_params(PROV_DIGEST *pd,
                                      const OSSL_PARAM params[],
                                      OSSL_LIB_CTX *ctx);

/* Reset the PROV_DIGEST fields and free any allocated digest reference */
void ossl_prov_digest_reset(PROV_DIGEST *pd);

/* Clone a PROV_DIGEST structure into a second */
int ossl_prov_digest_copy(PROV_DIGEST *dst, const PROV_DIGEST *src);

/* Query the digest and associated engine (if any) */
const EVP_MD *ossl_prov_digest_md(const PROV_DIGEST *pd);
ENGINE *ossl_prov_digest_engine(const PROV_DIGEST *pd);

/*
 * Set the various parameters on an EVP_MAC_CTX from the supplied arguments.
 * If any of the supplied ciphername/mdname etc are NULL then the values
 * from the supplied params (if non NULL) are used instead.
 */
int ossl_prov_set_macctx(EVP_MAC_CTX *macctx,
                         const OSSL_PARAM params[],
                         const char *ciphername,
                         const char *mdname,
                         const char *engine,
                         const char *properties,
                         const unsigned char *key,
                         size_t keylen);

/* MAC functions */
/*
 * Load an EVP_MAC_CTX* from the specified parameters with the specified
 * library context.
 * The params "mac" and "properties" are used to determine the implementation
 * used, and the parameters "digest", "cipher", "engine" and "properties" are
 * passed to the MAC via the created MAC context if they are given.
 * If there is already a created MAC context, it will be replaced if the "mac"
 * parameter is found, otherwise it will simply be used as is, and passed the
 * parameters to pilfer as it sees fit.
 *
 * As an option, a MAC name may be explicitly given, and if it is, the "mac"
 * parameter will be ignored.
 * Similarly, as an option, a cipher name or a digest name may be explicitly
 * given, and if any of them is, the "digest" and "cipher" parameters are
 * ignored.
 */
int ossl_prov_macctx_load_from_params(EVP_MAC_CTX **macctx,
                                      const OSSL_PARAM params[],
                                      const char *macname,
                                      const char *ciphername,
                                      const char *mdname,
                                      OSSL_LIB_CTX *ctx);

/* An OSSL_ALGORITHM paired with a runtime capability predicate. */
typedef struct ag_capable_st {
    OSSL_ALGORITHM alg;
    int (*capable)(void);
} OSSL_ALGORITHM_CAPABLE;

/*
 * Dynamically select algorithms by calling a capable() method.
 * If this method is NULL or the method returns 1 then the algorithm is added.
 */
void ossl_prov_cache_exported_algorithms(const OSSL_ALGORITHM_CAPABLE *in,
                                         OSSL_ALGORITHM *out);

/* Duplicate a lump of memory safely */
int ossl_prov_memdup(const void *src, size_t src_len,
                     unsigned char **dest, size_t *dest_len);
```
```php
<?php
/**
 * FecShop file.
 *
 * @link path_to_url
 * @license path_to_url
 */

namespace fecshop\app\appserver\modules\Payment\controllers;

use fecshop\app\appserver\modules\Payment\PaymentController;
use Yii;

/**
 * WeChat Pay (H5) payment controller.
 *
 * Drives the H5 (mobile browser) WeChat payment flow: starting a payment,
 * polling its review/success status, and receiving the asynchronous IPN.
 *
 * @author Terry Zhao <2358269014@qq.com>
 * @since 1.0
 */
class Wxpayh5Controller extends PaymentController
{
    // Payment callbacks come from WeChat, not from our own forms.
    public $enableCsrfValidation = false;

    // Order increment id for the order being paid (set by checkOrder()).
    protected $_increment_id;

    // Order model for the order being paid (set by checkOrder()).
    protected $_order_model;

    /**
     * Start an H5 WeChat payment.
     *
     * Builds the WeChat "mweb_url" and appends the caller-supplied return
     * URL, then returns the combined redirect URL in the response payload.
     *
     * @return array response data containing `redirectUrl`
     */
    public function actionStart()
    {
        // CORS preflight: answer OPTIONS with an empty body.
        if (Yii::$app->request->getMethod() === 'OPTIONS') {
            return [];
        }
        $checkOrder = $this->checkOrder();
        if ($checkOrder !== true) {
            return $checkOrder;
        }
        $return_Url = Yii::$app->request->post('return_url');
        //Yii::$service->page->theme->layoutFile = 'wxpay_jsapi.php';
        $objectxml = Yii::$service->payment->wxpayH5->getScanCodeStart();
        //var_dump($objectxml);
        //$returnUrl = Yii::$service->payment->getStandardReturnUrl();
        // WeChat requires the redirect_url query argument to be URL-encoded.
        $return_Url = urlencode($return_Url);
        $redirectUrl = $objectxml['mweb_url'] . '&redirect_url=' . $return_Url;
        $data = [
            'redirectUrl' => $redirectUrl,
        ];
        $code = Yii::$service->helper->appserver->status_success;
        $responseData = Yii::$service->helper->appserver->getResponseData($code, $data);

        return $responseData;
    }

    /**
     * Check whether the pending WeChat trade has completed.
     *
     * @return array success response when the trade is paid, otherwise an
     *               error response carrying the collected error messages
     */
    public function actionReview()
    {
        // CORS preflight: answer OPTIONS with an empty body.
        if (Yii::$app->request->getMethod() === 'OPTIONS') {
            return [];
        }
        $checkOrder = $this->checkOrder();
        if ($checkOrder !== true) {
            return $checkOrder;
        }
        $out_trade_no = $this->_increment_id;
        // NOTE(review): this queries the generic wxpay service rather than
        // wxpayH5 — confirm that is intentional for the H5 flow.
        $reviewStatus = Yii::$service->payment->wxpay->scanCodeCheckTradeIsSuccess($out_trade_no);
        if ($reviewStatus) {
            // BUG FIX: $redirectUrl was previously used here without ever
            // being assigned (undefined-variable notice, null in the
            // response). The key is kept for backward compatibility but is
            // now explicitly defined; the client already knows its own
            // return URL after a successful review.
            $redirectUrl = '';
            $data = [
                'redirectUrl' => $redirectUrl,
            ];
            $code = Yii::$service->helper->appserver->status_success;
            $responseData = Yii::$service->helper->appserver->getResponseData($code, $data);

            return $responseData;
        } else {
            $errors = Yii::$service->helper->errors->get(',');
            $data = [
                'errors' => $errors,
            ];
            $code = Yii::$service->helper->appserver->order_wxpay_payment_fail;
            $responseData = Yii::$service->helper->appserver->getResponseData($code, $data);

            return $responseData;
        }
    }

    /**
     * IPN
     * IPNsession increment_id
     *
     * Asynchronous payment notification endpoint called by WeChat; delegates
     * all verification and order updates to the wxpay service.
     */
    public function actionIpn()
    {
        Yii::$service->payment->wxpay->ipn();
    }

    /**
     * Render the payment-success page and clean up checkout session state.
     */
    public function actionSuccess()
    {
        $data = [
            'increment_id' => $this->_increment_id,
        ];
        // ()
        if (Yii::$app->user->isGuest) {
            Yii::$service->cart->clearCartProductAndCoupon();
        }
        // sessionincrement_id
        Yii::$service->order->removeSessionIncrementId();

        return $this->render('../../payment/checkmoney/success', $data);
    }
}
```
George Frederick Bullock (12 August 1918 – November 2006) was an English professional golfer. He died from motor neurone disease. He finished in the top-10 four times in The Open Championship: T-8 in 1938, T-7 in 1950, 8th in 1952, and T-2 in 1959. Until late 1946 he was an assistant professional at Holyhead Golf Club on Anglesey, Wales where his father George was the professional. Aged 17, he was runner-up in the 1936 Welsh Professional Championship at Prestatyn, behind Fred Lloyd. After two years as professional at Otley Golf Club he became playing assistant professional at Royal Lytham & St Annes Golf Club and then the professional at Glasgow Golf Club in late 1950. At the end of 1955 he moved to Moortown Golf Club. He was later the club professional at Prestwick St Ninians, Caird Park Golf Club and Largs Golf Club. He had two children: Sandra, who caddied for him in the 1959 Open, and Freida. Results in major championships Note: Bullock only played in The Open Championship. NT = No tournament CUT = missed the half-way cut "T" indicates a tie for a place References English male golfers Sportspeople from Warwickshire Neurological disease deaths in England Deaths from motor neuron disease 1918 births 2006 deaths
```go
//
//
//
// path_to_url
//
// Unless required by applicable law or agreed to in writing, software
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//

package gitlab

import (
	"bytes"
	"fmt"
	"time"
)

// SnippetsService handles communication with the snippets
// related methods of the GitLab API.
//
// GitLab API docs: path_to_url
type SnippetsService struct {
	client *Client
}

// Snippet represents a GitLab snippet.
//
// GitLab API docs: path_to_url
type Snippet struct {
	ID          int    `json:"id"`
	Title       string `json:"title"`
	FileName    string `json:"file_name"`
	Description string `json:"description"`
	// Author is the snippet creator as embedded in the API response.
	Author struct {
		ID        int        `json:"id"`
		Username  string     `json:"username"`
		Email     string     `json:"email"`
		Name      string     `json:"name"`
		State     string     `json:"state"`
		CreatedAt *time.Time `json:"created_at"`
	} `json:"author"`
	UpdatedAt *time.Time `json:"updated_at"`
	CreatedAt *time.Time `json:"created_at"`
	WebURL    string     `json:"web_url"`
	RawURL    string     `json:"raw_url"`
}

// String returns a human-readable dump of the snippet for debugging.
func (s Snippet) String() string {
	return Stringify(s)
}

// ListSnippetsOptions represents the available ListSnippets() options.
//
// GitLab API docs: path_to_url#list-snippets
type ListSnippetsOptions ListOptions

// ListSnippets gets a list of snippets.
//
// GitLab API docs: path_to_url#list-snippets
func (s *SnippetsService) ListSnippets(opt *ListSnippetsOptions, options ...OptionFunc) ([]*Snippet, *Response, error) {
	req, err := s.client.NewRequest("GET", "snippets", opt, options)
	if err != nil {
		return nil, nil, err
	}

	var ps []*Snippet
	resp, err := s.client.Do(req, &ps)
	if err != nil {
		return nil, resp, err
	}

	return ps, resp, err
}

// GetSnippet gets a single snippet
//
// GitLab API docs:
// path_to_url#single-snippet
func (s *SnippetsService) GetSnippet(snippet int, options ...OptionFunc) (*Snippet, *Response, error) {
	u := fmt.Sprintf("snippets/%d", snippet)

	req, err := s.client.NewRequest("GET", u, nil, options)
	if err != nil {
		return nil, nil, err
	}

	ps := new(Snippet)
	resp, err := s.client.Do(req, ps)
	if err != nil {
		return nil, resp, err
	}

	return ps, resp, err
}

// CreateSnippetOptions represents the available CreateSnippet() options.
//
// GitLab API docs:
// path_to_url#create-new-snippet
type CreateSnippetOptions struct {
	Title       *string          `url:"title,omitempty" json:"title,omitempty"`
	FileName    *string          `url:"file_name,omitempty" json:"file_name,omitempty"`
	Description *string          `url:"description,omitempty" json:"description,omitempty"`
	Content     *string          `url:"content,omitempty" json:"content,omitempty"`
	Visibility  *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
}

// CreateSnippet creates a new snippet. The user must have permission
// to create new snippets.
//
// GitLab API docs:
// path_to_url#create-new-snippet
func (s *SnippetsService) CreateSnippet(opt *CreateSnippetOptions, options ...OptionFunc) (*Snippet, *Response, error) {
	req, err := s.client.NewRequest("POST", "snippets", opt, options)
	if err != nil {
		return nil, nil, err
	}

	ps := new(Snippet)
	resp, err := s.client.Do(req, ps)
	if err != nil {
		return nil, resp, err
	}

	return ps, resp, err
}

// UpdateSnippetOptions represents the available UpdateSnippet() options.
//
// GitLab API docs:
// path_to_url#update-snippet
type UpdateSnippetOptions struct {
	Title       *string          `url:"title,omitempty" json:"title,omitempty"`
	FileName    *string          `url:"file_name,omitempty" json:"file_name,omitempty"`
	Description *string          `url:"description,omitempty" json:"description,omitempty"`
	Content     *string          `url:"content,omitempty" json:"content,omitempty"`
	Visibility  *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
}

// UpdateSnippet updates an existing snippet. The user must have
// permission to change an existing snippet.
//
// GitLab API docs:
// path_to_url#update-snippet
func (s *SnippetsService) UpdateSnippet(snippet int, opt *UpdateSnippetOptions, options ...OptionFunc) (*Snippet, *Response, error) {
	u := fmt.Sprintf("snippets/%d", snippet)

	req, err := s.client.NewRequest("PUT", u, opt, options)
	if err != nil {
		return nil, nil, err
	}

	ps := new(Snippet)
	resp, err := s.client.Do(req, ps)
	if err != nil {
		return nil, resp, err
	}

	return ps, resp, err
}

// DeleteSnippet deletes an existing snippet. This is an idempotent
// function and deleting a non-existent snippet still returns a 200 OK status
// code.
//
// GitLab API docs:
// path_to_url#delete-snippet
func (s *SnippetsService) DeleteSnippet(snippet int, options ...OptionFunc) (*Response, error) {
	u := fmt.Sprintf("snippets/%d", snippet)

	req, err := s.client.NewRequest("DELETE", u, nil, options)
	if err != nil {
		return nil, err
	}

	return s.client.Do(req, nil)
}

// SnippetContent returns the raw snippet as plain text.
// // GitLab API docs: // path_to_url#snippet-content func (s *SnippetsService) SnippetContent(snippet int, options ...OptionFunc) ([]byte, *Response, error) { u := fmt.Sprintf("snippets/%d/raw", snippet) req, err := s.client.NewRequest("GET", u, nil, options) if err != nil { return nil, nil, err } var b bytes.Buffer resp, err := s.client.Do(req, &b) if err != nil { return nil, resp, err } return b.Bytes(), resp, err } // ExploreSnippetsOptions represents the available ExploreSnippets() options. // // GitLab API docs: // path_to_url#explore-all-public-snippets type ExploreSnippetsOptions ListOptions // ExploreSnippets gets the list of public snippets. // // GitLab API docs: // path_to_url#explore-all-public-snippets func (s *SnippetsService) ExploreSnippets(opt *ExploreSnippetsOptions, options ...OptionFunc) ([]*Snippet, *Response, error) { req, err := s.client.NewRequest("GET", "snippets/public", nil, options) if err != nil { return nil, nil, err } var ps []*Snippet resp, err := s.client.Do(req, &ps) if err != nil { return nil, resp, err } return ps, resp, err } ```
```sqlpl
-- Regression test: Citus behavior with tables using a non-default
-- (append-only fake) table access method, covering hash-distributed,
-- reference, range-distributed and partitioned tables, plus shard copy.
SET citus.shard_replication_factor to 1;
SET citus.next_shard_id TO 60000;
SET citus.next_placement_id TO 60000;
SET citus.shard_count TO 4;

create schema test_tableam;
set search_path to test_tableam;

-- Register the fake access method everywhere; DDL propagation is disabled
-- because the AM is created manually on each node.
SELECT public.run_command_on_coordinator_and_workers($Q$
	SET citus.enable_ddl_propagation TO off;
	CREATE FUNCTION fake_am_handler(internal)
	RETURNS table_am_handler
	AS 'citus'
	LANGUAGE C;
	CREATE ACCESS METHOD fake_am TYPE TABLE HANDLER fake_am_handler;
$Q$);

-- Since Citus assumes access methods are part of the extension, make fake_am
-- owned manually to be able to pass checks on Citus while distributing tables.
ALTER EXTENSION citus ADD ACCESS METHOD fake_am;

--
-- Hash distributed table using a non-default table access method
--

create table test_hash_dist(id int, val int) using fake_am;
insert into test_hash_dist values (1, 1);

select create_distributed_table('test_hash_dist','id', colocate_with := 'none');

select * from test_hash_dist;
insert into test_hash_dist values (1, 1);

-- we should error on following, since this AM is append only
SET client_min_messages TO ERROR;
delete from test_hash_dist where id=1;
update test_hash_dist set val=2 where id=2;
RESET client_min_messages;

-- ddl events should include "USING fake_am"
SELECT * FROM master_get_table_ddl_events('test_hash_dist');

--
-- Reference table using a non-default table access method
--

create table test_ref(a int) using fake_am;
insert into test_ref values (1);

select create_reference_table('test_ref');

select * from test_ref;
insert into test_ref values (1);

-- we should error on following, since this AM is append only
SET client_min_messages TO ERROR;
delete from test_ref;
update test_ref set a=2;
RESET client_min_messages;

-- ddl events should include "USING fake_am"
SELECT * FROM master_get_table_ddl_events('test_ref');

--
-- Range partitioned table using a non-default table access method
--

CREATE TABLE test_range_dist(id int, val int) using fake_am;
SELECT create_distributed_table('test_range_dist', 'id', 'range');
CALL public.create_range_partitioned_shards('test_range_dist', '{"0","25"}','{"24","49"}');

select * from test_range_dist;

insert into test_range_dist values (1, 1);
COPY test_range_dist FROM PROGRAM 'echo 0, 0 && echo 1, -1 && echo 2, 4 && echo 3, 9' WITH CSV;
COPY test_range_dist FROM PROGRAM 'echo 25, 16 && echo 26, 1 && echo 27, 4 && echo 7, 9' WITH CSV;

-- ddl events should include "USING fake_am"
SELECT * FROM master_get_table_ddl_events('test_range_dist');

--
-- Test copy_copy_shard_placement with a fake_am table
--

select a.shardid, a.nodeport
FROM pg_dist_shard b, pg_dist_shard_placement a
WHERE a.shardid=b.shardid AND logicalrelid = 'test_hash_dist'::regclass::oid
ORDER BY a.shardid, nodeport;

-- Change repmodel to allow copy_copy_shard_placement
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid = 'test_hash_dist'::regclass;

SELECT citus_copy_shard_placement(
	   get_shard_id_for_distribution_column('test_hash_dist', '1'),
	   'localhost', :worker_1_port,
	   'localhost', :worker_2_port,
	   transfer_mode := 'block_writes');

select a.shardid, a.nodeport
FROM pg_dist_shard b, pg_dist_shard_placement a
WHERE a.shardid=b.shardid AND logicalrelid = 'test_hash_dist'::regclass::oid
ORDER BY a.shardid, nodeport;

-- verify that data was copied correctly
\c - - - :worker_1_port
select * from test_tableam.test_hash_dist_60000 ORDER BY id;

\c - - - :worker_2_port
select * from test_tableam.test_hash_dist_60000 ORDER BY id;

\c - - - :master_port
set search_path to test_tableam;

--
-- Test that partitioned tables work correctly with a fake_am table
--

-- parent using default am, one of children using fake_am
CREATE TABLE test_partitioned(id int, p int, val int)
PARTITION BY RANGE (p);

CREATE TABLE test_partitioned_p1 PARTITION OF test_partitioned
  FOR VALUES FROM (1) TO (10);
CREATE TABLE test_partitioned_p2 PARTITION OF test_partitioned
  FOR VALUES FROM (11) TO (20) USING fake_am;

INSERT INTO test_partitioned VALUES (1, 5, -1), (2, 15, -2);

SELECT create_distributed_table('test_partitioned', 'id');

INSERT INTO test_partitioned VALUES (3, 6, -6), (4, 16, -4);

SELECT count(*) FROM test_partitioned;

DROP TABLE test_partitioned;

-- Specifying access method in parent is not supported.
-- If the below statement ever succeeds, add more tests for
-- the case where children inherit access method from parent.
CREATE TABLE test_partitioned(id int, p int, val int)
PARTITION BY RANGE (p) USING fake_am;

\set VERBOSITY terse
ALTER EXTENSION citus DROP ACCESS METHOD fake_am;
drop schema test_tableam cascade;
```
```javascript
// ESLint configuration: extends the shared Proton preset and parses
// TypeScript against this package's tsconfig for type-aware rules.
module.exports = {
    extends: ['@proton/eslint-config-proton'],
    parser: '@typescript-eslint/parser',
    parserOptions: {
        tsconfigRootDir: __dirname,
        project: './tsconfig.json',
    },
    rules: {
        // CLI/tooling package: console output is expected.
        'no-console': 'off',
        curly: ['error', 'multi-line'],
        // Allow intentionally-unused args when prefixed with an underscore.
        '@typescript-eslint/no-unused-vars': ['error', { argsIgnorePattern: '^_' }],
    },
    // Don't lint this config file itself (it is outside the tsconfig project).
    ignorePatterns: ['.eslintrc.js'],
};
```
```python # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # # path_to_url # # Unless required by applicable law or agreed to in writing, # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # specific language governing permissions and limitations import sys import numpy as np import pytest import tvm import tvm.testing import tvm.tir.tensor_intrin.cuda from tvm import TVMError, te, tir from tvm.meta_schedule.testing import te_workload from tvm.script import tir as T from tvm.testing.tir import mma_schedule from tvm.tir.tensor_intrin.cuda import ( LDMATRIX_f16_A_DYN_INTRIN, LDMATRIX_f16_B_DYN_INTRIN, MMA_f16f16f32_INTRIN, MMA_fill_16x16_f32_INTRIN, MMA_store_16x16_f32_global_INTRIN, shared_16x16_to_ldmatrix_32x8_layout, ) def _check(original, transformed): func = original mod = tvm.IRModule.from_expr(func.with_attr("global_symbol", "main")) mod = tvm.tir.transform.InjectSoftwarePipeline()(mod) mod = tvm.tir.transform.Simplify()(mod) tvm.ir.assert_structural_equal( mod["main"], transformed.with_attr("global_symbol", "main"), True ) def _check_error(func): mod = tvm.IRModule.from_expr(func) with pytest.raises(ValueError): tvm.tir.transform.InjectSoftwarePipeline()(mod) @T.prim_func def trivial_pipeline(A: T.Buffer((16, 1), "float32"), C: T.Buffer((16, 1), "float32")): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 1, annotations={"software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1]} ): with T.block(): T.reads(A[tx, i]) T.writes(C[tx, i]) B = T.alloc_buffer((16, 1), dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(B[tx, 0]) B[tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(B[tx, 0]) T.writes(C[tx, i]) C[tx, i] = B[tx, 0] + T.float32(1) @T.prim_func def transformed_trivial_pipeline( A: T.Buffer((16, 1), "float32"), C: T.Buffer((16, 1), "float32") ) -> 
None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0]) T.writes(C[tx, 0]) B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads(A[tx, 0]) T.writes(B[0, tx, 0]) B[0, tx, 0] = A[tx, 0] * T.float32(2) with T.block(): T.reads() T.writes() T.evaluate(0) with T.block(): T.reads(B[0, tx, 0]) T.writes(C[tx, 0]) C[tx, 0] = B[0, tx, 0] + T.float32(1) def gen_simple_compute(num_stages): @T.prim_func def simple_compute(A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, num_stages], "software_pipeline_order": [0, 1], }, ): with T.block("compute"): T.reads(A[tx, i]) T.writes(C[tx, i]) B = T.alloc_buffer((16, 1), dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(B[tx, 0]) B[tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(B[tx, 0]) T.writes(C[tx, i]) C[tx, i] = B[tx, 0] + T.float32(1) return simple_compute @T.prim_func def transformed_simple_compute( A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16]]) T.writes([C[tx, 0:16]]) B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0]]) T.writes([B[0, tx, 0]]) B[0, tx, 0] = A[tx, 0] * T.float32(2) with T.block(): T.reads([A[tx, 1:16], B[0:2, tx, 0]]) T.writes([B[0:2, tx, 0], C[tx, 0:15]]) for i in T.serial(0, 15): with T.block(): T.reads([A[tx, i + 1]]) T.writes([B[(i + 1) % 2, tx, 0]]) B[(i + 1) % 2, tx, 0] = A[tx, i + 1] * T.float32(2) with T.block(): T.reads([B[i % 2, tx, 0]]) T.writes([C[tx, i]]) C[tx, i] = B[i % 2, tx, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, 0]]) T.writes([C[tx, 15]]) C[tx, 15] = B[1, tx, 0] + T.float32(1) @T.prim_func def dynamic_compute(a_handle: T.handle, c_handle: T.handle): k = T.int32() 
    A = T.match_buffer(a_handle, (16, k), "float32")
    C = T.match_buffer(c_handle, (16, k), "float32")
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            k,
            annotations={
                "software_pipeline_stage": [0, 1],
                "software_pipeline_order": [0, 1],
            },
        ):
            with T.block("compute"):
                T.reads(A[tx, i])
                T.writes(C[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, i])
                    C[tx, i] = B[tx, 0] + T.float32(1)


# Expected IR for dynamic_compute: since k may be smaller than the pipeline
# depth, the prologue/epilogue blocks are guarded by T.where predicates and
# the access regions use T.min/T.max bounds.
@T.prim_func
def transformed_dynamic_compute(a_handle: T.handle, c_handle: T.handle):
    k = T.int32()
    A = T.match_buffer(a_handle, (16, k), "float32")
    C = T.match_buffer(c_handle, (16, k), "float32")
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        with T.block():
            T.reads(A[tx, 0 : T.max(1, k)])
            T.writes(C[tx, T.min(0, k - 1) : T.min(0, k - 1) + T.max(k, 1)])
            B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
            # Prologue, guarded for k == 0.
            with T.block(""):
                T.reads(A[tx, 0])
                T.writes(B[0, tx, 0])
                with T.block(""):
                    T.where(0 < k)
                    T.reads(A[tx, 0])
                    T.writes(B[0, tx, 0])
                    B[0, tx, 0] = A[tx, 0] * T.float32(2)
            # Steady state over k - 1 iterations.
            with T.block(""):
                T.reads(A[tx, 1 : 1 + (k - 1)], B[0:2, tx, 0])
                T.writes(B[0:2, tx, 0], C[tx, 0 : k - 1])
                for i in range(k - 1):
                    with T.block(""):
                        T.reads(A[tx, i + 1])
                        T.writes(B[(i + 1) % 2, tx, 0])
                        B[(i + 1) % 2, tx, 0] = A[tx, i + 1] * T.float32(2)
                    with T.block(""):
                        T.reads(B[i % 2, tx, 0])
                        T.writes(C[tx, i])
                        C[tx, i] = B[i % 2, tx, 0] + T.float32(1)
            # Epilogue, guarded for k == 0.
            with T.block(""):
                T.reads(B[(k + 1) % 2, tx, 0])
                T.writes(C[tx, k - 1])
                with T.block(""):
                    T.where(1 <= k)
                    T.reads(B[(k + 1) % 2, tx, 0])
                    T.writes(C[tx, k - 1])
                    C[tx, k - 1] = B[(k + 1) % 2, tx, 0] + T.float32(1)


# Input carrying an extra, unrelated loop annotation; the transform must
# preserve pragma_loop_partition_hint on the steady-state loop (see the
# transformed version below).
@T.prim_func
def simple_compute_with_other_annotation(
    A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 1],
                "software_pipeline_order": [0, 1],
                "pragma_loop_partition_hint": True,
            },
        ):
            with T.block("compute"):
                T.reads(A[tx, i])
                T.writes(C[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, i])
                    C[tx, i] = B[tx, 0] + T.float32(1)


# Expected IR: identical to transformed_simple_compute except that the
# steady-state loop keeps the pragma_loop_partition_hint annotation.
@T.prim_func
def transformed_simple_compute_with_other_annotation(
    A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")
) -> None:
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        with T.block():
            T.reads([A[tx, 0:16]])
            T.writes([C[tx, 0:16]])
            B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
            with T.block():
                T.reads([A[tx, 0]])
                T.writes([B[0, tx, 0]])
                B[0, tx, 0] = A[tx, 0] * T.float32(2)
            with T.block():
                T.reads([A[tx, 1:16], B[0:2, tx, 0]])
                T.writes([B[0:2, tx, 0], C[tx, 0:15]])
                for i in T.serial(
                    0,
                    15,
                    annotations={"pragma_loop_partition_hint": True},
                ):
                    with T.block():
                        T.reads([A[tx, i + 1]])
                        T.writes([B[(i + 1) % 2, tx, 0]])
                        B[(i + 1) % 2, tx, 0] = A[tx, i + 1] * T.float32(2)
                    with T.block():
                        T.reads([B[i % 2, tx, 0]])
                        T.writes([C[tx, i]])
                        C[tx, i] = B[i % 2, tx, 0] + T.float32(1)
            with T.block():
                T.reads([B[1, tx, 0]])
                T.writes([C[tx, 15]])
                C[tx, 15] = B[1, tx, 0] + T.float32(1)


# Three-stage pipeline: load (stage 0) -> add (stage 1) -> store (stage 2),
# each block in its own stage and order slot.
@T.prim_func
def three_stage_compute(A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32")):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 1, 2],
                "software_pipeline_order": [0, 1, 2],
            },
        ):
            with T.block("compute"):
                T.reads(A[tx, i])
                T.writes(D[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                C = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, 0])
                    C[tx, 0] = B[tx, 0] + T.float32(2)
                with T.block():
                    T.reads(C[tx, 0])
T.writes(D[tx, i]) D[tx, i] = C[tx, 0] + T.float32(1) @T.prim_func def transformed_three_stage_compute( A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32") ) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16]) T.writes(D[tx, 0:16]) B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") C = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads(A[tx, 0:2], B[0:2, tx, 0]) T.writes(B[0:2, tx, 0], C[0:2, tx, 0]) for i in T.unroll(2): with T.block(): T.reads(A[tx, i]) T.writes(B[0:2, tx, 0]) B[i, tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.where(i == 1) T.reads(B[0:2, tx, 0]) T.writes(C[0:2, tx, 0]) C[(i + 1) % 2, tx, 0] = B[(i + 1) % 2, tx, 0] + T.float32(2) with T.block(): T.reads(A[tx, 2:16], B[0:2, tx, 0], C[0:2, tx, 0]) T.writes(B[0:2, tx, 0], C[0:2, tx, 0], D[tx, 0:14]) for i in T.serial(14): with T.block(): T.reads(A[tx, i + 2]) T.writes(B[0:2, tx, 0]) B[i % 2, tx, 0] = A[tx, i + 2] * T.float32(2) with T.block(): T.reads(B[0:2, tx, 0]) T.writes(C[0:2, tx, 0]) C[(i + 1) % 2, tx, 0] = B[(i + 1) % 2, tx, 0] + T.float32(2) with T.block(): T.reads(C[0:2, tx, 0]) T.writes(D[tx, i]) D[tx, i] = C[i % 2, tx, 0] + T.float32(1) with T.block(): T.reads(B[0:2, tx, 0], C[0:2, tx, 0]) T.writes(C[0:2, tx, 0], D[tx, 14:16]) for i in T.unroll(2): with T.block(): T.where(i < 1) T.reads(B[0:2, tx, 0]) T.writes(C[0:2, tx, 0]) C[(i + 1) % 2, tx, 0] = B[(i + 1) % 2, tx, 0] + T.float32(2) with T.block(): T.reads(C[0:2, tx, 0]) T.writes(D[tx, i + 14]) D[tx, i + 14] = C[i, tx, 0] + T.float32(1) @T.prim_func def dag_interleaving( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32"), ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 0, 0, 0, 1], "software_pipeline_order": [0, 2, 1, 3, 4], }, ): with T.block(): T.reads(A[tx, i]) T.writes(C[tx, i]) AS 
= T.alloc_buffer((16, 1), dtype="float32", scope="shared") BS = T.alloc_buffer((16, 1), dtype="float32", scope="shared") AL = T.alloc_buffer((1, 1), dtype="float32", scope="local") BL = T.alloc_buffer((1, 1), dtype="float32", scope="local") with T.block(): T.reads(A[tx, i]) T.writes(AS[tx, 0]) AS[tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(AS[tx, 0]) T.writes(AL[0, 0]) AL[0, 0] = AS[tx, 0] with T.block(): T.reads(B[tx, i]) T.writes(BS[tx, 0]) BS[tx, 0] = B[tx, i] + T.float32(2) with T.block(): T.reads(BS[tx, 0]) T.writes(BL[0, 0]) BL[0, 0] = BS[tx, 0] with T.block(): T.reads(AL[0, 0], BL[0, 0]) T.writes(C[tx, i]) C[tx, i] = AL[0, 0] * BL[0, 0] @T.prim_func def transformed_dag_interleaving( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32"), ) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16], B[tx, 0:16]) T.writes(C[tx, 0:16]) AS = T.alloc_buffer([16, 1], dtype="float32", scope="shared") BS = T.alloc_buffer([16, 1], dtype="float32", scope="shared") AL = T.alloc_buffer([2, 1, 1], dtype="float32", scope="local") BL = T.alloc_buffer([2, 1, 1], dtype="float32", scope="local") with T.block(): T.reads(A[tx, 0], B[tx, 0], AS[tx, 0], BS[tx, 0]) T.writes(AS[tx, 0], BS[tx, 0], AL[0, 0, 0], BL[0, 0, 0]) with T.block(): T.reads(A[tx, 0]) T.writes(AS[tx, 0]) AS[tx, 0] = A[tx, 0] * T.float32(2) with T.block(): T.reads(B[tx, 0]) T.writes(BS[tx, 0]) BS[tx, 0] = B[tx, 0] + T.float32(2) with T.block(): T.reads(AS[tx, 0]) T.writes(AL[0, 0, 0]) AL[0, 0, 0] = AS[tx, 0] with T.block(): T.reads(BS[tx, 0]) T.writes(BL[0, 0, 0]) BL[0, 0, 0] = BS[tx, 0] with T.block(): T.reads( A[tx, 1:16], B[tx, 1:16], AS[tx, 0], BS[tx, 0], AL[0:2, 0, 0], BL[0:2, 0, 0] ) T.writes(AS[tx, 0], BS[tx, 0], AL[0:2, 0, 0], BL[0:2, 0, 0], C[tx, 0:15]) for i in T.serial(15): with T.block(): T.reads(A[tx, i + 1]) T.writes(AS[tx, 0]) AS[tx, 0] = A[tx, i + 1] * T.float32(2) with T.block(): T.reads(B[tx, i + 
1]) T.writes(BS[tx, 0]) BS[tx, 0] = B[tx, i + 1] + T.float32(2) with T.block(): T.reads(AS[tx, 0]) T.writes(AL[(i + 1) % 2, 0, 0]) AL[(i + 1) % 2, 0, 0] = AS[tx, 0] with T.block(): T.reads(BS[tx, 0]) T.writes(BL[(i + 1) % 2, 0, 0]) BL[(i + 1) % 2, 0, 0] = BS[tx, 0] with T.block(): T.reads(AL[i % 2, 0, 0], BL[i % 2, 0, 0]) T.writes(C[tx, i]) C[tx, i] = AL[i % 2, 0, 0] * BL[i % 2, 0, 0] with T.block(): T.reads(AL[1, 0, 0], BL[1, 0, 0]) T.writes(C[tx, 15]) C[tx, 15] = AL[1, 0, 0] * BL[1, 0, 0] @T.prim_func def nested_pipeline_simple( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1, 1, 1], "software_pipeline_order": [0, 1, 2, 3], }, ): with T.block(): T.reads(A[tx, i, 0:16]) T.writes(C[tx, i, 0:16]) A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared") for j in T.serial(0, 16): with T.block(): T.reads(A[tx, i, j]) T.writes(A_shared[tx, 0, j]) A_shared[tx, 0, j] = A[tx, i, j] for j in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1], }, ): with T.block(): T.reads(A_shared[tx, 0, j]) T.writes(C[tx, i, j]) B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared") with T.block(): T.reads(A_shared[tx, i, j]) T.writes(B[tx, i, 0]) B[tx, i, 0] = A_shared[tx, 0, j] * T.float32(2) with T.block(): T.reads(B[tx, i, 0]) T.writes(C[tx, i, j]) C[tx, i, j] = B[tx, i, 0] + T.float32(1) @T.prim_func def transformed_nested_pipeline_simple( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16, 0:16]]) T.writes([C[tx, 0:16, 0:16]]) A_shared = T.alloc_buffer([2, 16, 1, 16], dtype="float32", scope="shared") B = T.alloc_buffer([2, 16, 1, 1], dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0, 0:16]]) T.writes([A_shared[0, 
tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, 0, j]]) T.writes([A_shared[0, tx, 0, j]]) A_shared[0, tx, 0, j] = A[tx, 0, j] with T.block(): T.reads([A[tx, 1:16, 0:16], A_shared[0:2, tx, 0:15, 0:16], B[0:2, tx, 0:15, 0]]) T.writes([A_shared[0:2, tx, 0, 0:16], B[0:2, tx, 0:15, 0], C[tx, 0:15, 0:16]]) for i in T.serial(0, 15): with T.block(): T.reads([A[tx, i + 1, 0:16]]) T.writes([A_shared[(i + 1) % 2, tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, i + 1, j]]) T.writes([A_shared[(i + 1) % 2, tx, 0, j]]) A_shared[(i + 1) % 2, tx, 0, j] = A[tx, i + 1, j] with T.block(): T.reads([A_shared[i % 2, tx, i, 0]]) T.writes([B[0, tx, i, 0]]) B[0, tx, i, 0] = A_shared[i % 2, tx, 0, 0] * T.float32(2) with T.block(): T.reads([A_shared[i % 2, tx, i, 1:16], B[0:2, tx, i, 0]]) T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_shared[i % 2, tx, i, j + 1]]) T.writes([B[(j + 1) % 2, tx, i, 0]]) B[(j + 1) % 2, tx, i, 0] = A_shared[ i % 2, tx, 0, j + 1 ] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, i, 0]]) T.writes([C[tx, i, j]]) C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, i, 0]]) T.writes([C[tx, i, 15]]) C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_shared[1, tx, 15, 0:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]]) with T.block(): T.reads([A_shared[1, tx, 15, 0]]) T.writes([B[0, tx, 15, 0]]) B[0, tx, 15, 0] = A_shared[1, tx, 0, 0] * T.float32(2) with T.block(): T.reads([A_shared[1, tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_shared[1, tx, 15, j + 1]]) T.writes([B[(j + 1) % 2, tx, 15, 0]]) B[(j + 1) % 2, tx, 15, 0] = A_shared[1, tx, 0, j + 1] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, 15, 0]]) T.writes([C[tx, 15, j]]) C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1) with T.block(): T.reads([B[1, 
tx, 15, 0]]) T.writes([C[tx, 15, 15]]) C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1) @T.prim_func def nested_pipeline_prefetch_inner( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 0, 1, 1], "software_pipeline_order": [0, 2, 1, 3], }, ): with T.block(): T.reads(A[tx, i, 0:16]) T.writes(C[tx, i, 0:16]) A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared") for j in T.serial(0, 16): with T.block(): T.reads(A[tx, i, j]) T.writes(A_shared[tx, 0, j]) A_shared[tx, 0, j] = A[tx, i, j] for j in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1], }, ): with T.block(): T.reads(A_shared[tx, 0, j]) T.writes(C[tx, i, j]) B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared") with T.block(): T.reads(A_shared[tx, i, j]) T.writes(B[tx, i, 0]) B[tx, i, 0] = A_shared[tx, 0, j] * T.float32(2) with T.block(): T.reads(B[tx, i, 0]) T.writes(C[tx, i, j]) C[tx, i, j] = B[tx, i, 0] + T.float32(1) @T.prim_func def transformed_nested_pipeline_prefetch_inner( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16, 0:16]]) T.writes([C[tx, 0:16, 0:16]]) A_shared = T.alloc_buffer([2, 16, 1, 16], dtype="float32", scope="shared") B = T.alloc_buffer([2, 16, 1, 1], dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0, 0:16], A_shared[0, tx, 0, 0]]) T.writes([A_shared[0, tx, 0, 0:16], B[0, tx, 0, 0]]) with T.block(): T.reads([A[tx, 0, 0:16]]) T.writes([A_shared[0, tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, 0, j]]) T.writes([A_shared[0, tx, 0, j]]) A_shared[0, tx, 0, j] = A[tx, 0, j] with T.block(): T.reads([A_shared[0, tx, 0, 0]]) T.writes([B[0, tx, 0, 0]]) B[0, tx, 0, 0] = A_shared[0, tx, 0, 0] * 
T.float32(2) with T.block(): T.reads([A[tx, 1:16, 0:16], A_shared[0:2, tx, 0:16, 0:16], B[0:2, tx, 0:15, 0]]) T.writes([A_shared[0:2, tx, 0, 0:16], B[0:2, tx, 0:16, 0], C[tx, 0:15, 0:16]]) for i in T.serial(0, 15): with T.block(): T.reads([A[tx, i + 1, 0:16]]) T.writes([A_shared[(i + 1) % 2, tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, i + 1, j]]) T.writes([A_shared[(i + 1) % 2, tx, 0, j]]) A_shared[(i + 1) % 2, tx, 0, j] = A[tx, i + 1, j] with T.block(): T.reads([A_shared[i % 2, tx, i, 1:16], B[0:2, tx, i, 0]]) T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_shared[i % 2, tx, i, j + 1]]) T.writes([B[(j + 1) % 2, tx, i, 0]]) B[(j + 1) % 2, tx, i, 0] = A_shared[ i % 2, tx, 0, j + 1 ] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, i, 0]]) T.writes([C[tx, i, j]]) C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_shared[(i + 1) % 2, tx, i + 1, 0]]) T.writes([B[0, tx, i + 1, 0]]) B[0, tx, i + 1, 0] = A_shared[(i + 1) % 2, tx, 0, 0] * T.float32(2) with T.block(): T.reads([B[1, tx, i, 0]]) T.writes([C[tx, i, 15]]) C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_shared[1, tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]]) with T.block(): T.reads([A_shared[1, tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_shared[1, tx, 15, j + 1]]) T.writes([B[(j + 1) % 2, tx, 15, 0]]) B[(j + 1) % 2, tx, 15, 0] = A_shared[1, tx, 0, j + 1] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, 15, 0]]) T.writes([C[tx, 15, j]]) C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, 15, 0]]) T.writes([C[tx, 15, 15]]) C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1) @T.prim_func def nested_pipeline_interleaving( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ): for tx in T.thread_binding(0, 16, 
thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 0, 0, 1, 1], "software_pipeline_order": [0, 2, 3, 1, 4], }, ): with T.block(): T.reads(A[tx, i, 0:16]) T.writes(C[tx, i, 0:16]) A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared") A_local = T.alloc_buffer((1, 1, 16), dtype="float32", scope="local") for j in T.serial(0, 16): with T.block(): T.reads(A[tx, i, j]) T.writes(A_shared[tx, 0, j]) A_shared[tx, 0, j] = A[tx, i, j] for j in T.serial(0, 16): with T.block(): T.reads(A_shared[tx, 0, j]) T.writes(A_local[0, 0, j]) A_local[0, 0, j] = A_shared[tx, i, j] for j in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1], }, ): with T.block(): T.reads(A_local[0, 0, j]) T.writes(C[tx, i, j]) B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared") with T.block(): T.reads(A_local[tx, i, j]) T.writes(B[tx, i, 0]) B[tx, i, 0] = A_local[0, 0, j] * T.float32(2) with T.block(): T.reads(B[tx, i, 0]) T.writes(C[tx, i, j]) C[tx, i, j] = B[tx, i, 0] + T.float32(1) @T.prim_func def transformed_nested_pipeline_interleaving( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16, 0:16]]) T.writes([C[tx, 0:16, 0:16]]) A_shared = T.alloc_buffer([16, 1, 16], dtype="float32", scope="shared") A_local = T.alloc_buffer([1, 1, 16], dtype="float32", scope="local") B = T.alloc_buffer([2, 16, 1, 1], dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0, 0:16], A_shared[tx, 0, 0:16], A_local[tx, 0, 0]]) T.writes([A_shared[tx, 0, 0:16], A_local[0, 0, 0:16], B[0, tx, 0, 0]]) with T.block(): T.reads([A[tx, 0, 0:16]]) T.writes([A_shared[tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, 0, j]]) T.writes([A_shared[tx, 0, j]]) A_shared[tx, 0, j] = A[tx, 0, j] with T.block(): T.reads([A_shared[tx, 0, 0:16]]) T.writes([A_local[0, 0, 
0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A_shared[tx, 0, j]]) T.writes([A_local[0, 0, j]]) A_local[0, 0, j] = A_shared[tx, 0, j] with T.block(): T.reads([A_local[tx, 0, 0]]) T.writes([B[0, tx, 0, 0]]) B[0, tx, 0, 0] = A_local[0, 0, 0] * T.float32(2) with T.block(): T.reads( [ A[tx, 1:16, 0:16], A_local[tx, 0:16, 0:16], B[0:2, tx, 0:15, 0], A_shared[tx, 0, 0:16], ] ) T.writes( [ A_shared[tx, 0, 0:16], B[0:2, tx, 0:16, 0], C[tx, 0:15, 0:16], A_local[0, 0, 0:16], ] ) for i in T.serial(0, 15): with T.block(): T.reads([A[tx, i + 1, 0:16]]) T.writes([A_shared[tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, i + 1, j]]) T.writes([A_shared[tx, 0, j]]) A_shared[tx, 0, j] = A[tx, i + 1, j] with T.block(): T.reads([A_local[tx, i, 1:16], B[0:2, tx, i, 0]]) T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_local[tx, i, j + 1]]) T.writes([B[(j + 1) % 2, tx, i, 0]]) B[(j + 1) % 2, tx, i, 0] = A_local[0, 0, j + 1] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, i, 0]]) T.writes([C[tx, i, j]]) C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_shared[tx, 0, 0:16]]) T.writes([A_local[0, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A_shared[tx, 0, j]]) T.writes([A_local[0, 0, j]]) A_local[0, 0, j] = A_shared[tx, i + 1, j] with T.block(): T.reads([A_local[tx, i + 1, 0]]) T.writes([B[0, tx, i + 1, 0]]) B[0, tx, i + 1, 0] = A_local[0, 0, 0] * T.float32(2) with T.block(): T.reads([B[1, tx, i, 0]]) T.writes([C[tx, i, 15]]) C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_local[tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]]) with T.block(): T.reads([A_local[tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_local[tx, 15, j + 1]]) T.writes([B[(j + 1) % 2, tx, 15, 0]]) B[(j + 1) % 2, tx, 15, 0] = A_local[0, 0, j + 1] * 
T.float32(2) with T.block(): T.reads([B[j % 2, tx, 15, 0]]) T.writes([C[tx, 15, j]]) C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, 15, 0]]) T.writes([C[tx, 15, 15]]) C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1) @T.prim_func def nested_pipeline_double_buffer( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 0, 0, 1, 1], "software_pipeline_order": [0, 2, 3, 1, 4], }, ): with T.block(): T.reads(A[tx, i, 0:16]) T.writes(C[tx, i, 0:16]) A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared") A_local = T.alloc_buffer((1, 1, 16), dtype="float32", scope="local") for j in T.serial(0, 16): with T.block(): T.reads(A[tx, i, j]) T.writes(A_shared[tx, 0, j]) A_shared[tx, 0, j] = A[tx, i, j] for j in T.serial(0, 16): with T.block(): T.block_attr({"double_buffer_scope": 0}) T.reads(A_shared[tx, 0, j]) T.writes(A_local[0, 0, j]) A_local[0, 0, j] = A_shared[tx, i, j] for j in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1], }, ): with T.block(): T.reads(A_local[0, 0, j]) T.writes(C[tx, i, j]) B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared") with T.block(): T.reads(A_local[tx, i, j]) T.writes(B[tx, i, 0]) B[tx, i, 0] = A_local[0, 0, j] * T.float32(2) with T.block(): T.reads(B[tx, i, 0]) T.writes(C[tx, i, j]) C[tx, i, j] = B[tx, i, 0] + T.float32(1) @T.prim_func def transformed_nested_pipeline_double_buffer( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16, 0:16]]) T.writes([C[tx, 0:16, 0:16]]) A_shared = T.alloc_buffer([16, 1, 16], dtype="float32", scope="shared") A_local = T.alloc_buffer([2, 1, 1, 16], dtype="float32", scope="local") B = T.alloc_buffer([2, 16, 1, 1], 
dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0, 0:16], A_shared[tx, 0, 0:16], A_local[0, tx, 0, 0]]) T.writes([A_shared[tx, 0, 0:16], A_local[0, 0, 0, 0:16], B[0, tx, 0, 0]]) with T.block(): T.reads([A[tx, 0, 0:16]]) T.writes([A_shared[tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, 0, j]]) T.writes([A_shared[tx, 0, j]]) A_shared[tx, 0, j] = A[tx, 0, j] with T.block(): T.reads([A_shared[tx, 0, 0:16]]) T.writes([A_local[0, 0, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A_shared[tx, 0, j]]) T.writes([A_local[0, 0, 0, j]]) T.block_attr({"double_buffer_scope": 0}) A_local[0, 0, 0, j] = A_shared[tx, 0, j] with T.block(): T.reads([A_local[0, tx, 0, 0]]) T.writes([B[0, tx, 0, 0]]) B[0, tx, 0, 0] = A_local[0, 0, 0, 0] * T.float32(2) with T.block(): T.reads( [ A[tx, 1:16, 0:16], A_local[0:2, tx, 0:16, 0:16], B[0:2, tx, 0:15, 0], A_shared[tx, 0, 0:16], ] ) T.writes( [ A_shared[tx, 0, 0:16], B[0:2, tx, 0:16, 0], C[tx, 0:15, 0:16], A_local[0:2, 0, 0, 0:16], ] ) for i in T.serial(0, 15): with T.block(): T.reads([A[tx, i + 1, 0:16]]) T.writes([A_shared[tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, i + 1, j]]) T.writes([A_shared[tx, 0, j]]) A_shared[tx, 0, j] = A[tx, i + 1, j] with T.block(): T.reads([A_local[i % 2, tx, i, 1:16], B[0:2, tx, i, 0]]) T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_local[i % 2, tx, i, j + 1]]) T.writes([B[(j + 1) % 2, tx, i, 0]]) B[(j + 1) % 2, tx, i, 0] = A_local[i % 2, 0, 0, j + 1] * T.float32( 2 ) with T.block(): T.reads([B[j % 2, tx, i, 0]]) T.writes([C[tx, i, j]]) C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_shared[tx, 0, 0:16]]) T.writes([A_local[(i + 1) % 2, 0, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A_shared[tx, 0, j]]) T.writes([A_local[(i + 1) % 2, 0, 0, j]]) T.block_attr({"double_buffer_scope": 0}) A_local[(i + 1) % 2, 0, 0, j] = A_shared[tx, i + 1, j] 
                    # Inner-pipeline prologue for outer iteration i + 1.
                    with T.block():
                        T.reads([A_local[(i + 1) % 2, tx, i + 1, 0]])
                        T.writes([B[0, tx, i + 1, 0]])
                        B[0, tx, i + 1, 0] = A_local[(i + 1) % 2, 0, 0, 0] * T.float32(2)
                    # Inner-pipeline epilogue for outer iteration i.
                    with T.block():
                        T.reads([B[1, tx, i, 0]])
                        T.writes([C[tx, i, 15]])
                        C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1)
            # Outer-pipeline epilogue (last outer iteration, i == 15).
            with T.block():
                T.reads([A_local[1, tx, 15, 1:16], B[0:2, tx, 15, 0]])
                T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]])
                with T.block():
                    T.reads([A_local[1, tx, 15, 1:16], B[0:2, tx, 15, 0]])
                    T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]])
                    for j in T.serial(0, 15):
                        with T.block():
                            T.reads([A_local[1, tx, 15, j + 1]])
                            T.writes([B[(j + 1) % 2, tx, 15, 0]])
                            B[(j + 1) % 2, tx, 15, 0] = A_local[1, 0, 0, j + 1] * T.float32(2)
                        with T.block():
                            T.reads([B[j % 2, tx, 15, 0]])
                            T.writes([C[tx, 15, j]])
                            C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1)
                with T.block():
                    T.reads([B[1, tx, 15, 0]])
                    T.writes([C[tx, 15, 15]])
                    C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1)


# Invalid input: within stage 1, order [.., 2, 1] places the block that reads
# C (order 1) before the block that writes C (order 2) — rejected by the
# pass (see test_error_reorder).
@T.prim_func
def simple_compute_incorrect_reorder(
    A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32")
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 1, 1],
                "software_pipeline_order": [0, 2, 1],
            },
        ):
            with T.block():
                T.reads(A[tx, i])
                T.writes(D[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                C = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, 0])
                    C[tx, 0] = B[tx, 0] + T.float32(2)
                with T.block():
                    T.reads(C[tx, 0])
                    T.writes(D[tx, i])
                    D[tx, i] = C[tx, 0] + T.float32(1)


# Invalid input: two blocks share order index 1 — rejected by the pass
# (see test_error_conflicting_order).
@T.prim_func
def simple_compute_conflicting_order(
    A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32")
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 1, 1],
                "software_pipeline_order": [0, 1, 1],
            },
        ):
            with T.block():
                T.reads(A[tx, i])
                T.writes(D[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                C = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, 0])
                    C[tx, 0] = B[tx, 0] + T.float32(2)
                with T.block():
                    T.reads(C[tx, 0])
                    T.writes(D[tx, i])
                    D[tx, i] = C[tx, 0] + T.float32(1)


# Invalid input: software_pipeline_stage without the matching
# software_pipeline_order annotation (see test_error_missing_annotation).
@T.prim_func
def simple_compute_missing_annotation(
    A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(0, 16, annotations={"software_pipeline_stage": [0, 1]}):
            with T.block():
                T.reads(A[tx, i])
                T.writes(C[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, i])
                    C[tx, i] = B[tx, 0] + T.float32(1)


# Each test below pairs an annotated input prim_func with its expected
# pipelined IR via _check, or asserts rejection via _check_error (both
# helpers are defined above this chunk — TODO confirm in the full file).
def test_simple_compute():
    _check(gen_simple_compute(1), transformed_simple_compute)


def test_simple_compute_with_other_annotation():
    _check(simple_compute_with_other_annotation, transformed_simple_compute_with_other_annotation)


def test_dynamic_compute():
    _check(dynamic_compute, transformed_dynamic_compute)


def test_trivial_pipeline():
    _check(trivial_pipeline, transformed_trivial_pipeline)


def test_three_stage_compute():
    _check(three_stage_compute, transformed_three_stage_compute)


def test_dag_interleaving():
    _check(dag_interleaving, transformed_dag_interleaving)


def test_nest_pipeline_simple():
    _check(nested_pipeline_simple, transformed_nested_pipeline_simple)


def test_nest_pipeline_prefetch_inner():
    _check(nested_pipeline_prefetch_inner, transformed_nested_pipeline_prefetch_inner)


def test_nest_pipeline_interleaving():
    _check(nested_pipeline_interleaving, transformed_nested_pipeline_interleaving)


def test_nest_pipeline_double_buffer():
    _check(nested_pipeline_double_buffer, transformed_nested_pipeline_double_buffer)


def test_error_reorder():
    _check_error(simple_compute_incorrect_reorder)


def test_error_conflicting_order():
    _check_error(simple_compute_conflicting_order)


def test_error_missing_annotation():
    _check_error(simple_compute_missing_annotation)


def test_simple_compute_async():
    """Pipeline with stage 0 marked async: producers are wrapped in
    async_commit_queue_scope/async_scope attrs, consumers in
    async_wait_queue_scope/async_wait_inflight_count attrs."""
    mod = tvm.IRModule.from_expr(gen_simple_compute(1).with_attr("global_symbol", "main"))
    sch = tvm.tir.Schedule(mod)
    _, loop = sch.get_loops(sch.get_block("compute"))
    sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0])
    mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod)

    @T.prim_func
    def ref(A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")):
        for tx in T.thread_binding(16, thread="threadIdx.x"):
            with T.block():
                T.reads(A[tx, 0:16])
                T.writes(C[tx, 0:16])
                B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
                # Prologue: async producer for iteration 0.
                with T.block():
                    T.reads(A[tx, 0])
                    T.writes(B[T.FloorMod(0, 2), tx, 0])
                    with T.attr(0, "async_commit_queue_scope", 0):
                        with T.attr(0, "async_scope", 1):
                            B[T.FloorMod(0, 2), tx, 0] = A[tx, 0] * T.float32(2)
                # Steady state: consumer waits until one commit is in flight.
                with T.block():
                    T.reads(A[tx, 1:16], B[0:2, tx, 0])
                    T.writes(B[0:2, tx, 0], C[tx, 0:15])
                    for i in T.serial(15):
                        with T.block():
                            T.where(i + 1 < 16)
                            T.reads(A[tx, i + 1])
                            T.writes(B[(i + 1) % 2, tx, 0])
                            with T.attr(0, "async_commit_queue_scope", 0):
                                with T.attr(0, "async_scope", 1):
                                    B[(i + 1) % 2, tx, 0] = A[tx, i + 1] * T.float32(2)
                        with T.block():
                            T.where(i + 1 - 1 < 16)
                            T.reads(B[(i - 1 + 1) % 2, tx, 0])
                            T.writes(C[tx, i - 1 + 1])
                            with T.attr(0, "async_wait_queue_scope", 0):
                                with T.attr(0, "async_wait_inflight_count", 1):
                                    C[tx, i - 1 + 1] = B[(i - 1 + 1) % 2, tx, 0] + T.float32(1)
                # Epilogue: wait for all commits (inflight count 0).
                with T.block():
                    T.reads(B[T.FloorMod(15, 2), tx, 0])
                    T.writes(C[tx, 15])
                    with T.attr(0, "async_wait_queue_scope", 0):
                        with T.attr(0, "async_wait_inflight_count", 0):
                            C[tx, 15] = B[T.FloorMod(15, 2), tx, 0] + T.float32(1)

    tvm.ir.assert_structural_equal(mod["main"], ref.with_attr("global_symbol", "main"), True)

    mod =
tvm.IRModule.from_expr(gen_simple_compute(3).with_attr("global_symbol", "main")) sch = tvm.tir.Schedule(mod) _, loop = sch.get_loops(sch.get_block("compute")) sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0]) mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod) @T.prim_func def ref(A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16]) T.writes(C[tx, 0:16]) B = T.alloc_buffer([4, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads(A[tx, 0:3]) T.writes(B[0:3, tx, 0]) for i in T.unroll(3): with T.block(): T.where(i < 16) T.reads(A[tx, i]) T.writes(B[i % 4, tx, 0]) T.attr(0, "async_commit_queue_scope", 0) T.attr(0, "async_scope", 1) B[i % 4, tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(A[tx, 3:16], B[0:4, tx, 0]) T.writes(B[0:4, tx, 0], C[tx, 0:13]) for i in T.serial(13): with T.block(): T.where(i + 3 < 16) T.reads(A[tx, i + 3]) T.writes(B[(i + 3) % 4, tx, 0]) T.attr(0, "async_commit_queue_scope", 0) T.attr(0, "async_scope", 1) B[(i + 3) % 4, tx, 0] = A[tx, i + 3] * T.float32(2) with T.block(): T.where(i + 3 - 3 < 16) T.reads(B[0:4, tx, 0]) T.writes(C[tx, i - 3 + 3]) with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 3): C[tx, i - 3 + 3] = B[(i - 3 + 3) % 4, tx, 0] + T.float32(1) with T.block(): T.reads(B[0:4, tx, 0]) T.writes(C[tx, 13:16]) for i in T.unroll(3): with T.block(): T.where(i + 16 - 3 < 16) T.reads(B[0:4, tx, 0]) T.writes(C[tx, i - 3 + 16]) with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 2 - i): C[tx, i - 3 + 16] = B[(i - 3 + 16) % 4, tx, 0] + T.float32(1) tvm.ir.assert_structural_equal(mod["main"], ref.with_attr("global_symbol", "main"), True) def test_async_producer_interleaving(): @T.prim_func def simple_compute( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), 
"float32"), ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in range(16): with T.block("compute"): T.reads(A[tx, i]) T.writes(C[tx, i]) A_shared = T.alloc_buffer((16, 1), dtype="float32", scope="shared") B_shared = T.alloc_buffer((16, 1), dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(A_shared[tx, 0]) A_shared[tx, 0] = A[tx, i] with T.block(): T.reads(B[tx, i]) T.writes(B_shared[tx, 0]) B_shared[tx, 0] = B[tx, i] with T.block(): T.reads(A_shared[tx, 0], B_shared[tx, 0]) T.writes(C[tx, i]) C[tx, i] = A_shared[tx, 0] + B_shared[tx, 0] mod = tvm.IRModule.from_expr(simple_compute.with_attr("global_symbol", "main")) sch = tvm.tir.Schedule(mod) _, loop = sch.get_loops(sch.get_block("compute")) sch.annotate(loop, ann_key="software_pipeline_stage", ann_val=[0, 0, 3]) sch.annotate(loop, ann_key="software_pipeline_order", ann_val=[0, 2, 1]) sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0]) mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod) @T.prim_func def ref( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32"), ) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16], B[tx, 0:16]) T.writes(C[tx, 0:16]) A_shared = T.alloc_buffer([4, 16, 1], dtype="float32", scope="shared") B_shared = T.alloc_buffer([4, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads(A[tx, 0:3], B[tx, 0:3]) T.writes(A_shared[0:3, tx, 0], B_shared[0:3, tx, 0]) for i in T.unroll(3): with T.block(): T.where(i < 16) T.reads(A[tx, i], B[tx, i]) T.writes(A_shared[i % 4, tx, 0], B_shared[i % 4, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): A_shared[i % 4, tx, 0] = A[tx, i] with T.attr(0, "async_scope", 1): B_shared[i % 4, tx, 0] = B[tx, i] with T.block(): T.reads(A[tx, 3:16], A_shared[0:4, tx, 0], B_shared[0:4, tx, 0], B[tx, 3:16]) T.writes(A_shared[0:4, tx, 0], C[tx, 0:13], B_shared[0:4, 
tx, 0]) for i in T.serial(13): with T.block(): T.where(i + 3 < 16) T.reads(A[tx, i + 3]) T.writes(A_shared[(i + 3) % 4, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): A_shared[(i + 3) % 4, tx, 0] = A[tx, i + 3] with T.block(): T.where(i + 3 - 3 < 16) T.reads(A_shared[0:4, tx, 0], B_shared[0:4, tx, 0]) T.writes(C[tx, i - 3 + 3]) with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 5): C[tx, i - 3 + 3] = ( A_shared[(i - 3 + 3) % 4, tx, 0] + B_shared[(i - 3 + 3) % 4, tx, 0] ) with T.block(): T.where(i + 3 < 16) T.reads(B[tx, i + 3]) T.writes(B_shared[(i + 3) % 4, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): B_shared[(i + 3) % 4, tx, 0] = B[tx, i + 3] with T.block(): T.reads(A_shared[0:4, tx, 0], B_shared[0:4, tx, 0]) T.writes(C[tx, 13:16]) for i in T.unroll(3): with T.block(): T.where(i + 16 - 3 < 16) T.reads(A_shared[0:4, tx, 0], B_shared[0:4, tx, 0]) T.writes(C[tx, i - 3 + 16]) with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 2 - i): C[tx, i - 3 + 16] = ( A_shared[(i - 3 + 16) % 4, tx, 0] + B_shared[(i - 3 + 16) % 4, tx, 0] ) tvm.ir.assert_structural_equal(mod["main"], ref.with_attr("global_symbol", "main"), True) def test_three_stage_compute_two_stage_async(): mod = tvm.IRModule.from_expr(three_stage_compute.with_attr("global_symbol", "main")) sch = tvm.tir.Schedule(mod) _, loop = sch.get_loops(sch.get_block("compute")) sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0, 1]) mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod) @T.prim_func def ref(A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32")) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16]) T.writes(D[tx, 0:16]) B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") C = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") with T.block(): 
T.reads(A[tx, 0:2], B[0:2, tx, 0]) T.writes(B[0:2, tx, 0], C[0:2, tx, 0]) for i in T.unroll(2): with T.block(): T.where(i < 16) T.reads(A[tx, i]) T.writes(B[i % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): B[i % 2, tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.where(i == 1 and i - 1 < 16) T.reads(B[(i - 1) % 2, tx, 0]) T.writes(C[(i - 1) % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 1): with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 1): with T.attr(0, "async_scope", 1): C[(i - 1) % 2, tx, 0] = B[ (i - 1) % 2, tx, 0 ] + T.float32(2) with T.block(): T.reads(A[tx, 2:16], B[0:2, tx, 0], C[0:2, tx, 0]) T.writes(B[0:2, tx, 0], C[0:2, tx, 0], D[tx, 0:14]) for i in T.serial(14): with T.block(): T.where(i + 2 < 16) T.reads(A[tx, i + 2]) T.writes(B[(i + 2) % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): B[(i + 2) % 2, tx, 0] = A[tx, i + 2] * T.float32(2) with T.block(): T.where(i + 2 - 1 < 16) T.reads(B[(i - 1 + 2) % 2, tx, 0]) T.writes(C[(i - 1 + 2) % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 1): with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 1): with T.attr(0, "async_scope", 1): C[(i - 1 + 2) % 2, tx, 0] = B[ (i - 1 + 2) % 2, tx, 0 ] + T.float32(2) with T.block(): T.where(i + 2 - 2 < 16) T.reads(C[0:2, tx, 0]) T.writes(D[tx, i - 2 + 2]) with T.attr(0, "async_wait_queue_scope", 1): with T.attr(0, "async_wait_inflight_count", 1): D[tx, i - 2 + 2] = C[(i - 2 + 2) % 2, tx, 0] + T.float32(1) with T.block(): T.reads(B[0:2, tx, 0], C[0:2, tx, 0]) T.writes(C[0:2, tx, 0], D[tx, 14:16]) for i in T.unroll(2): with T.block(): T.where(i + 16 - 1 < 16) T.reads(B[(i - 1 + 16) % 2, tx, 0]) T.writes(C[(i - 1 + 16) % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 1): with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 0 - i): with T.attr(0, 
"async_scope", 1): C[(i - 1 + 16) % 2, tx, 0] = B[ (i - 1 + 16) % 2, tx, 0 ] + T.float32(2) with T.block(): T.where(i + 16 - 2 < 16) T.reads(C[0:2, tx, 0]) T.writes(D[tx, i - 2 + 16]) with T.attr(0, "async_wait_queue_scope", 1): with T.attr( 0, "async_wait_inflight_count", T.if_then_else(i + 16 - 1 < 16, 1, 0, dtype="int32"), ): D[tx, i - 2 + 16] = C[(i - 2 + 16) % 2, tx, 0] + T.float32(1) tvm.ir.assert_structural_equal(mod["main"], ref.with_attr("global_symbol", "main"), True) N = K = M = 4096 def get_mma_schedule(): i_factors, j_factors, k_factors = [1, 32, 1, 4, 2], [16, 2, 4, 1, 2], [128, 2, 1] def index_map(i, j): return ( i // 16, j // 16, *shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16), ) workload = te.create_prim_func( te_workload.matmul(N, M, K, in_dtype="float16", out_dtype="float32") ) return mma_schedule( workload, 16, "float16", False, i_factors, j_factors, k_factors, index_map, index_map, index_map, LDMATRIX_f16_A_DYN_INTRIN, LDMATRIX_f16_B_DYN_INTRIN, MMA_f16f16f32_INTRIN, MMA_fill_16x16_f32_INTRIN, MMA_store_16x16_f32_global_INTRIN, "shared.dyn", ) def build_and_run(sch): if tvm.testing.is_ampere_or_newer(): with tvm.transform.PassContext(config={"tir.use_async_copy": 1}): f = tvm.build(sch.mod["main"], target="cuda") dev = tvm.device("cuda", 0) a_np = np.random.uniform(size=(N, K)).astype("float16") b_np = np.random.uniform(size=(K, M)).astype("float16") c_np = np.dot(a_np.astype("float32"), b_np.astype("float32")) a = tvm.nd.array(a_np, dev) b = tvm.nd.array(b_np, dev) c = tvm.nd.array(np.zeros((N, M), dtype="float32"), dev) f(a, b, c) tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3) @tvm.testing.requires_cuda def test_async_pipelined_mma_gemm_simple(): sch = get_mma_schedule() k0 = sch.get_loops(sch.get_block("C_o_update"))[3] sch.annotate(k0, ann_key="software_pipeline_stage", ann_val=[0, 0, 3]) sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 2]) sch.annotate(k0, ann_key="software_pipeline_async_stages", 
ann_val=[0]) seq = tvm.transform.Sequential( [ tvm.tir.transform.PlanAndUpdateBufferAllocationLocation(), tvm.tir.transform.ConvertBlocksToOpaque(), tvm.tir.transform.UnifyThreadBinding(), tvm.tir.transform.LowerMatchBuffer(), tvm.tir.transform.InjectSoftwarePipeline(), ] ) mod = seq(sch.mod) pipeline = mod["main"].body.block.body.body.body.body.body.block.body[1].block.body prologue, body, epilogue = pipeline commit_queue_scope = prologue.block.body.body.block.body assert len(commit_queue_scope.body) == 2 assert commit_queue_scope.value == 0 commit_queue_scope = body.block.body.body[0].block.body assert len(commit_queue_scope.body) == 2 assert commit_queue_scope.value == 0 assert body.block.body.body[1].block.body.body.attr_key == "async_wait_inflight_count" assert body.block.body.body[1].block.body.body.value == 3 assert epilogue.block.body.body.block.body.body.attr_key == "async_wait_inflight_count" assert str(epilogue.block.body.body.block.body.body.value) == "2 - k_0_0" build_and_run(sch) @tvm.testing.requires_cuda def test_async_nested_pipeline_mma_gemm_ideal_annotation(): sch = get_mma_schedule() k0 = sch.get_loops(sch.get_block("C_o_update"))[3] k1 = sch.get_loops(sch.get_block("C_o_update"))[4] sch.annotate(k0, ann_key="software_pipeline_stage", ann_val=[0, 0, 2, 3, 3]) sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 3, 2, 4]) sch.annotate(k0, ann_key="software_pipeline_async_stages", ann_val=[0]) sch.annotate(k1, ann_key="software_pipeline_stage", ann_val=[0, 0, 1]) sch.annotate(k1, ann_key="software_pipeline_order", ann_val=[0, 1, 2]) seq = tvm.transform.Sequential( [ tvm.tir.transform.PlanAndUpdateBufferAllocationLocation(), tvm.tir.transform.ConvertBlocksToOpaque(), tvm.tir.transform.UnifyThreadBinding(), tvm.tir.transform.LowerMatchBuffer(), tvm.tir.transform.InjectSoftwarePipeline(), ] ) mod = seq(sch.mod) pipeline = mod["main"].body.block.body.body.body.body.body.block.body[1].block.body prologue, body, epilogue = pipeline 
commit_queue_scope = prologue.block.body.body[0].block.body assert len(commit_queue_scope.body) == 2 assert commit_queue_scope.value == 0 assert prologue.block.body.body[1].block.body.body.attr_key == "async_wait_inflight_count" assert prologue.block.body.body[1].block.body.body.value == 2 commit_queue_scope = body.block.body.body[0].block.body assert len(commit_queue_scope.body) == 2 assert commit_queue_scope.value == 0 assert body.block.body.body[1].block.body.body.attr_key == "async_wait_inflight_count" assert body.block.body.body[1].block.body.body.value == 2 assert str(epilogue.block.body.body[0].block.body.body.value) == "1 - k_0_0" build_and_run(sch) if __name__ == "__main__": tvm.testing.main() ```
River Tam is a fictional character of the Firefly franchise. River is portrayed by actress Summer Glau in the 2002 TV series Firefly and the 2005 film Serenity. The nature of the character and her role in the franchise has garnered praise. In 2005, Glau won the SFX magazine award for Best Actress for her role as River in Serenity. Glau later won a Saturn Award for Best Supporting Actress, again for her role as River in Serenity in May 2006. Glau was also runner up for Best Actress/Movie in the SyFy Genre Awards for 2006. Production details Joss Whedon selected Glau for the role after having previously worked with her on another of his shows, Angel. River Tam was Glau's first major role. Glau's inexperience assisted her in playing the character: she likened River's emotional withdrawal to her own initial apprehension on the set. In many of the hand-to-hand combat scenes featuring the character, particularly those in Serenity, Glau's own experience in ballet dancing was incorporated into her fight choreography. Depiction During River's early childhood, she grew up alongside her brother, Simon, part of the wealthy Tam family on the "core" planet of Osiris. She was graceful and intellectually gifted. She is described as having a strong thirst for knowledge and a love for and intuitive grasp of dance. By the time she was fourteen years old, she had grown "bored" with her studies and enrolled in a graduate program for physics. It was at this point that she was sent to a government learning facility known as "The Academy". While her parents and Simon believed the Academy was a private school meant to nurture the gifts of talented children, it was in fact a cover for a government experiment in creating assassins. While in the hands of Alliance doctors and scientists, River was secretly experimented on, including surgery that damaged her amygdala. 
According to Simon, the Alliance attempted to isolate River from her family, though she managed to send a call for help by putting a coded message in a letter to her brother. Simon decoded the message and set out to rescue his then sixteen-year-old sister, despite his parents' insistence that he was being paranoid. In his quest to locate and free River, Simon enlisted the aid of anti-governmental groups, exhausting his personal fortune and sacrificing a promising medical career. The R. Tam sessions depict her descent into insanity and portray hints of her psychic abilities. She references the "G-23 Paxilon Hydrochlorate", which would later be a plot point in the film Serenity, and mentions the Academy's first subject "dying on the table." The first session clip indicates she has strong "intuitive" abilities and can easily understand complex subjects. Reception In 2005, Glau won the SFX Magazine award for Best Actress for her role as River in Serenity. Glau later won a Saturn Award for Best Supporting Actress, again for her role as River in Serenity in May 2006. Glau was also runner up for Best Actress/Movie in the SyFy Genre Awards for 2006. The nature of the character and her role in the franchise has garnered both praise and criticism from various reviewers. Some have positively likened the character's erratic behavior to autism. Dr. Karin Beeler of the University of Northern British Columbia compared and contrasted River to Buffy Summers, protagonist of the Buffy the Vampire Slayer franchise (also created by Joss Whedon), in her book Seers, Witches and Psychics on Screen: An Analysis of Women Visionary Characters in Recent Television and Film. Beeler labeled the character an anti-heroine, in comparison to the more heroic role of Buffy Summers. Film critic and horror author Michael Marano also compared the character to Buffy, citing the two characters' combat prowess as similarities and describing River as the apotheosis of Joss Whedon's strong female characters. 
References External links Female characters in television Fictional characters with eidetic memory Fictional characters with neurotrauma Fictional characters with schizophrenia Fictional characters with post-traumatic stress disorder Fictional child prodigies Fictional criminals Fictional psychics Fictional sleeper agents Fictional super soldiers Fictional victims of human experimentation Firefly (TV series) characters Science fiction film characters Teenage characters in television Television characters introduced in 2002
Ahetuk () is a 2015 Indian Assamese language romantic thriller film directed by Bani Das, a veteran Assamese film director and screenwriter who delivered blockbusters such as Maharathi, Mon and Kadambari to Assamese audiences. His movies are popularly known for an individualistic stamp of visual splendor which is a combined package of romance, action and drama. Ahetuk is another entertainment package, with Gunjan Bhardwaj and Amrita Gogoi in the lead roles. Actor Tapan Das is also playing an important character in this film. A deep quest for the meaning of those aspects of life which are really meaningless! That's 'Ahetuk' for you—a journey in celluloid that tries to depict how we chase our desires in life and often hit a blank wall of nothingness. Produced by Raj Kumar Jain, this film was released on 2 January 2015. Plot summary The story is about two passionate journalists, Raj (Gunjan Bhardwaj) and Joyeeta (Amrita Gogoi), who set out to get at the truth of a story to the extent of putting their lives in jeopardy. At the end, surprisingly, they find they have been used by certain vested interests. The step they would take thereafter is something none could fathom. The moral of the story—people or society must not play with the budding minds or sentiments of Gen X or the younger generation. Cast Gunjan Bhardwaj as Raj Amrita Gogoi as Joyeeta Tapan Das as Mr Borbora Sayan Chakravarty as Rakesh Prithiraj Rava as Minister Ashwini Bhuyan Rajiv Kro as Bailung Prastuti Porasor (guest appearance) Zubeen Garg (guest appearance) Soundtrack The music of the film is composed by Poran Borkatoky (JoJo). The album contains 6 tracks. References External links 2015 films Films set in Assam 2010s Assamese-language films
Frank Scanlan may refer to: Frank Scanlan (baseball) Frank Scanlan (footballer)
```javascript var http = require('http') , https = require('https') , server = http.createServer(handler) , port = +process.argv[2] , prefix = process.argv[3] , upstream = process.argv[4] , calls = 0 server.listen(port) function handler (req, res) { if (req.url.indexOf(prefix) != 0) throw new Error('request url [' + req.url + '] does not start with [' + prefix + ']') var upstreamUrl = upstream + req.url.substring(prefix.length) console.log(req.url + ' -> ' + upstreamUrl) https.get(upstreamUrl, function (ures) { ures.on('end', function () { if (++calls == 2) server.close() }) ures.pipe(res) }) } ```
White County Central School District is a public school district based in unincorporated White County, Arkansas, United States, near the Providence community, north of Judsonia. The district's territory includes a small northwestern portion of the city limits of Judsonia (the majority of the city being served by the Riverview School District). Schools in the district provide early childhood, elementary and secondary education to Providence and Steprock, as well as surrounding unincorporated communities in central White County along the Arkansas Highway 157 corridor, and near Pangburn and Bald Knob. Schools White County Central High School, located north of Judsonia and serving more than 250 students in grades 7 through 12. White County Central Elementary School, located north of Judsonia and serving more than 425 students in pre-kindergarten through grade 6. References External links School districts in Arkansas Education in White County, Arkansas
Oedignatha proboscidea is a species of spider of the genus Oedignatha endemic to Sri Lanka. See also List of Liocranidae species References Liocranidae Endemic fauna of Sri Lanka Spiders of Asia Spiders described in 1913
```php
<?php
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

namespace Google\Service\Vision;

/**
 * SafeSearch annotation (Vision API v1p2beta1): per-category likelihood
 * ratings for unsafe content detected in an image. Each property holds a
 * Likelihood enum value serialized as a string
 * (e.g. "VERY_UNLIKELY" ... "VERY_LIKELY").
 */
class GoogleCloudVisionV1p2beta1SafeSearchAnnotation extends \Google\Model
{
  /**
   * Likelihood that the image contains adult content.
   * @var string
   */
  public $adult;
  /**
   * Likelihood that the image is a medical image.
   * @var string
   */
  public $medical;
  /**
   * Likelihood that the image is racy or suggestive.
   * @var string
   */
  public $racy;
  /**
   * Likelihood that a modification was made to make the image appear funny or
   * offensive (spoof).
   * @var string
   */
  public $spoof;
  /**
   * Likelihood that the image contains violent content.
   * @var string
   */
  public $violence;

  /**
   * @param string
   */
  public function setAdult($adult)
  {
    $this->adult = $adult;
  }
  /**
   * @return string
   */
  public function getAdult()
  {
    return $this->adult;
  }
  /**
   * @param string
   */
  public function setMedical($medical)
  {
    $this->medical = $medical;
  }
  /**
   * @return string
   */
  public function getMedical()
  {
    return $this->medical;
  }
  /**
   * @param string
   */
  public function setRacy($racy)
  {
    $this->racy = $racy;
  }
  /**
   * @return string
   */
  public function getRacy()
  {
    return $this->racy;
  }
  /**
   * @param string
   */
  public function setSpoof($spoof)
  {
    $this->spoof = $spoof;
  }
  /**
   * @return string
   */
  public function getSpoof()
  {
    return $this->spoof;
  }
  /**
   * @param string
   */
  public function setViolence($violence)
  {
    $this->violence = $violence;
  }
  /**
   * @return string
   */
  public function getViolence()
  {
    return $this->violence;
  }
}

// Adding a class alias for backwards compatibility with the previous class name.
// The alias string was corrupted in this copy ("your_sha256_hashtion'"), which
// was a parse error; restored to the generator's legacy underscore-style name.
class_alias(GoogleCloudVisionV1p2beta1SafeSearchAnnotation::class, 'Google_Service_Vision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation');
```
Trudy Young (born January 1, 1950) is a Canadian actress. Born in Ontario, Canada, her career began in 1963 with appearances on CBC's Time of Your Life. She became a regular host of Razzle Dazzle in 1965 while attending school at Alderwood Collegiate. She appeared in films throughout the 1970s and 1980s such as Face-Off (1971). In 1979 Young was hired to supply the voice of the groupie on the track "One of My Turns" from Pink Floyd's album The Wall. She was nominated as best supporting actress in the 4th Genie Awards for her role in the film Melanie (1982). Filmography Feature films Television series Post-career Young retired from acting in 1982 and is now living in Oshawa, Ontario. In August 2010, she underwent a second back surgery for a chronic back ailment that had worsened as the result of two car accidents. References External links Living people Canadian child actresses Canadian film actresses Canadian television actresses Year of birth uncertain 1950 births
```javascript
/**
 * @license Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
 * OF ANY KIND, either express or implied.
 */

'use strict';

// MODULES //

var isLittleEndian = require( '@stdlib/assert/is-little-endian' );

// MAIN //

// Index of the 32-bit word holding the high-order bits of a 64-bit float:
// on little-endian platforms the high word is stored second (index 1);
// on big-endian platforms it is stored first (index 0).
var HIGH = ( isLittleEndian === true ) ? 1 : 0;

// EXPORTS //

module.exports = HIGH;
```
```html
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=US-ASCII">
<title>The multi pass iterator</title>
<link rel="stylesheet" href="../../../../../../doc/src/boostbook.css" type="text/css">
<meta name="generator" content="DocBook XSL Stylesheets V1.79.1">
<link rel="home" href="../../index.html" title="Spirit 2.5.4">
<link rel="up" href="../support.html" title="Supporting Libraries">
<link rel="prev" href="../support.html" title="Supporting Libraries">
<link rel="next" href="line_pos_iterator.html" title="The line position iterator">
</head>
<body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF">
<table cellpadding="2" width="100%"><tr>
<td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../../../boost.png"></td>
<td align="center"><a href="../../../../../../index.html">Home</a></td>
<td align="center"><a href="../../../../../../libs/libraries.htm">Libraries</a></td>
<td align="center"><a href="../../../../../../people/people.htm">People</a></td>
<td align="center"><a href="../../../../../../more/faq.htm">FAQ</a></td>
<td align="center"><a href="../../../../../../more/index.htm">More</a></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="../support.html"><img src="../../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../support.html"><img src="../../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="line_pos_iterator.html"><img src="../../../../../../doc/src/images/next.png" alt="Next"></a>
</div>
<div class="section">
<div class="titlepage"><div><div><h3 class="title">
<a name="spirit.support.multi_pass"></a><a class="link" href="multi_pass.html" title="The multi pass iterator">The multi pass iterator</a>
</h3></div></div></div>
<p>
Backtracking in <span class="emphasis"><em>Spirit.Qi</em></span> requires the use of the following types of
iterator: forward, bidirectional, or random access. Because of backtracking, input iterators cannot be used. Therefore, the standard library classes <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">istreambuf_iterator</span></code> and <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">istream_iterator</span></code>, that fall under the category of input iterators, cannot be used. Another input iterator that is of interest is one that wraps a lexer, such as LEX.
</p>
<div class="note"><table border="0" summary="Note">
<tr>
<td rowspan="2" align="center" valign="top" width="25"><img alt="[Note]" src="../../images/note.png"></td>
<th align="left">Note</th>
</tr>
<tr><td align="left" valign="top"><p>
In general, <span class="emphasis"><em>Spirit.Qi</em></span> generates recursive descent parsers, which require backtracking by design. For this reason we need to provide at least forward iterators to any of <span class="emphasis"><em>Spirit.Qi</em></span>'s API functions. This is not an absolute requirement though. In the future, we shall see more deterministic parsers that require no more than 1 character (token) of lookahead. Such parsers allow us to use input iterators such as the <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">istream_iterator</span></code> as is.
</p></td></tr>
</table></div>
<p>
Backtracking can be implemented only if we are allowed to save an iterator position, i.e. making a copy of the current iterator. Unfortunately, with an input iterator, there is no way to do so, and thus input iterators will not work with backtracking in <span class="emphasis"><em>Spirit.Qi</em></span>.
One solution to this problem is to simply load all the data to be parsed into a container, such as a vector or deque, and then pass the begin and end of the container to <span class="emphasis"><em>Spirit.Qi</em></span>. This method can be too memory intensive for certain applications, which is why the <code class="computeroutput"><span class="identifier">multi_pass</span></code> iterator was created. </p> <h5> <a name="spirit.support.multi_pass.h0"></a> <span class="phrase"><a name="spirit.support.multi_pass.using_the_multi_pass"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.using_the_multi_pass">Using the multi_pass</a> </h5> <p> The <code class="computeroutput"><span class="identifier">multi_pass</span></code> iterator will convert any input iterator into a forward iterator suitable for use with <span class="emphasis"><em>Spirit.Qi</em></span>. <code class="computeroutput"><span class="identifier">multi_pass</span></code> will buffer data when needed and will discard the buffer when its contents is not needed anymore. This happens either if only one copy of the iterator exists or if no backtracking can occur. </p> <p> A grammar must be designed with care if the <code class="computeroutput"><span class="identifier">multi_pass</span></code> iterator is used. Any rule that may need to backtrack, such as one that contains an alternative, will cause data to be buffered. The rules that are optimal to use are repetition constructs (as kleene and plus). </p> <p> Sequences of the form <code class="computeroutput"><span class="identifier">a</span> <span class="special">&gt;&gt;</span> <span class="identifier">b</span></code> will buffer data as well. This is different from the behavior of <a href="../../../../../../libs/spirit/classic/index.html" target="_top"><span class="emphasis"><em>Spirit.Classic</em></span></a> but for a good reason. 
Sequences need to reset the current iterator to its initial state if one of the components of a sequence fails to match. To compensate for this behavior we added functionality to the <code class="computeroutput"><span class="identifier">expect</span></code> parsers (i.e. constructs like <code class="computeroutput"><span class="identifier">a</span> <span class="special">&gt;</span> <span class="identifier">b</span></code>). Expectation points introduce deterministic points into the grammar ensuring no backtracking can occur if they match. For this reason we clear the buffers of any multi_pass iterator on each expectation point, ensuring minimal buffer content even for large grammars. </p> <div class="important"><table border="0" summary="Important"> <tr> <td rowspan="2" align="center" valign="top" width="25"><img alt="[Important]" src="../../images/important.png"></td> <th align="left">Important</th> </tr> <tr><td align="left" valign="top"> <p> If you use an error handler in conjunction with the <code class="computeroutput"><span class="identifier">expect</span></code> parser while utilizing a <code class="computeroutput"><span class="identifier">multi_pass</span></code> iterator and you intend to use the error handler to force a <code class="computeroutput"><span class="identifier">retry</span></code> or a <code class="computeroutput"><span class="identifier">fail</span></code> (see the description of error handlers - <span class="bold"><strong>FIXME</strong></span>: insert link), then you need to instantiate the error handler using <code class="computeroutput"><span class="identifier">retry</span></code> or <code class="computeroutput"><span class="identifier">fail</span></code>, for instance: </p> <pre class="programlisting"><span class="identifier">rule</span> <span class="identifier">r</span><span class="special">&lt;</span><span class="identifier">iterator_type</span><span class="special">&gt;</span> <span class="identifier">r</span><span 
class="special">;</span> <span class="identifier">on_error</span><span class="special">&lt;</span><span class="identifier">retry</span><span class="special">&gt;(</span><span class="identifier">r</span><span class="special">,</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">cout</span> <span class="special">&lt;&lt;</span> <span class="identifier">phoenix</span><span class="special">::</span><span class="identifier">val</span><span class="special">(</span><span class="string">"Error!"</span><span class="special">));</span> </pre> <p> If you fail to do so the resulting code will trigger an assert statement at runtime. </p> </td></tr> </table></div> <p> Any rule that repeats, such as kleene_star (<code class="computeroutput"><span class="special">*</span><span class="identifier">a</span></code>) or positive such as (<code class="computeroutput"><span class="special">+</span><span class="identifier">a</span></code>), will only buffer the data for the current repetition. </p> <p> In typical grammars, ambiguity and therefore lookahead is often localized. In fact, many well designed languages are fully deterministic and require no lookahead at all. Peeking at the first character from the input will immediately determine the alternative branch to take. Yet, even with highly ambiguous grammars, alternatives are often of the form <code class="computeroutput"><span class="special">*(</span><span class="identifier">a</span> <span class="special">|</span> <span class="identifier">b</span> <span class="special">|</span> <span class="identifier">c</span> <span class="special">|</span> <span class="identifier">d</span><span class="special">)</span></code>. The input iterator moves on and is never stuck at the beginning. 
Let's look at a Pascal snippet for example: </p> <pre class="programlisting"><span class="identifier">program</span> <span class="special">=</span> <span class="identifier">programHeading</span> <span class="special">&gt;&gt;</span> <span class="identifier">block</span> <span class="special">&gt;&gt;</span> <span class="char">'.'</span> <span class="special">;</span> <span class="identifier">block</span> <span class="special">=</span> <span class="special">*(</span> <span class="identifier">labelDeclarationPart</span> <span class="special">|</span> <span class="identifier">constantDefinitionPart</span> <span class="special">|</span> <span class="identifier">typeDefinitionPart</span> <span class="special">|</span> <span class="identifier">variableDeclarationPart</span> <span class="special">|</span> <span class="identifier">procedureAndFunctionDeclarationPart</span> <span class="special">)</span> <span class="special">&gt;&gt;</span> <span class="identifier">statementPart</span> <span class="special">;</span> </pre> <p> Notice the alternatives inside the Kleene star in the rule block . The rule gobbles the input in a linear manner and throws away the past history with each iteration. As this is fully deterministic LL(1) grammar, each failed alternative only has to peek 1 character (token). The alternative that consumes more than 1 character (token) is definitely a winner. After which, the Kleene star moves on to the next. </p> <p> Now, after the lecture on the features to be careful with when using <code class="computeroutput"><span class="identifier">multi_pass</span></code>, you may think that <code class="computeroutput"><span class="identifier">multi_pass</span></code> is way too restrictive to use. That's not the case. 
If your grammar is deterministic, you can make use of the <code class="computeroutput"><span class="identifier">flush_multi_pass</span></code> pseudo parser in your grammar to ensure that data is not buffered when unnecessary (<code class="computeroutput"><span class="identifier">flush_multi_pass</span></code> is available from the <span class="emphasis"><em>Spirit.Qi</em></span> parser <a href="../../../../repository/doc/html/index.html" target="_top">Repository</a>).
</p>
<p>
Here we present a minimal example showing a minimal use case. The <code class="computeroutput"><span class="identifier">multi_pass</span></code> iterator is highly configurable, but the default policies have been chosen so that it's easily usable with input iterators such as <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">istreambuf_iterator</span></code>. For the complete source code of this example please refer to <a href="../../../../example/support/multi_pass.cpp" target="_top">multi_pass.cpp</a>.
</p> <p> </p> <pre class="programlisting"><span class="keyword">int</span> <span class="identifier">main</span><span class="special">()</span> <span class="special">{</span> <span class="keyword">namespace</span> <span class="identifier">spirit</span> <span class="special">=</span> <span class="identifier">boost</span><span class="special">::</span><span class="identifier">spirit</span><span class="special">;</span> <span class="keyword">using</span> <span class="identifier">spirit</span><span class="special">::</span><span class="identifier">ascii</span><span class="special">::</span><span class="identifier">space</span><span class="special">;</span> <span class="keyword">using</span> <span class="identifier">spirit</span><span class="special">::</span><span class="identifier">ascii</span><span class="special">::</span><span class="identifier">char_</span><span class="special">;</span> <span class="keyword">using</span> <span class="identifier">spirit</span><span class="special">::</span><span class="identifier">qi</span><span class="special">::</span><span class="identifier">double_</span><span class="special">;</span> <span class="keyword">using</span> <span class="identifier">spirit</span><span class="special">::</span><span class="identifier">qi</span><span class="special">::</span><span class="identifier">eol</span><span class="special">;</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">ifstream</span> <span class="identifier">in</span><span class="special">(</span><span class="string">"multi_pass.txt"</span><span class="special">);</span> <span class="comment">// we get our input from this file</span> <span class="keyword">if</span> <span class="special">(!</span><span class="identifier">in</span><span class="special">.</span><span class="identifier">is_open</span><span class="special">())</span> <span class="special">{</span> <span class="identifier">std</span><span class="special">::</span><span 
class="identifier">cout</span> <span class="special">&lt;&lt;</span> <span class="string">"Could not open input file: 'multi_pass.txt'"</span> <span class="special">&lt;&lt;</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">endl</span><span class="special">;</span> <span class="keyword">return</span> <span class="special">-</span><span class="number">1</span><span class="special">;</span> <span class="special">}</span> <span class="keyword">typedef</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">istreambuf_iterator</span><span class="special">&lt;</span><span class="keyword">char</span><span class="special">&gt;</span> <span class="identifier">base_iterator_type</span><span class="special">;</span> <span class="identifier">spirit</span><span class="special">::</span><span class="identifier">multi_pass</span><span class="special">&lt;</span><span class="identifier">base_iterator_type</span><span class="special">&gt;</span> <span class="identifier">first</span> <span class="special">=</span> <span class="identifier">spirit</span><span class="special">::</span><span class="identifier">make_default_multi_pass</span><span class="special">(</span><span class="identifier">base_iterator_type</span><span class="special">(</span><span class="identifier">in</span><span class="special">));</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">vector</span><span class="special">&lt;</span><span class="keyword">double</span><span class="special">&gt;</span> <span class="identifier">v</span><span class="special">;</span> <span class="keyword">bool</span> <span class="identifier">result</span> <span class="special">=</span> <span class="identifier">spirit</span><span class="special">::</span><span class="identifier">qi</span><span class="special">::</span><span class="identifier">phrase_parse</span><span class="special">(</span><span 
class="identifier">first</span> <span class="special">,</span> <span class="identifier">spirit</span><span class="special">::</span><span class="identifier">make_default_multi_pass</span><span class="special">(</span><span class="identifier">base_iterator_type</span><span class="special">())</span> <span class="special">,</span> <span class="identifier">double_</span> <span class="special">&gt;&gt;</span> <span class="special">*(</span><span class="char">','</span> <span class="special">&gt;&gt;</span> <span class="identifier">double_</span><span class="special">)</span> <span class="comment">// recognize list of doubles</span> <span class="special">,</span> <span class="identifier">space</span> <span class="special">|</span> <span class="char">'#'</span> <span class="special">&gt;&gt;</span> <span class="special">*(</span><span class="identifier">char_</span> <span class="special">-</span> <span class="identifier">eol</span><span class="special">)</span> <span class="special">&gt;&gt;</span> <span class="identifier">eol</span> <span class="comment">// comment skipper</span> <span class="special">,</span> <span class="identifier">v</span><span class="special">);</span> <span class="comment">// data read from file</span> <span class="keyword">if</span> <span class="special">(!</span><span class="identifier">result</span><span class="special">)</span> <span class="special">{</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">cout</span> <span class="special">&lt;&lt;</span> <span class="string">"Failed parsing input file!"</span> <span class="special">&lt;&lt;</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">endl</span><span class="special">;</span> <span class="keyword">return</span> <span class="special">-</span><span class="number">2</span><span class="special">;</span> <span class="special">}</span> <span class="identifier">std</span><span 
class="special">::</span><span class="identifier">cout</span> <span class="special">&lt;&lt;</span> <span class="string">"Successfully parsed input file!"</span> <span class="special">&lt;&lt;</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">endl</span><span class="special">;</span> <span class="keyword">return</span> <span class="number">0</span><span class="special">;</span> <span class="special">}</span> </pre> <p> </p> <h5> <a name="spirit.support.multi_pass.h1"></a> <span class="phrase"><a name="spirit.support.multi_pass.using_the_flush_multi_pass_parser"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.using_the_flush_multi_pass_parser">Using the flush_multi_pass parser</a> </h5> <p> The <a href="path_to_url" target="_top">Spirit</a> <a href="../../../../repository/doc/html/index.html" target="_top">Repository</a> contains the <code class="computeroutput"><span class="identifier">flush_multi_pass</span></code> parser component. This is usable in conjunction with the <code class="computeroutput"><span class="identifier">multi_pass</span></code> iterator to minimize the buffering. It allows to insert explicit synchronization points into your grammar where it is safe to clear any stored input as it is ensured that no backtracking can occur at this point anymore. </p> <p> When the <code class="computeroutput"><span class="identifier">flush_multi_pass</span></code> parser is used with <code class="computeroutput"><span class="identifier">multi_pass</span></code>, it will call <code class="computeroutput"><span class="identifier">multi_pass</span><span class="special">::</span><span class="identifier">clear_queue</span><span class="special">()</span></code>. This will cause any buffered data to be erased. This also will invalidate all other copies of multi_pass and they should not be used. 
If they are, a <code class="computeroutput"><span class="identifier">boost</span><span class="special">::</span><span class="identifier">illegal_backtracking</span></code> exception will be thrown. </p> <h5> <a name="spirit.support.multi_pass.h2"></a> <span class="phrase"><a name="spirit.support.multi_pass.the_multi_pass_policies"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.the_multi_pass_policies">The multi_pass Policies</a> </h5> <p> The <code class="computeroutput"><span class="identifier">multi_pass</span></code> iterator is a templated class configurable using policies. The description of <code class="computeroutput"><span class="identifier">multi_pass</span></code> above is how it was originally implemented (before it used policies), and is the default configuration now. But, <code class="computeroutput"><span class="identifier">multi_pass</span></code> is capable of much more. Because of the open-ended nature of policies, you can write your own policy to make <code class="computeroutput"><span class="identifier">multi_pass</span></code> behave in a way that we never before imagined. </p> <p> The multi_pass class has two template parameters: </p> <div class="variablelist"> <p class="title"><b>The multi_pass template parameters</b></p> <dl class="variablelist"> <dt><span class="term">Input</span></dt> <dd><p> The type multi_pass uses to acquire its input. This is typically an input iterator, or functor. </p></dd> <dt><span class="term">Policies</span></dt> <dd><p> The combined policies to use to create an instance of a multi_pass iterator. This combined policy type is described below. </p></dd> </dl> </div> <p> It is possible to implement all of the required functionality of the combined policy in a single class. But it has shown to be more convenient to split this into four different groups of functions, i.e. four separate, but well coordinated policies. 
For this reason the <code class="computeroutput"><span class="identifier">multi_pass</span></code> library implements a template <code class="computeroutput"><span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">default_policy</span></code> allowing to combine several different policies, each implementing one of the functionality groups: </p> <div class="table"> <a name="spirit.support.multi_pass.policies_needed_for_default_policy_template"></a><p class="title"><b>Table&#160;12.&#160;Policies needed for default_policy template</b></p> <div class="table-contents"><table class="table" summary="Policies needed for default_policy template"> <colgroup> <col> <col> </colgroup> <thead><tr> <th> <p> Template Parameter </p> </th> <th> <p> Description </p> </th> </tr></thead> <tbody> <tr> <td> <p> <code class="computeroutput"><span class="identifier">OwnershipPolicy</span></code> </p> </td> <td> <p> This policy determines how <code class="computeroutput"><span class="identifier">multi_pass</span></code> deals with it's shared components. </p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">CheckingPolicy</span></code> </p> </td> <td> <p> This policy determines how checking for invalid iterators is done. </p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">InputPolicy</span></code> </p> </td> <td> <p> A class that defines how <code class="computeroutput"><span class="identifier">multi_pass</span></code> acquires its input. The <code class="computeroutput"><span class="identifier">InputPolicy</span></code> is parameterized by the <code class="computeroutput"><span class="identifier">Input</span></code> template parameter to the <code class="computeroutput"><span class="identifier">multi_pass</span></code>. 
</p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">StoragePolicy</span></code> </p> </td> <td> <p> The buffering scheme used by <code class="computeroutput"><span class="identifier">multi_pass</span></code> is determined and managed by the StoragePolicy. </p> </td> </tr> </tbody> </table></div> </div> <br class="table-break"><p> The <code class="computeroutput"><span class="identifier">multi_pass</span></code> library contains several predefined policy implementations for each of the policy types as described above. First we will describe those predefined types. Afterwards we will give some guidelines how you can write your own policy implementations. </p> <h5> <a name="spirit.support.multi_pass.h3"></a> <span class="phrase"><a name="spirit.support.multi_pass.predefined_policies"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.predefined_policies">Predefined policies</a> </h5> <p> All predefined <code class="computeroutput"><span class="identifier">multi_pass</span></code> policies are defined in the namespace <code class="computeroutput"><span class="identifier">boost</span><span class="special">::</span><span class="identifier">spirit</span><span class="special">::</span><span class="identifier">iterator_policies</span></code>. 
</p> <div class="table"> <a name="spirit.support.multi_pass.predefined_policy_classes"></a><p class="title"><b>Table&#160;13.&#160;Predefined policy classes</b></p> <div class="table-contents"><table class="table" summary="Predefined policy classes"> <colgroup> <col> <col> </colgroup> <thead><tr> <th> <p> Class name </p> </th> <th> <p> Description </p> </th> </tr></thead> <tbody> <tr> <td> <p> <span class="bold"><strong>InputPolicy</strong></span> classes </p> </td> <td class="auto-generated">&#160;</td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">input_iterator</span></code> </p> </td> <td> <p> This policy directs <code class="computeroutput"><span class="identifier">multi_pass</span></code> to read from an input iterator of type <code class="computeroutput"><span class="identifier">Input</span></code>. </p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">buffering_input_iterator</span></code> </p> </td> <td> <p> This policy directs <code class="computeroutput"><span class="identifier">multi_pass</span></code> to read from an input iterator of type <code class="computeroutput"><span class="identifier">Input</span></code>. Additionally it buffers the last character received from the underlying iterator. This allows to wrap iterators not buffering the last character on their own (as <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">istreambuf_iterator</span></code>). 
</p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">istream</span></code> </p> </td> <td> <p> This policy directs <code class="computeroutput"><span class="identifier">multi_pass</span></code> to read from an input stream of type <code class="computeroutput"><span class="identifier">Input</span></code> (usually a <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">basic_istream</span></code>). </p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">lex_input</span></code> </p> </td> <td> <p> This policy obtains its input by calling yylex(), which would typically be provided by a scanner generated by <a href="path_to_url" target="_top">Flex</a>. If you use this policy your code must link against a <a href="path_to_url" target="_top">Flex</a> generated scanner. </p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">functor_input</span></code> </p> </td> <td> <p> This input policy obtains its data by calling a functor of type <code class="computeroutput"><span class="identifier">Input</span></code>. The functor must meet certain requirements. It must have a typedef called <code class="computeroutput"><span class="identifier">result_type</span></code> which should be the type returned from <code class="computeroutput"><span class="keyword">operator</span><span class="special">()</span></code>. Also, since an input policy needs a way to determine when the end of input has been reached, the functor must contain a static variable named <code class="computeroutput"><span class="identifier">eof</span></code> which is comparable to a variable of <code class="computeroutput"><span class="identifier">result_type</span></code>. 
</p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">split_functor_input</span></code> </p> </td> <td> <p> This is essentially the same as the <code class="computeroutput"><span class="identifier">functor_input</span></code> policy except that the (user supplied) function object exposes separate <code class="computeroutput"><span class="identifier">unique</span></code> and <code class="computeroutput"><span class="identifier">shared</span></code> sub classes, allowing you to integrate the functor's <span class="emphasis"><em>unique</em></span> data members with the <code class="computeroutput"><span class="identifier">multi_pass</span></code> data items held by each instance and its <span class="emphasis"><em>shared</em></span> data members will be integrated with the <code class="computeroutput"><span class="identifier">multi_pass</span></code> members shared by all copies. </p> </td> </tr> <tr> <td> <p> <span class="bold"><strong>OwnershipPolicy</strong></span> classes </p> </td> <td class="auto-generated">&#160;</td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">ref_counted</span></code> </p> </td> <td> <p> This class uses a reference counting scheme. The <code class="computeroutput"><span class="identifier">multi_pass</span></code> will delete its shared components when the count reaches zero. </p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">first_owner</span></code> </p> </td> <td> <p> When this policy is used, the first <code class="computeroutput"><span class="identifier">multi_pass</span></code> created will be the one that deletes the shared data. Each copy will not take ownership of the shared data. This works well for <a href="path_to_url" target="_top">Spirit</a>, since no dynamic allocation of iterators is done. All copies are made on the stack, so the original iterator has the longest lifespan. 
</p> </td> </tr> <tr> <td> <p> <span class="bold"><strong>CheckingPolicy</strong></span> classes </p> </td> <td class="auto-generated">&#160;</td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">no_check</span></code> </p> </td> <td> <p> This policy does no checking at all. </p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">buf_id_check</span></code> </p> </td> <td> <p> This policy keeps around a buffer id, or a buffer age. Every time <code class="computeroutput"><span class="identifier">clear_queue</span><span class="special">()</span></code> is called on a <code class="computeroutput"><span class="identifier">multi_pass</span></code> iterator, it is possible that all other iterators become invalid. When <code class="computeroutput"><span class="identifier">clear_queue</span><span class="special">()</span></code> is called, <code class="computeroutput"><span class="identifier">buf_id_check</span></code> increments the buffer id. When an iterator is dereferenced, this policy checks that the buffer id of the iterator matches the shared buffer id. This policy is most effective when used together with the <code class="computeroutput"><span class="identifier">split_std_deque</span></code> StoragePolicy. It should not be used with the <code class="computeroutput"><span class="identifier">fixed_size_queue</span></code> StoragePolicy, because it will not detect iterator dereferences that are out of range. </p> </td> </tr> <tr> <td> <p> full_check </p> </td> <td> <p> This policy has not been implemented yet. When it is, it will keep track of all iterators and make sure that they are all valid. This will be mostly useful for debugging purposes as it will incur significant overhead. 
</p> </td> </tr> <tr> <td> <p> <span class="bold"><strong>StoragePolicy</strong></span> classes </p> </td> <td class="auto-generated">&#160;</td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">split_std_deque</span></code> </p> </td> <td> <p> Despite its name this policy keeps all buffered data in a <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">vector</span></code>. All data is stored as long as there is more than one iterator. Once the iterator count goes down to one, and the queue is no longer needed, it is cleared, freeing up memory. The queue can also be forcibly cleared by calling <code class="computeroutput"><span class="identifier">multi_pass</span><span class="special">::</span><span class="identifier">clear_queue</span><span class="special">()</span></code>. </p> </td> </tr> <tr> <td> <p> <code class="computeroutput"><span class="identifier">fixed_size_queue</span><span class="special">&lt;</span><span class="identifier">N</span><span class="special">&gt;</span></code> </p> </td> <td> <p> This policy keeps a circular buffer that is size <code class="computeroutput"><span class="identifier">N</span><span class="special">+</span><span class="number">1</span></code> and stores <code class="computeroutput"><span class="identifier">N</span></code> elements. <code class="computeroutput"><span class="identifier">fixed_size_queue</span></code> is a template with a <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">size_t</span></code> parameter that specifies the queue size. It is your responsibility to ensure that <code class="computeroutput"><span class="identifier">N</span></code> is big enough for your parser. Whenever the foremost iterator is incremented, the last character of the buffer is automatically erased. 
Currently there is no way to tell if an iterator is trailing too far behind and has become invalid. No dynamic allocation is done by this policy during normal iterator operation, only on initial construction. The memory usage of this <code class="computeroutput"><span class="identifier">StoragePolicy</span></code> is set at <code class="computeroutput"><span class="identifier">N</span><span class="special">+</span><span class="number">1</span></code> bytes, unlike <code class="computeroutput"><span class="identifier">split_std_deque</span></code>, which is unbounded. </p> </td> </tr> </tbody> </table></div> </div> <br class="table-break"><h5> <a name="spirit.support.multi_pass.h4"></a> <span class="phrase"><a name="spirit.support.multi_pass.combinations__how_to_specify_your_own_custom_multi_pass"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.combinations__how_to_specify_your_own_custom_multi_pass">Combinations: How to specify your own custom multi_pass</a> </h5> <p> The beauty of policy based designs is that you can mix and match policies to create your own custom iterator by selecting the policies you want. 
Here's an example of how to specify a custom <code class="computeroutput"><span class="identifier">multi_pass</span></code> that wraps an <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">istream_iterator</span><span class="special">&lt;</span><span class="keyword">char</span><span class="special">&gt;</span></code>, and is slightly more efficient than the default <code class="computeroutput"><span class="identifier">multi_pass</span></code> (as generated by the <code class="computeroutput"><span class="identifier">make_default_multi_pass</span><span class="special">()</span></code> API function) because it uses the <code class="computeroutput"><span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">first_owner</span></code> OwnershipPolicy and the <code class="computeroutput"><span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">no_check</span></code> CheckingPolicy: </p> <pre class="programlisting"><span class="keyword">typedef</span> <span class="identifier">multi_pass</span><span class="special">&lt;</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">istream_iterator</span><span class="special">&lt;</span><span class="keyword">char</span><span class="special">&gt;</span> <span class="special">,</span> <span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">default_policy</span><span class="special">&lt;</span> <span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">first_owner</span> <span class="special">,</span> <span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">no_check</span> <span class="special">,</span> <span class="identifier">iterator_policies</span><span class="special">::</span><span 
class="identifier">buffering_input_iterator</span> <span class="special">,</span> <span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">split_std_deque</span> <span class="special">&gt;</span> <span class="special">&gt;</span> <span class="identifier">first_owner_multi_pass_type</span><span class="special">;</span> </pre> <p> The default template parameters for <code class="computeroutput"><span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">default_policy</span></code> are: </p> <div class="itemizedlist"><ul class="itemizedlist" style="list-style-type: disc; "> <li class="listitem"> <code class="computeroutput"><span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">ref_counted</span></code> OwnershipPolicy </li> <li class="listitem"> <code class="computeroutput"><span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">no_check</span></code> CheckingPolicy, if <code class="computeroutput"><span class="identifier">BOOST_SPIRIT_DEBUG</span></code> is defined: <code class="computeroutput"><span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">buf_id_check</span></code> CheckingPolicy </li> <li class="listitem"> <code class="computeroutput"><span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">buffering_input_iterator</span></code> InputPolicy, and </li> <li class="listitem"> <code class="computeroutput"><span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">split_std_deque</span></code> StoragePolicy. 
</li> </ul></div> <p> So if you use <code class="computeroutput"><span class="identifier">multi_pass</span><span class="special">&lt;</span><span class="identifier">std</span><span class="special">::</span><span class="identifier">istream_iterator</span><span class="special">&lt;</span><span class="keyword">char</span><span class="special">&gt;</span> <span class="special">&gt;</span></code> you will get those pre-defined behaviors while wrapping an <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">istream_iterator</span><span class="special">&lt;</span><span class="keyword">char</span><span class="special">&gt;</span></code>. </p> <h5> <a name="spirit.support.multi_pass.h5"></a> <span class="phrase"><a name="spirit.support.multi_pass.dealing_with_constant_look_ahead"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.dealing_with_constant_look_ahead">Dealing with constant look ahead</a> </h5> <p> There is one other pre-defined class called <code class="computeroutput"><span class="identifier">look_ahead</span></code>. The class <code class="computeroutput"><span class="identifier">look_ahead</span></code> is another predefined <code class="computeroutput"><span class="identifier">multi_pass</span></code> iterator type. It has two template parameters: <code class="computeroutput"><span class="identifier">Input</span></code>, the type of the input iterator to wrap, and a <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">size_t</span> <span class="identifier">N</span></code>, which specifies the size of the buffer to the <code class="computeroutput"><span class="identifier">fixed_size_queue</span></code> policy. While the default multi_pass configuration is designed for safety, <code class="computeroutput"><span class="identifier">look_ahead</span></code> is designed for speed. 
<code class="computeroutput"><span class="identifier">look_ahead</span></code> is derived from a multi_pass with the following policies: <code class="computeroutput"><span class="identifier">input_iterator</span></code> InputPolicy, <code class="computeroutput"><span class="identifier">first_owner</span></code> OwnershipPolicy, <code class="computeroutput"><span class="identifier">no_check</span></code> CheckingPolicy, and <code class="computeroutput"><span class="identifier">fixed_size_queue</span><span class="special">&lt;</span><span class="identifier">N</span><span class="special">&gt;</span></code> StoragePolicy. </p> <p> This iterator is defined by including the files: </p> <pre class="programlisting"><span class="comment">// forwards to &lt;boost/spirit/home/support/look_ahead.hpp&gt;</span> <span class="preprocessor">#include</span> <span class="special">&lt;</span><span class="identifier">boost</span><span class="special">/</span><span class="identifier">spirit</span><span class="special">/</span><span class="identifier">include</span><span class="special">/</span><span class="identifier">support_look_ahead</span><span class="special">.</span><span class="identifier">hpp</span><span class="special">&gt;</span> </pre> <p> Also, see <a class="link" href="../structure/include.html" title="Include">Include Structure</a>. 
</p> <h5> <a name="spirit.support.multi_pass.h6"></a> <span class="phrase"><a name="spirit.support.multi_pass.reading_from_standard_input_streams"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.reading_from_standard_input_streams">Reading from standard input streams</a> </h5> <p> Yet another predefined iterator for wrapping standard input streams (usually a <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">basic_istream</span><span class="special">&lt;&gt;</span></code>) is called <code class="computeroutput"><span class="identifier">basic_istream_iterator</span><span class="special">&lt;</span><span class="identifier">Char</span><span class="special">,</span> <span class="identifier">Traits</span><span class="special">&gt;</span></code>. This class is usable as a drop in replacement for <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">istream_iterator</span><span class="special">&lt;</span><span class="identifier">Char</span><span class="special">,</span> <span class="identifier">Traits</span><span class="special">&gt;</span></code>. Its only difference is that it is a forward iterator (instead of the <code class="computeroutput"><span class="identifier">std</span><span class="special">::</span><span class="identifier">istream_iterator</span></code>, which is an input iterator). 
<code class="computeroutput"><span class="identifier">basic_istream_iterator</span></code> is derived from a multi_pass with the following policies: <code class="computeroutput"><span class="identifier">istream</span></code> InputPolicy, <code class="computeroutput"><span class="identifier">ref_counted</span></code> OwnershipPolicy, <code class="computeroutput"><span class="identifier">no_check</span></code> CheckingPolicy, and <code class="computeroutput"><span class="identifier">split_std_deque</span></code> StoragePolicy. </p> <p> There exists an additional predefined typedef: </p> <pre class="programlisting"><span class="keyword">typedef</span> <span class="identifier">basic_istream_iterator</span><span class="special">&lt;</span><span class="keyword">char</span><span class="special">,</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">char_traits</span><span class="special">&lt;</span><span class="keyword">char</span><span class="special">&gt;</span> <span class="special">&gt;</span> <span class="identifier">istream_iterator</span><span class="special">;</span> </pre> <p> This iterator is defined by including the files: </p> <pre class="programlisting"><span class="comment">// forwards to &lt;boost/spirit/home/support/istream_iterator.hpp&gt;</span> <span class="preprocessor">#include</span> <span class="special">&lt;</span><span class="identifier">boost</span><span class="special">/</span><span class="identifier">spirit</span><span class="special">/</span><span class="identifier">include</span><span class="special">/</span><span class="identifier">support_istream_iterator</span><span class="special">.</span><span class="identifier">hpp</span><span class="special">&gt;</span> </pre> <p> Also, see <a class="link" href="../structure/include.html" title="Include">Include Structure</a>. 
</p> <h5> <a name="spirit.support.multi_pass.h7"></a> <span class="phrase"><a name="spirit.support.multi_pass.your_sha256_hashtifier__functor_input__phrase___code__inputpolicy"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.your_sha256_hashtifier__functor_input__phrase___code__inputpolicy">How to write a functor for use with the <code class="computeroutput"><span class="identifier">functor_input</span></code> InputPolicy</a> </h5> <p> If you want to use the <code class="computeroutput"><span class="identifier">functor_input</span></code> InputPolicy, you can write your own function object that will supply the input to <code class="computeroutput"><span class="identifier">multi_pass</span></code>. The function object must satisfy several requirements. It must have a typedef <code class="computeroutput"><span class="identifier">result_type</span></code> which specifies the return type of its <code class="computeroutput"><span class="keyword">operator</span><span class="special">()</span></code>. This is standard practice in the STL. Also, it must supply a static variable called eof which is compared against to know whether the input has reached the end. Last but not least the function object must be default constructible. 
Here is an example: </p> <pre class="programlisting"><span class="preprocessor">#include</span> <span class="special">&lt;</span><span class="identifier">iostream</span><span class="special">&gt;</span> <span class="preprocessor">#include</span> <span class="special">&lt;</span><span class="identifier">boost</span><span class="special">/</span><span class="identifier">spirit</span><span class="special">/</span><span class="identifier">home</span><span class="special">/</span><span class="identifier">qi</span><span class="special">.</span><span class="identifier">hpp</span><span class="special">&gt;</span> <span class="preprocessor">#include</span> <span class="special">&lt;</span><span class="identifier">boost</span><span class="special">/</span><span class="identifier">spirit</span><span class="special">/</span><span class="identifier">home</span><span class="special">/</span><span class="identifier">support</span><span class="special">.</span><span class="identifier">hpp</span><span class="special">&gt;</span> <span class="preprocessor">#include</span> <span class="special">&lt;</span><span class="identifier">boost</span><span class="special">/</span><span class="identifier">spirit</span><span class="special">/</span><span class="identifier">home</span><span class="special">/</span><span class="identifier">support</span><span class="special">/</span><span class="identifier">multi_pass</span><span class="special">.</span><span class="identifier">hpp</span><span class="special">&gt;</span> <span class="preprocessor">#include</span> <span class="special">&lt;</span><span class="identifier">boost</span><span class="special">/</span><span class="identifier">spirit</span><span class="special">/</span><span class="identifier">home</span><span class="special">/</span><span class="identifier">support</span><span class="special">/</span><span class="identifier">iterators</span><span class="special">/</span><span class="identifier">detail</span><span 
class="special">/</span><span class="identifier">functor_input_policy</span><span class="special">.</span><span class="identifier">hpp</span><span class="special">&gt;</span> <span class="comment">// define the function object</span> <span class="keyword">class</span> <span class="identifier">iterate_a2m</span> <span class="special">{</span> <span class="keyword">public</span><span class="special">:</span> <span class="keyword">typedef</span> <span class="keyword">char</span> <span class="identifier">result_type</span><span class="special">;</span> <span class="identifier">iterate_a2m</span><span class="special">()</span> <span class="special">:</span> <span class="identifier">c_</span><span class="special">(</span><span class="char">'A'</span><span class="special">)</span> <span class="special">{}</span> <span class="identifier">iterate_a2m</span><span class="special">(</span><span class="keyword">char</span> <span class="identifier">c</span><span class="special">)</span> <span class="special">:</span> <span class="identifier">c_</span><span class="special">(</span><span class="identifier">c</span><span class="special">)</span> <span class="special">{}</span> <span class="identifier">result_type</span> <span class="keyword">operator</span><span class="special">()()</span> <span class="special">{</span> <span class="keyword">if</span> <span class="special">(</span><span class="identifier">c_</span> <span class="special">==</span> <span class="char">'M'</span><span class="special">)</span> <span class="keyword">return</span> <span class="identifier">eof</span><span class="special">;</span> <span class="keyword">return</span> <span class="identifier">c_</span><span class="special">++;</span> <span class="special">}</span> <span class="keyword">static</span> <span class="identifier">result_type</span> <span class="identifier">eof</span><span class="special">;</span> <span class="keyword">private</span><span class="special">:</span> <span class="keyword">char</span> 
<span class="identifier">c_</span><span class="special">;</span> <span class="special">};</span> <span class="identifier">iterate_a2m</span><span class="special">::</span><span class="identifier">result_type</span> <span class="identifier">iterate_a2m</span><span class="special">::</span><span class="identifier">eof</span> <span class="special">=</span> <span class="identifier">iterate_a2m</span><span class="special">::</span><span class="identifier">result_type</span><span class="special">(</span><span class="char">'M'</span><span class="special">);</span> <span class="keyword">using</span> <span class="keyword">namespace</span> <span class="identifier">boost</span><span class="special">::</span><span class="identifier">spirit</span><span class="special">;</span> <span class="comment">// create two iterators using the defined function object, one of which is </span> <span class="comment">// an end iterator</span> <span class="keyword">typedef</span> <span class="identifier">multi_pass</span><span class="special">&lt;</span><span class="identifier">iterate_a2m</span> <span class="special">,</span> <span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">first_owner</span> <span class="special">,</span> <span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">no_check</span> <span class="special">,</span> <span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">functor_input</span> <span class="special">,</span> <span class="identifier">iterator_policies</span><span class="special">::</span><span class="identifier">split_std_deque</span><span class="special">&gt;</span> <span class="identifier">functor_multi_pass_type</span><span class="special">;</span> <span class="keyword">int</span> <span class="identifier">main</span><span class="special">()</span> <span class="special">{</span> <span
class="identifier">functor_multi_pass_type</span> <span class="identifier">first</span> <span class="special">=</span> <span class="identifier">functor_multi_pass_type</span><span class="special">(</span><span class="identifier">iterate_a2m</span><span class="special">());</span> <span class="identifier">functor_multi_pass_type</span> <span class="identifier">last</span><span class="special">;</span> <span class="comment">// use the iterators: this will print "ABCDEFGHIJKL"</span> <span class="keyword">while</span> <span class="special">(</span><span class="identifier">first</span> <span class="special">!=</span> <span class="identifier">last</span><span class="special">)</span> <span class="special">{</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">cout</span> <span class="special">&lt;&lt;</span> <span class="special">*</span><span class="identifier">first</span><span class="special">;</span> <span class="special">++</span><span class="identifier">first</span><span class="special">;</span> <span class="special">}</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">cout</span> <span class="special">&lt;&lt;</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">endl</span><span class="special">;</span> <span class="keyword">return</span> <span class="number">0</span><span class="special">;</span> <span class="special">}</span> </pre> <h5> <a name="spirit.support.multi_pass.h8"></a> <span class="phrase"><a name="spirit.support.multi_pass.how_to_write_policies_for_use_with_multi_pass"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.how_to_write_policies_for_use_with_multi_pass">How to write policies for use with multi_pass</a> </h5> <p> All policies to be used with the <code class="computeroutput"><span class="identifier">default_policy</span></code> template need to have two embedded classes: <code 
class="computeroutput"><span class="identifier">unique</span></code> and <code class="computeroutput"><span class="identifier">shared</span></code>. The <code class="computeroutput"><span class="identifier">unique</span></code> class needs to implement all required functions for a particular policy type. In addition it may hold all member data items being <span class="emphasis"><em>unique</em></span> for a particular instance of a <code class="computeroutput"><span class="identifier">multi_pass</span></code> (hence the name). The <code class="computeroutput"><span class="identifier">shared</span></code> class does not expose any member functions (except sometimes a constructor), but it may hold all member data items to be <span class="emphasis"><em>shared</em></span> between all copies of a particular <code class="computeroutput"><span class="identifier">multi_pass</span></code>. </p> <h5> <a name="spirit.support.multi_pass.h9"></a> <span class="phrase"><a name="spirit.support.multi_pass.inputpolicy"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.inputpolicy">InputPolicy</a> </h5> <p> An <code class="computeroutput"><span class="identifier">InputPolicy</span></code> must have the following interface: </p> <pre class="programlisting"><span class="keyword">struct</span> <span class="identifier">input_policy</span> <span class="special">{</span> <span class="comment">// Input is the same type used as the first template parameter</span> <span class="comment">// while instantiating the multi_pass</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">Input</span><span class="special">&gt;</span> <span class="keyword">struct</span> <span class="identifier">unique</span> <span class="special">{</span> <span class="comment">// these typedef's will be exposed as the multi_pass iterator</span> <span class="comment">// properties</span> <span 
class="keyword">typedef</span> <span class="identifier">__unspecified_type__</span> <span class="identifier">value_type</span><span class="special">;</span> <span class="keyword">typedef</span> <span class="identifier">__unspecified_type__</span> <span class="identifier">difference_type</span><span class="special">;</span> <span class="keyword">typedef</span> <span class="identifier">__unspecified_type__</span> <span class="identifier">distance_type</span><span class="special">;</span> <span class="keyword">typedef</span> <span class="identifier">__unspecified_type__</span> <span class="identifier">pointer</span><span class="special">;</span> <span class="keyword">typedef</span> <span class="identifier">__unspecified_type__</span> <span class="identifier">reference</span><span class="special">;</span> <span class="identifier">unique</span><span class="special">()</span> <span class="special">{}</span> <span class="keyword">explicit</span> <span class="identifier">unique</span><span class="special">(</span><span class="identifier">Input</span><span class="special">)</span> <span class="special">{}</span> <span class="comment">// destroy is called whenever the last copy of a multi_pass is</span> <span class="comment">// destructed (ownership_policy::release() returned true)</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="identifier">destroy</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// swap is called by multi_pass::swap()</span> <span class="keyword">void</span> <span 
class="identifier">swap</span><span class="special">(</span><span class="identifier">unique</span><span class="special">&amp;);</span> <span class="comment">// get_input is called whenever the next input character/token</span> <span class="comment">// should be fetched. </span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="comment">//</span> <span class="comment">// This method is expected to return a reference to the next </span> <span class="comment">// character/token</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">::</span><span class="identifier">reference</span> <span class="identifier">get_input</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// advance_input is called whenever the underlying input stream </span> <span class="comment">// should be advanced so that the next call to get_input will be </span> <span class="comment">// able to return the next input character/token</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="identifier">advance_input</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span 
class="special">);</span> <span class="comment">// input_at_eof is called to test whether this instance is an </span> <span class="comment">// end of input iterator.</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="comment">//</span> <span class="comment">// This method is expected to return true if the end of input is </span> <span class="comment">// reached. It is often used in the implementation of the function</span> <span class="comment">// storage_policy::is_eof.</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">bool</span> <span class="identifier">input_at_eof</span><span class="special">(</span><span class="identifier">MultiPass</span> <span class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// input_is_valid is called to verify if the parameter t represents </span> <span class="comment">// a valid input character/token</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="comment">// t: is the character/token to test for validity</span> <span class="comment">// </span> <span class="comment">// This method is expected to return true if the parameter t </span> <span class="comment">// represents a valid character/token.</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">bool</span> <span class="identifier">input_is_valid</span><span class="special">(</span><span class="identifier">MultiPass</span> <span
class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">,</span> <span class="identifier">value_type</span> <span class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">t</span><span class="special">);</span> <span class="special">};</span> <span class="comment">// Input is the same type used as the first template parameter passed</span> <span class="comment">// while instantiating the multi_pass</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">Input</span><span class="special">&gt;</span> <span class="keyword">struct</span> <span class="identifier">shared</span> <span class="special">{</span> <span class="keyword">explicit</span> <span class="identifier">shared</span><span class="special">(</span><span class="identifier">Input</span><span class="special">)</span> <span class="special">{}</span> <span class="special">};</span> <span class="special">};</span> </pre> <p> It is possible to derive the struct <code class="computeroutput"><span class="identifier">unique</span></code> from the type <code class="computeroutput"><span class="identifier">boost</span><span class="special">::</span><span class="identifier">spirit</span><span class="special">::</span><span class="identifier">detail</span><span class="special">::</span><span class="identifier">default_input_policy</span></code>. This type implements a minimal sufficient interface for some of the required functions, simplifying the task of writing a new input policy. </p> <p> This class may implement a function <code class="computeroutput"><span class="identifier">destroy</span><span class="special">()</span></code> being called during destruction of the last copy of a <code class="computeroutput"><span class="identifier">multi_pass</span></code>. 
This function should be used to free any of the shared data items the policy might have allocated during construction of its <code class="computeroutput"><span class="identifier">shared</span></code> part. Because of the way <code class="computeroutput"><span class="identifier">multi_pass</span></code> is implemented any allocated data members in <code class="computeroutput"><span class="identifier">shared</span></code> should <span class="underline">not</span> be deep copied in a copy constructor of <code class="computeroutput"><span class="identifier">shared</span></code>. </p> <h5> <a name="spirit.support.multi_pass.h10"></a> <span class="phrase"><a name="spirit.support.multi_pass.ownershippolicy"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.ownershippolicy">OwnershipPolicy</a> </h5> <p> The <code class="computeroutput"><span class="identifier">OwnershipPolicy</span></code> must have the following interface: </p> <pre class="programlisting"><span class="keyword">struct</span> <span class="identifier">ownership_policy</span> <span class="special">{</span> <span class="keyword">struct</span> <span class="identifier">unique</span> <span class="special">{</span> <span class="comment">// destroy is called whenever the last copy of a multi_pass is</span> <span class="comment">// destructed (ownership_policy::release() returned true)</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="identifier">destroy</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// swap 
is called by multi_pass::swap()</span> <span class="keyword">void</span> <span class="identifier">swap</span><span class="special">(</span><span class="identifier">unique</span><span class="special">&amp;);</span> <span class="comment">// clone is called whenever a multi_pass is copied</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="identifier">clone</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// release is called whenever a multi_pass is destroyed</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="comment">//</span> <span class="comment">// The method is expected to return true if the destructed </span> <span class="comment">// instance is the last copy of a particular multi_pass. 
</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">bool</span> <span class="identifier">release</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// is_unique is called to test whether this instance is the only </span> <span class="comment">// existing copy of a particular multi_pass</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="comment">//</span> <span class="comment">// The method is expected to return true if this instance is unique</span> <span class="comment">// (no other copies of this multi_pass exist).</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">bool</span> <span class="identifier">is_unique</span><span class="special">(</span><span class="identifier">MultiPass</span> <span class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="special">};</span> <span class="keyword">struct</span> <span class="identifier">shared</span> <span class="special">{};</span> <span class="special">};</span> </pre> <p> It is possible to derive the struct <code class="computeroutput"><span class="identifier">unique</span></code> from the type <code class="computeroutput"><span class="identifier">boost</span><span class="special">::</span><span class="identifier">spirit</span><span class="special">::</span><span class="identifier">detail</span><span 
class="special">::</span><span class="identifier">default_ownership_policy</span></code>. This type implements a minimal sufficient interface for some of the required functions, simplifying the task of writing a new ownership policy. </p> <p> This class may implement a function <code class="computeroutput"><span class="identifier">destroy</span><span class="special">()</span></code> being called during destruction of the last copy of a <code class="computeroutput"><span class="identifier">multi_pass</span></code>. This function should be used to free any of the shared data items the policy might have allocated during construction of its <code class="computeroutput"><span class="identifier">shared</span></code> part. Because of the way <code class="computeroutput"><span class="identifier">multi_pass</span></code> is implemented any allocated data members in <code class="computeroutput"><span class="identifier">shared</span></code> should <span class="underline">not</span> be deep copied in a copy constructor of <code class="computeroutput"><span class="identifier">shared</span></code>. 
</p> <h5> <a name="spirit.support.multi_pass.h11"></a> <span class="phrase"><a name="spirit.support.multi_pass.checkingpolicy"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.checkingpolicy">CheckingPolicy</a> </h5> <p> The <code class="computeroutput"><span class="identifier">CheckingPolicy</span></code> must have the following interface: </p> <pre class="programlisting"><span class="keyword">struct</span> <span class="identifier">checking_policy</span> <span class="special">{</span> <span class="keyword">struct</span> <span class="identifier">unique</span> <span class="special">{</span> <span class="comment">// swap is called by multi_pass::swap()</span> <span class="keyword">void</span> <span class="identifier">swap</span><span class="special">(</span><span class="identifier">unique</span><span class="special">&amp;);</span> <span class="comment">// destroy is called whenever the last copy of a multi_pass is</span> <span class="comment">// destructed (ownership_policy::release() returned true)</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="identifier">destroy</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// docheck is called before the multi_pass is dereferenced or </span> <span class="comment">// incremented. 
</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="comment">//</span> <span class="comment">// This method is expected to make sure the multi_pass instance is</span> <span class="comment">// still valid. If it is invalid an exception should be thrown.</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="identifier">docheck</span><span class="special">(</span><span class="identifier">MultiPass</span> <span class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// clear_queue is called whenever the function </span> <span class="comment">// multi_pass::clear_queue is called on this instance</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="identifier">clear_queue</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="special">};</span> <span class="keyword">struct</span> <span class="identifier">shared</span> <span class="special">{};</span> <span class="special">};</span> </pre> <p> It is possible to derive the struct <code class="computeroutput"><span class="identifier">unique</span></code> from the type <code class="computeroutput"><span class="identifier">boost</span><span 
class="special">::</span><span class="identifier">spirit</span><span class="special">::</span><span class="identifier">detail</span><span class="special">::</span><span class="identifier">default_checking_policy</span></code>. This type implements a minimal sufficient interface for some of the required functions, simplifying the task of writing a new checking policy. </p> <p> This class may implement a function <code class="computeroutput"><span class="identifier">destroy</span><span class="special">()</span></code> being called during destruction of the last copy of a <code class="computeroutput"><span class="identifier">multi_pass</span></code>. This function should be used to free any of the shared data items the policy might have allocated during construction of its <code class="computeroutput"><span class="identifier">shared</span></code> part. Because of the way <code class="computeroutput"><span class="identifier">multi_pass</span></code> is implemented any allocated data members in <code class="computeroutput"><span class="identifier">shared</span></code> should <span class="underline">not</span> be deep copied in a copy constructor of <code class="computeroutput"><span class="identifier">shared</span></code>. 
</p> <h5> <a name="spirit.support.multi_pass.h12"></a> <span class="phrase"><a name="spirit.support.multi_pass.storagepolicy"></a></span><a class="link" href="multi_pass.html#spirit.support.multi_pass.storagepolicy">StoragePolicy</a> </h5> <p> A <code class="computeroutput"><span class="identifier">StoragePolicy</span></code> must have the following interface: </p> <pre class="programlisting"><span class="keyword">struct</span> <span class="identifier">storage_policy</span> <span class="special">{</span> <span class="comment">// Value is the same type as typename MultiPass::value_type</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">Value</span><span class="special">&gt;</span> <span class="keyword">struct</span> <span class="identifier">unique</span> <span class="special">{</span> <span class="comment">// destroy is called whenever the last copy of a multi_pass is</span> <span class="comment">// destructed (ownership_policy::release() returned true)</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="identifier">destroy</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// swap is called by multi_pass::swap()</span> <span class="keyword">void</span> <span class="identifier">swap</span><span class="special">(</span><span class="identifier">unique</span><span class="special">&amp;);</span> <span class="comment">// dereference is called whenever multi_pass::operator*() is invoked</span> <span 
class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="comment">//</span> <span class="comment">// This function is expected to return a reference to the current</span> <span class="comment">// character/token.</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">::</span><span class="identifier">reference</span> <span class="identifier">dereference</span><span class="special">(</span><span class="identifier">MultiPass</span> <span class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// increment is called whenever multi_pass::operator++ is invoked</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="identifier">increment</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">void</span> <span 
class="identifier">clear_queue</span><span class="special">(</span><span class="identifier">MultiPass</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// is_eof is called to test whether this instance is an end of input </span> <span class="comment">// iterator.</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="comment">//</span> <span class="comment">// This method is expected to return true if the end of input is </span> <span class="comment">// reached. </span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">bool</span> <span class="identifier">is_eof</span><span class="special">(</span><span class="identifier">MultiPass</span> <span class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">);</span> <span class="comment">// equal_to is called whenever multi_pass::operator==() is invoked</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="comment">// rhs: is the multi_pass reference this instance is compared </span> <span class="comment">// to</span> <span class="comment">//</span> <span class="comment">// This function is expected to return true if the current instance</span> <span class="comment">// is equal to the right hand side multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">bool</span> <span class="identifier">equal_to</span><span
class="special">(</span><span class="identifier">MultiPass</span> <span class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">,</span> <span class="identifier">MultiPass</span> <span class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">rhs</span><span class="special">);</span> <span class="comment">// less_than is called whenever multi_pass::operator&lt;() is invoked</span> <span class="comment">//</span> <span class="comment">// mp: is a reference to the whole multi_pass instance</span> <span class="comment">// rhs: is the multi_pass reference this instance is compared </span> <span class="comment">// to</span> <span class="comment">//</span> <span class="comment">// This function is expected to return true if the current instance</span> <span class="comment">// is less than the right hand side multi_pass instance</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">MultiPass</span><span class="special">&gt;</span> <span class="keyword">static</span> <span class="keyword">bool</span> <span class="identifier">less_than</span><span class="special">(</span><span class="identifier">MultiPass</span> <span class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">mp</span><span class="special">,</span> <span class="identifier">MultiPass</span> <span class="keyword">const</span><span class="special">&amp;</span> <span class="identifier">rhs</span><span class="special">);</span> <span class="special">};</span> <span class="comment">// Value is the same type as typename MultiPass::value_type</span> <span class="keyword">template</span> <span class="special">&lt;</span><span class="keyword">typename</span> <span class="identifier">Value</span><span class="special">&gt;</span> <span class="keyword">struct</span> <span class="identifier">shared</span> <span 
class="special">{};</span> <span class="special">};</span> </pre> <p> It is possible to derive the struct <code class="computeroutput"><span class="identifier">unique</span></code> from the type <code class="computeroutput"><span class="identifier">boost</span><span class="special">::</span><span class="identifier">spirit</span><span class="special">::</span><span class="identifier">detail</span><span class="special">::</span><span class="identifier">default_storage_policy</span></code>. This type implements a minimal sufficient interface for some of the required functions, simplifying the task of writing a new storage policy. </p> <p> This class may implement a function <code class="computeroutput"><span class="identifier">destroy</span><span class="special">()</span></code> being called during destruction of the last copy of a <code class="computeroutput"><span class="identifier">multi_pass</span></code>. This function should be used to free any of the shared data items the policy might have allocated during construction of its <code class="computeroutput"><span class="identifier">shared</span></code> part. Because of the way <code class="computeroutput"><span class="identifier">multi_pass</span></code> is implemented any allocated data members in <code class="computeroutput"><span class="identifier">shared</span></code> should <span class="underline">not</span> be deep copied in a copy constructor of <code class="computeroutput"><span class="identifier">shared</span></code>. </p> <p> Generally, a <code class="computeroutput"><span class="identifier">StoragePolicy</span></code> is the trickiest policy to implement. You should study and understand the existing <code class="computeroutput"><span class="identifier">StoragePolicy</span></code> classes before you try and write your own. 
</p> </div> <table xmlns:rev="path_to_url~gregod/boost/tools/doc/revision" width="100%"><tr> <td align="left"></td> file LICENSE_1_0.txt or copy at <a href="path_to_url" target="_top">path_to_url </p> </div></td> </tr></table> <hr> <div class="spirit-nav"> <a accesskey="p" href="../support.html"><img src="../../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../support.html"><img src="../../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="line_pos_iterator.html"><img src="../../../../../../doc/src/images/next.png" alt="Next"></a> </div> </body> </html> ```
Balaji Mandir may refer to a number of temples dedicated to the Hindu deity Balaji, also known as Venkateswara: India Rajasthan Mehandipur Balaji Temple, a temple of Hanuman in Rajasthan, India Punrasar Balaji, a temple of Hanuman in Rajasthan, India Salasar Balaji, a temple of Hanuman in Rajasthan, India Other Indian states Balaji Mandir, Pune, in Pashan, Pune, India Balaji Temple, Ketkawla, in Pune, Maharashtra, India Chilkur Balaji Temple, a temple of Balaji near Hyderabad, India Shreebalajimandir, in Mumbai, Kandivali, Charkop, India Sri Balaji Temple, T. Nagar, in Chennai, Tamil Nadu, India Unao, Balaji, in Madhya Pradesh, India Elsewhere Sri Venkateswara (Balaji) Temple Minnesota, in Edina, Minnesota, US Tividale Tirupathy Balaji Temple, in Tividale, West Midlands, England
Asfordby railway station was a station serving the villages of Asfordby and Kirby Bellars in Leicestershire. The station was situated at a level crossing on the road between the two villages. It opened in 1846 and was originally named Kirby, but had been renamed Asfordby by 1863. It closed to passengers in 1951 but remained in use for goods until 1964. History It was opened by the Midland Railway on the Syston and Peterborough Railway. The station building was designed by the architects William Parsons and Sancton Wood. The contractors Norman and Grimson undertook to build it for £744 8s 6d. and it was remarkably similar to the station at Rearsby. It became part of the London, Midland and Scottish Railway during the Grouping of 1923. The station then passed on to the London Midland Region of British Railways on nationalisation in 1948. It was then closed by the British Railways Board. Stationmasters Charles Allen 1847 - 1892 H. Ellis 1892 - 1899 (formerly station master at Moira) William Williamson 1899 - 1928 W. Stephenson 1933 - 1935 (afterwards station master at Annesley) Walter Wilson 1935 H.E. Harrison ca. 1945 The site today Trains still pass the site on the Birmingham to Peterborough line. References External links Station on navigable O.S. map Former Midland Railway stations Disused railway stations in Leicestershire Railway stations in Great Britain opened in 1846 Railway stations in Great Britain closed in 1951 1846 establishments in England
Nariva is a county in Trinidad and Tobago. It is located in eastern Trinidad, south of Saint Andrew County and north of Mayaro County, to the west by Victoria County and to the northwest by Saint George County. The southern boundary of the county lies along the Ortoire River and the western boundary is Cocos Bay (more commonly referred to as Manzanilla Bay). The towns of Rio Claro and Ecclesville are located in Nariva County. The Nariva Swamp is located in the eastern part of Nariva County. Nariva County covers 166Β km2 (64Β mi2) and is divided into two wards, Charuma and Cocal. The name Nariva is of Amerindian origins. Until 1990 Nariva was administered together with the county of Mayaro by the Nariva–Mayaro County Council. Since then the county has been split between the Sangre Grande Regional Corporation and the Rio Claro–Mayaro Regional Corporation. References Counties of Trinidad and Tobago Rio Claro, Trinidad and Tobago Trinidad (island) Former counties
Donnie Calvin is an American reggae musician and singer. He was involved in Arthur Baker's studio project Rockers Revenge, which on September 18, 1982 had a number one hit on the Billboard Hot Dance Club Play chart with their version of Eddy Grant's "Walking on Sunshine" with Calvin as featured artist. The song also reached number 4 on the UK Singles Chart. References American reggae musicians Year of birth missing (living people) Living people
The Loder Cup is a New Zealand conservation award. It was donated by Gerald Loder, 1st Baron Wakehurst in 1926 to "encourage and honour New Zealanders who work to investigate, promote, retain and cherish our indigenous flora". The Minister of Conservation awards the Loder Cup to a person or group of people who best represent the objectives of the Cup. Recipients The Loder Cup has been awarded to the following individuals and groups: 1929 – Duncan and Davies Ltd, New Plymouth 1930 – Henry Bennett and son 1931 – Henry Bennett and son 1933 – T. Waugh and son 1934 – Lord Bledisloe 1935 – Trustees of R. C. Bruce 1936 – John Scott Thomson & George Simpson 1937 – Auckland Institute & Museum and Lucy Cranwell 1938 – Elizabeth Knox Gilmer 1939 – W. A. Thomson 1940 – P. H. Johnson 1941 – Edward Earle Vaile 1942 – A. W. Wastney 1943 – James Speden 1944 – Norman Potts 1945 – Walter Boa Brockie 1946 – Royal Forest & Bird Protection Society and Val Sanderson 1947 – N. R. W. Thomas 1948 – Andrew Davidson Beddie 1949 – Noeline Baker 1950 – Arthur Paul Harper 1951 – Lance McCaskill 1952 – Marguerite Crookes 1953 – PΓ©rrine Moncrieff 1954 – Norman L. Elder 1955 – Michael Christian Gudex 1956 – Frank Singleton Holman 1957 – Frederick William Lokan 1958 – Ernest Corbett 1959 – Charles Cameron 1960 – William Marton 1961 – Charles Thomas Keeble 1962 – Bernard H. M. Teague 1963 – Nancy Adams 1964 – David Alfred Bathgate 1965 – Arthur Farnell 1966 – Oliver Hunter 1967 – John Salmon 1968 – Victor C. Davies 1969 – Patrick John Devlin 1970 – Muriel E. and William E. Fisher 1971 – Violet Ada Briffault 1972 – Arthur David Mead 1973 – Katie Reynolds 1974 – Alexander Walter Anderson 1975 – Alan Mark 1976 – Waipahihi Botanical Society, Taupo 1977 – Reginald Ivan Bell 1978 – Lawrence J. Metcalf 1979 – Roger & Christina Sutton 1980 – Whangarei Native Forest & Bird Protection Society (Inc.) 1981 – Raymond H. Mole 1982 – Arthur William Ericson 1983 – Roy J. 
Peacock 1984 – Eric Godley 1985 – Audrey Eagle 1986 – Roderick Syme 1987 – Hugh Wilson 1988 – Arthur Blair Cowan 1989 – no award 1990 – Brian Molloy 1991 – Reginald Janes 1992 – Gordon and Celia Stephenson 1993 – Michael Greenwood 1994 – Peter Johnson 1995 – David Given 1996 – Native Forests Restoration Trust 1997 – Isabel Morgan 1998 – Supporters of Tiritiri Matangi Island 1999 – Chris and Brian Rance 2000 – Jorge Santos 2001 – Colin Meurk 2002 – Marge Maddren 2003 – Gerry McSweeney 2004 – Colin Ogle 2005 – Ewen Cameron 2006 – Bruce Clarkson 2007 – Amanda Baird 2008 – Shannel Courtney 2009 – Philip Simpson 2010 – Colin Burrows 2011 – Mark Dean 2012 – Ralph Allen 2013 – Nick Head 2014 – Clive Paton 2015/16 – Barbara and Neill Simpson 2017 – Peter de Lange 2018 – Robert McGowan 2019 – Chris Horne 2020 – Graeme Atkins 2021 – Beverley Clarkson 2022 – Simon Walls 2023 – Mike Harding Publications See also Conservation in New Zealand List of environmental awards References Nature conservation in New Zealand New Zealand awards Environmental awards 1926 establishments in New Zealand
Zinc sulfate is used medically as a dietary supplement. Specifically it is used to treat zinc deficiency and to prevent the condition in those at high risk. This includes use together with oral rehydration therapy for children who have diarrhea. General use is not recommended. It may be taken by mouth or by injection into a vein. Side effects may include abdominal pain, vomiting, headache, and feeling tired. While normal doses are deemed safe in pregnancy and breastfeeding, the safety of larger doses is unclear. Greater care should be taken in those with kidney problems. Zinc is an essential mineral in people as well as other animals. The medical use of zinc sulfate began as early as the 1600s. It is on the World Health Organization's List of Essential Medicines. Zinc sulfate is available as a generic medication and over the counter. Medical uses The use of zinc sulfate supplements together with oral rehydration therapy decreases the number of bowel movements and the time until the diarrhea stops. Its use in this situation is recommended by the World Health Organization. There is some evidence zinc is effective in reducing hepatic and neurological symptoms of Wilson's disease. Zinc sulfate is also an important part of parenteral nutrition. Misuse During the 1918 flu pandemic in New Zealand, inhalation chambers were set up in towns and cities as a means to boost immunity. The public were encouraged to attend these chambers and inhale a zinc sulfate mist, a process that was said to disinfect the lungs and throat and protect against infection. In reality, the inhalation of hot steam could inflame the nasal tissue, potentially making participants more susceptible to infection. In towns such as Ashburton, New Zealand for example, in order to be eligible to travel by train, people had to present documentation at the train station proving that they had been through the inhalation chamber. 
The inhalation chamber which was set up in the old Dunedin Post Office building was described as follows: "It was a small room, relatively airtight, holding 20 or 30 persons, and the air is impregnated with the vapour of zinc sulphate. Each batch remains in the chamber for 10 minutes, and the persons treated are instructed to breathe through the nose at first, and then through the mouth." References External links Zinc Dietary supplements World Health Organization essential medicines Wikipedia medicine articles ready to translate
```c /** * @license Apache-2.0 * * * * path_to_url * * Unless required by applicable law or agreed to in writing, software * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <sys/time.h> #define NAME "isnan" #define ITERATIONS 1000000 #define REPEATS 3 /** * Prints the TAP version. */ static void print_version( void ) { printf( "TAP version 13\n" ); } /** * Prints the TAP summary. * * @param total total number of tests * @param passing total number of passing tests */ static void print_summary( int total, int passing ) { printf( "#\n" ); printf( "1..%d\n", total ); // TAP plan printf( "# total %d\n", total ); printf( "# pass %d\n", passing ); printf( "#\n" ); printf( "# ok\n" ); } /** * Prints benchmarks results. * * @param elapsed elapsed time in seconds */ static void print_results( double elapsed ) { double rate = (double)ITERATIONS / elapsed; printf( " ---\n" ); printf( " iterations: %d\n", ITERATIONS ); printf( " elapsed: %0.9f\n", elapsed ); printf( " rate: %0.9f\n", rate ); printf( " ...\n" ); } /** * Returns a clock time. * * @return clock time */ static double tic( void ) { struct timeval now; gettimeofday( &now, NULL ); return (double)now.tv_sec + (double)now.tv_usec/1.0e6; } /** * Generates a random number on the interval [0,1). * * @return random number */ static double rand_double( void ) { int r = rand(); return (double)r / ( (double)RAND_MAX + 1.0 ); } /** * Runs a benchmark. * * @return elapsed time in seconds */ static double benchmark( void ) { double elapsed; double x; double t; int y; int i; t = tic(); for ( i = 0; i < ITERATIONS; i++ ) { x = ( 1.0e7*rand_double() ) - 5.0e6; y = isnan( x ); if ( y != 0 && y != 1 ) { printf( "should return 0 or 1\n" ); break; } } elapsed = tic() - t; if ( y != 0 && y != 1 ) { printf( "should return 0 or 1\n" ); } return elapsed; } /** * Main execution sequence. 
*/ int main( void ) { double elapsed; int i; // Use the current time to seed the random number generator: srand( time( NULL ) ); print_version(); for ( i = 0; i < REPEATS; i++ ) { printf( "# c::%s\n", NAME ); elapsed = benchmark(); print_results( elapsed ); printf( "ok %d benchmark finished\n", i+1 ); } print_summary( REPEATS, REPEATS ); } ```
```javascript /** * $('#more').more({'url':'data.php'}); * amount * address * format * template htmlDIVclass * trigger class * scroll * offset * data * loading */ (function (factory) { if (typeof define === 'function' && define.amd) { // AMD. Register as an anonymous module. define(['jquery'], factory); } else if (typeof exports === 'object') { // Node/CommonJS style for Browserify module.exports = factory; } else { // Browser globals factory(jQuery); } }(function ($) { var target = null; var template = null; var lock = false; var cur_last = 0; var variables = { 'last' : 0 } var settings = { 'amount' : '10', 'address' : 'comments.php', 'format' : 'json', 'template' : '.single_item', 'trigger' : '.get_more', 'scroll' : 'false', 'offset' : '100', 'data' : {}, 'loading' : '...' } var methods = { init: function(options) { return this.each(function() { if (options) { $.extend(settings, options); } template = $(this).children(settings.template).wrap('<div/>').parent(); template.css('display', 'none'); $(this).append('<div class="loading">' + settings.loading + '</div>'); template.remove(); target = $(this); if (settings.scroll == 'false') { $(this).find(settings.trigger).bind('click.more', methods.get_data); $(this).more('get_data'); } else { if ($(this).height() <= $(this).attr('scrollHeight')) { target.more('get_data', settings.amount * 2); } $(this).bind('scroll.more', methods.check_scroll); } }) }, check_scroll: function() { if ((target.scrollTop() + target.height() + parseInt(settings.offset)) >= target.attr('scrollHeight') && lock == false) { target.more('get_data'); } }, debug: function() { var debug_string = ''; $.each(variables, function(k, v) { debug_string += k + ' : ' + v + '\n'; }) alert(debug_string); }, remove: function() { target.children(settings.trigger).unbind('.more'); target.unbind('.more') target.children(settings.trigger).remove(); }, add_elements: function(data) { var root = target var counter = 0; if (data) { $(data).each(function() { counter++ 
var t = template $.each(this, function(key, value) { if (t.find('.' + key)) t.find('.' + key).html(value); }) if (settings.scroll == 'true') { root.children('.loading').before(t.html()) } else { root.children(settings.trigger).before(t.html()) } root.children(settings.template + ':last').attr('id', 'more_element_' + ((variables.last++) + 1)); }) } else methods.remove() // target.children('.loading').css('display', 'none'); if (counter < settings.amount){ methods.remove(); target.children('.loading').html(""); } }, get_data: function() { var ile; lock = true; target.children(".loading").css('display', 'block'); $(settings.trigger).css('display', 'none'); if (typeof(arguments[0]) == 'number') { ile = arguments[0]; } else { ile = settings.amount; } if(variables.last >= cur_last) { var postdata = settings.data; postdata['last'] = variables.last; postdata['amount'] = ile; $.post(settings.address, postdata, function(data){ $(settings.trigger).css('display', 'block') methods.add_elements(data) lock = false; }, settings.format); cur_last = cur_last + 10; } } }; $.fn.more = function(method) { if (methods[method]) { return methods[method].apply(this, Array.prototype.slice.call(arguments, 1)); } else if (typeof method == 'object' || !method) { return methods.init.apply(this, arguments); } else $.error('Method ' + method + ' does not exist!'); } $(document).ready(function() { $(window).on('scroll', function() { if ($(document).scrollTop() + $(window).height() > $(document).height() - 10) { $('.get_more').click(); } }); }); })); ```
The Lega Nazionale Professionisti Serie A (Italian for National Professionals League Serie A), commonly known as LNPA or Lega Serie A (Serie A League), is the governing body that runs the major professional football competitions in Italy, most prominently the Serie A. It was founded on 1 July 2010. In the past the television rights of the Serie A clubs were sold separately, and "Serie A" had to financially support Serie B through divided part of the Serie A TV revenues to Serie B clubs. On 30 April 2009, Serie A announced a split from Serie B, when nineteen of the twenty clubs voted in favour of the move. Relegation-threatened Lecce voted against. The governing body took over most of the competitions formerly held by Lega Calcio, namely Serie A, Coppa Italia, Supercoppa Italiana, and youth competitions Campionato Primavera 1, Coppa Italia Primavera, and Supercoppa Primavera. Serie B is now organised by the Lega Serie B, which was also created in 2010. Competitions League Serie A counts a total number of 20 clubs. In each season (that starts in August, to end in following May) every club faces the others twice (double round-robin system): once in home stadium and once in the opponents one, for 38 total games (19 for each half). Teams gain 3 points for win and a point for draw: no points are gained for lost matches. Ranking is based on total points: the top-club (with the most points) is crowned Italian champion at the end of season. If two or more teams are equal on number of points, they are ranked by following criteria: head-to-head records (results and points), goal difference in these games, goal difference overall, most goals scored, draw. The three lowest placed teams are relegated in Serie B, as three other sides (two top-teams and play-off winner) are promoted in order to replace them. Cup The Lega Serie A organizes the main Italian cup competition, the Coppa Italia, which is open also to all Serie B clubs and some clubs from the Serie C and the Serie D. 
Super Cup The Lega Serie A also organizes the Supercoppa Italiana, a yearly match between the champions of the Serie A and the winners of the Coppa Italia. Youth competitions Youth teams of Lega Serie A clubs play in the Campionato Primavera 1, as well as competing in their own cup competitions, such as the Coppa Italia Primavera and the Supercoppa Primavera. Footballs Nike is the official match football of the Lega Serie A and is used by all 20 teams in league games. The same football is used in all Coppa Italia games and the Supercoppa Italiana. List of Lega Serie A chairmen Maurizio Beretta 2010–2017 Carlo Tavecchio 2017–2018 (interim commissioner) Giovanni MalagΓ² 2018 (interim commissioner) Gaetano MiccicchΓ¨ (2018–2019) Mario Cicala (2019) (interim commissioner) Giancarlo Abete (2019–2020) (interim commissioner) Paolo Dal Pino (2020–) Official Match Ball 2010–11 Nike T90 Tracer 2011–12 Nike Seitiro 2012–13 Nike Maxim 2013–14 Nike Incyte 2014–15 Nike Ordem 2 2015–16 Nike Ordem 3 2016–17 Nike Ordem 4 2017–18 Nike Ordem 5 2018–19 Nike Merlin 2019–20 Nike Merlin 2 2020–21 Nike Flight 2021–22 Nike Flight 2 2022–23 Puma Orbita Serie A 2022–23 2023–24 Puma Orbita Serie A 2023–24 References External links Lega Serie A official website A 2010 establishments in Italy Serie A Professional sports leagues in Italy
Alfred Emil Fredrik SandstrΓΆm (1886, NykΓΆping, Sweden – 1962) was a Swedish lawyer. He was the chairman of the International Federation of Red Cross and Red Crescent Societies from 1950 to 1959. Life In the course of his career he was, among other things, a judge in the Supreme Court, at the Permanent Court of Arbitration in the Hague as well as at the so-called mixed courts that existed in Egypt until 1949 and settled disputes between Egyptians and foreigners. He also acted as an international mediator on many occasions - for example, as Swedish representative (and, from June 1947, Chairman) on the United Nations Special Committee on Palestine (UNSCOP), an attempt by the United Nations to test out the situation in Palestine during the termination of the British Mandate. In addition he succeeded Folke Bernadotte as president of the Swedish Red Cross. In 1950, Emil SandstrΓΆm became a member of the Institut de Droit International (institute for international law). References External links Red Cross Biography 1886 births 1962 deaths Red Cross personnel Members of the Institut de Droit International Presidents of the International Federation of Red Cross and Red Crescent Societies Grand Crosses with Star and Sash of the Order of Merit of the Federal Republic of Germany Justices of the Supreme Court of Sweden 20th-century Swedish judges 20th-century Swedish lawyers
Robert Walker (born 16 June 1982, in Glasgow) is a Scottish footballer who played as a defender, for junior side Arthurlie. Career Walker started his senior career with South Lanarkshire club Hamilton Academical after signing from Glasgow junior side Maryhill, before playing one season for both Dumbarton and Stranraer. After leaving the Stair Park side, Walker played at Cliftonhill with Albion Rovers for two seasons before going amateur with Queen's Park in 2009. Walker was released by Queen's Park at the end of the 2009-10 season. After his release, Walker signed for Barrhead side Arthurlie. References Sources 1982 births Scottish men's footballers Men's association football defenders Hamilton Academical F.C. players Dumbarton F.C. players Stranraer F.C. players Albion Rovers F.C. players Queen's Park F.C. players Living people Scottish Football League players Scottish Junior Football Association players Footballers from Glasgow Maryhill F.C. players Arthurlie F.C. players
```shell Cleaning up comments on config files with `grep` Short intro to `grep` Intro to `sed` Image manipulation using `convert` Wrap text with `fold` ```
```go // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package zapcore import ( "bytes" "errors" "fmt" ) var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") // A Level is a logging priority. Higher levels are more important. type Level int8 const ( // DebugLevel logs are typically voluminous, and are usually disabled in // production. DebugLevel Level = iota - 1 // InfoLevel is the default logging priority. InfoLevel // WarnLevel logs are more important than Info, but don't need individual // human review. WarnLevel // ErrorLevel logs are high-priority. If an application is running smoothly, // it shouldn't generate any error-level logs. ErrorLevel // DPanicLevel logs are particularly important errors. In development the // logger panics after writing the message. DPanicLevel // PanicLevel logs a message, then panics. PanicLevel // FatalLevel logs a message, then calls os.Exit(1). 
FatalLevel

	_minLevel = DebugLevel
	_maxLevel = FatalLevel

	// InvalidLevel is an invalid value for Level.
	//
	// Core implementations may panic if they see messages of this level.
	InvalidLevel = _maxLevel + 1
)

// ParseLevel parses a level based on the lower-case or all-caps ASCII
// representation of the log level. If the provided ASCII representation is
// invalid an error is returned.
//
// This is particularly useful when dealing with text input to configure log
// levels.
func ParseLevel(text string) (Level, error) {
	var level Level
	err := level.UnmarshalText([]byte(text))
	return level, err
}

// leveledEnabler is a LevelEnabler that can also report its own minimum
// level directly; LevelOf short-circuits through it instead of probing.
type leveledEnabler interface {
	LevelEnabler

	Level() Level
}

// LevelOf reports the minimum enabled log level for the given LevelEnabler
// from Zap's supported log levels, or [InvalidLevel] if none of them are
// enabled.
//
// A LevelEnabler may implement a 'Level() Level' method to override the
// behavior of this function.
//
//	func (c *core) Level() Level {
//		return c.currentLevel
//	}
//
// It is recommended that [Core] implementations that wrap other cores use
// LevelOf to retrieve the level of the wrapped core. For example,
//
//	func (c *coreWrapper) Level() Level {
//		return zapcore.LevelOf(c.wrappedCore)
//	}
func LevelOf(enab LevelEnabler) Level {
	if lvler, ok := enab.(leveledEnabler); ok {
		return lvler.Level()
	}

	// Probe each supported level in ascending order; the first enabled one
	// is the minimum.
	for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
		if enab.Enabled(lvl) {
			return lvl
		}
	}

	return InvalidLevel
}

// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
	switch l {
	case DebugLevel:
		return "debug"
	case InfoLevel:
		return "info"
	case WarnLevel:
		return "warn"
	case ErrorLevel:
		return "error"
	case DPanicLevel:
		return "dpanic"
	case PanicLevel:
		return "panic"
	case FatalLevel:
		return "fatal"
	default:
		return fmt.Sprintf("Level(%d)", l)
	}
}

// CapitalString returns an all-caps ASCII representation of the log level.
func (l Level) CapitalString() string {
	// Printing levels in all-caps is common enough that we should export this
	// functionality.
	switch l {
	case DebugLevel:
		return "DEBUG"
	case InfoLevel:
		return "INFO"
	case WarnLevel:
		return "WARN"
	case ErrorLevel:
		return "ERROR"
	case DPanicLevel:
		return "DPANIC"
	case PanicLevel:
		return "PANIC"
	case FatalLevel:
		return "FATAL"
	default:
		return fmt.Sprintf("LEVEL(%d)", l)
	}
}

// MarshalText marshals the Level to text. Note that the text representation
// drops the -Level suffix (see example).
func (l Level) MarshalText() ([]byte, error) {
	return []byte(l.String()), nil
}

// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText
// expects the text representation of a Level to drop the -Level suffix (see
// example).
//
// In particular, this makes it easy to configure logging levels using YAML,
// TOML, or JSON files.
func (l *Level) UnmarshalText(text []byte) error {
	if l == nil {
		return errUnmarshalNilLevel
	}
	// Try the text as given, then lower-cased, so both "INFO" and "info" work.
	if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) {
		return fmt.Errorf("unrecognized level: %q", text)
	}
	return nil
}

// unmarshalText decodes a single lower-case textual representation into l,
// reporting whether the text named a known level. The empty string maps to
// InfoLevel so the zero value is useful.
func (l *Level) unmarshalText(text []byte) bool {
	switch string(text) {
	case "debug":
		*l = DebugLevel
	case "info", "": // make the zero value useful
		*l = InfoLevel
	case "warn", "warning":
		*l = WarnLevel
	case "error":
		*l = ErrorLevel
	case "dpanic":
		*l = DPanicLevel
	case "panic":
		*l = PanicLevel
	case "fatal":
		*l = FatalLevel
	default:
		return false
	}
	return true
}

// Set sets the level for the flag.Value interface.
func (l *Level) Set(s string) error {
	return l.UnmarshalText([]byte(s))
}

// Get gets the level for the flag.Getter interface.
func (l *Level) Get() interface{} {
	return *l
}

// Enabled returns true if the given level is at or above this level.
func (l Level) Enabled(lvl Level) bool {
	return lvl >= l
}

// LevelEnabler decides whether a given logging level is enabled when logging a
// message.
//
// Enablers are intended to be used to implement deterministic filters;
// concerns like sampling are better implemented as a Core.
//
// Each concrete Level value implements a static LevelEnabler which returns
// true for itself and all higher logging levels. For example WarnLevel.Enabled()
// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and
// FatalLevel, but return false for InfoLevel and DebugLevel.
type LevelEnabler interface {
	Enabled(Level) bool
}
```
```go
//
//
// path_to_url
//
// Unless required by applicable law or agreed to in writing, software
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

package system

import (
	"github.com/goharbor/harbor/src/common/rbac"
	"github.com/goharbor/harbor/src/pkg/permission/types"
)

var (
	// policies is the flat table of system-scope resource/action pairs that
	// this evaluator recognizes; each entry grants one action on one resource.
	policies = []*types.Policy{
		{Resource: rbac.ResourceCatalog, Action: rbac.ActionRead},

		{Resource: rbac.ResourceAuditLog, Action: rbac.ActionList},

		{Resource: rbac.ResourceProject, Action: rbac.ActionCreate},
		{Resource: rbac.ResourceProject, Action: rbac.ActionRead},
		{Resource: rbac.ResourceProject, Action: rbac.ActionUpdate},
		{Resource: rbac.ResourceProject, Action: rbac.ActionDelete},
		{Resource: rbac.ResourceProject, Action: rbac.ActionList},

		{Resource: rbac.ResourceUser, Action: rbac.ActionCreate},
		{Resource: rbac.ResourceUser, Action: rbac.ActionRead},
		{Resource: rbac.ResourceUser, Action: rbac.ActionUpdate},
		{Resource: rbac.ResourceUser, Action: rbac.ActionDelete},
		{Resource: rbac.ResourceUser, Action: rbac.ActionList},

		{Resource: rbac.ResourceUserGroup, Action: rbac.ActionCreate},
		{Resource: rbac.ResourceUserGroup, Action: rbac.ActionRead},
		{Resource: rbac.ResourceUserGroup, Action: rbac.ActionUpdate},
		{Resource: rbac.ResourceUserGroup, Action: rbac.ActionDelete},
		{Resource: rbac.ResourceUserGroup, Action: rbac.ActionList},

		{Resource: rbac.ResourceRegistry, Action: rbac.ActionCreate},
		{Resource: rbac.ResourceRegistry, Action: rbac.ActionRead},
		{Resource: rbac.ResourceRegistry, Action: rbac.ActionUpdate},
		{Resource: rbac.ResourceRegistry, Action: rbac.ActionDelete},
		{Resource: rbac.ResourceRegistry, Action: rbac.ActionList},

		{Resource: rbac.ResourceReplication, Action: rbac.ActionCreate},
		{Resource: rbac.ResourceReplication, Action: rbac.ActionRead},
		{Resource: rbac.ResourceReplication, Action: rbac.ActionUpdate},
		{Resource: rbac.ResourceReplication, Action: rbac.ActionList},
		{Resource: rbac.ResourceReplication, Action: rbac.ActionDelete},

		{Resource: rbac.ResourceDistribution, Action: rbac.ActionCreate},
		{Resource: rbac.ResourceDistribution, Action: rbac.ActionRead},
		{Resource: rbac.ResourceDistribution, Action: rbac.ActionUpdate},
		{Resource: rbac.ResourceDistribution, Action: rbac.ActionDelete},
		{Resource: rbac.ResourceDistribution, Action: rbac.ActionList},

		{Resource: rbac.ResourceGarbageCollection, Action: rbac.ActionCreate},
		{Resource: rbac.ResourceGarbageCollection, Action: rbac.ActionRead},
		{Resource: rbac.ResourceGarbageCollection, Action: rbac.ActionUpdate},
		{Resource: rbac.ResourceGarbageCollection, Action: rbac.ActionDelete},
		{Resource: rbac.ResourceGarbageCollection, Action: rbac.ActionList},

		{Resource: rbac.ResourceScanAll, Action: rbac.ActionCreate},
		{Resource: rbac.ResourceScanAll, Action: rbac.ActionRead},
		{Resource: rbac.ResourceScanAll, Action: rbac.ActionUpdate},
		{Resource: rbac.ResourceScanAll, Action: rbac.ActionDelete},
		{Resource: rbac.ResourceScanAll, Action: rbac.ActionList},
		{Resource: rbac.ResourceScanAll, Action: rbac.ActionStop},

		{Resource: rbac.ResourceSystemVolumes, Action: rbac.ActionRead},

		{Resource: rbac.ResourceLdapUser, Action: rbac.ActionCreate},
		{Resource: rbac.ResourceLdapUser, Action: rbac.ActionList},

		{Resource: rbac.ResourceConfiguration, Action: rbac.ActionRead},
		{Resource: rbac.ResourceConfiguration, Action: rbac.ActionUpdate},

		{Resource: rbac.ResourceJobServiceMonitor, Action: rbac.ActionRead},
		{Resource: rbac.ResourceJobServiceMonitor, Action: rbac.ActionList},
		{Resource: rbac.ResourceJobServiceMonitor, Action: rbac.ActionStop},

		{Resource: rbac.ResourceSecurityHub, Action: rbac.ActionRead},
		{Resource: rbac.ResourceSecurityHub, Action: rbac.ActionList},
	}
)
```
```java package com.megagao.production.ssm.controller.technology; import java.util.List; import javax.validation.Valid; import com.megagao.production.ssm.domain.customize.CustomResult; import com.megagao.production.ssm.domain.TechnologyPlan; import com.megagao.production.ssm.domain.customize.EUDataGridResult; import com.megagao.production.ssm.domain.vo.TechnologyPlanVO; import com.megagao.production.ssm.service.TechnologyPlanService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.validation.BindingResult; import org.springframework.validation.FieldError; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.ResponseBody; @Controller @RequestMapping("/technologyPlan") public class TechnologyPlanController { @Autowired private TechnologyPlanService technologyPlanService; @RequestMapping("/get/{technologyPlanId}") @ResponseBody public TechnologyPlan getItemById(@PathVariable String technologyPlanId) throws Exception{ TechnologyPlan technologyPlan = technologyPlanService.get(technologyPlanId); return technologyPlan; } @RequestMapping("/find") public String find() throws Exception{ return "technologyPlan_list"; } @RequestMapping("/add") public String add() { return "technologyPlan_add"; } @RequestMapping("/edit") public String edit() throws Exception{ return "technologyPlan_edit"; } @RequestMapping("/get_data") @ResponseBody public List<TechnologyPlan> getData() throws Exception{ List<TechnologyPlan> list = technologyPlanService.find(); return list; } @RequestMapping("/list") @ResponseBody public EUDataGridResult getItemList(Integer page, Integer rows, TechnologyPlanVO technologyPlanPO) throws Exception{ EUDataGridResult result = technologyPlanService.getList(page, rows, technologyPlanPO); return 
result; } @RequestMapping(value="/insert", method=RequestMethod.POST) @ResponseBody private CustomResult insert(@Valid TechnologyPlan technologyPlan, BindingResult bindingResult) throws Exception { CustomResult result; if(bindingResult.hasErrors()){ FieldError fieldError = bindingResult.getFieldError(); return CustomResult.build(100, fieldError.getDefaultMessage()); } if(technologyPlanService.get(technologyPlan.getTechnologyPlanId()) != null){ result = new CustomResult(0, "", null); }else{ result = technologyPlanService.insert(technologyPlan); } return result; } @RequestMapping(value="/update_all") @ResponseBody private CustomResult updateAll(@Valid TechnologyPlan technologyPlan, BindingResult bindingResult) throws Exception { if(bindingResult.hasErrors()){ FieldError fieldError = bindingResult.getFieldError(); return CustomResult.build(100, fieldError.getDefaultMessage()); } return technologyPlanService.updateAll(technologyPlan); } @RequestMapping(value="/delete_batch") @ResponseBody private CustomResult deleteBatch(String[] ids) throws Exception { CustomResult result = technologyPlanService.deleteBatch(ids); return result; } //id @RequestMapping("/search_technologyPlan_by_technologyPlanId") @ResponseBody public EUDataGridResult searchTechnologyPlanByTechnologyPlanId(Integer page, Integer rows, String searchValue) throws Exception{ EUDataGridResult result = technologyPlanService.searchTechnologyPlanByTechnologyPlanId(page, rows, searchValue); return result; } // @RequestMapping("/search_technologyPlan_by_technologyName") @ResponseBody public EUDataGridResult searchTechnologyPlanByTechnologyName(Integer page, Integer rows, String searchValue) throws Exception{ EUDataGridResult result = technologyPlanService.searchTechnologyPlanByTechnologyName(page, rows, searchValue); return result; } } ```
The Devil Drives is the sixth album by Dave Graney 'n' The Coral Snakes. It was released in May 1997 on Mercury Records. The album peaked at No. 18 on the Australian Recording Industry Association (ARIA) Album Charts. It was produced by Dave Graney, Clare Moore and David Ruffy. The album was recorded in August 1996 in Kiss Studios, Melbourne and mixed in October 1996 in London at Matrix Maison Rouge studios. The singles from the album were "Feelin Kinda Sporty" in April, and "A Man on the Make" in September. The album was also accompanied by a media CD with an interview with Dave Graney called Coffins Have No Pockets. This was part of a media booklet based on a Holden Monaro owners manual in a plastic booklet/folder. The single's video, by Mahony, won 'Best Video' in 1997; the album was nominated for 'Best Cover Art' by Mahony and Graney received a nomination as 'Best Male Artist'. It was the last studio album with the Coral Snakes and with Universal Music as the group disbanded in December. Title concept The name of the album follows the old proverb "Needs must when the devil drives", in which drive is a reference to coercing or (figuratively) pushing. However, the imagery of the album is suggestive of a literal interpretation of the devil driving (piloting) a car. Reception Professional reviews See table.
Track listing "The Oblivion Seekers" - 3:57 "My Only Regret (I Opened My Mouth)" - 4:25 "I Don't Know You Exist" - 4:15 "Rackin' Up Some Zeds" - 4:25 "Everybody Loves a Mass Killer" - 4:52 "I Dig the Pioneers" - 3:50 "The Sheriff of Hell" - 4:58 "Pianola Roll" - 3:16 "Land of the Giants" - 5:24 "I Love Your Gravity" - 4:48 "Biker in Business Class" - 4:29 "A Man on the Make" - 4:11 "Pascal et Caroline" - 6:41 "The Devil Drives" - 3:59 "Feelin' Kinda Sporty" - 3:11 Personnel Musicians Andy Baldwin - violin Rebecca Barnard - backing vocals Gordy Blair - bass, backing vocals Robin Casinader - piano, organ, violin, backing vocals Dave Graney - vocals, acoustic guitar Rod Hayward - guitar, backing vocals Clare Moore - drums, percussion, organ, vibraphone, vocals David Ruffy - sampler (programming, edits, loops) Liduina Van Der Sman - flute, backing vocals Production details Engineer - Kenny Jones Engineer Assistant - Sean Thompson Engineer - Andy Baldwin Mixer - David Ruffy Producer - Dave Graney, Clare Moore, David Ruffy Studio - Kiss, Melbourne (recording) Matrix Maison Rouge, London (Mixing) Artwork Cover art - Tony Mahony References 1997 albums Dave Graney 'n' the Coral Snakes albums Mercury Records albums
Lieutenant-General Sir John George des Reaux Swayne KCB CBE (3 July 1890 – 16 December 1964) was a senior British Army officer who became General Officer Commanding-in-Chief (GOC-in-C) of South-Eastern Command during the Second World War. Military career Born the son of William Swayne, the Bishop of Lincoln, Swayne, after being educated at Charterhouse School and the University of Oxford, was commissioned into the Somerset Light Infantry in 1911. He served in the First World War, spending most of it as a prisoner of war. After the war he was appointed aide-de-camp to the general officer commanding (GOC) Western Command in India before becoming adjutant of his regiment in 1924. He became a general staff officer at the War Office in 1927 and brigade major for 7th Infantry Brigade in 1929. He was made military assistant to the Chief of the Imperial General Staff in 1930 and chief of staff for the International Force for the Saar Plebiscite in Germany in 1934. He was selected to be Commanding Officer (CO) of the 1st Battalion, Royal Northumberland Fusiliers in 1935 and chief instructor at the Staff College, Camberley in 1937. He served in the Second World War, initially as head of the British Military Mission to the French Grand Quartier Général (GQG) and then as general officer commanding 4th Division from 1941. He was appointed chief of the general staff for Home Forces in 1942 and general officer commanding-in-chief of South Eastern Command in 1942. His final appointment was as chief of the General Staff in India in 1944; he retired in 1946.
References Bibliography External links British Army Officers 1939–1945 Generals of World War II |- |- |- 1890 births 1964 deaths Somerset Light Infantry officers British Army personnel of World War I British Army generals of World War II World War I prisoners of war held by Germany Knights Commander of the Order of the Bath Commanders of the Order of the British Empire British World War I prisoners of war Graduates of the Staff College, Camberley Graduates of the Royal College of Defence Studies People from Warminster Royal Northumberland Fusiliers officers Academics of the Staff College, Camberley British Army lieutenant generals People educated at Charterhouse School Alumni of the University of Oxford Military personnel from Wiltshire
```c++
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/hash-seed-inl.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-random-iterator.h"
#include "src/objects-inl.h"
#include "src/objects/smi.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"

namespace v8 {
namespace internal {
namespace interpreter {

// Test fixture providing an isolate and zone for building bytecode arrays.
class BytecodeArrayRandomIteratorTest : public TestWithIsolateAndZone {
 public:
  BytecodeArrayRandomIteratorTest() = default;
  ~BytecodeArrayRandomIteratorTest() override = default;
};

// Decrementing the iterator past the first bytecode must invalidate it.
TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
  // Use a builder to create an array with containing multiple bytecodes
  // with 0, 1 and 2 operands.
  FeedbackVectorSpec feedback_spec(zone());
  BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
  AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
                              HashSeed(isolate()));
  double heap_num_0 = 2.718;
  // Too large for a Smi, forces a heap-number constant.
  double heap_num_1 = 2.0 * Smi::kMaxValue;
  Smi zero = Smi::zero();
  Smi smi_0 = Smi::FromInt(64);
  Smi smi_1 = Smi::FromInt(-65536);
  Register reg_0(0);
  Register reg_1(1);
  RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
  RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
  Register param = Register::FromParameterIndex(2, builder.parameter_count());
  const AstRawString* name = ast_factory.GetOneByteString("abc");
  uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();

  builder.LoadLiteral(heap_num_0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(heap_num_1)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(zero)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_0)
      .StackCheck(0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_1)
      .StackCheck(1)
      .StoreAccumulatorInRegister(reg_1)
      .LoadAccumulatorWithRegister(reg_0)
      .BinaryOperation(Token::Value::ADD, reg_0, 2)
      .StoreAccumulatorInRegister(reg_1)
      .LoadNamedProperty(reg_1, name, feedback_slot)
      .BinaryOperation(Token::Value::ADD, reg_0, 3)
      .StoreAccumulatorInRegister(param)
      .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
      .ForInPrepare(triple, feedback_slot)
      .CallRuntime(Runtime::kLoadIC_Miss, reg_0)
      .Debugger()
      .Return();

  ast_factory.Internalize(isolate());
  Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
  BytecodeArrayRandomIterator iterator(bytecodeArray, zone());

  // Valid at the first bytecode, invalid one step before it.
  iterator.GoToStart();
  ASSERT_TRUE(iterator.IsValid());
  --iterator;
  ASSERT_FALSE(iterator.IsValid());
}

// Incrementing the iterator past the last bytecode must invalidate it.
TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
  // Use a builder to create an array with containing multiple bytecodes
  // with 0, 1 and 2 operands.
  FeedbackVectorSpec feedback_spec(zone());
  BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
  AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
                              HashSeed(isolate()));
  double heap_num_0 = 2.718;
  double heap_num_1 = 2.0 * Smi::kMaxValue;
  Smi zero = Smi::zero();
  Smi smi_0 = Smi::FromInt(64);
  Smi smi_1 = Smi::FromInt(-65536);
  Register reg_0(0);
  Register reg_1(1);
  RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
  RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
  Register param = Register::FromParameterIndex(2, builder.parameter_count());
  const AstRawString* name = ast_factory.GetOneByteString("abc");
  uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();

  builder.LoadLiteral(heap_num_0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(heap_num_1)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(zero)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_0)
      .StackCheck(0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_1)
      .StackCheck(1)
      .StoreAccumulatorInRegister(reg_1)
      .LoadAccumulatorWithRegister(reg_0)
      .BinaryOperation(Token::Value::ADD, reg_0, 2)
      .StoreAccumulatorInRegister(reg_1)
      .LoadNamedProperty(reg_1, name, feedback_slot)
      .BinaryOperation(Token::Value::ADD, reg_0, 3)
      .StoreAccumulatorInRegister(param)
      .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
      .ForInPrepare(triple, feedback_slot)
      .CallRuntime(Runtime::kLoadIC_Miss, reg_0)
      .Debugger()
      .Return();

  ast_factory.Internalize(isolate());
  Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
  BytecodeArrayRandomIterator iterator(bytecodeArray, zone());

  // Valid at the last bytecode, invalid one step after it.
  iterator.GoToEnd();
  ASSERT_TRUE(iterator.IsValid());
  ++iterator;
  ASSERT_FALSE(iterator.IsValid());
}

// GoToStart must land on the first bytecode with index/offset zero.
TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
  // Use a builder to create an array with containing multiple bytecodes
  // with 0, 1 and 2 operands.
  FeedbackVectorSpec feedback_spec(zone());
  BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
  AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
                              HashSeed(isolate()));
  double heap_num_0 = 2.718;
  double heap_num_1 = 2.0 * Smi::kMaxValue;
  Smi zero = Smi::zero();
  Smi smi_0 = Smi::FromInt(64);
  Smi smi_1 = Smi::FromInt(-65536);
  Register reg_0(0);
  Register reg_1(1);
  RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
  RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
  Register param = Register::FromParameterIndex(2, builder.parameter_count());
  const AstRawString* name = ast_factory.GetOneByteString("abc");
  uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();

  builder.LoadLiteral(heap_num_0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(heap_num_1)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(zero)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_0)
      .StackCheck(0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_1)
      .StackCheck(1)
      .StoreAccumulatorInRegister(reg_1)
      .LoadAccumulatorWithRegister(reg_0)
      .BinaryOperation(Token::Value::ADD, reg_0, 2)
      .StoreAccumulatorInRegister(reg_1)
      .LoadNamedProperty(reg_1, name, feedback_slot)
      .BinaryOperation(Token::Value::ADD, reg_0, 3)
      .StoreAccumulatorInRegister(param)
      .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
      .ForInPrepare(triple,
                    feedback_slot)
      .CallRuntime(Runtime::kLoadIC_Miss, reg_0)
      .Debugger()
      .Return();

  ast_factory.Internalize(isolate());
  Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
  BytecodeArrayRandomIterator iterator(bytecodeArray, zone());

  // The first emitted bytecode is LdaConstant for heap_num_0 at offset 0.
  iterator.GoToStart();

  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
  EXPECT_EQ(iterator.current_index(), 0);
  EXPECT_EQ(iterator.current_offset(), 0);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0);
  ASSERT_TRUE(iterator.IsValid());
}

// GoToEnd must land on the trailing Return (index 22, last offset).
TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
  // Use a builder to create an array with containing multiple bytecodes
  // with 0, 1 and 2 operands.
  FeedbackVectorSpec feedback_spec(zone());
  BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
  AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
                              HashSeed(isolate()));
  double heap_num_0 = 2.718;
  double heap_num_1 = 2.0 * Smi::kMaxValue;
  Smi zero = Smi::zero();
  Smi smi_0 = Smi::FromInt(64);
  Smi smi_1 = Smi::FromInt(-65536);
  Register reg_0(0);
  Register reg_1(1);
  RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
  RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
  Register param = Register::FromParameterIndex(2, builder.parameter_count());
  const AstRawString* name = ast_factory.GetOneByteString("abc");
  uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();

  builder.LoadLiteral(heap_num_0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(heap_num_1)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(zero)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_0)
      .StackCheck(0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_1)
      .StackCheck(1)
      .StoreAccumulatorInRegister(reg_1)
      .LoadAccumulatorWithRegister(reg_0)
      .BinaryOperation(Token::Value::ADD, reg_0, 2)
      .StoreAccumulatorInRegister(reg_1)
      .LoadNamedProperty(reg_1, name, feedback_slot)
      .BinaryOperation(Token::Value::ADD, reg_0, 3)
      .StoreAccumulatorInRegister(param)
      .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
      .ForInPrepare(triple, feedback_slot)
      .CallRuntime(Runtime::kLoadIC_Miss, reg_0)
      .Debugger()
      .Return();

  ast_factory.Internalize(isolate());
  Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
  BytecodeArrayRandomIterator iterator(bytecodeArray, zone());

  iterator.GoToEnd();

  // The last bytecode starts Return-size bytes before the end of the array.
  int offset = bytecodeArray->length() -
               Bytecodes::Size(Bytecode::kReturn, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
  EXPECT_EQ(iterator.current_index(), 22);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  ASSERT_TRUE(iterator.IsValid());
}

// GoToIndex, += and -= must position the iterator at the expected
// bytecode/offset; out-of-range indices must invalidate it.
TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
  // Use a builder to create an array with containing multiple bytecodes
  // with 0, 1 and 2 operands.
  FeedbackVectorSpec feedback_spec(zone());
  BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
  AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
                              HashSeed(isolate()));
  double heap_num_0 = 2.718;
  double heap_num_1 = 2.0 * Smi::kMaxValue;
  Smi zero = Smi::zero();
  Smi smi_0 = Smi::FromInt(64);
  Smi smi_1 = Smi::FromInt(-65536);
  Register reg_0(0);
  Register reg_1(1);
  RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
  RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
  Register param = Register::FromParameterIndex(2, builder.parameter_count());
  const AstRawString* name = ast_factory.GetOneByteString("abc");
  uint32_t name_index = 2;
  uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();

  builder.LoadLiteral(heap_num_0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(heap_num_1)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(zero)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_0)
      .StackCheck(0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_1)
      .StackCheck(1)
      .StoreAccumulatorInRegister(reg_1)
      .LoadAccumulatorWithRegister(reg_0)
      .BinaryOperation(Token::Value::ADD, reg_0, 2)
      .StoreAccumulatorInRegister(reg_1)
      .LoadNamedProperty(reg_1, name, feedback_slot)
      .BinaryOperation(Token::Value::ADD, reg_0, 3)
      .StoreAccumulatorInRegister(param)
      .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
      .ForInPrepare(triple, feedback_slot)
      .CallRuntime(Runtime::kLoadIC_Miss, reg_0)
      .Debugger()
      .Return();

  // Test iterator sees the expected output from the builder.
  ast_factory.Internalize(isolate());
  BytecodeArrayRandomIterator iterator(builder.ToBytecodeArray(isolate()),
                                       zone());
  // smi_1 needs a wide (quadruple) operand, which costs one prefix byte.
  const int kPrefixByteSize = 1;
  int offset = 0;

  // Jump to index 13 (the Add) and recompute its byte offset by summing
  // the sizes of all preceding bytecodes.
  iterator.GoToIndex(13);
  offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
            kPrefixByteSize;
  offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
  EXPECT_EQ(iterator.current_index(), 13);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());

  // Jump backwards to index 2 (second LdaConstant).
  iterator.GoToIndex(2);
  offset = Bytecodes::Size(Bytecode::kLdaConstant,
                          OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
  EXPECT_EQ(iterator.current_index(), 2);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1);
  ASSERT_TRUE(iterator.IsValid());

  // Jump forwards to index 18 (CallRuntimeForPair) and recompute its offset.
  iterator.GoToIndex(18);
  offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
            kPrefixByteSize;
  offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
  EXPECT_EQ(iterator.current_index(), 18);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
  EXPECT_EQ(iterator.GetRegisterOperand(1).index(), param.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(1), 1);
  EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
  EXPECT_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(3), 2);
  ASSERT_TRUE(iterator.IsValid());

  // Step back three bytecodes to index 15 (LdaNamedProperty).
  iterator -= 3;
  offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset -= Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
  offset -= Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
  EXPECT_EQ(iterator.current_index(), 15);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
  EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
  EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
  ASSERT_TRUE(iterator.IsValid());

  // Step forward two bytecodes to index 17 (Star into param).
  iterator += 2;
  offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
  EXPECT_EQ(iterator.current_index(), 17);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());

  // Jump to the final Return at index 22.
  iterator.GoToIndex(22);
  offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
            kPrefixByteSize;
  offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kCallRuntimeForPair, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
  offset += Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
  EXPECT_EQ(iterator.current_index(), 22);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  ASSERT_TRUE(iterator.IsValid());

  // Indices outside [0, 22] must invalidate the iterator.
  iterator.GoToIndex(24);
  EXPECT_FALSE(iterator.IsValid());

  iterator.GoToIndex(-5);
  EXPECT_FALSE(iterator.IsValid());
}

TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
  // Use a builder to create an array with containing multiple bytecodes
  // with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone()); BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec); AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(), HashSeed(isolate())); double heap_num_0 = 2.718; double heap_num_1 = 2.0 * Smi::kMaxValue; Smi zero = Smi::zero(); Smi smi_0 = Smi::FromInt(64); Smi smi_1 = Smi::FromInt(-65536); Register reg_0(0); Register reg_1(1); RegisterList pair = BytecodeUtils::NewRegisterList(0, 2); RegisterList triple = BytecodeUtils::NewRegisterList(0, 3); Register param = Register::FromParameterIndex(2, builder.parameter_count()); const AstRawString* name = ast_factory.GetOneByteString("abc"); uint32_t name_index = 2; uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt(); builder.LoadLiteral(heap_num_0) .StoreAccumulatorInRegister(reg_0) .LoadLiteral(heap_num_1) .StoreAccumulatorInRegister(reg_0) .LoadLiteral(zero) .StoreAccumulatorInRegister(reg_0) .LoadLiteral(smi_0) .StackCheck(0) .StoreAccumulatorInRegister(reg_0) .LoadLiteral(smi_1) .StackCheck(1) .StoreAccumulatorInRegister(reg_1) .LoadAccumulatorWithRegister(reg_0) .BinaryOperation(Token::Value::ADD, reg_0, 2) .StoreAccumulatorInRegister(reg_1) .LoadNamedProperty(reg_1, name, feedback_slot) .BinaryOperation(Token::Value::ADD, reg_0, 3) .StoreAccumulatorInRegister(param) .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair) .ForInPrepare(triple, feedback_slot) .CallRuntime(Runtime::kLoadIC_Miss, reg_0) .Debugger() .Return(); // Test iterator sees the expected output from the builder. 
ast_factory.Internalize(isolate()); BytecodeArrayRandomIterator iterator(builder.ToBytecodeArray(isolate()), zone()); const int kPrefixByteSize = 1; int offset = 0; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant); EXPECT_EQ(iterator.current_index(), 0); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar); EXPECT_EQ(iterator.current_index(), 1); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant); EXPECT_EQ(iterator.current_index(), 2); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar); EXPECT_EQ(iterator.current_index(), 3); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaZero); EXPECT_EQ(iterator.current_index(), 4); 
EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar); EXPECT_EQ(iterator.current_index(), 5); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi); EXPECT_EQ(iterator.current_index(), 6); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_0); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck); EXPECT_EQ(iterator.current_index(), 7); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar); EXPECT_EQ(iterator.current_index(), 8); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi); 
EXPECT_EQ(iterator.current_index(), 9); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple); EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) + kPrefixByteSize; ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck); EXPECT_EQ(iterator.current_index(), 10); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar); EXPECT_EQ(iterator.current_index(), 11); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdar); EXPECT_EQ(iterator.current_index(), 12); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index()); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd); EXPECT_EQ(iterator.current_index(), 13); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kAdd, 
OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar); EXPECT_EQ(iterator.current_index(), 14); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty); EXPECT_EQ(iterator.current_index(), 15); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index()); EXPECT_EQ(iterator.GetIndexOperand(1), name_index); EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd); EXPECT_EQ(iterator.current_index(), 16); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar); EXPECT_EQ(iterator.current_index(), 17); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair); EXPECT_EQ(iterator.current_index(), 18); 
EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall); EXPECT_EQ(iterator.GetRegisterOperand(1).index(), param.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(1), 1); EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u); EXPECT_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(3), 2); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kCallRuntimeForPair, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kForInPrepare); EXPECT_EQ(iterator.current_index(), 19); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index()); EXPECT_EQ(iterator.GetRegisterOperandRange(0), 3); EXPECT_EQ(iterator.GetIndexOperand(1), feedback_slot); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime); EXPECT_EQ(iterator.current_index(), 20); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss); EXPECT_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index()); EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle); ++iterator; EXPECT_EQ(iterator.current_bytecode(), Bytecode::kDebugger); EXPECT_EQ(iterator.current_index(), 21); EXPECT_EQ(iterator.current_offset(), offset); EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle); ASSERT_TRUE(iterator.IsValid()); offset += Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle); ++iterator; 
// Final bytecode (Return) of the preceding forward-iteration test, then
// advancing once more must leave the iterator invalid (past the end).
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
  EXPECT_EQ(iterator.current_index(), 22);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  ASSERT_TRUE(iterator.IsValid());
  ++iterator;
  ASSERT_TRUE(!iterator.IsValid());
}

// Walks a 23-bytecode array from the end to the start with operator--,
// checking bytecode, index, offset, operand scale and operands at each step.
TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
  // Use a builder to create an array containing multiple bytecodes
  // with 0, 1 and 2 operands.
  FeedbackVectorSpec feedback_spec(zone());
  BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
  AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
                              HashSeed(isolate()));
  double heap_num_0 = 2.718;
  // Doesn't fit in a Smi, so it is stored as a heap-number constant.
  double heap_num_1 = 2.0 * Smi::kMaxValue;
  Smi zero = Smi::zero();
  Smi smi_0 = Smi::FromInt(64);
  // Too wide for a single-byte immediate; forces a scaled (wide) LdaSmi.
  Smi smi_1 = Smi::FromInt(-65536);
  Register reg_0(0);
  Register reg_1(1);
  RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
  RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
  Register param = Register::FromParameterIndex(2, builder.parameter_count());
  const AstRawString* name = ast_factory.GetOneByteString("abc");
  uint32_t name_index = 2;
  uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();

  builder.LoadLiteral(heap_num_0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(heap_num_1)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(zero)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_0)
      .StackCheck(0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_1)
      .StackCheck(1)
      .StoreAccumulatorInRegister(reg_1)
      .LoadAccumulatorWithRegister(reg_0)
      .BinaryOperation(Token::Value::ADD, reg_0, 2)
      .StoreAccumulatorInRegister(reg_1)
      .LoadNamedProperty(reg_1, name, feedback_slot)
      .BinaryOperation(Token::Value::ADD, reg_0, 3)
      .StoreAccumulatorInRegister(param)
      .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
      .ForInPrepare(triple, feedback_slot)
      .CallRuntime(Runtime::kLoadIC_Miss, reg_0)
      .Debugger()
      .Return();

  // Test iterator sees the expected output from the builder.
  ast_factory.Internalize(isolate());
  Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
  BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
  const int kPrefixByteSize = 1;
  // Offsets are recomputed backwards from the array's total length; before
  // each check the size of the expected bytecode is subtracted.
  int offset = bytecodeArray->length();

  iterator.GoToEnd();

  offset -= Bytecodes::Size(Bytecode::kReturn, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
  EXPECT_EQ(iterator.current_index(), 22);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
  EXPECT_EQ(iterator.current_index(), 21);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
  EXPECT_EQ(iterator.current_index(), 20);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
  EXPECT_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
  EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kForInPrepare);
  EXPECT_EQ(iterator.current_index(), 19);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
  // Register range of 3 covers the `triple` output register list.
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 3);
  EXPECT_EQ(iterator.GetIndexOperand(1), feedback_slot);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -=
      Bytecodes::Size(Bytecode::kCallRuntimeForPair, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
  EXPECT_EQ(iterator.current_index(), 18);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
  EXPECT_EQ(iterator.GetRegisterOperand(1).index(), param.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(1), 1);
  EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
  EXPECT_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index());
  // Register range of 2 covers the `pair` output register list.
  EXPECT_EQ(iterator.GetRegisterOperandRange(3), 2);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
  EXPECT_EQ(iterator.current_index(), 17);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
  EXPECT_EQ(iterator.current_index(), 16);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
  EXPECT_EQ(iterator.current_index(), 15);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
  EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
  EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
  EXPECT_EQ(iterator.current_index(), 14);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
  EXPECT_EQ(iterator.current_index(), 13);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdar);
  EXPECT_EQ(iterator.current_index(), 12);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
  EXPECT_EQ(iterator.current_index(), 11);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
  EXPECT_EQ(iterator.current_index(), 10);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  // smi_1 needs a quadruple-scaled LdaSmi; its encoding is preceded by a
  // one-byte scaling prefix, accounted for via kPrefixByteSize.
  offset -= Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
            kPrefixByteSize;
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
  EXPECT_EQ(iterator.current_index(), 9);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
  EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
  EXPECT_EQ(iterator.current_index(), 8);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
  EXPECT_EQ(iterator.current_index(), 7);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
  EXPECT_EQ(iterator.current_index(), 6);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_0);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
  EXPECT_EQ(iterator.current_index(), 5);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
  EXPECT_EQ(iterator.current_index(), 4);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
  EXPECT_EQ(iterator.current_index(), 3);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
  EXPECT_EQ(iterator.current_index(), 2);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
  EXPECT_EQ(iterator.current_index(), 1);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
  EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
  ASSERT_TRUE(iterator.IsValid());
  --iterator;

  offset -= Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
  EXPECT_EQ(iterator.current_index(), 0);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0);
  ASSERT_TRUE(iterator.IsValid());

  // Decrementing past the first bytecode invalidates the iterator.
  --iterator;
  ASSERT_FALSE(iterator.IsValid());
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8
```
Jakubovany may refer to several villages in Slovakia: Jakubovany, Liptovský Mikuláš Jakubovany, Sabinov
Muntukayise Bhekuyise Ntuli (24 December 1957 – 16 January 2021) was a South African politician. A member of the African National Congress, he served as a Member of the National Assembly of South Africa from 1999 to 2003, when he resigned amid fraud allegations. He was then elected to the KwaZulu-Natal Legislature in 2009. On 11 September 2019, Ntuli became the Member of the Executive Council for Transport, Community Safety & Liaison, replacing Mxolisi Kaunda, who was elected Mayor of the eThekwini Metropolitan Municipality. Ntuli died on 16 January 2021, from complications of COVID-19. Early life and education Ntuli was born on 24 December 1957 in Mtubatuba, north of Richard's Bay, in South Africa's former Natal Province. In 1980, he matriculated from Siyamukela High School in Madadeni, a township outside Newcastle. Ntuli obtained a primary teachers certificate at Madadeni College and an associate in management qualification from the UCT Graduate School of Business. From the University of the Witwatersrand, he held a certificate in leadership. Ntuli also had a post-diploma in research, strategic diplomacy and transitional justice from the University of Johannesburg. Political career Ntuli was active in the underground structures of the African National Congress and was part of uMkhonto we Sizwe operations during apartheid. Soon after, he was involved in establishing ANC structures in Northern Natal. He then served as the regional chair of the ANC's Musa Dladla region for a total of 15 years and was a member of the ANC's Peace and Stability Committee for over 20 years. In 1999, he was elected to the National Assembly, the lower house of the South African parliament, as an ANC representative. He resigned from parliament in 2003 after he pleaded guilty to abusing his travel facilities, his parliamentary medical aid and defrauding parliament. Speaker Frene Ginwala called his actions "totally unacceptable" and "reprehensible".
At the 2009 general election, Ntuli was elected to the KwaZulu-Natal Legislature. For nine years, he was a member of various committees, including the transport portfolio committee, and served as chairperson of the community safety and liaison portfolio committee in the legislature. On 11 September 2019, he was appointed Member of the Executive Council (MEC) for Transport, Community Safety and Liaison, replacing Mxolisi Kaunda, who was elected Mayor of the eThekwini Metropolitan Municipality. Death Ntuli died from COVID-19 complications on 16 January 2021, during the COVID-19 pandemic in South Africa. At the time of his death, he was a member of the provincial executive committee of the ANC. References 1957 births 2021 deaths Zulu people Politicians from KwaZulu-Natal Members of the National Assembly of South Africa Members of the KwaZulu-Natal Legislature African National Congress politicians University of the Witwatersrand alumni University of Johannesburg alumni Deaths from the COVID-19 pandemic in South Africa
Robin Lapert (born 29 November 1997) is a French professional footballer who plays for Hartford Athletic in the USL Championship. Career Youth Lapert began his career spending three years with the Le Havre AC academy, where he spent a season with the club's under-19 side and two playing in the Championnat National 2 and Championnat National 3. Following his release by Le Havre in the summer of 2017, he signed with Arras FA, but opted to leave following the preseason to pursue the opportunity to play college soccer in the United States. College In 2017, Lapert arrived at the University of Charleston, going on to make 21 appearances for the Golden Eagles, scoring five goals and adding a single assist on the way to help the team to win the National Championship. 2018 saw Lapert transfer to the University of Connecticut, for whom he made 40 appearances, scoring four goals and tallying one assist in his three seasons with the Huskies. Following college, Lapert was eligible in the 2021 MLS SuperDraft, but went undrafted. Professional On 1 September 2022, Lapert signed with USL Championship club Hartford Athletic. He made his debut for Hartford on 17 September 2022, appearing as an 86th-minute substitute during a 3–0 win over Las Vegas Lights. On 26 November 2022, it was announced Lapert would remain with Hartford for their upcoming 2023 season. References 1997 births Living people Arras FA players Men's association football defenders Championnat National 2 players Championnat National 3 players Charleston Golden Eagles men's soccer players Expatriate men's soccer players in the United States Footballers from Le Havre French expatriate men's footballers French expatriate sportspeople in the United States French men's footballers Hartford Athletic players Le Havre AC players Sportspeople from Le Havre UConn Huskies men's soccer players USL Championship players
```c++
//
//
// path_to_url
//
// Unless required by applicable law or agreed to in writing, software
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

#include "source/opt/remove_duplicates_pass.h"

#include <algorithm>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "source/opcode.h"
#include "source/opt/decoration_manager.h"
#include "source/opt/ir_context.h"

namespace spvtools {
namespace opt {

// Runs the four deduplication steps (capabilities, extended-instruction-set
// imports, types, decorations) and reports whether any of them changed the
// module.
Pass::Status RemoveDuplicatesPass::Process() {
  bool modified = RemoveDuplicateCapabilities();
  modified |= RemoveDuplicatesExtInstImports();
  modified |= RemoveDuplicateTypes();
  modified |= RemoveDuplicateDecorations();
  return modified ? Status::SuccessWithChange : Status::SuccessWithoutChange;
}

// Removes repeated OpCapability declarations, keeping the first occurrence of
// each capability operand. Returns true if any instruction was removed.
bool RemoveDuplicatesPass::RemoveDuplicateCapabilities() const {
  bool modified = false;

  if (context()->capabilities().empty()) {
    return modified;
  }

  std::unordered_set<uint32_t> capabilities;
  for (auto* i = &*context()->capability_begin(); i;) {
    auto res = capabilities.insert(i->GetSingleWordOperand(0u));

    if (res.second) {
      // Never seen before, keep it.
      i = i->NextNode();
    } else {
      // It's a duplicate, remove it.
      // KillInst yields the node used to continue iterating, so the list is
      // walked safely while erasing from it.
      i = context()->KillInst(i);
      modified = true;
    }
  }

  return modified;
}

// Removes repeated OpExtInstImport instructions that import the same set
// (keyed by the import name string). Uses of a removed import's result id are
// redirected to the retained one. Returns true if any instruction was removed.
bool RemoveDuplicatesPass::RemoveDuplicatesExtInstImports() const {
  bool modified = false;

  if (context()->ext_inst_imports().empty()) {
    return modified;
  }

  std::unordered_map<std::string, spv::Id> ext_inst_imports;
  for (auto* i = &*context()->ext_inst_import_begin(); i;) {
    auto res = ext_inst_imports.emplace(i->GetInOperand(0u).AsString(),
                                        i->result_id());
    if (res.second) {
      // Never seen before, keep it.
      i = i->NextNode();
    } else {
      // It's a duplicate, remove it.
      context()->ReplaceAllUsesWith(i->result_id(), res.first->second);
      i = context()->KillInst(i);
      modified = true;
    }
  }

  return modified;
}

// Removes structurally-equal duplicate type declarations, redirecting uses of
// each removed type to the first equal type encountered. OpTypeForwardPointer
// instructions are compared separately from regular types. Returns true if
// any instruction was removed.
bool RemoveDuplicatesPass::RemoveDuplicateTypes() const {
  bool modified = false;

  if (context()->types_values().empty()) {
    return modified;
  }

  analysis::TypeManager type_manager(context()->consumer(), context());

  std::vector<Instruction*> visited_types;
  std::vector<analysis::ForwardPointer> visited_forward_pointers;
  // Duplicates are only recorded during the scan and killed afterwards, so
  // the list being iterated is not mutated mid-walk.
  std::vector<Instruction*> to_delete;
  for (auto* i = &*context()->types_values_begin(); i; i = i->NextNode()) {
    const bool is_i_forward_pointer =
        i->opcode() == spv::Op::OpTypeForwardPointer;

    // We only care about types.
    if (!spvOpcodeGeneratesType(i->opcode()) && !is_i_forward_pointer) {
      continue;
    }

    if (!is_i_forward_pointer) {
      // Is the current type equal to one of the types we have already visited?
      spv::Id id_to_keep = 0u;
      analysis::Type* i_type = type_manager.GetType(i->result_id());
      assert(i_type);
      // TODO(dneto0): Use a trie to avoid quadratic behaviour? Extract the
      // ResultIdTrie from unify_const_pass.cpp for this.
      for (auto j : visited_types) {
        analysis::Type* j_type = type_manager.GetType(j->result_id());
        assert(j_type);
        if (*i_type == *j_type) {
          id_to_keep = j->result_id();
          break;
        }
      }

      if (id_to_keep == 0u) {
        // This is a never seen before type, keep it around.
        visited_types.emplace_back(i);
      } else {
        // The same type has already been seen before, remove this one.
        context()->KillNamesAndDecorates(i->result_id());
        context()->ReplaceAllUsesWith(i->result_id(), id_to_keep);
        modified = true;
        to_delete.emplace_back(i);
      }
    } else {
      analysis::ForwardPointer i_type(
          i->GetSingleWordInOperand(0u),
          (spv::StorageClass)i->GetSingleWordInOperand(1u));
      i_type.SetTargetPointer(
          type_manager.GetType(i_type.target_id())->AsPointer());
      // TODO(dneto0): Use a trie to avoid quadratic behaviour? Extract the
      // ResultIdTrie from unify_const_pass.cpp for this.
      const bool found_a_match =
          std::find(std::begin(visited_forward_pointers),
                    std::end(visited_forward_pointers),
                    i_type) != std::end(visited_forward_pointers);

      if (!found_a_match) {
        // This is a never seen before type, keep it around.
        visited_forward_pointers.emplace_back(i_type);
      } else {
        // The same type has already been seen before, remove this one.
        modified = true;
        to_delete.emplace_back(i);
      }
    }
  }
  for (auto i : to_delete) {
    context()->KillInst(i);
  }

  return modified;
}

// TODO(pierremoreau): Duplicate decoration groups should be removed. For
// example, in
//     OpDecorate %1 Constant
//     %1 = OpDecorationGroup
//     OpDecorate %2 Constant
//     %2 = OpDecorationGroup
//     OpGroupDecorate %1 %3
//     OpGroupDecorate %2 %4
// group %2 could be removed.
// Removes annotation instructions that duplicate a previously seen one (as
// judged by DecorationManager::AreDecorationsTheSame). Returns true if any
// instruction was removed.
bool RemoveDuplicatesPass::RemoveDuplicateDecorations() const {
  bool modified = false;

  std::vector<const Instruction*> visited_decorations;

  analysis::DecorationManager decoration_manager(context()->module());
  for (auto* i = &*context()->annotation_begin(); i;) {
    // Is the current decoration equal to one of the decorations we have
    // already visited?
    bool already_visited = false;
    // TODO(dneto0): Use a trie to avoid quadratic behaviour? Extract the
    // ResultIdTrie from unify_const_pass.cpp for this.
    for (const Instruction* j : visited_decorations) {
      if (decoration_manager.AreDecorationsTheSame(&*i, j, false)) {
        already_visited = true;
        break;
      }
    }

    if (!already_visited) {
      // This is a never seen before decoration, keep it around.
      visited_decorations.emplace_back(&*i);
      i = i->NextNode();
    } else {
      // The same decoration has already been seen before, remove this one.
      modified = true;
      i = context()->KillInst(i);
    }
  }

  return modified;
}

}  // namespace opt
}  // namespace spvtools
```
```yaml subject: "Ensure keyword" description: "ensure in a begin/end block" notes: > Is represented with an EnsureNode node: - a block body is represented with a `tryPart` child node. - ensure branch is represented with an `ensurePart` child node. focused_on_node: "org.truffleruby.language.control.SequenceNode" ruby: | begin "foo" ensure bar end ast: | SequenceNode attributes: flags = 12 sourceCharIndex = 0 sourceLength = 30 children: body = [ WriteLocalVariableNode attributes: flags = 0 frameSlot = 0 # (self) sourceCharIndex = -1 sourceLength = 0 children: valueNode = ProfileArgumentNodeGen attributes: flags = 0 sourceCharIndex = -1 sourceLength = 0 children: childNode_ = ReadSelfNode attributes: flags = 0 sourceCharIndex = -1 sourceLength = 0 EnsureNodeGen attributes: flags = 0 sourceCharIndex = 0 sourceLength = 30 children: ensurePart = RubyCallNode attributes: descriptor = NoKeywordArgumentsDescriptor dispatchConfig = PRIVATE emptyKeywordsProfile = false flags = 1 isAttrAssign = false isSafeNavigation = false isSplatted = false isVCall = true lastArgIsNotHashProfile = false methodName = "bar" notEmptyKeywordsProfile = false notRuby2KeywordsHashProfile = false sourceCharIndex = 23 sourceLength = 3 children: receiver = SelfNode attributes: flags = 0 sourceCharIndex = -1 sourceLength = 0 tryPart = StringLiteralNode attributes: encoding = UTF-8 flags = 1 sourceCharIndex = 8 sourceLength = 5 tstring = foo ] ```
Showtune is a musical revue celebrating the words and music of Broadway composer and lyricist Jerry Herman. Its title was inspired by Herman's autobiography of the same name. The revue's original title was Tune the Grand Up. After its 1985 San Francisco premiere and several regional productions through the 1990s, the piece played in the West End in 1998 under the title The Best of Times, and Off-Broadway in 2003, titled Showtune. The forty songs featured in Showtune come from Herman's Broadway musicals Milk and Honey (1961), Hello, Dolly! (1964), Mame (1966), Dear World (1969), Mack & Mabel (1974), The Grand Tour (1979), A Day in Hollywood / A Night in the Ukraine (1980), and La Cage aux Folles (1983). Conceived by Paul Gilger, the revue has no dialogue. Its songs are grouped into thematic scenes that tell stories and place a strong emphasis on Herman's lyrics and their optimistic messages. The song-cycle format creates dramatic sub-texts giving through-lines to the show. Scene and Song List Act I It's Today! (Mame) Big Time (Mack & Mabel) We Need a Little Christmas (Mame) Put On Your Sunday Clothes (Hello, Dolly!) Little More Mascara (La Cage aux Folles) The Man In the Moon (Mame) I Am What I Am (La Cage aux Folles) Song On the Sand – Prelude (La Cage aux Folles) I Won't Send Roses (Mack & Mabel) Ribbons Down My Back (Hello, Dolly!) Dancing (Hello, Dolly!) It Takes a Woman (Hello, Dolly!) Wherever He Ain't (Mack & Mabel) Hundreds of Girls (Mack & Mabel) So Long Dearie (Hello, Dolly!) It Takes a Woman – Reprise And I Was Beautiful (Dear World) Kiss Her Now (Dear World) And I Was Beautiful / Kiss Her Now – Counterpoint Time Heals Everything (Mack & Mabel) Before the Parade Passes By (Hello, Dolly!) One Person (Dear World) Open a New Window (Mame) Counterpoint March Before the Parade Passes By – Reprise Act II "Hello, Dolly!" – Entr'acte (Hello, Dolly!) 
Movies Were Movies (Mack & Mabel) Look What Happened to Mabel (Mack & Mabel) That's How Young I Feel (Mame) Look What Happened to Mabel – Reprise My Best Girl (Mame) Nelson (A Day in Hollywood/A Night in the Ukraine) Just Go to the Movies (A Day in Hollywood/A Night in the Ukraine) It Only Takes a Moment (Hello, Dolly!) Gooch's Song (Mame) Tap Your Troubles Away (Mack & Mabel) Bosom Buddies (Mame) I Don't Want to Know (Dear World) I Don't Want to Know – Reprise Song On the Sand (La Cage aux Folles) Shalom (Milk and Honey) I'll Be Here Tomorrow (The Grand Tour) If He Walked Into My Life (Mame) I Promise You a Happy Ending (Mack & Mabel) Mame (Mame) The Best of Times (La Cage aux Folles) It's Today! – Reprise Hello, Dolly! – Encore Synopsis Act I Herman's optimistic view of show business life is presented in "It's Today!", the opening number from Mame, and "Big Time", from Mack and Mabel. On the other hand, "We Need A Little Christmas" and "Put On Your Sunday Clothes!" present his strategies for dealing with bad news and hard times. In the latter number, the cast simulates a train, with the men's bowler hats becoming smokestacks and the ladies' parasols acting as the wheels. Backstage at a Cabaret, an actor makes up as "Zaza" the star of La Cage Aux Folles! ("A Little More Mascara"). A crescent moon descends, and Zaza performs "The Man in the Moon". The cast sings "I Am What I Am." "The Four Seasons" is the theme for Herman's outlook on love. Spring is represented by "I Won't Send Roses", "Ribbons Down My Back" and "Dancing"; summer is a battle of the sexes, with "It Takes A Woman" (men) "Wherever He Ain't!" (women), "Hundreds of Girls" (men) and "So Long, Dearie" (women); and autumn includes "And I Was Beautiful" and "Kiss Her Now" (While She's Young), with the two songs then given in counterpoint. During these numbers, the warring couples reconcile. 
Finally, in winter, the sequence is philosophical: Although "Time Heals Everything", one must act "Before the Parade Passes By!", and "One Person" can change the world, if he or she will "Open a New Window". Act II As he thinks about the days of silent film, Mack Sennett recalls when "Movies Were Movies", and his love story is seen through the eyes of a cameraman who sings "Look What Happened to Mabel". Mabel dances a Charleston to "That's How Young I Feel". Jeanette MacDonald and Nelson Eddy perform "My Best Girl" on set for the umpteenth time, and she complains about his acting ("Nelson"). The company advises us to "Just Go to the Movies". A woman's unrequited love ("It Only Takes a Moment"), segues to a very pregnant Agnes Gooch who enters singing, "It only took a moment" and then her big number "Gooch's Song". A tap dancer encourages her to "Tap Your Troubles Away". Big production numbers for leading ladies follow: "Hello, Dolly!" and "Mame". The two divas sing "Bosom Buddies". A serious and romantic segment follows, with "I Don't Want to Know", "Song on the Sand", "Shalom", "I'll Be Here Tomorrow", "If He Walked Into My Life" and "I Promise You a Happy Ending". These tender recollections yield to a big finale with a medley of production numbers "Mame", "The Best of Times" and a reprise of "It's Today". The cast takes its bows singing "Hello, Dolly!" and asks the audience to join in. Production history San Francisco premiere Showtune was originally titled Tune the Grand Up, and premiered May 1, 1985 at The 1177 Club in the Gramercy Towers on Nob Hill in San Francisco. The cabaret-style show was directed by Paul Gilger and Barbara Valente, with choreography by Valente. The show ran for 2 years. The cast was: John Nockels (Man 1) Darlene Popovic (Woman 1) James Followell (Man 2 and the pianist) Alma Sayles (Woman 2) David Broussal (Man 3) Lise-Marie Thomas (Woman 3) Cindy Herron joined the cast in the second year as Woman 3. 
California and Hawaii In March 1987, a production of Tune the Grand Up opened at the Lyceum Space Theatre in San Diego, California. The show was directed and choreographed by Barbara Valente, with the cast that included Cindy Herron, John Nockels, Tim Connell, Mimi Unser, Darlene Popovic and James Followell. In September 1987, a third production of Tune the Grand Up was financed by actor Richard Smart at the 490 seat Kahilu Theatre in the town of Kamuela, Hawaii. The production was directed and supervised by Gilger. The cast again starred Nockels and Herron, who were joined by A.J. Holmes. In 1996, producer Jennifer Strome optioned the rights to Tune the Grand Up and produced the subsequent productions of the revue from the 1996 production in the Delta King Riverboat Theatre in Sacramento, California, until the 2003 Off-Broadway production. The Sacramento cast included Nockels and Barry Lloyd. In November 1996, Tune the Grand Up returned to San Francisco at the Alcazar Theatre. The production was supervised by Jerry Herman, directed by Jay Manley and choreographed by Barbara Valente, with musical direction by Barry Lloyd. The cast was Pierce Brandt, Dan Johnson, Michelle E. Jordan, Barry Lloyd, Marsha Mercant and Jan Wasser. Every member of the cast won a Hollywood Drama-Logue Award. Off-West End In 1998, the revue was produced twice in London, by Strome in association with Sharleen Cooper Cohen, with a new title, The Best of Times. It was directed and choreographed by Bill Starr. It was produced at the Bridewell Theatre, with the cast that featured Lindsay Hamilton, and Karen Evans. West End The Bridewell production transferred, in November 1998, to the Vaudeville Theatre in the West End. The cast was Garth Bardsley, Kathryn Evans, James Followell (pianist), Sarah Payne, Jamie Golding and Lindsay Hamilton. New York tryout and Off-Broadway In October 2002, the revue, now retitled Showtune, had an out-of-town tryout at the Helen Hayes Theatre in Nyack, New York. 
The production was directed and choreographed by Joey McKneely, with musical direction by James Followell. The cast was expanded from six to seven, with the addition of a fourth man. Martin Vidnovic (Man 1) Donna McKechnie (Woman 1) Paul Harman (Man 2) Sandy Binion (Woman 2) Tom Korbee (Man 3) Russell Arden Koplin (Woman 3) Bobby Peaco (Man 4 and the Pianist) Showtune opened Off-Broadway at the York Theatre at St. Peter's, running from February 18, 2003 to April 13, 2003. The revue was produced by Jenny Strome and David Brown. The Off-Broadway production was also directed and choreographed by McKneely, with music direction by Followell and the same cast as the tryout, except that Karen Murphy replaced McKechnie.<ref>[http://www.lortel.org/Archives/Production/2222 "'Showtune' Listing"], lortel.org, accessed November 29, 2016</ref> Subsequent regional productions In June 2003, Showtune played at the Pasadena Playhouse in Los Angeles. The production was directed by Bill Starr and Sheldon Epps, with choreography by Starr. The cast included Vidnovic, Peaco and Merle Dandridge. In November 2003, the musical was presented at the Caldwell Theatre in Boca Raton, Florida. The production was directed by Michael Hall, with choreography by Barbara Flaten and musical direction by Bobby Peaco. The cast included Vidnovic and Peaco. Since 2003, Showtune has been licensed for hundreds of productions in the United States, England, Scotland, Wales, Australia and Japan. International productions Showtune has been produced at the 2006 Edinburgh Festival Fringe. It has also been seen in 2008 at the Galaxy Theatre in Tokyo and at the Hyogo Performing Arts Center in Nishinomiya, Hyogo. The Japanese-language production was directed by Akio Miki with the all-female Takarazuka Revue Company. Cast recording In 2004, a cast recording of Showtune was produced in New York City with the New York cast (except as noted). The conductor and music director was James Followell.
Martin Vidnovic (Man 1) Karen Murphy (Woman 1) Paul Harman and Steve Wilson (Florida cast) (Man 2) Sandy Binion (Woman 2) Tom Korbee (Man 3) Stephanie Lynge (Los Angeles and Florida casts) (Woman 3) Bobby Peaco (Man 4 and the Pianist) Licensing Showtune page at Music Theatre International, United States licensing Showtune page at Josef Weinberger, United Kingdom licensing Showtune page at Hal Leonard, Australia licensing References External links Showtune article Playbill.com Showtune synopsis page at The Guide to Musical Theatre, United Kingdom website Showtune page JerryHerman.com 1985 musicals Revues
```javascript Types of numbers Data type comparison in `switch` statements No block scope Prototype methods Getting the *real* dimensions of an image ```
Amy Woodman (born 1 November 1984) is a British track and field athlete who competes in the long jump. She was English National Long-jump champion in 2009 and 2010; English indoor long jump champion in 2009; and USA National Collegiate Champion (NCAA Division II) - Long jump in 2008. Early life Amy was born in 1984 and lived in the village of Clutton, Somerset from the age of two years. Woodman studied Sport and Exercise Science at the University of Wales Institute, Cardiff, before winning a two-year scholarship to Ferris State University at Big Rapids, Michigan, USA. Career In March 2008 she represented Ferris State University, Michigan at the USA National Collegiate Championships (NCAA) which were held at Minnesota State University, Mankato. She won the US National Long jump championship with a jump of 5.94 metres. In February 2009, while training at Ferris State University, she visited the UK and won the Aviva UK National Long-jump championship at Sheffield with a distance of 6.40 metres. Her personal best performances are 60 metres - 7.74 secs; 100 metres - 12 seconds; 200 metres - 24.86; high jump - 1.6 metres; long jump - 6.40 metres; triple jump - 11.04 metres. References External links Gallery - Zimbio images of Amy Woodman competing at the AVIVA European Trials & UK Championship - 13 February 2011; AVIVA Grand Prix February 20, 2010; AVIVA World Trials & UK Championships February 14, 2009; Norwich Union Trial & UK Championship July 12, 2008 1984 births Living people Sportspeople from Bristol British female long jumpers English female long jumpers Ferris State University alumni Alumni of Cardiff Metropolitan University
```xml
///
///
///
/// path_to_url
///
/// Unless required by applicable law or agreed to in writing, software
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
///

import { NgModule } from '@angular/core';
import { SharedModule } from '@shared/shared.module';
import { CommonModule } from '@angular/common';
import { SnmpDeviceProfileTransportConfigurationComponent } from '@home/components/profile/device/snmp/snmp-device-profile-transport-configuration.component';
import { SnmpDeviceProfileCommunicationConfigComponent } from '@home/components/profile/device/snmp/snmp-device-profile-communication-config.component';
import { SnmpDeviceProfileMappingComponent } from '@home/components/profile/device/snmp/snmp-device-profile-mapping.component';

// Feature module bundling the SNMP device-profile transport-configuration UI.
// It declares the three SNMP profile components but exports only the
// top-level transport-configuration component; the communication-config and
// mapping components are internal building blocks used by it.
@NgModule({
  declarations: [
    SnmpDeviceProfileTransportConfigurationComponent,
    SnmpDeviceProfileCommunicationConfigComponent,
    SnmpDeviceProfileMappingComponent
  ],
  imports: [
    CommonModule,
    SharedModule
  ],
  exports: [
    SnmpDeviceProfileTransportConfigurationComponent
  ]
})
export class SnmpDeviceProfileTransportModule { }
```
Wolfgang Weber (17 June 1902 – 4 March 1985) was a German photojournalist and film producer. Life and work Wolfgang Weber was born in Leipzig. His father, Friedrich Weber, was a wealthy factory owner who decided to quit business to follow his main interests by taking over the management of the Research Institute for Ethnology in Munich. There, in his early years, Weber was able to get to know numerous cultural artifacts from distant countries in his father's collection. He studied ethnology, philosophy and musicology in Munich, but also completed training as a conductor at the Academy of Musical Art. Erich von Hornbostel, professor at the Phonetic Institute of the Humboldt University in Berlin, appointed Weber as an assistant and sent him on a music-ethnographic research trip to East Africa, to the tribe of the Wadjaggas on Kilimanjaro. In addition to the elaborate sound recording devices that work with wax rolls, with which he recorded the tribal songs, he also worked with a stereo camera. He published the photographic recordings in the Münchner Illustrierte Zeitung (MIZ) in 1925. This was the beginning of his career as a photojournalist. He worked mainly for the Berliner Illustrirte Zeitung and the MIZ, but also published – sometimes under pseudonyms – in several other papers, such as "Die Dame" or "Vossische Zeitung". Alongside Felix H. Man, Erich Salomon, Martin Munkácsi and Alfred Eisenstaedt, Wolfgang Weber is considered a pioneer of modern photojournalism as it was established in Germany around 1920. His subject area included reports on the social, political and economic situation at home and abroad, to the publication of which he also contributed the texts and the layout. From the very beginning, Weber was always looking for something unusual, foreign, strange or new. In his pictures he always found the point that expressed the theme best, so that no text was needed to understand the importance and feel the atmosphere.
Often he used sequences to develop a story in pictures. In 1928 his first book was published by Albertus Verlag in Berlin, with more than 200 photographs forming a portrait of Barcelona. In 1931 the Berliner Illustrierte Zeitung published the impressive social report "Dorf ohne Arbeit" (village without work) on the situation of German unemployed people, in 1933 "The trial that the world is listening to" about the trial against van der Lubbe after the Reichstag fire, and in 1936 "The Olympic Stadium is filling up". But mainly he traveled around the world to Africa, Asia and the Middle East, and in 1943 and 1944 he documented the situation in various European countries. After World War II he was "the leading photo-journalist to have stayed in Germany" and became chief reporter of the Neue Illustrierte, at that time the leading German illustrated magazine. As one of the first German photojournalists, he was also able to work in the USA in 1949. One of his outstanding reports was a comparison of everyday life in New York and Moscow, photographed in the same week. After producing more than 900 reports in 40 years, with about 3,000 published photos, he started a new career in the 1960s with reports for television, which continued into the 1980s. He was one of the very few journalists who were allowed to film in China before, during and after Mao's Cultural Revolution. He was the only one who documented the development of the Cabora Bassa dam over 10 years, with all its social and political aspects and problems. As a freelance journalist, he had access to many well-known personalities of the time, and he made portraits of interesting statesmen, often in the Near and Middle East, such as Ben Gurion and Yasser Arafat. Although he had a wife and two daughters in Cologne, he was traveling almost every day of his life. After his death in 1985 in Cologne, nearly all his works — about 200,000 negatives, films and prints — were sold to the Folkwang Museum in Essen.
An exhibition of his life and works was held in this museum in 2004. Exhibitions 1977 Kassel: documenta 6, 150 Years of Photography 1982 Köln: Reisen ohne Ende – Fotos 1933 bis 1935 (Travels without End – Photos 1933 to 1935), Historical archive of the city of Cologne 1984 Barcelona: Barcelona 1928, Caixa de Barcelona 2004/2005 Essen: Wolfgang Weber's Reports, photography and film from 1925 to 1977. His life and work in a special exhibition, Folkwang Museum Numerous participations in group exhibitions Publications (only books) Barcelona. The face of the cities. Edited by Carl Otto Justh, Albertus Verlag, Berlin 1928. Hotel Affenbrotbaum (Hotel Baobabtree, adventure on the Cape – Cairo highway), Ullstein, Berlin 1936. Abenteuer einer Kamera (Adventure of a camera. Experiences of a picture hunter in Europe and Africa), Deutscher Verlag, Berlin 1938. Reisen ohne Ende Wolfgang Weber sieht die Welt (Travel without end. Wolfgang Weber sees the world), Brüder Auer Verlag, Bonn/Rheindorf 1952. Abenteuer meines Lebens (Adventure of my life), Kurt Desch, Vienna et al. 1960. Auf Abwegen um die Welt (Astray around the world), Sigbert Mohn Verlag, Gütersloh 1964 Hinter den Kulissen des Fernsehens (Behind the scenes of television) Signal-Verlag, Baden-Baden 1975, and: Maier, Ravensburg 1980, Literature Tom Allbeson: Photography, Reconstruction and the Cultural History of the Postwar European ... Routledge 2021 Cecil Beaton, Gail Buckland: The Magic Image: The Genius of Photography from 1839 to the Present Day. Weidenfeld & Nicolson, London und Boston 1975. Catalog for documenta 6. Volume 2: Photography, film, video. Kassel 1977, p. 110 ff. Ute Eskildsen (Ed.): „Fliegen Sie sofort nach …“. Wolfgang Weber – Reportagen, Fotografie und Film 1925 bis 1977. Steidl, Göttingen 2004, Tim N. Gidal: „Wolfgang Weber“ in "Deutschland: Beginn des modernen Fotojournalismus" (Germany: Beginning of modern photojournalism), Bucher Verlag Luzern und Frankfurt, 1972. p. 48 ff.
Tim Gidal: Modern Photojournalism: Origin and Evolution, 1910–1933, translated by Maureen Oberli-Turner, published by Macmillan, Michigan 1973, Kristina Grub: Wolfgang Weber. in: Lynne Warren (Ed.): Encyclopedia of Twentieth-Century Photography. 3-Volume Set. Routledge, New York 2006 p.1653 ff. Anton Holzer: Picture Stories: The Rise of the Photoessay in the Weimar Republic, in International Journal for History, Culture and Modernity 2018: Picture Stories: the Rise of the Photoessay in the Weimar Republic Ian Jeffery: Photography: A Concise History, Oxford University Press, 1981 87 Randy Kaufmann, Brigitte Werneburg: Der Ideenreichste (the most imaginative), in: TAZ. 9. Februar 2005 Der Ideenreichste Daniel H. Magilow: The Photography of Crisis, Pennsylvania State University Press 2012 Karl Ruhrberg: Art of the 20th Century. Taschen 2000, p. 666. References External links Wolfgang Weber Published Photos and Premium High Res Pictures – Getty Images Wolfgang Weber Vintage Stock-Fotos und Bilder – Getty Images Museum der Arbeit 1902 births 1985 deaths German photojournalists German male journalists 20th-century German journalists Film people from Leipzig
Fortin Site is a prehistoric village site located in Oneonta, New York. It was added to the National Register of Historic Places on November 28, 1980. References Archaeological sites in New York (state) National Register of Historic Places in Otsego County, New York Archaeological sites on the National Register of Historic Places in New York (state) Buildings and structures in Oneonta, New York
Mangham is a village in Richland Parish in northeastern Louisiana, United States. The population was 672 at the 2010 census. Mangham was established in 1890. It is named for Wiley P. Mangham, the publisher of the Richland Beacon-News, a weekly newspaper in Rayville, the seat of Richland Parish. Geography Mangham is located at (32.308304, -91.776225). According to the United States Census Bureau, the town has a total area of , all land. The community straddles the border with Franklin Parish. Louisiana Highway 15 runs through Mangham. Demographics As of the census of 2000, there were 595 people, 247 households, and 170 families residing in the town. The population density was . There were 268 housing units at an average density of . The racial makeup of the town was 59.66% White, 39.83% African American, 0.17% Asian, and 0.34% from two or more races. Hispanic or Latino of any race were 1.34% of the population. There were 247 households, out of which 28.3% had children under the age of 18 living with them, 40.9% were married couples living together, 27.1% had a female householder with no husband present, and 30.8% were non-families. 28.7% of all households were made up of individuals, and 13.0% had someone living alone who was 65 years of age or older. The average household size was 2.41 and the average family size was 2.96. In the town, the population was spread out, with 27.1% under the age of 18, 7.4% from 18 to 24, 24.4% from 25 to 44, 23.5% from 45 to 64, and 17.6% who were 65 years of age or older. The median age was 38 years. For every 100 females, there were 85.4 males. For every 100 females age 18 and over, there were 72.9 males. The median income for a household in the town was $17,500, and the median income for a family was $23,558. Males had a median income of $22,273 versus $18,125 for females. The per capita income for the town was $15,813. 
About 22.0% of families and 28.3% of the population were below the poverty line, including 42.8% of those under age 18 and 9.0% of those age 65 or over. Education Public schools in Richland Parish are operated by the Richland Parish School Board. Three campuses serve the village of Mangham - Mangham Elementary School (Grades PK-5), Mangham Junior High School (Grades 6-8), and Mangham High School (Grades 9-12). Notable people Ralph Abraham (born September 16, 1954), veterinarian and physician elected as a Republican to the U.S. House of Representatives in 2014 Robert Max Ross (August 5, 1933 – September 15, 2009) ran as a Republican candidate for governor in 1972 and 1983, as well as the United States Senate, the U.S. House of Representatives, the Louisiana State Senate, and for Mayor of Mangham, Louisiana. References External links Mangham Progress Community Progress Site for Mangham, LA Villages in Richland Parish, Louisiana Villages in Louisiana
```xml
// Barrel file: re-export every public symbol of the CarouselFooter component module.
export * from './components/CarouselFooter/index';
```
```java /* * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * * path_to_url * * Unless required by applicable law or agreed to in writing, * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * specific language governing permissions and limitations */ package org.apache.pulsar.broker.service.schema; import static com.google.common.collect.Lists.newArrayList; import static com.google.protobuf.ByteString.copyFrom; import static java.util.Objects.isNull; import static java.util.concurrent.CompletableFuture.completedFuture; import static org.apache.pulsar.broker.service.schema.BookkeeperSchemaStorage.Functions.newSchemaEntry; import static org.apache.pulsar.metadata.api.MetadataStoreException.AlreadyExistsException; import static org.apache.pulsar.metadata.api.MetadataStoreException.BadVersionException; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.function.Function; import java.util.stream.Collectors; import javax.validation.constraints.NotNull; import org.apache.bookkeeper.client.AsyncCallback; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.LedgerEntry; import org.apache.bookkeeper.client.LedgerHandle; import org.apache.bookkeeper.mledger.impl.LedgerMetadataUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.PulsarService; import 
org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.service.schema.exceptions.IncompatibleSchemaException; import org.apache.pulsar.broker.service.schema.exceptions.SchemaException; import org.apache.pulsar.common.protocol.schema.SchemaStorage; import org.apache.pulsar.common.protocol.schema.SchemaVersion; import org.apache.pulsar.common.protocol.schema.StoredSchema; import org.apache.pulsar.common.schema.LongSchemaVersion; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.metadata.api.MetadataCache; import org.apache.pulsar.metadata.api.MetadataSerde; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.Stat; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class BookkeeperSchemaStorage implements SchemaStorage { private static final Logger log = LoggerFactory.getLogger(BookkeeperSchemaStorage.class); private static final String SchemaPath = "/schemas"; private static final byte[] LedgerPassword = "".getBytes(); private final MetadataStoreExtended store; private final PulsarService pulsar; private final MetadataCache<SchemaStorageFormat.SchemaLocator> locatorEntryCache; private final ServiceConfiguration config; private BookKeeper bookKeeper; private final ConcurrentMap<String, CompletableFuture<StoredSchema>> readSchemaOperations = new ConcurrentHashMap<>(); @VisibleForTesting BookkeeperSchemaStorage(PulsarService pulsar) { this.pulsar = pulsar; this.store = pulsar.getLocalMetadataStore(); this.config = pulsar.getConfiguration(); this.locatorEntryCache = store.getMetadataCache(new MetadataSerde<SchemaStorageFormat.SchemaLocator>() { @Override public byte[] serialize(String path, SchemaStorageFormat.SchemaLocator value) { return value.toByteArray(); } @Override public SchemaStorageFormat.SchemaLocator deserialize(String path, byte[] content, Stat stat) throws IOException { return 
SchemaStorageFormat.SchemaLocator.parseFrom(content); } }); } @Override public void start() throws IOException { this.bookKeeper = pulsar.getBookKeeperClientFactory().create( pulsar.getConfiguration(), store, pulsar.getIoEventLoopGroup(), Optional.empty(), null ).join(); } @Override public CompletableFuture<SchemaVersion> put(String key, byte[] value, byte[] hash) { return putSchema(key, value, hash).thenApply(LongSchemaVersion::new); } @Override public CompletableFuture<SchemaVersion> put(String key, Function<CompletableFuture<List<CompletableFuture<StoredSchema>>>, CompletableFuture<Pair<byte[], byte[]>>> fn) { CompletableFuture<SchemaVersion> promise = new CompletableFuture<>(); put(key, fn, promise); return promise; } private void put(String key, Function<CompletableFuture<List<CompletableFuture<StoredSchema>>>, CompletableFuture<Pair<byte[], byte[]>>> fn, CompletableFuture<SchemaVersion> promise) { CompletableFuture<Pair<Optional<LocatorEntry>, List<CompletableFuture<StoredSchema>>>> schemasWithLocator = getAllWithLocator(key); schemasWithLocator.thenCompose(pair -> fn.apply(completedFuture(pair.getRight())).thenCompose(p -> { // The schema is existed if (p == null) { return CompletableFuture.completedFuture(null); } return putSchema(key, p.getLeft(), p.getRight(), pair.getLeft()); }).thenApply(version -> { return version != null ? 
new LongSchemaVersion(version) : null; })).whenComplete((v, ex) -> { if (ex == null) { promise.complete(v); } else { Throwable cause = FutureUtil.unwrapCompletionException(ex); if (cause instanceof AlreadyExistsException || cause instanceof BadVersionException) { put(key, fn, promise); } else { promise.completeExceptionally(ex); } } }); } @Override public CompletableFuture<StoredSchema> get(String key, SchemaVersion version) { if (version == SchemaVersion.Latest) { return getSchema(key); } else { LongSchemaVersion longVersion = (LongSchemaVersion) version; return getSchema(key, longVersion.getVersion()); } } @Override public CompletableFuture<List<CompletableFuture<StoredSchema>>> getAll(String key) { return getAllWithLocator(key).thenApply(Pair::getRight); } private CompletableFuture<Pair<Optional<LocatorEntry>, List<CompletableFuture<StoredSchema>>>> getAllWithLocator( String key) { return getLocator(key).thenApply(locator -> { if (log.isDebugEnabled()) { log.debug("[{}] Get all schemas - locator: {}", key, locator); } if (locator.isEmpty()) { return Pair.of(locator, Collections.emptyList()); } SchemaStorageFormat.SchemaLocator schemaLocator = locator.get().locator; List<CompletableFuture<StoredSchema>> list = new ArrayList<>(); schemaLocator.getIndexList().forEach(indexEntry -> list.add(readSchemaEntry(indexEntry.getPosition()) .thenApply(entry -> new StoredSchema ( entry.getSchemaData().toByteArray(), new LongSchemaVersion(indexEntry.getVersion()) ) ) )); return Pair.of(locator, list); }); } CompletableFuture<Optional<LocatorEntry>> getLocator(String key) { return getSchemaLocator(getSchemaPath(key)); } public List<Long> getSchemaLedgerList(String key) throws IOException { Optional<LocatorEntry> locatorEntry = null; try { locatorEntry = getLocator(key).get(); } catch (Exception e) { log.warn("Failed to get list of schema-storage ledger for {}, the exception as follow: \n {}", key, (e instanceof ExecutionException ? 
e.getCause() : e)); throw new IOException("Failed to get schema ledger for" + key); } LocatorEntry entry = locatorEntry.orElse(null); return entry != null ? entry.locator.getIndexList().stream().map(i -> i.getPosition().getLedgerId()) .collect(Collectors.toList()) : null; } @VisibleForTesting BookKeeper getBookKeeper() { return bookKeeper; } @Override public CompletableFuture<SchemaVersion> delete(String key, boolean forcefully) { return deleteSchema(key, forcefully).thenApply(version -> { if (version == null) { return null; } return new LongSchemaVersion(version); }); } @Override public CompletableFuture<SchemaVersion> delete(String key) { return delete(key, false); } @NotNull private CompletableFuture<StoredSchema> getSchema(String schemaId) { // There's already a schema read operation in progress. Just piggyback on that return readSchemaOperations.computeIfAbsent(schemaId, key -> { if (log.isDebugEnabled()) { log.debug("[{}] Fetching schema from store", schemaId); } return getSchemaLocator(getSchemaPath(schemaId)).thenCompose(locator -> { if (log.isDebugEnabled()) { log.debug("[{}] Got schema locator {}", schemaId, locator); } if (!locator.isPresent()) { return completedFuture(null); } SchemaStorageFormat.SchemaLocator schemaLocator = locator.get().locator; return readSchemaEntry(schemaLocator.getInfo().getPosition()) .thenApply(entry -> new StoredSchema(entry.getSchemaData().toByteArray(), new LongSchemaVersion(schemaLocator.getInfo().getVersion()))); }); }).whenComplete((res, ex) -> { if (log.isDebugEnabled()) { log.debug("[{}] Get operation completed. res={} -- ex={}", schemaId, res, ex); } readSchemaOperations.remove(schemaId); }); } @Override public SchemaVersion versionFromBytes(byte[] version) { // The schema storage converts the schema from bytes to long // so it handles both cases 1) version is 64 bytes long pre 2.4.0; // 2) version is 8 bytes long post 2.4.0 // // NOTE: if you are planning to change the logic here. 
you should consider // both 64 bytes and 8 bytes cases. ByteBuffer bb = ByteBuffer.wrap(version); return new LongSchemaVersion(bb.getLong()); } @Override public void close() throws Exception { if (bookKeeper != null) { bookKeeper.close(); } } @NotNull private CompletableFuture<StoredSchema> getSchema(String schemaId, long version) { if (log.isDebugEnabled()) { log.debug("[{}] Get schema - version: {}", schemaId, version); } return getSchemaLocator(getSchemaPath(schemaId)).thenCompose(locator -> { if (log.isDebugEnabled()) { log.debug("[{}] Get schema - version: {} - locator: {}", schemaId, version, locator); } if (!locator.isPresent()) { return completedFuture(null); } SchemaStorageFormat.SchemaLocator schemaLocator = locator.get().locator; if (version > schemaLocator.getInfo().getVersion()) { return completedFuture(null); } return findSchemaEntryByVersion(schemaLocator.getIndexList(), version) .thenApply(entry -> new StoredSchema( entry.getSchemaData().toByteArray(), new LongSchemaVersion(version) ) ); }); } @NotNull private CompletableFuture<Long> putSchema(String schemaId, byte[] data, byte[] hash) { return getSchemaLocator(getSchemaPath(schemaId)).thenCompose(optLocatorEntry -> putSchema(schemaId, data, hash, optLocatorEntry)); } private CompletableFuture<Long> putSchema(String schemaId, byte[] data, byte[] hash, Optional<LocatorEntry> optLocatorEntry) { if (optLocatorEntry.isPresent()) { SchemaStorageFormat.SchemaLocator locator = optLocatorEntry.get().locator; if (log.isDebugEnabled()) { log.debug("[{}] findSchemaEntryByHash - hash={}", schemaId, hash); } //don't check the schema whether already exist return readSchemaEntry(locator.getIndexList().get(0).getPosition()) .thenCompose(schemaEntry -> addNewSchemaEntryToStore(schemaId, locator.getIndexList(), data).thenCompose( position -> updateSchemaLocator(schemaId, optLocatorEntry.get(), position, hash)) ); } else { return createNewSchema(schemaId, data, hash); } } private CompletableFuture<Long> 
createNewSchema(String schemaId, byte[] data, byte[] hash) { SchemaStorageFormat.IndexEntry emptyIndex = SchemaStorageFormat.IndexEntry.newBuilder() .setVersion(0) .setHash(copyFrom(hash)) .setPosition(SchemaStorageFormat.PositionInfo.newBuilder() .setEntryId(-1L) .setLedgerId(-1L) ).build(); return addNewSchemaEntryToStore(schemaId, Collections.singletonList(emptyIndex), data).thenCompose(position -> { // The schema was stored in the ledger, now update the z-node with the pointer to it SchemaStorageFormat.IndexEntry info = SchemaStorageFormat.IndexEntry.newBuilder() .setVersion(0) .setPosition(position) .setHash(copyFrom(hash)) .build(); return createSchemaLocator(getSchemaPath(schemaId), SchemaStorageFormat.SchemaLocator.newBuilder() .setInfo(info) .addAllIndex( newArrayList(info)) .build()) .thenApply(ignore -> 0L); }); } @NotNull private CompletableFuture<Long> deleteSchema(String schemaId, boolean forcefully) { return (forcefully ? CompletableFuture.completedFuture(null) : ignoreUnrecoverableBKException(getSchema(schemaId))).thenCompose(schemaAndVersion -> { if (!forcefully && isNull(schemaAndVersion)) { return completedFuture(null); } else { // The version is only for the compatibility of the current interface final long version = -1; CompletableFuture<Long> future = new CompletableFuture<>(); getLocator(schemaId).whenComplete((locator, ex) -> { if (ex != null) { future.completeExceptionally(ex); } else { if (!locator.isPresent()) { future.complete(null); return; } List<SchemaStorageFormat.IndexEntry> indexEntryList = locator.get().locator.getIndexList(); List<CompletableFuture<Void>> deleteFutures = new ArrayList<>(indexEntryList.size()); indexEntryList.forEach(indexEntry -> { final long ledgerId = indexEntry.getPosition().getLedgerId(); CompletableFuture<Void> deleteFuture = new CompletableFuture<>(); deleteFutures.add(deleteFuture); bookKeeper.asyncDeleteLedger(ledgerId, (int rc, Object cnx) -> { if (rc != BKException.Code.OK) { // It's not a serious 
error, we didn't need call future.completeExceptionally() log.warn("Failed to delete ledger {} of {}: {}", ledgerId, schemaId, rc); } deleteFuture.complete(null); }, null); }); FutureUtil.waitForAll(deleteFutures).whenComplete((v, e) -> { final String path = getSchemaPath(schemaId); store.delete(path, Optional.empty()) .thenRun(() -> { future.complete(version); }).exceptionally(zkException -> { if (zkException.getCause() instanceof MetadataStoreException.NotFoundException) { // The znode has been deleted by others. // In some cases, the program may enter this logic. // Since the znode is gone, we dont need to deal with it. if (log.isDebugEnabled()) { log.debug("No node for schema path: {}", path); } future.complete(null); } else { future.completeExceptionally(zkException); } return null; }); }); } }); return future; } }); } @NotNull private static String getSchemaPath(String schemaId) { return SchemaPath + "/" + schemaId; } @NotNull private CompletableFuture<SchemaStorageFormat.PositionInfo> addNewSchemaEntryToStore( String schemaId, List<SchemaStorageFormat.IndexEntry> index, byte[] data ) { SchemaStorageFormat.SchemaEntry schemaEntry = newSchemaEntry(index, data); return createLedger(schemaId).thenCompose(ledgerHandle -> { final long ledgerId = ledgerHandle.getId(); return addEntry(ledgerHandle, schemaEntry) .thenApply(entryId -> { ledgerHandle.closeAsync(); return Functions.newPositionInfo(ledgerId, entryId); }); }); } @NotNull private CompletableFuture<Long> updateSchemaLocator( String schemaId, LocatorEntry locatorEntry, SchemaStorageFormat.PositionInfo position, byte[] hash ) { long nextVersion = locatorEntry.locator.getInfo().getVersion() + 1; SchemaStorageFormat.SchemaLocator locator = locatorEntry.locator; SchemaStorageFormat.IndexEntry info = SchemaStorageFormat.IndexEntry.newBuilder() .setVersion(nextVersion) .setPosition(position) .setHash(copyFrom(hash)) .build(); final ArrayList<SchemaStorageFormat.IndexEntry> indexList = new ArrayList<>(); 
indexList.addAll(locator.getIndexList()); indexList.add(info); return updateSchemaLocator(getSchemaPath(schemaId), SchemaStorageFormat.SchemaLocator.newBuilder() .setInfo(info) .addAllIndex(indexList) .build() , locatorEntry.version ).thenApply(ignore -> nextVersion).whenComplete((__, ex) -> { if (ex != null) { Throwable cause = FutureUtil.unwrapCompletionException(ex); log.warn("[{}] Failed to update schema locator with position {}", schemaId, position, cause); if (cause instanceof AlreadyExistsException || cause instanceof BadVersionException) { bookKeeper.asyncDeleteLedger(position.getLedgerId(), new AsyncCallback.DeleteCallback() { @Override public void deleteComplete(int rc, Object ctx) { if (rc != BKException.Code.OK) { log.warn("[{}] Failed to delete ledger {} after updating schema locator failed, rc: {}", schemaId, position.getLedgerId(), rc); } } }, null); } } }); } @NotNull private CompletableFuture<SchemaStorageFormat.SchemaEntry> findSchemaEntryByVersion( List<SchemaStorageFormat.IndexEntry> index, long version ) { if (index.isEmpty()) { return completedFuture(null); } SchemaStorageFormat.IndexEntry lowest = index.get(0); if (version < lowest.getVersion()) { return readSchemaEntry(lowest.getPosition()) .thenCompose(entry -> findSchemaEntryByVersion(entry.getIndexList(), version)); } for (SchemaStorageFormat.IndexEntry entry : index) { if (entry.getVersion() == version) { return readSchemaEntry(entry.getPosition()); } else if (entry.getVersion() > version) { break; } } return completedFuture(null); } @NotNull private CompletableFuture<SchemaStorageFormat.SchemaEntry> readSchemaEntry( SchemaStorageFormat.PositionInfo position ) { if (log.isDebugEnabled()) { log.debug("Reading schema entry from {}", position); } return openLedger(position.getLedgerId()) .thenCompose((ledger) -> Functions.getLedgerEntry(ledger, position.getEntryId()) .thenCompose(entry -> closeLedger(ledger) .thenApply(ignore -> entry) ) ).thenCompose(Functions::parseSchemaEntry); } 
@NotNull private CompletableFuture<Void> updateSchemaLocator(String id, SchemaStorageFormat.SchemaLocator schema, long version) { return store.put(id, schema.toByteArray(), Optional.of(version)).thenApply(__ -> null); } @NotNull private CompletableFuture<LocatorEntry> createSchemaLocator(String id, SchemaStorageFormat.SchemaLocator locator) { return store.put(id, locator.toByteArray(), Optional.of(-1L)) .thenApply(stat -> new LocatorEntry(locator, stat.getVersion())); } @NotNull private CompletableFuture<Optional<LocatorEntry>> getSchemaLocator(String schema) { return locatorEntryCache.getWithStats(schema) .thenApply(o -> o.map(r -> new LocatorEntry(r.getValue(), r.getStat().getVersion()))); } @NotNull private CompletableFuture<Long> addEntry(LedgerHandle ledgerHandle, SchemaStorageFormat.SchemaEntry entry) { final CompletableFuture<Long> future = new CompletableFuture<>(); ledgerHandle.asyncAddEntry(entry.toByteArray(), (rc, handle, entryId, ctx) -> { if (rc != BKException.Code.OK) { future.completeExceptionally(bkException("Failed to add entry", rc, ledgerHandle.getId(), -1)); } else { future.complete(entryId); } }, null ); return future; } @NotNull private CompletableFuture<LedgerHandle> createLedger(String schemaId) { Map<String, byte[]> metadata = LedgerMetadataUtils.buildMetadataForSchema(schemaId); final CompletableFuture<LedgerHandle> future = new CompletableFuture<>(); try { bookKeeper.asyncCreateLedger( config.getManagedLedgerDefaultEnsembleSize(), config.getManagedLedgerDefaultWriteQuorum(), config.getManagedLedgerDefaultAckQuorum(), BookKeeper.DigestType.fromApiDigestType(config.getManagedLedgerDigestType()), LedgerPassword, (rc, handle, ctx) -> { if (rc != BKException.Code.OK) { future.completeExceptionally(bkException("Failed to create ledger", rc, -1, -1)); } else { future.complete(handle); } }, null, metadata); } catch (Throwable t) { log.error("[{}] Encountered unexpected error when creating schema ledger", schemaId, t); return 
FutureUtil.failedFuture(t); } return future; } @NotNull private CompletableFuture<LedgerHandle> openLedger(Long ledgerId) { final CompletableFuture<LedgerHandle> future = new CompletableFuture<>(); bookKeeper.asyncOpenLedger( ledgerId, BookKeeper.DigestType.fromApiDigestType(config.getManagedLedgerDigestType()), LedgerPassword, (rc, handle, ctx) -> { if (rc != BKException.Code.OK) { future.completeExceptionally(bkException("Failed to open ledger", rc, ledgerId, -1)); } else { future.complete(handle); } }, null ); return future; } @NotNull private CompletableFuture<Void> closeLedger(LedgerHandle ledgerHandle) { CompletableFuture<Void> future = new CompletableFuture<>(); ledgerHandle.asyncClose((rc, handle, ctx) -> { if (rc != BKException.Code.OK) { future.completeExceptionally(bkException("Failed to close ledger", rc, ledgerHandle.getId(), -1)); } else { future.complete(null); } }, null); return future; } public CompletableFuture<List<Long>> getStoreLedgerIdsBySchemaId(String schemaId) { CompletableFuture<List<Long>> ledgerIdsFuture = new CompletableFuture<>(); getSchemaLocator(getSchemaPath(schemaId)).thenAccept(locator -> { if (log.isDebugEnabled()) { log.debug("[{}] Get all store schema ledgerIds - locator: {}", schemaId, locator); } if (!locator.isPresent()) { ledgerIdsFuture.complete(Collections.emptyList()); return; } Set<Long> ledgerIds = new HashSet<>(); SchemaStorageFormat.SchemaLocator schemaLocator = locator.get().locator; schemaLocator.getIndexList().forEach(indexEntry -> ledgerIds.add(indexEntry.getPosition().getLedgerId())); ledgerIdsFuture.complete(new ArrayList<>(ledgerIds)); }).exceptionally(e -> { ledgerIdsFuture.completeExceptionally(e); return null; }); return ledgerIdsFuture; } interface Functions { static CompletableFuture<LedgerEntry> getLedgerEntry(LedgerHandle ledger, long entry) { final CompletableFuture<LedgerEntry> future = new CompletableFuture<>(); ledger.asyncReadEntries(entry, entry, (rc, handle, entries, ctx) -> { if (rc != 
BKException.Code.OK) { future.completeExceptionally(bkException("Failed to read entry", rc, ledger.getId(), entry)); } else { future.complete(entries.nextElement()); } }, null ); return future; } static CompletableFuture<SchemaStorageFormat.SchemaEntry> parseSchemaEntry(LedgerEntry ledgerEntry) { CompletableFuture<SchemaStorageFormat.SchemaEntry> result = new CompletableFuture<>(); try { result.complete(SchemaStorageFormat.SchemaEntry.parseFrom(ledgerEntry.getEntry())); } catch (IOException e) { result.completeExceptionally(e); } return result; } static SchemaStorageFormat.SchemaEntry newSchemaEntry( List<SchemaStorageFormat.IndexEntry> index, byte[] data ) { return SchemaStorageFormat.SchemaEntry.newBuilder() .setSchemaData(copyFrom(data)) .addAllIndex(index) .build(); } static SchemaStorageFormat.PositionInfo newPositionInfo(long ledgerId, long entryId) { return SchemaStorageFormat.PositionInfo.newBuilder() .setLedgerId(ledgerId) .setEntryId(entryId) .build(); } } static class LocatorEntry { final SchemaStorageFormat.SchemaLocator locator; final long version; LocatorEntry(SchemaStorageFormat.SchemaLocator locator, long version) { this.locator = locator; this.version = version; } } public static Exception bkException(String operation, int rc, long ledgerId, long entryId) { String message = org.apache.bookkeeper.client.api.BKException.getMessage(rc) + " - ledger=" + ledgerId + " - operation=" + operation; if (entryId != -1) { message += " - entry=" + entryId; } boolean recoverable = rc != BKException.Code.NoSuchLedgerExistsException && rc != BKException.Code.NoSuchEntryException && rc != BKException.Code.NoSuchLedgerExistsOnMetadataServerException; return new SchemaException(recoverable, message); } public static <T> CompletableFuture<T> ignoreUnrecoverableBKException(CompletableFuture<T> source) { return source.exceptionally(t -> { if (t.getCause() != null && (t.getCause() instanceof SchemaException) && !(t.getCause() instanceof IncompatibleSchemaException) && 
!((SchemaException) t.getCause()).isRecoverable()) { // Meeting NoSuchLedgerExistsException, NoSuchEntryException or // NoSuchLedgerExistsOnMetadataServerException when reading schemas in // bookkeeper. This also means that the data has already been deleted by other operations // in deleting schema. if (log.isDebugEnabled()) { log.debug("Schema data in bookkeeper may be deleted by other operations.", t); } return null; } // rethrow other cases throw t instanceof CompletionException ? (CompletionException) t : new CompletionException(t); }); } } ```
Rais has won five medals at the Asia Pacific Bowls Championships, including a double bronze at the 2019 Asia Pacific Bowls Championships in the Gold Coast, Queensland, and a gold medal in lawn bowls at the 2017 Southeast Asian Games.
A hair whorl is a patch of hair growing in the opposite direction of the rest of the hair. Hair whorls can occur on animals with hairy coats, and are often found on horses and cows. Locations where whorls are found in equines include the stomach, face, stifle and hocks. Hair whorls in horses are also known as crowns, swirls, trichoglyphs, or cowlicks. Hair whorls are sometimes classified according to the direction of hair growth (e.g. clockwise or counterclockwise), shape, or other physical characteristics. Anecdotal evidence claims a statistical correlation between the location, number, or type of whorls and behaviour or temperament in horses and other species (but see Correlation does not imply causation). There is some research suggesting that the direction of hair whorls may correlate to a horse's preference for the right or left lead and other directionality. History The theories that hair whorls could describe various physical and personality characteristics in horses have been around for thousands of years. There are references of hair whorls in the works of the Indian sage Salihotra. Bedouin horsemen used whorls to determine the value of horses for sale. One Arabian horse has been recorded with 40 whorls on his body, although the average horse has around six. Bedouins looked for whorls between the horse's ears as a sign of swiftness, and if there were any on either side of the neck, they were known as the 'finger of the Prophet'. One legend of whorls is the "Prophet's Thumbprint" a birthmark in the form of an indentation, usually found on the side of a horse’s neck, totally harmless although it comes with a legend. The Prophet Mohammed was wandering the desert with his herd of horses for many days, and as they approached an oasis he sent them forth to drink. But as the thirsty horses approached the water, he called them back. 
Only five of his mares stopped and returned to him, and to thank them for their loyalty he blessed them by pressing his thumbprint into their necks. It’s believed that a horse with such a mark will be outstanding, being a descendant of one of these brood mares that the Prophet Mohammed particularly treasured. Other Bedouin beliefs include: A whorl on the chest meant prosperity. A whorl on the girth was a sign of good fortune, and an increase in flocks A whorl on the flank was known as a 'spur whorl' and if curved up meant safety in battle; if inclined downwards it meant prosperity. The Byerley Turk, a founding sire of the Thoroughbred breed, was said to have spur whorls and was never hurt in battle. The Whorl of the Sultan was located on the windpipe, and meant love and prosperity. Whorls above the eyes meant the master was to die of a head injury The whorl of the coffin was located close to the withers. If sloping downwards towards the shoulder it meant the rider would die in the saddle, probably in battle or from a gunshot. Classification There are several types of whorls on horses: Simple: hairs draw into a single point from all directions Tufted: hairs converges and piles up into a tuft Linear: hair growing in opposite directions meet along the same line vertically Crested: hair growing in opposite directions meet to form a crest Feathered: hair meets along a line but at an angle to form a feathered pattern Relation to behaviour Several studies have reported a statistical relationship between the location, number, or type of whorls and behaviour or temperament in horses. One study of 219 working horses found a relation between the direction of facial hair whorls and motor laterality; right-lateralised horses had significantly more clockwise facial hair whorls and left-lateralised horses had significantly more counter-clockwise facial hair whorls. 
They looked longer and were slower to approach than the single-whorled horses.
Myanmar coup d'état may refer to: 1962 Burmese coup d'état 1988 Burmese coup d'état 2021 Myanmar coup d'état
```javascript
// Fixture: a function whose return-type annotation is itself a function type
// `(_: bool) => number`, with an empty body. Kept verbatim — presumably a
// parser/type-checker test input, so the unusual syntax is intentional
// (TODO confirm: `bool` is not a standard TypeScript type; `boolean` would be).
function foo():(_:bool) => number{}
```
This is a list of buckwheat dishes, consisting of dishes that use buckwheat as a main ingredient. Buckwheat is a plant cultivated for its grain-like seeds and as a cover crop. A related and more bitter species, Fagopyrum tataricum, a domesticated food plant common in Asia, but not as common in Europe or North America, is also referred to as buckwheat. Buckwheat dishes Ajdovi ΕΎganci – translates to English as "buckwheat spoonbread". It is a national Slovene dish. Broeder – a traditional Dutch dish: a batter with buckwheat flour, yeast, and other ingredients is boiled in a cotton bag. Buckwheat pancake Blini – an Eastern European pancake made with buckwheat flour Kaletez – a Breton pancake made with buckwheat flour Memil-buchimgae – a Korean pancake made with buckwheat flour Ploye – a pancake made of buckwheat flour, wheat flour, baking powder and water popular in Northeastern Canada and the United States CrΓͺpe bretonne – a traditional dish in Lower Brittany, it can be made of wheat (sweet crΓͺpe) or buckwheat (salted crΓͺpe). This latter is less well-known and should not be confused with the buckwheat pancake typical of Upper Brittany, which has a different recipe. Crozets de Savoie – small flat square-shaped pasta originally made in the Savoie region in southeast France, the crozets were traditionally made at home by housewives using buckwheat or wheat, or sometimes both. Galette-saucisse – a French street food item consisting of a hot sausage wrapped in a cold type of crΓͺpe called galette de sarrasin or Breton galette. The French region known as Upper Brittany is the traditional homeland of the dish, which is prepared using buckwheat for the crΓͺpe and pork sausage. Grechka – toasted buckwheat, often prepared with butter, commonly eaten in Eastern Europe. Jat-guksu – a Korean noodle dish consisting of buckwheat or wheat flour noodles in a bowl of cold broth made from ground pine nuts. It is a local specialty of Gapyeong, Gyeonggi Province, South Korea. 
Kasha varnishkes – a traditional Ashkenazi Jewish dish that combines grechka (buckwheat groats) with noodles, typically farfalle (bow-tie pasta). Kig ha farz – a cooked dish eaten traditionally in Brittany consisting of various meats simmered in a broth with a buckwheat flour based pudding. Mak-guksu – a Korean buckwheat noodle dish served in a chilled broth and sometimes with sugar, mustard, sesame oil or vinegar. Memil-muk – a Korean dish consisting of a light gray-brown muk (jelly) made from buckwheat starch. It is commonly served as banchan (a side dish accompanying rice) as well as anju (food accompanying alcoholic drinks). Mezzelune – a semi-circular stuffed pasta, similar to ravioli or pierogi. The dough is usually made of white flour or buckwheat flour, durum semolina, and mixed with eggs and olive oil. Naengmyeon – a Korean noodle dish of long and thin handmade noodles made from the flour and starch of various ingredients, including buckwheat, potatoes, sweet potatoes, arrowroot starch and kudzu. Oyaki – a Japanese dumpling made from a fermented buckwheat dough wrapped around a stuffing of Japanese vegetables, fruit, or anko bean paste and then roasted on an iron pan. Pizzoccheri – an Italian pasta from Valtellina, Lombardy. Poffert – a traditional Dutch dish from Groningen; a batter containing buckwheat flour and other ingredients is cooked au bain marie in a special tin Poffertjes – a traditional Dutch batter treat resembling small, fluffy pancakes; they are made with yeast and buckwheat flour and have a light, spongy texture. Scrapple – a Pennsylvania Dutch mush of pork scraps, flour (can be wheat or buckwheat), and spices often eaten for breakfast. Soba – the Japanese name for buckwheat, it usually refers to thin noodles made from buckwheat flour, or a combination of buckwheat and wheat flours (nagano soba). 
Stip – a regional dish in the Dutch provinces of Groningen, Drenthe and Overijssel, it is served as buckwheat porridge with a hole containing fried bacon and a spoonful of syrup. Buchweizentorte – a traditional cake from the Italian South Tyrol region made of buckwheat flour. Beverages Buckwheat tea – a tea prepared using roasted buckwheat Buckwheat whisky – a type of distilled alcoholic beverage produced entirely or principally from buckwheat. See also Buckwheat Β§ Culinary use – detailed information about buckwheat as a food source Buckwheat honey List of edible seeds List of poppy seed pastries and dishes List of sesame seed dishes References Lists of foods by ingredient
```javascript import PropTypes from 'prop-types'; import React, { useCallback, useEffect, useMemo, useState } from 'react'; import './ProfileMenu.css'; import compose from '../utils/compose'; import fetchJSON from '../utils/fetchJSON'; import connectGitHubProfileAvatar from '../gitHubProfile/hoc/avatarURL'; import connectGitHubProfileName from '../gitHubProfile/hoc/name'; import connectGitHubSignInButton from '../gitHubProfile/hoc/signInButton'; import connectGitHubSignOutButton from '../gitHubProfile/hoc/signOutButton'; import GitHubProfileComposer from '../gitHubProfile/Composer'; const SETTINGS_URL = '/api/github/settings'; // We will fetch the authorize URL and client ID for the GitHub sign-in flow from the server. // This helps decouple the server settings (e.g. client ID) from the HTML code. async function fetchSettings() { try { const { authorizeURL, clientId } = await fetchJSON(SETTINGS_URL); return { authorizeURL, clientId }; } catch (err) { throw new Error('OAuth: Failed to fetch settings'); } } // The props are passed by GitHubProfileContext and its related composer. const GitHubProfileMenu = ({ avatarURL, name, oauthReviewAccessURL, onSignIn, // This will become falsy if sign in is not available, e.g. already signed in or misconfiguration onSignOut // This will become falsy if sign out is not available, e.g. not signed in }) => { const [expanded, setExpanded] = useState(false); const signedIn = !!onSignOut; // Listen to "signin" event from the window. // The "signin" event is fired when the user click on the "Sign in" button in Web Chat. useEffect(() => { window.addEventListener('signin', ({ data: { provider } = {} }) => provider === 'github' && onSignIn && onSignIn()); return () => window.removeEventListener('signin', onSignIn); }); // Listen to "signout" event from the window. // The "signout" event is fired when the bot requests the webpage to sign out. 
useEffect(() => { window.addEventListener('signout', onSignOut); return () => window.removeEventListener('signout', onSignOut); }); // CSS style for displaying avatar as background image. // Background image will ease handling 404 or other HTTP errors by not showing the image. const avatarStyle = useMemo( () => ({ backgroundImage: `url(${avatarURL || '/images/GitHub-Mark-64px-DDD-White.png'})` }), [avatarURL] ); // In addition to running the sign in logic from OAuth context, we will also collapse the menu. const handleSignIn = useCallback(() => { onSignIn && onSignIn(); setExpanded(false); }, [onSignIn]); // In addition to running the sign out logic from OAuth context, we will also collapse the menu. const handleSignOut = useCallback(() => { onSignOut && onSignOut(); setExpanded(false); }, [onSignOut]); const handleToggleExpand = useCallback(() => setExpanded(!expanded), [expanded]); return ( <div aria-expanded={expanded} className="sso__profile"> <button aria-label="Open profile menu" className="sso__profileAvatar" onClick={signedIn ? 
handleToggleExpand : handleSignIn} style={avatarStyle} > {signedIn && <div className="sso__profileAvatarBadge sso__profileAvatarBadge__gitHub" />} </button> {signedIn && expanded && ( <ul className="sso__profileMenu"> {name && ( <li className="sso__profileMenuItem"> <span> Signed in as <strong>{name}</strong> </span> </li> )} {onSignOut && oauthReviewAccessURL && ( <li className="sso__profileMenuItem"> <a href={oauthReviewAccessURL} rel="noopener noreferrer" target="_blank"> Review access on GitHub </a> </li> )} {onSignOut && ( <li className="sso__profileMenuItem"> <button onClick={handleSignOut} type="button"> Sign out </button> </li> )} </ul> )} </div> ); }; GitHubProfileMenu.defaultProps = { accessToken: '', avatarURL: '', name: '', oauthReviewAccessURL: '', onSignIn: undefined, onSignOut: undefined, setAccessToken: undefined }; GitHubProfileMenu.propTypes = { accessToken: PropTypes.string, avatarURL: PropTypes.string, name: PropTypes.string, oauthReviewAccessURL: PropTypes.string, onSignIn: PropTypes.func, onSignOut: PropTypes.func, setAccessToken: PropTypes.func }; // Borrowed from react-redux, "compose" is a function that combines the results of the functions. // The functions listed here will retrieve corresponding information from React context. const ComposedGitHubProfileMenu = compose( connectGitHubProfileAvatar(), connectGitHubProfileName(), connectGitHubSignInButton(({ onClick }) => ({ onSignIn: onClick })), connectGitHubSignOutButton(({ onClick }) => ({ onSignOut: onClick })) )(GitHubProfileMenu); const ConnectedGitHubProfileMenu = ({ accessToken, onAccessTokenChange }) => { const [oauthAuthorizeURL, setOAuthAuthorizeURL] = useState(''); const [oauthReviewAccessURL, setOAuthReviewAccessURL] = useState(''); useMemo(async () => { const { authorizeURL, clientId } = await fetchSettings(); setOAuthAuthorizeURL(authorizeURL); // The OAuth review access URL is constructed based on OAuth client ID. 
setOAuthReviewAccessURL(`path_to_url{clientId}`); }, []); return ( <GitHubProfileComposer accessToken={accessToken} oauthAuthorizeURL={oauthAuthorizeURL} onAccessTokenChange={onAccessTokenChange} > <ComposedGitHubProfileMenu oauthReviewAccessURL={oauthReviewAccessURL} /> </GitHubProfileComposer> ); }; ConnectedGitHubProfileMenu.defaultProps = { onSignedInChange: undefined }; ConnectedGitHubProfileMenu.propTypes = { accessToken: PropTypes.string.isRequired, onAccessTokenChange: PropTypes.func.isRequired, onSignedInChange: PropTypes.func }; export default ConnectedGitHubProfileMenu; ```
```go // // // path_to_url // // Unless required by applicable law or agreed to in writing, software // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. package main import ( "fmt" "os" "github.com/prometheus-operator/prometheus-operator/pkg/versionutil" ) func main() { versionutil.RegisterParseFlags() if versionutil.ShouldPrintVersion() { versionutil.Print(os.Stdout, "po-docgen") os.Exit(0) } switch os.Args[1] { case "compatibility": cm := getCompatibilityMatrix() var ( opt string lines []string ) if len(os.Args) > 2 { opt = os.Args[2] } switch opt { case "defaultAlertmanagerVersion": lines = []string{cm.DefaultAlertmanager} case "defaultPrometheusVersion": lines = []string{cm.DefaultPrometheus} case "defaultThanosVersion": lines = []string{cm.DefaultThanos} default: lines = cm.PrometheusVersions } for _, s := range lines { fmt.Printf("* %s\n", s) } } } ```
Satbani is a village and union council (an administrative subdivision) of Mansehra District in the Khyber Pakhtunkhwa province of Pakistan. It is located in Balakot tehsil and lies in an area that was affected by the 2005 Kashmir earthquake. On 3 September 2007, survivors of the earthquake from Satbani, Ghanool, Garlat and Kawai Union Councils organised a protest in Satbani against the authorities for the delay in compensation. References Union councils of Mansehra District Populated places in Mansehra District
```csharp
// BUG FIX: this fence was mislabeled "smalltalk"; the content is C#.
// BUG FIX: added the Microsoft.Extensions.DependencyInjection using directive,
// without which ServiceCollection, BuildServiceProvider and GetRequiredService
// do not resolve.
using Elsa.Extensions;
using Elsa.Workflows;
using Elsa.Workflows.Activities;
using Elsa.Workflows.Contracts;
using Elsa.Workflows.Memory;
using Elsa.Workflows.Models;
using Elsa.Workflows.Options;
using Microsoft.Extensions.DependencyInjection;

// Wire up Elsa and resolve a workflow runner from the DI container.
var services = new ServiceCollection();
services.AddElsa();

var serviceProvider = services.BuildServiceProvider();
var runner = serviceProvider.GetRequiredService<IWorkflowRunner>();

// Input values handed to the workflow at run time, keyed by name.
var input = new Dictionary<string, object>
{
    ["name"] = "Anne",
    ["age"] = 37
};

var option = new RunWorkflowOptions
{
    Input = input
};

await runner.RunAsync<InputWorkflow>(option);

// Demonstrates reading named workflow input into variables and printing them.
public class InputWorkflow : WorkflowBase
{
    protected override void Build(IWorkflowBuilder builder)
    {
        var nameInput = new Variable<string>();
        var ageInput = new Variable<int>();

        builder.Root = new Sequence
        {
            Variables = { nameInput, ageInput },
            Activities =
            {
                // Copy each named input into a workflow variable so later
                // activities can read it via Variable<T>.Get(ctx).
                new SetVariable
                {
                    Variable = nameInput,
                    Value = new Input<object>(ctx => ctx.GetInput<string>("name"))
                },
                new SetVariable
                {
                    Variable = ageInput,
                    Value = new Input<object>(ctx => ctx.GetInput<int>("age"))
                },
                new WriteLine(ctx => $"Name: {nameInput.Get(ctx)}"),
                new WriteLine(ctx => $"Age: {ageInput.Get(ctx)}")
            }
        };
    }
}
```
```markdown # Using the GIS The `GIS` object in the `gis` module is the most important object when working with the ArcGIS API for Python. The GIS object represents the GIS you are working with, be it ArcGIS Online or an instance of ArcGIS Enterprise. You use the GIS object to consume and publish GIS content and administrators may use it to manage GIS users, groups and datastores. This object becomes your entry point in your Python script when using the API.``` ```markdown To use the GIS object, import GIS from the `arcgis.gis` module:``` ```python from arcgis.gis import GIS ``` ```markdown To create the GIS object, we pass in the url and our login credentials as shown below:``` ```python gis = GIS('home') ``` ```markdown If connecting to an ArcGIS Enterprise in your premises, your URL becomes `path_to_url`. Your GIS can support a [number of authentication schemes](path_to_url refer to [this section of the guide](path_to_url to know how to **authenticate your scripts and notebooks** for various such schemes.``` ```markdown Below, we're connecting to ArcGIS Online (the default GIS used when the url is not provided) as an anonymous user:``` ```python gis = GIS() ``` ```markdown Adding a '?' mark after an object and querying it brings up help for that object in the notebook:``` ```python gis? ``` ```markdown The notebook provides intellisense and code-completion. 
Typing a dot after an object and hitting tab brings up a drop-down with its properties and methods: <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAu8AAACJCAYAAACcsdY/your_sha256_hashyour_sha256_hashOnw568pe2ZQ2R00ggviFIA0sDhjBcjAX/GIT2o2ySbHabJN10sRyGtvrxGmU1HblOnaU/kjV9Eeem2d+SKPRaHe0O+your_sha256_hashB5f+Pmhiy98X35t9ePAwAAAADAPvO9jR/I+VvvuDY+PKHy/tRbPxzYICGEEEIIIWR/your_sha256_hashyour_sha256_hashRt4nKFrs27dvS6fTAQAAAIAJRF1Om7FhieLcyPsERc/Wwj4EAAAAADA5qNOFJYpzI+your_sha256_hash4BAAAAJh/kfUqCvAMAAABMPsj7lCQOeb/43ENiPPCEvNwKXw8AAAAA+wvyvq+5Ky9/tyrfmnP50TV3eZRck2e911k8e9ldvMsg7wAAAACTz3TL++VaT6y71OR9d/your_sha256_hashyour_sha256_hashash5k8nTLk6fO+5yjbyHvq8NOy3mz3LQcAAACAeEHeNdvI+7d+dNV9ZKX1lvzrSOM1+yzvI2T7zvtFeW420Hk3FmW9HXjeEHnvdFpy/your_sha256_hashfUHZuflufMhXfSh8t6jffVlWZxNyeMvt0LXAwAAAMDuQd410y7v55+VBx56Ts4HO+1BIsh7p60nAoPy/rW1j28MQ7557rz8xbccAAAAAKKDvGsmVd5j/your_sha512_hash+Za2XH5fUPz0r533LAAAAACBZ6Lzva/ZZ3kfIdvLefPHbknroWflZ99KQLVl/elaMx1/mh6cAAAAAYwTyvq+ZDHnvtC/Ki0/your_sha512_hash/your_sha512_hash+fMwwWgNVeC/+uorBB4AAABskPf7IH5x//your_sha256_hashGHNRDsxX9k3eK/MHZDFBedeaam21xlprrbnWnhBCCCHESxTnRt4TTLDrrl+l6FiFjlgMyvuGnH3mMTly8IAcONBjvrJur1+your_sha256_hash6Pbb5iilnFo/KQX3twYI8461fr8i8vb0g/u2vyWlv37OHZP7kik/yVcznpWKekcWjB+3XHiw8I2c3nPXmYnC7Dt7f7V6gtdSaam21xlprrTkCTwghhBB/ojg38p5gwrruvyour_sha512_hash+t7IoR2aPyWn7WJ1jmrWk+7HKmv14TY8x2GUf2nnfEPPJI3LEO3Y9ibCOpSffKu+zMnvoMams6WM91qCcJ99515pqbem+E0IIIWRYojg38p5g/PKus+7amW232/YPGYLybp482CfltqTPD86IbyfvB45a8r/W64hHxnxSZh8+KWfD1p0/J6ePHZDHnj/nW7YhZ05YAv2871sBS76769cqUgh2/ofJ+8YZOXHAOjFwO+k21slB7727Yr7SW79WKQT+BsnLu9ZUa6s11loj74QQQggJJopzh8r7U2/+EHm/B/your_sha512_hashyour_sha512_hash7aE3L8tRPyn699R77z6n/Ld2tPyvdr/zMg7zo2c+jQke7c99ETp2XNt95juLz32Fg7IyeOHJBjp/3d8m1QsX70lLwUts7tvPdvq3your_sha512_hashXG7evBmQ93V5/rEDcjyCbEeRd50bP2XJbFDeN8yT8ujsrDz6TGCWfsOUxYdnZf5UeLf+3Oljvm8FvJn3nnBHknd72bycsufW/eiJgDNvvx4q8FHkXb8JmN3DzP/e0FpqTbW23ty796NVOu+your_sha256_hash2_hashv69VoDsnswOv3H62l1hR5J4QQQsheg7wnGE/e/Vea0R82Xr9+PS
Dva3LqaLDzflaeeXSE0RdIDK2l1lRr68m71hx5J4QQQsioQd4TjF/e9a6qei3wcHk35clZvaLLmmy4yzZeqsj87BE5ebYniTCe+OVda6y1Rt4JIYQQspsg7wlmmLz/your_sha512_hash+EEEIIiSvIe4LZSd7J/RHknRBCCCFxBXlPMMj7dAR5J4QQQkhcQd4TDPI+HUHeCSGEEBJXkPcEE7+8b8nynCHpUk3a7hKSfJB3QgghhMQV5D3BIO/your_sha256_hashMfG95edNe1p+your_sha256_hash2sRuSKZSl3lvpHLv/mL3tOQ/your_sha256_hashyour_sha256_hashNKc8Y0nP7vBiFqmzZL+hI0yxY76ViPcvJzvJuCXl6Qar2BtpSX5rpe+97em/3IMg7IYQQQuJK6uOPPxaPX//613AP0b/5tWvX5MMPP5StrS25dOmSvP/++/Luu+8OyHujku4TXFt4C6uW2vZnO3lP5cpSb/your_sha256_hashq8L/nfku7fOh6veb+X93YvorXUmmpttcZaa6251p7/dwAAALAdQU9PNZtN8fj000/hHqJ/808++your_sha256_hashUs20Hk3lurSVeU+2Q5mU8qpvLjTPW4cIfcOM4q8970llfe+/e3hvd2DaC21plpbrbHWWmuutef/your_sha256_hash72W4vPfSadZkKZuSYi2i4jarkrf27czbpyW7EJg531bewzrv/csGjtk6WZjbRt479VLotw6akd/bPYjWUmuqtfXEXWuutdfPQNhnAwAAACCM1I0bN8Tj5s2bcA/Rv7kWQc+qtAv70Ucf2Z1ZHa/ol/e2rC5YQrq6s5BGkXfLcKVqCXFQcDuNiswZhswt95m2PbKjy4YOpWwr7/r6rBgFU5r2BtyZ91y1e/Kh3yqkcsuypQ9ajnz3Ouvu2Ezd3bv+KDZnDJfzIe8tyWgttaZaW62x1lprrrXn/x0AAABsR9DVU61WSzxu3boF9xD9m2sRPIHXOSbtzOp4RbDz3q6XJOO/0kvfFV0cwe2tc+lKfGB9OifFamNgtGSYvNsyour_sha512_hash+5ae/7fAQAAwHb4XV1J/e53vxNIBi3Ib3your_sha512_hashGwAAAABhpD7//HOBZNBLB3oCr51YnYPWK5DobHS/vDekbKRkwWx1O86dpikFIxuYJdyour_sha512_hashzDgo7zrqvSzFnDNaYo+tzC1Ixbn0zL6ns7Uq5YWspN3RlHQ2Hxhd2c/cP/KutdUaa6215lp7/QyEfTYAAAAAwkjdvXtXIBm0AF73XeefdZxCBU9/2BiUdzK50VpqTbW2WmOttdd1189A2GcDAAAAIIzUF198IZAMWgCv+65jFHrZQP1Bo97EB3m/f6K11JpqbbXGWmuv666fgbDPBgAAAEAYyHuCIO/your_sha256_hashyour_sha256_hashdaSwvSDbt3CjJI2+27LUtM99bHnpHo6aslnKSMbzXliXafY/0Jkl5WTbLMmcYMre8KZuVOTGMOak0erdpcm4ilba3nc4uyLJvnR5b3mxYrys4+09b2/Otv9+DvAMAAEBcIO8JMoq8d+your_sha256_hashm3n4ZUyzXZcjfeto51xuidHDgnFmlZqG7Z+7fXz1SsV01HkHcAAACIC+Q9QUaR90Yl3SflthAXVi217s928p7KlaXeGrXjrfKeklLdel3LlHyqJM4/w/fjZFPKqby4Xwo4z13yP9dd33Qf3udB3gEAACAukPcEGUXe27XiQOc9Vx203+FS3ZZGtSi5jCFGJicl0+your_sha256_hashaTdWbGjYzkSjVL4QezfUfcSadZk6VsSoq1KPq+your_sha256_hashWbajyLtl71K1hDwo751Gxf1Rqn8afWd5t/89tyxb+qDTkno5J8bA2ExdnIEd7dJb64vTczlL5B0AAADiAnlPkFE67+
16STLdkRQl7eu+O4LdW+fSlfjA+your_sha256_hashS7vTanmgp1yvSFT1NGXZNMn+lMY5B0AAADiAnlPkOjy3pCykZIFs+WOnujkiykFIyshV4scuyDvyDsAAADEA/KeINHlXdybIDmjKamUIZm5Bak4l54Z+yDvyDsAAADEA/your_sha256_hashyour_sha256_hashhYg7wDAABAXCDvCYK836u0pFbKSmW7q1Ui7wAAADABIO8JEr+8k/your_sha256_hashcZw3ttWSK77GbZ2mZNtsyS5HT/your_sha256_hashCs7xpfOy3PDuJatpSs07diMjhcpm3zcLm2X9O2zZxzdnPcfIFLqv13Xdv4sP7++m8T/Hv9xLp7EsC1l9XylJ50pibvmPzXp3vvVh20DeAQAAIC6Q9wQZRd479SUxsirI+qgpZsGQmUrDXufPsLuZNqs5mVmq726cRuXdku65pZrYWtqsWrK+IKvexmx5z0p2riCmLeUNKc8Y3U63HpNRqMqW/fyONM2C9V4q1rOcOCceaVmobtnH17be68yMt946aSlb27Y25rzcee9B+Z7JZmXOfU5rdUFSedM5VjvROu/your_sha256_hashSbgtvYdUnqE62k/dUriz1Vn/XOFJU3i0Z7vmpI8NL3m5ceS93jdYvwg2pzMxI/3lGU6pzlty7y+xj7m5MsynlVN4RZkuOS6mS1P2HvbnU9951X0bBd3zeNwHuQ+94dyPvzeqczPWZuXXykO8duyPvM1I0G0NPjJB3AAAAiAvkPUFGkfd2rTjQec+FtHuHyXt3dCVjiJHJScl0utyR4sp7T2sdGe6K7oAs++OKeJ8T98v04DHra9z19rZ7Yy1dfMcT2jHvy+7lXZcN7Nuib1vNmpQXspJOpSW7ULFOkNzlbpB3AAAAiAvkPUFGkXcdm8lkst257/6Z8l6Gy3svHUs2l7IpKdYi6vuAvPd3zreX97DOe/+ygWO2u+your_sha256_hashyour_sha256_hashy12ALuzrznvO25x7xUt9Zo2rJZyYlR9C532ZZa0ZCc/kh1iMDvLO/6HGPHmf/Q7Vh/k+xMUVYDP1Idlv55fSfIOwAAAMQF8p4go3Te2/WSZPpGN/xXdHE6y/6xDpuuxAfWp3NSrA7OaG8r793tOqMhzviOmx3k3RFy90oyetzFqjR8r7fl3du+/a2CKX2u3NkSs+9KOf2SHUXerT+glHMZMQZe74zodPc/sN566Wal72oyfVfq6fvbOFfS8V+IR4O8AwAAQFwg7wkSXd6bUs0FO+V6Q6YRRl/your_sha256_hashU5B0AAADiAnlPkOjyrqMby1Lsjn0YkplbkErf7Mo+your_sha256_hashBwAAgLhA3hMEeZ+OIO8AAAAQF8h7giDv0xHkHQAAAOICeU+Q+OVdr/your_sha256_hashyour_sha256_hashXdtZ12Y1kWsunuMXjH5aVZK0kuY1jrDMkUKtJ3fyl3+1tbppTmrOcYGSksN7p3k93TsY9BkHcAAACIC+Q9QUaR9059SYysJa229DbFLBgyU2nY6/your_sha256_hashe0VmXBt/09HfsYBHkHAACAuEDeE2QUeW9U0n1Sbkt6YdUn1E62k/dUriz1Vq+fHTmR5H1GimYjRLA7Ui+your_sha256_hashQec+FtLqHybu1BWlUi/boipHJScncit7J3kneNc2alBeykk6lJbtQsUTbXe6KuDdO08P3+oHtB7OHYx+DIO8AAAAQF8h7gowi7zo2k8lknblvIyO5Us1Syour_sha512_hash/L6oIlrKs7C2sUebcMWKr5QQHuNCoyZxgytxyYpbeWzxglsZ/e3pJqQX94OkTerbStE42Zrrxbj/Vbg5z+your_sha256_hashfPpQeZeObFZykrZea2QKstyoyVJw7MXbtn01mbLU+your_sha256_hashDsA
AADEBfKeINHlvSFlIyULZqt77fNO05SCkZWQq0WSMQvyDgAAAHGBvCfIMHm/evVqQN5F2pvLUsxlxPBGU+YWpNJ3pyMyrtFaak2RdwAAANgryHuC+OW91WrJ9evXh8o7mdz45V1rrLVG3gEAAGA3IO8J4sn77du3u/L+ySef2KL3k5/8RL766itX/8ikRmuotdSaam09edeaI+8AAAAwKsh7gvjlXUcpPHn/8MMP5fLly/LjH//your_sha256_hashGxobU63V5/your_sha256_hashyour_sha256_hashxy/utW7fk5s2b8tlnn3Vyour_sha512_hash/batWv2tcF/your_sha256_hashRBgftCZaG62R1kprprXTGmot6boDAABAHCDvCRPsvus8tHfZSJU+your_sha256_hashyour_sha256_hash8Dpm4c3B6yUGVQaDqCRCcoTVRGvlzbcrnrgrnrgj7wAAALAbkPcxwS/wXgde0Rlpv8hrF9f/b0iesNpozbz5dsXfcUfcAQAAYLcg72OEX+CDEu9HpRDGj7BaedKOuAMAAEAcIO9jhid4Hp74eTIP44+/ZsF6htUcAAAAIBpfyP8DRul8dO6D+EEAAAAASUVORK5CYII="></img>``` ```markdown ## Helper objects The `GIS` object provides helper objects to manage the GIS resources, i.e. the users, groups, content and datastores. These helper utilities are in the form of helper objects named: `users`, `groups`, `content` and `datastore` respectively. The helper utility for managing user roles named `roles` is available as a property on the helper object `users`. Each such helper object has similar patterns of usage: there are methods to `get()`, `search()` and `create()` the respective resources. The prescribed programming pattern is to not create the GIS resources (user, group, item, role, datastore) directly using their constructor, but to access them through their corresponding helper objects described above.``` ```markdown Thus, to access a user, you would use the `users` property of your `gis` object which gives you an instance of `UserManager` class. You would then call the `get()` method of the `UserManager` object and pass the user name of the user you are interested in.``` ```python user = gis.users.get('john.smith') ``` ```markdown ## Rich IDE experience with Jupyter notebooks The ArcGIS API for Python is integrated with Jupyter Notebook to make it easy to visualize and interact with GIS resources. 
The `user` object has a rich representation that can be queried like this:``` ```python user ``` ```markdown The resources are implemented as Python dictionaries. You can query for the resource properties using the <b><code>resource['property']</code></b> notation:``` ```python user['firstName'] ``` ```markdown The properties are also available as properties on the resource object, so you can use the dot notation to access them:``` ```python user.lastName ``` ```markdown The resources provide methods to `update()`, `delete()` and use the object. The reminder of topics in this module talk in detail about using the various helper objects and resource objects.``` ```markdown ### Embedded maps in Jupyter notebooks The `GIS` object includes a map widget that can be used to visualize the content of your GIS as well as see the results of your analysis. Let's bring up a map of Palm Springs, CA:``` ```python map1 = gis.map("Palm Springs, CA") map1 ``` ```markdown We can search for content in our GIS. Let's search for Hiking Trails in the Palm Springs region. We do that by calling **`gis.content.search()`** and for each web map or web layers that gets returned, we can display its rich representation within the notebook:``` ```python from IPython.display import display items = gis.content.search('Palm Springs Trails', item_type='feature layer') for item in items: display(item) ``` ```markdown We can then add the returned web layers to our map. To add the last layer returned above, we call the `add_layer()` method and pass in the layer for Palm Springs Trail:``` ```python # Let us filter out the item with title 'Trails' that we want to add item_to_add = [temp_item for temp_item in items if 'Trail' in temp_item.title] item_to_add ``` ```python map1.add_layer(item_to_add[0]) map1.zoom_to_layer(item_to_add[0].layers[0]) ``` ```markdown The above cell updated the map widget, if you scroll to the top, you can notice a new trail layer being rendered on the map.```
Pocasset River may refer to a stream in southern New England in the United States: Pocasset River (Massachusetts) Pocasset River (Rhode Island)
Thomas Neumann (born 1977) is a German computer scientist and full professor for Data Science and Engineering at the Technical University of Munich (TUM). Education and career Thomas Neumann finished his studies in business informatics at the University of Mannheim in 2001 and received his doctor's degree in computer science under the supervision of Guido Moerkotte in 2005. He then worked as a senior researcher at the Max Planck Institute for Computer Science in Saarbrücken with Gerhard Weikum. During this time, Neumann developed RDF-3X, a system for graph databases. He habilitated in 2010 at Saarland University. In the same year, he joined the group for database systems at TUM under Alfons Kemper as associate professor. In 2017, he became a full professor for Data Science and Engineering, also at TUM. Research His research areas are query optimisation and efficient query processing by just-in-time compilation. As part of this research, he developed the main memory database system HyPer, which was sold to Tableau Software in 2016, and its successor system Umbra. He was awarded the Gottfried Wilhelm Leibniz Prize by the German Research Foundation for his work on HyPer. External links Homepage of Thomas Neumann Publications indexed in the Digital Bibliography & Library Project (DBLP) HyPer: Hybrid OLTP&OLAP High-Performance Database System Umbra: A Disk-Based System with In-Memory Performance Awards 2021 VLDB Test of Time Award 2021 ICDE Ten-Year Influential Paper Award 2020 Gottfried Wilhelm Leibniz Prize 2016 ERC Consolidator Grant 2014 Early Career Award of the VLDB Conference 1994 International Olympiad in Informatics gold medal References German computer scientists Academic staff of the Technical University of Munich Gottfried Wilhelm Leibniz Prize winners 1977 births Living people Database researchers
Patrick J. Hartnett (October 20, 1863 – April 10, 1935) was a professional baseball player. Nicknamed "Happy", he played part of one season in Major League Baseball for the St. Louis Browns in 1890. External links Major League Baseball first basemen St. Louis Browns (AA) players Portland (minor league baseball) players Newburyport Clamdiggers players Boston Blues players Lowell Magicians players Toronto Canucks players St. Paul Apostles players Providence Clamdiggers (baseball) players Brockton Shoemakers players Baseball players from Boston 19th-century baseball players 1863 births 1935 deaths
Lieutenant George Rensbury Hicks (24 January 1900 – 24 November 1951) was a First World War flying ace credited with eight aerial victories. Biography Hicks was born in Walthamstow, Essex, to George and Matilda Ann Hicks on 24 January 1900. During the First World War, Hicks served with the Artists Rifles as a private up until 3 November 1917, when he joined the Royal Flying Corps as a lieutenant. Posted to No. 74 (Fighter) Squadron, Hicks achieved all eight of his victories in the Royal Aircraft Factory S.E.5a. Hicks scored his first victory on 15 July 1918, shooting down a Fokker D.VII. His second triumph came on 24 July, this time against a DFW C.V. Hicks shot down another DFW C.V on 19 August. His fourth and fifth victories both came on 5 September 1918, with Hicks shooting down two Fokker D.VIIs, becoming an ace. 24 September saw Hicks achieve another two triumphs in one day – shooting down a Siemens-Schuckert D.IV and an unknown Rumpler biplane. Two days later saw his last victory of the war, when Hicks shot down another Fokker D.VII. Hicks was wounded in action on 2 October 1918. On 3 June 1919, Hicks was awarded the Distinguished Flying Cross. Hicks died on 24 November 1951 in West Derby. References 1900 births 1951 deaths Royal Air Force personnel of World War I British World War I flying aces Recipients of the Distinguished Flying Cross (United Kingdom) British Army personnel of World War I Artists' Rifles soldiers Royal Flying Corps officers
```c /* packet-echo.c * Routines for ECHO packet disassembly (RFC862) * * Only useful to mark the packets as ECHO in the summary and in the * protocol hierarchy statistics (since not so many fields to decode ;-) * * Laurent Deniel <laurent.deniel@free.fr> * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * * This program is free software; you can redistribute it and/or * as published by the Free Software Foundation; either version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include <epan/packet.h> #define ECHO_PORT 7 void proto_register_echo(void); void proto_reg_handoff_echo(void); static int proto_echo = -1; static int hf_echo_data = -1; static int hf_echo_request = -1; static int hf_echo_response = -1; static gint ett_echo = -1; static int dissect_echo(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_) { int offset = 0; gboolean request = FALSE; if (pinfo->destport == pinfo->match_uint) { request = TRUE; } col_set_str(pinfo->cinfo, COL_PROTOCOL, "ECHO"); col_set_str(pinfo->cinfo, COL_INFO, (request) ? 
"Request" : "Response"); if (tree) { proto_tree *echo_tree; proto_item *ti, *hidden_item; ti = proto_tree_add_item(tree, proto_echo, tvb, offset, -1, ENC_NA); echo_tree = proto_item_add_subtree(ti, ett_echo); if (request) { hidden_item = proto_tree_add_boolean(echo_tree, hf_echo_request, tvb, 0, 0, 1); } else { hidden_item = proto_tree_add_boolean(echo_tree, hf_echo_response, tvb, 0, 0, 1); } PROTO_ITEM_SET_HIDDEN(hidden_item); proto_tree_add_item(echo_tree, hf_echo_data, tvb, offset, -1, ENC_NA); } return tvb_captured_length(tvb); } /* dissect_echo */ void proto_register_echo(void) { static hf_register_info hf[] = { { &hf_echo_data, { "Echo data", "echo.data", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }}, { &hf_echo_request, { "Echo request", "echo.request", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Echo data", HFILL }}, { &hf_echo_response, { "Echo response","echo.response", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Echo data", HFILL }} }; static gint *ett[] = { &ett_echo }; proto_echo = proto_register_protocol("Echo", "ECHO", "echo"); proto_register_field_array(proto_echo, hf, array_length(hf)); proto_register_subtree_array(ett, array_length(ett)); } void proto_reg_handoff_echo(void) { dissector_handle_t echo_handle; echo_handle = create_dissector_handle(dissect_echo, proto_echo); dissector_add_uint("udp.port", ECHO_PORT, echo_handle); dissector_add_uint("tcp.port", ECHO_PORT, echo_handle); } /* * Editor modelines - path_to_url * * Local Variables: * c-basic-offset: 2 * tab-width: 8 * indent-tabs-mode: nil * End: * * ex: set shiftwidth=2 tabstop=8 expandtab: * :indentSize=2:tabSize=8:noTabs=true: */ ```
Bouhmame is a small town and rural commune in Sidi Bennour Province of the Casablanca-Settat region of Morocco. At the time of the 2004 census, the commune had a total population of 30,540 people living in 5268 households. References Populated places in Sidi Bennour Province Rural communes of Casablanca-Settat
```typescript
/*
 * @license Apache-2.0
 *
 *
 *
 * path_to_url
 *
 * Unless required by applicable law or agreed to in writing, software
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

// NOTE(review): this fence was mislabeled "xml"; the content is a TypeScript
// declaration (.d.ts) file.

// TypeScript Version: 4.1

/// <reference types="@stdlib/types"/>

import { ArrayLike } from '@stdlib/types/array';

/**
* Interface describing loop interchange data.
*/
interface LoopOrderObject {
	/**
	* Dimensions sorted in loop order.
	*/
	sh: Array<number>;

	/**
	* Array strides sorted in loop order.
	*/
	sx: Array<number>;

	/**
	* Dimension indices sorted in loop order.
	*/
	idx: Array<number>;
}

/**
* Reorders ndarray dimensions and associated strides for loop interchange.
*
* ## Notes
*
* -   The returned object has the following properties:
*
*     -   **sh**: dimensions sorted in loop order.
*     -   **sx**: ndarray strides sorted in loop order.
*     -   **idx**: dimension indices sorted in loop order.
*
* -   When iterating over the elements of a multi-dimensional array, accessing elements which are closer in memory can improve performance. To this end, loop interchange is a technique used in loop nest optimization to improve locality of reference and take advantage of CPU cache.
*
*     The purpose of this function is to order ndarray dimensions according to the magnitude of array strides. By using the ordered dimensions and associated strides, one can construct nested loops (one for each dimension) such that the innermost loop iterates over the dimension in which array elements are closest in memory and the outermost loop iterates over the dimension in which array elements are farthest apart in memory. As a consequence, element iteration is optimized to minimize cache misses and ensure locality of reference.
*
* @param sh - array dimensions
* @param sx - array stride lengths
* @returns loop interchange data
*
* @example
* var sh = [ 2, 3, 4 ];
*
* var sx = [ 12, 4, 1 ]; // row-major
*
* var o = nullaryLoopOrder( sh, sx );
* // returns {...}
*
* var ssh = o.sh;
* // returns [ 4, 3, 2 ]
*
* var ssx = o.sx;
* // returns [ 1, 4, 12 ]
*
* var idx = o.idx;
* // returns [ 2, 1, 0 ]
*/
declare function nullaryLoopOrder( shape: ArrayLike<number>, stridesX: ArrayLike<number> ): LoopOrderObject;


// EXPORTS //

export = nullaryLoopOrder;
```
```c++
//===---------------------- system_error.cpp ------------------------------===//
//
// See path_to_url for license information.
//
//===your_sha256_hash------===//

#include "__config"
#include "system_error"
#include "include/config_elast.h"
#include "cerrno"
#include "cstring"
#include "cstdio"
#include "cstdlib"
#include "string"
#include "string.h"
#include "__debug"

#if defined(__ANDROID__)
#include <android/api-level.h>
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

// class error_category

// NOTE(review): the macro name below appears redaction-garbled; presumably it
// guards emitting the (defaulted) constructor out of line -- confirm against
// the original libc++ source.
#if defined(your_sha256_hashCTIONS)

error_category::error_category() _NOEXCEPT
{
}

#endif

error_category::~error_category() _NOEXCEPT
{
}

// Default mapping: an error value in this category maps to the condition with
// the same value in the same category.
error_condition
error_category::default_error_condition(int ev) const _NOEXCEPT
{
    return error_condition(ev, *this);
}

// A code is equivalent to a condition if the code's default condition
// compares equal to it.
bool
error_category::equivalent(int code, const error_condition& condition) const _NOEXCEPT
{
    return default_error_condition(code) == condition;
}

// A code object is equivalent to a raw condition value if it belongs to this
// category and carries the same value.
bool
error_category::equivalent(const error_code& code, int condition) const _NOEXCEPT
{
    return *this == code.category() && code.value() == condition;
}

#if !defined(_LIBCPP_HAS_NO_THREADS)
namespace
{

// GLIBC also uses 1024 as the maximum buffer size internally.
constexpr size_t strerror_buff_size = 1024;

string do_strerror_r(int ev);

#if defined(_LIBCPP_MSVCRT_LIKE)
// MSVC-like runtimes: use strerror_s, falling back to a formatted
// "unknown error" message if it fails.
string do_strerror_r(int ev) {
  char buffer[strerror_buff_size];
  if (::strerror_s(buffer, strerror_buff_size, ev) == 0)
    return string(buffer);
  std::snprintf(buffer, strerror_buff_size, "unknown error %d", ev);
  return string(buffer);
}
#else

// Only one of the two following functions will be used, depending on
// the return type of strerror_r:

// For the GNU variant, a char* return value:
__attribute__((unused)) const char *
handle_strerror_r_return(char *strerror_return, char *buffer) {
  // GNU always returns a string pointer in its return value. The
  // string might point to either the input buffer, or a static
  // buffer, but we don't care which.
  return strerror_return;
}

// For the POSIX variant: an int return value.
```
// POSIX variant of strerror_r: an int return value rather than a char*.
__attribute__((unused)) const char *
handle_strerror_r_return(int strerror_return, char *buffer) {
  // The POSIX variant either:
  //  - fills in the provided buffer and returns 0
  //  - returns a positive error value, or
  //  - returns -1 and fills in errno with an error value.
  if (strerror_return == 0)
    return buffer;

  // Only handle EINVAL. Other errors abort.
  int new_errno = strerror_return == -1 ? errno : strerror_return;
  if (new_errno == EINVAL)
    return "";

  _LIBCPP_ASSERT(new_errno == ERANGE, "unexpected error from ::strerror_r");
  // FIXME maybe? 'strerror_buff_size' is likely to exceed the
  // maximum error size so ERANGE shouldn't be returned.
  std::abort();
}

// This function handles both GNU and POSIX variants, dispatching to
// one of the two above functions.
string do_strerror_r(int ev) {
    char buffer[strerror_buff_size];
    // Preserve errno around the call. (The C++ standard requires that
    // system_error functions not modify errno).
    const int old_errno = errno;
    // Overload resolution picks the handler matching this platform's
    // ::strerror_r return type (char* for GNU, int for POSIX).
    const char *error_message = handle_strerror_r_return(
        ::strerror_r(ev, buffer, strerror_buff_size), buffer);
    // If we didn't get any message, print one now.
    if (!error_message[0]) {
      std::snprintf(buffer, strerror_buff_size, "Unknown error %d", ev);
      error_message = buffer;
    }
    errno = old_errno;
    return string(error_message);
}
#endif

} // end namespace
#endif

// Shared message() implementation: delegate to strerror (single-threaded
// builds) or the thread-safe strerror_r wrapper above.
string
__do_message::message(int ev) const
{
#if defined(_LIBCPP_HAS_NO_THREADS)
    return string(::strerror(ev));
#else
    return do_strerror_r(ev);
#endif
}

class _LIBCPP_HIDDEN __generic_error_category
    : public __do_message
{
public:
    virtual const char* name() const _NOEXCEPT;
    virtual string message(int ev) const;
};

const char*
__generic_error_category::name() const _NOEXCEPT
{
    return "generic";
}

string
__generic_error_category::message(int ev) const
{
#ifdef _LIBCPP_ELAST
    // Values beyond the platform's last errno have no strerror text.
    if (ev > _LIBCPP_ELAST)
      return string("unspecified generic_category error");
#endif // _LIBCPP_ELAST
    return __do_message::message(ev);
}

const error_category&
generic_category() _NOEXCEPT
{
    // Meyers singleton: one category instance per process.
    static __generic_error_category s;
    return s;
}

class _LIBCPP_HIDDEN __system_error_category
    : public __do_message
{
public:
    virtual const char* name() const _NOEXCEPT;
    virtual string message(int ev) const;
    virtual error_condition default_error_condition(int ev) const _NOEXCEPT;
};

const char*
__system_error_category::name() const _NOEXCEPT
{
    return "system";
}

string
__system_error_category::message(int ev) const
{
#ifdef _LIBCPP_ELAST
    if (ev > _LIBCPP_ELAST)
      return string("unspecified system_category error");
#endif // _LIBCPP_ELAST
    return __do_message::message(ev);
}

error_condition
__system_error_category::default_error_condition(int ev) const _NOEXCEPT
{
#ifdef _LIBCPP_ELAST
    // Unknown system errors stay in system_category; known ones map to the
    // portable generic_category below.
    if (ev > _LIBCPP_ELAST)
      return error_condition(ev, system_category());
#endif // _LIBCPP_ELAST
    return error_condition(ev, generic_category());
}

const error_category&
system_category() _NOEXCEPT
{
    static __system_error_category s;
    return s;
}

// error_condition

string
error_condition::message() const
{
    return __cat_->message(__val_);
}

// error_code

string
error_code::message() const
{
    return __cat_->message(__val_);
}

// system_error
// Build the what() string: "<what_arg>: <ec.message()>", or just the message
// when what_arg is empty, or just what_arg when the code is zero.
string
system_error::__init(const error_code& ec, string what_arg)
{
    if (ec)
    {
        if (!what_arg.empty())
            what_arg += ": ";
        what_arg += ec.message();
    }
    return what_arg;
}

system_error::system_error(error_code ec, const string& what_arg)
    : runtime_error(__init(ec, what_arg)),
      __ec_(ec)
{
}

system_error::system_error(error_code ec, const char* what_arg)
    : runtime_error(__init(ec, what_arg)),
      __ec_(ec)
{
}

system_error::system_error(error_code ec)
    : runtime_error(__init(ec, "")),
      __ec_(ec)
{
}

system_error::system_error(int ev, const error_category& ecat, const string& what_arg)
    : runtime_error(__init(error_code(ev, ecat), what_arg)),
      __ec_(error_code(ev, ecat))
{
}

system_error::system_error(int ev, const error_category& ecat, const char* what_arg)
    : runtime_error(__init(error_code(ev, ecat), what_arg)),
      __ec_(error_code(ev, ecat))
{
}

system_error::system_error(int ev, const error_category& ecat)
    : runtime_error(__init(error_code(ev, ecat), "")),
      __ec_(error_code(ev, ecat))
{
}

system_error::~system_error() _NOEXCEPT
{
}

// Throw a system_error for errno value `ev`, or abort when exceptions are
// disabled (the arguments are consumed to silence unused warnings).
void
__throw_system_error(int ev, const char* what_arg)
{
#ifndef _LIBCPP_NO_EXCEPTIONS
    throw system_error(error_code(ev, system_category()), what_arg);
#else
    (void)ev;
    (void)what_arg;
    _VSTD::abort();
#endif
}

_LIBCPP_END_NAMESPACE_STD
```
Bellsmyre is a large housing estate in the town of Dumbarton in West Dunbartonshire, Scotland. Located on the edge of the Kilpatrick Hills in the northeast of Dumbarton. Bellsmyre is one of five predominantly council housing schemes in Dumbarton, the other four being Westcliff, Silverton, Brucehill and Castlehill. The estate was originally masterplanned by the architect Ninian Johnston of Boswell, Mitchell & Johnston. The first streets were built in the late 1940s, i.e. Aitkenbar Circle, Bellsmyre Avenue, Carman View and Lomond Drive. The original houses in Bellsmyre are recognisable as they are British Iron and Steel Federation houses. The scheme was expanded by the Scottish Special Housing Association (SSHA) in the 1950s as a municipal housing estate, with many residents moving from the city of Glasgow as part of the "overspill" programme of moving people from inner city districts to new towns and other areas outwith the city boundaries. Much of the remaining publicly owned housing under landlord Scottish Homes was transferred to the Bellsmyre Housing Association in 1992. There are two later built areas adjacent to Bellsmyre; Stoneyflatt and Glenside, of which Stoneyflatt has been almost totally demolished to make way for new mixed tenure housing. There are also two primary schools in the area, namely Aitkenbar and St Peter's. St Peter's Roman Catholic Church in Bellsmyre was designed by Garner, Preston & Strebel and opened in 1971, winning a RIBA award in 1973. In 2002, Bellsmyre was selected as the pilot area for a Scottish Executive digital inclusion project, which entailed the free provision of home computers with internet access to all households in the area. On 5 June 2007, an unexploded World War II bomb was found at a housing construction site, and some houses nearby and Aitkenbar Primary School were evacuated. The pupils were relocated to St Peters Primary School for that day.
It was the childhood home of John McFall, a former MP for Dumbarton and the current Lord Speaker. In a profile of him after his election, The Times noted, "the language which Lord McFall of Alcluith uses to describe the way peers scrutinise and amend bills passed to them from the House of Commons, is pure Bellsmyre." References External links Bellsmyre Housing Association Bellsmyre Housing Complaints Bellsmyre Community Portal Website Dumbarton Housing estates in Scotland
```javascript import { vi } from 'vitest'; const fs = vi.createMockFromModule('fs'); // This is a custom function that our tests can use during setup to specify // what the files on the "mock" filesystem should look like when any of the // `fs` APIs are used. let mockFiles = Object.create(null); // eslint-disable-next-line no-underscore-dangle, @typescript-eslint/naming-convention function __setMockFiles(newMockFiles) { mockFiles = newMockFiles; } // A custom version of `readdirSync` that reads from the special mocked out // file list set via __setMockFiles const readFileSync = (filePath = '') => mockFiles[filePath]; const existsSync = (filePath) => !!mockFiles[filePath]; const lstatSync = (filePath) => ({ isFile: () => !!mockFiles[filePath], }); // eslint-disable-next-line no-underscore-dangle fs.__setMockFiles = __setMockFiles; fs.readFileSync = readFileSync; fs.existsSync = existsSync; fs.lstatSync = lstatSync; module.exports = fs; ```
is a Japanese film director and screenwriter. An alumnus of the University of Tokyo and the Tokyo National University of Fine Arts and Music, he started gaining attention in his home country with the graduate film Passion (2008). Hamaguchi first gained international recognition with the film Happy Hour (2015) and continued with Asako I & II (2018). In 2021, he released two films, Wheel of Fortune and Fantasy and Drive My Car; for the latter he received two Academy Awards nominations, for Best Director and Best Adapted Screenplay. He is the third Japanese director to be nominated for an Oscar for Best Director. Career After graduating from the University of Tokyo, Hamaguchi worked in the commercial film industry for a few years before entering the graduate program in film at Tokyo University of the Arts where he studied with and was influenced by Kiyoshi Kurosawa. His graduation film Passion was selected for the competition of the 2008 Tokyo Filmex. With Kō Sakai, he made a three-part documentary about survivors of the 2011 Tōhoku earthquake and tsunami, with Voices from the Waves being selected for the competition at the 2013 Yamagata International Documentary Film Festival, and Storytellers winning the Sky Perfect IDEHA Prize. His next film, Happy Hour, was first developed while Hamaguchi was an artist in residence at KIITO Design and Creative Center Kobe in 2013. It came out of an improvisational acting workshop he held for non-professionals, with many of the film's performers having participated in the workshop. The four lead actresses shared the best actress award and the film earned a special mention for its script at the 2015 Locarno Film Festival. Hamaguchi was also given a special jury award at the 2016 Japan Movie Critic Awards, as well as a best newcomer award in the film division of the Agency for Cultural Affairs' Geijutsu Sensho Awards that year. His Asako I & II was selected to compete for the Palme d'Or at the 2018 Cannes Film Festival. 
In 2021, Hamaguchi won the Silver Bear award at the Berlinale with his Wheel of Fortune and Fantasy. That same year his Drive My Car won Best Picture awards from the New York Film Critics Circle, Boston Society of Film Critics, and Los Angeles Film Critics Association as well as "Best Motion Picture - Non-English Language" at the Golden Globes. Hamaguchi was nominated for an Oscar for Best Director for Drive My Car, becoming the third Japanese director to accomplish this feat. In 2023, his film Evil Does Not Exist was awarded the Grand Jury Prize at the Venice Film Festival. Influences and style Hamaguchi has referred to himself as "purely a cinephile" and "conventionally in love with Hollywood films." He has been influenced by the works of John Cassavetes. Quotes "To some extent, all films are fiction and documentary at the same time. I have experienced to make both, and I believe there is no such thing as pure fiction or pure documentary." "The actor is acting in front of the camera. What the camera captures there is a documentary about the actors, because they're doing something which happens only once." (On the multilingual staging in Drive My Car) "In a multilingual staging, of course, they're not understanding the meaning of the words. Instead, the body language and the voice tones is what becomes more important to convey those feelings or the emotional state of the respective actors. It becomes easier to focus and react. That's a nice way I look at it to get a more simple and strong performance." (On the ending of Drive My Car) "Once I talked with a big fan of Drive My Car who said that it really would have been perfect without that ending. (Laugh) Well, I think maybe the reason I ended that way is to make it a bit imperfect." "In terms of the final staging of the play in applause, if I had ended the movie at that point, presumably the audience would want to do a round of applause, and it would almost be like closing of a full circle. 
But for me that didn't really feel like a satisfying ending. I wanted to do something a bit more disruptive, to leave some sort of break." (On the ending of Drive My Car) "I have no any plans of making a sequel, but I was just sort of playing around with things at the end there. One other thing I'd like to say is that the title itself also might give a clue to how you can interpret the ending." Filmography Awards References External links 1978 births Living people Japanese film directors Japanese screenwriters People from Kanagawa Prefecture Tokyo University of the Arts alumni University of Tokyo alumni Cannes Film Festival Award for Best Screenplay winners Directors of Best Foreign Language Film Academy Award winners
```objective-c
//
//  QBAssetsCollectionViewCell.h
//  QBImagePickerController
//
//  Created by Tanaka Katsuma on 2013/12/31.
//

#import <UIKit/UIKit.h>
#import <AssetsLibrary/AssetsLibrary.h>

// Collection-view cell that displays a single photo-library asset.
@interface QBAssetsCollectionViewCell : UICollectionViewCell

// The asset backing this cell. Retained (strong) for the cell's lifetime.
@property (nonatomic, strong) ALAsset *asset;

// When YES, an overlay view is shown while the cell is in the selected
// state; when NO, selection has no extra visual treatment.
@property (nonatomic, assign) BOOL showsOverlayViewWhenSelected;

@end
```
Topuk is a neighbourhood in the municipality and district of Orhaneli, Bursa Province in Turkey. Its population is 166 (2022). References Neighbourhoods in Orhaneli District
LejΕ‚awki MaΕ‚e is a village in the administrative district of Gmina Orneta, within Lidzbark County, Warmian-Masurian Voivodeship, in northern Poland. It lies approximately north-west of Orneta, west of Lidzbark WarmiΕ„ski, and north-west of the regional capital Olsztyn. Before 1772 the area was part of Kingdom of Poland, and in 1772–1945 it belonged to Prussia and Germany (East Prussia). References Villages in Lidzbark County
John Montgomery Ward (March 3, 1860 – March 4, 1925), known as Monte Ward, was an American Major League Baseball pitcher, shortstop, second baseman, third baseman, manager, executive, union organizer, owner and author. Ward, of English descent, was born in Bellefonte, Pennsylvania and grew up in Renovo, Pennsylvania. He led the formation of the first professional sports players union and a new baseball league, the Players' League. Early life Ward attended the Bellefonte Academy in the early 1870s, and at 13 years of age, he was sent to Pennsylvania State University. In his short time there, he helped jumpstart a baseball program and is often credited for developing the first curveball. However, he was kicked out of school for pushing an upperclassman who attempted to haze him down a flight of stairs, and stealing chickens. The following year, in 1874, his parents James and Ruth died. He tried to make it as a travelling salesman, but when that proved unsuccessful, he returned to his hometown. There, he rediscovered baseball. In , the semiprofessional team for which he was playing folded, which opened the door for him to move on to a new opportunity. He was offered a contract to pitch for the Providence Grays of the still new National League, an all-professional major league that had begun its operations in . Providence Grays Ward's first season with the Grays was a successful one, going 22–13 with a 1.51 ERA. He played that season exclusively as a pitcher, but during the following two seasons he played increasingly in the outfield and at third base. Ward had his two finest seasons as a pitcher, going 47–19 with 239 strikeouts and a 2.15 ERA in 1879 and 39–24 with 230 strikeouts and a 1.74 ERA in 1880. He pitched nearly 600 innings each year (587.0 in 1879 and 595.0 in 1880). As a 19-year-old pitcher, he won 47 games and led the Providence Grays to a first-place finish. In , he began to play other positions. 
On June 17, 1880, Ward pitched the second perfect game in baseball history, defeating future Hall of Famer Pud Galvin and the Buffalo Bisons, 5–0. Lee Richmond had thrown baseball's first perfect game just five days before, on June 12. The next perfect game by a National League pitcher would not happen for 84 years, when Jim Bunning pitched a perfect game in 1964. Ward also expanded his leadership role to include managing when he became a player-manager for the team's final 32 games, winning 18 of them, as the Grays finished in second place. The seasons of and were the first in which he played more games in the outfield than he pitched. This was due to a nagging arm injury he originally incurred sliding into a base. He still pitched well when he did pitch, winning 37 games over those two seasons and having ERAs of 2.13 and 2.59 respectively, and on August 17, 1882, he pitched the longest complete game shutout in history, blanking the Detroit Wolverines 1–0 in 18 innings. By this time, however, the Grays felt his best days were behind him and sold their former ace hurler to the New York Giants. New York and reserve clause Ward moved to the new New York NL club in 1883. An injury to his right arm while running the bases during the 1884 season ended Ward's pitching career. As he could not wait for his arm to heal before he returned to the field, he taught himself to throw left-handed so he could play center field for the remainder of the 1884 season. He replaced Jim Price as the Giants' manager for the final 16 games of the 1884 season. With his arm fully recuperated, he became the everyday shortstop in 1885. Ward graduated from Columbia Law School in 1885 and led the players in forming the Brotherhood of Professional Base Ball Players, the first sports labor union. 
Ward and the players had become frustrated with the owners' reserve clause, which allowed them to sign players to one-year contracts and then not allow them to negotiate with other teams when those contracts expired. The players felt that the owners had absolute power. At first, the players had some success, gaining the freedom to negotiate with other teams when they were asked to take a pay cut by their current team. In October 1887, Ward married actress Helen Dauvray. In , after the Giants had finished first in the National League, and had won a playoff series known today as a "World Series", they played the St. Louis Browns of the American Association for the "Dauvray Cup", which was named after Ward's wife. Ward then captained an All-Star team which, paired with Cap Anson's Chicago club, headed off on a barnstorming world tour. The owners held their winter meetings, and created a classification system that would determine a player's salary. Under the system, the most a player could earn was $2,500 (). The Giants then sold Ward to the Washington Nationals for a record price of $12,000. Ward was furious and left the tour early. He then demanded a meeting with the owners, and said he would refuse to play for Washington unless he received a large portion of his record sale price. Washington eventually refused payment on the transaction, nullifying the deal. The owners denied Ward's request for a meeting to discuss the new classification system, saying no talks would be held until after the upcoming season. Though Ward and the union fought hard for these issues, this did not distract him or his Giants team, as he hit .299 and helped the Giants capture their second-straight "World Series" title in . Amidst Ward's commitments as a ballplayer and union organizer, he still found time for a third occupation, that of author. 
His 1888 book, Base-Ball: How to Become a Player, with the Origin, History and Explanation of the Game was the first published effort to explore baseball's development from its early roots. The Players' League Ward realized that negotiations with the owners were going nowhere and threatened to create a Players' League. The owners thought of it as nothing more than an idle threat but had failed to realize Ward's connections in the business community, and he began to launch the new league. This new Players' League included a profit sharing system for the players and had no reserve clause or classification plan. The season began in with over half of the National League's players from the previous year in its ranks. Ward acted as a player-manager for the Brooklyn club, nicknamed the Ward's Wonders, and finished seventh in the league with a .335 batting average. While the Players' League drew well at the box office, the teams' owners grew nervous when the money did not come in as expected because of the profit sharing system. Soon they began holding secret meetings with their National League counterparts and, one by one, sold their teams to the rival league. Later career Due to an agreement after the dissolution of the Players' League, Ward stayed in Brooklyn as player-manager for the National League team, the Brooklyn Grooms. Following the season, Ward expressed his desire to return to the Giants and was sold to his former club for $6,000. Despite his declining bat, Ward led the league in stolen bases in 1893. Following the 1894 season, he retired at the age of 34. He finished his career with a .275 average, 2,104 hits, and 540 stolen bases. He is the only man in history to win over 100 games as a pitcher and collect over 2,000 hits. Post-career Ward retired from baseball at age 34 in order to enter the legal profession. As a successful lawyer he represented baseball players against the National League. 
Later he acted as president and part-owner of the Boston Braves franchise and became an official in the short-lived Federal League in 1914, acting as the business manager of the Brooklyn Tip-Tops. In the last quarter century of his life, Ward's sporting passion became golf. He won several championships around New York, played all over Europe, and competed regularly in the United States Golf Association U.S. Amateur. He finished second in the prestigious North and South Amateur Championship at Pinehurst Country Club in North Carolina in 1903. The North and South Amateur was the equal of any major golf event at the turn of the century. The first North and South event took place in 1901. Ever the organizer, he was one of the founders of the New York Golf Association and the Long Island Golf Association. Ward died in Augusta, Georgia, the day after his 65th birthday on March 4, 1925, after a bout of pneumonia, and is interred in Greenfield Cemetery in Hempstead, Long Island, New York. Named in the Honor Rolls of Baseball in 1946, Ward was elected to the Baseball Hall of Fame by the Veterans Committee in 1964. He was inducted into the Suffolk Sports Hall of Fame on Long Island in the Baseball and Historic Recognition Categories with the Class of 2000. 
See also List of Major League Baseball annual ERA leaders List of Major League Baseball perfect games List of Major League Baseball player-managers List of Major League Baseball career hits leaders List of Major League Baseball career runs scored leaders List of Major League Baseball career stolen bases leaders List of Major League Baseball annual saves leaders List of Major League Baseball annual stolen base leaders List of Major League Baseball annual strikeout leaders List of Major League Baseball annual wins leaders References Bibliography In-line citations External links Retrosheet Box score for Ward's perfect game National Baseball Hall of Fame inductees Providence Grays players New York Gothams players New York Giants (NL) players Brooklyn Ward's Wonders players Brooklyn Grooms players New York Gothams managers Brooklyn Bridegrooms managers New York Giants (NL) managers Providence Grays managers Major League Baseball shortstops Major League Baseball second basemen Major League Baseball pitchers Major League Baseball outfielders Major League Baseball pitchers who have pitched a perfect game Major League Baseball player-managers 19th-century baseball players National League ERA champions National League strikeout champions National League wins champions National League stolen base champions Baseball players from Pennsylvania Penn State Nittany Lions baseball players Philadelphia Athletic players Philadelphia (minor league baseball) players Janesville Mutual players Buffalo (minor league baseball) players Binghamton Crickets (1870s) players Baseball developers Sports labor leaders Columbia Law School alumni Pennsylvania State University alumni Burials in New York (state) 1860 births 1925 deaths Deaths from pneumonia in Georgia (U.S. state) American people of English descent Players' League
```go
package brotli

/* Distributed under MIT license.
   See file LICENSE for detail or copy at path_to_url */

/* Greedy block splitter for one block category (literal, command or distance). */
type blockSplitterCommand struct {
	alphabet_size_   uint    // size of the symbol alphabet for entropy computation
	min_block_size_  uint    // minimum number of symbols per emitted block
	split_threshold_ float64 // entropy gain required to start a new block type
	num_blocks_      uint    // number of blocks emitted so far
	split_           *blockSplit
	histograms_      []histogramCommand // one histogram per block type, plus one scratch slot
	histograms_size_ *uint
	target_block_size_ uint // symbols to accumulate before the next split decision
	block_size_        uint // symbols gathered in the current, not-yet-emitted block
	curr_histogram_ix_ uint // histogram accumulating the current block
	last_histogram_ix_ [2]uint    // histogram indices of the last two block types
	last_entropy_      [2]float64 // entropies matching last_histogram_ix_
	merge_last_count_  uint       // consecutive merges into the last block
}

// initBlockSplitterCommand prepares `self` for a stream of `num_symbols`
// symbols, sizing the split arrays and histogram slice for the worst case.
func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramCommand, histograms_size *uint) {
	var max_num_blocks uint = num_symbols/min_block_size + 1
	var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1)
	/* We have to allocate one more histogram than the maximum number of
	   block types for the current histogram when the meta-block is too
	   big. */
	self.alphabet_size_ = alphabet_size
	self.min_block_size_ = min_block_size
	self.split_threshold_ = split_threshold
	self.num_blocks_ = 0
	self.split_ = split
	self.histograms_size_ = histograms_size
	self.target_block_size_ = min_block_size
	self.block_size_ = 0
	self.curr_histogram_ix_ = 0
	self.merge_last_count_ = 0
	brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
	brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
	self.split_.num_blocks = max_num_blocks
	*histograms_size = max_num_types
	// Reuse the caller's histogram slice when it is large enough.
	if histograms == nil || cap(*histograms) < int(*histograms_size) {
		*histograms = make([]histogramCommand, (*histograms_size))
	} else {
		*histograms = (*histograms)[:*histograms_size]
	}

	self.histograms_ = *histograms

	/* Clear only current histogram. */
	histogramClearCommand(&self.histograms_[0])

	self.last_histogram_ix_[1] = 0
	self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}

/* Does either of three things:
   (1) emits the current block with a new block type;
   (2) emits the current block with the type of the second last block;
   (3) merges the current block with the last block. */
func blockSplitterFinishBlockCommand(self *blockSplitterCommand, is_final bool) {
	var split *blockSplit = self.split_
	var last_entropy []float64 = self.last_entropy_[:]
	var histograms []histogramCommand = self.histograms_
	self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_)
	if self.num_blocks_ == 0 {
		/* Create first block. */
		split.lengths[0] = uint32(self.block_size_)

		split.types[0] = 0
		last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_)
		last_entropy[1] = last_entropy[0]
		self.num_blocks_++
		split.num_types++
		self.curr_histogram_ix_++
		if self.curr_histogram_ix_ < *self.histograms_size_ {
			histogramClearCommand(&histograms[self.curr_histogram_ix_])
		}
		self.block_size_ = 0
	} else if self.block_size_ > 0 {
		var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_)
		var combined_histo [2]histogramCommand
		var combined_entropy [2]float64
		var diff [2]float64
		// Evaluate merging the current block into the last (j=0) and the
		// second-last (j=1) block types; diff[j] is the entropy cost saved
		// by keeping the block separate.
		var j uint
		for j = 0; j < 2; j++ {
			var last_histogram_ix uint = self.last_histogram_ix_[j]
			combined_histo[j] = histograms[self.curr_histogram_ix_]
			histogramAddHistogramCommand(&combined_histo[j], &histograms[last_histogram_ix])
			combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_)
			diff[j] = combined_entropy[j] - entropy - last_entropy[j]
		}

		if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ {
			/* Create new block. */
			split.lengths[self.num_blocks_] = uint32(self.block_size_)

			split.types[self.num_blocks_] = byte(split.num_types)
			self.last_histogram_ix_[1] = self.last_histogram_ix_[0]
			self.last_histogram_ix_[0] = uint(byte(split.num_types))
			last_entropy[1] = last_entropy[0]
			last_entropy[0] = entropy
			self.num_blocks_++
			split.num_types++
			self.curr_histogram_ix_++
			if self.curr_histogram_ix_ < *self.histograms_size_ {
				histogramClearCommand(&histograms[self.curr_histogram_ix_])
			}
			self.block_size_ = 0
			self.merge_last_count_ = 0
			self.target_block_size_ = self.min_block_size_
		} else if diff[1] < diff[0]-20.0 {
			split.lengths[self.num_blocks_] = uint32(self.block_size_)
			split.types[self.num_blocks_] = split.types[self.num_blocks_-2]
			/* Combine this block with second last block. */
			var tmp uint = self.last_histogram_ix_[0]
			self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
			self.last_histogram_ix_[1] = tmp
			histograms[self.last_histogram_ix_[0]] = combined_histo[1]
			last_entropy[1] = last_entropy[0]
			last_entropy[0] = combined_entropy[1]
			self.num_blocks_++
			self.block_size_ = 0
			histogramClearCommand(&histograms[self.curr_histogram_ix_])
			self.merge_last_count_ = 0
			self.target_block_size_ = self.min_block_size_
		} else {
			/* Combine this block with last block. */
			split.lengths[self.num_blocks_-1] += uint32(self.block_size_)

			histograms[self.last_histogram_ix_[0]] = combined_histo[0]
			last_entropy[0] = combined_entropy[0]
			if split.num_types == 1 {
				last_entropy[1] = last_entropy[0]
			}

			self.block_size_ = 0
			histogramClearCommand(&histograms[self.curr_histogram_ix_])
			self.merge_last_count_++
			// Repeated merges grow the target so decisions happen less often.
			if self.merge_last_count_ > 1 {
				self.target_block_size_ += self.min_block_size_
			}
		}
	}

	if is_final {
		*self.histograms_size_ = split.num_types
		split.num_blocks = self.num_blocks_
	}
}

/* Adds the next symbol to the current histogram. When the current
   histogram reaches the target size, decides on merging the block. */
func blockSplitterAddSymbolCommand(self *blockSplitterCommand, symbol uint) {
	histogramAddCommand(&self.histograms_[self.curr_histogram_ix_], symbol)
	self.block_size_++
	if self.block_size_ == self.target_block_size_ {
		blockSplitterFinishBlockCommand(self, false) /* is_final = */
	}
}
```
Lowell K. Bridwell (14 June 1924 – 21 November 1986) was an American journalist. He was a correspondent for the Associated Press and the Ohio State Journal from 1946 to 1950. He was director of the Federal Highway Administration from 1967 to 1969. Biography He was born on June 14, 1924, in Westerville, Ohio. His father worked for the Anti-Saloon League. After World War II, he briefly attended Ohio State University in the late 1940s and early 1950s, married Margaret Bridwell, a physician. He was a Scripps-Howard reporter in Columbus, Ohio, then Cincinnati, Ohio. During his time as a reporter in the 1950s he wrote many stories about first local and then national interest, and drew the interest of J. Edgar Hoover during the second red scare. His brother Charles Bridwell also worked for the Federal Bureau of Investigation (FBI) at the time in the Columbus field office and refused to spy on his brother for Hoover, thus ending his career in the FBI. Years later, Lowell became well known as the reporter who wrote an exposΓ© debunking Walter Williams, who claimed to be the last surviving Civil War veteran. After covering John F. Kennedy's run for the presidency in 1960 as a reporter, he joined the administration United States Department of Commerce in April 1962 as assistant to Under Secretary for Transportation Clarence Martin Jr., (under President Kennedy) before being appointed Acting Deputy Federal Highway Administrator on January 20, 1964, under President Johnson a post he held until becoming Deputy Under Secretary of Commerce for Transportation (Operations) on July 2, 1964. He worked at the Federal Highway Administration from March 23, 1967, until the end of the Johnson administration on January 20, 1969. During this time billions of dollars of highway funds were used to build America's highways from coast to coast. From 1972 to 1981, Bridwell was the executive director of the Westside Highway Project. 
Between 1981 and 1984, he was appointed Secretary of the Maryland Department of Transportation, and taught at the Univ. of Maryland during the 1980s. He died on November 21, 1986, in Columbia, Maryland. References External links Lowell K. Bridwell bio via Department of Transportation 1924 births 1986 deaths Secretaries of Transportation of Maryland People from Westerville, Ohio 20th-century American politicians Administrators of the Federal Highway Administration United States Department of Commerce officials Kennedy administration personnel Lyndon B. Johnson administration personnel Ohio State University alumni
```csharp
//
// Author:
//   Jb Evain (jbevain@gmail.com)
//

namespace ILRuntime.Mono.Cecil {

	// Metadata scope referring to another module (a ModuleRef table entry).
	public class ModuleReference : IMetadataScope {

		string name;
		// Raw metadata token; internal because it is assigned by other
		// types in this assembly during metadata reading/writing.
		internal MetadataToken token;

		// Name of the referenced module.
		public string Name {
			get { return name; }
			set { name = value; }
		}

		// This scope is always a module reference.
		public virtual MetadataScopeType MetadataScopeType {
			get { return MetadataScopeType.ModuleReference; }
		}

		public MetadataToken MetadataToken {
			get { return token; }
			set { token = value; }
		}

		// Starts with an empty (row 0) ModuleRef token; filled in later.
		internal ModuleReference ()
		{
			this.token = new MetadataToken (TokenType.ModuleRef);
		}

		public ModuleReference (string name)
			: this ()
		{
			this.name = name;
		}

		public override string ToString ()
		{
			return name;
		}
	}
}
```
Have Gun, Will Travel is an American alternative rock/folk rock band formed in January 2006. Based in Bradenton, Florida, they make reference to the town in several songs, including "Salad Days", from the 2009 album Postcards from the Friendly City ("The Friendly City" is a local nickname for Bradenton). Their name is taken from the 1957–1963 Western series Have Gun – Will Travel. The band consists of vocalist and guitarist Matt Burke, guitarist Scott Anderson, bassist Daniel Burke, and drummer JP Beaubien. The group's music includes Americana, folk rock, country, and punk rock influences, and has been featured in a national Chevrolet television commercial as well as the PBS series Roadtrip Nation. Their song "Blessing and a Curse" was used during the final scenes and credits of The Good Wife, season 3, episode 21, in 2012. Lead vocalist Matt Burke's songwriting draws on western themes of life and death, honor, and morality throughout the band's narrative songs. Their fifth record, Science from an Easy Chair, is a concept album based entirely on British explorer Sir Ernest Shackleton and his 1914–16 Imperial Trans-Antarctic Expedition. Band members Matt Burke – vocals, guitar, harmonica Daniel Burke – bass, vocals JP Beaubien – drums, percussion Scott Anderson – guitars Discography Casting Shadows Tall as Giants (2008) Postcards from the Friendly City (2009) Mergers & Acquisitions (2011) Fiction, Fact or Folktale? (2013) Science from an Easy Chair (2015) Strange Chemistry (2019) Silver Sounds (EP, 2022) References External links Musical groups established in 2006 American country rock groups Musical groups from Tampa, Florida 2006 establishments in Florida
```go package htlcswitch import ( "context" "crypto/rand" "crypto/sha256" "fmt" "io" mrand "math/rand" "reflect" "testing" "time" "github.com/btcsuite/btcd/btcutil" "github.com/davecgh/go-spew/spew" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channeldb/models" "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/fn" "github.com/lightningnetwork/lnd/htlcswitch/hodl" "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/ticker" "github.com/stretchr/testify/require" ) var zeroCircuit = models.CircuitKey{} var emptyScid = lnwire.ShortChannelID{} func genPreimage() ([32]byte, error) { var preimage [32]byte if _, err := io.ReadFull(rand.Reader, preimage[:]); err != nil { return preimage, err } return preimage, nil } // TestSwitchAddDuplicateLink tests that the switch will reject duplicate links // for live links. It also tests that we can successfully add a link after // having removed it. func TestSwitchAddDuplicateLink(t *testing.T) { t.Parallel() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") s, err := initSwitchWithTempDB(t, testStartingHeight) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } defer s.Stop() chanID1, aliceScid := genID() aliceChannelLink := newMockChannelLink( s, chanID1, aliceScid, emptyScid, alicePeer, false, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } // Alice should have a live link, adding again should fail. 
if err := s.AddLink(aliceChannelLink); err == nil { t.Fatalf("adding duplicate link should have failed") } // Remove the live link to ensure the indexes are cleared. s.RemoveLink(chanID1) // Alice has no links, adding should succeed. if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } } // TestSwitchHasActiveLink tests the behavior of HasActiveLink, and asserts that // it only returns true if a link's short channel id has confirmed (meaning the // channel is no longer pending) and it's EligibleToForward method returns true, // i.e. it has received ChannelReady from the remote peer. func TestSwitchHasActiveLink(t *testing.T) { t.Parallel() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") s, err := initSwitchWithTempDB(t, testStartingHeight) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } defer s.Stop() chanID1, aliceScid := genID() aliceChannelLink := newMockChannelLink( s, chanID1, aliceScid, emptyScid, alicePeer, false, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } // The link has been added, but it's still pending. HasActiveLink should // return false since the link has not been added to the linkIndex // containing live links. if s.HasActiveLink(chanID1) { t.Fatalf("link should not be active yet, still pending") } // Finally, simulate the link receiving channel_ready by setting its // eligibility to true. aliceChannelLink.eligible = true // The link should now be reported as active, since EligibleToForward // returns true and the link is in the linkIndex. if !s.HasActiveLink(chanID1) { t.Fatalf("link should not be active now") } } // TestSwitchSendPending checks the inability of htlc switch to forward adds // over pending links. 
func TestSwitchSendPending(t *testing.T) {
	t.Parallel()

	alicePeer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err, "unable to create alice server")

	bobPeer, err := newMockServer(
		t, "bob", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err, "unable to create bob server")

	s, err := initSwitchWithTempDB(t, testStartingHeight)
	require.NoError(t, err, "unable to init switch")
	if err := s.Start(); err != nil {
		t.Fatalf("unable to start switch: %v", err)
	}
	defer s.Stop()

	chanID1, chanID2, aliceChanID, bobChanID := genIDs()

	// Alice's link is given a zero SCID, marking it as a pending channel
	// that is not yet eligible to forward.
	pendingChanID := lnwire.ShortChannelID{}

	aliceChannelLink := newMockChannelLink(
		s, chanID1, pendingChanID, emptyScid, alicePeer, false, false,
		false, false,
	)
	if err := s.AddLink(aliceChannelLink); err != nil {
		t.Fatalf("unable to add alice link: %v", err)
	}

	bobChannelLink := newMockChannelLink(
		s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false,
		false,
	)
	if err := s.AddLink(bobChannelLink); err != nil {
		t.Fatalf("unable to add bob link: %v", err)
	}

	// Create request which is being forwarded from Bob channel link to
	// Alice channel link.
	preimage, err := genPreimage()
	require.NoError(t, err, "unable to generate preimage")

	rhash := sha256.Sum256(preimage[:])
	packet := &htlcPacket{
		incomingChanID: bobChanID,
		incomingHTLCID: 0,
		outgoingChanID: aliceChanID,
		obfuscator:     NewMockObfuscator(),
		htlc: &lnwire.UpdateAddHTLC{
			PaymentHash: rhash,
			Amount:      1,
		},
	}

	// Send the ADD packet, this should not be forwarded out to the link
	// since there are no eligible links.
	if err = s.ForwardPackets(nil, packet); err != nil {
		t.Fatal(err)
	}

	// The switch should fail the packet back to Bob's (incoming) link.
	select {
	case p := <-bobChannelLink.packets:
		if p.linkFailure != nil {
			err = p.linkFailure
		}
	case <-time.After(time.Second):
		t.Fatal("no timely reply from switch")
	}
	linkErr, ok := err.(*LinkError)
	if !ok {
		t.Fatalf("expected link error, got: %T", err)
	}
	if linkErr.WireMessage().Code() != lnwire.CodeUnknownNextPeer {
		t.Fatalf("expected fail unknown next peer, got: %T",
			linkErr.WireMessage().Code())
	}

	// No message should be sent, since the packet was failed.
	select {
	case <-aliceChannelLink.packets:
		t.Fatal("expected not to receive message")
	case <-time.After(time.Second):
	}

	// Since the packet should have been failed, there should be no active
	// circuits.
	if s.circuits.NumOpen() != 0 {
		t.Fatal("wrong amount of circuits")
	}
}

// TestSwitchForwardMapping checks that the Switch properly consults its maps
// when forwarding packets.
func TestSwitchForwardMapping(t *testing.T) {
	tests := []struct {
		name string

		// If this is true, then Alice's channel will be private.
		alicePrivate bool

		// If this is true, then Alice's channel will be a zero-conf
		// channel.
		zeroConf bool

		// If this is true, then Alice's channel will be an
		// option-scid-alias feature-bit, non-zero-conf channel.
		optionScid bool

		// If this is true, then an alias will be used for forwarding.
		useAlias bool

		// This is Alice's channel alias. This may not be set if this
		// is not an option_scid_alias channel (feature bit).
		aliceAlias lnwire.ShortChannelID

		// This is Alice's confirmed SCID. This may not be set if this
		// is a zero-conf channel before confirmation.
		aliceReal lnwire.ShortChannelID

		// If this is set, we expect Bob forwarding to Alice to fail.
		expectErr bool
	}{
		{
			name:         "private unconfirmed zero-conf",
			alicePrivate: true,
			zeroConf:     true,
			useAlias:     true,
			aliceAlias: lnwire.ShortChannelID{
				BlockHeight: 16_000_002,
				TxIndex:     2,
				TxPosition:  2,
			},
			aliceReal: lnwire.ShortChannelID{},
			expectErr: false,
		},
		{
			name:         "private confirmed zero-conf",
			alicePrivate: true,
			zeroConf:     true,
			useAlias:     true,
			aliceAlias: lnwire.ShortChannelID{
				BlockHeight: 16_000_003,
				TxIndex:     3,
				TxPosition:  3,
			},
			aliceReal: lnwire.ShortChannelID{
				BlockHeight: 300000,
				TxIndex:     3,
				TxPosition:  3,
			},
			expectErr: false,
		},
		{
			name:         "private confirmed zero-conf failure",
			alicePrivate: true,
			zeroConf:     true,
			useAlias:     false,
			aliceAlias: lnwire.ShortChannelID{
				BlockHeight: 16_000_004,
				TxIndex:     4,
				TxPosition:  4,
			},
			aliceReal: lnwire.ShortChannelID{
				BlockHeight: 300002,
				TxIndex:     4,
				TxPosition:  4,
			},
			expectErr: true,
		},
		{
			name:         "public unconfirmed zero-conf",
			alicePrivate: false,
			zeroConf:     true,
			useAlias:     true,
			aliceAlias: lnwire.ShortChannelID{
				BlockHeight: 16_000_005,
				TxIndex:     5,
				TxPosition:  5,
			},
			aliceReal: lnwire.ShortChannelID{},
			expectErr: false,
		},
		{
			name:         "public confirmed zero-conf w/ alias",
			alicePrivate: false,
			zeroConf:     true,
			useAlias:     true,
			aliceAlias: lnwire.ShortChannelID{
				BlockHeight: 16_000_006,
				TxIndex:     6,
				TxPosition:  6,
			},
			aliceReal: lnwire.ShortChannelID{
				BlockHeight: 500000,
				TxIndex:     6,
				TxPosition:  6,
			},
			expectErr: false,
		},
		{
			name:         "public confirmed zero-conf w/ real",
			alicePrivate: false,
			zeroConf:     true,
			useAlias:     false,
			aliceAlias: lnwire.ShortChannelID{
				BlockHeight: 16_000_007,
				TxIndex:     7,
				TxPosition:  7,
			},
			aliceReal: lnwire.ShortChannelID{
				BlockHeight: 502000,
				TxIndex:     7,
				TxPosition:  7,
			},
			expectErr: false,
		},
		{
			name:         "private non-option channel",
			alicePrivate: true,
			aliceAlias:   lnwire.ShortChannelID{},
			aliceReal: lnwire.ShortChannelID{
				BlockHeight: 505000,
				TxIndex:     8,
				TxPosition:  8,
			},
		},
		{
			name:         "private option channel w/ alias",
			alicePrivate: true,
			optionScid:   true,
			useAlias:     true,
			aliceAlias: lnwire.ShortChannelID{
				BlockHeight: 16_000_015,
				TxIndex:     9,
				TxPosition:  9,
			},
			aliceReal: lnwire.ShortChannelID{
				BlockHeight: 506000,
				TxIndex:     10,
				TxPosition:  10,
			},
			expectErr: false,
		},
		{
			name:         "private option channel failure",
			alicePrivate: true,
			optionScid:   true,
			useAlias:     false,
			aliceAlias: lnwire.ShortChannelID{
				BlockHeight: 16_000_016,
				TxIndex:     16,
				TxPosition:  16,
			},
			aliceReal: lnwire.ShortChannelID{
				BlockHeight: 507000,
				TxIndex:     17,
				TxPosition:  17,
			},
			expectErr: true,
		},
		{
			name:         "public non-option channel",
			alicePrivate: false,
			useAlias:     false,
			aliceAlias:   lnwire.ShortChannelID{},
			aliceReal: lnwire.ShortChannelID{
				BlockHeight: 508000,
				TxIndex:     17,
				TxPosition:  17,
			},
			expectErr: false,
		},
		{
			name:         "public option channel w/ alias",
			alicePrivate: false,
			optionScid:   true,
			useAlias:     true,
			aliceAlias: lnwire.ShortChannelID{
				BlockHeight: 16_000_018,
				TxIndex:     18,
				TxPosition:  18,
			},
			aliceReal: lnwire.ShortChannelID{
				BlockHeight: 509000,
				TxIndex:     19,
				TxPosition:  19,
			},
			expectErr: false,
		},
		{
			name:         "public option channel w/ real",
			alicePrivate: false,
			optionScid:   true,
			useAlias:     false,
			aliceAlias: lnwire.ShortChannelID{
				BlockHeight: 16_000_019,
				TxIndex:     19,
				TxPosition:  19,
			},
			aliceReal: lnwire.ShortChannelID{
				BlockHeight: 510000,
				TxIndex:     20,
				TxPosition:  20,
			},
			expectErr: false,
		},
	}

	for _, test := range tests {
		// Rebind the range variable so each parallel subtest captures
		// its own copy.
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			testSwitchForwardMapping(
				t, test.alicePrivate, test.zeroConf,
				test.useAlias, test.optionScid,
				test.aliceAlias, test.aliceReal,
				test.expectErr,
			)
		})
	}
}

// testSwitchForwardMapping sets up an Alice link (zero-conf or
// option-scid-alias as requested) and a plain Bob link, forwards a single ADD
// from Bob toward Alice using either the alias or the real SCID, and asserts
// whether the forward succeeds.
func testSwitchForwardMapping(t *testing.T, alicePrivate, aliceZeroConf,
	useAlias, optionScid bool, aliceAlias, aliceReal lnwire.ShortChannelID,
	expectErr bool) {

	alicePeer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)

	bobPeer, err := newMockServer(
		t, "bob", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)

	s, err := initSwitchWithTempDB(t, testStartingHeight)
	require.NoError(t, err)

	err = s.Start()
	require.NoError(t, err)
	defer func() {
		_ = s.Stop()
	}()

	// Create the lnwire.ChannelIDs that we'll use.
	chanID1, chanID2, _, _ := genIDs()

	var aliceChannelLink *mockChannelLink
	if aliceZeroConf {
		aliceChannelLink = newMockChannelLink(
			s, chanID1, aliceAlias, aliceReal, alicePeer, true,
			alicePrivate, true, false,
		)
	} else {
		aliceChannelLink = newMockChannelLink(
			s, chanID1, aliceReal, emptyScid, alicePeer, true,
			alicePrivate, false, optionScid,
		)
		if optionScid {
			aliceChannelLink.addAlias(aliceAlias)
		}
	}

	err = s.AddLink(aliceChannelLink)
	require.NoError(t, err)

	// Bob will just have a non-option_scid_alias channel so no mapping is
	// necessary.
	bobScid := lnwire.ShortChannelID{
		BlockHeight: 501000,
		TxIndex:     200,
		TxPosition:  2,
	}
	bobChannelLink := newMockChannelLink(
		s, chanID2, bobScid, emptyScid, bobPeer, true, false, false,
		false,
	)
	err = s.AddLink(bobChannelLink)
	require.NoError(t, err)

	// Generate preimage.
	preimage, err := genPreimage()
	require.NoError(t, err, "unable to generate preimage")
	rhash := sha256.Sum256(preimage[:])

	// Determine the outgoing SCID to use.
	outgoingSCID := aliceReal
	if useAlias {
		outgoingSCID = aliceAlias
	}

	packet := &htlcPacket{
		incomingChanID: bobScid,
		incomingHTLCID: 0,
		outgoingChanID: outgoingSCID,
		obfuscator:     NewMockObfuscator(),
		htlc: &lnwire.UpdateAddHTLC{
			PaymentHash: rhash,
			Amount:      1,
		},
	}

	err = s.ForwardPackets(nil, packet)
	require.NoError(t, err)

	// If we expect a forwarding error, then assert that we receive one.
	// option_scid_alias forwards may fail if forwarding would be a privacy
	// leak.
	if expectErr {
		// A failure is delivered back to Bob's (incoming) link, and
		// nothing reaches Alice.
		select {
		case <-bobChannelLink.packets:
		case <-time.After(time.Second * 5):
			t.Fatal("expected a forwarding error")
		}

		select {
		case <-aliceChannelLink.packets:
			t.Fatal("did not expect a packet")
		case <-time.After(time.Second * 5):
		}
	} else {
		// No failure is sent back to Bob, and Alice receives the ADD.
		select {
		case <-bobChannelLink.packets:
			t.Fatal("did not expect a forwarding error")
		case <-time.After(time.Second * 5):
		}

		select {
		case <-aliceChannelLink.packets:
		case <-time.After(time.Second * 5):
			t.Fatal("expected alice to receive packet")
		}
	}
}

// TestSwitchSendHTLCMapping tests that SendHTLC will properly route packets to
// zero-conf or option-scid-alias (feature-bit) channels if the confirmed SCID
// is used. It also tests that nothing breaks with the mapping change.
func TestSwitchSendHTLCMapping(t *testing.T) {
	tests := []struct {
		name string

		// If this is true, the channel will be zero-conf.
		zeroConf bool

		// Denotes whether the channel is option-scid-alias, non
		// zero-conf feature bit.
		optionFeature bool

		// If this is true, then the alias will be used in the packet.
		useAlias bool

		// This will be the channel alias if there is a mapping.
		alias lnwire.ShortChannelID

		// This will be the confirmed SCID if the channel is confirmed.
		real lnwire.ShortChannelID
	}{
		{
			name:          "non-zero-conf real scid w/ option",
			zeroConf:      false,
			optionFeature: true,
			useAlias:      false,
			alias: lnwire.ShortChannelID{
				BlockHeight: 10010,
				TxIndex:     10,
				TxPosition:  10,
			},
			real: lnwire.ShortChannelID{
				BlockHeight: 500000,
				TxIndex:     50,
				TxPosition:  50,
			},
		},
		{
			name:     "non-zero-conf real scid no option",
			zeroConf: false,
			useAlias: false,
			alias:    lnwire.ShortChannelID{},
			real: lnwire.ShortChannelID{
				BlockHeight: 400000,
				TxIndex:     50,
				TxPosition:  50,
			},
		},
		{
			name:     "zero-conf alias scid w/ conf",
			zeroConf: true,
			useAlias: true,
			alias: lnwire.ShortChannelID{
				BlockHeight: 10020,
				TxIndex:     20,
				TxPosition:  20,
			},
			real: lnwire.ShortChannelID{
				BlockHeight: 450000,
				TxIndex:     50,
				TxPosition:  50,
			},
		},
		{
			name:     "zero-conf alias scid no conf",
			zeroConf: true,
			useAlias: true,
			alias: lnwire.ShortChannelID{
				BlockHeight: 10015,
				TxIndex:     25,
				TxPosition:  35,
			},
			real: lnwire.ShortChannelID{},
		},
		{
			name:     "zero-conf real scid",
			zeroConf: true,
			useAlias: false,
			alias: lnwire.ShortChannelID{
				BlockHeight: 10035,
				TxIndex:     35,
				TxPosition:  35,
			},
			real: lnwire.ShortChannelID{
				BlockHeight: 470000,
				TxIndex:     35,
				TxPosition:  45,
			},
		},
	}

	for _, test := range tests {
		// Rebind the range variable so each parallel subtest captures
		// its own copy.
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			testSwitchSendHtlcMapping(
				t, test.zeroConf, test.useAlias, test.alias,
				test.real, test.optionFeature,
			)
		})
	}
}

// testSwitchSendHtlcMapping creates a single link with the given alias/real
// SCID configuration and asserts that SendHTLC succeeds when addressed by the
// chosen outgoing SCID.
func testSwitchSendHtlcMapping(t *testing.T, zeroConf, useAlias bool, alias,
	realScid lnwire.ShortChannelID, optionFeature bool) {

	peer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)

	s, err := initSwitchWithTempDB(t, testStartingHeight)
	require.NoError(t, err)

	err = s.Start()
	require.NoError(t, err)
	defer func() {
		_ = s.Stop()
	}()

	// Create the lnwire.ChannelID that we'll use.
	chanID, _ := genID()

	var link *mockChannelLink
	if zeroConf {
		link = newMockChannelLink(
			s, chanID, alias, realScid, peer, true, false, true,
			false,
		)
	} else {
		link = newMockChannelLink(
			s, chanID, realScid, emptyScid, peer, true, false,
			false, true,
		)
		if optionFeature {
			link.addAlias(alias)
		}
	}

	err = s.AddLink(link)
	require.NoError(t, err)

	// Generate preimage.
	preimage, err := genPreimage()
	require.NoError(t, err)
	rhash := sha256.Sum256(preimage[:])

	// Determine the outgoing SCID to use.
	outgoingSCID := realScid
	if useAlias {
		outgoingSCID = alias
	}

	// Send the HTLC and assert that we don't get an error.
	htlc := &lnwire.UpdateAddHTLC{
		PaymentHash: rhash,
		Amount:      1,
	}

	err = s.SendHTLC(outgoingSCID, 0, htlc)
	require.NoError(t, err)
}

// TestSwitchUpdateScid verifies that zero-conf and non-zero-conf
// option-scid-alias (feature bit) channels will have the expected entries in
// the aliasToReal and baseIndex maps.
func TestSwitchUpdateScid(t *testing.T) {
	t.Parallel()

	peer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err, "unable to create alice server")

	s, err := initSwitchWithTempDB(t, testStartingHeight)
	require.NoError(t, err)

	err = s.Start()
	require.NoError(t, err)
	defer func() {
		_ = s.Stop()
	}()

	// Create the IDs that we'll use.
	chanID, chanID2, _, _ := genIDs()
	alias := lnwire.ShortChannelID{
		BlockHeight: 16_000_000,
		TxIndex:     0,
		TxPosition:  0,
	}
	alias2 := alias
	alias2.TxPosition = 1
	realScid := lnwire.ShortChannelID{
		BlockHeight: 500000,
		TxIndex:     0,
		TxPosition:  0,
	}

	link := newMockChannelLink(
		s, chanID, alias, emptyScid, peer, true, false, true, false,
	)
	link.addAlias(alias2)
	err = s.AddLink(link)
	require.NoError(t, err)

	// Assert that the zero-conf link does not have entries in the
	// aliasToReal map.
	s.indexMtx.RLock()
	_, ok := s.aliasToReal[alias]
	require.False(t, ok)
	_, ok = s.aliasToReal[alias2]
	require.False(t, ok)

	// Assert that both aliases point to the "base" SCID, which is actually
	// just the first alias.
	baseScid, ok := s.baseIndex[alias]
	require.True(t, ok)
	require.Equal(t, alias, baseScid)
	baseScid, ok = s.baseIndex[alias2]
	require.True(t, ok)
	require.Equal(t, alias, baseScid)
	s.indexMtx.RUnlock()

	// We'll set the mock link's confirmed SCID so that UpdateShortChanID
	// populates aliasToReal and adds an entry to baseIndex.
	link.realScid = realScid
	link.confirmedZC = true
	err = s.UpdateShortChanID(chanID)
	require.NoError(t, err)

	// Assert that aliasToReal is populated and there is an entry in
	// baseIndex for realScid.
	s.indexMtx.RLock()
	realMapping, ok := s.aliasToReal[alias]
	require.True(t, ok)
	require.Equal(t, realScid, realMapping)
	realMapping, ok = s.aliasToReal[alias2]
	require.True(t, ok)
	require.Equal(t, realScid, realMapping)
	baseScid, ok = s.baseIndex[realScid]
	require.True(t, ok)
	require.Equal(t, alias, baseScid)
	s.indexMtx.RUnlock()

	// Now we'll perform the same checks with a non-zero-conf
	// option-scid-alias channel (feature-bit).
	optionReal := lnwire.ShortChannelID{
		BlockHeight: 600000,
		TxIndex:     0,
		TxPosition:  0,
	}
	optionAlias := lnwire.ShortChannelID{
		BlockHeight: 12000,
		TxIndex:     0,
		TxPosition:  0,
	}
	optionAlias2 := optionAlias
	optionAlias2.TxPosition = 1

	link2 := newMockChannelLink(
		s, chanID2, optionReal, emptyScid, peer, true, false, false,
		true,
	)
	link2.addAlias(optionAlias)
	link2.addAlias(optionAlias2)
	err = s.AddLink(link2)
	require.NoError(t, err)

	// Assert that the option-scid-alias link does have entries in the
	// aliasToReal and baseIndex maps.
	s.indexMtx.RLock()
	realMapping, ok = s.aliasToReal[optionAlias]
	require.True(t, ok)
	require.Equal(t, optionReal, realMapping)
	realMapping, ok = s.aliasToReal[optionAlias2]
	require.True(t, ok)
	require.Equal(t, optionReal, realMapping)

	// For a confirmed option-scid-alias channel, the base SCID is the
	// real SCID rather than the first alias.
	baseScid, ok = s.baseIndex[optionReal]
	require.True(t, ok)
	require.Equal(t, optionReal, baseScid)
	baseScid, ok = s.baseIndex[optionAlias]
	require.True(t, ok)
	require.Equal(t, optionReal, baseScid)
	baseScid, ok = s.baseIndex[optionAlias2]
	require.True(t, ok)
	require.Equal(t, optionReal, baseScid)
	s.indexMtx.RUnlock()
}

// TestSwitchForward checks the ability of htlc switch to forward add/settle
// requests.
func TestSwitchForward(t *testing.T) {
	t.Parallel()

	alicePeer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	if err != nil {
		t.Fatalf("unable to create alice server: %v", err)
	}
	bobPeer, err := newMockServer(
		t, "bob", testStartingHeight, nil, testDefaultDelta,
	)
	if err != nil {
		t.Fatalf("unable to create bob server: %v", err)
	}

	s, err := initSwitchWithTempDB(t, testStartingHeight)
	if err != nil {
		t.Fatalf("unable to init switch: %v", err)
	}
	if err := s.Start(); err != nil {
		t.Fatalf("unable to start switch: %v", err)
	}
	defer s.Stop()

	chanID1, chanID2, aliceChanID, bobChanID := genIDs()

	aliceChannelLink := newMockChannelLink(
		s, chanID1, aliceChanID, emptyScid, alicePeer, true, false,
		false, false,
	)
	bobChannelLink := newMockChannelLink(
		s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false,
		false,
	)
	if err := s.AddLink(aliceChannelLink); err != nil {
		t.Fatalf("unable to add alice link: %v", err)
	}
	if err := s.AddLink(bobChannelLink); err != nil {
		t.Fatalf("unable to add bob link: %v", err)
	}

	// Create request which should be forwarded from Alice channel link to
	// bob channel link.
	preimage, err := genPreimage()
	if err != nil {
		t.Fatalf("unable to generate preimage: %v", err)
	}
	rhash := sha256.Sum256(preimage[:])
	packet := &htlcPacket{
		incomingChanID: aliceChannelLink.ShortChanID(),
		incomingHTLCID: 0,
		outgoingChanID: bobChannelLink.ShortChanID(),
		obfuscator:     NewMockObfuscator(),
		htlc: &lnwire.UpdateAddHTLC{
			PaymentHash: rhash,
			Amount:      1,
		},
	}

	// Handle the request and check that bob channel link received it.
	if err := s.ForwardPackets(nil, packet); err != nil {
		t.Fatal(err)
	}

	select {
	case <-bobChannelLink.packets:
		if err := bobChannelLink.completeCircuit(packet); err != nil {
			t.Fatalf("unable to complete payment circuit: %v", err)
		}
	case <-time.After(time.Second):
		t.Fatal("request was not propagated to destination")
	}

	if s.circuits.NumOpen() != 1 {
		t.Fatal("wrong amount of circuits")
	}

	if !s.IsForwardedHTLC(bobChannelLink.ShortChanID(), 0) {
		t.Fatal("htlc should be identified as forwarded")
	}

	// Create settle request pretending that bob link handled the add htlc
	// request and sent the htlc settle request back. This request should
	// be forwarded back to Alice link.
	packet = &htlcPacket{
		outgoingChanID: bobChannelLink.ShortChanID(),
		outgoingHTLCID: 0,
		amount:         1,
		htlc: &lnwire.UpdateFulfillHTLC{
			PaymentPreimage: preimage,
		},
	}

	// Handle the request and check that payment circuit works properly.
	if err := s.ForwardPackets(nil, packet); err != nil {
		t.Fatal(err)
	}

	select {
	case pkt := <-aliceChannelLink.packets:
		if err := aliceChannelLink.deleteCircuit(pkt); err != nil {
			t.Fatalf("unable to remove circuit: %v", err)
		}
	case <-time.After(time.Second):
		t.Fatal("request was not propagated to channelPoint")
	}

	// Settling the HTLC should have closed the circuit.
	if s.circuits.NumOpen() != 0 {
		t.Fatal("wrong amount of circuits")
	}
}

// TestSwitchForwardFailAfterFullAdd verifies that a fail for a fully-added
// HTLC is still delivered to the incoming link after the switch restarts, and
// that a duplicate fail is dropped.
func TestSwitchForwardFailAfterFullAdd(t *testing.T) {
	t.Parallel()

	chanID1, chanID2, aliceChanID, bobChanID := genIDs()

	alicePeer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	if err != nil {
		t.Fatalf("unable to create alice server: %v", err)
	}
	bobPeer, err := newMockServer(
		t, "bob", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err, "unable to create bob server")

	tempPath := t.TempDir()

	cdb, err := channeldb.Open(tempPath)
	require.NoError(t, err, "unable to open channeldb")
	t.Cleanup(func() {
		cdb.Close()
	})

	s, err := initSwitchWithDB(testStartingHeight, cdb)
	require.NoError(t, err, "unable to init switch")
	if err := s.Start(); err != nil {
		t.Fatalf("unable to start switch: %v", err)
	}

	// Even though we intend to Stop s later in the test, it is safe to
	// defer this Stop since its execution is protected by an atomic
	// guard, guaranteeing it executes at most once.
	defer s.Stop()

	aliceChannelLink := newMockChannelLink(
		s, chanID1, aliceChanID, emptyScid, alicePeer, true, false,
		false, false,
	)
	bobChannelLink := newMockChannelLink(
		s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false,
		false,
	)
	if err := s.AddLink(aliceChannelLink); err != nil {
		t.Fatalf("unable to add alice link: %v", err)
	}
	if err := s.AddLink(bobChannelLink); err != nil {
		t.Fatalf("unable to add bob link: %v", err)
	}

	// Create request which should be forwarded from Alice channel link to
	// bob channel link.
	preimage := [sha256.Size]byte{1}
	rhash := sha256.Sum256(preimage[:])
	ogPacket := &htlcPacket{
		incomingChanID: aliceChannelLink.ShortChanID(),
		incomingHTLCID: 0,
		outgoingChanID: bobChannelLink.ShortChanID(),
		obfuscator:     NewMockObfuscator(),
		htlc: &lnwire.UpdateAddHTLC{
			PaymentHash: rhash,
			Amount:      1,
		},
	}

	if s.circuits.NumPending() != 0 {
		t.Fatalf("wrong amount of half circuits")
	}
	if s.circuits.NumOpen() != 0 {
		t.Fatalf("wrong amount of circuits")
	}

	// Handle the request and check that bob channel link received it.
	if err := s.ForwardPackets(nil, ogPacket); err != nil {
		t.Fatal(err)
	}

	if s.circuits.NumPending() != 1 {
		t.Fatalf("wrong amount of half circuits")
	}
	if s.circuits.NumOpen() != 0 {
		t.Fatalf("wrong amount of circuits")
	}

	// Pull packet from bob's link, but do not perform a full add.
	select {
	case packet := <-bobChannelLink.packets:
		// Complete the payment circuit and assign the outgoing htlc id
		// before restarting.
		if err := bobChannelLink.completeCircuit(packet); err != nil {
			t.Fatalf("unable to complete payment circuit: %v", err)
		}

	case <-time.After(time.Second):
		t.Fatal("request was not propagated to destination")
	}

	if s.circuits.NumPending() != 1 {
		t.Fatalf("wrong amount of half circuits")
	}
	if s.circuits.NumOpen() != 1 {
		t.Fatalf("wrong amount of circuits")
	}

	// Now we will restart bob, leaving the forwarding decision for this
	// htlc in the half-added state.
if err := s.Stop(); err != nil { t.Fatalf(err.Error()) } if err := cdb.Close(); err != nil { t.Fatalf(err.Error()) } cdb2, err := channeldb.Open(tempPath) require.NoError(t, err, "unable to reopen channeldb") t.Cleanup(func() { cdb2.Close() }) s2, err := initSwitchWithDB(testStartingHeight, cdb2) require.NoError(t, err, "unable reinit switch") if err := s2.Start(); err != nil { t.Fatalf("unable to restart switch: %v", err) } // Even though we intend to Stop s2 later in the test, it is safe to // defer this Stop since its execution it is protected by an atomic // guard, guaranteeing it executes at most once. defer s2.Stop() aliceChannelLink = newMockChannelLink( s2, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink = newMockChannelLink( s2, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s2.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s2.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } if s2.circuits.NumPending() != 1 { t.Fatalf("wrong amount of half circuits") } if s2.circuits.NumOpen() != 1 { t.Fatalf("wrong amount of circuits") } // Craft a failure message from the remote peer. fail := &htlcPacket{ outgoingChanID: bobChannelLink.ShortChanID(), outgoingHTLCID: 0, amount: 1, htlc: &lnwire.UpdateFailHTLC{}, } // Send the fail packet from the remote peer through the switch. if err := s2.ForwardPackets(nil, fail); err != nil { t.Fatalf(err.Error()) } // Pull packet from alice's link, as it should have gone through // successfully. select { case pkt := <-aliceChannelLink.packets: if err := aliceChannelLink.completeCircuit(pkt); err != nil { t.Fatalf("unable to remove circuit: %v", err) } case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } // Circuit map should be empty now. 
if s2.circuits.NumPending() != 0 { t.Fatalf("wrong amount of half circuits") } if s2.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of circuits") } // Send the fail packet from the remote peer through the switch. if err := s.ForwardPackets(nil, fail); err != nil { t.Fatal(err) } select { case <-aliceChannelLink.packets: t.Fatalf("expected duplicate fail to not arrive at the destination") case <-time.After(time.Second): } } func TestSwitchForwardSettleAfterFullAdd(t *testing.T) { t.Parallel() chanID1, chanID2, aliceChanID, bobChanID := genIDs() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") bobPeer, err := newMockServer( t, "bob", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create bob server") tempPath := t.TempDir() cdb, err := channeldb.Open(tempPath) require.NoError(t, err, "unable to open channeldb") t.Cleanup(func() { cdb.Close() }) s, err := initSwitchWithDB(testStartingHeight, cdb) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } // Even though we intend to Stop s later in the test, it is safe to // defer this Stop since its execution it is protected by an atomic // guard, guaranteeing it executes at most once. defer s.Stop() aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink := newMockChannelLink( s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } // Create request which should be forwarded from Alice channel link to // bob channel link. 
preimage := [sha256.Size]byte{1} rhash := sha256.Sum256(preimage[:]) ogPacket := &htlcPacket{ incomingChanID: aliceChannelLink.ShortChanID(), incomingHTLCID: 0, outgoingChanID: bobChannelLink.ShortChanID(), obfuscator: NewMockObfuscator(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, }, } if s.circuits.NumPending() != 0 { t.Fatalf("wrong amount of half circuits") } if s.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of circuits") } // Handle the request and checks that bob channel link received it. if err := s.ForwardPackets(nil, ogPacket); err != nil { t.Fatal(err) } if s.circuits.NumPending() != 1 { t.Fatalf("wrong amount of half circuits") } if s.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of circuits") } // Pull packet from bob's link, but do not perform a full add. select { case packet := <-bobChannelLink.packets: // Complete the payment circuit and assign the outgoing htlc id // before restarting. if err := bobChannelLink.completeCircuit(packet); err != nil { t.Fatalf("unable to complete payment circuit: %v", err) } case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } if s.circuits.NumPending() != 1 { t.Fatalf("wrong amount of half circuits") } if s.circuits.NumOpen() != 1 { t.Fatalf("wrong amount of circuits") } // Now we will restart bob, leaving the forwarding decision for this // htlc is in the half-added state. 
if err := s.Stop(); err != nil { t.Fatalf(err.Error()) } if err := cdb.Close(); err != nil { t.Fatalf(err.Error()) } cdb2, err := channeldb.Open(tempPath) require.NoError(t, err, "unable to reopen channeldb") t.Cleanup(func() { cdb2.Close() }) s2, err := initSwitchWithDB(testStartingHeight, cdb2) require.NoError(t, err, "unable reinit switch") if err := s2.Start(); err != nil { t.Fatalf("unable to restart switch: %v", err) } // Even though we intend to Stop s2 later in the test, it is safe to // defer this Stop since its execution it is protected by an atomic // guard, guaranteeing it executes at most once. defer s2.Stop() aliceChannelLink = newMockChannelLink( s2, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink = newMockChannelLink( s2, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s2.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s2.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } if s2.circuits.NumPending() != 1 { t.Fatalf("wrong amount of half circuits") } if s2.circuits.NumOpen() != 1 { t.Fatalf("wrong amount of circuits") } // Craft a settle message from the remote peer. settle := &htlcPacket{ outgoingChanID: bobChannelLink.ShortChanID(), outgoingHTLCID: 0, amount: 1, htlc: &lnwire.UpdateFulfillHTLC{ PaymentPreimage: preimage, }, } // Send the settle packet from the remote peer through the switch. if err := s2.ForwardPackets(nil, settle); err != nil { t.Fatalf(err.Error()) } // Pull packet from alice's link, as it should have gone through // successfully. select { case packet := <-aliceChannelLink.packets: if err := aliceChannelLink.completeCircuit(packet); err != nil { t.Fatalf("unable to complete circuit with in key=%s: %v", packet.inKey(), err) } case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } // Circuit map should be empty now. 
if s2.circuits.NumPending() != 0 { t.Fatalf("wrong amount of half circuits") } if s2.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of circuits") } // Send the settle packet again, which not arrive at destination. if err := s2.ForwardPackets(nil, settle); err != nil { t.Fatal(err) } select { case <-bobChannelLink.packets: t.Fatalf("expected duplicate fail to not arrive at the destination") case <-time.After(time.Second): } } func TestSwitchForwardDropAfterFullAdd(t *testing.T) { t.Parallel() chanID1, chanID2, aliceChanID, bobChanID := genIDs() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") bobPeer, err := newMockServer( t, "bob", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create bob server") tempPath := t.TempDir() cdb, err := channeldb.Open(tempPath) require.NoError(t, err, "unable to open channeldb") t.Cleanup(func() { cdb.Close() }) s, err := initSwitchWithDB(testStartingHeight, cdb) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } // Even though we intend to Stop s later in the test, it is safe to // defer this Stop since its execution it is protected by an atomic // guard, guaranteeing it executes at most once. defer s.Stop() aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink := newMockChannelLink( s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } // Create request which should be forwarded from Alice channel link to // bob channel link. 
	preimage := [sha256.Size]byte{1}
	rhash := sha256.Sum256(preimage[:])
	ogPacket := &htlcPacket{
		incomingChanID: aliceChannelLink.ShortChanID(),
		incomingHTLCID: 0,
		outgoingChanID: bobChannelLink.ShortChanID(),
		obfuscator:     NewMockObfuscator(),
		htlc: &lnwire.UpdateAddHTLC{
			PaymentHash: rhash,
			Amount:      1,
		},
	}

	if s.circuits.NumPending() != 0 {
		t.Fatalf("wrong amount of half circuits")
	}
	if s.circuits.NumOpen() != 0 {
		t.Fatalf("wrong amount of circuits")
	}

	// Handle the request and check that bob channel link received it.
	if err := s.ForwardPackets(nil, ogPacket); err != nil {
		t.Fatal(err)
	}

	if s.circuits.NumPending() != 1 {
		t.Fatalf("wrong amount of half circuits")
	}
	// NOTE(review): message says "half circuits" but this checks NumOpen —
	// likely a copy-paste in the original; behavior is unaffected.
	if s.circuits.NumOpen() != 0 {
		t.Fatalf("wrong amount of half circuits")
	}

	// Pull packet from bob's link, but do not perform a full add.
	select {
	case packet := <-bobChannelLink.packets:
		// Complete the payment circuit and assign the outgoing htlc id
		// before restarting.
		if err := bobChannelLink.completeCircuit(packet); err != nil {
			t.Fatalf("unable to complete payment circuit: %v", err)
		}

	case <-time.After(time.Second):
		t.Fatal("request was not propagated to destination")
	}

	// Now we will restart bob, leaving the forwarding decision for this
	// htlc in the half-added state.
	if err := s.Stop(); err != nil {
		t.Fatalf(err.Error())
	}
	if err := cdb.Close(); err != nil {
		t.Fatalf(err.Error())
	}

	cdb2, err := channeldb.Open(tempPath)
	require.NoError(t, err, "unable to reopen channeldb")
	t.Cleanup(func() {
		cdb2.Close()
	})

	s2, err := initSwitchWithDB(testStartingHeight, cdb2)
	require.NoError(t, err, "unable reinit switch")
	if err := s2.Start(); err != nil {
		t.Fatalf("unable to restart switch: %v", err)
	}

	// Even though we intend to Stop s2 later in the test, it is safe to
	// defer this Stop since its execution it is protected by an atomic
	// guard, guaranteeing it executes at most once.
	defer s2.Stop()

	aliceChannelLink = newMockChannelLink(
		s2, chanID1, aliceChanID, emptyScid, alicePeer, true, false,
		false, false,
	)
	bobChannelLink = newMockChannelLink(
		s2, chanID2, bobChanID, emptyScid, bobPeer, true, false, false,
		false,
	)
	if err := s2.AddLink(aliceChannelLink); err != nil {
		t.Fatalf("unable to add alice link: %v", err)
	}
	if err := s2.AddLink(bobChannelLink); err != nil {
		t.Fatalf("unable to add bob link: %v", err)
	}

	if s2.circuits.NumPending() != 1 {
		t.Fatalf("wrong amount of half circuits")
	}
	if s2.circuits.NumOpen() != 1 {
		t.Fatalf("wrong amount of half circuits")
	}

	// Resend the failed htlc. The packet will be dropped silently since the
	// switch will detect that it has been half added previously.
	if err := s2.ForwardPackets(nil, ogPacket); err != nil {
		t.Fatal(err)
	}

	// The duplicate ADD should be dropped silently: neither the source nor
	// the destination link should receive anything.
	select {
	case <-aliceChannelLink.packets:
		t.Fatal("request should not have returned to source")
	case <-bobChannelLink.packets:
		t.Fatal("request should not have forwarded to destination")
	case <-time.After(time.Second):
	}
}

// TestSwitchForwardFailAfterHalfAdd verifies that a half-added htlc is failed
// back to the incoming link after the switch restarts.
func TestSwitchForwardFailAfterHalfAdd(t *testing.T) {
	t.Parallel()

	chanID1, chanID2, aliceChanID, bobChanID := genIDs()

	alicePeer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err, "unable to create alice server")
	bobPeer, err := newMockServer(
		t, "bob", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err, "unable to create bob server")

	tempPath := t.TempDir()

	cdb, err := channeldb.Open(tempPath)
	require.NoError(t, err, "unable to open channeldb")
	t.Cleanup(func() {
		cdb.Close()
	})

	s, err := initSwitchWithDB(testStartingHeight, cdb)
	require.NoError(t, err, "unable to init switch")
	if err := s.Start(); err != nil {
		t.Fatalf("unable to start switch: %v", err)
	}

	// Even though we intend to Stop s later in the test, it is safe to
	// defer this Stop since its execution it is protected by an atomic
// guard, guaranteeing it executes at most once. defer s.Stop() aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink := newMockChannelLink( s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } // Create request which should be forwarded from Alice channel link to // bob channel link. preimage := [sha256.Size]byte{1} rhash := sha256.Sum256(preimage[:]) ogPacket := &htlcPacket{ incomingChanID: aliceChannelLink.ShortChanID(), incomingHTLCID: 0, outgoingChanID: bobChannelLink.ShortChanID(), obfuscator: NewMockObfuscator(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, }, } if s.circuits.NumPending() != 0 { t.Fatalf("wrong amount of half circuits") } if s.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of circuits") } // Handle the request and checks that bob channel link received it. if err := s.ForwardPackets(nil, ogPacket); err != nil { t.Fatal(err) } if s.circuits.NumPending() != 1 { t.Fatalf("wrong amount of half circuits") } if s.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of half circuits") } // Pull packet from bob's link, but do not perform a full add. select { case <-bobChannelLink.packets: case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } // Now we will restart bob, leaving the forwarding decision for this // htlc is in the half-added state. 
if err := s.Stop(); err != nil { t.Fatalf(err.Error()) } if err := cdb.Close(); err != nil { t.Fatalf(err.Error()) } cdb2, err := channeldb.Open(tempPath) require.NoError(t, err, "unable to reopen channeldb") t.Cleanup(func() { cdb2.Close() }) s2, err := initSwitchWithDB(testStartingHeight, cdb2) require.NoError(t, err, "unable reinit switch") if err := s2.Start(); err != nil { t.Fatalf("unable to restart switch: %v", err) } // Even though we intend to Stop s2 later in the test, it is safe to // defer this Stop since its execution it is protected by an atomic // guard, guaranteeing it executes at most once. defer s2.Stop() aliceChannelLink = newMockChannelLink( s2, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink = newMockChannelLink( s2, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s2.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s2.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } if s2.circuits.NumPending() != 1 { t.Fatalf("wrong amount of half circuits") } if s2.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of half circuits") } // Resend the failed htlc, it should be returned to alice since the // switch will detect that it has been half added previously. err = s2.ForwardPackets(nil, ogPacket) if err != nil { t.Fatal(err) } // After detecting an incomplete forward, the fail packet should have // been returned to the sender. select { case pkt := <-aliceChannelLink.packets: linkErr := pkt.linkFailure if linkErr.FailureDetail != OutgoingFailureIncompleteForward { t.Fatalf("expected incomplete forward, got: %v", linkErr.FailureDetail) } case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } } // TestSwitchForwardCircuitPersistence checks the ability of htlc switch to // maintain the proper entries in the circuit map in the face of restarts. 
func TestSwitchForwardCircuitPersistence(t *testing.T) { t.Parallel() chanID1, chanID2, aliceChanID, bobChanID := genIDs() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") bobPeer, err := newMockServer( t, "bob", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create bob server") tempPath := t.TempDir() cdb, err := channeldb.Open(tempPath) require.NoError(t, err, "unable to open channeldb") t.Cleanup(func() { cdb.Close() }) s, err := initSwitchWithDB(testStartingHeight, cdb) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } // Even though we intend to Stop s later in the test, it is safe to // defer this Stop since its execution it is protected by an atomic // guard, guaranteeing it executes at most once. defer s.Stop() aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink := newMockChannelLink( s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } // Create request which should be forwarded from Alice channel link to // bob channel link. preimage := [sha256.Size]byte{1} rhash := sha256.Sum256(preimage[:]) ogPacket := &htlcPacket{ incomingChanID: aliceChannelLink.ShortChanID(), incomingHTLCID: 0, outgoingChanID: bobChannelLink.ShortChanID(), obfuscator: NewMockObfuscator(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, }, } if s.circuits.NumPending() != 0 { t.Fatalf("wrong amount of half circuits") } if s.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of circuits") } // Handle the request and checks that bob channel link received it. 
if err := s.ForwardPackets(nil, ogPacket); err != nil { t.Fatal(err) } if s.circuits.NumPending() != 1 { t.Fatalf("wrong amount of half circuits") } if s.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of circuits") } // Retrieve packet from outgoing link and cache until after restart. var packet *htlcPacket select { case packet = <-bobChannelLink.packets: case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } if err := s.Stop(); err != nil { t.Fatalf(err.Error()) } if err := cdb.Close(); err != nil { t.Fatalf(err.Error()) } cdb2, err := channeldb.Open(tempPath) require.NoError(t, err, "unable to reopen channeldb") t.Cleanup(func() { cdb2.Close() }) s2, err := initSwitchWithDB(testStartingHeight, cdb2) require.NoError(t, err, "unable reinit switch") if err := s2.Start(); err != nil { t.Fatalf("unable to restart switch: %v", err) } // Even though we intend to Stop s2 later in the test, it is safe to // defer this Stop since its execution it is protected by an atomic // guard, guaranteeing it executes at most once. defer s2.Stop() aliceChannelLink = newMockChannelLink( s2, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink = newMockChannelLink( s2, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s2.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s2.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } if s2.circuits.NumPending() != 1 { t.Fatalf("wrong amount of half circuits") } if s2.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of half circuits") } // Now that the switch has restarted, complete the payment circuit. 
	// Complete the circuit for the add that was pulled from bob's link
	// before the restart; the map should then show one open circuit.
	if err := bobChannelLink.completeCircuit(packet); err != nil {
		t.Fatalf("unable to complete payment circuit: %v", err)
	}

	if s2.circuits.NumPending() != 1 {
		t.Fatalf("wrong amount of half circuits")
	}
	if s2.circuits.NumOpen() != 1 {
		t.Fatal("wrong amount of circuits")
	}

	// Create settle request pretending that bob link handled the add htlc
	// request and sent the htlc settle request back. This request should
	// be forwarded back to Alice link.
	ogPacket = &htlcPacket{
		outgoingChanID: bobChannelLink.ShortChanID(),
		outgoingHTLCID: 0,
		amount:         1,
		htlc: &lnwire.UpdateFulfillHTLC{
			PaymentPreimage: preimage,
		},
	}

	// Handle the request and checks that payment circuit works properly.
	if err := s2.ForwardPackets(nil, ogPacket); err != nil {
		t.Fatal(err)
	}

	select {
	case packet = <-aliceChannelLink.packets:
		if err := aliceChannelLink.completeCircuit(packet); err != nil {
			t.Fatalf("unable to complete circuit with in key=%s: %v",
				packet.inKey(), err)
		}
	case <-time.After(time.Second):
		t.Fatal("request was not propagated to channelPoint")
	}

	// NOTE(review): the assertion below checks for 0 pending circuits,
	// but the failure message says "want 1" — the message looks stale and
	// should read "want 0".
	if s2.circuits.NumPending() != 0 {
		t.Fatalf("wrong amount of half circuits, want 1, got %d",
			s2.circuits.NumPending())
	}
	if s2.circuits.NumOpen() != 0 {
		t.Fatal("wrong amount of circuits")
	}

	// Restart the switch a second time to verify that the fully settled
	// circuit does not resurface from the persisted circuit map.
	if err := s2.Stop(); err != nil {
		t.Fatal(err)
	}
	// NOTE(review): t.Fatalf with a non-constant format string trips
	// go vet's printf check; t.Fatal(err) would be safer here.
	if err := cdb2.Close(); err != nil {
		t.Fatalf(err.Error())
	}

	cdb3, err := channeldb.Open(tempPath)
	require.NoError(t, err, "unable to reopen channeldb")
	t.Cleanup(func() {
		cdb3.Close()
	})

	s3, err := initSwitchWithDB(testStartingHeight, cdb3)
	require.NoError(t, err, "unable reinit switch")
	if err := s3.Start(); err != nil {
		t.Fatalf("unable to restart switch: %v", err)
	}
	defer s3.Stop()

	// Recreate both links against the freshly restarted switch s3.
	aliceChannelLink = newMockChannelLink(
		s3, chanID1, aliceChanID, emptyScid, alicePeer, true,
		false, false, false,
	)
	bobChannelLink = newMockChannelLink(
		s3, chanID2, bobChanID, emptyScid, bobPeer, true,
		false, false, false,
	)
	if err := s3.AddLink(aliceChannelLink); err != nil {
		t.Fatalf("unable to add alice link: %v", err)
	}
	if
err := s3.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } if s3.circuits.NumPending() != 0 { t.Fatalf("wrong amount of half circuits") } if s3.circuits.NumOpen() != 0 { t.Fatalf("wrong amount of circuits") } } type multiHopFwdTest struct { name string eligible1, eligible2 bool failure1, failure2 *LinkError expectedReply lnwire.FailCode } // TestCircularForwards tests the allowing/disallowing of circular payments // through the same channel in the case where the switch is configured to allow // and disallow same channel circular forwards. func TestCircularForwards(t *testing.T) { chanID1, aliceChanID := genID() preimage := [sha256.Size]byte{1} hash := sha256.Sum256(preimage[:]) tests := []struct { name string allowCircularPayment bool expectedErr error }{ { name: "circular payment allowed", allowCircularPayment: true, expectedErr: nil, }, { name: "circular payment disallowed", allowCircularPayment: false, expectedErr: NewDetailedLinkError( lnwire.NewTemporaryChannelFailure(nil), OutgoingFailureCircularRoute, ), }, } for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { t.Parallel() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } s, err := initSwitchWithTempDB(t, testStartingHeight) if err != nil { t.Fatalf("unable to init switch: %v", err) } if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } defer func() { _ = s.Stop() }() // Set the switch to allow or disallow circular routes // according to the test's requirements. s.cfg.AllowCircularRoute = test.allowCircularPayment aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } // Create a new packet that loops through alice's link // in a circle. 
obfuscator := NewMockObfuscator() packet := &htlcPacket{ incomingChanID: aliceChannelLink.ShortChanID(), outgoingChanID: aliceChannelLink.ShortChanID(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: hash, Amount: 1, }, obfuscator: obfuscator, } // Attempt to forward the packet and check for the expected // error. if err = s.ForwardPackets(nil, packet); err != nil { t.Fatal(err) } select { case p := <-aliceChannelLink.packets: if p.linkFailure != nil { err = p.linkFailure } case <-time.After(time.Second): t.Fatal("no timely reply from switch") } if !reflect.DeepEqual(err, test.expectedErr) { t.Fatalf("expected: %v, got: %v", test.expectedErr, err) } // Ensure that no circuits were opened. if s.circuits.NumOpen() > 0 { t.Fatal("do not expect any open circuits") } }) } } // TestCheckCircularForward tests the error returned by checkCircularForward // in cases where we allow and disallow same channel circular forwards. func TestCheckCircularForward(t *testing.T) { tests := []struct { name string // aliasMapping determines whether the test should add an alias // mapping to Switch alias maps before checkCircularForward. aliasMapping bool // allowCircular determines whether we should allow circular // forwards. allowCircular bool // incomingLink is the link that the htlc arrived on. incomingLink lnwire.ShortChannelID // outgoingLink is the link that the htlc forward // is destined to leave on. outgoingLink lnwire.ShortChannelID // expectedErr is the error we expect to be returned. 
expectedErr *LinkError }{ { name: "not circular, allowed in config", aliasMapping: false, allowCircular: true, incomingLink: lnwire.NewShortChanIDFromInt(123), outgoingLink: lnwire.NewShortChanIDFromInt(321), expectedErr: nil, }, { name: "not circular, not allowed in config", aliasMapping: false, allowCircular: false, incomingLink: lnwire.NewShortChanIDFromInt(123), outgoingLink: lnwire.NewShortChanIDFromInt(321), expectedErr: nil, }, { name: "circular, allowed in config", aliasMapping: false, allowCircular: true, incomingLink: lnwire.NewShortChanIDFromInt(123), outgoingLink: lnwire.NewShortChanIDFromInt(123), expectedErr: nil, }, { name: "circular, not allowed in config", aliasMapping: false, allowCircular: false, incomingLink: lnwire.NewShortChanIDFromInt(123), outgoingLink: lnwire.NewShortChanIDFromInt(123), expectedErr: NewDetailedLinkError( lnwire.NewTemporaryChannelFailure(nil), OutgoingFailureCircularRoute, ), }, { name: "circular with map, not allowed", aliasMapping: true, allowCircular: false, incomingLink: lnwire.NewShortChanIDFromInt(1 << 60), outgoingLink: lnwire.NewShortChanIDFromInt(1 << 55), expectedErr: NewDetailedLinkError( lnwire.NewTemporaryChannelFailure(nil), OutgoingFailureCircularRoute, ), }, { name: "circular with map, not allowed 2", aliasMapping: true, allowCircular: false, incomingLink: lnwire.NewShortChanIDFromInt(1 << 55), outgoingLink: lnwire.NewShortChanIDFromInt(1 << 60), expectedErr: NewDetailedLinkError( lnwire.NewTemporaryChannelFailure(nil), OutgoingFailureCircularRoute, ), }, { name: "circular with map, allowed", aliasMapping: true, allowCircular: true, incomingLink: lnwire.NewShortChanIDFromInt(1 << 60), outgoingLink: lnwire.NewShortChanIDFromInt(1 << 55), expectedErr: nil, }, { name: "circular with map, allowed 2", aliasMapping: true, allowCircular: true, incomingLink: lnwire.NewShortChanIDFromInt(1 << 55), outgoingLink: lnwire.NewShortChanIDFromInt(1 << 61), expectedErr: nil, }, { name: "not circular, both confirmed SCID", 
aliasMapping: false, allowCircular: false, incomingLink: lnwire.NewShortChanIDFromInt(1 << 60), outgoingLink: lnwire.NewShortChanIDFromInt(1 << 61), expectedErr: nil, }, } for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { t.Parallel() s, err := initSwitchWithTempDB(t, testStartingHeight) require.NoError(t, err) err = s.Start() require.NoError(t, err) defer func() { _ = s.Stop() }() if test.aliasMapping { // Make the incoming and outgoing point to the // same base SCID. inScid := test.incomingLink outScid := test.outgoingLink s.indexMtx.Lock() s.baseIndex[inScid] = outScid s.baseIndex[outScid] = outScid s.indexMtx.Unlock() } // Check for a circular forward, the hash passed can // be nil because it is only used for logging. err = s.checkCircularForward( test.incomingLink, test.outgoingLink, test.allowCircular, lntypes.Hash{}, ) if !reflect.DeepEqual(err, test.expectedErr) { t.Fatalf("expected: %v, got: %v", test.expectedErr, err) } }) } } // TestSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes // along, then we won't attempt to forward it down al ink that isn't yet able // to forward any HTLC's. func TestSkipIneligibleLinksMultiHopForward(t *testing.T) { tests := []multiHopFwdTest{ // None of the channels is eligible. { name: "not eligible", expectedReply: lnwire.CodeUnknownNextPeer, }, // Channel one has a policy failure and the other channel isn't // available. { name: "policy fail", eligible1: true, failure1: NewLinkError( lnwire.NewFinalIncorrectCltvExpiry(0), ), expectedReply: lnwire.CodeFinalIncorrectCltvExpiry, }, // The requested channel is not eligible, but the packet is // forwarded through the other channel. { name: "non-strict success", eligible2: true, expectedReply: lnwire.CodeNone, }, // The requested channel has insufficient bandwidth and the // other channel's policy isn't satisfied. 
{ name: "non-strict policy fail", eligible1: true, failure1: NewDetailedLinkError( lnwire.NewTemporaryChannelFailure(nil), OutgoingFailureInsufficientBalance, ), eligible2: true, failure2: NewLinkError( lnwire.NewFinalIncorrectCltvExpiry(0), ), expectedReply: lnwire.CodeTemporaryChannelFailure, }, } for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { testSkipIneligibleLinksMultiHopForward(t, &test) }) } } // testSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes // along, then we won't attempt to forward it down al ink that isn't yet able // to forward any HTLC's. func testSkipIneligibleLinksMultiHopForward(t *testing.T, testCase *multiHopFwdTest) { t.Parallel() var packet *htlcPacket alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") bobPeer, err := newMockServer( t, "bob", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create bob server") s, err := initSwitchWithTempDB(t, testStartingHeight) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } defer s.Stop() chanID1, aliceChanID := genID() aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) // We'll create a link for Bob, but mark the link as unable to forward // any new outgoing HTLC's. 
chanID2, bobChanID2 := genID() bobChannelLink1 := newMockChannelLink( s, chanID2, bobChanID2, emptyScid, bobPeer, testCase.eligible1, false, false, false, ) bobChannelLink1.checkHtlcForwardResult = testCase.failure1 chanID3, bobChanID3 := genID() bobChannelLink2 := newMockChannelLink( s, chanID3, bobChanID3, emptyScid, bobPeer, testCase.eligible2, false, false, false, ) bobChannelLink2.checkHtlcForwardResult = testCase.failure2 if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s.AddLink(bobChannelLink1); err != nil { t.Fatalf("unable to add bob link: %v", err) } if err := s.AddLink(bobChannelLink2); err != nil { t.Fatalf("unable to add bob link: %v", err) } // Create a new packet that's destined for Bob as an incoming HTLC from // Alice. preimage := [sha256.Size]byte{1} rhash := sha256.Sum256(preimage[:]) obfuscator := NewMockObfuscator() packet = &htlcPacket{ incomingChanID: aliceChannelLink.ShortChanID(), incomingHTLCID: 0, outgoingChanID: bobChannelLink1.ShortChanID(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, }, obfuscator: obfuscator, } // The request to forward should fail as if err := s.ForwardPackets(nil, packet); err != nil { t.Fatal(err) } // We select from all links and extract the error if exists. // The packet must be selected but we don't always expect a link error. 
	// Collect the outcome: exactly one of the three links must receive
	// the packet; only some test cases expect a link failure attached.
	var linkError *LinkError
	select {
	case p := <-aliceChannelLink.packets:
		linkError = p.linkFailure
	case p := <-bobChannelLink1.packets:
		linkError = p.linkFailure
	case p := <-bobChannelLink2.packets:
		linkError = p.linkFailure
	case <-time.After(time.Second):
		t.Fatal("no timely reply from switch")
	}

	// The obfuscator records the failure message that was encrypted, if
	// any; compare it against the reply code the test case expects.
	failure := obfuscator.(*mockObfuscator).failure
	if testCase.expectedReply == lnwire.CodeNone {
		if linkError != nil {
			t.Fatalf("forwarding should have succeeded")
		}
		if failure != nil {
			t.Fatalf("unexpected failure %T", failure)
		}
	} else {
		if linkError == nil {
			t.Fatalf("forwarding should have failed due to " +
				"inactive link")
		}
		if failure.Code() != testCase.expectedReply {
			t.Fatalf("unexpected failure %T", failure)
		}
	}

	// A failed forward must not leave an open circuit behind.
	if s.circuits.NumOpen() != 0 {
		t.Fatal("wrong amount of circuits")
	}
}

// TestSkipIneligibleLinksLocalForward ensures that the switch will not attempt
// to forward any HTLC's down a link that isn't yet eligible for forwarding.
func TestSkipIneligibleLinksLocalForward(t *testing.T) {
	t.Parallel()

	testSkipLinkLocalForward(t, false, nil)
}

// TestSkipPolicyUnsatisfiedLinkLocalForward ensures that the switch will not
// attempt to send locally initiated HTLCs that would violate the channel policy
// down a link.
func TestSkipPolicyUnsatisfiedLinkLocalForward(t *testing.T) {
	t.Parallel()

	testSkipLinkLocalForward(t, true,
		lnwire.NewTemporaryChannelFailure(nil))
}

// testSkipLinkLocalForward is the shared driver for the two tests above: it
// sends a locally-initiated HTLC over a link whose eligibility and policy
// result are controlled by the parameters, and expects SendHTLC to fail.
func testSkipLinkLocalForward(t *testing.T, eligible bool,
	policyResult lnwire.FailureMessage) {

	// We'll create a single link for this test, marking it as being unable
	// to forward from the get-go.
alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") s, err := initSwitchWithTempDB(t, testStartingHeight) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } defer s.Stop() chanID1, _, aliceChanID, _ := genIDs() aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, eligible, false, false, false, ) aliceChannelLink.checkHtlcTransitResult = NewLinkError( policyResult, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } preimage, err := genPreimage() require.NoError(t, err, "unable to generate preimage") rhash := sha256.Sum256(preimage[:]) addMsg := &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, } // We'll attempt to send out a new HTLC that has Alice as the first // outgoing link. This should fail as Alice isn't yet able to forward // any active HTLC's. err = s.SendHTLC(aliceChannelLink.ShortChanID(), 0, addMsg) if err == nil { t.Fatalf("local forward should fail due to inactive link") } if s.circuits.NumOpen() != 0 { t.Fatal("wrong amount of circuits") } } // TestSwitchCancel checks that if htlc was rejected we remove unused // circuits. 
func TestSwitchCancel(t *testing.T) { t.Parallel() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") bobPeer, err := newMockServer( t, "bob", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create bob server") s, err := initSwitchWithTempDB(t, testStartingHeight) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } defer s.Stop() chanID1, chanID2, aliceChanID, bobChanID := genIDs() aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink := newMockChannelLink( s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } // Create request which should be forwarder from alice channel link // to bob channel link. preimage, err := genPreimage() require.NoError(t, err, "unable to generate preimage") rhash := sha256.Sum256(preimage[:]) request := &htlcPacket{ incomingChanID: aliceChannelLink.ShortChanID(), incomingHTLCID: 0, outgoingChanID: bobChannelLink.ShortChanID(), obfuscator: NewMockObfuscator(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, }, } // Handle the request and checks that bob channel link received it. 
if err := s.ForwardPackets(nil, request); err != nil { t.Fatal(err) } select { case packet := <-bobChannelLink.packets: if err := bobChannelLink.completeCircuit(packet); err != nil { t.Fatalf("unable to complete payment circuit: %v", err) } case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } if s.circuits.NumPending() != 1 { t.Fatalf("wrong amount of half circuits") } if s.circuits.NumOpen() != 1 { t.Fatal("wrong amount of circuits") } // Create settle request pretending that bob channel link handled // the add htlc request and sent the htlc settle request back. This // request should be forwarder back to alice channel link. request = &htlcPacket{ outgoingChanID: bobChannelLink.ShortChanID(), outgoingHTLCID: 0, amount: 1, htlc: &lnwire.UpdateFailHTLC{}, } // Handle the request and checks that payment circuit works properly. if err := s.ForwardPackets(nil, request); err != nil { t.Fatal(err) } select { case pkt := <-aliceChannelLink.packets: if err := aliceChannelLink.completeCircuit(pkt); err != nil { t.Fatalf("unable to remove circuit: %v", err) } case <-time.After(time.Second): t.Fatal("request was not propagated to channelPoint") } if s.circuits.NumPending() != 0 { t.Fatal("wrong amount of circuits") } if s.circuits.NumOpen() != 0 { t.Fatal("wrong amount of circuits") } } // TestSwitchAddSamePayment tests that we send the payment with the same // payment hash. 
func TestSwitchAddSamePayment(t *testing.T) { t.Parallel() chanID1, chanID2, aliceChanID, bobChanID := genIDs() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") bobPeer, err := newMockServer( t, "bob", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create bob server") s, err := initSwitchWithTempDB(t, testStartingHeight) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } defer s.Stop() aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink := newMockChannelLink( s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } // Create request which should be forwarder from alice channel link // to bob channel link. preimage, err := genPreimage() require.NoError(t, err, "unable to generate preimage") rhash := sha256.Sum256(preimage[:]) request := &htlcPacket{ incomingChanID: aliceChannelLink.ShortChanID(), incomingHTLCID: 0, outgoingChanID: bobChannelLink.ShortChanID(), obfuscator: NewMockObfuscator(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, }, } // Handle the request and checks that bob channel link received it. 
if err := s.ForwardPackets(nil, request); err != nil { t.Fatal(err) } select { case packet := <-bobChannelLink.packets: if err := bobChannelLink.completeCircuit(packet); err != nil { t.Fatalf("unable to complete payment circuit: %v", err) } case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } if s.circuits.NumOpen() != 1 { t.Fatal("wrong amount of circuits") } request = &htlcPacket{ incomingChanID: aliceChannelLink.ShortChanID(), incomingHTLCID: 1, outgoingChanID: bobChannelLink.ShortChanID(), obfuscator: NewMockObfuscator(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, }, } // Handle the request and checks that bob channel link received it. if err := s.ForwardPackets(nil, request); err != nil { t.Fatal(err) } select { case packet := <-bobChannelLink.packets: if err := bobChannelLink.completeCircuit(packet); err != nil { t.Fatalf("unable to complete payment circuit: %v", err) } case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } if s.circuits.NumOpen() != 2 { t.Fatal("wrong amount of circuits") } // Create settle request pretending that bob channel link handled // the add htlc request and sent the htlc settle request back. This // request should be forwarder back to alice channel link. request = &htlcPacket{ outgoingChanID: bobChannelLink.ShortChanID(), outgoingHTLCID: 0, amount: 1, htlc: &lnwire.UpdateFailHTLC{}, } // Handle the request and checks that payment circuit works properly. 
if err := s.ForwardPackets(nil, request); err != nil { t.Fatal(err) } select { case pkt := <-aliceChannelLink.packets: if err := aliceChannelLink.completeCircuit(pkt); err != nil { t.Fatalf("unable to remove circuit: %v", err) } case <-time.After(time.Second): t.Fatal("request was not propagated to channelPoint") } if s.circuits.NumOpen() != 1 { t.Fatal("wrong amount of circuits") } request = &htlcPacket{ outgoingChanID: bobChannelLink.ShortChanID(), outgoingHTLCID: 1, amount: 1, htlc: &lnwire.UpdateFailHTLC{}, } // Handle the request and checks that payment circuit works properly. if err := s.ForwardPackets(nil, request); err != nil { t.Fatal(err) } select { case pkt := <-aliceChannelLink.packets: if err := aliceChannelLink.completeCircuit(pkt); err != nil { t.Fatalf("unable to remove circuit: %v", err) } case <-time.After(time.Second): t.Fatal("request was not propagated to channelPoint") } if s.circuits.NumOpen() != 0 { t.Fatal("wrong amount of circuits") } } // TestSwitchSendPayment tests ability of htlc switch to respond to the // users when response is came back from channel link. func TestSwitchSendPayment(t *testing.T) { t.Parallel() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") s, err := initSwitchWithTempDB(t, testStartingHeight) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } defer s.Stop() chanID1, _, aliceChanID, _ := genIDs() aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add link: %v", err) } // Create request which should be forwarder from alice channel link // to bob channel link. 
preimage, err := genPreimage() require.NoError(t, err, "unable to generate preimage") rhash := sha256.Sum256(preimage[:]) update := &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, } paymentID := uint64(123) // First check that the switch will correctly respond that this payment // ID is unknown. _, err = s.GetAttemptResult( paymentID, rhash, newMockDeobfuscator(), ) if err != ErrPaymentIDNotFound { t.Fatalf("expected ErrPaymentIDNotFound, got %v", err) } // Handle the request and checks that bob channel link received it. errChan := make(chan error) go func() { err := s.SendHTLC( aliceChannelLink.ShortChanID(), paymentID, update, ) if err != nil { errChan <- err return } resultChan, err := s.GetAttemptResult( paymentID, rhash, newMockDeobfuscator(), ) if err != nil { errChan <- err return } result, ok := <-resultChan if !ok { errChan <- fmt.Errorf("shutting down") } if result.Error != nil { errChan <- result.Error return } errChan <- nil }() select { case packet := <-aliceChannelLink.packets: if err := aliceChannelLink.completeCircuit(packet); err != nil { t.Fatalf("unable to complete payment circuit: %v", err) } case err := <-errChan: if err != nil { t.Fatalf("unable to send payment: %v", err) } case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } if s.circuits.NumOpen() != 1 { t.Fatal("wrong amount of circuits") } // Create fail request pretending that bob channel link handled // the add htlc request with error and sent the htlc fail request // back. This request should be forwarded back to alice channel link. 
obfuscator := NewMockObfuscator() failure := lnwire.NewFailIncorrectDetails(update.Amount, 100) reason, err := obfuscator.EncryptFirstHop(failure) require.NoError(t, err, "unable obfuscate failure") if s.IsForwardedHTLC(aliceChannelLink.ShortChanID(), update.ID) { t.Fatal("htlc should be identified as not forwarded") } packet := &htlcPacket{ outgoingChanID: aliceChannelLink.ShortChanID(), outgoingHTLCID: 0, amount: 1, htlc: &lnwire.UpdateFailHTLC{ Reason: reason, }, } if err := s.ForwardPackets(nil, packet); err != nil { t.Fatalf("can't forward htlc packet: %v", err) } select { case err := <-errChan: assertFailureCode( t, err, lnwire.CodeIncorrectOrUnknownPaymentDetails, ) case <-time.After(time.Second): t.Fatal("err wasn't received") } } // TestLocalPaymentNoForwardingEvents tests that if we send a series of locally // initiated payments, then they aren't reflected in the forwarding log. func TestLocalPaymentNoForwardingEvents(t *testing.T) { t.Parallel() // First, we'll create our traditional three hop network. We'll only be // interacting with and asserting the state of the first end point for // this test. channels, _, err := createClusterChannels( t, btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5, ) require.NoError(t, err, "unable to create channel") n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, channels.bobToCarol, channels.carolToBob, testStartingHeight) if err := n.start(); err != nil { t.Fatalf("unable to start three hop network: %v", err) } // We'll now craft and send a payment from Alice to Bob. amount := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin) htlcAmt, totalTimelock, hops := generateHops( amount, testStartingHeight, n.firstBobChannelLink, ) // With the payment crafted, we'll send it from Alice to Bob. We'll // wait for Alice to receive the preimage for the payment before // proceeding. 
receiver := n.bobServer firstHop := n.firstBobChannelLink.ShortChanID() _, err = makePayment( n.aliceServer, receiver, firstHop, hops, amount, htlcAmt, totalTimelock, ).Wait(30 * time.Second) require.NoError(t, err, "unable to make the payment") // At this point, we'll forcibly stop the three hop network. Doing // this will cause any pending forwarding events to be flushed by the // various switches in the network. n.stop() // With all the switches stopped, we'll fetch Alice's mock forwarding // event log. log, ok := n.aliceServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog) if !ok { t.Fatalf("mockForwardingLog assertion failed") } log.Lock() defer log.Unlock() // If we examine the memory of the forwarding log, then it should be // blank. if len(log.events) != 0 { t.Fatalf("log should have no events, instead has: %v", spew.Sdump(log.events)) } } // TestMultiHopPaymentForwardingEvents tests that if we send a series of // multi-hop payments via Alice->Bob->Carol. Then Bob properly logs forwarding // events, while Alice and Carol don't. func TestMultiHopPaymentForwardingEvents(t *testing.T) { t.Parallel() // First, we'll create our traditional three hop network. channels, _, err := createClusterChannels( t, btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5, ) require.NoError(t, err, "unable to create channel") n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, channels.bobToCarol, channels.carolToBob, testStartingHeight) if err := n.start(); err != nil { t.Fatalf("unable to start three hop network: %v", err) } // We'll make now 10 payments, of 100k satoshis each from Alice to // Carol via Bob. 
	const numPayments = 10
	finalAmt := lnwire.NewMSatFromSatoshis(100000)
	htlcAmt, totalTimelock, hops := generateHops(
		finalAmt, testStartingHeight,
		n.firstBobChannelLink, n.carolChannelLink,
	)
	firstHop := n.firstBobChannelLink.ShortChanID()

	// Send the first half of the payments. Each send blocks until the
	// payment either settles or the 30 second timeout fires.
	for i := 0; i < numPayments/2; i++ {
		_, err := makePayment(
			n.aliceServer, n.carolServer, firstHop, hops, finalAmt,
			htlcAmt, totalTimelock,
		).Wait(30 * time.Second)
		if err != nil {
			t.Fatalf("unable to send payment: %v", err)
		}
	}

	bobLog, ok := n.bobServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog)
	if !ok {
		t.Fatalf("mockForwardingLog assertion failed")
	}

	// After sending 5 of the payments, trigger the forwarding ticker, to
	// make sure the events are properly flushed.
	bobTicker, ok := n.bobServer.htlcSwitch.cfg.FwdEventTicker.(*ticker.Force)
	if !ok {
		t.Fatalf("mockTicker assertion failed")
	}

	// We'll trigger the ticker, and wait for the events to appear in Bob's
	// forwarding log.
	timeout := time.After(15 * time.Second)
	for {
		// Force a tick; flushing is asynchronous, so the events may
		// not appear immediately after the tick is consumed.
		select {
		case bobTicker.Force <- time.Now():
		case <-time.After(1 * time.Second):
			t.Fatalf("unable to force tick")
		}

		// If all 5 events are found in Bob's log, we can break out and
		// continue the test.
		bobLog.Lock()
		if len(bobLog.events) == 5 {
			bobLog.Unlock()
			break
		}
		bobLog.Unlock()

		// Otherwise wait a little bit before checking again.
		select {
		case <-time.After(50 * time.Millisecond):
		case <-timeout:
			bobLog.Lock()
			defer bobLog.Unlock()

			t.Fatalf("expected 5 events in event log, instead "+
				"found: %v", spew.Sdump(bobLog.events))
		}
	}

	// Send the remaining payments.
	for i := numPayments / 2; i < numPayments; i++ {
		_, err := makePayment(
			n.aliceServer, n.carolServer, firstHop, hops, finalAmt,
			htlcAmt, totalTimelock,
		).Wait(30 * time.Second)
		if err != nil {
			t.Fatalf("unable to send payment: %v", err)
		}
	}

	// With all 10 payments sent, we'll now manually stop each of the
	// switches so we can examine their end state.
n.stop() // Alice and Carol shouldn't have any recorded forwarding events, as // they were the source and the sink for these payment flows. aliceLog, ok := n.aliceServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog) if !ok { t.Fatalf("mockForwardingLog assertion failed") } aliceLog.Lock() defer aliceLog.Unlock() if len(aliceLog.events) != 0 { t.Fatalf("log should have no events, instead has: %v", spew.Sdump(aliceLog.events)) } carolLog, ok := n.carolServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog) if !ok { t.Fatalf("mockForwardingLog assertion failed") } carolLog.Lock() defer carolLog.Unlock() if len(carolLog.events) != 0 { t.Fatalf("log should have no events, instead has: %v", spew.Sdump(carolLog.events)) } // Bob on the other hand, should have 10 events. bobLog.Lock() defer bobLog.Unlock() if len(bobLog.events) != 10 { t.Fatalf("log should have 10 events, instead has: %v", spew.Sdump(bobLog.events)) } // Each of the 10 events should have had all fields set properly. for _, event := range bobLog.events { // The incoming and outgoing channels should properly be set for // the event. if event.IncomingChanID != n.aliceChannelLink.ShortChanID() { t.Fatalf("chan id mismatch: expected %v, got %v", event.IncomingChanID, n.aliceChannelLink.ShortChanID()) } if event.OutgoingChanID != n.carolChannelLink.ShortChanID() { t.Fatalf("chan id mismatch: expected %v, got %v", event.OutgoingChanID, n.carolChannelLink.ShortChanID()) } // Additionally, the incoming and outgoing amounts should also // be properly set. if event.AmtIn != htlcAmt { t.Fatalf("incoming amt mismatch: expected %v, got %v", event.AmtIn, htlcAmt) } if event.AmtOut != finalAmt { t.Fatalf("outgoing amt mismatch: expected %v, got %v", event.AmtOut, finalAmt) } } } // TestUpdateFailMalformedHTLCErrorConversion tests that we're able to properly // convert malformed HTLC errors that originate at the direct link, as well as // during multi-hop HTLC forwarding. 
func TestUpdateFailMalformedHTLCErrorConversion(t *testing.T) { t.Parallel() // First, we'll create our traditional three hop network. channels, _, err := createClusterChannels( t, btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5, ) require.NoError(t, err, "unable to create channel") n := newThreeHopNetwork( t, channels.aliceToBob, channels.bobToAlice, channels.bobToCarol, channels.carolToBob, testStartingHeight, ) if err := n.start(); err != nil { t.Fatalf("unable to start three hop network: %v", err) } assertPaymentFailure := func(t *testing.T) { // With the decoder modified, we'll now attempt to send a // payment from Alice to carol. finalAmt := lnwire.NewMSatFromSatoshis(100000) htlcAmt, totalTimelock, hops := generateHops( finalAmt, testStartingHeight, n.firstBobChannelLink, n.carolChannelLink, ) firstHop := n.firstBobChannelLink.ShortChanID() _, err = makePayment( n.aliceServer, n.carolServer, firstHop, hops, finalAmt, htlcAmt, totalTimelock, ).Wait(30 * time.Second) // The payment should fail as Carol is unable to decode the // onion blob sent to her. if err == nil { t.Fatalf("unable to send payment: %v", err) } routingErr := err.(ClearTextError) failureMsg := routingErr.WireMessage() if _, ok := failureMsg.(*lnwire.FailInvalidOnionKey); !ok { t.Fatalf("expected onion failure instead got: %v", routingErr.WireMessage()) } } t.Run("multi-hop error conversion", func(t *testing.T) { // Now that we have our network up, we'll modify the hop // iterator for the Bob <-> Carol channel to fail to decode in // order to simulate either a replay attack or an issue // decoding the onion. n.carolOnionDecoder.decodeFail = true assertPaymentFailure(t) }) t.Run("direct channel error conversion", func(t *testing.T) { // Similar to the above test case, we'll now make the Alice <-> // Bob link always fail to decode an onion. 
This differs from // the above test case in that there's no encryption on the // error at all since Alice will directly receive a // UpdateFailMalformedHTLC message. n.bobOnionDecoder.decodeFail = true assertPaymentFailure(t) }) } // TestSwitchGetAttemptResult tests that the switch interacts as expected with // the circuit map and network result store when looking up the result of a // payment ID. This is important for not to lose results under concurrent // lookup and receiving results. func TestSwitchGetAttemptResult(t *testing.T) { t.Parallel() const paymentID = 123 var preimg lntypes.Preimage preimg[0] = 3 s, err := initSwitchWithTempDB(t, testStartingHeight) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } defer s.Stop() lookup := make(chan *PaymentCircuit, 1) s.circuits = &mockCircuitMap{ lookup: lookup, } // If the payment circuit is not found in the circuit map, the payment // result must be found in the store if available. Since we haven't // added anything to the store yet, ErrPaymentIDNotFound should be // returned. lookup <- nil _, err = s.GetAttemptResult( paymentID, lntypes.Hash{}, newMockDeobfuscator(), ) if err != ErrPaymentIDNotFound { t.Fatalf("expected ErrPaymentIDNotFound, got %v", err) } // Next let the lookup find the circuit in the circuit map. It should // subscribe to payment results, and return the result when available. lookup <- &PaymentCircuit{} resultChan, err := s.GetAttemptResult( paymentID, lntypes.Hash{}, newMockDeobfuscator(), ) require.NoError(t, err, "unable to get payment result") // Add the result to the store. n := &networkResult{ msg: &lnwire.UpdateFulfillHTLC{ PaymentPreimage: preimg, }, unencrypted: true, isResolution: true, } err = s.networkResults.storeResult(paymentID, n) require.NoError(t, err, "unable to store result") // The result should be available. 
	select {
	case res, ok := <-resultChan:
		if !ok {
			t.Fatalf("channel was closed")
		}

		if res.Error != nil {
			t.Fatalf("got unexpected error result")
		}

		if res.Preimage != preimg {
			t.Fatalf("expected preimg %v, got %v",
				preimg, res.Preimage)
		}

	case <-time.After(1 * time.Second):
		t.Fatalf("result not received")
	}

	// As a final test, try to get the result again. Now that it is no
	// longer in the circuit map, it should be immediately available from
	// the store.
	lookup <- nil

	resultChan, err = s.GetAttemptResult(
		paymentID, lntypes.Hash{}, newMockDeobfuscator(),
	)
	require.NoError(t, err, "unable to get payment result")

	select {
	case res, ok := <-resultChan:
		if !ok {
			t.Fatalf("channel was closed")
		}

		if res.Error != nil {
			t.Fatalf("got unexpected error result")
		}

		if res.Preimage != preimg {
			t.Fatalf("expected preimg %v, got %v",
				preimg, res.Preimage)
		}

	case <-time.After(1 * time.Second):
		t.Fatalf("result not received")
	}
}

// TestInvalidFailure tests that the switch returns an unreadable failure error
// if the failure cannot be decrypted.
func TestInvalidFailure(t *testing.T) {
	t.Parallel()

	alicePeer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err, "unable to create alice server")

	s, err := initSwitchWithTempDB(t, testStartingHeight)
	require.NoError(t, err, "unable to init switch")
	if err := s.Start(); err != nil {
		t.Fatalf("unable to start switch: %v", err)
	}
	defer s.Stop()

	chanID1, _, aliceChanID, _ := genIDs()

	// Set up a mock channel link.
	aliceChannelLink := newMockChannelLink(
		s, chanID1, aliceChanID, emptyScid, alicePeer, true, false,
		false, false,
	)
	if err := s.AddLink(aliceChannelLink); err != nil {
		t.Fatalf("unable to add link: %v", err)
	}

	// Create a request which should be forwarded to the mock channel link.
preimage, err := genPreimage() require.NoError(t, err, "unable to generate preimage") rhash := sha256.Sum256(preimage[:]) update := &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, } paymentID := uint64(123) // Send the request. err = s.SendHTLC( aliceChannelLink.ShortChanID(), paymentID, update, ) require.NoError(t, err, "unable to send payment") // Catch the packet and complete the circuit so that the switch is ready // for a response. select { case packet := <-aliceChannelLink.packets: if err := aliceChannelLink.completeCircuit(packet); err != nil { t.Fatalf("unable to complete payment circuit: %v", err) } case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } // Send response packet with an unreadable failure message to the // switch. The reason failed is not relevant, because we mock the // decryption. packet := &htlcPacket{ outgoingChanID: aliceChannelLink.ShortChanID(), outgoingHTLCID: 0, amount: 1, htlc: &lnwire.UpdateFailHTLC{ Reason: []byte{1, 2, 3}, }, } if err := s.ForwardPackets(nil, packet); err != nil { t.Fatalf("can't forward htlc packet: %v", err) } // Get payment result from switch. We expect an unreadable failure // message error. deobfuscator := SphinxErrorDecrypter{ OnionErrorDecrypter: &mockOnionErrorDecryptor{ err: ErrUnreadableFailureMessage, }, } resultChan, err := s.GetAttemptResult( paymentID, rhash, &deobfuscator, ) if err != nil { t.Fatal(err) } select { case result := <-resultChan: if result.Error != ErrUnreadableFailureMessage { t.Fatal("expected unreadable failure message") } case <-time.After(time.Second): t.Fatal("err wasn't received") } // Modify the decryption to simulate that decryption went alright, but // the failure cannot be decoded. 
	deobfuscator = SphinxErrorDecrypter{
		OnionErrorDecrypter: &mockOnionErrorDecryptor{
			sourceIdx: 2,
			message:   []byte{200},
		},
	}

	resultChan, err = s.GetAttemptResult(
		paymentID, rhash, &deobfuscator,
	)
	if err != nil {
		t.Fatal(err)
	}

	select {
	case result := <-resultChan:
		rtErr, ok := result.Error.(ClearTextError)
		if !ok {
			t.Fatal("expected ClearTextError")
		}
		source, ok := rtErr.(*ForwardingError)
		if !ok {
			t.Fatalf("expected forwarding error, got: %T", rtErr)
		}
		if source.FailureSourceIdx != 2 {
			t.Fatal("unexpected error source index")
		}
		// Decryption succeeded but the failure couldn't be decoded,
		// so the wire message must be nil while the source index is
		// still reported.
		if rtErr.WireMessage() != nil {
			t.Fatal("expected empty failure message")
		}

	case <-time.After(time.Second):
		t.Fatal("err wasn't received")
	}
}

// htlcNotifierEvents is a function that generates a set of expected htlc
// notifier events for each node in a three hop network with the dynamic
// values provided. These functions take dynamic values so that changes to
// external systems (such as our default timelock delta) do not break
// these tests.
type htlcNotifierEvents func(channels *clusterChannels, htlcID uint64,
	ts time.Time, htlc *lnwire.UpdateAddHTLC, hops []*hop.Payload,
	preimage *lntypes.Preimage) ([]interface{}, []interface{}, []interface{})

// TestHtlcNotifier tests the notifying of htlc events that are routed over a
// three hop network. It sets up an Alice -> Bob -> Carol network and routes
// payments from Alice -> Carol to test events from the perspective of a
// sending (Alice), forwarding (Bob) and receiving (Carol) node. Test cases
// are present for successful and failed payments.
func TestHtlcNotifier(t *testing.T) {
	tests := []struct {
		name string

		// options is a set of options to apply to the three hop
		// network's servers.
		options []serverOption

		// expectedEvents is a function which returns an expected set
		// of events for the test.
expectedEvents htlcNotifierEvents // iterations is the number of times we will send a payment, // this is used to send more than one payment to force non- // zero htlc indexes to make sure we aren't just checking // default values. iterations int }{ { name: "successful three hop payment", options: nil, expectedEvents: func(channels *clusterChannels, htlcID uint64, ts time.Time, htlc *lnwire.UpdateAddHTLC, hops []*hop.Payload, preimage *lntypes.Preimage) ([]interface{}, []interface{}, []interface{}) { return getThreeHopEvents( channels, htlcID, ts, htlc, hops, nil, preimage, ) }, iterations: 2, }, { name: "failed at forwarding link", // Set a functional option which disables bob as a // forwarding node to force a payment error. options: []serverOption{ serverOptionRejectHtlc(false, true, false), }, expectedEvents: func(channels *clusterChannels, htlcID uint64, ts time.Time, htlc *lnwire.UpdateAddHTLC, hops []*hop.Payload, preimage *lntypes.Preimage) ([]interface{}, []interface{}, []interface{}) { return getThreeHopEvents( channels, htlcID, ts, htlc, hops, &LinkError{ msg: &lnwire.FailChannelDisabled{}, FailureDetail: OutgoingFailureForwardsDisabled, }, preimage, ) }, iterations: 1, }, } for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { testHtcNotifier( t, test.options, test.iterations, test.expectedEvents, ) }) } } // testHtcNotifier runs a htlc notifier test. func testHtcNotifier(t *testing.T, testOpts []serverOption, iterations int, getEvents htlcNotifierEvents) { t.Parallel() // First, we'll create our traditional three hop // network. channels, _, err := createClusterChannels( t, btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5, ) require.NoError(t, err, "unable to create channel") // Mock time so that all events are reported with a static timestamp. now := time.Now() mockTime := func() time.Time { return now } // Create htlc notifiers for each server in the three hop network and // start them. 
aliceNotifier := NewHtlcNotifier(mockTime) if err := aliceNotifier.Start(); err != nil { t.Fatalf("could not start alice notifier") } t.Cleanup(func() { if err := aliceNotifier.Stop(); err != nil { t.Fatalf("failed to stop alice notifier: %v", err) } }) bobNotifier := NewHtlcNotifier(mockTime) if err := bobNotifier.Start(); err != nil { t.Fatalf("could not start bob notifier") } t.Cleanup(func() { if err := bobNotifier.Stop(); err != nil { t.Fatalf("failed to stop bob notifier: %v", err) } }) carolNotifier := NewHtlcNotifier(mockTime) if err := carolNotifier.Start(); err != nil { t.Fatalf("could not start carol notifier") } t.Cleanup(func() { if err := carolNotifier.Stop(); err != nil { t.Fatalf("failed to stop carol notifier: %v", err) } }) // Create a notifier server option which will set our htlc notifiers // for the three hop network. notifierOption := serverOptionWithHtlcNotifier( aliceNotifier, bobNotifier, carolNotifier, ) // Add the htlcNotifier option to any other options // set in the test. options := append(testOpts, notifierOption) // nolint:gocritic n := newThreeHopNetwork( t, channels.aliceToBob, channels.bobToAlice, channels.bobToCarol, channels.carolToBob, testStartingHeight, options..., ) if err := n.start(); err != nil { t.Fatalf("unable to start three hop "+ "network: %v", err) } t.Cleanup(n.stop) // Before we forward anything, subscribe to htlc events // from each notifier. aliceEvents, err := aliceNotifier.SubscribeHtlcEvents() if err != nil { t.Fatalf("could not subscribe to alice's"+ " events: %v", err) } t.Cleanup(aliceEvents.Cancel) bobEvents, err := bobNotifier.SubscribeHtlcEvents() if err != nil { t.Fatalf("could not subscribe to bob's"+ " events: %v", err) } t.Cleanup(bobEvents.Cancel) carolEvents, err := carolNotifier.SubscribeHtlcEvents() if err != nil { t.Fatalf("could not subscribe to carol's"+ " events: %v", err) } t.Cleanup(carolEvents.Cancel) // Send multiple payments, as specified by the test to test incrementing // of htlc ids. 
	for i := 0; i < iterations; i++ {
		// We'll start off by making a payment from
		// Alice -> Bob -> Carol. The preimage, generated
		// by Carol's invoice, is expected in the Settle events.
		htlc, hops, preimage := n.sendThreeHopPayment(t)

		alice, bob, carol := getEvents(
			channels, uint64(i), now, htlc, hops, preimage,
		)

		checkHtlcEvents(t, aliceEvents.Updates(), alice)
		checkHtlcEvents(t, bobEvents.Updates(), bob)
		checkHtlcEvents(t, carolEvents.Updates(), carol)
	}
}

// checkHtlcEvents checks that a subscription has the set of htlc events
// we expect it to have. The expected events must arrive in the given
// order, each within a 5 second timeout, and no further events may
// follow them.
func checkHtlcEvents(t *testing.T, events <-chan interface{},
	expectedEvents []interface{}) {

	t.Helper()

	for _, expected := range expectedEvents {
		select {
		case event := <-events:
			if !reflect.DeepEqual(event, expected) {
				t.Fatalf("expected %v, got: %v", expected,
					event)
			}

		case <-time.After(5 * time.Second):
			t.Fatalf("expected event: %v", expected)
		}
	}

	// Check that there are no unexpected events following. This read is
	// deliberately non-blocking: absence of a buffered event is success.
	select {
	case event := <-events:
		t.Fatalf("unexpected event: %v", event)
	default:
	}
}

// sendThreeHopPayment is a helper function which sends a payment over
// Alice -> Bob -> Carol in a three hop network and returns Alice's first
// htlc, the remainder of the hops, and the payment preimage.
func (n *threeHopNetwork) sendThreeHopPayment(t *testing.T) (*lnwire.UpdateAddHTLC,
	[]*hop.Payload, *lntypes.Preimage) {

	amount := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin)
	htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
		n.firstBobChannelLink, n.carolChannelLink)
	blob, err := generateRoute(hops...)
if err != nil { t.Fatal(err) } invoice, htlc, pid, err := generatePayment( amount, htlcAmt, totalTimelock, blob, ) if err != nil { t.Fatal(err) } err = n.carolServer.registry.AddInvoice( context.Background(), *invoice, htlc.PaymentHash, ) require.NoError(t, err, "unable to add invoice in carol registry") if err := n.aliceServer.htlcSwitch.SendHTLC( n.firstBobChannelLink.ShortChanID(), pid, htlc, ); err != nil { t.Fatalf("could not send htlc") } return htlc, hops, invoice.Terms.PaymentPreimage } // getThreeHopEvents gets the set of htlc events that we expect for a payment // from Alice -> Bob -> Carol. If a non-nil link error is provided, the set // of events will fail on Bob's outgoing link. func getThreeHopEvents(channels *clusterChannels, htlcID uint64, ts time.Time, htlc *lnwire.UpdateAddHTLC, hops []*hop.Payload, linkError *LinkError, preimage *lntypes.Preimage) ([]interface{}, []interface{}, []interface{}) { aliceKey := HtlcKey{ IncomingCircuit: zeroCircuit, OutgoingCircuit: models.CircuitKey{ ChanID: channels.aliceToBob.ShortChanID(), HtlcID: htlcID, }, } // Alice always needs a forwarding event because she initiates the // send. aliceEvents := []interface{}{ &ForwardingEvent{ HtlcKey: aliceKey, HtlcInfo: HtlcInfo{ OutgoingTimeLock: htlc.Expiry, OutgoingAmt: htlc.Amount, }, HtlcEventType: HtlcEventTypeSend, Timestamp: ts, }, } bobKey := HtlcKey{ IncomingCircuit: models.CircuitKey{ ChanID: channels.bobToAlice.ShortChanID(), HtlcID: htlcID, }, OutgoingCircuit: models.CircuitKey{ ChanID: channels.bobToCarol.ShortChanID(), HtlcID: htlcID, }, } bobInfo := HtlcInfo{ IncomingTimeLock: htlc.Expiry, IncomingAmt: htlc.Amount, OutgoingTimeLock: hops[1].FwdInfo.OutgoingCTLV, OutgoingAmt: hops[1].FwdInfo.AmountToForward, } // If we expect the payment to fail, we add failures for alice and // bob, and no events for carol because the payment never reaches her. 
if linkError != nil { aliceEvents = append(aliceEvents, &ForwardingFailEvent{ HtlcKey: aliceKey, HtlcEventType: HtlcEventTypeSend, Timestamp: ts, }, ) bobEvents := []interface{}{ &LinkFailEvent{ HtlcKey: bobKey, HtlcInfo: bobInfo, HtlcEventType: HtlcEventTypeForward, LinkError: linkError, Incoming: false, Timestamp: ts, }, &FinalHtlcEvent{ CircuitKey: bobKey.IncomingCircuit, Settled: false, Offchain: true, Timestamp: ts, }, } return aliceEvents, bobEvents, nil } // If we want to get events for a successful payment, we add a settle // for alice, a forward and settle for bob and a receive settle for // carol. aliceEvents = append( aliceEvents, &SettleEvent{ HtlcKey: aliceKey, Preimage: *preimage, HtlcEventType: HtlcEventTypeSend, Timestamp: ts, }, ) bobEvents := []interface{}{ &ForwardingEvent{ HtlcKey: bobKey, HtlcInfo: bobInfo, HtlcEventType: HtlcEventTypeForward, Timestamp: ts, }, &SettleEvent{ HtlcKey: bobKey, Preimage: *preimage, HtlcEventType: HtlcEventTypeForward, Timestamp: ts, }, &FinalHtlcEvent{ CircuitKey: bobKey.IncomingCircuit, Settled: true, Offchain: true, Timestamp: ts, }, } carolEvents := []interface{}{ &SettleEvent{ HtlcKey: HtlcKey{ IncomingCircuit: models.CircuitKey{ ChanID: channels.carolToBob.ShortChanID(), HtlcID: htlcID, }, OutgoingCircuit: zeroCircuit, }, Preimage: *preimage, HtlcEventType: HtlcEventTypeReceive, Timestamp: ts, }, &FinalHtlcEvent{ CircuitKey: models.CircuitKey{ ChanID: channels.carolToBob.ShortChanID(), HtlcID: htlcID, }, Settled: true, Offchain: true, Timestamp: ts, }, } return aliceEvents, bobEvents, carolEvents } type mockForwardInterceptor struct { t *testing.T interceptedChan chan InterceptedPacket } func (m *mockForwardInterceptor) InterceptForwardHtlc( intercepted InterceptedPacket) error { m.interceptedChan <- intercepted return nil } func (m *mockForwardInterceptor) getIntercepted() InterceptedPacket { m.t.Helper() select { case p := <-m.interceptedChan: return p case <-time.After(time.Second): require.Fail(m.t, 
"timeout") return InterceptedPacket{} } } func assertNumCircuits(t *testing.T, s *Switch, pending, opened int) { if s.circuits.NumPending() != pending { t.Fatalf("wrong amount of half circuits, expected %v but "+ "got %v", pending, s.circuits.NumPending()) } if s.circuits.NumOpen() != opened { t.Fatalf("wrong amount of circuits, expected %v but got %v", opened, s.circuits.NumOpen()) } } func assertOutgoingLinkReceive(t *testing.T, targetLink *mockChannelLink, expectReceive bool) *htlcPacket { // Pull packet from targetLink link. select { case packet := <-targetLink.packets: if !expectReceive { t.Fatal("forward was intercepted, shouldn't land at bob link") } else if err := targetLink.completeCircuit(packet); err != nil { t.Fatalf("unable to complete payment circuit: %v", err) } return packet case <-time.After(time.Second): if expectReceive { t.Fatal("request was not propagated to destination") } } return nil } func assertOutgoingLinkReceiveIntercepted(t *testing.T, targetLink *mockChannelLink) { t.Helper() select { case <-targetLink.packets: case <-time.After(time.Second): t.Fatal("request was not propagated to destination") } } type interceptableSwitchTestContext struct { t *testing.T preimage [sha256.Size]byte rhash [32]byte onionBlob [1366]byte incomingHtlcID uint64 cltvRejectDelta uint32 cltvInterceptDelta uint32 forwardInterceptor *mockForwardInterceptor aliceChannelLink *mockChannelLink bobChannelLink *mockChannelLink s *Switch } func newInterceptableSwitchTestContext( t *testing.T) *interceptableSwitchTestContext { chanID1, chanID2, aliceChanID, bobChanID := genIDs() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create alice server") bobPeer, err := newMockServer( t, "bob", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err, "unable to create bob server") tempPath := t.TempDir() cdb, err := channeldb.Open(tempPath) require.NoError(t, err, "unable to open 
channeldb") t.Cleanup(func() { cdb.Close() }) s, err := initSwitchWithDB(testStartingHeight, cdb) require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, emptyScid, alicePeer, true, false, false, false, ) bobChannelLink := newMockChannelLink( s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false, false, ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } if err := s.AddLink(bobChannelLink); err != nil { t.Fatalf("unable to add bob link: %v", err) } preimage := [sha256.Size]byte{1} ctx := &interceptableSwitchTestContext{ t: t, preimage: preimage, rhash: sha256.Sum256(preimage[:]), onionBlob: [1366]byte{4, 5, 6}, incomingHtlcID: uint64(0), cltvRejectDelta: 10, cltvInterceptDelta: 13, forwardInterceptor: &mockForwardInterceptor{ t: t, interceptedChan: make(chan InterceptedPacket), }, aliceChannelLink: aliceChannelLink, bobChannelLink: bobChannelLink, s: s, } return ctx } func (c *interceptableSwitchTestContext) createTestPacket() *htlcPacket { c.incomingHtlcID++ return &htlcPacket{ incomingChanID: c.aliceChannelLink.ShortChanID(), incomingHTLCID: c.incomingHtlcID, incomingTimeout: testStartingHeight + c.cltvInterceptDelta + 1, outgoingChanID: c.bobChannelLink.ShortChanID(), obfuscator: NewMockObfuscator(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: c.rhash, Amount: 1, OnionBlob: c.onionBlob, }, } } func (c *interceptableSwitchTestContext) finish() { if err := c.s.Stop(); err != nil { c.t.Fatalf(err.Error()) } } func (c *interceptableSwitchTestContext) createSettlePacket( outgoingHTLCID uint64) *htlcPacket { return &htlcPacket{ outgoingChanID: c.bobChannelLink.ShortChanID(), outgoingHTLCID: outgoingHTLCID, amount: 1, htlc: &lnwire.UpdateFulfillHTLC{ PaymentPreimage: c.preimage, }, } } func TestSwitchHoldForward(t *testing.T) { t.Parallel() c := 
newInterceptableSwitchTestContext(t) defer c.finish() notifier := &mock.ChainNotifier{ EpochChan: make(chan *chainntnfs.BlockEpoch, 1), } notifier.EpochChan <- &chainntnfs.BlockEpoch{Height: testStartingHeight} switchForwardInterceptor, err := NewInterceptableSwitch( &InterceptableSwitchConfig{ Switch: c.s, CltvRejectDelta: c.cltvRejectDelta, CltvInterceptDelta: c.cltvInterceptDelta, Notifier: notifier, }, ) require.NoError(t, err) require.NoError(t, switchForwardInterceptor.Start()) switchForwardInterceptor.SetInterceptor(c.forwardInterceptor.InterceptForwardHtlc) linkQuit := make(chan struct{}) // Test a forward that expires too soon. packet := c.createTestPacket() packet.incomingTimeout = testStartingHeight + c.cltvRejectDelta - 1 err = switchForwardInterceptor.ForwardPackets(linkQuit, false, packet) require.NoError(t, err, "can't forward htlc packet") assertOutgoingLinkReceive(t, c.bobChannelLink, false) assertOutgoingLinkReceiveIntercepted(t, c.aliceChannelLink) assertNumCircuits(t, c.s, 0, 0) // Test a forward that expires too soon and can't be failed. packet = c.createTestPacket() packet.incomingTimeout = testStartingHeight + c.cltvRejectDelta - 1 // Simulate an error during the composition of the failure message. currentCallback := c.s.cfg.FetchLastChannelUpdate c.s.cfg.FetchLastChannelUpdate = func( lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) { return nil, errors.New("cannot fetch update") } err = switchForwardInterceptor.ForwardPackets(linkQuit, false, packet) require.NoError(t, err, "can't forward htlc packet") receivedPkt := assertOutgoingLinkReceive(t, c.bobChannelLink, true) assertNumCircuits(t, c.s, 1, 1) require.NoError(t, switchForwardInterceptor.ForwardPackets( linkQuit, false, c.createSettlePacket(receivedPkt.outgoingHTLCID), )) assertOutgoingLinkReceive(t, c.aliceChannelLink, true) assertNumCircuits(t, c.s, 0, 0) c.s.cfg.FetchLastChannelUpdate = currentCallback // Test resume a hold forward. 
assertNumCircuits(t, c.s, 0, 0) err = switchForwardInterceptor.ForwardPackets( linkQuit, false, c.createTestPacket(), ) require.NoError(t, err) assertNumCircuits(t, c.s, 0, 0) assertOutgoingLinkReceive(t, c.bobChannelLink, false) require.NoError(t, switchForwardInterceptor.Resolve(&FwdResolution{ Action: FwdActionResume, Key: c.forwardInterceptor.getIntercepted().IncomingCircuit, })) receivedPkt = assertOutgoingLinkReceive(t, c.bobChannelLink, true) assertNumCircuits(t, c.s, 1, 1) // settling the htlc to close the circuit. err = switchForwardInterceptor.ForwardPackets( linkQuit, false, c.createSettlePacket(receivedPkt.outgoingHTLCID), ) require.NoError(t, err) assertOutgoingLinkReceive(t, c.aliceChannelLink, true) assertNumCircuits(t, c.s, 0, 0) // Test resume a hold forward after disconnection. require.NoError(t, switchForwardInterceptor.ForwardPackets( linkQuit, false, c.createTestPacket(), )) // Wait until the packet is offered to the interceptor. _ = c.forwardInterceptor.getIntercepted() // No forward expected yet. assertNumCircuits(t, c.s, 0, 0) assertOutgoingLinkReceive(t, c.bobChannelLink, false) // Disconnect should resume the forwarding. switchForwardInterceptor.SetInterceptor(nil) receivedPkt = assertOutgoingLinkReceive(t, c.bobChannelLink, true) assertNumCircuits(t, c.s, 1, 1) // Settle the htlc to close the circuit. 
	// Forwarding the settle back through the interceptor should deliver it
	// to Alice and tear down the circuit.
	require.NoError(t, switchForwardInterceptor.ForwardPackets(
		linkQuit, false,
		c.createSettlePacket(receivedPkt.outgoingHTLCID),
	))
	assertOutgoingLinkReceive(t, c.aliceChannelLink, true)
	assertNumCircuits(t, c.s, 0, 0)

	// Test failing a hold forward.
	switchForwardInterceptor.SetInterceptor(
		c.forwardInterceptor.InterceptForwardHtlc,
	)
	require.NoError(t, switchForwardInterceptor.ForwardPackets(
		linkQuit, false, c.createTestPacket(),
	))
	assertNumCircuits(t, c.s, 0, 0)
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)

	require.NoError(t, switchForwardInterceptor.Resolve(&FwdResolution{
		Action:      FwdActionFail,
		Key:         c.forwardInterceptor.getIntercepted().IncomingCircuit,
		FailureCode: lnwire.CodeTemporaryChannelFailure,
	}))
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)
	assertOutgoingLinkReceive(t, c.aliceChannelLink, true)
	assertNumCircuits(t, c.s, 0, 0)

	// Test failing a hold forward with a failure message.
	require.NoError(t,
		switchForwardInterceptor.ForwardPackets(
			linkQuit, false, c.createTestPacket(),
		),
	)
	assertNumCircuits(t, c.s, 0, 0)
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)

	reason := lnwire.OpaqueReason([]byte{1, 2, 3})
	require.NoError(t, switchForwardInterceptor.Resolve(&FwdResolution{
		Action:         FwdActionFail,
		Key:            c.forwardInterceptor.getIntercepted().IncomingCircuit,
		FailureMessage: reason,
	}))
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)

	// The failure that Alice receives must carry the exact opaque reason
	// supplied in the resolution.
	packet = assertOutgoingLinkReceive(t, c.aliceChannelLink, true)
	require.Equal(t, reason, packet.htlc.(*lnwire.UpdateFailHTLC).Reason)
	assertNumCircuits(t, c.s, 0, 0)

	// Test failing a hold forward with a malformed htlc failure.
	err = switchForwardInterceptor.ForwardPackets(
		linkQuit, false, c.createTestPacket(),
	)
	require.NoError(t, err)
	assertNumCircuits(t, c.s, 0, 0)
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)

	code := lnwire.CodeInvalidOnionKey
	require.NoError(t, switchForwardInterceptor.Resolve(&FwdResolution{
		Action:      FwdActionFail,
		Key:         c.forwardInterceptor.getIntercepted().IncomingCircuit,
		FailureCode: code,
	}))
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)
	packet = assertOutgoingLinkReceive(t, c.aliceChannelLink, true)

	// The failure reason should decrypt to the expected malformed-onion
	// wire message carrying the hash of our onion blob.
	failPacket := packet.htlc.(*lnwire.UpdateFailHTLC)
	shaOnionBlob := sha256.Sum256(c.onionBlob[:])
	expectedFailure := &lnwire.FailInvalidOnionKey{
		OnionSHA256: shaOnionBlob,
	}
	fwdErr, err := newMockDeobfuscator().DecryptError(failPacket.Reason)
	require.NoError(t, err)
	require.Equal(t, expectedFailure, fwdErr.WireMessage())
	assertNumCircuits(t, c.s, 0, 0)

	// Test settling a hold forward.
	require.NoError(t, switchForwardInterceptor.ForwardPackets(
		linkQuit, false, c.createTestPacket(),
	))
	assertNumCircuits(t, c.s, 0, 0)
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)

	require.NoError(t, switchForwardInterceptor.Resolve(&FwdResolution{
		Key:      c.forwardInterceptor.getIntercepted().IncomingCircuit,
		Action:   FwdActionSettle,
		Preimage: c.preimage,
	}))
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)
	assertOutgoingLinkReceive(t, c.aliceChannelLink, true)
	assertNumCircuits(t, c.s, 0, 0)

	require.NoError(t, switchForwardInterceptor.Stop())

	// Test always-on interception (RequireInterceptor).
	notifier = &mock.ChainNotifier{
		EpochChan: make(chan *chainntnfs.BlockEpoch, 1),
	}
	notifier.EpochChan <- &chainntnfs.BlockEpoch{Height: testStartingHeight}
	switchForwardInterceptor, err = NewInterceptableSwitch(
		&InterceptableSwitchConfig{
			Switch:             c.s,
			CltvRejectDelta:    c.cltvRejectDelta,
			CltvInterceptDelta: c.cltvInterceptDelta,
			RequireInterceptor: true,
			Notifier:           notifier,
		},
	)
	require.NoError(t, err)
	require.NoError(t, switchForwardInterceptor.Start())

	// Forward a fresh packet. It is expected to be failed immediately,
	// because there is no interceptor registered.
	require.NoError(t, switchForwardInterceptor.ForwardPackets(
		linkQuit, false, c.createTestPacket(),
	))
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)
	assertOutgoingLinkReceive(t, c.aliceChannelLink, true)
	assertNumCircuits(t, c.s, 0, 0)

	// Forward a replayed packet. It is expected to be held until the
	// interceptor connects. To continue the test, it needs to be run in a
	// goroutine.
	errChan := make(chan error)
	go func() {
		errChan <- switchForwardInterceptor.ForwardPackets(
			linkQuit, true, c.createTestPacket(),
		)
	}()

	// Assert that nothing is forwarded to the switch.
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)
	assertNumCircuits(t, c.s, 0, 0)

	// Register an interceptor.
	switchForwardInterceptor.SetInterceptor(
		c.forwardInterceptor.InterceptForwardHtlc,
	)

	// Expect the ForwardPackets call to unblock.
	require.NoError(t, <-errChan)

	// Now expect the queued packet to come through.
	c.forwardInterceptor.getIntercepted()

	// Disconnect and reconnect interceptor.
	switchForwardInterceptor.SetInterceptor(nil)
	switchForwardInterceptor.SetInterceptor(
		c.forwardInterceptor.InterceptForwardHtlc,
	)

	// A replay of the held packet is expected.
	intercepted := c.forwardInterceptor.getIntercepted()

	// Settle the packet.
	require.NoError(t, switchForwardInterceptor.Resolve(&FwdResolution{
		Key:      intercepted.IncomingCircuit,
		Action:   FwdActionSettle,
		Preimage: c.preimage,
	}))
	assertOutgoingLinkReceive(t, c.bobChannelLink, false)
	assertOutgoingLinkReceive(t, c.aliceChannelLink, true)
	assertNumCircuits(t, c.s, 0, 0)

	require.NoError(t, switchForwardInterceptor.Stop())

	// After shutdown, no stray interception may be pending.
	select {
	case <-c.forwardInterceptor.interceptedChan:
		require.Fail(t, "unexpected interception")
	default:
	}
}

// TestInterceptableSwitchWatchDog tests that the interceptable switch fails
// an intercepted HTLC back on its own once the HTLC's expiry height is
// reached before the interceptor has resolved it, and that a late resolution
// then errors.
func TestInterceptableSwitchWatchDog(t *testing.T) {
	t.Parallel()

	c := newInterceptableSwitchTestContext(t)
	defer c.finish()

	// Start interceptable switch.
	notifier := &mock.ChainNotifier{
		EpochChan: make(chan *chainntnfs.BlockEpoch, 1),
	}
	notifier.EpochChan <- &chainntnfs.BlockEpoch{Height: testStartingHeight}
	switchForwardInterceptor, err := NewInterceptableSwitch(
		&InterceptableSwitchConfig{
			Switch:             c.s,
			CltvRejectDelta:    c.cltvRejectDelta,
			CltvInterceptDelta: c.cltvInterceptDelta,
			Notifier:           notifier,
		},
	)
	require.NoError(t, err)
	require.NoError(t, switchForwardInterceptor.Start())

	// Set interceptor.
	switchForwardInterceptor.SetInterceptor(
		c.forwardInterceptor.InterceptForwardHtlc,
	)

	// Receive a packet.
	linkQuit := make(chan struct{})
	packet := c.createTestPacket()
	err = switchForwardInterceptor.ForwardPackets(linkQuit, false, packet)
	require.NoError(t, err, "can't forward htlc packet")

	// Intercept the packet. The auto-fail height reported to the
	// interceptor is the incoming timeout minus the reject delta.
	intercepted := c.forwardInterceptor.getIntercepted()
	require.Equal(t,
		int32(packet.incomingTimeout-c.cltvRejectDelta),
		intercepted.AutoFailHeight,
	)

	// Htlc expires before a resolution from the interceptor.
	notifier.EpochChan <- &chainntnfs.BlockEpoch{
		Height: int32(packet.incomingTimeout) - int32(c.cltvRejectDelta),
	}

	// Expect the htlc to be failed back.
	assertOutgoingLinkReceive(t, c.aliceChannelLink, true)

	// It is too late now to resolve. Expect an error.
	require.Error(t, switchForwardInterceptor.Resolve(&FwdResolution{
		Action:   FwdActionSettle,
		Key:      intercepted.IncomingCircuit,
		Preimage: c.preimage,
	}))
}

// TestSwitchDustForwarding tests that the switch properly fails HTLC's which
// have incoming or outgoing links that breach their fee thresholds.
func TestSwitchDustForwarding(t *testing.T) {
	t.Parallel()

	// We'll create a three-hop network:
	// - Alice has a dust limit of 200sats with Bob
	// - Bob has a dust limit of 800sats with Alice
	// - Bob has a dust limit of 200sats with Carol
	// - Carol has a dust limit of 800sats with Bob
	channels, _, err := createClusterChannels(
		t, btcutil.SatoshiPerBitcoin, btcutil.SatoshiPerBitcoin,
	)
	require.NoError(t, err)

	n := newThreeHopNetwork(
		t, channels.aliceToBob, channels.bobToAlice,
		channels.bobToCarol, channels.carolToBob, testStartingHeight,
	)
	err = n.start()
	require.NoError(t, err)

	// We'll also put Alice and Bob into hodl.ExitSettle mode, such that
	// they won't settle incoming exit-hop HTLC's automatically.
	n.aliceChannelLink.cfg.HodlMask = hodl.ExitSettle.Mask()
	n.firstBobChannelLink.cfg.HodlMask = hodl.ExitSettle.Mask()

	// We'll test that once the default threshold is exceeded on the
	// Alice -> Bob channel, either side's calls to SendHTLC will fail.
	//
	// Alice will send 354 HTLC's of 700sats. Bob will also send 354
	// HTLC's of 700sats.
	numHTLCs := 354
	aliceAttemptID, bobAttemptID := numHTLCs, numHTLCs
	amt := lnwire.NewMSatFromSatoshis(700)
	aliceBobFirstHop := n.aliceChannelLink.ShortChanID()
	sendDustHtlcs(t, n, true, amt, aliceBobFirstHop, numHTLCs)
	sendDustHtlcs(t, n, false, amt, aliceBobFirstHop, numHTLCs)

	// Generate the parameters needed for Bob to send another dust HTLC.
	_, timelock, hops := generateHops(
		amt, testStartingHeight, n.aliceChannelLink,
	)
	blob, err := generateRoute(hops...)
	require.NoError(t, err)

	// Assert that if Bob sends a dust HTLC it will fail.
	failingPreimage := lntypes.Preimage{0, 0, 3}
	failingHash := failingPreimage.Hash()
	failingHtlc := &lnwire.UpdateAddHTLC{
		PaymentHash: failingHash,
		Amount:      amt,
		Expiry:      timelock,
		OnionBlob:   blob,
	}

	// checkAlmostDust polls (300ms interval, 15s timeout) until the sum
	// of the link's dust and the relevant mailbox dust equals the total
	// expected from the 354*2 HTLC's sent above. Returns false on
	// timeout.
	checkAlmostDust := func(link *channelLink, mbox MailBox,
		whoseCommit lntypes.ChannelParty) bool {

		timeout := time.After(15 * time.Second)
		pollInterval := 300 * time.Millisecond
		expectedDust := 354 * 2 * amt

		for {
			<-time.After(pollInterval)

			select {
			case <-timeout:
				return false
			default:
			}

			linkDust := link.getDustSum(
				whoseCommit,
				fn.None[chainfee.SatPerKWeight](),
			)

			localMailDust, remoteMailDust := mbox.DustPackets()

			// Count mailbox dust for the commitment side under
			// inspection.
			totalDust := linkDust
			if whoseCommit.IsRemote() {
				totalDust += remoteMailDust
			} else {
				totalDust += localMailDust
			}

			if totalDust == expectedDust {
				break
			}
		}

		return true
	}

	// Wait until Bob is almost at the fee threshold.
	bobMbox := n.bobServer.htlcSwitch.mailOrchestrator.GetOrCreateMailBox(
		n.firstBobChannelLink.ChanID(),
		n.firstBobChannelLink.ShortChanID(),
	)
	require.True(t, checkAlmostDust(
		n.firstBobChannelLink, bobMbox, lntypes.Local,
	))

	// Sending one more HTLC should fail. SendHTLC won't error, but the
	// HTLC should be failed backwards.
	err = n.bobServer.htlcSwitch.SendHTLC(
		aliceBobFirstHop, uint64(bobAttemptID), failingHtlc,
	)
	require.Nil(t, err)

	// Use the network result store to ensure the HTLC was failed
	// backwards.
	bobResultChan, err := n.bobServer.htlcSwitch.GetAttemptResult(
		uint64(bobAttemptID), failingHash, newMockDeobfuscator(),
	)
	require.NoError(t, err)
	result, ok := <-bobResultChan
	require.True(t, ok)
	assertFailureCode(
		t, result.Error, lnwire.CodeTemporaryChannelFailure,
	)
	bobAttemptID++

	// Generate the parameters needed for bob to send a non-dust HTLC.
	nondustAmt := lnwire.NewMSatFromSatoshis(10_000)
	_, _, hops = generateHops(
		nondustAmt, testStartingHeight, n.aliceChannelLink,
	)
	blob, err = generateRoute(hops...)
	require.NoError(t, err)

	// Now attempt to send an HTLC above Bob's dust limit. Even though this
	// is not a dust HTLC, it should fail because the increase in weight
	// pushes us over the threshold.
	nondustPreimage := lntypes.Preimage{0, 0, 4}
	nondustHash := nondustPreimage.Hash()
	nondustHtlc := &lnwire.UpdateAddHTLC{
		PaymentHash: nondustHash,
		Amount:      nondustAmt,
		Expiry:      timelock,
		OnionBlob:   blob,
	}
	err = n.bobServer.htlcSwitch.SendHTLC(
		aliceBobFirstHop, uint64(bobAttemptID), nondustHtlc,
	)
	require.NoError(t, err)
	require.True(t, checkAlmostDust(
		n.firstBobChannelLink, bobMbox, lntypes.Local,
	))

	// Check that the HTLC failed.
	bobResultChan, err = n.bobServer.htlcSwitch.GetAttemptResult(
		uint64(bobAttemptID), nondustHash, newMockDeobfuscator(),
	)
	require.NoError(t, err)
	result, ok = <-bobResultChan
	require.True(t, ok)
	assertFailureCode(
		t, result.Error, lnwire.CodeTemporaryChannelFailure,
	)

	// Introduce Carol into the mix and assert that sending a multi-hop
	// dust HTLC to Alice will fail. Bob should fail back the HTLC with a
	// temporary channel failure.
	carolAmt, carolTimelock, carolHops := generateHops(
		amt, testStartingHeight, n.secondBobChannelLink,
		n.aliceChannelLink,
	)
	carolBlob, err := generateRoute(carolHops...)
	require.NoError(t, err)
	carolPreimage := lntypes.Preimage{0, 0, 5}
	carolHash := carolPreimage.Hash()
	carolHtlc := &lnwire.UpdateAddHTLC{
		PaymentHash: carolHash,
		Amount:      carolAmt,
		Expiry:      carolTimelock,
		OnionBlob:   carolBlob,
	}

	// Initialize Carol's attempt ID.
	carolAttemptID := 0
	err = n.carolServer.htlcSwitch.SendHTLC(
		n.carolChannelLink.ShortChanID(), uint64(carolAttemptID),
		carolHtlc,
	)
	require.NoError(t, err)
	carolResultChan, err := n.carolServer.htlcSwitch.GetAttemptResult(
		uint64(carolAttemptID), carolHash, newMockDeobfuscator(),
	)
	require.NoError(t, err)
	result, ok = <-carolResultChan
	require.True(t, ok)
	assertFailureCode(
		t, result.Error, lnwire.CodeTemporaryChannelFailure,
	)

	// Send an HTLC from Alice to Carol and assert that it gets failed.
	htlcAmt, totalTimelock, aliceHops := generateHops(
		amt, testStartingHeight, n.firstBobChannelLink,
		n.carolChannelLink,
	)
	blob, err = generateRoute(aliceHops...)
	require.NoError(t, err)
	aliceMultihopPreimage := lntypes.Preimage{0, 0, 6}
	aliceMultihopHash := aliceMultihopPreimage.Hash()
	aliceMultihopHtlc := &lnwire.UpdateAddHTLC{
		PaymentHash: aliceMultihopHash,
		Amount:      htlcAmt,
		Expiry:      totalTimelock,
		OnionBlob:   blob,
	}

	// Wait until Alice's expected dust for the remote commitment is just
	// under the fee threshold.
	aliceOrch := n.aliceServer.htlcSwitch.mailOrchestrator
	aliceMbox := aliceOrch.GetOrCreateMailBox(
		n.aliceChannelLink.ChanID(), n.aliceChannelLink.ShortChanID(),
	)
	require.True(t, checkAlmostDust(
		n.aliceChannelLink, aliceMbox, lntypes.Remote,
	))

	err = n.aliceServer.htlcSwitch.SendHTLC(
		n.aliceChannelLink.ShortChanID(), uint64(aliceAttemptID),
		aliceMultihopHtlc,
	)
	require.Nil(t, err)

	aliceResultChan, err := n.aliceServer.htlcSwitch.GetAttemptResult(
		uint64(aliceAttemptID), aliceMultihopHash,
		newMockDeobfuscator(),
	)
	require.NoError(t, err)
	result, ok = <-aliceResultChan
	require.True(t, ok)
	assertFailureCode(
		t, result.Error, lnwire.CodeTemporaryChannelFailure,
	)

	// Check that there are numHTLCs circuits open for both Alice and Bob.
	require.Equal(t, numHTLCs, n.aliceServer.htlcSwitch.circuits.NumOpen())
	require.Equal(t, numHTLCs, n.bobServer.htlcSwitch.circuits.NumOpen())
}

// sendDustHtlcs is a helper function used to send many dust HTLC's to test the
// Switch's dust-threshold logic. It takes a boolean denoting whether or not
// Alice is the sender.
func sendDustHtlcs(t *testing.T, n *threeHopNetwork, alice bool,
	amt lnwire.MilliSatoshi, sid lnwire.ShortChannelID, numHTLCs int) {

	t.Helper()

	// Extract the destination into a variable. If alice is the sender,
	// the destination is Bob.
	destLink := n.aliceChannelLink
	if alice {
		destLink = n.firstBobChannelLink
	}

	// Create hops that will be used in the onion payload.
	htlcAmt, totalTimelock, hops := generateHops(
		amt, testStartingHeight, destLink,
	)

	// Convert the hops to a blob that will be put in the Add message.
	blob, err := generateRoute(hops...)
	require.NoError(t, err)

	// Create a slice to store the preimages.
	preimages := make([]lntypes.Preimage, numHTLCs)

	// Initialize the attempt ID used in SendHTLC calls.
	attemptID := uint64(0)

	// Deterministically generate preimages. Avoid the all-zeroes preimage
	// because that will be rejected by the database. We'll use a different
	// third byte for Alice and Bob.
	endByte := byte(2)
	if alice {
		endByte = byte(3)
	}
	for i := 0; i < numHTLCs; i++ {
		preimages[i] = lntypes.Preimage{byte(i >> 8), byte(i), endByte}
	}

	sendingSwitch := n.bobServer.htlcSwitch
	if alice {
		sendingSwitch = n.aliceServer.htlcSwitch
	}

	// Call SendHTLC in a loop for numHTLCs.
	for i := 0; i < numHTLCs; i++ {
		// Construct the htlc packet.
		hash := preimages[i].Hash()
		htlc := &lnwire.UpdateAddHTLC{
			PaymentHash: hash,
			Amount:      htlcAmt,
			Expiry:      totalTimelock,
			OnionBlob:   blob,
		}

		for {
			// It may be the case that the fee threshold is hit
			// before all numHTLCs*2 HTLC's are sent due to double
			// counting. Get around this by continuing to send
			// until successful.
			err = sendingSwitch.SendHTLC(sid, attemptID, htlc)
			if err == nil {
				break
			}
		}
		attemptID++
	}
}

// TestSwitchMailboxDust tests that the switch takes into account the mailbox
// dust when evaluating the fee threshold. The mockChannelLink does not have
// channel state, so this only tests the switch-mailbox interaction.
func TestSwitchMailboxDust(t *testing.T) {
	t.Parallel()

	alicePeer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)
	bobPeer, err := newMockServer(
		t, "bob", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)
	carolPeer, err := newMockServer(
		t, "carol", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)

	s, err := initSwitchWithTempDB(t, testStartingHeight)
	require.NoError(t, err)
	err = s.Start()
	require.NoError(t, err)
	defer func() {
		_ = s.Stop()
	}()

	chanID1, chanID2, aliceChanID, bobChanID := genIDs()
	chanID3, carolChanID := genID()

	aliceLink := newMockChannelLink(
		s, chanID1, aliceChanID, emptyScid, alicePeer, true, false,
		false, false,
	)
	err = s.AddLink(aliceLink)
	require.NoError(t, err)

	bobLink := newMockChannelLink(
		s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false,
		false,
	)
	err = s.AddLink(bobLink)
	require.NoError(t, err)

	carolLink := newMockChannelLink(
		s, chanID3, carolChanID, emptyScid, carolPeer, true, false,
		false, false,
	)
	err = s.AddLink(carolLink)
	require.NoError(t, err)

	// mockChannelLink sets the local and remote dust limits of the mailbox
	// to 400 satoshis and the feerate to 0. We'll fill the mailbox up with
	// dust packets and assert that calls to SendHTLC will fail.
	preimage, err := genPreimage()
	require.NoError(t, err)
	rhash := sha256.Sum256(preimage[:])
	amt := lnwire.NewMSatFromSatoshis(350)
	addMsg := &lnwire.UpdateAddHTLC{
		PaymentHash: rhash,
		Amount:      amt,
		ChanID:      chanID1,
	}

	// Initialize the carolHTLCID.
	var carolHTLCID uint64

	// It will take aliceCount HTLC's of 350sats to fill up Alice's mailbox
	// to the point where another would put Alice over the fee threshold.
	aliceCount := 1428

	// Queue dust packets directly into Alice's mailbox, as if they were
	// forwards coming in from Carol.
	mailbox := s.mailOrchestrator.GetOrCreateMailBox(chanID1, aliceChanID)
	for i := 0; i < aliceCount; i++ {
		alicePkt := &htlcPacket{
			incomingChanID: carolChanID,
			incomingHTLCID: carolHTLCID,
			outgoingChanID: aliceChanID,
			obfuscator:     NewMockObfuscator(),
			incomingAmount: amt,
			amount:         amt,
			htlc:           addMsg,
		}
		err = mailbox.AddPacket(alicePkt)
		require.NoError(t, err)
		carolHTLCID++
	}

	// Sending one more HTLC to Alice should result in the fee threshold
	// being breached.
	err = s.SendHTLC(aliceChanID, 0, addMsg)
	require.ErrorIs(t, err, errFeeExposureExceeded)

	// We'll now call ForwardPackets from Bob to ensure that the mailbox
	// sum is also accounted for in the forwarding case.
	packet := &htlcPacket{
		incomingChanID: bobChanID,
		incomingHTLCID: 0,
		outgoingChanID: aliceChanID,
		obfuscator:     NewMockObfuscator(),
		incomingAmount: amt,
		amount:         amt,
		htlc: &lnwire.UpdateAddHTLC{
			PaymentHash: rhash,
			Amount:      amt,
			ChanID:      chanID1,
		},
	}

	err = s.ForwardPackets(nil, packet)
	require.NoError(t, err)

	// Bob should receive a failure from the switch.
	select {
	case p := <-bobLink.packets:
		require.NotEmpty(t, p.linkFailure)
		assertFailureCode(
			t, p.linkFailure, lnwire.CodeTemporaryChannelFailure,
		)
	case <-time.After(5 * time.Second):
		t.Fatal("no timely reply from switch")
	}
}

// TestSwitchResolution checks the ability of the switch to persist and handle
// resolution messages.
func TestSwitchResolution(t *testing.T) {
	t.Parallel()

	alicePeer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)
	bobPeer, err := newMockServer(
		t, "bob", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)

	s, err := initSwitchWithTempDB(t, testStartingHeight)
	require.NoError(t, err)

	// Even though we intend to Stop s later in the test, it is safe to
	// defer this Stop since its execution is protected by an atomic
	// guard, guaranteeing it executes at most once.
	t.Cleanup(func() {
		var _ = s.Stop()
	})

	err = s.Start()
	require.NoError(t, err)

	chanID1, chanID2, aliceChanID, bobChanID := genIDs()

	aliceChannelLink := newMockChannelLink(
		s, chanID1, aliceChanID, emptyScid, alicePeer, true, false,
		false, false,
	)
	bobChannelLink := newMockChannelLink(
		s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false,
		false,
	)
	err = s.AddLink(aliceChannelLink)
	require.NoError(t, err)
	err = s.AddLink(bobChannelLink)
	require.NoError(t, err)

	// Create an add htlcPacket that Alice will send to Bob.
	preimage, err := genPreimage()
	require.NoError(t, err)
	rhash := sha256.Sum256(preimage[:])

	packet := &htlcPacket{
		incomingChanID: aliceChannelLink.ShortChanID(),
		incomingHTLCID: 0,
		outgoingChanID: bobChannelLink.ShortChanID(),
		obfuscator:     NewMockObfuscator(),
		htlc: &lnwire.UpdateAddHTLC{
			PaymentHash: rhash,
			Amount:      1,
		},
	}

	err = s.ForwardPackets(nil, packet)
	require.NoError(t, err)

	// Bob will receive the packet and open the circuit.
	select {
	case <-bobChannelLink.packets:
		err = bobChannelLink.completeCircuit(packet)
		require.NoError(t, err)
	case <-time.After(time.Second):
		t.Fatal("request was not propagated to destination")
	}

	// Check that only one circuit is open.
	require.Equal(t, 1, s.circuits.NumOpen())

	// We'll send a settle resolution to Switch that should go to Alice.
	settleResMsg := contractcourt.ResolutionMsg{
		SourceChan: bobChanID,
		HtlcIndex:  0,
		PreImage:   &preimage,
	}

	// Before the resolution is sent, remove alice's link so we can assert
	// that the resolution is actually stored. Otherwise, it would be
	// deleted shortly after being sent.
	s.RemoveLink(chanID1)

	// Send the resolution message.
	err = s.ProcessContractResolution(settleResMsg)
	require.NoError(t, err)

	// Assert that the resolution store contains the settle resolution.
	resMsgs, err := s.resMsgStore.fetchAllResolutionMsg()
	require.NoError(t, err)
	require.Equal(t, 1, len(resMsgs))
	require.Equal(t, settleResMsg.SourceChan, resMsgs[0].SourceChan)
	require.Equal(t, settleResMsg.HtlcIndex, resMsgs[0].HtlcIndex)
	require.Nil(t, resMsgs[0].Failure)
	require.Equal(t, preimage, *resMsgs[0].PreImage)

	// Now we'll restart Alice's link and delete the circuit.
	err = s.AddLink(aliceChannelLink)
	require.NoError(t, err)

	// Alice will receive the packet and open the circuit.
	select {
	case alicePkt := <-aliceChannelLink.packets:
		err = aliceChannelLink.completeCircuit(alicePkt)
		require.NoError(t, err)
	case <-time.After(time.Second):
		t.Fatal("request was not propagated to destination")
	}

	// Assert that there are no more circuits.
	require.Equal(t, 0, s.circuits.NumOpen())

	// We'll restart the Switch and assert that Alice does not receive
	// another packet.
	switchDB := s.cfg.DB.(*channeldb.DB)
	err = s.Stop()
	require.NoError(t, err)

	s, err = initSwitchWithDB(testStartingHeight, switchDB)
	require.NoError(t, err)
	err = s.Start()
	require.NoError(t, err)
	defer func() {
		_ = s.Stop()
	}()

	err = s.AddLink(aliceChannelLink)
	require.NoError(t, err)
	err = s.AddLink(bobChannelLink)
	require.NoError(t, err)

	// Alice should not receive a packet since the Switch should have
	// deleted the resolution message since the circuit was closed.
	select {
	case alicePkt := <-aliceChannelLink.packets:
		t.Fatalf("received erroneous packet: %v", alicePkt)
	case <-time.After(time.Second * 5):
	}

	// Check that the resolution message no longer exists in the store.
	resMsgs, err = s.resMsgStore.fetchAllResolutionMsg()
	require.NoError(t, err)
	require.Equal(t, 0, len(resMsgs))
}

// TestSwitchForwardFailAlias tests that if ForwardPackets returns a failure
// before actually forwarding, the ChannelUpdate uses the SCID from the
// incoming channel and does not leak private information like the UTXO.
func TestSwitchForwardFailAlias(t *testing.T) {
	tests := []struct {
		name string

		// Whether or not Alice will be a zero-conf channel or an
		// option-scid-alias channel (feature-bit).
		zeroConf bool
	}{
		{
			name:     "option-scid-alias forwarding failure",
			zeroConf: false,
		},
		{
			name:     "zero-conf forwarding failure",
			zeroConf: true,
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			testSwitchForwardFailAlias(t, test.zeroConf)
		})
	}
}

// testSwitchForwardFailAlias is the per-case body for
// TestSwitchForwardFailAlias.
func testSwitchForwardFailAlias(t *testing.T, zeroConf bool) {
	t.Parallel()

	chanID1, chanID2, aliceChanID, bobChanID := genIDs()

	alicePeer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)
	bobPeer, err := newMockServer(
		t, "bob", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)

	tempPath := t.TempDir()
	cdb, err := channeldb.Open(tempPath)
	require.NoError(t, err)
	t.Cleanup(func() {
		cdb.Close()
	})

	s, err := initSwitchWithDB(testStartingHeight, cdb)
	require.NoError(t, err)
	err = s.Start()
	require.NoError(t, err)

	// Make Alice's channel zero-conf or option-scid-alias (feature bit).
	aliceAlias := lnwire.ShortChannelID{
		BlockHeight: 16_000_000,
		TxIndex:     5,
		TxPosition:  5,
	}

	var aliceLink *mockChannelLink
	if zeroConf {
		aliceLink = newMockChannelLink(
			s, chanID1, aliceAlias, aliceChanID, alicePeer, true,
			true, true, false,
		)
	} else {
		aliceLink = newMockChannelLink(
			s, chanID1, aliceChanID, emptyScid, alicePeer, true,
			true, false, true,
		)
		aliceLink.addAlias(aliceAlias)
	}
	err = s.AddLink(aliceLink)
	require.NoError(t, err)

	bobLink := newMockChannelLink(
		s, chanID2, bobChanID, emptyScid, bobPeer, true, false, false,
		false,
	)
	err = s.AddLink(bobLink)
	require.NoError(t, err)

	// Create a packet that will be sent from Alice to Bob via the switch.
	preimage := [sha256.Size]byte{1}
	rhash := sha256.Sum256(preimage[:])
	ogPacket := &htlcPacket{
		incomingChanID: aliceLink.ShortChanID(),
		incomingHTLCID: 0,
		outgoingChanID: bobLink.ShortChanID(),
		obfuscator:     NewMockObfuscator(),
		htlc: &lnwire.UpdateAddHTLC{
			PaymentHash: rhash,
			Amount:      1,
		},
	}

	// Forward the packet and check that Bob's channel link received it.
	err = s.ForwardPackets(nil, ogPacket)
	require.NoError(t, err)

	// Assert that the circuits are in the expected state.
	require.Equal(t, 1, s.circuits.NumPending())
	require.Equal(t, 0, s.circuits.NumOpen())

	// Pull packet from Bob's link, and do nothing with it.
	select {
	case <-bobLink.packets:
	case <-s.quit:
		t.Fatal("switch shutting down, failed to forward packet")
	}

	// Now we will restart the Switch to trigger the LoadedFromDisk logic.
	err = s.Stop()
	require.NoError(t, err)

	err = cdb.Close()
	require.NoError(t, err)
	cdb2, err := channeldb.Open(tempPath)
	require.NoError(t, err)
	t.Cleanup(func() {
		cdb2.Close()
	})

	s2, err := initSwitchWithDB(testStartingHeight, cdb2)
	require.NoError(t, err)
	err = s2.Start()
	require.NoError(t, err)
	defer func() {
		_ = s2.Stop()
	}()

	// Recreate the links against the restarted switch, mirroring the
	// setup above.
	var aliceLink2 *mockChannelLink
	if zeroConf {
		aliceLink2 = newMockChannelLink(
			s2, chanID1, aliceAlias, aliceChanID, alicePeer, true,
			true, true, false,
		)
	} else {
		aliceLink2 = newMockChannelLink(
			s2, chanID1, aliceChanID, emptyScid, alicePeer, true,
			true, false, true,
		)
		aliceLink2.addAlias(aliceAlias)
	}
	err = s2.AddLink(aliceLink2)
	require.NoError(t, err)

	bobLink2 := newMockChannelLink(
		s2, chanID2, bobChanID, emptyScid, bobPeer, true, false, false,
		false,
	)
	err = s2.AddLink(bobLink2)
	require.NoError(t, err)

	// Reforward the ogPacket and wait for Alice to receive a failure
	// packet.
	err = s2.ForwardPackets(nil, ogPacket)
	require.NoError(t, err)

	select {
	case failPacket := <-aliceLink2.packets:
		// Assert that the failPacket does not leak UTXO information.
		// This means checking that aliceChanID was not returned.
msg := failPacket.linkFailure.msg failMsg, ok := msg.(*lnwire.FailTemporaryChannelFailure) require.True(t, ok) require.Equal(t, aliceAlias, failMsg.Update.ShortChannelID) case <-s2.quit: t.Fatal("switch shutting down, failed to forward packet") } } // TestSwitchAliasFailAdd tests that the mailbox does not leak UTXO information // when failing back an HTLC due to the 5-second timeout. This is tested in the // switch rather than the mailbox because the mailbox tests do not have the // proper context (e.g. the Switch's failAliasUpdate function). The caveat here // is that if the private UTXO is already known, it is fine to send a failure // back. This tests option-scid-alias (feature-bit) and zero-conf channels. func TestSwitchAliasFailAdd(t *testing.T) { tests := []struct { name string // Denotes whether the opened channel will be zero-conf. zeroConf bool // Denotes whether the opened channel will be private. private bool // Denotes whether an alias was used during forwarding. useAlias bool }{ { name: "public zero-conf using alias", zeroConf: true, private: false, useAlias: true, }, { name: "public zero-conf using real", zeroConf: true, private: false, useAlias: true, }, { name: "private zero-conf using alias", zeroConf: true, private: true, useAlias: true, }, { name: "public option-scid-alias using alias", zeroConf: false, private: false, useAlias: true, }, { name: "public option-scid-alias using real", zeroConf: false, private: false, useAlias: false, }, { name: "private option-scid-alias using alias", zeroConf: false, private: true, useAlias: true, }, } for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { testSwitchAliasFailAdd( t, test.zeroConf, test.private, test.useAlias, ) }) } } func testSwitchAliasFailAdd(t *testing.T, zeroConf, private, useAlias bool) { t.Parallel() chanID1, chanID2, aliceChanID, bobChanID := genIDs() alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) require.NoError(t, err) 
	bobPeer, err := newMockServer(
		t, "bob", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)

	tempPath := t.TempDir()
	cdb, err := channeldb.Open(tempPath)
	require.NoError(t, err)
	defer cdb.Close()

	s, err := initSwitchWithDB(testStartingHeight, cdb)
	require.NoError(t, err)

	// Change the mailOrchestrator's expiry to a second so that held
	// packets are failed back quickly.
	s.mailOrchestrator.cfg.expiry = time.Second

	err = s.Start()
	require.NoError(t, err)
	defer func() {
		_ = s.Stop()
	}()

	// Make Alice's channel zero-conf or option-scid-alias (feature bit).
	aliceAlias := lnwire.ShortChannelID{
		BlockHeight: 16_000_000,
		TxIndex:     5,
		TxPosition:  5,
	}
	aliceAlias2 := aliceAlias
	aliceAlias2.TxPosition = 6

	var aliceLink *mockChannelLink
	if zeroConf {
		aliceLink = newMockChannelLink(
			s, chanID1, aliceAlias, aliceChanID, alicePeer, true,
			private, true, false,
		)
		aliceLink.addAlias(aliceAlias2)
	} else {
		aliceLink = newMockChannelLink(
			s, chanID1, aliceChanID, emptyScid, alicePeer, true,
			private, false, true,
		)
		aliceLink.addAlias(aliceAlias)
		aliceLink.addAlias(aliceAlias2)
	}
	err = s.AddLink(aliceLink)
	require.NoError(t, err)

	bobLink := newMockChannelLink(
		s, chanID2, bobChanID, emptyScid, bobPeer, true, true, false,
		false,
	)
	err = s.AddLink(bobLink)
	require.NoError(t, err)

	// Create a packet that Bob will send to Alice via ForwardPackets.
	preimage := [sha256.Size]byte{1}
	rhash := sha256.Sum256(preimage[:])
	ogPacket := &htlcPacket{
		incomingChanID: bobLink.ShortChanID(),
		incomingHTLCID: 0,
		obfuscator:     NewMockObfuscator(),
		htlc: &lnwire.UpdateAddHTLC{
			PaymentHash: rhash,
			Amount:      1,
		},
	}

	// Determine which outgoingChanID to set based on the useAlias boolean.
	outgoingChanID := aliceChanID
	if useAlias {
		// Choose randomly from the 2 possible aliases.
		aliases := aliceLink.getAliases()
		idx := mrand.Intn(len(aliases))
		outgoingChanID = aliases[idx]
	}
	ogPacket.outgoingChanID = outgoingChanID

	// Forward the packet so Alice's mailbox fails it backwards.
	err = s.ForwardPackets(nil, ogPacket)
	require.NoError(t, err)

	// Assert that the circuits are in the expected state.
	require.Equal(t, 1, s.circuits.NumPending())
	require.Equal(t, 0, s.circuits.NumOpen())

	// Wait to receive the packet from Bob's mailbox.
	select {
	case failPacket := <-bobLink.packets:
		// Assert that failPacket returns the expected SCID in the
		// ChannelUpdate.
		msg := failPacket.linkFailure.msg
		failMsg, ok := msg.(*lnwire.FailTemporaryChannelFailure)
		require.True(t, ok)
		require.Equal(t, outgoingChanID, failMsg.Update.ShortChannelID)
	case <-s.quit:
		t.Fatal("switch shutting down, failed to receive fail packet")
	}
}

// TestSwitchHandlePacketForward checks that handlePacketForward (which
// calls CheckHtlcForward) does not leak the UTXO in a failure message for
// alias channels. This test requires us to have a REAL link, which we also
// must modify in order to test it properly (e.g. making it a private channel).
// This doesn't lead to good code, but short of refactoring the link-generation
// code there is not a good alternative.
func TestSwitchHandlePacketForward(t *testing.T) {
	tests := []struct {
		name string

		// Denotes whether or not the channel will be zero-conf.
		zeroConf bool

		// Denotes whether or not the channel will have negotiated the
		// option-scid-alias feature-bit and is not zero-conf.
		optionFeature bool

		// Denotes whether or not the channel will be private.
		private bool

		// Denotes whether or not the alias will be used for
		// forwarding.
		useAlias bool
	}{
		{
			name:     "public zero-conf using alias",
			zeroConf: true,
			private:  false,
			useAlias: true,
		},
		{
			name:     "public zero-conf using real",
			zeroConf: true,
			private:  false,
			useAlias: false,
		},
		{
			name:     "private zero-conf using alias",
			zeroConf: true,
			private:  true,
			useAlias: true,
		},
		{
			name:          "public option-scid-alias using alias",
			zeroConf:      false,
			optionFeature: true,
			private:       false,
			useAlias:      true,
		},
		{
			name:          "public option-scid-alias using real",
			zeroConf:      false,
			optionFeature: true,
			private:       false,
			useAlias:      false,
		},
		{
			name:          "private option-scid-alias using alias",
			zeroConf:      false,
			optionFeature: true,
			private:       true,
			useAlias:      true,
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			testSwitchHandlePacketForward(
				t, test.zeroConf, test.private,
				test.useAlias, test.optionFeature,
			)
		})
	}
}

// testSwitchHandlePacketForward is the per-case body for
// TestSwitchHandlePacketForward.
func testSwitchHandlePacketForward(t *testing.T, zeroConf, private,
	useAlias, optionFeature bool) {

	t.Parallel()

	// Create a link for Alice that we'll add to the switch.
	harness, err := newSingleLinkTestHarness(
		t, btcutil.SatoshiPerBitcoin, 0,
	)
	require.NoError(t, err)

	aliceLink := harness.aliceLink

	s, err := initSwitchWithTempDB(t, testStartingHeight)
	if err != nil {
		t.Fatalf("unable to init switch: %v", err)
	}
	if err := s.Start(); err != nil {
		t.Fatalf("unable to start switch: %v", err)
	}
	defer func() {
		_ = s.Stop()
	}()

	// Change Alice's ShortChanID and OtherShortChanID here.
	aliceAlias := lnwire.ShortChannelID{
		BlockHeight: 16_000_000,
		TxIndex:     5,
		TxPosition:  5,
	}
	aliceAlias2 := aliceAlias
	aliceAlias2.TxPosition = 6

	aliceChannelLink := aliceLink.(*channelLink)
	aliceChannelState := aliceChannelLink.channel.State()

	// Set the link's GetAliases function.
	aliceChannelLink.cfg.GetAliases = func(
		base lnwire.ShortChannelID) []lnwire.ShortChannelID {

		return []lnwire.ShortChannelID{aliceAlias, aliceAlias2}
	}

	if !private {
		// Change the channel to public depending on the test.
		aliceChannelState.ChannelFlags = lnwire.FFAnnounceChannel
	}

	// If this is an option-scid-alias feature-bit non-zero-conf channel,
	// we'll mark the channel as such.
	if optionFeature {
		aliceChannelState.ChanType |= channeldb.ScidAliasFeatureBit
	}

	// This is the ShortChannelID field in the OpenChannel struct.
	aliceScid := aliceLink.ShortChanID()
	if zeroConf {
		// Store the alias in the shortChanID field and mark the real
		// scid in the database.
		err = aliceChannelState.MarkRealScid(aliceScid)
		require.NoError(t, err)
		aliceChannelState.ChanType |= channeldb.ZeroConfBit
	}

	err = s.AddLink(aliceLink)
	require.NoError(t, err)

	// Add a mockChannelLink for Bob.
	bobChanID, bobScid := genID()
	bobPeer, err := newMockServer(
		t, "bob", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)

	bobLink := newMockChannelLink(
		s, bobChanID, bobScid, emptyScid, bobPeer, true, false, false,
		false,
	)
	err = s.AddLink(bobLink)
	require.NoError(t, err)

	// Build the packet Bob forwards to Alice.
	preimage := [sha256.Size]byte{1}
	rhash := sha256.Sum256(preimage[:])
	ogPacket := &htlcPacket{
		incomingChanID: bobLink.ShortChanID(),
		incomingHTLCID: 0,
		incomingAmount: 1000,
		obfuscator:     NewMockObfuscator(),
		htlc: &lnwire.UpdateAddHTLC{
			PaymentHash: rhash,
			Amount:      1,
		},
	}

	// Determine which outgoingChanID to set based on the useAlias bool.
	outgoingChanID := aliceScid
	if useAlias {
		// Choose from the possible aliases.
		aliases := aliceLink.getAliases()
		idx := mrand.Intn(len(aliases))
		outgoingChanID = aliases[idx]
	}
	ogPacket.outgoingChanID = outgoingChanID

	// Forward the packet to Alice and she should fail it back with an
	// AmountBelowMinimum FailureMessage.
	err = s.ForwardPackets(nil, ogPacket)
	require.NoError(t, err)

	select {
	case failPacket := <-bobLink.packets:
		// Assert that failPacket returns the expected ChannelUpdate.
		msg := failPacket.linkFailure.msg
		failMsg, ok := msg.(*lnwire.FailAmountBelowMinimum)
		require.True(t, ok)
		require.Equal(t, outgoingChanID, failMsg.Update.ShortChannelID)
	case <-s.quit:
		t.Fatal("switch shutting down, failed to receive failure")
	}
}

// TestSwitchAliasInterceptFail tests that when the InterceptableSwitch fails
// an incoming HTLC, it does not leak the on-chain UTXO for option-scid-alias
// (feature bit) or zero-conf channels.
func TestSwitchAliasInterceptFail(t *testing.T) {
	tests := []struct {
		name string

		// Denotes whether or not the incoming channel is a zero-conf
		// channel or an option-scid-alias channel instead (feature
		// bit).
		zeroConf bool
	}{
		{
			name:     "option-scid-alias",
			zeroConf: false,
		},
		{
			name:     "zero-conf",
			zeroConf: true,
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			testSwitchAliasInterceptFail(t, test.zeroConf)
		})
	}
}

// testSwitchAliasInterceptFail is the per-case body for
// TestSwitchAliasInterceptFail.
func testSwitchAliasInterceptFail(t *testing.T, zeroConf bool) {
	t.Parallel()

	chanID, aliceScid := genID()
	alicePeer, err := newMockServer(
		t, "alice", testStartingHeight, nil, testDefaultDelta,
	)
	require.NoError(t, err)

	tempPath := t.TempDir()
	cdb, err := channeldb.Open(tempPath)
	require.NoError(t, err)
	t.Cleanup(func() {
		cdb.Close()
	})

	s, err := initSwitchWithDB(testStartingHeight, cdb)
	require.NoError(t, err)
	err = s.Start()
	require.NoError(t, err)
	defer func() {
		_ = s.Stop()
	}()

	// Make Alice's alias here.
aliceAlias := lnwire.ShortChannelID{ BlockHeight: 16_000_000, TxIndex: 5, TxPosition: 5, } aliceAlias2 := aliceAlias aliceAlias2.TxPosition = 6 var aliceLink *mockChannelLink if zeroConf { aliceLink = newMockChannelLink( s, chanID, aliceAlias, aliceScid, alicePeer, true, true, true, false, ) aliceLink.addAlias(aliceAlias2) } else { aliceLink = newMockChannelLink( s, chanID, aliceScid, emptyScid, alicePeer, true, true, false, true, ) aliceLink.addAlias(aliceAlias) aliceLink.addAlias(aliceAlias2) } err = s.AddLink(aliceLink) require.NoError(t, err) // Now we'll create the packet that will be sent from the Alice link. preimage := [sha256.Size]byte{1} rhash := sha256.Sum256(preimage[:]) ogPacket := &htlcPacket{ incomingChanID: aliceLink.ShortChanID(), incomingTimeout: 1000, incomingHTLCID: 0, outgoingChanID: lnwire.ShortChannelID{}, obfuscator: NewMockObfuscator(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, }, } // Now setup the interceptable switch so that we can reject this // packet. forwardInterceptor := &mockForwardInterceptor{ t: t, interceptedChan: make(chan InterceptedPacket), } notifier := &mock.ChainNotifier{ EpochChan: make(chan *chainntnfs.BlockEpoch, 1), } notifier.EpochChan <- &chainntnfs.BlockEpoch{Height: testStartingHeight} interceptSwitch, err := NewInterceptableSwitch( &InterceptableSwitchConfig{ Switch: s, Notifier: notifier, CltvRejectDelta: 10, CltvInterceptDelta: 13, }, ) require.NoError(t, err) require.NoError(t, interceptSwitch.Start()) interceptSwitch.SetInterceptor(forwardInterceptor.InterceptForwardHtlc) err = interceptSwitch.ForwardPackets(nil, false, ogPacket) require.NoError(t, err) inCircuit := forwardInterceptor.getIntercepted().IncomingCircuit require.NoError(t, interceptSwitch.resolve(&FwdResolution{ Action: FwdActionFail, Key: inCircuit, FailureCode: lnwire.CodeTemporaryChannelFailure, })) select { case failPacket := <-aliceLink.packets: // Assert that failPacket returns the expected ChannelUpdate. 
failHtlc, ok := failPacket.htlc.(*lnwire.UpdateFailHTLC) require.True(t, ok) fwdErr, err := newMockDeobfuscator().DecryptError( failHtlc.Reason, ) require.NoError(t, err) failure := fwdErr.WireMessage() failureMsg, ok := failure.(*lnwire.FailTemporaryChannelFailure) require.True(t, ok) failScid := failureMsg.Update.ShortChannelID isAlias := failScid == aliceAlias || failScid == aliceAlias2 require.True(t, isAlias) case <-s.quit: t.Fatalf("switch shutting down, failed to receive failure") } require.NoError(t, interceptSwitch.Stop()) } ```
```python # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest

from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system.systemhost_mock import MockSystemHost

from .profiler import ProfilerFactory, GooglePProf


class ProfilerFactoryTest(unittest.TestCase):
    """Tests ProfilerFactory's platform-dependent profiler selection."""

    def _assert_default_profiler_name(self, os_name, expected_profiler_name):
        """Check the default profiler resolved for a mocked OS name."""
        platform_info = MockPlatformInfo(os_name)
        actual_name = ProfilerFactory.default_profiler_name(platform_info)
        self.assertEqual(actual_name, expected_profiler_name)

    def test_default_profilers(self):
        """Each OS maps to its expected default profiler (or None)."""
        expectations = [
            ('mac', 'iprofiler'),
            ('linux', 'perf'),
            ('win32', None),
            ('freebsd', None),
        ]
        for os_name, profiler_name in expectations:
            self._assert_default_profiler_name(os_name, profiler_name)

    def test_default_profiler_output(self):
        """create_profiler makes the output dir and picks per-OS file names."""
        host = MockSystemHost()
        self.assertFalse(host.filesystem.exists("/tmp/output"))

        # Default mocks are Mac, so iprofile should be default.
        profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
        self.assertTrue(host.filesystem.exists("/tmp/output"))
        self.assertEqual(profiler._output_path, "/tmp/output/test.dtps")

        # Linux defaults to perf.
        host.platform.os_name = 'linux'
        profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
        self.assertEqual(profiler._output_path, "/tmp/output/test.data")


class GooglePProfTest(unittest.TestCase):
    """Tests GooglePProf's parsing of pprof text output."""

    def test_pprof_output_regexp(self):
        """_first_ten_lines_of_profile skips junk before the Total line."""
        pprof_output = """
sometimes there is junk before the total line


Total: 3770 samples
      76   2.0%   2.0%      104   2.8% lookup (inline)
      60   1.6%   3.6%       60   1.6% FL_SetPrevious (inline)
      56   1.5%   5.1%       56   1.5% MaskPtr (inline)
      51   1.4%   6.4%      222   5.9% WebCore::HTMLTokenizer::nextToken
      42   1.1%   7.6%       47   1.2% WTF::Vector::shrinkCapacity
      35   0.9%   8.5%       35   0.9% WTF::RefPtr::get (inline)
      33   0.9%   9.4%       43   1.1% append (inline)
      29   0.8%  10.1%       67   1.8% WTF::StringImpl::deref (inline)
      29   0.8%  10.9%      100   2.7% add (inline)
      28   0.7%  11.6%       28   0.7% WebCore::QualifiedName::localName (inline)
      25   0.7%  12.3%       27   0.7% WebCore::Private::addChildNodesToDeletionQueue
      24   0.6%  12.9%       24   0.6% __memcpy_ssse3_back
      23   0.6%  13.6%       23   0.6% intHash (inline)
      23   0.6%  14.2%       76   2.0% tcmalloc::FL_Next
      23   0.6%  14.8%       95   2.5% tcmalloc::FL_Push
      22   0.6%  15.4%       22   0.6% WebCore::MarkupTokenizerBase::InputStreamPreprocessor::peek (inline)
"""
        expected_first_ten_lines = """      76   2.0%   2.0%      104   2.8% lookup (inline)
      60   1.6%   3.6%       60   1.6% FL_SetPrevious (inline)
      56   1.5%   5.1%       56   1.5% MaskPtr (inline)
      51   1.4%   6.4%      222   5.9% WebCore::HTMLTokenizer::nextToken
      42   1.1%   7.6%       47   1.2% WTF::Vector::shrinkCapacity
      35   0.9%   8.5%       35   0.9% WTF::RefPtr::get (inline)
      33   0.9%   9.4%       43   1.1% append (inline)
      29   0.8%  10.1%       67   1.8% WTF::StringImpl::deref (inline)
      29   0.8%  10.9%      100   2.7% add (inline)
      28   0.7%  11.6%       28   0.7% WebCore::QualifiedName::localName (inline)
"""
        host = MockSystemHost()
        profiler = GooglePProf(host, '/bin/executable', '/tmp/output')
        first_ten = profiler._first_ten_lines_of_profile(pprof_output)
        self.assertEqual(first_ten, expected_first_ten_lines)
```objective-c #pragma once #include <Processors/Formats/Impl/JSONColumnsBlockOutputFormatBase.h> namespace DB { /* Format JSONCompactColumns outputs all data as a single block in the next format: * [ * [value1, value2, value3, ...], * [value1, value2m value3, ...], * ... * ] */ class JSONCompactColumnsBlockOutputFormat : public JSONColumnsBlockOutputFormatBase { public: JSONCompactColumnsBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "JSONCompactColumnsBlockOutputFormat"; } protected: void writeChunkStart() override; void writeChunkEnd() override; void writeColumnStart(size_t column_index) override; const Names column_names; }; } ```
```scss @import '~@proton/styles/scss/lib'; .calendar-modal { &-content { > * + * { margin-block-start: rem(16); } } } ```
That's Why I Sing This Way is the fourth studio album by American country music singer Daryle Singletary. It was released on April 23, 2002 via Koch Records. Except for its title track, the album is composed of cover songs. Two singles were released from it: the title track and a cover of Conway Twitty's 1980 Number One single "I'd Love to Lay You Down", which respectively reached #47 and #43 on the U.S. Billboard country singles charts. The version of "I Never Go Around Mirrors" on this album was first recorded by Keith Whitley; Whitley had Shafer write the second verse heard here.

The album includes guest appearances from George Jones, Dwight Yoakam, Rhonda Vincent, John Wesley Ryles, Merle Haggard and Johnny Paycheck.

Track listing

Personnel
Joe Caverlee - fiddle, mandolin
Gregory Cole - background vocals
Merle Haggard - vocals on "Makeup and Faded Blue Jeans"
George Jones - vocals on "Walk Through This World with Me"
Paul Leim - drums
Terry McMillan - harmonica
Brent Mason - electric guitar
Mike Johnson - pedal steel guitar
Danny Parks - acoustic guitar, electric guitar
Johnny Paycheck - vocals on "Old Violin"
John Wesley Ryles - vocals on "Kay"
Daryle Singletary - lead vocals
Catherine Styron - piano
Darrin Vincent - background vocals
Rhonda Vincent - vocals on "After the Fire Is Gone"
Glenn Worf - bass guitar
Dwight Yoakam - vocals on "Love's Gonna Live Here"

Chart performance

References

Allmusic

2003 albums
Covers albums
E1 Music albums
Daryle Singletary albums
```xml import * as routerUtils from "@erxes/ui/src/utils/router"; import { useLocation, useNavigate } from "react-router-dom"; import AccountList from "../accounts/containers/List"; import Box from "@erxes/ui/src/components/Box"; import Button from "@erxes/ui/src/components/Button"; import { ConfigList } from "../../../styles"; import EmptyState from "@erxes/ui/src/components/EmptyState"; import Form from "../../configs/containers/Form"; import { IKhanbankConfigsItem } from "../../configs/types"; import Icon from "@erxes/ui/src/components/Icon"; import LeftSidebar from "@erxes/ui/src/layout/components/Sidebar"; import LoadMore from "@erxes/ui/src/components/LoadMore"; import ModalTrigger from "@erxes/ui/src/components/ModalTrigger"; import React from "react"; import { SidebarList } from "@erxes/ui/src/layout/styles"; import Spinner from "@erxes/ui/src/components/Spinner"; import { TopHeader } from "@erxes/ui/src/styles/main"; type Props = { configs: IKhanbankConfigsItem[]; totalCount: number; queryParams: any; loading: boolean; refetch?: () => void; }; const ConfigsList = (props: Props) => { const { configs, totalCount, queryParams, loading, refetch } = props; const location = useLocation(); const navigate = useNavigate(); const [currentConfig, setCurrentConfig] = React.useState<string | undefined>( queryParams._id ); const [fetchPolicy, setFetchPolicy] = React.useState("cache-first"); React.useEffect(() => { const defaultAccount = JSON.parse( localStorage.getItem("khanbankDefaultAccount") || "{}" ); if (defaultAccount.configId && defaultAccount.accountNumber) { routerUtils.setParams(navigate, location, { _id: defaultAccount.configId, account: defaultAccount.accountNumber, }); } }, []); const onClickRow = (config) => { setCurrentConfig(config._id); routerUtils.setParams(navigate, location, { _id: config._id }); }; const onRefresh = () => { setFetchPolicy("network-only"); }; const reload = ( <a href="#refresh" onClick={onRefresh} tabIndex={0}> <Icon icon="refresh" 
size={8} /> </a> ); const renderRow = () => { return configs.map((config, index) => { return ( <Box key={index} extraButtons={reload} title={config.name} isOpen={currentConfig === config._id} name={config._id} callback={() => { onClickRow(config); }} > <AccountList {...props} configId={config._id} fetchPolicy={fetchPolicy} /> </Box> ); }); }; const renderSidebarHeader = () => { const addConfig = ( <Button btnStyle="success" block={true} uppercase={false} icon="plus-circle" > Add New Config </Button> ); const formContent = (formProps) => <Form {...formProps} />; return ( <TopHeader> <ModalTrigger size="sm" title="Corporate Gateway" trigger={addConfig} enforceFocus={false} content={formContent} /> </TopHeader> ); }; return ( <ConfigList> <LeftSidebar wide={true} header={renderSidebarHeader()} hasBorder={true}> <SidebarList $noTextColor={true} $noBackground={true} id={"khanbankSidebar"} > {renderRow()} <LoadMore all={totalCount} loading={loading} /> </SidebarList> {!loading && totalCount === 0 && ( <EmptyState image="/images/actions/18.svg" text="There is no config yet. Start by adding one." /> )} </LeftSidebar> </ConfigList> ); }; export default ConfigsList; ```
```smalltalk " System support for object events (change/update mechanism) " Class { #name : 'ManifestSystemObjectEvents', #superclass : 'PackageManifest', #category : 'System-Object Events-Manifest', #package : 'System-Object Events', #tag : 'Manifest' } { #category : 'meta-data - dependency analyser' } ManifestSystemObjectEvents class >> manuallyResolvedDependencies [ ^ #(#'Collections-Abstract') ] ```
```smalltalk namespace SixLabors.ImageSharp; /// <content> /// Contains the definition of <see cref="WebSafePalette"/>. /// </content> public partial struct Color { private static readonly Lazy<Color[]> WebSafePaletteLazy = new Lazy<Color[]>(CreateWebSafePalette, true); /// <summary> /// Gets a collection of named, web safe colors as defined in the CSS Color Module Level 4. /// </summary> public static ReadOnlyMemory<Color> WebSafePalette => WebSafePaletteLazy.Value; private static Color[] CreateWebSafePalette() => new[] { AliceBlue, AntiqueWhite, Aqua, Aquamarine, Azure, Beige, Bisque, Black, BlanchedAlmond, Blue, BlueViolet, Brown, BurlyWood, CadetBlue, Chartreuse, Chocolate, Coral, CornflowerBlue, Cornsilk, Crimson, Cyan, DarkBlue, DarkCyan, DarkGoldenrod, DarkGray, DarkGreen, DarkKhaki, DarkMagenta, DarkOliveGreen, DarkOrange, DarkOrchid, DarkRed, DarkSalmon, DarkSeaGreen, DarkSlateBlue, DarkSlateGray, DarkTurquoise, DarkViolet, DeepPink, DeepSkyBlue, DimGray, DodgerBlue, Firebrick, FloralWhite, ForestGreen, Fuchsia, Gainsboro, GhostWhite, Gold, Goldenrod, Gray, Green, GreenYellow, Honeydew, HotPink, IndianRed, Indigo, Ivory, Khaki, Lavender, LavenderBlush, LawnGreen, LemonChiffon, LightBlue, LightCoral, LightCyan, LightGoldenrodYellow, LightGray, LightGreen, LightPink, LightSalmon, LightSeaGreen, LightSkyBlue, LightSlateGray, LightSteelBlue, LightYellow, Lime, LimeGreen, Linen, Magenta, Maroon, MediumAquamarine, MediumBlue, MediumOrchid, MediumPurple, MediumSeaGreen, MediumSlateBlue, MediumSpringGreen, MediumTurquoise, MediumVioletRed, MidnightBlue, MintCream, MistyRose, Moccasin, NavajoWhite, Navy, OldLace, Olive, OliveDrab, Orange, OrangeRed, Orchid, PaleGoldenrod, PaleGreen, PaleTurquoise, PaleVioletRed, PapayaWhip, PeachPuff, Peru, Pink, Plum, PowderBlue, Purple, RebeccaPurple, Red, RosyBrown, RoyalBlue, SaddleBrown, Salmon, SandyBrown, SeaGreen, SeaShell, Sienna, Silver, SkyBlue, SlateBlue, SlateGray, Snow, SpringGreen, SteelBlue, Tan, Teal, Thistle, 
Tomato, Transparent, Turquoise, Violet, Wheat, White, WhiteSmoke, Yellow, YellowGreen }; } ```
The 1975 Bandy World Championship was the ninth Bandy World Championship and was contested between four men's bandy playing nations. The championship was played in Finland from 25 January to 2 February 1975. The Soviet Union became champions.

Participants

Premier tour

25 January
 Norway – Finland 1–2
 Soviet Union – Sweden 1–3

26 January
 Finland – Sweden 2–3
 Soviet Union – Norway 8–0

28 January
 Norway – Sweden 0–6
 Soviet Union – Finland 12–4

29 January
 Soviet Union – Sweden 7–2

30 January
 Norway – Finland 2–2

1 February
 Finland – Sweden 3–6
 Soviet Union – Norway 14–2

2 February
 Norway – Sweden 1–8
 Soviet Union – Finland 5–0

References

1975
1975 in bandy
1975 in Finnish sport
International bandy competitions hosted by Finland
Bandy
Bandy