index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager/explorer/ScriptManagerExplorer.java
/** * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.nicobar.manager.explorer; import javax.annotation.PostConstruct; import javax.inject.Inject; import com.netflix.explorers.AbstractExplorerModule; import com.netflix.explorers.ExplorerManager; import com.netflix.karyon.spi.Component; @Component public class ScriptManagerExplorer extends AbstractExplorerModule { private ExplorerManager explorerManager; @Inject public ScriptManagerExplorer(ExplorerManager manager) { super("scriptmanager"); this.explorerManager = manager; } @PostConstruct public void initialize() { super.initialize(); explorerManager.registerExplorer(this); } }
1,900
0
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager/explorer/ScriptManagerBootstrap.java
/*
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.nicobar.manager.explorer;

import java.util.HashMap;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.inject.Scopes;
import com.google.inject.name.Names;
import com.netflix.explorers.AppConfigGlobalModelContext;
import com.netflix.explorers.ExplorerManager;
import com.netflix.explorers.ExplorersManagerImpl;
import com.netflix.explorers.context.GlobalModelContext;
import com.netflix.governator.guice.LifecycleInjectorBuilder;
import com.netflix.karyon.server.ServerBootstrap;
import com.netflix.nicobar.manager.rest.GsonMessageBodyHandler;
import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.guice.JerseyServletModule;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;

/**
 * Karyon server bootstrap for the script manager web application.
 * Installs a Jersey/Guice servlet module that binds the explorer
 * infrastructure and routes all requests through {@link GuiceContainer}.
 */
public class ScriptManagerBootstrap extends ServerBootstrap {
    private static final Logger LOG = LoggerFactory.getLogger(ScriptManagerBootstrap.class);

    /**
     * Contribute the Jersey servlet module to the injector before it is created.
     * Note: the {@code @SuppressWarnings("unused")} previously on the parameter
     * was incorrect — the builder is used below.
     *
     * @param builderToBeUsed builder the additional module is registered with
     */
    @Override
    protected void beforeInjectorCreation(LifecycleInjectorBuilder builderToBeUsed) {
        JerseyServletModule jerseyServletModule = new JerseyServletModule() {
            @Override
            protected void configureServlets() {
                bind(String.class).annotatedWith(Names.named("explorerAppName")).toInstance("scriptmanager");
                bind(GsonMessageBodyHandler.class).in(Scopes.SINGLETON);
                bind(GlobalModelContext.class).to(AppConfigGlobalModelContext.class);
                bind(ExplorerManager.class).to(ExplorersManagerImpl.class);
                bind(ScriptManagerExplorer.class);
                bind(GuiceContainer.class).asEagerSingleton();

                Map<String, String> params = new HashMap<String, String>();
                params.put(PackagesResourceConfig.PROPERTY_PACKAGES,
                    // pytheas resources
                    "com.netflix.explorers.resources;" +
                    "com.netflix.explorers.providers;" +
                    // nicobar resources
                    "com.netflix.nicobar.manager.explorer.resources");

                // Route all requests through GuiceContainer
                serve("/*").with(GuiceContainer.class, params);
            }
        };
        builderToBeUsed.withAdditionalModules(jerseyServletModule);
        // fixed copy-paste defect: message previously referenced "HelloWorldBootstrap"
        LOG.debug("ScriptManagerBootstrap injected jerseyServletModule in LifecycleInjectorBuilder");
    }
}
1,901
0
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager/explorer
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager/explorer/resources/ScriptManagerHomeResource.java
/**
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.nicobar.manager.explorer.resources;

import java.util.HashMap;
import java.util.Map;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.sun.jersey.api.view.Viewable;

/**
 * Application resource which serves the explores home page
 * @author James Kojo
 */
@Path("/")
public class ScriptManagerHomeResource {
    // static final per SLF4J convention: one logger per class, not per instance
    private static final Logger LOG = LoggerFactory.getLogger(ScriptManagerHomeResource.class);

    /**
     * Render the script manager home page.
     * @return viewable backed by the {@code home.ftl} freemarker template
     */
    @GET
    @Produces(MediaType.TEXT_HTML)
    public Viewable showIndex() {
        LOG.info("Script manager home page requested");
        Map<String, Object> model = new HashMap<String, Object>();
        return new Viewable("/scriptmanager/home.ftl", model);
    }
}
1,902
0
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager/explorer
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager/explorer/resources/ArchiveRepositoriesResource.java
/*
 *
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.nicobar.manager.explorer.resources;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;

import org.apache.commons.collections.CollectionUtils;

import com.netflix.nicobar.core.persistence.ArchiveRepository;
import com.netflix.nicobar.core.persistence.ArchiveSummary;
import com.netflix.nicobar.core.persistence.RepositorySummary;
import com.sun.jersey.api.NotFoundException;
import com.sun.jersey.api.view.Viewable;

/**
 * REST resource for accessing the a collection of {@link ArchiveRepository}s.
 *
 * @author James Kojo
 */
@Path("scriptmanager")
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public class ArchiveRepositoriesResource {
    private final Map<String, ArchiveRepository> repositories;

    // avoid exception for classpath scanners which attempt to instantiate this resource
    public ArchiveRepositoriesResource() {
        this(Collections.<String, ArchiveRepository>emptyMap());
    }

    /**
     * @param repositories repository id to repository; must be non-null
     */
    public ArchiveRepositoriesResource(Map<String, ArchiveRepository> repositories) {
        this.repositories = Objects.requireNonNull(repositories, "repositories");
    }

    /** Render the repository list page. */
    @GET
    @Produces(MediaType.TEXT_HTML)
    public Viewable showIndex() {
        Map<String, Object> model = new HashMap<String, Object>();
        return new Viewable("/scriptmanager/repository_list.ftl", model);
    }

    /**
     * Get a list of all of the repository summaries
     */
    @GET
    @Path("/repositorysummaries")
    public List<RepositorySummary> getRepositorySummaries() {
        List<RepositorySummary> result = new ArrayList<RepositorySummary>(repositories.size());
        // iterate the values directly instead of iterating keySet() and
        // re-looking-up each key through getScriptRepo() (double map lookup)
        for (ArchiveRepository repository : repositories.values()) {
            result.add(new ArchiveRepositoryResource(repository).getRepositorySummary());
        }
        return result;
    }

    /**
     * Get a map of summaries from different repositories.
     * @param repositoryIds ids for repositories to query. if empty, then all repositories will be queried.
     * @return map of repository id to list of summaries in the respective repository
     */
    @GET
    @Path("/archivesummaries")
    public Map<String, List<ArchiveSummary>> getArchiveSummaries(@QueryParam("repositoryIds") Set<String> repositoryIds) {
        // default to all repositories without reassigning the injected parameter
        Set<String> ids = CollectionUtils.isEmpty(repositoryIds) ? repositories.keySet() : repositoryIds;
        Map<String, List<ArchiveSummary>> result = new LinkedHashMap<String, List<ArchiveSummary>>();
        for (String repositoryId : ids) {
            List<ArchiveSummary> repoSummaries = getScriptRepo(repositoryId).getSummaries();
            result.put(repositoryId, repoSummaries);
        }
        return result;
    }

    /**
     * Sub-resource locator for a single repository.
     * @param repositoryId id of the repository to resolve
     * @throws NotFoundException (404) if no repository is registered under the id
     */
    @Path("/{repositoryId}")
    public ArchiveRepositoryResource getScriptRepo(@PathParam("repositoryId") String repositoryId) {
        ArchiveRepository repository = repositories.get(repositoryId);
        if (repository == null) {
            throw new NotFoundException("no such repository '" + repositoryId + "'");
        }
        return new ArchiveRepositoryResource(repository);
    }
}
1,903
0
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager/explorer
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager/explorer/resources/ArchiveRepositoryResource.java
/*
 *
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.nicobar.manager.explorer.resources;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;

import com.netflix.nicobar.core.archive.JarScriptArchive;
import com.netflix.nicobar.core.archive.ModuleId;
import com.netflix.nicobar.core.archive.ScriptModuleSpec;
import com.netflix.nicobar.core.persistence.ArchiveRepository;
import com.netflix.nicobar.core.persistence.ArchiveSummary;
import com.netflix.nicobar.core.persistence.RepositorySummary;
import com.sun.jersey.api.Responses;
import com.sun.jersey.api.view.Viewable;
import com.sun.jersey.multipart.FormDataParam;

/**
 * REST sub-resource for a single {@link ArchiveRepository}
 *
 * @author James Kojo
 * @author Vasanth Asokan
 */
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public class ArchiveRepositoryResource {
    private final ArchiveRepository repository;

    /**
     * @param repository repository this sub-resource exposes; must be non-null
     */
    public ArchiveRepositoryResource(ArchiveRepository repository) {
        this.repository = Objects.requireNonNull(repository, "repository");
    }

    /** Render the repository detail page. */
    @GET
    @Produces(MediaType.TEXT_HTML)
    public Viewable showView() {
        Map<String, Object> model = new HashMap<String, Object>();
        model.put("repositoryId", repository.getRepositoryId());
        return new Viewable("/scriptmanager/repository_view.ftl", model);
    }

    /**
     * @return a summary for this repository
     */
    @GET
    @Path("summary")
    public RepositorySummary getRepositorySummary() {
        RepositorySummary repositorySummary;
        try {
            repositorySummary = repository.getDefaultView().getRepositorySummary();
        } catch (IOException e) {
            throw new WebApplicationException(e);
        }
        return repositorySummary;
    }

    /**
     * @return summaries for the archives in the repo
     */
    @GET
    @Path("archivesummaries")
    public List<ArchiveSummary> getSummaries() {
        try {
            return repository.getDefaultView().getArchiveSummaries();
        } catch (IOException e) {
            throw new WebApplicationException(e);
        }
    }

    /**
     * Insert an uploaded jar archive into the repository.
     * @param moduleSpec module spec describing the archive; validated first
     * @param file uploaded jar content
     * @throws WebApplicationException on validation failure (400) or I/O error
     */
    @POST
    @Consumes(MediaType.MULTIPART_FORM_DATA)
    public void insertArchive(
            @FormDataParam("moduleSpec") ScriptModuleSpec moduleSpec,
            @FormDataParam("archiveJar") InputStream file) {
        validateModuleSpec(moduleSpec);
        String moduleId = moduleSpec.getModuleId().toString();
        try {
            java.nio.file.Path tempFile = Files.createTempFile(moduleId, ".jar");
            // best-effort cleanup of the staging file: some ArchiveRepository
            // implementations may read the jar lazily, so defer deletion to JVM
            // exit rather than deleting immediately after insertArchive() returns
            tempFile.toFile().deleteOnExit();
            Files.copy(file, tempFile, StandardCopyOption.REPLACE_EXISTING);
            JarScriptArchive jarScriptArchive = new JarScriptArchive.Builder(tempFile)
                .setModuleSpec(moduleSpec)
                .build();
            repository.insertArchive(jarScriptArchive);
        } catch (IOException e) {
            throw new WebApplicationException(e);
        }
    }

    /**
     * Delete an archive from the repository.
     * @param moduleId string form of the module id to delete
     */
    @DELETE
    @Path("{moduleId}")
    public void deleteArchive(@PathParam("moduleId") String moduleId) {
        try {
            repository.deleteArchive(ModuleId.fromString(moduleId));
        } catch (IOException e) {
            throw new WebApplicationException(e);
        }
    }

    /**
     * Validate that the given module spec is present and fully populated.
     * @throws WebApplicationException (400) listing the missing fields
     */
    public void validateModuleSpec(ScriptModuleSpec moduleSpec) {
        Set<String> missing = new HashSet<String>(1);
        if (moduleSpec == null) {
            missing.add("moduleSpec");
        } else {
            if (moduleSpec.getCompilerPluginIds() == null)
                missing.add("compilerPluginIds");
            if (moduleSpec.getMetadata() == null)
                missing.add("metadata");
            if (moduleSpec.getModuleDependencies() == null)
                missing.add("moduleDependencies");
            if (moduleSpec.getModuleId() == null)
                missing.add("moduleId");
        }
        if (!missing.isEmpty()) {
            throw new WebApplicationException(
                Responses.clientError()
                    .entity(Collections.singletonMap("missing", missing))
                    .build());
        }
    }
}
1,904
0
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager
Create_ds/Nicobar/nicobar-manager/src/main/java/com/netflix/nicobar/manager/rest/GsonMessageBodyHandler.java
/*
 *
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.nicobar.manager.rest;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.nio.charset.StandardCharsets;

import javax.ws.rs.Consumes;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyReader;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;

import org.apache.commons.io.IOUtils;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;

/**
 * Jersey/jax-rs shim to get it use gson as the default json serializer
 *
 * @author James Kojo
 */
@Provider
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public final class GsonMessageBodyHandler implements MessageBodyWriter<Object>, MessageBodyReader<Object> {
    // eager, final initialization: Gson instances are thread-safe, and this
    // provider is a shared singleton — the previous lazy, unsynchronized
    // initialization in getGson() was a data race
    private final Gson gson = new GsonBuilder().create();

    @Override
    public boolean isReadable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
        return true;
    }

    @Override
    public Object readFrom(Class<Object> type, Type genericType, Annotation[] annotations,
            MediaType mediaType, MultivaluedMap<String, String> httpHeaders, InputStream entityStream) {
        // StandardCharsets replaces the deprecated commons-io Charsets
        InputStreamReader streamReader = new InputStreamReader(entityStream, StandardCharsets.UTF_8);
        try {
            // use the generic type when it carries more information than the raw class
            Type jsonType;
            if (type.equals(genericType)) {
                jsonType = type;
            } else {
                jsonType = genericType;
            }
            return getGson().fromJson(streamReader, jsonType);
        } finally {
            IOUtils.closeQuietly(streamReader);
        }
    }

    @Override
    public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
        return true;
    }

    @Override
    public long getSize(Object object, Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
        // -1: length not known in advance; the container computes it
        return -1;
    }

    @Override
    public void writeTo(Object object, Class<?> type, Type genericType, Annotation[] annotations,
            MediaType mediaType, MultivaluedMap<String, Object> httpHeaders,
            OutputStream entityStream) throws IOException, WebApplicationException {
        OutputStreamWriter writer = new OutputStreamWriter(entityStream, StandardCharsets.UTF_8);
        try {
            Type jsonType;
            if (type.equals(genericType)) {
                jsonType = type;
            } else {
                jsonType = genericType;
            }
            getGson().toJson(object, jsonType, writer);
        } finally {
            IOUtils.closeQuietly(writer);
        }
    }

    /** @return the shared, thread-safe Gson instance */
    protected Gson getGson() {
        return gson;
    }
}
1,905
0
Create_ds/Nicobar/nicobar-cassandra/src/test/java/com/netflix/nicobar
Create_ds/Nicobar/nicobar-cassandra/src/test/java/com/netflix/nicobar/cassandra/CassandraArchiveRepositoryTest.java
/*
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.nicobar.cassandra;

import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;

import java.io.IOException;
import java.io.InputStream;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;

import org.apache.commons.io.IOUtils;
import org.mockito.ArgumentCaptor;
import org.mockito.InOrder;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.nicobar.cassandra.CassandraArchiveRepository.Columns;
import com.netflix.nicobar.core.archive.JarScriptArchive;
import com.netflix.nicobar.core.archive.ModuleId;
import com.netflix.nicobar.core.archive.ScriptModuleSpec;

/**
 * Tests for {@link CassandraArchiveRepository}.
 * Uses a mocked {@link CassandraGateway}; no real Cassandra is contacted.
 * @author Vasanth Asokan
 */
public class CassandraArchiveRepositoryTest {
    /**
     * Metadata for test resources found in test/resource
     */
    public static enum TestResource {
        TEST_HELLOWORLD_JAR("helloworld", "testmodules/helloworld.jar");

        private final ModuleId moduleId;
        private final String resourcePath;

        private TestResource(String moduleId, String resourcePath) {
            this.moduleId = ModuleId.create(moduleId);
            this.resourcePath = resourcePath;
        }

        /**
         * @return the expected moduleId after this is converted to a archive
         */
        public ModuleId getModuleId() {
            return moduleId;
        }

        /**
         * @return path name suitable for passing to {@link ClassLoader#getResource(String)}
         */
        public String getResourcePath() {
            return resourcePath;
        }
    }

    // repository under test, backed by the mocked gateway below
    private CassandraArchiveRepository repository;
    // temp copy of the helloworld test jar, created in setup()
    private Path testArchiveJarFile;
    // mocked Cassandra access layer; all interactions are verified against it
    private CassandraGateway gateway;
    private CassandraArchiveRepositoryConfig config;

    /**
     * Build the mocked gateway, the repository config (with a temp output
     * directory), and stage the test jar on the local filesystem.
     */
    @BeforeClass
    public void setup() throws Exception {
        gateway = mock(CassandraGateway.class);
        Keyspace mockKeyspace = mock(Keyspace.class);
        when(mockKeyspace.getKeyspaceName()).thenReturn("testKeySpace");
        when(gateway.getKeyspace()).thenReturn(mockKeyspace);
        when(gateway.getColumnFamily()).thenReturn("testColumnFamily");
        config = new BasicCassandraRepositoryConfig.Builder(gateway)
            .setRepositoryId("TestRepo")
            .setArchiveOutputDirectory(Files.createTempDirectory(this.getClass().getSimpleName() + "_"))
            .build();
        repository = new CassandraArchiveRepository(config);
        URL testJarUrl = getClass().getClassLoader().getResource(TestResource.TEST_HELLOWORLD_JAR.getResourcePath());
        if (testJarUrl == null) {
            fail("Couldn't locate " + TestResource.TEST_HELLOWORLD_JAR.getResourcePath());
        }
        // copy the classpath resource to a real file so JarScriptArchive can open it
        testArchiveJarFile = Files.createTempFile(TestResource.TEST_HELLOWORLD_JAR.getModuleId().toString(), ".jar");
        InputStream inputStream = testJarUrl.openStream();
        Files.copy(inputStream, testArchiveJarFile, StandardCopyOption.REPLACE_EXISTING);
        IOUtils.closeQuietly(inputStream);
    }

    /**
     * Insert the test archive and verify that the gateway upsert receives the
     * expected row key (module id) and the expected column values, including
     * the content hash and shard number computed by the repository.
     */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Test
    public void testInsertArchive() throws IOException {
        JarScriptArchive jarArchive = new JarScriptArchive.Builder(testArchiveJarFile).build();
        repository.insertArchive(jarArchive);
        // rebuild the column map the repository is expected to have written
        Map<String, Object> columns = new HashMap<String, Object>();
        Path jarFilePath;
        try {
            jarFilePath = Paths.get(jarArchive.getRootUrl().toURI());
        } catch (URISyntaxException e) {
            throw new IOException(e);
        }
        ScriptModuleSpec moduleSpec = jarArchive.getModuleSpec();
        String serialized = config.getModuleSpecSerializer().serialize(moduleSpec);
        byte[] jarBytes = Files.readAllBytes(jarFilePath);
        columns.put(Columns.shard_num.name(), repository.calculateShardNum(moduleSpec.getModuleId()));
        columns.put(Columns.last_update.name(), jarArchive.getCreateTime());
        columns.put(Columns.archive_content.name(), jarBytes);
        columns.put(Columns.archive_content_hash.name(), repository.calculateHash(jarBytes));
        columns.put(Columns.module_spec.name(), serialized);
        // capture what was actually sent to the gateway and compare field by field
        ArgumentCaptor<String> argument1 = ArgumentCaptor.forClass(String.class);
        ArgumentCaptor<Map> argument2 = ArgumentCaptor.forClass(Map.class);
        verify(gateway).upsert(argument1.capture(), argument2.capture());
        assertEquals(moduleSpec.getModuleId().toString(), argument1.getValue());
        Map columnMap = argument2.getValue();
        assertEquals(repository.calculateShardNum(moduleSpec.getModuleId()), columnMap.get(Columns.shard_num.name()));
        assertTrue(Arrays.equals(jarBytes, (byte[])columnMap.get(Columns.archive_content.name())));
        assertTrue(Arrays.equals(repository.calculateHash(jarBytes), (byte[])columnMap.get(Columns.archive_content_hash.name())));
        assertEquals(serialized, (String)columnMap.get(Columns.module_spec.name()));
        assertEquals(jarArchive.getCreateTime(), (long)columnMap.get(Columns.last_update.name()));
    }

    // deploy specs are unsupported by the Cassandra repository — must throw
    @Test(expectedExceptions=UnsupportedOperationException.class)
    public void testArchiveWithDeploySpecs() throws IOException {
        JarScriptArchive jarArchive = new JarScriptArchive.Builder(testArchiveJarFile).build();
        repository.insertArchive(jarArchive, null);
    }

    // named views are unsupported by the Cassandra repository — must throw
    @Test(expectedExceptions=UnsupportedOperationException.class)
    public void testGetView() throws IOException {
        repository.getView("");
    }

    /** Deleting an archive should delete the row keyed by the module id string. */
    @Test
    public void testDeleteArchive() throws IllegalArgumentException, IOException {
        repository.deleteArchive(ModuleId.fromString("testModule.v3"));
        verify(gateway).deleteRow("testModule.v3");
    }

    /**
     * getRows should issue one async select per shard, in shard order.
     * A pre-completed FutureTask serves the mocked result for every shard.
     */
    @Test
    @SuppressWarnings("unchecked")
    public void testGetRows() throws Exception {
        EnumSet<Columns> columns = EnumSet.of(Columns.module_id, Columns.module_name);
        Rows<String, String> mockRows = mock(Rows.class);
        Row<String, String> row1 = mock(Row.class);
        Row<String, String> row2 = mock(Row.class);
        List<Row<String, String>> rowList = Arrays.asList(row1, row2);
        when(mockRows.iterator()).thenReturn(rowList.iterator());
        // no-op runnable: the FutureTask exists only to hand back mockRows
        FutureTask<Rows<String, String>> future = new FutureTask<Rows<String, String>>(new Runnable() {
            @Override
            public void run() {
            }
        }, mockRows);
        ExecutorService executor = Executors.newFixedThreadPool(1);
        executor.execute(future);
        when(gateway.selectAsync(anyString())).thenReturn(future);
        repository.getRows(columns);
        // expected CQL, one statement per shard
        List<String> selectList = new ArrayList<String>();
        for (int shardNum = 0; shardNum < config.getShardCount(); shardNum++) {
            selectList.add(repository.generateSelectByShardCql(columns, shardNum));
        }
        // verify the selects were issued in shard order
        InOrder inOrder = inOrder(gateway);
        for (int shardNum = 0; shardNum < config.getShardCount(); shardNum++) {
            inOrder.verify(gateway).selectAsync(selectList.get(shardNum));
        }
    }

    @Test
    public void testGetArchiveUpdateTimes() {
        // TODO: Fill out test.
    }

    @Test
    public void testGetArchiveSummaries() {
        // TODO: Fill out test.
    }

    @Test
    public void testGetRepositorySummary() throws Exception {
        // TODO: Fill out test.
    }

    @Test
    public void testGetScriptArchives() {
        // TODO: Fill out test.
    }
}
1,906
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/CassandraArchiveRepository.java
/* * * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.nicobar.cassandra; import java.io.IOException; import java.net.URISyntaxException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.Future; import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.Iterables; import com.netflix.astyanax.connectionpool.exceptions.ConnectionException; import com.netflix.astyanax.model.Column; import com.netflix.astyanax.model.ColumnList; import com.netflix.astyanax.model.Row; import com.netflix.astyanax.model.Rows; import com.netflix.nicobar.core.archive.JarScriptArchive; import com.netflix.nicobar.core.archive.ModuleId; import com.netflix.nicobar.core.archive.ScriptArchive; import com.netflix.nicobar.core.archive.ScriptModuleSpec; import com.netflix.nicobar.core.persistence.ArchiveRepository; import com.netflix.nicobar.core.persistence.ArchiveSummary; import com.netflix.nicobar.core.persistence.RepositorySummary; import 
com.netflix.nicobar.core.persistence.RepositoryView; /** * Data access object of {@link ScriptArchive}s stored in Cassandra. * This implementation is based on the Astyanax and requires CQL 3 support to be enabled. * <p> * The query algorithm attempts to divide up read operations such that they won't overwhelm Cassandra * if many instances are using this implementation to poll for updates. * Upon insertion, all archives are assigned a shard number calculated as {@code (moduleId.hashCode() % shardNum)}. * The shard number is subsequently inserted into a column for which a secondary index has been defined. * RepositoryView poller methods will first search each shard for any rows with an update timestamp greater than * the last poll time, and if any are found, the contents of those archives are loaded in small batches. * * *<pre> * Default Schema: * * CREATE TABLE script_repo ( * module_id varchar, * module_name varchar, * module_version varchar, * shard_num int, * last_update timestamp, * module_spec varchar, * archive_content_hash blob, * archive_content blob, * PRIMARY KEY (module_id) * ); * * CREATE INDEX script_repo_shard_num_index on script_repo (shard_num); * </pre> * * See {@link CassandraArchiveRepositoryConfig} to override the default table name. 
 * @author James Kojo
 * @author Vasanth Asokan
 */
public class CassandraArchiveRepository implements ArchiveRepository {
    private final static Logger logger = LoggerFactory.getLogger(CassandraArchiveRepository.class);

    /** Column names of the backing Cassandra column family. */
    public static enum Columns {
        module_id, module_name, module_version, shard_num, last_update, module_spec, archive_content_hash, archive_content;
    }

    // View spanning every archive in this repository; returned by getDefaultView().
    protected final RepositoryView defaultView;
    private final CassandraArchiveRepositoryConfig config;
    private final CassandraGateway cassandra;

    /**
     * Construct an instance of the repository with the given configuration.
     * @param config repository configuration; also supplies the {@link CassandraGateway}
     */
    public CassandraArchiveRepository(CassandraArchiveRepositoryConfig config) {
        this.config = Objects.requireNonNull(config, "config");
        this.cassandra = this.config.getCassandraGateway();
        defaultView = new DefaultView();
    }

    /**
     * Construct an instance of the repository with the given configuration and default view.
     * @param config repository configuration
     * @param defaultView view to be reported by {@link #getDefaultView()}
     */
    public CassandraArchiveRepository(CassandraArchiveRepositoryConfig config, RepositoryView defaultView) {
        this.config = Objects.requireNonNull(config, "config");
        this.cassandra = this.config.getCassandraGateway();
        this.defaultView = defaultView;
    }

    @Override
    public String getRepositoryId() {
        return getConfig().getRepositoryId();
    }

    /**
     * The default view reports all archives inserted into this repository.
     * @return the default view into all archives.
     */
    @Override
    public RepositoryView getDefaultView() {
        return defaultView;
    }

    /**
     * No named views supported by this repository!
     * Throws UnsupportedOperationException.
     */
    @Override
    public RepositoryView getView(String view) {
        throw new UnsupportedOperationException();
    }

    /**
     * Insert a jar archive into the repository. The jar bytes, a SHA-1 content
     * hash, the serialized module spec and the computed shard number are written
     * as columns of a single row keyed by the module id.
     * @throws IOException if the jar cannot be read or the upsert fails
     */
    @Override
    public void insertArchive(JarScriptArchive jarScriptArchive) throws IOException {
        Objects.requireNonNull(jarScriptArchive, "jarScriptArchive");
        ScriptModuleSpec moduleSpec = jarScriptArchive.getModuleSpec();
        ModuleId moduleId = moduleSpec.getModuleId();
        Path jarFilePath;
        try {
            jarFilePath = Paths.get(jarScriptArchive.getRootUrl().toURI());
        } catch (URISyntaxException e) {
            throw new IOException(e);
        }
        int shardNum = calculateShardNum(moduleId);
        byte[] jarBytes = Files.readAllBytes(jarFilePath);
        byte[] hash = calculateHash(jarBytes);
        Map<String, Object> columns = new HashMap<String, Object>();
        columns.put(Columns.module_id.name(), moduleId.toString());
        columns.put(Columns.module_name.name(), moduleId.getName());
        columns.put(Columns.module_version.name(), moduleId.getVersion());
        columns.put(Columns.shard_num.name(), shardNum);
        columns.put(Columns.last_update.name(), jarScriptArchive.getCreateTime());
        columns.put(Columns.archive_content_hash.name(), hash);
        columns.put(Columns.archive_content.name(), jarBytes);

        String serialized = getConfig().getModuleSpecSerializer().serialize(moduleSpec);
        columns.put(Columns.module_spec.name(), serialized);
        try {
            cassandra.upsert(moduleId.toString(), columns);
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    /**
     * Unsupported.
     */
    @Override
    public void insertArchive(JarScriptArchive jarScriptArchive, Map<String, Object> initialDeploySpecs) throws IOException {
        throw new UnsupportedOperationException("This repository does not support deployment specs.");
    }

    /**
     * Get all of the {@link ScriptArchive}s for the given set of moduleIds. Will perform the operation in batches
     * as specified by {@link CassandraArchiveRepositoryConfig#getArchiveFetchBatchSize()} and outputs the jar files in
     * the path specified by {@link CassandraArchiveRepositoryConfig#getArchiveOutputDirectory()}.
     *
     * @param moduleIds keys to search for
     * @return set of ScriptArchives retrieved from the database
     */
    @Override
    public Set<ScriptArchive> getScriptArchives(Set<ModuleId> moduleIds) throws IOException {
        Set<ScriptArchive> archives = new LinkedHashSet<ScriptArchive>(moduleIds.size()*2);
        Path archiveOuputDir = getConfig().getArchiveOutputDirectory();
        List<ModuleId> moduleIdList = new LinkedList<ModuleId>(moduleIds);
        int batchSize = getConfig().getArchiveFetchBatchSize();
        int start = 0;
        try {
            while (start < moduleIdList.size()) {
                // fetch the next batch of row keys
                int end = Math.min(moduleIdList.size(), start + batchSize);
                List<ModuleId> batchModuleIds = moduleIdList.subList(start, end);
                List<String> rowKeys = new ArrayList<String>(batchModuleIds.size());
                for (ModuleId batchModuleId:batchModuleIds) {
                    rowKeys.add(batchModuleId.toString());
                }
                Rows<String, String> rows = cassandra.getRows(rowKeys.toArray(new String[0]));
                for (Row<String, String> row : rows) {
                    String moduleId = row.getKey();
                    ColumnList<String> columns = row.getColumns();
                    Column<String> lastUpdateColumn = columns.getColumnByName(Columns.last_update.name());
                    Column<String> hashColumn = columns.getColumnByName(Columns.archive_content_hash.name());
                    Column<String> contentColumn = columns.getColumnByName(Columns.archive_content.name());
                    // skip rows that are missing any required column (partial writes)
                    if (lastUpdateColumn == null || hashColumn == null || contentColumn == null) {
                        continue;
                    }
                    ScriptModuleSpec moduleSpec = getModuleSpec(columns);
                    long lastUpdateTime = lastUpdateColumn.getLongValue();
                    byte[] hash = hashColumn.getByteArrayValue();
                    byte[] content = contentColumn.getByteArrayValue();

                    // verify the hash; corrupted rows are logged and skipped, not fatal
                    if (hash != null && hash.length > 0 && !verifyHash(hash, content)) {
                        logger.warn("Content hash validation failed for moduleId {}. size: {}", moduleId, content.length);
                        continue;
                    }
                    // materialize the archive bytes as <moduleId>-<updateTime>.jar in the output dir
                    String fileName = new StringBuilder().append(moduleId).append("-").append(lastUpdateTime).append(".jar").toString();
                    Path jarFile = archiveOuputDir.resolve(fileName);
                    Files.write(jarFile, content);
                    JarScriptArchive scriptArchive = new JarScriptArchive.Builder(jarFile)
                        .setModuleSpec(moduleSpec)
                        .setCreateTime(lastUpdateTime)
                        .build();
                    archives.add(scriptArchive);
                }
                start = end;
            }
        } catch (Exception e) {
            throw new IOException(e);
        }
        return archives;
    }

    /**
     * Delete an archive by ID
     * @param moduleId module id to delete
     * @throws IOException if the delete operation fails
     */
    @Override
    public void deleteArchive(ModuleId moduleId) throws IOException {
        Objects.requireNonNull(moduleId, "moduleId");
        cassandra.deleteRow(moduleId.toString());
    }

    /**
     * Get all of the rows in in the table. Attempts to reduce the load on cassandra by splitting up the query into smaller sub-queries
     * @param columns which columns to select
     * @return result rows
     */
    protected Iterable<Row<String, String>> getRows(EnumSet<?> columns) throws Exception {
        int shardCount = config.getShardCount();
        // issue one async query per shard, then gather all results
        List<Future<Rows<String, String>>> futures = new ArrayList<Future<Rows<String, String>>>();
        for (int i = 0; i < shardCount; i++) {
            futures.add(cassandra.selectAsync(generateSelectByShardCql(columns, i)));
        }
        List<Row<String, String>> rows = new LinkedList<Row<String, String>>();
        for (Future<Rows<String, String>> f: futures) {
            Rows<String, String> shardRows = f.get();
            Iterables.addAll(rows, shardRows);
        }
        return rows;
    }

    /**
     * Generate the CQL to select specific columns by shard number.
     * <pre>
     * SELECT ${columns}... FROM script_repo WHERE shard_num = ?
     * </pre>
     */
    protected String generateSelectByShardCql(EnumSet<?> columns, Integer shardNum) {
        StringBuilder sb = new StringBuilder()
            .append("SELECT ");
        boolean first = true;
        for (Enum<?> column : columns) {
            if (first) {
                first = false;
            } else {
                sb.append(",");
            }
            sb.append(column.name());
        }
        sb.append("\n")
            .append("FROM ").append(cassandra.getColumnFamily())
            .append("\n").append("WHERE ").append(Columns.shard_num.name())
            .append(" = ").append(shardNum).append("\n");
        return sb.toString();
    }

    // Compare the stored content hash against one recomputed from the content bytes.
    protected boolean verifyHash(byte[] expectedHashCode, byte[] content) {
        byte[] hashCode = calculateHash(content);
        return Arrays.equals(expectedHashCode, hashCode);
    }

    // SHA-1 digest of the archive content; returns null only if SHA-1 is unavailable.
    protected byte[] calculateHash(byte[] content) {
        MessageDigest digester;
        try {
            digester = MessageDigest.getInstance("SHA-1");
        } catch (NoSuchAlgorithmException e) {
            // should never happen
            return null;
        }
        byte[] hashCode = digester.digest(content);
        return hashCode;
    }

    // Deterministically map a module id into one of getShardCount() shards.
    protected int calculateShardNum(ModuleId moduleId) {
        return Math.abs(moduleId.hashCode() % getConfig().getShardCount());
    }

    // Deserialize the module_spec column if present and non-empty; otherwise null.
    private ScriptModuleSpec getModuleSpec(ColumnList<String> columns) {
        ScriptModuleSpec moduleSpec = null;
        if (columns != null) {
            Column<String> moduleSpecColumn = columns.getColumnByName(Columns.module_spec.name());
            if (moduleSpecColumn != null && moduleSpecColumn.hasValue()) {
                String moduleSpecString = moduleSpecColumn.getStringValue();
                moduleSpec = getConfig().getModuleSpecSerializer().deserialize(moduleSpecString);
            }
        }
        return moduleSpec;
    }

    /**
     * @return configuration settings for this repository
     */
    public CassandraArchiveRepositoryConfig getConfig() {
        return config;
    }

    // Default view implementation reporting over every archive in the repository.
    protected class DefaultView implements RepositoryView {
        @Override
        public String getName() {
            return "Default View";
        }

        /**
         * Get the last update times of all of the script archives managed by this Repository.
         * @return map of moduleId to last update time
         */
        @Override
        public Map<ModuleId, Long> getArchiveUpdateTimes() throws IOException {
            Iterable<Row<String, String>> rows;
            try {
                rows = getRows((EnumSet<?>)EnumSet.of(Columns.module_id, Columns.last_update));
            } catch (Exception e) {
                throw new IOException(e);
            }
            Map<ModuleId, Long> updateTimes = new LinkedHashMap<ModuleId, Long>();
            for (Row<String, String> row : rows) {
                String moduleId = row.getKey();
                Column<String> lastUpdateColumn = row.getColumns().getColumnByName(Columns.last_update.name());
                Long updateTime = lastUpdateColumn != null ? lastUpdateColumn.getLongValue() : null;
                // rows with a blank key or missing update time are ignored
                if (StringUtils.isNotBlank(moduleId) && updateTime != null) {
                    updateTimes.put(ModuleId.fromString(moduleId), updateTime);
                }
            }
            return updateTimes;
        }

        @Override
        public RepositorySummary getRepositorySummary() throws IOException {
            Map<ModuleId, Long> updateTimes = getArchiveUpdateTimes();
            int archiveCount = updateTimes.size();
            long maxUpdateTime = 0;
            for (Long updateTime : updateTimes.values()) {
                if (updateTime > maxUpdateTime) {
                    maxUpdateTime = updateTime;
                }
            }
            String description = String.format("Cassandra Keyspace: %s Column Family: %s",
                cassandra.getKeyspace().getKeyspaceName(), cassandra.getColumnFamily());
            RepositorySummary repositorySummary = new RepositorySummary(getRepositoryId(), description, archiveCount, maxUpdateTime);
            return repositorySummary;
        }

        /**
         * Get a summary of all archives in this Repository
         * @return List of summaries
         */
        @Override
        public List<ArchiveSummary> getArchiveSummaries() throws IOException {
            List<ArchiveSummary> summaries = new LinkedList<ArchiveSummary>();
            Iterable<Row<String, String>> rows;
            try {
                rows = getRows((EnumSet<?>)EnumSet.of(Columns.module_id, Columns.last_update, Columns.module_spec));
            } catch (Exception e) {
                throw new IOException(e);
            }
            for (Row<String, String> row : rows) {
                String moduleId = row.getKey();
                ColumnList<String> columns = row.getColumns();
                Column<String> lastUpdateColumn = columns.getColumnByName(Columns.last_update.name());
                long updateTime = lastUpdateColumn != null ? lastUpdateColumn.getLongValue() : 0;
                ScriptModuleSpec moduleSpec = getModuleSpec(columns);
                ArchiveSummary summary = new ArchiveSummary(ModuleId.fromString(moduleId), moduleSpec, updateTime, null);
                summaries.add(summary);
            }
            return summaries;
        }
    }
}
1,907
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/CassandraArchiveRepositoryConfig.java
/*
 *
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.nicobar.cassandra;

import java.nio.file.Path;

import com.netflix.nicobar.core.archive.ScriptModuleSpec;
import com.netflix.nicobar.core.archive.ScriptModuleSpecSerializer;

/**
 * Configuration provider interface for the {@link CassandraArchiveRepository}
 *
 * @author James Kojo
 */
public interface CassandraArchiveRepositoryConfig {
    /**
     * @return a gateway to perform common operations on the datastore.
     */
    public CassandraGateway getCassandraGateway();

    /**
     * @return number of shards to put the archives in.
     * NOTE(review): the repository both writes and queries by shard number, so this
     * value presumably must not change after data has been written — confirm before tuning.
     */
    public int getShardCount();

    /**
     * @return how many archives to fetch at a time (batch size per round-trip)
     */
    public int getArchiveFetchBatchSize();

    /**
     * @return the output directory where fetched archive jars are written
     */
    public Path getArchiveOutputDirectory();

    /**
     * @return serializer for the {@link ScriptModuleSpec} for use when inserting or fetching data.
     */
    public ScriptModuleSpecSerializer getModuleSpecSerializer();

    /**
     * @return Descriptive string which uniquely identifies the repository. Used for display and reporting.
     */
    public String getRepositoryId();
}
1,908
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/CassandraGatewayImpl.java
/*
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.nicobar.cassandra;

import java.util.Map;
import java.util.concurrent.Future;

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Rows;
import com.netflix.nicobar.cassandra.internal.HystrixCassandraDeleteColumns;
import com.netflix.nicobar.cassandra.internal.HystrixCassandraDeleteRow;
import com.netflix.nicobar.cassandra.internal.HystrixCassandraGetRow;
import com.netflix.nicobar.cassandra.internal.HystrixCassandraGetRowsByKeys;
import com.netflix.nicobar.cassandra.internal.HystrixCassandraGetRowsByQuery;
import com.netflix.nicobar.cassandra.internal.HystrixCassandraPut;

/**
 * Concrete implementation of CassandraGateway, using Cassandra Hystrix commands.
 * Each operation constructs the corresponding Hystrix command and executes it
 * synchronously, except {@link #selectAsync(String)} which queues it.
 *
 * It is not ideal that this behavior is provided by nicobar-cassandra. Preferably,
 * Astyanax itself should expose hystrix protected cassandra operations that we
 * can directly use.
 *
 * @author Vasanth Asokan
 */
public class CassandraGatewayImpl implements CassandraGateway {
    private final Keyspace keyspace;
    private final String columnFamily;

    /**
     * @param keyspace the Astyanax keyspace all operations run against
     * @param cf the column family name all operations target
     */
    public CassandraGatewayImpl(Keyspace keyspace, String cf) {
        this.keyspace = keyspace;
        this.columnFamily = cf;
    }

    @Override
    public Keyspace getKeyspace() {
        return this.keyspace;
    }

    @Override
    public String getColumnFamily() {
        return this.columnFamily;
    }

    @Override
    public void upsert(String rowKey, Map<String, Object> attributes) {
        new HystrixCassandraPut<String>(keyspace, columnFamily, rowKey, attributes).execute();
    }

    @Override
    public void upsert(String rowKey, Map<String, Object> attributes, int ttlSeconds) {
        new HystrixCassandraPut<String>(keyspace, columnFamily, rowKey, attributes, ttlSeconds).execute();
    }

    @Override
    public ColumnList<String> getRow(String rowKey) {
        return new HystrixCassandraGetRow<String>(keyspace, columnFamily, rowKey).execute();
    }

    @Override
    public Rows<String, String> getRows(String... rowKeys) {
        return new HystrixCassandraGetRowsByKeys<String>(keyspace, columnFamily, rowKeys).execute();
    }

    @Override
    public Rows<String, String> select(String cql) {
        return new HystrixCassandraGetRowsByQuery<String>(keyspace, columnFamily, String.class, cql).execute();
    }

    @Override
    public Future<Rows<String, String>> selectAsync(String cql) {
        return new HystrixCassandraGetRowsByQuery<String>(keyspace, columnFamily, String.class, cql).queue();
    }

    @Override
    public ColumnList<String> getColumns(String rowKey, String... columns) {
        return new HystrixCassandraGetRow<String>(keyspace, columnFamily, rowKey).withColumns(columns).execute();
    }

    @Override
    public void deleteRow(String rowKey) {
        new HystrixCassandraDeleteRow<String>(keyspace, columnFamily, rowKey).execute();
    }

    @Override
    public void deleteColumn(String rowKey, String column) {
        // BUG FIX: previously the command was constructed but execute() was never
        // called, so deleteColumn was a silent no-op. Execute it like every other
        // operation in this gateway.
        new HystrixCassandraDeleteColumns<String>(keyspace, columnFamily, rowKey, column).execute();
    }
}
1,909
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/CassandraGateway.java
/*
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.nicobar.cassandra;

import java.util.Map;
import java.util.concurrent.Future;

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Rows;

/**
 * Common cassandra CRUD operations, scoped to a single keyspace/column family pair.
 *
 * @author Vasanth Asokan
 */
public interface CassandraGateway {
    /**
     * Return the keyspace that this gateway provides access to.
     * @return the Cassandra keyspace.
     */
    public Keyspace getKeyspace();

    /**
     * Return the column family that this gateway provides access to.
     * @return a cassandra column family name.
     */
    public String getColumnFamily();

    /**
     * Performs an insert/update for a row in Cassandra.
     *
     * @param rowKey the row key to use for insertions.
     * @param attributes map of column names to column values.
     */
    public void upsert(String rowKey, Map<String, Object> attributes);

    /**
     * Performs an insert/update for a row in Cassandra.
     *
     * @param rowKey the row key to use for insertions
     * @param attributes map of column names to column values.
     * @param ttlSeconds how long should columns in this upsert live.
     */
    public void upsert(String rowKey, Map<String, Object> attributes, int ttlSeconds);

    /**
     * Deletes a row in Cassandra.
     * @param rowKey the key of the row to delete.
     */
    public void deleteRow(String rowKey);

    /**
     * Deletes a column from a row in Cassandra.
     * @param rowKey the key of the row containing the column.
     * @param column the name of the column to delete.
     */
    public void deleteColumn(String rowKey, String column);

    /**
     * Gets specific columns from a specific row
     *
     * @param rowKey the specific row's key.
     * @param columns the specific columns
     * @return retrieved column list, possibly null.
     */
    public ColumnList<String> getColumns(String rowKey, String... columns);

    /**
     * Gets all columns for the specified row.
     * @param rowKey a single row key.
     * @return list of columns for the row, possibly null.
     */
    public ColumnList<String> getRow(String rowKey);

    /**
     * Gets all columns for all the listed row keys.
     * @param rowKeys a list of row keys.
     * @return list of rows, possibly null.
     */
    public Rows<String, String> getRows(String... rowKeys);

    /**
     * Performs a CQL query and returns result.
     *
     * @param cql the CQL query string.
     * @return resulting row set, could be null.
     */
    public Rows<String, String> select(String cql);

    /**
     * Performs a CQL query asynchronously
     *
     * @param cql the CQL query string.
     * @return Future containing result row set.
     */
    public Future<Rows<String, String>> selectAsync(String cql);
}
1,910
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/BasicCassandraRepositoryConfig.java
/*
 *
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.nicobar.cassandra;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Objects;

import com.netflix.astyanax.Keyspace;
import com.netflix.nicobar.core.archive.GsonScriptModuleSpecSerializer;
import com.netflix.nicobar.core.archive.ScriptModuleSpecSerializer;

/**
 * Basic builder-based configuration provider for the {@link CassandraArchiveRepository}.
 *
 * @author James Kojo
 */
public class BasicCassandraRepositoryConfig implements CassandraArchiveRepositoryConfig {
    /** default number of shards to separate the archives into */
    public static final int DEFAULT_SHARD_COUNT = 10;

    /** Default number of archives to fetch per round-trip */
    public static final int DEFAULT_FETCH_BATCH_SIZE = 10;

    /** Default module spec serializer */
    public static final ScriptModuleSpecSerializer DEFAULT_SPEC_SERIALIZER = new GsonScriptModuleSpecSerializer();

    /** Builder for {@link BasicCassandraRepositoryConfig}; unset fields fall back to defaults at build time. */
    public static class Builder {
        private String repositoryId;
        private int shardCount = DEFAULT_SHARD_COUNT;
        private int fetchBatchSize = DEFAULT_FETCH_BATCH_SIZE;
        private Path archiveOutputDirectory;
        private ScriptModuleSpecSerializer specSerializer = DEFAULT_SPEC_SERIALIZER;
        private CassandraGateway cassandraGateway;

        /** Build by constructing a cassandra gateway for the given keyspace and column family */
        public Builder(Keyspace keyspace, String columnFamily) {
            this.cassandraGateway = new CassandraGatewayImpl(keyspace, columnFamily);
        }

        /** Build with the given cassandra gateway */
        public Builder(CassandraGateway gateway) {
            this.cassandraGateway = gateway;
        }

        /** Set a unique, descriptive identifier used for reporting and display*/
        public Builder setRepositoryId(String repositoryId) {
            this.repositoryId = repositoryId;
            return this;
        }

        /** Number of shards or buckets the archives should be put into */
        public Builder setShardCount(int shardCount) {
            this.shardCount = shardCount;
            return this;
        }

        /**
         * Number of archives to fetch per round-trip to the database.
         * NOTE(review): method name says "SizeCount"; kept as-is for source compatibility.
         */
        public Builder setFetchBatchSizeCount(int fetchBatchSize) {
            this.fetchBatchSize = fetchBatchSize;
            return this;
        }

        /** Output Directory for the script archives that were downloaded */
        public Builder setArchiveOutputDirectory(Path archiveOutputDirectory) {
            this.archiveOutputDirectory = archiveOutputDirectory;
            return this;
        }

        /**
         * Set a custom serializer for the module specification.
         * NOTE(review): method name is missing the trailing 'r' of "Serializer"; kept as-is for source compatibility.
         */
        public Builder setModuleSpecSerialize(ScriptModuleSpecSerializer specSerializer) {
            this.specSerializer = specSerializer;
            return this;
        }

        /**
         * Construct the config with defaults if necessary.
         * Defaults: repositoryId = "<keyspace>-<columnFamily>"; output dir = fresh temp directory.
         * @throws IOException if the temp output directory cannot be created
         */
        public CassandraArchiveRepositoryConfig build() throws IOException {
            Keyspace keyspace = cassandraGateway.getKeyspace();
            String columnFamilyName = cassandraGateway.getColumnFamily();
            String buildRepositoryId = repositoryId;
            if (buildRepositoryId == null) {
                buildRepositoryId = keyspace.getKeyspaceName() + "-" + columnFamilyName;
            }
            Path buildArchiveDir = archiveOutputDirectory;
            if (buildArchiveDir == null) {
                buildArchiveDir = Files.createTempDirectory("ScriptArchiveOutputDir");
            }
            return new BasicCassandraRepositoryConfig(buildRepositoryId, cassandraGateway, shardCount, fetchBatchSize, buildArchiveDir, specSerializer);
        }
    }

    private final String repositoryId;
    private final int shardCount;
    private final int fetchBatchSize;
    private final Path archiveOutputDirectory;
    private final ScriptModuleSpecSerializer moduleSpecSerializer;
    private final CassandraGateway cassandraGateway;

    protected BasicCassandraRepositoryConfig(String repositoryId, CassandraGateway gateway, int shardCount, int fetchBatchSize, Path archiveOutputDirectory,
        ScriptModuleSpecSerializer moduleSpecSerializer) {
        this.repositoryId = Objects.requireNonNull(repositoryId, "repositoryId");
        this.cassandraGateway = Objects.requireNonNull(gateway, "cassandraGateway");
        this.shardCount = shardCount;
        this.fetchBatchSize = fetchBatchSize;
        this.archiveOutputDirectory = Objects.requireNonNull(archiveOutputDirectory, "archiveOutputDirectory");
        this.moduleSpecSerializer = Objects.requireNonNull(moduleSpecSerializer, "moduleSpecSerializer");
    }

    @Override
    public CassandraGateway getCassandraGateway() {
        return cassandraGateway;
    }

    @Override
    public int getShardCount() {
        return shardCount;
    }

    @Override
    public int getArchiveFetchBatchSize() {
        return fetchBatchSize;
    }

    @Override
    public Path getArchiveOutputDirectory() {
        return archiveOutputDirectory;
    }

    @Override
    public ScriptModuleSpecSerializer getModuleSpecSerializer() {
        return moduleSpecSerializer;
    }

    @Override
    public String getRepositoryId() {
        return repositoryId;
    }
}
1,911
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/internal/HystrixCassandraGetRowsByQuery.java
/*
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.nicobar.cassandra.internal;

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.CqlResult;
import com.netflix.astyanax.model.Rows;

/**
 * Hystrix command to get rows from Cassandra that match a particular CQL query.
 * See http://crlog.info/2011/06/13/cassandra-query-language-cql-v1-0-0-updated/
 * @param <RowKeyType> the type of the row key, String, Integer etc.
 * @author Vasanth Asokan, modified from hystrix command implementations in Zuul
 *         Zuul (https://github.com/Netflix/zuul)
 */
public class HystrixCassandraGetRowsByQuery<RowKeyType> extends AbstractCassandraHystrixCommand<Rows<RowKeyType, String>> {

    private final Keyspace keyspace;
    private final ColumnFamily<RowKeyType, String> columnFamily;
    private final String cql;

    /**
     * Get rows matching the given CQL query.
     * (Fixed doc: this command selects by CQL, not by row keys.)
     *
     * @param keyspace the keyspace to query
     * @param columnFamily the column family to query
     * @param cql the CQL query string
     */
    public HystrixCassandraGetRowsByQuery(Keyspace keyspace, ColumnFamily<RowKeyType, String> columnFamily, String cql) {
        this.keyspace = keyspace;
        this.columnFamily = columnFamily;
        this.cql = cql;
    }

    /**
     * Get rows matching the given CQL query, resolving the column family by name.
     *
     * @param keyspace the keyspace to query
     * @param columnFamilyName name of the column family to query
     * @param columnFamilyKeyType class of the row key type, used to build the ColumnFamily
     * @param cql the CQL query string
     */
    @SuppressWarnings("unchecked")
    public HystrixCassandraGetRowsByQuery(Keyspace keyspace, String columnFamilyName, Class<?> columnFamilyKeyType, String cql) {
        this.keyspace = keyspace;
        this.columnFamily = getColumnFamilyViaColumnName(columnFamilyName, columnFamilyKeyType);
        this.cql = cql;
    }

    @Override
    protected Rows<RowKeyType, String> run() throws Exception {
        CqlResult<RowKeyType, String> cqlresult = keyspace.prepareQuery(columnFamily).withCql(cql).execute()
                .getResult();
        Rows<RowKeyType, String> result = cqlresult.getRows();
        return result;
    }
}
1,912
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/internal/HystrixCassandraGetRowsByKeys.java
/* * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.nicobar.cassandra.internal; import com.netflix.astyanax.Keyspace; import com.netflix.astyanax.model.ColumnFamily; import com.netflix.astyanax.model.Rows; import com.netflix.astyanax.query.RowSliceQuery; /** * Hystrix command to get rows from Cassandra specified by a set of row keys. * See http://crlog.info/2011/06/13/cassandra-query-language-cql-v1-0-0-updated/ * @param <RowKeyType> the type of the row key, String, Integer etc. * @author Vasanth Asokan, modified from hystrix command implementations in Zuul * Zuul (https://github.com/Netflix/zuul) */ public class HystrixCassandraGetRowsByKeys<RowKeyType> extends AbstractCassandraHystrixCommand<Rows<RowKeyType, String>> { private final Keyspace keyspace; private final ColumnFamily<RowKeyType, String> columnFamily; private final RowKeyType[] rowKeys; private String[] columns; @SuppressWarnings("unchecked") public HystrixCassandraGetRowsByKeys(Keyspace keyspace, String columnFamilyName, RowKeyType... rowKeys) { this.keyspace = keyspace; this.columnFamily = getColumnFamilyViaColumnName(columnFamilyName, rowKeys[0]); this.rowKeys = rowKeys; } /** * Restrict the response to only these columns. * * Example usage: new HystrixCassandraGetRow(args).withColumns("column1", * "column2").execute() * * @param columns list of column names. * @return result row sets. */ public HystrixCassandraGetRowsByKeys<RowKeyType> withColumns(String... 
columns) { this.columns = columns; return this; } @Override protected Rows<RowKeyType, String> run() throws Exception { RowSliceQuery<RowKeyType, String> rowQuery = null; rowQuery = keyspace.prepareQuery(columnFamily).getKeySlice(rowKeys); /* apply column slice if we have one */ if (columns != null) { rowQuery = rowQuery.withColumnSlice(columns); } Rows<RowKeyType, String> result = rowQuery.execute().getResult(); return result; } }
1,913
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/internal/HystrixCassandraGetRow.java
/* * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.nicobar.cassandra.internal; import com.netflix.astyanax.Keyspace; import com.netflix.astyanax.model.ColumnFamily; import com.netflix.astyanax.model.ColumnList; import com.netflix.astyanax.query.RowQuery; /** * Hystrix command to get a row from Cassandra using a specific row key. * * @param <RowKeyType> the row key type - String, Integer etc. * @author Vasanth Asokan, modified from hystrix command implementations in Zuul (https://github.com/Netflix/zuul) */ public class HystrixCassandraGetRow<RowKeyType> extends AbstractCassandraHystrixCommand<ColumnList<String>> { private final Keyspace keyspace; private final ColumnFamily<RowKeyType, String> columnFamily; private final RowKeyType rowKey; private String[] columns; public HystrixCassandraGetRow(Keyspace keyspace, ColumnFamily<RowKeyType, String> columnFamily, RowKeyType rowKey) { this.keyspace = keyspace; this.columnFamily = columnFamily; this.rowKey = rowKey; } /** * Restrict the response to only these columns. * * Example usage: new HystrixCassandraGetRow(args).withColumns("column1", "column2").execute() * * @param columns the list of column names. * @return self. */ public HystrixCassandraGetRow<RowKeyType> withColumns(String... 
columns) { this.columns = columns; return this; } @SuppressWarnings("unchecked") public HystrixCassandraGetRow(Keyspace keyspace, String columnFamilyName, RowKeyType rowKey) { this.keyspace = keyspace; this.columnFamily = getColumnFamilyViaColumnName(columnFamilyName, rowKey); this.rowKey = rowKey; } @Override protected ColumnList<String> run() throws Exception { RowQuery<RowKeyType, String> rowQuery = keyspace.prepareQuery(columnFamily).getKey(rowKey); /* apply column slice if we have one */ if (columns != null) { rowQuery = rowQuery.withColumnSlice(columns); } ColumnList<String> result = rowQuery.execute().getResult(); return result; } }
1,914
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/internal/HystrixCassandraPut.java
/*
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.nicobar.cassandra.internal;

import java.nio.ByteBuffer;
import java.util.Date;
import java.util.Map;

import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;

/**
 * Hystrix command to store a row in Cassandra using String keys. Supported
 * value types in this implementation are: String, Boolean, Integer, Long,
 * Double, Date, byte[], ByteBuffer
 * @param <RowKeyType> the type of the row key.
 * @author Vasanth Asokan, modified from hystrix command implementations in Zuul
 *         (https://github.com/Netflix/zuul)
 */
public class HystrixCassandraPut<RowKeyType> extends AbstractCassandraHystrixCommand<Void> {

    private final Keyspace keyspace;
    private final ColumnFamily<RowKeyType, String> columnFamily;
    private final RowKeyType rowKey;
    private final Map<String, Object> attributes;
    // null means "no TTL"; Astyanax putColumn treats a null ttl as no expiry
    private Integer ttlSeconds = null;

    /**
     * Put without a TTL (columns never expire).
     */
    public HystrixCassandraPut(Keyspace keyspace, String columnFamilyName, RowKeyType rowKey, Map<String, Object> attributes) {
        this(keyspace, columnFamilyName, rowKey, attributes, -1);
    }

    /**
     * Put with an optional TTL; ttlSeconds <= 0 means no expiry.
     */
    @SuppressWarnings("unchecked")
    public HystrixCassandraPut(Keyspace keyspace, String columnFamilyName, RowKeyType rowKey, Map<String, Object> attributes, int ttlSeconds) {
        this.keyspace = keyspace;
        this.columnFamily = getColumnFamilyViaColumnName(columnFamilyName, rowKey);
        this.rowKey = rowKey;
        this.attributes = attributes;
        if (ttlSeconds > 0) {
            this.ttlSeconds = ttlSeconds;
        }
    }

    @Override
    protected Void run() throws Exception {
        MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(ConsistencyLevel.CL_QUORUM);
        // Setting columns in a standard column
        ColumnListMutation<String> cm = m.withRow(columnFamily, rowKey);
        // Iterate entries directly instead of keySet()+get() (one lookup, clearer intent).
        for (Map.Entry<String, Object> entry : attributes.entrySet()) {
            Object o = entry.getValue();
            if (o != null) {
                // null values are skipped: we never write a tombstone/empty column here
                putColumnValue(cm, entry.getKey(), o);
            }
        }
        m.execute();
        return null;
    }

    /**
     * Dispatch a single column write to the correctly-typed putColumn overload.
     * Unfortunately 'putColumn' does not figure out the Object's type itself,
     * so we must do it manually.
     * @throws IllegalArgumentException for unsupported value types
     */
    private void putColumnValue(ColumnListMutation<String> cm, String key, Object o) {
        if (o instanceof String) {
            cm.putColumn(key, (String) o, ttlSeconds);
        } else if (o instanceof Boolean) {
            cm.putColumn(key, (Boolean) o, ttlSeconds);
        } else if (o instanceof Integer) {
            cm.putColumn(key, (Integer) o, ttlSeconds);
        } else if (o instanceof Long) {
            cm.putColumn(key, (Long) o, ttlSeconds);
        } else if (o instanceof Double) {
            cm.putColumn(key, (Double) o, ttlSeconds);
        } else if (o instanceof Date) {
            cm.putColumn(key, (Date) o, ttlSeconds);
        } else if (o instanceof byte[]) {
            cm.putColumn(key, (byte[]) o, ttlSeconds);
        } else if (o instanceof ByteBuffer) {
            cm.putColumn(key, (ByteBuffer) o, ttlSeconds);
        } else {
            throw new IllegalArgumentException("Unsupported object instance type: " + o.getClass().getSimpleName());
        }
    }
}
1,915
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/internal/HystrixCassandraDeleteColumns.java
/* * * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.nicobar.cassandra.internal; import java.util.Arrays; import java.util.List; import com.netflix.astyanax.ColumnListMutation; import com.netflix.astyanax.Keyspace; import com.netflix.astyanax.MutationBatch; import com.netflix.astyanax.model.ColumnFamily; /** * Hystrix command to delete columns from a specific row in from Cassandra. * @param <RowKeyType> the row key type, String, Integer etc. * @author Vasanth Asokan, modified from hystrix command implementations in * Zuul (https://github.com/Netflix/zuul) */ public class HystrixCassandraDeleteColumns<RowKeyType> extends AbstractCassandraHystrixCommand<Void> { private final Keyspace keyspace; private final ColumnFamily<RowKeyType, String> columnFamily; private final RowKeyType rowKey; private final List<String> columnNames; @SuppressWarnings("unchecked") public HystrixCassandraDeleteColumns(Keyspace keyspace, String columnFamilyName, RowKeyType rowKey, String... columnNames) { this.keyspace = keyspace; this.columnFamily = getColumnFamilyViaColumnName(columnFamilyName, rowKey); this.rowKey = rowKey; this.columnNames = Arrays.asList(columnNames); } @Override protected Void run() throws Exception { MutationBatch m = keyspace.prepareMutationBatch(); ColumnListMutation<String> mutation = m.withRow(columnFamily, rowKey); for (String column: columnNames) { mutation = mutation.deleteColumn(column); } m.execute(); return null; } }
1,916
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/internal/HystrixCassandraDeleteRow.java
/* * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.nicobar.cassandra.internal; import com.netflix.astyanax.Keyspace; import com.netflix.astyanax.MutationBatch; import com.netflix.astyanax.model.ColumnFamily; /** * Hystrix command to delete a row from Cassandra. * @author Vasanth Asokan, modified from hystrix command implementations in * Zuul (https://github.com/Netflix/zuul) */ public class HystrixCassandraDeleteRow<RowKeyType> extends AbstractCassandraHystrixCommand<Void> { private final Keyspace keyspace; private final ColumnFamily<RowKeyType, String> columnFamily; private final RowKeyType rowKey; public HystrixCassandraDeleteRow(Keyspace keyspace, ColumnFamily<RowKeyType, String> columnFamily, RowKeyType rowKey) { this.keyspace = keyspace; this.columnFamily = columnFamily; this.rowKey = rowKey; } @SuppressWarnings("unchecked") public HystrixCassandraDeleteRow(Keyspace keyspace, String columnFamilyName, RowKeyType rowKey) { this.keyspace = keyspace; this.columnFamily = getColumnFamilyViaColumnName(columnFamilyName, rowKey); this.rowKey = rowKey; } @Override protected Void run() throws Exception { MutationBatch m = keyspace.prepareMutationBatch(); m.withRow(columnFamily, rowKey).delete(); m.execute(); return null; } }
1,917
0
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra
Create_ds/Nicobar/nicobar-cassandra/src/main/java/com/netflix/nicobar/cassandra/internal/AbstractCassandraHystrixCommand.java
/* * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.nicobar.cassandra.internal; import com.netflix.astyanax.model.ColumnFamily; import com.netflix.astyanax.serializers.IntegerSerializer; import com.netflix.astyanax.serializers.LongSerializer; import com.netflix.astyanax.serializers.StringSerializer; import com.netflix.hystrix.HystrixCommand; import com.netflix.hystrix.HystrixCommandGroupKey; /** * Abstract Hystrix wrapper for Astyanax Cassandra calls * @author Vasanth Asokan, modified from hystrix command implementations in * Zuul (https://github.com/Netflix/zuul) */ public abstract class AbstractCassandraHystrixCommand<K> extends HystrixCommand<K> { public AbstractCassandraHystrixCommand() { super(HystrixCommandGroupKey.Factory.asKey("Cassandra")); } /** * returns a ColumnFamily given a columnFamilyName * @param columnFamilyName * @param rowKey * @return the constructed ColumnFamily */ @SuppressWarnings("rawtypes") protected ColumnFamily getColumnFamilyViaColumnName(String columnFamilyName, Object rowKey) { return getColumnFamilyViaColumnName(columnFamilyName, rowKey.getClass()); } /** * returns a ColumnFamily given a columnFamilyName * @param columnFamilyName * @param rowKeyClass * @return a constructed ColumnFamily */ @SuppressWarnings({"unchecked", "rawtypes"}) protected ColumnFamily getColumnFamilyViaColumnName(String columnFamilyName, Class rowKeyClass) { if (rowKeyClass == String.class) { return new 
ColumnFamily(columnFamilyName, StringSerializer.get(), StringSerializer.get()); } else if (rowKeyClass == Integer.class) { return new ColumnFamily(columnFamilyName, IntegerSerializer.get(), StringSerializer.get()); } else if (rowKeyClass == Long.class) { return new ColumnFamily(columnFamilyName, LongSerializer.get(), StringSerializer.get()); } else { throw new IllegalArgumentException("RowKeyType is not supported: " + rowKeyClass.getSimpleName() + ". String/Integer/Long are supported, or you can define the ColumnFamily yourself and use the other constructor."); } } }
1,918
0
Create_ds/Nicobar/nicobar-example/src/test
Create_ds/Nicobar/nicobar-example/src/test/java/ExampleTest.java
/* * * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ public class ExampleTest { }
1,919
0
Create_ds/Nicobar/nicobar-example/src/main/java/com/netflix/nicobar/example
Create_ds/Nicobar/nicobar-example/src/main/java/com/netflix/nicobar/example/groovy2/ExampleResourceLocator.java
/*
 *
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.nicobar.example.groovy2;

import java.nio.file.Path;

import com.netflix.nicobar.core.utils.ClassPathUtils;

/**
 * Examples of how to find resource jars in the classpath for loading as separate modules.
 *
 * @author James Kojo
 */
public class ExampleResourceLocator {
    // module ID to use for the Groovy plugin
    public static final String GROOVY2_PLUGIN_ID = "groovy2";
    // fully qualified class name of the Groovy 2 compiler plugin
    public static final String GROOVY2_COMPILER_PLUGIN_CLASS = "com.netflix.nicobar.groovy2.plugin.Groovy2CompilerPlugin";

    /**
     * Locate the groovy-all-n.n.n.jar file on the classpath.
     *
     * The ScriptModuleLoader will attempt to load the scripting runtimes into their own classloaders.
     * To accomplish this, the loader requires the actual file location of the groovy runtime jar.
     * This method is an example of a strategy for locating the groovy jar file in a programmatic way.
     *
     * This strategy assumes that the classloader of this example application has the
     * script runtime somewhere in its classpath.
     * This is not necessarily true of all applications, depending on disposition of the deployed application artifacts.
     *
     * It further assumes that the groovy runtime contains the file "groovy-release-info.properties"
     * which was true as of groovy-all-2.1.6.jar
     *
     * @return classpath root of the groovy runtime jar
     * @throws IllegalStateException if the groovy runtime cannot be found on the classpath
     */
    public static Path getGroovyRuntime() {
        Path path = ClassPathUtils.findRootPathForResource("META-INF/groovy-release-info.properties", ExampleResourceLocator.class.getClassLoader());
        if (path == null) {
            throw new IllegalStateException("couldn't find groovy-all.n.n.n.jar in the classpath.");
        }
        return path;
    }

    /**
     * Locate the classpath root which contains the groovy2 plugin.
     *
     * The ScriptModuleLoader will load the groovy2 plugin into the same classloader as the
     * groovy runtime. To accomplish this, the loader requires the file location of the groovy2 plugin.
     * This method is an example strategy for locating the plugin's jar file in a programmatic way.
     *
     * This strategy assumes that the classloader of this example application has the plugin in its classpath.
     * see {@link ExampleResourceLocator#getGroovyRuntime()}.
     *
     * @return classpath root of the groovy2 plugin jar
     * @throws IllegalStateException if the plugin cannot be found on the classpath
     */
    public static Path getGroovyPluginLocation() {
        String resourceName = ClassPathUtils.classNameToResourceName(GROOVY2_COMPILER_PLUGIN_CLASS);
        Path path = ClassPathUtils.findRootPathForResource(resourceName, ExampleResourceLocator.class.getClassLoader());
        if (path == null) {
            throw new IllegalStateException("couldn't find groovy2 plugin jar in the classpath.");
        }
        return path;
    }
}
1,920
0
Create_ds/Nicobar/nicobar-example/src/main/java/com/netflix/nicobar/example
Create_ds/Nicobar/nicobar-example/src/main/java/com/netflix/nicobar/example/groovy2/GroovyModuleLoaderExample.java
/* * * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.nicobar.example.groovy2; import static com.netflix.nicobar.example.groovy2.ExampleResourceLocator.GROOVY2_COMPILER_PLUGIN_CLASS; import static com.netflix.nicobar.example.groovy2.ExampleResourceLocator.GROOVY2_PLUGIN_ID; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.Collections; import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import org.apache.commons.io.IOUtils; import com.netflix.hystrix.Hystrix; import com.netflix.nicobar.core.archive.JarScriptArchive; import com.netflix.nicobar.core.execution.HystrixScriptModuleExecutor; import com.netflix.nicobar.core.execution.ScriptModuleExecutable; import com.netflix.nicobar.core.module.BaseScriptModuleListener; import com.netflix.nicobar.core.module.ScriptModule; import com.netflix.nicobar.core.module.ScriptModuleLoader; import com.netflix.nicobar.core.module.ScriptModuleUtils; import com.netflix.nicobar.core.persistence.ArchiveRepository; import com.netflix.nicobar.core.persistence.ArchiveRepositoryPoller; import com.netflix.nicobar.core.persistence.JarArchiveRepository; import com.netflix.nicobar.core.plugin.ScriptCompilerPluginSpec; /** * Example of how to build a script runtime that polls for Groovy based archives on disk. 
* At the end of this example, there will be a the following classloaders * *<pre> * Bootstrap Classloader (/jre/lib) - virtual * ExtClassLoader (/jre/lib/exp) * AppClassLoader (nicobar-core, nicobar-example) * Groovy2RuntimeModule (nicobar-groovy2, groovy-all.jar) * HelloWorld (HelloWorld.class) * </pre> * @author James Kojo */ public class GroovyModuleLoaderExample { // test script module info private static final String SCRIPT_MODULE_ID = "HelloWorld"; private static final String ARCHIVE_JAR_NAME = "HelloWorld.jar"; public static void main(String[] args) throws Exception { new GroovyModuleLoaderExample().runExample(); } public void runExample() throws Exception { // create the loader with the groovy plugin ScriptModuleLoader moduleLoader = new ScriptModuleLoader.Builder() .addPluginSpec(new ScriptCompilerPluginSpec.Builder(GROOVY2_PLUGIN_ID) // configure Groovy plugin .addRuntimeResource(ExampleResourceLocator.getGroovyRuntime()) .addRuntimeResource(ExampleResourceLocator.getGroovyPluginLocation()) .withPluginClassName(GROOVY2_COMPILER_PLUGIN_CLASS) .build()) .addListener(new BaseScriptModuleListener() { // add an example listener for module updates public void moduleUpdated(ScriptModule newScriptModule, ScriptModule oldScriptModule) { System.out.printf("Received module update event. newModule: %s, oldModule: %s%n", newScriptModule, oldScriptModule); } }) .build(); // create an archive repository and wrap a poller around it to feed updates to the module loader Path baseArchiveDir = Files.createTempDirectory(GroovyModuleLoaderExample.class.getSimpleName()); JarArchiveRepository repository = new JarArchiveRepository.Builder(baseArchiveDir).build(); deployTestArchive(repository); ArchiveRepositoryPoller poller = new ArchiveRepositoryPoller.Builder(moduleLoader).build(); poller.addRepository(repository, 30, TimeUnit.SECONDS, true); // the test module has now been compiled and is ready for execution. 
// create a closure which knows how to bind any request time inputs (if any) and execute the module. ScriptModuleExecutable<String> executable = new ScriptModuleExecutable<String>() { @Override public String execute(ScriptModule scriptModule) throws Exception { // the script doesn't necessarily have to implement any specific interfaces, but it does need to // be compilable to a class. Class<?> callable = ScriptModuleUtils.findAssignableClass(scriptModule, Callable.class); @SuppressWarnings("unchecked") Callable<String> instance = (Callable<String>) callable.newInstance(); String result = instance.call(); return result; } }; // Execute it in a Hystrix command. HystrixScriptModuleExecutor<String> executor = new HystrixScriptModuleExecutor<String>("TestModuleExecutor"); List<String> results = executor.executeModules(Collections.singletonList(SCRIPT_MODULE_ID), executable, moduleLoader); System.out.println("Module(s) have been executed. Output: " + results); // release the Hystrix resources Hystrix.reset(); } /* * Copy the example script module files to temporary directory and insert it into the repository */ private static void deployTestArchive(ArchiveRepository repository) throws IOException { InputStream archiveJarIs = GroovyModuleLoaderExample.class.getClassLoader().getResourceAsStream(ARCHIVE_JAR_NAME); Path archiveToDeploy = Files.createTempFile(SCRIPT_MODULE_ID, ".jar"); Files.copy(archiveJarIs, archiveToDeploy, StandardCopyOption.REPLACE_EXISTING); IOUtils.closeQuietly(archiveJarIs); JarScriptArchive jarScriptArchive = new JarScriptArchive.Builder(archiveToDeploy).build(); repository.insertArchive(jarScriptArchive); } }
1,921
0
Create_ds/SpinalTap/spinaltap-kafka/src/test/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-kafka/src/test/java/com/airbnb/spinaltap/kafka/KafkaDestinationTest.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.kafka; import com.airbnb.common.metrics.TaggedMetricRegistry; import com.airbnb.jitney.event.spinaltap.v1.Mutation; import com.airbnb.jitney.event.spinaltap.v1.MutationType; import com.airbnb.spinaltap.common.destination.DestinationMetrics; import com.airbnb.spinaltap.common.util.Mapper; import com.airbnb.spinaltap.mysql.BinlogFilePos; import com.airbnb.spinaltap.mysql.DataSource; import com.airbnb.spinaltap.mysql.mutation.MysqlDeleteMutation; import com.airbnb.spinaltap.mysql.mutation.MysqlInsertMutation; import com.airbnb.spinaltap.mysql.mutation.MysqlMutation; import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata; import com.airbnb.spinaltap.mysql.mutation.MysqlUpdateMutation; import com.airbnb.spinaltap.mysql.mutation.mapper.ThriftMutationMapper; import com.airbnb.spinaltap.mysql.mutation.schema.Column; import com.airbnb.spinaltap.mysql.mutation.schema.ColumnDataType; import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata; import com.airbnb.spinaltap.mysql.mutation.schema.Row; import com.airbnb.spinaltap.mysql.mutation.schema.Table; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Properties; import kafka.admin.AdminUtils; import kafka.server.KafkaConfig; import kafka.utils.ZKStringSerializer$; import kafka.utils.ZkUtils; import org.I0Itec.zkclient.ZkClient; import org.I0Itec.zkclient.ZkConnection; import org.I0Itec.zkclient.serialize.ZkSerializer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.apache.thrift.TBase; import org.apache.thrift.TDeserializer; import 
org.apache.thrift.protocol.TBinaryProtocol; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; public class KafkaDestinationTest extends AbstractKafkaIntegrationTestHarness { private static final int SESSION_TIMEOUT_MS = 10000; private static final int CONNECTION_TIMEOUT_MS = 10000; private static final ZkSerializer ZK_SERIALIZER = ZKStringSerializer$.MODULE$; private static final String SOURCE_NAME = "localhost"; private static final String HOSTNAME = "127.0.0.1"; private static final String DATABASE = "database"; private static final String TABLE = "table"; private static final String TOPIC = "spinaltap" + "." + SOURCE_NAME + "-" + DATABASE + "-" + TABLE; private static final ThreadLocal<TDeserializer> deserializer = ThreadLocal.withInitial(() -> new TDeserializer((new TBinaryProtocol.Factory()))); private final DestinationMetrics metrics = new DestinationMetrics("test", "test", new TaggedMetricRegistry()); @Before public void setUp() { super.setUp(); } @After public void tearDown() { super.tearDown(); } @Override public Properties overridingProps() { Properties props = new Properties(); props.setProperty(KafkaConfig.AutoCreateTopicsEnableProp(), Boolean.toString(false)); return props; } @Override public int clusterSize() { return 3; } private void createKafkaTopic(String topicName) throws Exception { ZkConnection zkConn = null; ZkClient zkClient = null; try { zkClient = new ZkClient( "localhost:" + zkPort(), SESSION_TIMEOUT_MS, CONNECTION_TIMEOUT_MS, ZK_SERIALIZER); zkConn = new ZkConnection("localhost:" + zkPort()); Properties props = new Properties(); props.setProperty("min.insync.replicas", "2"); AdminUtils.createTopic(new ZkUtils(zkClient, zkConn, false), topicName, 1, 3, props); } catch (Exception ex) { logger().error("Kafka topic creation failed due to " + ex.getLocalizedMessage()); // We need to abort upon topic creation failure. 
throw ex; } finally { if (zkClient != null) zkClient.close(); if (zkConn != null) zkConn.close(); } } @SuppressWarnings("unchecked") @Test public void KafkaDestination() throws Exception { createKafkaTopic(TOPIC); KafkaProducerConfiguration configs = new KafkaProducerConfiguration(this.bootstrapServers()); KafkaDestination kafkaDestination = new KafkaDestination(null, configs, x -> x, metrics, 0L); List<Mutation> messages = new ArrayList<>(); messages.add(createMutation(MutationType.INSERT)); messages.add(createMutation(MutationType.UPDATE)); messages.add(createMutation(MutationType.DELETE)); kafkaDestination.publish(messages); Properties props = new Properties(); props.setProperty("bootstrap.servers", this.bootstrapServers()); props.setProperty( "key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer"); props.setProperty( "value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer"); KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(props); kafkaConsumer.assign(Collections.singletonList(new TopicPartition(TOPIC, 0))); kafkaConsumer.seekToBeginning(new TopicPartition(TOPIC, 0)); List<ConsumerRecords<byte[], byte[]>> records = new ArrayList<>(); ConsumerRecords<byte[], byte[]> record; long startMs = current(); while (current() - startMs <= 10000L) { record = kafkaConsumer.poll(1000L); records.add(record); if (records.size() == 3) break; } Assert.assertEquals(records.size(), 3); for (ConsumerRecords<byte[], byte[]> consumerRecords : records) { for (ConsumerRecord<byte[], byte[]> consumerRecord : consumerRecords) { com.airbnb.jitney.event.spinaltap.v1.Mutation mutation = getMutation(consumerRecord.value()); switch (mutation.getType()) { case INSERT: Assert.assertEquals(mutation, createMutation(MutationType.INSERT)); break; case UPDATE: Assert.assertEquals(mutation, createMutation(MutationType.UPDATE)); break; case DELETE: Assert.assertEquals(mutation, createMutation(MutationType.DELETE)); break; } } } 
kafkaDestination.close(); kafkaConsumer.close(); } private long current() { return System.currentTimeMillis(); } private Mutation getMutation(byte[] payload) throws Exception { Mutation mutation = new Mutation(); deserializer.get().deserialize(mutation, payload); return mutation; } private Mutation createMutation(MutationType type) { Mapper<com.airbnb.spinaltap.Mutation<?>, ? extends TBase<?, ?>> thriftMutationMapper = ThriftMutationMapper.create("spinaltap"); Table table = new Table( 0L, TABLE, DATABASE, null, ImmutableList.of(new ColumnMetadata("id", ColumnDataType.LONGLONG, true, 0)), ImmutableList.of("id")); MysqlMutationMetadata metadata = new MysqlMutationMetadata( new DataSource(HOSTNAME, 0, SOURCE_NAME), new BinlogFilePos(), table, 0L, 0L, 0L, null, null, 0L, 0); Row row = new Row( table, ImmutableMap.of( "id", new Column(new ColumnMetadata("id", ColumnDataType.LONGLONG, true, 0), 1L))); MysqlMutation mutation; switch (type) { case INSERT: mutation = new MysqlInsertMutation(metadata, row); break; case UPDATE: mutation = new MysqlUpdateMutation(metadata, row, row); break; case DELETE: mutation = new MysqlDeleteMutation(metadata, row); break; default: mutation = null; } return (Mutation) (thriftMutationMapper.map(mutation)); } }
1,922
0
Create_ds/SpinalTap/spinaltap-kafka/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-kafka/src/main/java/com/airbnb/spinaltap/kafka/KafkaDestinationBuilder.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.kafka; import com.airbnb.spinaltap.common.destination.Destination; import com.airbnb.spinaltap.common.destination.DestinationBuilder; import lombok.NonNull; import lombok.RequiredArgsConstructor; import org.apache.thrift.TBase; /** Represents an implement of {@link DestinationBuilder} for {@link KafkaDestination}s. */ @RequiredArgsConstructor public final class KafkaDestinationBuilder<T extends TBase<?, ?>> extends DestinationBuilder<T> { @NonNull private final KafkaProducerConfiguration producerConfig; @Override protected Destination createDestination() { return new KafkaDestination<>(topicNamePrefix, producerConfig, mapper, metrics, delaySendMs); } }
1,923
0
Create_ds/SpinalTap/spinaltap-kafka/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-kafka/src/main/java/com/airbnb/spinaltap/kafka/KafkaDestination.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.kafka; import com.airbnb.jitney.event.spinaltap.v1.Table; import com.airbnb.spinaltap.Mutation; import com.airbnb.spinaltap.common.destination.AbstractDestination; import com.airbnb.spinaltap.common.destination.DestinationMetrics; import com.airbnb.spinaltap.common.util.BatchMapper; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.Set; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.producer.Callback; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.thrift.TBase; import org.apache.thrift.TException; import org.apache.thrift.TSerializer; import org.apache.thrift.protocol.TBinaryProtocol; /** * Represents an implement of {@link com.airbnb.spinaltap.common.destination.Destination} using <a * href="https://kafka.apache.org">Apache Kafka</a>. 
*/ @Slf4j public final class KafkaDestination<T extends TBase<?, ?>> extends AbstractDestination<T> { private static final String DEFAULT_TOPIC_PREFIX = "spinaltap"; private volatile boolean failed = false; private final String topicNamePrefix; private final KafkaProducer<byte[], byte[]> kafkaProducer; private final Callback callback = new SpinalTapPublishCallback(); private final ThreadLocal<TSerializer> serializer = ThreadLocal.withInitial(() -> new TSerializer((new TBinaryProtocol.Factory()))); public KafkaDestination( final String prefix, final KafkaProducerConfiguration producerConfig, final BatchMapper<Mutation<?>, T> mapper, final DestinationMetrics metrics, final long delaySendMs) { super(mapper, metrics, delaySendMs); topicNamePrefix = Optional.ofNullable(prefix).orElse(DEFAULT_TOPIC_PREFIX); Properties props = new Properties(); setKafkaDefaultConfigs(props, producerConfig.getBootstrapServers()); kafkaProducer = new KafkaProducer<>(props); } private void setKafkaDefaultConfigs(Properties props, String bootstrapServers) { // For bootstrap.servers. props.setProperty("bootstrap.servers", bootstrapServers); // For durability. props.setProperty("acks", "-1"); // For in-order delivery. props.setProperty("max.in.flight.requests.per.connection", "1"); // For default serializer. props.setProperty( "key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer"); props.setProperty( "value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer"); } @Override public void publish(List<T> messages) throws Exception { try { failed = false; messages.forEach(message -> kafkaProducer.send(transform(message), callback)); kafkaProducer.flush(); if (failed) { throw new Exception("Error when sending event to Kafka."); } } catch (Exception ex) { throw new Exception("Error when sending event to Kafka."); } } /** Transform from TBase to the ProducerRecord. 
*/ private ProducerRecord<byte[], byte[]> transform(TBase<?, ?> event) throws RuntimeException { try { String topic = getTopic(event); byte[] key = getKey(event); byte[] value = serializer.get().serialize(event); return new ProducerRecord<>(topic, key, value); } catch (TException ex) { throw new RuntimeException("Error when transforming event from TBase to ProducerRecord.", ex); } catch (Exception ex) { throw new RuntimeException("Invalid mutation found when transforming.", ex); } } /** Use the primary key as the key of the ProducerRecord. */ private byte[] getKey(TBase<?, ?> event) { com.airbnb.jitney.event.spinaltap.v1.Mutation mutation = ((com.airbnb.jitney.event.spinaltap.v1.Mutation) event); Set<String> primaryKeys = mutation.getTable().getPrimaryKey(); String tableName = mutation.getTable().getName(); String databaseName = mutation.getTable().getDatabase(); Map<String, ByteBuffer> entities = mutation.getEntity(); StringBuilder builder = new StringBuilder(databaseName + ":" + tableName); for (String keyComponent : primaryKeys) { String component = new String(entities.get(keyComponent).array(), StandardCharsets.UTF_8); builder.append(":").append(component); } return builder.toString().getBytes(StandardCharsets.UTF_8); } /** * The format of the topic for a table from source in database is as follows: * [source]-[database]-[table] */ private String getTopic(final TBase<?, ?> event) { com.airbnb.jitney.event.spinaltap.v1.Mutation mutation = ((com.airbnb.jitney.event.spinaltap.v1.Mutation) event); Table table = mutation.getTable(); return String.format( "%s.%s-%s-%s", topicNamePrefix, mutation.getDataSource().getSynapseService(), table.isSetOverridingDatabase() ? table.getOverridingDatabase() : table.getDatabase(), mutation.getTable().getName()); } /** * The callback to mark the asynchronous send result for KafkaProducer. Close the KafkaProducer * inside the callback if there is an exception to prevent out-of-order delivery. 
*/ private class SpinalTapPublishCallback implements Callback { public void onCompletion(RecordMetadata metadata, Exception exception) { if (exception != null) { failed = true; kafkaProducer.close(); } } } }
1,924
0
Create_ds/SpinalTap/spinaltap-kafka/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-kafka/src/main/java/com/airbnb/spinaltap/kafka/KafkaProducerConfiguration.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.kafka;

import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

/** Represents the Kafka producer configuration used in {@link KafkaDestination}. */
@Data
@NoArgsConstructor
@AllArgsConstructor
public class KafkaProducerConfiguration {
  // Comma-separated Kafka broker list, bound from the "bootstrap_servers"
  // JSON property and passed to the producer's "bootstrap.servers" setting.
  @JsonProperty("bootstrap_servers")
  private String bootstrapServers;
}
1,925
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/EventOrderValidatorTest.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql; import static org.junit.Assert.*; import static org.mockito.Mockito.*; import com.airbnb.spinaltap.common.source.SourceEvent; import com.airbnb.spinaltap.mysql.event.BinlogEvent; import com.airbnb.spinaltap.mysql.validator.EventOrderValidator; import com.google.common.collect.Lists; import java.util.Collections; import java.util.List; import org.junit.Test; public class EventOrderValidatorTest { private final BinlogEvent firstEvent = mock(BinlogEvent.class); private final BinlogEvent secondEvent = mock(BinlogEvent.class); @Test public void testEventInOrder() throws Exception { List<SourceEvent> unorderedEvents = Lists.newArrayList(); when(firstEvent.getOffset()).thenReturn(1L); when(secondEvent.getOffset()).thenReturn(2L); EventOrderValidator validator = new EventOrderValidator(unorderedEvents::add); validator.validate(firstEvent); validator.validate(secondEvent); assertTrue(unorderedEvents.isEmpty()); } @Test public void testEventOutOfOrder() throws Exception { List<SourceEvent> unorderedEvents = Lists.newArrayList(); when(firstEvent.getOffset()).thenReturn(2L); when(secondEvent.getOffset()).thenReturn(1L); EventOrderValidator validator = new EventOrderValidator(unorderedEvents::add); validator.validate(firstEvent); validator.validate(secondEvent); assertEquals(Collections.singletonList(secondEvent), unorderedEvents); } @Test public void testReset() throws Exception { List<SourceEvent> unorderedEvents = Lists.newArrayList(); when(firstEvent.getOffset()).thenReturn(1L); when(secondEvent.getOffset()).thenReturn(2L); EventOrderValidator validator = new EventOrderValidator(unorderedEvents::add); validator.validate(firstEvent); validator.validate(secondEvent); validator.reset(); validator.validate(firstEvent); validator.validate(secondEvent); assertTrue(unorderedEvents.isEmpty()); } }
1,926
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/ColumnSerializationUtilTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import static org.junit.Assert.assertEquals;

import com.airbnb.jitney.event.spinaltap.v1.BinlogHeader;
import com.airbnb.jitney.event.spinaltap.v1.DataSource;
import com.airbnb.jitney.event.spinaltap.v1.Mutation;
import com.airbnb.jitney.event.spinaltap.v1.MutationType;
import com.airbnb.jitney.event.spinaltap.v1.Table;
import com.airbnb.spinaltap.mysql.mutation.schema.Column;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnDataType;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.junit.Test;

/**
 * Verifies that column values serialized by {@link ColumnSerializationUtil} survive a full Thrift
 * binary-protocol round trip when embedded as the entity map of a {@link Mutation} event.
 */
public class ColumnSerializationUtilTest {
  private static final long TIMESTAMP = 1234;
  private static final String SOURCE_ID = "localhost";
  private static final BinlogHeader BINLOG_HEADER = new BinlogHeader("123", 2, 3, 4);
  private static final DataSource DATA_SOURCE = new DataSource("localhost", 9192, "db");
  private static final Table TABLE =
      new Table(
          TIMESTAMP,
          "table",
          "db",
          ImmutableSet.of("c1", "c2"),
          ImmutableMap.of("c1", new com.airbnb.jitney.event.spinaltap.v1.Column(1, false, "c1")));

  /**
   * Serializes a {@link Mutation} carrying several column types to Thrift bytes and asserts the
   * deserialized copy is equal to the original. Equality covers the entity map, so each column's
   * serialized bytes must round-trip unchanged.
   */
  @Test
  public void testDeserializeColumn() throws Exception {
    Mutation mutation =
        new Mutation(
            MutationType.DELETE,
            TIMESTAMP,
            SOURCE_ID,
            DATA_SOURCE,
            BINLOG_HEADER,
            TABLE,
            getEntity());

    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
    TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());

    byte[] serialized = serializer.serialize(mutation);

    Mutation deserialized = new Mutation();
    deserializer.deserialize(deserialized, serialized);

    assertEquals(mutation, deserialized);
  }

  /**
   * Builds an entity map exercising several column data types: an integer (c1), a string (c2), a
   * binary blob (c3), and a NULL value (c4).
   */
  private static Map<String, ByteBuffer> getEntity() {
    return ImmutableMap.of(
        "c1",
        ByteBuffer.wrap(
            ColumnSerializationUtil.serializeColumn(
                new Column(new ColumnMetadata("c1", ColumnDataType.INT24, false, 0), 12345))),
        "c2",
        ByteBuffer.wrap(
            ColumnSerializationUtil.serializeColumn(
                new Column(new ColumnMetadata("c2", ColumnDataType.STRING, false, 1), "string"))),
        "c3",
        ByteBuffer.wrap(
            ColumnSerializationUtil.serializeColumn(
                new Column(
                    new ColumnMetadata("c3", ColumnDataType.BLOB, false, 2),
                    // Use an explicit charset: the no-arg getBytes() depends on the platform
                    // default and could make this test flaky across environments.
                    "blob.data".getBytes(StandardCharsets.UTF_8)))),
        "c4",
        ByteBuffer.wrap(
            ColumnSerializationUtil.serializeColumn(
                new Column(new ColumnMetadata("c4", ColumnDataType.DATETIME, false, 3), null))));
  }
}
1,927
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/MysqlSourceTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import com.airbnb.spinaltap.common.source.MysqlSourceState;
import com.airbnb.spinaltap.common.util.Repository;
import com.airbnb.spinaltap.mysql.binlog_connector.BinaryLogConnectorSource;
import com.airbnb.spinaltap.mysql.exception.InvalidBinlogPositionException;
import com.airbnb.spinaltap.mysql.mutation.MysqlInsertMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Row;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManager;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import lombok.Getter;
import org.junit.Test;

/**
 * Tests for {@link MysqlSource} lifecycle and state management: open/initialize, state
 * save/restore, rollback to the last valid state, checkpointing from mutations, and error
 * handling on binlog communication failures. Uses a {@link TestSource} subclass whose
 * connect/disconnect are no-ops so no real binlog connection is made.
 */
public class MysqlSourceTest {
  private static final String SOURCE_NAME = "test";
  private static final DataSource DATA_SOURCE = new DataSource("test_host", 1, "test");
  private static final Set<String> TABLE_NAMES =
      Sets.newHashSet(Table.canonicalNameOf("db", "users"));
  private static final long SAVED_OFFSET = 12L;
  private static final long SAVED_TIMESTAMP = 12L;
  private static final BinlogFilePos BINLOG_FILE_POS =
      new BinlogFilePos("mysql-binlog.123450", 14, 100);

  private final TableCache tableCache = mock(TableCache.class);
  private final MysqlSourceMetrics mysqlMetrics = mock(MysqlSourceMetrics.class);

  @SuppressWarnings("unchecked")
  private final StateRepository<MysqlSourceState> stateRepository = mock(StateRepository.class);

  private final MysqlSource.Listener listener = mock(MysqlSource.Listener.class);
  private final MysqlSchemaManager schemaManager = mock(MysqlSchemaManager.class);

  /**
   * Opening the source restores the persisted state: last saved state, last transaction derived
   * from it, and the binlog position; the table cache is cleared once on open.
   */
  @Test
  public void testOpenClose() throws Exception {
    TestSource source = new TestSource();
    MysqlSourceState savedState =
        new MysqlSourceState(SAVED_TIMESTAMP, SAVED_OFFSET, 0L, BINLOG_FILE_POS);

    when(stateRepository.read()).thenReturn(savedState);

    source.open();

    Transaction lastTransaction =
        new Transaction(
            savedState.getLastTimestamp(), savedState.getLastOffset(), savedState.getLastPosition());

    assertEquals(savedState, source.getLastSavedState().get());
    assertEquals(lastTransaction, source.getLastTransaction().get());
    assertEquals(BINLOG_FILE_POS, source.getPosition());

    verify(tableCache, times(1)).clear();
  }

  /** Saving a state persists it to the repository and records it as the last saved state. */
  @Test
  public void testSaveState() throws Exception {
    TestSource source = new TestSource();
    MysqlSourceState savedState = mock(MysqlSourceState.class);
    MysqlSourceState newState = mock(MysqlSourceState.class);

    when(stateRepository.read()).thenReturn(savedState);

    source.saveState(newState);

    verify(stateRepository, times(1)).save(newState);
    assertEquals(newState, source.getLastSavedState().get());
  }

  /**
   * getSavedState returns the persisted state when one exists; when the repository is empty it
   * falls back to a default state at offset/timestamp 0 positioned at the latest binlog position.
   */
  @Test
  public void testGetState() throws Exception {
    TestSource source = new TestSource();
    MysqlSourceState savedState = mock(MysqlSourceState.class);

    when(stateRepository.read()).thenReturn(savedState);

    source.initialize();

    MysqlSourceState state = source.getSavedState();
    assertEquals(savedState, state);

    when(stateRepository.read()).thenReturn(null);

    state = source.getSavedState();
    assertEquals(0L, state.getLastOffset());
    assertEquals(0L, state.getLastTimestamp());
    assertEquals(MysqlSource.LATEST_BINLOG_POS, state.getLastPosition());
  }

  /**
   * Rolling back walks the state history: each reset appears to cut the history roughly in half
   * (per the assertions below), and once the history is exhausted the source falls back to the
   * earliest binlog position. A subsequent checkpoint repopulates the history so the next reset
   * lands on the checkpointed state.
   */
  @Test
  public void testResetToLastValidState() throws Exception {
    StateHistory<MysqlSourceState> stateHistory = createTestStateHistory();
    TestSource source = new TestSource(stateHistory);
    MysqlSourceState savedState = mock(MysqlSourceState.class);
    MysqlSourceState earliestState =
        new MysqlSourceState(0L, 0L, 0L, MysqlSource.EARLIEST_BINLOG_POS);

    when(stateRepository.read()).thenReturn(savedState);

    MysqlSourceState firstState = mock(MysqlSourceState.class);
    MysqlSourceState secondState = mock(MysqlSourceState.class);
    MysqlSourceState thirdState = mock(MysqlSourceState.class);
    MysqlSourceState fourthState = mock(MysqlSourceState.class);

    stateHistory.add(firstState);
    stateHistory.add(secondState);
    stateHistory.add(thirdState);

    source.initialize();

    // First reset: back to the most recent state in history.
    source.resetToLastValidState();
    assertEquals(thirdState, source.getLastSavedState().get());

    // Second reset: skips further back and drains the remaining history.
    source.resetToLastValidState();
    assertEquals(firstState, source.getLastSavedState().get());
    assertTrue(stateHistory.isEmpty());

    // History exhausted: fall back to the earliest binlog position.
    source.resetToLastValidState();
    assertEquals(earliestState, source.getLastSavedState().get());

    stateHistory.add(firstState);
    stateHistory.add(secondState);
    stateHistory.add(thirdState);
    stateHistory.add(fourthState);

    source.resetToLastValidState();
    assertEquals(firstState, source.getLastSavedState().get());

    stateHistory.add(firstState);
    stateHistory.add(secondState);

    source.resetToLastValidState();
    assertEquals(earliestState, source.getLastSavedState().get());
    assertTrue(stateHistory.isEmpty());

    // A checkpoint after exhaustion repopulates the history.
    BinlogFilePos filePos = new BinlogFilePos("mysql-binlog.123450", 18, 156);
    Transaction lastTransaction = new Transaction(0L, 0L, filePos);
    MysqlMutationMetadata metadata =
        new MysqlMutationMetadata(null, filePos, null, 0L, 1L, 23L, null, lastTransaction, 0L, 0);

    source.checkpoint(new MysqlInsertMutation(metadata, null));
    assertFalse(stateHistory.isEmpty());

    source.resetToLastValidState();
    assertEquals(new MysqlSourceState(23L, 1L, 0L, filePos), source.getLastSavedState().get());
  }

  /**
   * Communication errors rethrow in both branches (the fail() guards confirm that); only an
   * {@link InvalidBinlogPositionException} additionally resets the saved state to the earliest
   * binlog position, while a generic error leaves the state untouched.
   */
  @Test
  public void testOnCommunicationError() throws Exception {
    TestSource source = new TestSource();
    source.addListener(listener);
    source.setPosition(null);

    try {
      source.onCommunicationError(new RuntimeException());
      fail("Should not reach here");
    } catch (Exception ex) {
      // Expected: onCommunicationError rethrows.
    }
    assertNull(source.getLastSavedState().get());

    try {
      source.onCommunicationError(new InvalidBinlogPositionException(""));
      fail("Should not reach here");
    } catch (Exception ex) {
      // Expected: rethrown after resetting to the earliest binlog position.
    }
    assertEquals(
        MysqlSource.EARLIEST_BINLOG_POS, source.getLastSavedState().get().getLastPosition());
  }

  /**
   * Checkpointing only advances the saved state for mutations with an offset beyond the saved
   * one; null mutations and already-covered offsets are no-ops. An accepted checkpoint is also
   * pushed onto the state history.
   */
  @Test
  public void testCommitCheckpoint() throws Exception {
    StateHistory<MysqlSourceState> stateHistory = createTestStateHistory();
    TestSource source = new TestSource(stateHistory);
    Row row = new Row(null, ImmutableMap.of());
    BinlogFilePos filePos = new BinlogFilePos("mysql-binlog.123450", 18, 156);
    Transaction lastTransaction = new Transaction(0L, 0L, filePos);
    MysqlMutationMetadata metadata =
        new MysqlMutationMetadata(null, filePos, null, 0L, 0L, 0L, null, lastTransaction, 0, 0);
    MysqlMutation mutation = new MysqlInsertMutation(metadata, row);
    MysqlSourceState savedState =
        new MysqlSourceState(SAVED_TIMESTAMP, SAVED_OFFSET, 0L, BINLOG_FILE_POS);

    when(stateRepository.read()).thenReturn(savedState);

    source.initialize();

    // Offset 0 is behind SAVED_OFFSET: no state change.
    source.checkpoint(mutation);
    assertEquals(savedState, source.getLastSavedState().get());

    // Null mutation: no state change.
    source.checkpoint(null);
    assertEquals(savedState, source.getLastSavedState().get());

    // Offset past SAVED_OFFSET: state advances and is recorded in history.
    long newOffset = SAVED_OFFSET + 1;
    metadata =
        new MysqlMutationMetadata(
            null, filePos, null, 0L, newOffset, 23L, null, lastTransaction, 0, 0);
    mutation = new MysqlInsertMutation(metadata, row);

    source.checkpoint(mutation);
    assertEquals(
        new MysqlSourceState(23L, newOffset, 0L, filePos), source.getLastSavedState().get());
    assertEquals(stateHistory.removeLast(), source.getLastSavedState().get());
  }

  // Fresh, empty history backed by a mock repository; capacity 10 is ample for these tests.
  private StateHistory<MysqlSourceState> createTestStateHistory() {
    return new StateHistory<MysqlSourceState>("", 10, mock(Repository.class), mysqlMetrics);
  }

  /**
   * Minimal concrete {@link MysqlSource}: connect/disconnect only flip a flag, so lifecycle and
   * state logic can be exercised without a real binlog connection.
   */
  @Getter
  class TestSource extends MysqlSource {
    private boolean isConnected;
    private BinlogFilePos position;

    TestSource() {
      this(createTestStateHistory());
    }

    TestSource(StateHistory<MysqlSourceState> stateHistory) {
      super(
          SOURCE_NAME,
          DATA_SOURCE,
          TABLE_NAMES,
          tableCache,
          stateRepository,
          stateHistory,
          BinaryLogConnectorSource.LATEST_BINLOG_POS,
          schemaManager,
          mysqlMetrics,
          new AtomicLong(0L),
          new AtomicReference<>(),
          new AtomicReference<>());
    }

    public void setPosition(BinlogFilePos pos) {
      position = pos;
    }

    public void connect() {
      isConnected = true;
    }

    public void disconnect() {
      isConnected = false;
    }

    public boolean isConnected() {
      return isConnected;
    }
  }
}
1,928
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/TableCacheTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import static org.junit.Assert.*;
import static org.mockito.Mockito.*;

import com.airbnb.spinaltap.mysql.mutation.schema.ColumnDataType;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import com.airbnb.spinaltap.mysql.schema.MysqlColumn;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManager;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;

/**
 * Tests for {@link TableCache}: cache hits must not re-query the schema manager (checked via
 * Mockito invocation counts), while a change in column types or table name must trigger a
 * refresh.
 */
public class TableCacheTest {
  private static final String DATABASE_NAME = "db";
  private static final String OVERRIDING_DATABASE_NAME = "overriding_db";
  private static final String TABLE_NAME = "test";
  private static final long TABLE_ID = 1L;

  // Expected cache entry when the schema manager returns TABLE_COLUMNS (4 columns).
  private static final Table TABLE =
      new Table(
          TABLE_ID,
          TABLE_NAME,
          DATABASE_NAME,
          OVERRIDING_DATABASE_NAME,
          Arrays.asList(
              new ColumnMetadata("col1", ColumnDataType.TINY, true, 0, "TINY"),
              new ColumnMetadata("col2", ColumnDataType.STRING, false, 1, "TEXT"),
              new ColumnMetadata("col3", ColumnDataType.FLOAT, true, 2, "FLOAT"),
              new ColumnMetadata("col4", ColumnDataType.LONG, false, 3, "LONG")),
          Arrays.asList("col1", "col3"));

  private static final List<MysqlColumn> TABLE_COLUMNS =
      Arrays.asList(
          new MysqlColumn("col1", "TINY", "TINY", true),
          new MysqlColumn("col2", "STRING", "TEXT", false),
          new MysqlColumn("col3", "FLOAT", "FLOAT", true),
          new MysqlColumn("col4", "LONG", "LONG", false));

  // Expected cache entry after the table drops to 3 columns (col2 changes TEXT -> STRING).
  private static final Table TABLE_UPDATED =
      new Table(
          TABLE_ID,
          TABLE_NAME,
          DATABASE_NAME,
          OVERRIDING_DATABASE_NAME,
          Arrays.asList(
              new ColumnMetadata("col1", ColumnDataType.TINY, true, 0, "TINY"),
              new ColumnMetadata("col2", ColumnDataType.STRING, false, 1, "STRING"),
              new ColumnMetadata("col3", ColumnDataType.FLOAT, true, 2, "FLOAT")),
          Arrays.asList("col1", "col3"));

  private static final List<MysqlColumn> TABLE_COLUMNS_UPDATED =
      Arrays.asList(
          new MysqlColumn("col1", "TINY", "TINY", true),
          new MysqlColumn("col2", "STRING", "STRING", false),
          new MysqlColumn("col3", "FLOAT", "FLOAT", true));

  // 5-column schema used to simulate the schema store being ahead of the binlog event.
  private static final List<MysqlColumn> TABLE_COLUMNS_LARGE_STUB =
      Arrays.asList(
          new MysqlColumn("col1", "TINY", "TINY", true),
          new MysqlColumn("col2", "STRING", "TEXT", false),
          new MysqlColumn("col3", "FLOAT", "FLOAT", true),
          new MysqlColumn("col4", "LONG", "LONG", false),
          new MysqlColumn("col5", "VARCHAR", "VARCHAR", false));

  private final MysqlSchemaManager schemaManager = mock(MysqlSchemaManager.class);
  private final MysqlSourceMetrics metrics = mock(MysqlSourceMetrics.class);
  // NOTE(review): this field is not referenced by any test below — possibly leftover; confirm
  // before removing.
  private final BinlogFilePos binlogFilePos = new BinlogFilePos("mysql-bin-changelog.000532");

  /**
   * Exercises the full cache lifecycle: miss then hit (no extra schema lookup), refresh on a
   * column-type change, hit again, and finally a refresh where the schema store reports more
   * columns than the binlog event — in which case the cached table is truncated to the event's
   * column count.
   */
  @Test
  public void test() throws Exception {
    TableCache tableCache = new TableCache(schemaManager, OVERRIDING_DATABASE_NAME);
    List<ColumnDataType> columnTypes =
        Arrays.asList(
            ColumnDataType.TINY, ColumnDataType.STRING, ColumnDataType.FLOAT, ColumnDataType.LONG);

    when(schemaManager.getTableColumns(DATABASE_NAME, TABLE_NAME)).thenReturn(TABLE_COLUMNS);

    // Cache miss before the first addOrUpdate.
    assertNull(tableCache.get(TABLE_ID));

    tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, columnTypes);
    Table table = tableCache.get(TABLE_ID);

    assertEquals(TABLE, table);
    verify(schemaManager, times(1)).getTableColumns(DATABASE_NAME, TABLE_NAME);

    // Same table, same column types: served from cache, no extra schema lookup.
    tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, columnTypes);
    table = tableCache.get(TABLE_ID);

    assertEquals(TABLE, table);
    verify(schemaManager, times(1)).getTableColumns(DATABASE_NAME, TABLE_NAME);

    // Column types changed: cache entry is refreshed (second schema lookup).
    columnTypes = Arrays.asList(ColumnDataType.TINY, ColumnDataType.STRING, ColumnDataType.FLOAT);

    when(schemaManager.getTableColumns(DATABASE_NAME, TABLE_NAME))
        .thenReturn(TABLE_COLUMNS_UPDATED);

    tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, columnTypes);
    table = tableCache.get(TABLE_ID);

    assertEquals(TABLE_UPDATED, table);
    verify(schemaManager, times(2)).getTableColumns(DATABASE_NAME, TABLE_NAME);

    // Unchanged again: still served from cache.
    tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, columnTypes);
    table = tableCache.get(TABLE_ID);

    assertEquals(TABLE_UPDATED, table);
    verify(schemaManager, times(2)).getTableColumns(DATABASE_NAME, TABLE_NAME);

    // Schema reader now returns schema with 5 columns, but columnTypes has size 4
    columnTypes =
        Arrays.asList(
            ColumnDataType.TINY, ColumnDataType.STRING, ColumnDataType.FLOAT, ColumnDataType.LONG);
    when(schemaManager.getTableColumns(DATABASE_NAME, TABLE_NAME))
        .thenReturn(TABLE_COLUMNS_LARGE_STUB);

    tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, columnTypes);
    table = tableCache.get(TABLE_ID);

    // Extra schema-store column is dropped: the cached table matches the 4-column TABLE.
    assertEquals(TABLE, table);
    verify(schemaManager, times(3)).getTableColumns(DATABASE_NAME, TABLE_NAME);
  }

  /**
   * A table rename under the same table id must refresh the cache against the new name; metrics
   * are never touched in this flow.
   */
  @Test
  public void testNewTableName() throws Exception {
    TableCache tableCache = new TableCache(schemaManager, OVERRIDING_DATABASE_NAME);
    String newTable = "new_table";

    when(schemaManager.getTableColumns(DATABASE_NAME, TABLE_NAME)).thenReturn(TABLE_COLUMNS);

    List<ColumnDataType> columnTypes =
        Arrays.asList(
            ColumnDataType.TINY, ColumnDataType.STRING, ColumnDataType.FLOAT, ColumnDataType.LONG);

    tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, columnTypes);
    verify(schemaManager, times(1)).getTableColumns(DATABASE_NAME, TABLE_NAME);

    when(schemaManager.getTableColumns(DATABASE_NAME, newTable)).thenReturn(TABLE_COLUMNS_UPDATED);

    columnTypes = Arrays.asList(ColumnDataType.TINY, ColumnDataType.STRING, ColumnDataType.FLOAT);

    tableCache.addOrUpdate(TABLE_ID, newTable, DATABASE_NAME, columnTypes);
    verify(schemaManager, times(1)).getTableColumns(DATABASE_NAME, newTable);

    // NOTE(review): verifyZeroInteractions is deprecated in newer Mockito versions in favor of
    // verifyNoInteractions — confirm the Mockito version before migrating.
    verifyZeroInteractions(metrics);
  }
}
1,929
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/StateHistoryTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;

import com.airbnb.spinaltap.common.source.MysqlSourceState;
import com.airbnb.spinaltap.common.util.Repository;
import com.google.common.collect.Lists;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import lombok.AllArgsConstructor;
import lombok.NoArgsConstructor;
import org.junit.Test;

/**
 * Tests for {@link StateHistory}: a bounded, repository-backed history of source states with
 * capacity eviction and removal from the tail. Uses the in-memory {@link TestRepository} below
 * as the backing store so each test can inspect exactly what was persisted.
 */
public class StateHistoryTest {
  // Instance fields despite the constant-style names; kept for readability of the test bodies.
  private final String SOURCE_NAME = "test_source";
  private final MysqlSourceMetrics metrics = mock(MysqlSourceMetrics.class);

  /**
   * With capacity 2, adding beyond capacity evicts the oldest entries: after four adds only the
   * last two states remain persisted.
   */
  @Test
  public void test() throws Exception {
    MysqlSourceState firstState = mock(MysqlSourceState.class);
    MysqlSourceState secondState = mock(MysqlSourceState.class);
    MysqlSourceState thirdState = mock(MysqlSourceState.class);
    MysqlSourceState fourthState = mock(MysqlSourceState.class);

    TestRepository repository = new TestRepository(firstState);
    StateHistory<MysqlSourceState> history =
        new StateHistory<>(SOURCE_NAME, 2, repository, metrics);

    history.add(secondState);
    assertEquals(Arrays.asList(firstState, secondState), repository.get());

    history.add(thirdState);
    history.add(fourthState);
    assertEquals(Arrays.asList(thirdState, fourthState), repository.get());
  }

  /** A history over an empty repository is empty; one seeded with a state is not. */
  @Test
  public void testEmptyHistory() throws Exception {
    MysqlSourceState state = mock(MysqlSourceState.class);
    TestRepository repository = new TestRepository();
    StateHistory<MysqlSourceState> history =
        new StateHistory<>(SOURCE_NAME, 2, repository, metrics);

    assertTrue(history.isEmpty());

    repository = new TestRepository(state);
    history = new StateHistory<>(SOURCE_NAME, 2, repository, metrics);

    assertFalse(history.isEmpty());
  }

  /** removeLast() pops states in LIFO order until the history is empty. */
  @Test
  public void testRemoveLastFromHistory() throws Exception {
    MysqlSourceState firstState = mock(MysqlSourceState.class);
    MysqlSourceState secondState = mock(MysqlSourceState.class);
    MysqlSourceState thirdState = mock(MysqlSourceState.class);

    TestRepository repository = new TestRepository(firstState, secondState, thirdState);
    StateHistory<MysqlSourceState> history =
        new StateHistory<>(SOURCE_NAME, 3, repository, metrics);

    assertEquals(thirdState, history.removeLast());
    assertEquals(secondState, history.removeLast());
    assertEquals(firstState, history.removeLast());
    assertTrue(history.isEmpty());
  }

  /** Popping from an empty history is a programming error. */
  @Test(expected = IllegalStateException.class)
  public void testRemoveFromEmptyHistory() throws Exception {
    StateHistory<MysqlSourceState> history =
        new StateHistory<>(SOURCE_NAME, 2, new TestRepository(), metrics);
    history.removeLast();
  }

  /** Requesting more removals than the history holds is also rejected. */
  @Test(expected = IllegalStateException.class)
  public void testRemoveMoreElementsThanInHistory() throws Exception {
    MysqlSourceState firstState = mock(MysqlSourceState.class);
    MysqlSourceState secondState = mock(MysqlSourceState.class);

    TestRepository repository = new TestRepository(firstState, secondState);
    StateHistory<MysqlSourceState> history =
        new StateHistory<>(SOURCE_NAME, 2, repository, metrics);

    history.removeLast(3);
  }

  /** Removing exactly all elements returns the oldest state and leaves the history empty. */
  @Test
  public void testRemoveAllElementsFromHistory() throws Exception {
    MysqlSourceState firstState = mock(MysqlSourceState.class);
    MysqlSourceState secondState = mock(MysqlSourceState.class);

    TestRepository repository = new TestRepository(firstState, secondState);
    StateHistory<MysqlSourceState> history =
        new StateHistory<>(SOURCE_NAME, 2, repository, metrics);

    assertEquals(firstState, history.removeLast(2));
    assertTrue(history.isEmpty());
  }

  /** removeLast(n) returns the n-th state from the end and keeps the earlier ones persisted. */
  @Test
  public void testRemoveMultipleElementsFromHistory() throws Exception {
    MysqlSourceState firstState = mock(MysqlSourceState.class);
    MysqlSourceState secondState = mock(MysqlSourceState.class);
    MysqlSourceState thirdState = mock(MysqlSourceState.class);

    TestRepository repository = new TestRepository(firstState, secondState, thirdState);
    StateHistory<MysqlSourceState> history =
        new StateHistory<>(SOURCE_NAME, 3, repository, metrics);

    assertEquals(secondState, history.removeLast(2));
    assertEquals(Collections.singletonList(firstState), repository.get());
  }

  /** Sanity check of the TestRepository itself: remove() clears existence. */
  @Test
  public void testRemoveStateHistory() throws Exception {
    TestRepository repository = new TestRepository(mock(MysqlSourceState.class));
    assertTrue(repository.exists());
    repository.remove();
    assertFalse(repository.exists());
  }

  /**
   * Simple in-memory {@link Repository} over a list of states: exists() means non-null backing
   * list; set/update delegate to create; remove() nulls the list out.
   */
  @NoArgsConstructor
  @AllArgsConstructor
  public static class TestRepository implements Repository<Collection<MysqlSourceState>> {
    private List<MysqlSourceState> states;

    TestRepository(MysqlSourceState... states) {
      this(Arrays.asList(states));
    }

    @Override
    public boolean exists() throws Exception {
      return states != null;
    }

    @Override
    public void create(Collection<MysqlSourceState> states) throws Exception {
      // Defensive copy so later mutations of the argument don't leak into the "store".
      this.states = Lists.newArrayList(states);
    }

    @Override
    public void set(Collection<MysqlSourceState> states) throws Exception {
      create(states);
    }

    @Override
    public void update(
        Collection<MysqlSourceState> states, DataUpdater<Collection<MysqlSourceState>> updater)
        throws Exception {
      // The updater is intentionally ignored: tests only care about the final stored value.
      create(states);
    }

    @Override
    public Collection<MysqlSourceState> get() throws Exception {
      return states;
    }

    @Override
    public void remove() throws Exception {
      states = null;
    }
  }
}
1,930
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/StateRepositoryTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import static org.junit.Assert.*;
import static org.mockito.Mockito.*;

import com.airbnb.spinaltap.common.source.MysqlSourceState;
import com.airbnb.spinaltap.common.util.Repository;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Assert;
import org.junit.Test;

/**
 * Tests for {@link StateRepository}: a save only replaces the stored state when the incoming
 * state's leader epoch is at least the stored one, reads return null when no state exists, and
 * read/save failures are reported to metrics before being rethrown.
 */
public class StateRepositoryTest {
  private final MysqlSourceMetrics metrics = mock(MysqlSourceMetrics.class);

  @SuppressWarnings("unchecked")
  private final Repository<MysqlSourceState> repository = mock(Repository.class);

  private final StateRepository<MysqlSourceState> stateRepository =
      new StateRepository<>("test", repository, metrics);

  /**
   * Drives save() against a stored state with leader epoch 5 and checks which state the
   * repository's DataUpdater keeps for lower, equal, and higher incoming epochs.
   */
  @Test
  public void testSave() throws Exception {
    MysqlSourceState state = mock(MysqlSourceState.class);
    MysqlSourceState nextState = mock(MysqlSourceState.class);
    AtomicReference<MysqlSourceState> updatedState = new AtomicReference<>();

    when(state.getCurrentLeaderEpoch()).thenReturn(5L);

    // Capture the DataUpdater passed to the backing repository and apply it against the
    // "currently stored" state, recording which state the updater chooses to keep.
    doAnswer(
            invocation -> {
              Object[] args = invocation.getArguments();
              MysqlSourceState newState = (MysqlSourceState) args[0];
              @SuppressWarnings("unchecked")
              Repository.DataUpdater<MysqlSourceState> updater =
                  (Repository.DataUpdater<MysqlSourceState>) args[1];
              updatedState.set(updater.apply(state, newState));
              return null;
            })
        .when(repository)
        .update(any(MysqlSourceState.class), any(Repository.DataUpdater.class));

    // Test new leader epoch less than current: the stored state wins.
    when(nextState.getCurrentLeaderEpoch()).thenReturn(4L);
    stateRepository.save(nextState);
    assertEquals(state, updatedState.get());

    // Test new leader epoch same as current: the new state wins.
    when(nextState.getCurrentLeaderEpoch()).thenReturn(5L);
    stateRepository.save(nextState);
    assertEquals(nextState, updatedState.get());

    // Test new leader epoch greater than current: the new state wins.
    when(nextState.getCurrentLeaderEpoch()).thenReturn(6L);
    stateRepository.save(nextState);
    assertEquals(nextState, updatedState.get());
  }

  /** A failing backing update is reported to metrics and rethrown. */
  @Test(expected = RuntimeException.class)
  public void testSaveFailure() throws Exception {
    doThrow(new RuntimeException())
        .when(repository)
        .update(any(MysqlSourceState.class), any(Repository.DataUpdater.class));

    try {
      stateRepository.save(mock(MysqlSourceState.class));
    } catch (RuntimeException ex) {
      verify(metrics, times(1)).stateSaveFailure(any(Exception.class));
      throw ex;
    }
  }

  /** read() returns null when no state exists, the stored state otherwise; both count as reads. */
  @Test
  public void testRead() throws Exception {
    MysqlSourceState state = mock(MysqlSourceState.class);

    when(repository.get()).thenReturn(state);
    when(repository.exists()).thenReturn(false);

    assertNull(stateRepository.read());

    when(repository.exists()).thenReturn(true);

    Assert.assertEquals(state, stateRepository.read());
    verify(metrics, times(2)).stateRead();
  }

  /** A failing existence check is reported to metrics and rethrown. */
  @Test(expected = RuntimeException.class)
  public void testReadFailure() throws Exception {
    when(repository.exists()).thenThrow(new RuntimeException());

    try {
      stateRepository.read();
    } catch (RuntimeException ex) {
      verify(metrics, times(1)).stateReadFailure(any(Exception.class));
      throw ex;
    }
  }
}
1,931
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/validator/MutationSchemaValidatorTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.validator;

import com.airbnb.spinaltap.mysql.mutation.MysqlInsertMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Column;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnDataType;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Row;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.io.Serializable;
import org.junit.Test;

/**
 * Tests for {@link MutationSchemaValidator}. The validator under test is wired with a handler
 * that throws {@link IllegalStateException}, so any mutation whose row disagrees with the table
 * schema surfaces as that exception.
 */
public class MutationSchemaValidatorTest {
  private static final String ID_COLUMN = "id";
  private static final String NAME_COLUMN = "name";
  private static final String AGE_COLUMN = "age";

  // Reference schema: (id LONGLONG pk, name VARCHAR, age INT24).
  private static final Table TABLE =
      new Table(
          0L,
          "Users",
          "test_db",
          null,
          ImmutableList.of(
              new ColumnMetadata(ID_COLUMN, ColumnDataType.LONGLONG, true, 0),
              new ColumnMetadata(NAME_COLUMN, ColumnDataType.VARCHAR, false, 1),
              new ColumnMetadata(AGE_COLUMN, ColumnDataType.INT24, false, 2)),
          ImmutableList.of(ID_COLUMN));

  private static final MysqlMutationMetadata MUTATION_METADATA =
      new MysqlMutationMetadata(null, null, TABLE, 0L, 0L, 0L, null, null, 0L, 0);

  // Invalid mutations are routed to this handler, which turns them into IllegalStateException.
  private final MutationSchemaValidator validator =
      new MutationSchemaValidator(
          mutation -> {
            throw new IllegalStateException();
          });

  /** A row matching the table schema exactly passes validation without invoking the handler. */
  @Test
  public void testValidSchema() throws Exception {
    validator.validate(
        insertOf(
            ImmutableMap.of(
                ID_COLUMN, column(ID_COLUMN, ColumnDataType.LONGLONG, true, 1L, 0),
                NAME_COLUMN, column(NAME_COLUMN, ColumnDataType.VARCHAR, false, "bob", 1),
                AGE_COLUMN, column(AGE_COLUMN, ColumnDataType.INT24, false, 25, 2))));
  }

  /** A row missing a schema column ("age") is flagged as invalid. */
  @Test(expected = IllegalStateException.class)
  public void testMissingColumn() throws Exception {
    validator.validate(
        insertOf(
            ImmutableMap.of(
                ID_COLUMN, column(ID_COLUMN, ColumnDataType.LONGLONG, true, 1L, 0),
                NAME_COLUMN, column(NAME_COLUMN, ColumnDataType.VARCHAR, false, "bob", 1))));
  }

  /** A row carrying a column absent from the schema ("bad_column") is flagged as invalid. */
  @Test(expected = IllegalStateException.class)
  public void testIncorrectColumn() throws Exception {
    validator.validate(
        insertOf(
            ImmutableMap.of(
                ID_COLUMN, column(ID_COLUMN, ColumnDataType.LONGLONG, true, 1L, 0),
                NAME_COLUMN, column(NAME_COLUMN, ColumnDataType.VARCHAR, false, "bob", 1),
                AGE_COLUMN, column(AGE_COLUMN, ColumnDataType.INT24, false, 25, 2),
                "bad_column", column("bad_column", ColumnDataType.VARCHAR, false, "bad", 3))));
  }

  /** A column whose data type disagrees with the schema (age as LONGLONG) is flagged. */
  @Test(expected = IllegalStateException.class)
  public void testIncorrectColumnDataType() throws Exception {
    validator.validate(
        insertOf(
            ImmutableMap.of(
                ID_COLUMN, column(ID_COLUMN, ColumnDataType.LONGLONG, true, 1L, 0),
                NAME_COLUMN, column(NAME_COLUMN, ColumnDataType.VARCHAR, false, "bob", 1),
                AGE_COLUMN, column(AGE_COLUMN, ColumnDataType.LONGLONG, false, 25, 2))));
  }

  // Wraps the given cells in a row against TABLE and an insert mutation with MUTATION_METADATA.
  private MysqlMutation insertOf(ImmutableMap<String, Column> cells) {
    return new MysqlInsertMutation(MUTATION_METADATA, new Row(TABLE, cells));
  }

  // Builds a column whose metadata mirrors the given name/type/primary-key flag/position.
  private Column column(
      String name, ColumnDataType dataType, boolean isPk, Serializable value, int position) {
    return new Column(new ColumnMetadata(name, dataType, isPk, position), value);
  }
}
1,932
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/util/JsonSerializationTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.util;

import static org.junit.Assert.*;

import com.airbnb.spinaltap.common.source.MysqlSourceState;
import com.airbnb.spinaltap.common.util.JsonUtil;
import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.DataSource;
import com.airbnb.spinaltap.mysql.config.MysqlConfiguration;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.github.shyiko.mysql.binlog.network.SSLMode;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Queues;
import java.util.Collection;
import java.util.Deque;
import org.junit.Test;

/**
 * Round-trip JSON (and YAML, for {@link MysqlConfiguration}) serialization tests for the core
 * value types: {@link DataSource}, {@link BinlogFilePos} (with and without GTID fields),
 * {@link MysqlSourceState}, and state-history collections.
 */
public class JsonSerializationTest {
  private static final DataSource DATA_SOURCE = new DataSource("test", 3306, "test_service");
  private static final BinlogFilePos BINLOG_FILE_POS = new BinlogFilePos("test.218", 1234, 5678);
  private static final MysqlSourceState SOURCE_STATE =
      new MysqlSourceState(15L, 20L, -1L, BINLOG_FILE_POS);
  private static final String SERVER_UUID = "4a4ac150-fe5b-4093-a1ef-a8876011adaa";

  /** DataSource survives a JSON round trip. */
  @Test
  public void testSerializeDataSource() throws Exception {
    assertEquals(
        DATA_SOURCE,
        JsonUtil.OBJECT_MAPPER.readValue(
            JsonUtil.OBJECT_MAPPER.writeValueAsString(DATA_SOURCE), DataSource.class));
  }

  /** BinlogFilePos without GTID fields survives a JSON round trip. */
  @Test
  public void testSerializeBinlogFilePos() throws Exception {
    assertEquals(
        BINLOG_FILE_POS,
        JsonUtil.OBJECT_MAPPER.readValue(
            JsonUtil.OBJECT_MAPPER.writeValueAsString(BINLOG_FILE_POS),
            new TypeReference<BinlogFilePos>() {}));
  }

  /** BinlogFilePos with GTID set and server UUID survives a JSON round trip. */
  @Test
  public void testSerializeBinlogFilePosWithGTID() throws Exception {
    BinlogFilePos pos =
        new BinlogFilePos("test.123", 123, 456, SERVER_UUID + ":1-123", SERVER_UUID);
    assertEquals(
        pos,
        JsonUtil.OBJECT_MAPPER.readValue(
            JsonUtil.OBJECT_MAPPER.writeValueAsString(pos), new TypeReference<BinlogFilePos>() {}));
  }

  /**
   * A legacy JSON payload without GTID fields deserializes cleanly: the file number is derived
   * from the file name and the GTID fields stay null.
   */
  @Test
  public void testDeserializeBinlogFilePosWithoutGTID() throws Exception {
    String jsonString = "{\"fileName\": \"test.123\", \"position\": 4, \"nextPosition\": 8}";
    BinlogFilePos pos =
        JsonUtil.OBJECT_MAPPER.readValue(jsonString, new TypeReference<BinlogFilePos>() {});
    assertEquals("test.123", pos.getFileName());
    assertEquals(123, pos.getFileNumber());
    assertEquals(4, pos.getPosition());
    assertEquals(8, pos.getNextPosition());
    assertNull(pos.getServerUUID());
    assertNull(pos.getGtidSet());
  }

  /** MysqlSourceState survives a JSON round trip with position, timestamp, and offset intact. */
  @Test
  public void testSerializeSourceState() throws Exception {
    MysqlSourceState state =
        JsonUtil.OBJECT_MAPPER.readValue(
            JsonUtil.OBJECT_MAPPER.writeValueAsString(SOURCE_STATE),
            new TypeReference<MysqlSourceState>() {});

    assertEquals(BINLOG_FILE_POS, state.getLastPosition());
    assertEquals(SOURCE_STATE.getLastTimestamp(), state.getLastTimestamp());
    assertEquals(SOURCE_STATE.getLastOffset(), state.getLastOffset());
  }

  /** A deque of states round-trips through a JSON collection preserving order and size. */
  @Test
  public void testSerializeStateHistory() throws Exception {
    MysqlSourceState firstState = new MysqlSourceState(15L, 20L, -1L, BINLOG_FILE_POS);
    MysqlSourceState secondState = new MysqlSourceState(16L, 21L, -1L, BINLOG_FILE_POS);
    MysqlSourceState thirdState = new MysqlSourceState(17L, 22L, -1L, BINLOG_FILE_POS);

    Deque<MysqlSourceState> stateHistory = Queues.newArrayDeque();
    stateHistory.addLast(firstState);
    stateHistory.addLast(secondState);
    stateHistory.addLast(thirdState);

    Collection<MysqlSourceState> states =
        JsonUtil.OBJECT_MAPPER.readValue(
            JsonUtil.OBJECT_MAPPER.writeValueAsString(stateHistory),
            new TypeReference<Collection<MysqlSourceState>>() {});

    stateHistory = Queues.newArrayDeque(states);

    assertEquals(3, states.size());
    assertEquals(thirdState, stateHistory.removeLast());
    assertEquals(secondState, stateHistory.removeLast());
    assertEquals(firstState, stateHistory.removeLast());
  }

  /** A YAML MysqlConfiguration deserializes with all fields mapped, including nested destination. */
  @Test
  public void testDeserializeMysqlConfiguration() throws Exception {
    String configYaml =
        "name: test\n"
            + "host: localhost\n"
            + "port: 3306\n"
            + "tables:\n"
            + " - test_db:test_table\n"
            + " - test_db:test_table2\n"
            + "socket_timeout_seconds: -1\n"
            + "ssl_mode: REQUIRED\n"
            + "mtls_enabled: true\n"
            + "destination:\n"
            + " pool_size: 5\n"
            + " buffer_size: 1000\n";
    MysqlConfiguration config =
        new ObjectMapper(new YAMLFactory()).readValue(configYaml, MysqlConfiguration.class);
    assertEquals("test", config.getName());
    assertEquals("localhost", config.getHost());
    assertEquals(3306, config.getPort());
    assertEquals(
        ImmutableList.of("test_db:test_table", "test_db:test_table2"),
        config.getCanonicalTableNames());
    assertEquals(-1, config.getSocketTimeoutInSeconds());
    assertEquals(1000, config.getDestinationConfiguration().getBufferSize());
    assertEquals(5, config.getDestinationConfiguration().getPoolSize());
    assertEquals(SSLMode.REQUIRED, config.getSslMode());
    assertTrue(config.isMTlsEnabled());
  }
}
1,933
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/mutation/MysqlKeyProviderTest.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.mutation; import static org.junit.Assert.assertEquals; import com.airbnb.spinaltap.mysql.mutation.schema.Column; import com.airbnb.spinaltap.mysql.mutation.schema.ColumnDataType; import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata; import com.airbnb.spinaltap.mysql.mutation.schema.Row; import com.airbnb.spinaltap.mysql.mutation.schema.Table; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.junit.Test; public class MysqlKeyProviderTest { private static final String ID_COLUMN = "id"; private static final Table TABLE = new Table( 0L, "users", "test", null, ImmutableList.of(new ColumnMetadata(ID_COLUMN, ColumnDataType.LONGLONG, true, 0)), ImmutableList.of(ID_COLUMN)); private static final MysqlMutationMetadata MUTATION_METADATA = new MysqlMutationMetadata(null, null, TABLE, 0L, 0L, 0L, null, null, 0L, 0); @Test public void testGetKey() throws Exception { Row row = new Row( TABLE, ImmutableMap.of(ID_COLUMN, new Column(TABLE.getColumns().get(ID_COLUMN), 1234))); MysqlMutation mutation = new MysqlInsertMutation(MUTATION_METADATA, row); assertEquals("test:users:1234", MysqlKeyProvider.INSTANCE.get(mutation)); } @Test public void testGetNullKey() throws Exception { Row row = new Row( TABLE, ImmutableMap.of(ID_COLUMN, new Column(TABLE.getColumns().get(ID_COLUMN), null))); MysqlMutation mutation = new MysqlInsertMutation(MUTATION_METADATA, row); assertEquals("test:users:null", MysqlKeyProvider.INSTANCE.get(mutation)); } }
1,934
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/binlog_connector/BinaryLogConnectorEventMapperTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.binlog_connector;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.event.BinlogEvent;
import com.airbnb.spinaltap.mysql.event.DeleteEvent;
import com.airbnb.spinaltap.mysql.event.QueryEvent;
import com.airbnb.spinaltap.mysql.event.StartEvent;
import com.airbnb.spinaltap.mysql.event.TableMapEvent;
import com.airbnb.spinaltap.mysql.event.UpdateEvent;
import com.airbnb.spinaltap.mysql.event.WriteEvent;
import com.airbnb.spinaltap.mysql.event.XidEvent;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnDataType;
import com.github.shyiko.mysql.binlog.event.DeleteRowsEventData;
import com.github.shyiko.mysql.binlog.event.Event;
import com.github.shyiko.mysql.binlog.event.EventHeaderV4;
import com.github.shyiko.mysql.binlog.event.EventType;
import com.github.shyiko.mysql.binlog.event.FormatDescriptionEventData;
import com.github.shyiko.mysql.binlog.event.QueryEventData;
import com.github.shyiko.mysql.binlog.event.TableMapEventData;
import com.github.shyiko.mysql.binlog.event.UpdateRowsEventData;
import com.github.shyiko.mysql.binlog.event.WriteRowsEventData;
import com.github.shyiko.mysql.binlog.event.XAPrepareEventData;
import com.github.shyiko.mysql.binlog.event.XidEventData;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import java.io.Serializable;
import java.util.Optional;
import org.junit.Before;
import org.junit.Test;

/**
 * Tests for {@code BinaryLogConnectorEventMapper}: each supported mysql-binlog-connector
 * {@link Event} type maps to the corresponding internal {@link BinlogEvent} subclass, carrying
 * over the header's server id and timestamp plus the supplied {@link BinlogFilePos}; types the
 * mapper does not handle map to {@link Optional#empty()}.
 */
public class BinaryLogConnectorEventMapperTest {
  private static final long TABLE_ID = 888L;
  private static final long SERVER_ID = 65535L;
  private static final long TIMESTAMP = 100000L;
  private static final String DATABASE = "db";
  private static final String TABLE = "table";
  private static final BinlogFilePos BINLOG_FILE_POS = new BinlogFilePos(1000);
  // A row image and its "before" image, in the raw array form the connector produces.
  private static final Serializable[] ROW = new Integer[] {1, 2, 3, 4};
  private static final Serializable[] PREV_ROW = new Integer[] {4, 3, 1, 2};

  // Shared header fixture; each test only sets the event type it exercises.
  private EventHeaderV4 eventHeader = new EventHeaderV4();

  @Before
  public void setUp() {
    eventHeader.setEventLength(123L);
    eventHeader.setServerId(SERVER_ID);
    eventHeader.setNextPosition(100L);
    eventHeader.setTimestamp(TIMESTAMP);
  }

  /** EXT_WRITE_ROWS maps to WriteEvent with rows, table id, and header fields preserved. */
  @Test
  public void testWriteEvent() {
    eventHeader.setEventType(EventType.EXT_WRITE_ROWS);
    WriteRowsEventData eventData = new WriteRowsEventData();
    eventData.setTableId(TABLE_ID);
    eventData.setRows(ImmutableList.of(ROW));
    Optional<BinlogEvent> binlogEvent =
        BinaryLogConnectorEventMapper.INSTANCE.map(
            new Event(eventHeader, eventData), BINLOG_FILE_POS);
    assertTrue(binlogEvent.isPresent());
    assertTrue(binlogEvent.get() instanceof WriteEvent);
    WriteEvent writeEvent = (WriteEvent) (binlogEvent.get());
    assertEquals(BINLOG_FILE_POS, writeEvent.getBinlogFilePos());
    assertEquals(ImmutableList.of(ROW), writeEvent.getRows());
    assertEquals(SERVER_ID, writeEvent.getServerId());
    assertEquals(TABLE_ID, writeEvent.getTableId());
    assertEquals(TIMESTAMP, writeEvent.getTimestamp());
  }

  /** EXT_UPDATE_ROWS maps to UpdateEvent; rows are (before, after) image pairs. */
  @Test
  public void testUpdateEvent() {
    eventHeader.setEventType(EventType.EXT_UPDATE_ROWS);
    UpdateRowsEventData eventData = new UpdateRowsEventData();
    eventData.setTableId(TABLE_ID);
    eventData.setRows(ImmutableList.of(Maps.immutableEntry(PREV_ROW, ROW)));
    Optional<BinlogEvent> binlogEvent =
        BinaryLogConnectorEventMapper.INSTANCE.map(
            new Event(eventHeader, eventData), BINLOG_FILE_POS);
    assertTrue(binlogEvent.isPresent());
    assertTrue(binlogEvent.get() instanceof UpdateEvent);
    UpdateEvent updateEvent = (UpdateEvent) (binlogEvent.get());
    assertEquals(BINLOG_FILE_POS, updateEvent.getBinlogFilePos());
    assertEquals(ImmutableList.of(Maps.immutableEntry(PREV_ROW, ROW)), updateEvent.getRows());
    assertEquals(SERVER_ID, updateEvent.getServerId());
    assertEquals(TABLE_ID, updateEvent.getTableId());
    assertEquals(TIMESTAMP, updateEvent.getTimestamp());
  }

  /** EXT_DELETE_ROWS maps to DeleteEvent carrying the deleted row images. */
  @Test
  public void testDeleteEvent() {
    eventHeader.setEventType(EventType.EXT_DELETE_ROWS);
    DeleteRowsEventData eventData = new DeleteRowsEventData();
    eventData.setTableId(TABLE_ID);
    eventData.setRows(ImmutableList.of(PREV_ROW));
    Optional<BinlogEvent> binlogEvent =
        BinaryLogConnectorEventMapper.INSTANCE.map(
            new Event(eventHeader, eventData), BINLOG_FILE_POS);
    assertTrue(binlogEvent.isPresent());
    assertTrue(binlogEvent.get() instanceof DeleteEvent);
    DeleteEvent deleteEvent = (DeleteEvent) (binlogEvent.get());
    assertEquals(BINLOG_FILE_POS, deleteEvent.getBinlogFilePos());
    assertEquals(ImmutableList.of(PREV_ROW), deleteEvent.getRows());
    assertEquals(SERVER_ID, deleteEvent.getServerId());
    assertEquals(TABLE_ID, deleteEvent.getTableId());
    assertEquals(TIMESTAMP, deleteEvent.getTimestamp());
  }

  /**
   * TABLE_MAP maps to TableMapEvent; raw column-type bytes (0, 1, 2) are decoded to the
   * DECIMAL, TINY, SHORT members of {@link ColumnDataType}.
   */
  @Test
  public void testTableMapEvent() {
    eventHeader.setEventType(EventType.TABLE_MAP);
    TableMapEventData eventData = new TableMapEventData();
    eventData.setDatabase(DATABASE);
    eventData.setTable(TABLE);
    eventData.setTableId(TABLE_ID);
    eventData.setColumnTypes(new byte[] {(byte) 0, (byte) 1, (byte) 2});
    Optional<BinlogEvent> binlogEvent =
        BinaryLogConnectorEventMapper.INSTANCE.map(
            new Event(eventHeader, eventData), BINLOG_FILE_POS);
    assertTrue(binlogEvent.isPresent());
    assertTrue(binlogEvent.get() instanceof TableMapEvent);
    TableMapEvent tableMapEvent = (TableMapEvent) (binlogEvent.get());
    assertEquals(BINLOG_FILE_POS, tableMapEvent.getBinlogFilePos());
    assertEquals(DATABASE, tableMapEvent.getDatabase());
    assertEquals(TABLE, tableMapEvent.getTable());
    assertEquals(TABLE_ID, tableMapEvent.getTableId());
    assertEquals(
        ImmutableList.of(ColumnDataType.DECIMAL, ColumnDataType.TINY, ColumnDataType.SHORT),
        tableMapEvent.getColumnTypes());
  }

  /** XID (transaction commit) maps to XidEvent with the xid preserved. */
  @Test
  public void testXidEvent() {
    long xid = 88888L;
    eventHeader.setEventType(EventType.XID);
    XidEventData eventData = new XidEventData();
    eventData.setXid(xid);
    Optional<BinlogEvent> binlogEvent =
        BinaryLogConnectorEventMapper.INSTANCE.map(
            new Event(eventHeader, eventData), BINLOG_FILE_POS);
    assertTrue(binlogEvent.isPresent());
    assertTrue(binlogEvent.get() instanceof XidEvent);
    XidEvent xidEvent = (XidEvent) (binlogEvent.get());
    assertEquals(BINLOG_FILE_POS, xidEvent.getBinlogFilePos());
    assertEquals(SERVER_ID, xidEvent.getServerId());
    assertEquals(TIMESTAMP, xidEvent.getTimestamp());
    assertEquals(xid, xidEvent.getXid());
  }

  /** QUERY maps to QueryEvent carrying the database and raw SQL text. */
  @Test
  public void testQueryEvent() {
    String sql = "CREATE UNIQUE INDEX unique_index ON `my_db`.`my_table` (`col1`, `col2`)";
    eventHeader.setEventType(EventType.QUERY);
    QueryEventData eventData = new QueryEventData();
    eventData.setDatabase(DATABASE);
    eventData.setSql(sql);
    Optional<BinlogEvent> binlogEvent =
        BinaryLogConnectorEventMapper.INSTANCE.map(
            new Event(eventHeader, eventData), BINLOG_FILE_POS);
    assertTrue(binlogEvent.isPresent());
    assertTrue(binlogEvent.get() instanceof QueryEvent);
    QueryEvent queryEvent = (QueryEvent) (binlogEvent.get());
    assertEquals(BINLOG_FILE_POS, queryEvent.getBinlogFilePos());
    assertEquals(DATABASE, queryEvent.getDatabase());
    assertEquals(SERVER_ID, queryEvent.getServerId());
    assertEquals(TIMESTAMP, queryEvent.getTimestamp());
    assertEquals(sql, queryEvent.getSql());
  }

  /** FORMAT_DESCRIPTION (binlog file header) maps to StartEvent. */
  @Test
  public void testFormatDescriptionEvent() {
    eventHeader.setEventType(EventType.FORMAT_DESCRIPTION);
    FormatDescriptionEventData eventData = new FormatDescriptionEventData();
    Optional<BinlogEvent> binlogEvent =
        BinaryLogConnectorEventMapper.INSTANCE.map(
            new Event(eventHeader, eventData), BINLOG_FILE_POS);
    assertTrue(binlogEvent.isPresent());
    assertTrue(binlogEvent.get() instanceof StartEvent);
    StartEvent startEvent = (StartEvent) (binlogEvent.get());
    assertEquals(BINLOG_FILE_POS, startEvent.getBinlogFilePos());
    assertEquals(SERVER_ID, startEvent.getServerId());
    assertEquals(TIMESTAMP, startEvent.getTimestamp());
  }

  /** Event types the mapper does not handle (e.g. UNKNOWN) yield Optional.empty(). */
  @Test
  public void testIgnoredEvents() {
    eventHeader.setEventType(EventType.UNKNOWN);
    XAPrepareEventData eventData = new XAPrepareEventData();
    Optional<BinlogEvent> binlogEvent =
        BinaryLogConnectorEventMapper.INSTANCE.map(
            new Event(eventHeader, eventData), BINLOG_FILE_POS);
    assertFalse(binlogEvent.isPresent());
  }
}
1,935
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaDatabaseTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.schema;

import static org.junit.Assert.*;
import static org.mockito.Mockito.*;

import com.airbnb.spinaltap.mysql.MysqlSourceMetrics;
import org.jdbi.v3.core.Jdbi;
import org.junit.Test;

/**
 * Tests for {@code MysqlSchemaDatabase#addSourcePrefix(String)}: database names referenced in
 * DDL statements (CREATE/ALTER/RENAME/DROP TABLE, CREATE/DROP DATABASE or SCHEMA,
 * CREATE/DROP INDEX) are rewritten to the backquoted form {@code `source/<db>`}, while
 * statements that name no database are left unchanged.
 */
public class MysqlSchemaDatabaseTest {
  private static final String SOURCE_NAME = "source";

  private final Jdbi jdbi = mock(Jdbi.class);
  private final MysqlSourceMetrics metrics = mock(MysqlSourceMetrics.class);
  // NOTE(review): stray second semicolon below (empty declaration) — harmless but should be
  // removed in a follow-up cleanup.
  private final MysqlSchemaDatabase schemaDatabase =
      new MysqlSchemaDatabase(SOURCE_NAME, jdbi, metrics);;

  /** CREATE TABLE: qualified names gain the prefix; unqualified table names are untouched. */
  @Test
  public void testAddSourcePrefixCreateTable() throws Exception {
    String ddl =
        "create table `gibraltar_production`.`_instrument_details_paypal_new` (\n"
            + " `instrument_token` varbinary(255) NOT NULL,\n"
            + " `version` int(11) NOT NULL,\n"
            + " `paypal_email_encrypted` varbinary(255) NOT NULL,\n"
            + " `created_at` datetime NOT NULL,\n"
            + " PRIMARY KEY (`instrument_token`,`version`),\n"
            + " KEY `index_instrument_details_paypal_paypal_email` (`paypal_email_encrypted`)\n"
            + ") ENGINE=InnoDB DEFAULT CHARSET=utf8";
    String expectedDDL =
        "create table `source/gibraltar_production`.`_instrument_details_paypal_new` (\n"
            + " `instrument_token` varbinary(255) NOT NULL,\n"
            + " `version` int(11) NOT NULL,\n"
            + " `paypal_email_encrypted` varbinary(255) NOT NULL,\n"
            + " `created_at` datetime NOT NULL,\n"
            + " PRIMARY KEY (`instrument_token`,`version`),\n"
            + " KEY `index_instrument_details_paypal_paypal_email` (`paypal_email_encrypted`)\n"
            + ") ENGINE=InnoDB DEFAULT CHARSET=utf8";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));

    // Unqualified table name (no database part) stays as-is.
    ddl =
        "CREATE TABLE table123 (\n"
            + "`id` int(11) NOT NULL AUTO_INCREMENT,\n"
            + "`name` varchar(255) NOT NULL\n"
            + ") ENGINE=InnoDB AUTO_INCREMENT=2145755390 DEFAULT CHARSET=latin1";
    assertEquals(ddl, schemaDatabase.addSourcePrefix(ddl));

    // Backquoted but unqualified table name also stays as-is.
    ddl =
        "CREATE TABLE `table1234` (\n"
            + "`id` int(11) NOT NULL AUTO_INCREMENT,\n"
            + "`name` varchar(255) NOT NULL\n"
            + ") ENGINE=InnoDB DEFAULT CHARSET=latin1";
    assertEquals(ddl, schemaDatabase.addSourcePrefix(ddl));

    // IF NOT EXISTS with an unquoted database name: name is prefixed and backquoted.
    ddl =
        "CREATE TABLE IF NOT EXISTS my_database.`my_table` (\n"
            + "`id` int(11) NOT NULL AUTO_INCREMENT,\n"
            + "`name` varchar(255) NOT NULL\n"
            + ") ENGINE=InnoDB AUTO_INCREMENT=2145755390 DEFAULT CHARSET=latin1";
    expectedDDL =
        "CREATE TABLE IF NOT EXISTS `source/my_database`.`my_table` (\n"
            + "`id` int(11) NOT NULL AUTO_INCREMENT,\n"
            + "`name` varchar(255) NOT NULL\n"
            + ") ENGINE=InnoDB AUTO_INCREMENT=2145755390 DEFAULT CHARSET=latin1";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));

    // Column definitions containing keywords like TIMESTAMP/NOW() must not be rewritten.
    ddl =
        "create table `test3`.`test_table` (\n"
            + " `id` int(11) NOT NULL AUTO_INCREMENT,\n"
            + " `name` varchar(45) DEFAULT NULL,\n"
            + " `balance` varchar(45) DEFAULT NULL, \n"
            + " `timestamp` timestamp NULL DEFAULT NULL,\n"
            + " `float_col` float DEFAULT NULL,\n"
            + " `created_at` datetime DEFAULT NULL,\n"
            + " `updated_at` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE NOW(),\n"
            + " PRIMARY KEY (`id`) ) ENGINE=InnoDB";
    expectedDDL =
        "create table `source/test3`.`test_table` (\n"
            + " `id` int(11) NOT NULL AUTO_INCREMENT,\n"
            + " `name` varchar(45) DEFAULT NULL,\n"
            + " `balance` varchar(45) DEFAULT NULL, \n"
            + " `timestamp` timestamp NULL DEFAULT NULL,\n"
            + " `float_col` float DEFAULT NULL,\n"
            + " `created_at` datetime DEFAULT NULL,\n"
            + " `updated_at` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE NOW(),\n"
            + " PRIMARY KEY (`id`) ) ENGINE=InnoDB";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));

    // Tab-separated DDL (as emitted by online schema-change tools) is handled too.
    ddl =
        "create table `airbed3_production`.`_users_ghc` ( \t\t\t"
            + "id bigint auto_increment, \t\t\t"
            + "last_update timestamp not null DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, \t\t\t"
            + "hint varchar(64) charset ascii not null, \t\t\t"
            + "value varchar(4096) charset ascii not null, \t\t\t"
            + "primary key(id), \t\t\t"
            + "unique key hint_uidx(hint) \t\t\t"
            + ") auto_increment=256";
    expectedDDL =
        "create table `source/airbed3_production`.`_users_ghc` ( \t\t\t"
            + "id bigint auto_increment, \t\t\t"
            + "last_update timestamp not null DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, \t\t\t"
            + "hint varchar(64) charset ascii not null, \t\t\t"
            + "value varchar(4096) charset ascii not null, \t\t\t"
            + "primary key(id), \t\t\t"
            + "unique key hint_uidx(hint) \t\t\t"
            + ") auto_increment=256";
    String d = schemaDatabase.addSourcePrefix(ddl);
    assertEquals(expectedDDL, d);
  }

  /** ALTER TABLE: database parts (including RENAME TO targets) are prefixed and backquoted. */
  @Test
  public void testAddSourcePrefixAlterTable() throws Exception {
    String ddl =
        "ALTER TABLE `gibraltar_production`.`_instrument_details_paypal_new`\n"
            + "ADD COLUMN `account_id` VARBINARY ( 255 ) NULL AFTER `paypal_email_encrypted`,\n"
            + "ADD COLUMN `first_name` VARBINARY ( 255 ) NULL AFTER `account_id`,\n"
            + "ADD COLUMN `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP";
    String expectedDDL =
        "ALTER TABLE `source/gibraltar_production`.`_instrument_details_paypal_new`\n"
            + "ADD COLUMN `account_id` VARBINARY ( 255 ) NULL AFTER `paypal_email_encrypted`,\n"
            + "ADD COLUMN `first_name` VARBINARY ( 255 ) NULL AFTER `account_id`,\n"
            + "ADD COLUMN `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));

    ddl = "ALTER TABLE my_test_table RENAME TO `tmp`.`my_test_table_1234`;";
    expectedDDL = "ALTER TABLE my_test_table RENAME TO `source/tmp`.`my_test_table_1234`;";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));

    // Unquoted db.table is normalized to backquoted form while being prefixed.
    ddl = "ALTER TABLE test_db.test_table DROP INDEX some_index";
    expectedDDL = "ALTER TABLE `source/test_db`.`test_table` DROP INDEX some_index";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));

    ddl = "ALTER TABLE `test_table123` DROP COLUMN `some_column`";
    assertEquals(ddl, schemaDatabase.addSourcePrefix(ddl));
  }

  /** RENAME TABLE: every qualified source and target name in the list is prefixed. */
  @Test
  public void testAddSourcePrefixRenameTable() throws Exception {
    String ddl = "RENAME TABLE `db1`.`table1` TO `tmp`.`table1`";
    String expectedDDL = "RENAME TABLE `source/db1`.`table1` TO `source/tmp`.`table1`";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));

    // Unquoted names (even starting with digits) are backquoted as part of the rewrite.
    ddl =
        "RENAME TABLE airbed3_production.20170810023312170_reservation2s to tmp.20170810023312170_reservation2s";
    expectedDDL =
        "RENAME TABLE `source/airbed3_production`.`20170810023312170_reservation2s` to `source/tmp`.`20170810023312170_reservation2s`";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));

    // Mixed list: unqualified pairs untouched, qualified pairs prefixed.
    ddl = "RENAME TABLE `table123` TO new_table_123, some_db.table123 TO `other_db`.table456";
    expectedDDL =
        "RENAME TABLE `table123` TO new_table_123, `source/some_db`.`table123` TO `source/other_db`.`table456`";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));
  }

  /** DROP TABLE: each qualified name in the list is prefixed; escaped backquotes survive. */
  @Test
  public void testAddSourcePrefixDropTable() throws Exception {
    String ddl =
        "DROP TABLE my_table_111, `another_table_222`, my_db.my_table_333, my_db222.`my_table000`, `my_db123`.my_table_444, `my_db_333`.`my_table_555`";
    String expectedDDL =
        "DROP TABLE my_table_111, `another_table_222`, `source/my_db`.`my_table_333`, `source/my_db222`.`my_table000`, `source/my_db123`.`my_table_444`, `source/my_db_333`.`my_table_555`";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));

    ddl = "DROP TABLE IF EXISTS `db123`.`table456`";
    expectedDDL = "DROP TABLE IF EXISTS `source/db123`.`table456`";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));

    // Identifiers containing doubled (escaped) backquotes and punctuation are preserved.
    ddl = "DROP TABLE ```escaped_backquotes``test_db:@fds!aaa`.`DFDS..``table``sss`";
    expectedDDL = "DROP TABLE `source/``escaped_backquotes``test_db:@fds!aaa`.`DFDS..``table``sss`";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));
  }

  /** CREATE DATABASE/SCHEMA: the database name itself is prefixed and backquoted. */
  @Test
  public void testAddSourcePrefixCreateDatabase() throws Exception {
    String ddl = "CREATE DATABASE new_database";
    assertEquals("CREATE DATABASE `source/new_database`", schemaDatabase.addSourcePrefix(ddl));
    ddl = "CREATE DATABASE `another_new_database`";
    assertEquals(
        "CREATE DATABASE `source/another_new_database`", schemaDatabase.addSourcePrefix(ddl));
    ddl = "CREATE SCHEMA `new_database_123`";
    assertEquals("CREATE SCHEMA `source/new_database_123`", schemaDatabase.addSourcePrefix(ddl));
    // Escaped backquotes and special characters inside the name are kept verbatim.
    ddl = "CREATE SCHEMA `new_``schema``+456.789`";
    assertEquals(
        "CREATE SCHEMA `source/new_``schema``+456.789`", schemaDatabase.addSourcePrefix(ddl));
    // Trailing options (e.g. DEFAULT CHARSET) are preserved after the rewritten name.
    ddl = "CREATE DATABASE new_db DEFAULT CHARSET=utf8";
    assertEquals(
        "CREATE DATABASE `source/new_db` DEFAULT CHARSET=utf8",
        schemaDatabase.addSourcePrefix(ddl));
  }

  /** DROP DATABASE/SCHEMA: the database name is prefixed and backquoted. */
  @Test
  public void testAddSourcePrefixDropDatabase() throws Exception {
    String ddl = "DROP DATABASE old_database";
    assertEquals("DROP DATABASE `source/old_database`", schemaDatabase.addSourcePrefix(ddl));
    ddl = "DROP SCHEMA `old_database`";
    assertEquals("DROP SCHEMA `source/old_database`", schemaDatabase.addSourcePrefix(ddl));
  }

  /** CREATE INDEX: only the database part of a qualified table name is rewritten. */
  @Test
  public void testAddSourcePrefixCreateIndex() throws Exception {
    String ddl = "CREATE INDEX id_index ON lookup (id) USING BTREE;";
    assertEquals(ddl, schemaDatabase.addSourcePrefix(ddl));
    ddl = "CREATE UNIQUE INDEX unique_index ON `my_db`.`my_table` (`col1`, `col2`)";
    String expectedDDL =
        "CREATE UNIQUE INDEX unique_index ON `source/my_db`.`my_table` (`col1`, `col2`)";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));
  }

  /** DROP INDEX: same rule — unqualified table untouched, qualified table prefixed. */
  @Test
  public void testAddSourcePrefixDropIndex() throws Exception {
    String ddl = "DROP INDEX `index123` ON table";
    assertEquals(ddl, schemaDatabase.addSourcePrefix(ddl));
    ddl = "DROP INDEX `index222` ON `db_name`.`table22`;";
    String expectedDDL = "DROP INDEX `index222` ON `source/db_name`.`table22`;";
    assertEquals(expectedDDL, schemaDatabase.addSourcePrefix(ddl));
  }
}
1,936
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaUtilTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.schema;

import static org.junit.Assert.assertEquals;

import org.junit.Test;

/** Tests for {@code MysqlSchemaUtil#removeCommentsFromDDL(String)}. */
public class MysqlSchemaUtilTest {
  @Test
  public void testBlockSQLCommentsRemoval() {
    // Plain C-style block comments on a single line are removed entirely.
    final String ddlWithInlineComments =
        "CREATE/* ! COMMENTS ! */UNIQUE /* ANOTHER COMMENTS ! */INDEX unique_index\n"
            + "ON `my_db`.`my_table` (`col1`, `col2`)";
    assertEquals(
        "CREATE UNIQUE INDEX unique_index\nON `my_db`.`my_table` (`col1`, `col2`)",
        MysqlSchemaUtil.removeCommentsFromDDL(ddlWithInlineComments));

    // A block comment spanning several lines is removed as one unit.
    final String ddlWithMultiLineComments =
        "CREATE UNIQUE /*\n"
            + "COMMENT Line1 \n"
            + "COMMENT Line 2\n"
            + "*/\n"
            + "INDEX ON `my_db`.`my_table` (`col1`, `col2`)";
    assertEquals(
        "CREATE UNIQUE \nINDEX ON `my_db`.`my_table` (`col1`, `col2`)",
        MysqlSchemaUtil.removeCommentsFromDDL(ddlWithMultiLineComments));
  }

  @Test
  public void testMySQLSpecCommentsRemoval() {
    // MySQL versioned comments (/*!NNNNN ... */) keep their payload; only the markers go.
    assertEquals(
        "CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024 ",
        MysqlSchemaUtil.removeCommentsFromDDL(
            "CREATE TABLE t1(a INT, KEY (a)) /*!50110 KEY_BLOCK_SIZE=1024 */"));

    // Unversioned MySQL-specific comments (/*! ... */) likewise unwrap to their contents.
    assertEquals(
        "CREATE TABLE t1(a INT, KEY (a))",
        MysqlSchemaUtil.removeCommentsFromDDL("/*!CREATE TABLE t1(a INT, KEY (a))*/"));
  }
}
1,937
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/schema/MysqlColumnTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.schema;

import static org.junit.Assert.*;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;

/** Verifies that {@code MysqlColumn} round-trips through Jackson JSON serialization. */
public class MysqlColumnTest {
  private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

  private static final MysqlColumn COLUMN =
      new MysqlColumn("column1", "varchar", "varchar(255)", false);

  private static final List<MysqlColumn> COLUMNS =
      Arrays.asList(
          new MysqlColumn("id", "int", "int(20)", true),
          COLUMN,
          new MysqlColumn("column2", "text", "text", false));

  @Test
  public void testJSONSerDer() throws Exception {
    // Single column: serialize, deserialize, and expect an equal object back.
    final String singleColumnJson = OBJECT_MAPPER.writeValueAsString(COLUMN);
    assertEquals(COLUMN, OBJECT_MAPPER.readValue(singleColumnJson, MysqlColumn.class));

    // Column list: a TypeReference preserves the generic element type on the way back.
    final String columnListJson = OBJECT_MAPPER.writeValueAsString(COLUMNS);
    final List<MysqlColumn> roundTripped =
        OBJECT_MAPPER.readValue(columnListJson, new TypeReference<List<MysqlColumn>>() {});
    assertEquals(COLUMNS, roundTripped);
  }
}
1,938
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaStoreTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.schema;

import static org.junit.Assert.*;
import static org.mockito.Mockito.*;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.MysqlSourceMetrics;
import com.google.common.collect.ImmutableList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import org.jdbi.v3.core.Jdbi;
import org.junit.Before;
import org.junit.Test;

/**
 * Tests for {@code MysqlSchemaStore}: loading the in-memory schema cache up to a binlog
 * position (with and without GTID comparison) and identifying obsolete schema rows for
 * compaction. The store is a Mockito spy so {@code getAllSchemas()} can be stubbed with a
 * fixed history of five schema versions.
 */
public class MysqlSchemaStoreTest {
  private static final String SERVER_UUID_1 = "fec1aada-c5fc-11e9-9af8-0242ac110003";
  private static final String SERVER_UUID_2 = "3a7e553e-6ae1-11ea-99ee-0242ac110005";

  // Column sets for the three schema versions of db1.table1 / db1.table2 used below.
  private static final List<MysqlColumn> TABLE1_COLUMNS =
      Collections.singletonList(new MysqlColumn("id", "int", "int", false));
  private static final List<MysqlColumn> TABLE2_COLUMNS =
      Arrays.asList(
          new MysqlColumn("id", "int", "int", true),
          new MysqlColumn("name", "varchar", "varchar(255)", false));
  private static final List<MysqlColumn> TABLE1_COLUMNS_NEW =
      Arrays.asList(
          new MysqlColumn("id", "int", "int", true),
          new MysqlColumn("name", "text", "text", false),
          new MysqlColumn("phone", "varchar(255)", "varchar", false));

  // Schema history: CREATE table1 -> CREATE table2 -> ALTER table1 -> index-only DDL
  // (no table columns) -> DROP table1, at strictly increasing binlog positions/GTIDs.
  private static final BinlogFilePos FIRST_POS =
      new BinlogFilePos("mysql-binlog.001394", 6, 12, SERVER_UUID_1 + ":1-24", SERVER_UUID_1);
  private static final MysqlTableSchema FIRST_SCHEMA =
      new MysqlTableSchema(
          1,
          "db1",
          "table1",
          FIRST_POS,
          SERVER_UUID_1 + ":24",
          "CREATE TABLE table1 (id INT)",
          192000,
          TABLE1_COLUMNS,
          Collections.emptyMap());
  private static final BinlogFilePos SECOND_POS =
      new BinlogFilePos(
          "mysql-binlog.001394", 78333, 78345, SERVER_UUID_1 + ":1-488", SERVER_UUID_1);
  private static final MysqlTableSchema SECOND_SCHEMA =
      new MysqlTableSchema(
          2,
          "db1",
          "table2",
          SECOND_POS,
          SERVER_UUID_1 + ":488",
          "CREATE TABLE table2 (id INT, name VARCHAR(255))",
          492000,
          TABLE2_COLUMNS,
          Collections.emptyMap());
  private static final BinlogFilePos THIRD_POS =
      new BinlogFilePos("mysql-binlog.001402", 400, 432, SERVER_UUID_1 + ":1-673", SERVER_UUID_1);
  private static final MysqlTableSchema THIRD_SCHEMA =
      new MysqlTableSchema(
          3,
          "db1",
          "table1",
          THIRD_POS,
          SERVER_UUID_1 + ":673",
          "ALTER TABLE table1 ADD name text, ADD phone VARCHAR(255)",
          892000,
          TABLE1_COLUMNS_NEW,
          Collections.emptyMap());
  private static final BinlogFilePos FOURTH_POS =
      new BinlogFilePos("mysql-binlog.001403", 200, 232, SERVER_UUID_1 + ":1-988", SERVER_UUID_1);
  // Null table name + empty columns: DDL that changes no table schema (index add).
  private static final MysqlTableSchema FOURTH_SCHEMA =
      new MysqlTableSchema(
          4,
          "db1",
          null,
          FOURTH_POS,
          SERVER_UUID_1 + ":988",
          "ALTER TABLE `table2` ADD INDEX (`name`)",
          999988,
          Collections.emptyList(),
          Collections.emptyMap());
  private static final BinlogFilePos FIFTH_POS =
      new BinlogFilePos(
          "mysql-binlog.001403", 890901, 890911, SERVER_UUID_1 + ":1-1024", SERVER_UUID_1);
  // Empty column list here represents the table being dropped.
  private static final MysqlTableSchema FIFTH_SCHEMA =
      new MysqlTableSchema(
          5,
          "db1",
          "table1",
          FIFTH_POS,
          SERVER_UUID_1 + ":1024",
          "DROP TABLE `table1`",
          1010188,
          Collections.emptyList(),
          Collections.emptyMap());
  private static final List<MysqlTableSchema> ALL_TABLE_SCHEMAS =
      ImmutableList.of(FIRST_SCHEMA, SECOND_SCHEMA, THIRD_SCHEMA, FOURTH_SCHEMA, FIFTH_SCHEMA);

  // Spy so getAllSchemas() can be stubbed while the rest of the store runs for real.
  private final MysqlSchemaStore schemaStore =
      spy(
          new MysqlSchemaStore(
              "test",
              "schema_store",
              "schema_archive",
              mock(Jdbi.class),
              mock(MysqlSourceMetrics.class)));

  @Before
  public void setUp() {
    doReturn(ALL_TABLE_SCHEMAS).when(schemaStore).getAllSchemas();
  }

  /** Without a GTID set on the target position, file name/offset ordering drives the load. */
  @Test
  public void testLoadSchemaWithoutGTID() {
    // Position between SECOND_POS and THIRD_POS: cache holds v1 of table1 and table2.
    BinlogFilePos pos = new BinlogFilePos("mysql-binlog.001399", 100, 120, null, SERVER_UUID_1);
    schemaStore.loadSchemaCacheUntil(pos);
    assertEquals(2, schemaStore.getSchemaCache().size());
    assertEquals(FIRST_SCHEMA, schemaStore.get("db1", "table1"));
    assertEquals(SECOND_SCHEMA, schemaStore.get("db1", "table2"));

    // Past THIRD_POS (and FOURTH_POS, which touches no table): table1 is at v3.
    pos = new BinlogFilePos("mysql-binlog.001403", 400, 420, null, SERVER_UUID_1);
    schemaStore.loadSchemaCacheUntil(pos);
    assertEquals(2, schemaStore.getSchemaCache().size());
    assertEquals(THIRD_SCHEMA, schemaStore.get("db1", "table1"));
    assertEquals(SECOND_SCHEMA, schemaStore.get("db1", "table2"));

    // Past FIFTH_POS: table1 was dropped, so only table2 remains cached.
    pos = new BinlogFilePos("mysql-binlog.001444", 1100, 1420, null, SERVER_UUID_1);
    schemaStore.loadSchemaCacheUntil(pos);
    assertEquals(1, schemaStore.getSchemaCache().size());
    assertNull(schemaStore.getSchemaCache().get("db1", "table1"));
    assertEquals(SECOND_SCHEMA, schemaStore.get("db1", "table2"));
  }

  /** With a GTID set (and a different server UUID), GTID comparison drives the load. */
  @Test
  public void testLoadSchemaWithGTID() {
    schemaStore.getSchemaCache().clear();
    // GTID 1-589 covers FIRST (:24) and SECOND (:488) but not THIRD (:673).
    BinlogFilePos pos =
        new BinlogFilePos("mysql-binlog.001398", 300, 310, SERVER_UUID_1 + ":1-589", SERVER_UUID_2);
    schemaStore.loadSchemaCacheUntil(pos);
    assertEquals(2, schemaStore.getSchemaCache().size());
    assertEquals(FIRST_SCHEMA, schemaStore.get("db1", "table1"));
    assertEquals(SECOND_SCHEMA, schemaStore.get("db1", "table2"));

    // GTID 1-888 additionally covers THIRD (:673): table1 advances to v3.
    pos =
        new BinlogFilePos("mysql-binlog.001403", 400, 420, SERVER_UUID_1 + ":1-888", SERVER_UUID_2);
    schemaStore.loadSchemaCacheUntil(pos);
    assertEquals(2, schemaStore.getSchemaCache().size());
    assertEquals(THIRD_SCHEMA, schemaStore.get("db1", "table1"));
    assertEquals(SECOND_SCHEMA, schemaStore.get("db1", "table2"));

    // GTID 1-1888 covers the DROP (:1024): table1 disappears from the cache.
    pos =
        new BinlogFilePos(
            "mysql-binlog.001444", 1100, 1420, SERVER_UUID_1 + ":1-1888", SERVER_UUID_2);
    schemaStore.loadSchemaCacheUntil(pos);
    assertEquals(1, schemaStore.getSchemaCache().size());
    assertNull(schemaStore.getSchemaCache().get("db1", "table1"));
    assertEquals(SECOND_SCHEMA, schemaStore.get("db1", "table2"));
  }

  /**
   * Rows strictly superseded before the earliest position still in use are eligible for
   * deletion; the latest row per table (and the terminal DROP row) must be retained.
   */
  @Test
  public void testCompressSchemaStore() {
    BinlogFilePos currentPos =
        new BinlogFilePos(
            "mysql-binlog.002222", 123, 201, SERVER_UUID_1 + ":1-2888", SERVER_UUID_1);
    schemaStore.loadSchemaCacheUntil(currentPos);

    // Earliest position precedes all schema rows: nothing can be deleted.
    BinlogFilePos earliestPos =
        new BinlogFilePos(
            "mysql-binlog.001234", 1234, 5677, SERVER_UUID_1 + ":1-10", SERVER_UUID_1);
    assertTrue(schemaStore.getRowIdsToDelete(earliestPos).isEmpty());

    // Past FIRST_POS only: row 1 (old table1 schema) is superseded by row 3.
    earliestPos =
        new BinlogFilePos("mysql-binlog.001394", 20, 77, SERVER_UUID_1 + ":1-100", SERVER_UUID_1);
    assertEquals(
        new HashSet<>(Collections.singletonList(1L)), schemaStore.getRowIdsToDelete(earliestPos));
    earliestPos =
        new BinlogFilePos("mysql-binlog.001394", 20, 77, SERVER_UUID_1 + ":1-100", SERVER_UUID_2);
    assertEquals(
        new HashSet<>(Collections.singletonList(1L)), schemaStore.getRowIdsToDelete(earliestPos));
    earliestPos =
        new BinlogFilePos("mysql-binlog.001400", 20, 77, SERVER_UUID_1 + ":1-500", SERVER_UUID_2);
    assertEquals(
        new HashSet<>(Collections.singletonList(1L)), schemaStore.getRowIdsToDelete(earliestPos));

    // Past THIRD_POS: rows 1 and 3 are both superseded by the DROP (row 5).
    earliestPos =
        new BinlogFilePos("mysql-binlog.001403", 100, 177, SERVER_UUID_1 + ":1-800", SERVER_UUID_1);
    assertEquals(new HashSet<>(Arrays.asList(1L, 3L)), schemaStore.getRowIdsToDelete(earliestPos));

    // Past FOURTH_POS: the column-less DDL row (4) becomes deletable too.
    earliestPos =
        new BinlogFilePos(
            "mysql-binlog.001403", 500, 577, SERVER_UUID_1 + ":1-1000", SERVER_UUID_1);
    assertEquals(
        new HashSet<>(Arrays.asList(1L, 3L, 4L)), schemaStore.getRowIdsToDelete(earliestPos));

    // Past FIFTH_POS: even the DROP row (5) is no longer needed.
    earliestPos =
        new BinlogFilePos(
            "mysql-binlog.002000", 500, 577, SERVER_UUID_1 + ":1-1900", SERVER_UUID_1);
    assertEquals(
        new HashSet<>(Arrays.asList(1L, 3L, 4L, 5L)), schemaStore.getRowIdsToDelete(earliestPos));
  }
}
1,939
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaManagerTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.schema;

import static org.junit.Assert.*;
import static org.mockito.Mockito.*;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.MysqlClient;
import com.airbnb.spinaltap.mysql.event.QueryEvent;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Table;
import com.google.common.collect.Tables;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.junit.Before;
import org.junit.Test;

/**
 * Unit tests for {@link MysqlSchemaManager}. Each test feeds a DDL {@link QueryEvent} into
 * {@code processDDL} and verifies that (a) the statement is replayed against the mocked
 * {@link MysqlSchemaDatabase} and (b) the resulting table schema snapshot(s) are written to the
 * mocked {@link MysqlSchemaStore}, diffed against the pre-populated cache.
 */
public class MysqlSchemaManagerTest {
  // GTID in <server_uuid>:<transaction_id> form; used as the schema version marker in snapshots.
  private static final String GTID = "fec1aada-c5fc-11e9-9af8-0242ac110003:1002";
  private static final long TIMESTAMP = 7849733221L;
  private static final BinlogFilePos BINLOG_FILE_POS =
      BinlogFilePos.fromString("mysql-binlog.001233:345:456");

  private static final List<MysqlColumn> TABLE1_COLUMNS =
      Collections.singletonList(new MysqlColumn("id", "int", "int", false));
  private static final List<MysqlColumn> TABLE2_COLUMNS =
      Arrays.asList(
          new MysqlColumn("id", "int", "int", true),
          new MysqlColumn("name", "varchar", "varchar(255)", false));
  private static final List<MysqlColumn> TABLE3_COLUMNS =
      Arrays.asList(
          new MysqlColumn("id", "int", "int", true),
          new MysqlColumn("name", "text", "text", false),
          // NOTE(review): the second and third args here ("varchar(255)", "varchar") are in the
          // opposite order from every other column in this fixture (short type then full type) —
          // looks like the type/definition arguments are swapped; confirm against MysqlColumn.
          new MysqlColumn("phone", "varchar(255)", "varchar", false));

  // Pre-existing schema snapshots used to seed the schema-store cache below.
  private static final MysqlTableSchema TABLE1_SCHEMA =
      new MysqlTableSchema(
          1,
          "db1",
          "table1",
          BINLOG_FILE_POS,
          GTID,
          "CREATE TABLE table1 (id INT)",
          192000,
          TABLE1_COLUMNS,
          Collections.emptyMap());
  private static final MysqlTableSchema TABLE2_SCHEMA =
      new MysqlTableSchema(
          2,
          "db1",
          "table2",
          BINLOG_FILE_POS,
          GTID,
          "CREATE TABLE table2 (id INT, name VARCHAR(255))",
          492000,
          TABLE2_COLUMNS,
          Collections.emptyMap());
  private static final MysqlTableSchema TABLE3_SCHEMA =
      new MysqlTableSchema(
          3,
          "db2",
          "table3",
          BINLOG_FILE_POS,
          GTID,
          "ALTER TABLE db2.table3 ADD name text, ADD phone VARCHAR(255)",
          892000,
          TABLE3_COLUMNS,
          Collections.emptyMap());

  // database -> table -> latest schema; mimics the in-memory cache kept by MysqlSchemaStore.
  private static final Table<String, String, MysqlTableSchema> SCHEMA_STORE_CACHE =
      Tables.newCustomTable(Maps.newHashMap(), Maps::newHashMap);

  private final MysqlSchemaStore schemaStore = mock(MysqlSchemaStore.class);
  private final MysqlSchemaDatabase schemaDatabase = mock(MysqlSchemaDatabase.class);
  private final MysqlSchemaReader schemaReader = mock(MysqlSchemaReader.class);
  private final MysqlClient mysqlClient = mock(MysqlClient.class);

  // System under test, with schema versioning enabled (last arg = true).
  private final MysqlSchemaManager schemaManager =
      new MysqlSchemaManager(
          "test_source", schemaStore, schemaDatabase, schemaReader, mysqlClient, true);

  static {
    SCHEMA_STORE_CACHE.put("db1", "table1", TABLE1_SCHEMA);
    SCHEMA_STORE_CACHE.put("db1", "table2", TABLE2_SCHEMA);
    SCHEMA_STORE_CACHE.put("db2", "table3", TABLE3_SCHEMA);
  }

  /** Stubs the schema database to reflect the initial two-database, three-table layout. */
  @Before
  public void setUp() {
    when(schemaDatabase.listDatabases()).thenReturn(ImmutableList.of("db1", "db2"));
    when(schemaDatabase.getColumnsForAllTables("db1"))
        .thenReturn(ImmutableMap.of("table1", TABLE1_COLUMNS, "table2", TABLE2_COLUMNS));
    when(schemaDatabase.getColumnsForAllTables("db2"))
        .thenReturn(ImmutableMap.of("table3", TABLE3_COLUMNS));
    when(schemaStore.getSchemaCache()).thenReturn(SCHEMA_STORE_CACHE);
  }

  /** With versioning disabled, column lookups go straight to the live schema reader. */
  @Test
  public void testSchemaVersionDisabled() {
    final MysqlSchemaManager schemaManagerWithSchemaVersionDisabled =
        new MysqlSchemaManager("test_source", null, null, schemaReader, mysqlClient, false);
    when(schemaReader.getTableColumns("db", "table")).thenReturn(TABLE1_COLUMNS);
    List<MysqlColumn> columns =
        schemaManagerWithSchemaVersionDisabled.getTableColumns("db", "table");
    assertEquals(TABLE1_COLUMNS, columns);
    verify(schemaReader).getTableColumns("db", "table");
  }

  /** CREATE TABLE stores one new snapshot for the created table. */
  @Test
  public void testCreateTable() {
    String sql = "CREATE TABLE table123 (uid INT)";
    QueryEvent event = new QueryEvent(1L, TIMESTAMP, BINLOG_FILE_POS, "db1", sql);
    List<MysqlColumn> newTableColumns =
        Collections.singletonList(new MysqlColumn("uid", "int", "int", false));
    when(schemaDatabase.getColumnsForAllTables("db1"))
        .thenReturn(
            ImmutableMap.of(
                "table1", TABLE1_COLUMNS, "table2", TABLE2_COLUMNS, "table123", newTableColumns));

    schemaManager.processDDL(event, GTID);

    verify(schemaDatabase).applyDDL(sql, "db1");
    verify(schemaStore)
        .put(
            new MysqlTableSchema(
                0,
                "db1",
                "table123",
                BINLOG_FILE_POS,
                GTID,
                sql,
                TIMESTAMP,
                newTableColumns,
                Collections.emptyMap()));
  }

  /** DROP TABLE stores a tombstone snapshot (empty column list) for the dropped table. */
  @Test
  public void testDropTable() {
    String sql = "DROP TABLE db1.table2";
    QueryEvent event = new QueryEvent(1L, TIMESTAMP, BINLOG_FILE_POS, "db1", sql);
    when(schemaDatabase.getColumnsForAllTables("db1"))
        .thenReturn(ImmutableMap.of("table1", TABLE1_COLUMNS));

    schemaManager.processDDL(event, GTID);

    verify(schemaDatabase).applyDDL(sql, "db1");
    verify(schemaStore)
        .put(
            new MysqlTableSchema(
                0,
                "db1",
                "table2",
                BINLOG_FILE_POS,
                GTID,
                sql,
                TIMESTAMP,
                Collections.emptyList(),
                Collections.emptyMap()));
  }

  /** ALTER TABLE stores an updated snapshot with the new column set. */
  @Test
  public void testAlterTable() {
    String sql = "ALTER TABLE db1.table1 ADD account VARCHAR(255)";
    QueryEvent event = new QueryEvent(1L, TIMESTAMP, BINLOG_FILE_POS, "db1", sql);
    List<MysqlColumn> newTableColumns =
        Arrays.asList(
            new MysqlColumn("id", "int", "int", false),
            new MysqlColumn("account", "varchar", "varchar(255)", false));
    when(schemaDatabase.getColumnsForAllTables("db1"))
        .thenReturn(ImmutableMap.of("table1", newTableColumns, "table2", TABLE2_COLUMNS));

    schemaManager.processDDL(event, GTID);

    verify(schemaDatabase).applyDDL(sql, "db1");
    verify(schemaStore)
        .put(
            new MysqlTableSchema(
                0,
                "db1",
                "table1",
                BINLOG_FILE_POS,
                GTID,
                sql,
                TIMESTAMP,
                newTableColumns,
                Collections.emptyMap()));
  }

  /** RENAME TABLE stores a tombstone for the old name and a full snapshot for the new name. */
  @Test
  public void testRenameTable() {
    String sql = "RENAME TABLE db2.table3 TO db2.table4";
    QueryEvent event = new QueryEvent(1L, TIMESTAMP, BINLOG_FILE_POS, "db2", sql);
    when(schemaDatabase.getColumnsForAllTables("db2"))
        .thenReturn(ImmutableMap.of("table4", TABLE3_COLUMNS));

    schemaManager.processDDL(event, GTID);

    verify(schemaDatabase).applyDDL(sql, "db2");
    verify(schemaStore)
        .put(
            new MysqlTableSchema(
                0,
                "db2",
                "table3",
                BINLOG_FILE_POS,
                GTID,
                sql,
                TIMESTAMP,
                Collections.emptyList(),
                Collections.emptyMap()));
    verify(schemaStore)
        .put(
            new MysqlTableSchema(
                0,
                "db2",
                "table4",
                BINLOG_FILE_POS,
                GTID,
                sql,
                TIMESTAMP,
                TABLE3_COLUMNS,
                Collections.emptyMap()));
  }

  /** CREATE DATABASE is applied without a database context and stored with a null table name. */
  @Test
  public void testCreateDatabase() {
    String sql = "CREATE DATABASE db3";
    QueryEvent event = new QueryEvent(1L, TIMESTAMP, BINLOG_FILE_POS, "db3", sql);
    when(schemaDatabase.listDatabases()).thenReturn(ImmutableList.of("db1", "db2", "db3"));
    when(schemaDatabase.getColumnsForAllTables("db3")).thenReturn(Collections.emptyMap());

    schemaManager.processDDL(event, GTID);

    verify(schemaDatabase).applyDDL(sql, null);
    verify(schemaStore)
        .put(
            new MysqlTableSchema(
                0,
                "db3",
                null,
                BINLOG_FILE_POS,
                GTID,
                sql,
                TIMESTAMP,
                Collections.emptyList(),
                Collections.emptyMap()));
  }

  /** DROP DATABASE tombstones every table the dropped database contained. */
  @Test
  public void testDropDatabase() {
    String sql = "DROP DATABASE db1";
    QueryEvent event = new QueryEvent(1, TIMESTAMP, BINLOG_FILE_POS, null, sql);
    when(schemaDatabase.listDatabases()).thenReturn(ImmutableList.of("db2"));
    when(schemaDatabase.getColumnsForAllTables("db1")).thenReturn(Collections.emptyMap());

    schemaManager.processDDL(event, GTID);

    verify(schemaDatabase).applyDDL(sql, null);
    verify(schemaStore)
        .put(
            new MysqlTableSchema(
                0,
                "db1",
                "table1",
                BINLOG_FILE_POS,
                GTID,
                sql,
                TIMESTAMP,
                Collections.emptyList(),
                Collections.emptyMap()));
    verify(schemaStore)
        .put(
            new MysqlTableSchema(
                0,
                "db1",
                "table2",
                BINLOG_FILE_POS,
                GTID,
                sql,
                TIMESTAMP,
                Collections.emptyList(),
                Collections.emptyMap()));
  }
}
1,940
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/event/mapper/MysqlMutationMapperTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.mapper;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import com.airbnb.spinaltap.Mutation;
import com.airbnb.spinaltap.common.util.Mapper;
import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.DataSource;
import com.airbnb.spinaltap.mysql.MysqlSourceMetrics;
import com.airbnb.spinaltap.mysql.TableCache;
import com.airbnb.spinaltap.mysql.Transaction;
import com.airbnb.spinaltap.mysql.event.BinlogEvent;
import com.airbnb.spinaltap.mysql.event.DeleteEvent;
import com.airbnb.spinaltap.mysql.event.QueryEvent;
import com.airbnb.spinaltap.mysql.event.StartEvent;
import com.airbnb.spinaltap.mysql.event.TableMapEvent;
import com.airbnb.spinaltap.mysql.event.UpdateEvent;
import com.airbnb.spinaltap.mysql.event.WriteEvent;
import com.airbnb.spinaltap.mysql.event.XidEvent;
import com.airbnb.spinaltap.mysql.mutation.MysqlDeleteMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlInsertMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata;
import com.airbnb.spinaltap.mysql.mutation.MysqlUpdateMutation;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnDataType;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Row;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManager;
import com.google.common.collect.ImmutableList;
import java.io.Serializable;
import java.util.AbstractMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Before;
import org.junit.Test;

/**
 * Unit tests for {@link MysqlMutationMapper}: verifies that each binlog event type (write, update,
 * delete, table map, XID, query, start) is mapped to the expected list of mutations, and that
 * mutation metadata (source, position, table, transactions, leader epoch, row position) is
 * populated correctly.
 */
public class MysqlMutationMapperTest {
  private static final DataSource DATA_SOURCE = new DataSource("test", 0, "test");
  private static final String DATABASE_NAME = "db";
  private static final String TABLE_NAME = "users";
  private static final byte[] COLUMN_TYPES = {0, 1};
  private static final long TABLE_ID = 0L;
  private static final long SERVER_ID = 1L;
  private static final long TIMESTAMP = 6L;
  // NOTE(review): not final and effectively constant — consider making it `private static final`.
  private static BinlogFilePos BINLOG_FILE_POS = new BinlogFilePos("test.218", 14, 100);

  // Fixture table: 4 columns (id PK LONGLONG, name VARCHAR, age INT24, sex TINY).
  private static final Table TEST_TABLE =
      new Table(
          TABLE_ID,
          "Users",
          "test_db",
          null,
          ImmutableList.of(
              new ColumnMetadata("id", ColumnDataType.LONGLONG, true, 0),
              new ColumnMetadata("name", ColumnDataType.VARCHAR, false, 1),
              new ColumnMetadata("age", ColumnDataType.INT24, false, 2),
              new ColumnMetadata("sex", ColumnDataType.TINY, false, 3)),
          ImmutableList.of("id"));

  // Transaction markers shared with the mapper; populated in setUp and mutated by mapped events.
  private final AtomicReference<Transaction> beginTransaction = new AtomicReference<>();
  private final AtomicReference<Transaction> lastTransaction = new AtomicReference<>();
  private final AtomicLong leaderEpoch = new AtomicLong(4l);
  private final TableCache tableCache = mock(TableCache.class);
  private final MysqlSchemaManager schemaManager = mock(MysqlSchemaManager.class);
  private final MysqlSourceMetrics metrics = mock(MysqlSourceMetrics.class);

  // System under test.
  private Mapper<BinlogEvent, List<? extends Mutation<?>>> eventMapper =
      MysqlMutationMapper.create(
          DATA_SOURCE,
          tableCache,
          schemaManager,
          leaderEpoch,
          beginTransaction,
          lastTransaction,
          metrics);

  @Before
  public void setUp() throws Exception {
    lastTransaction.set(new Transaction(12L, 30L, new BinlogFilePos("test.txt", 14, 100)));
    beginTransaction.set(new Transaction(15L, 31L, new BinlogFilePos("test.txt", 14, 120)));
    when(tableCache.get(TABLE_ID)).thenReturn(TEST_TABLE);
  }

  /** A WriteEvent with two rows maps to two insert mutations with the row values by column name. */
  @Test
  public void testInsertMutation() throws Exception {
    Serializable[] change = new Serializable[4];
    change[0] = 12131L;
    change[1] = "test_user";
    change[2] = 25;
    change[3] = 0;

    Serializable[] change2 = new Serializable[4];
    change2[0] = 12334L;
    change2[1] = "test_user2";
    change2[2] = 12;
    change2[3] = 1;

    BinlogEvent event =
        new WriteEvent(
            TABLE_ID, SERVER_ID, TIMESTAMP, BINLOG_FILE_POS, ImmutableList.of(change, change2));
    List<? extends Mutation> mutations = eventMapper.map(event);

    assertEquals(2, mutations.size());
    assertTrue(mutations.get(0) instanceof MysqlInsertMutation);

    MysqlInsertMutation mutation = (MysqlInsertMutation) mutations.get(0);
    validateMetadata(mutation, 0);

    Row row = mutation.getEntity();
    assertEquals(12131L, row.getColumns().get("id").getValue());
    assertEquals("test_user", row.getColumns().get("name").getValue());
    assertEquals(25, row.getColumns().get("age").getValue());
    assertEquals(0, row.getColumns().get("sex").getValue());

    assertTrue(mutations.get(1) instanceof MysqlInsertMutation);
    mutation = (MysqlInsertMutation) mutations.get(1);
    validateMetadata(mutation, 1);

    row = mutation.getEntity();
    assertEquals(12334L, row.getColumns().get("id").getValue());
    assertEquals("test_user2", row.getColumns().get("name").getValue());
    assertEquals(12, row.getColumns().get("age").getValue());
    assertEquals(1, row.getColumns().get("sex").getValue());
  }

  /** An UpdateEvent with unchanged PKs maps to update mutations carrying old and new rows. */
  @Test
  public void testUpdateMutation() throws Exception {
    Serializable[] old = new Serializable[4];
    old[0] = 12131L;
    old[1] = "test_user";
    old[2] = 25;
    old[3] = 0;

    Serializable[] current = new Serializable[4];
    current[0] = old[0];
    current[1] = old[1];
    current[2] = 26;
    current[3] = old[3];

    Serializable[] old2 = new Serializable[4];
    old2[0] = 12334L;
    old2[1] = "test_user2";
    old2[2] = 30;
    old2[3] = 1;

    Serializable[] current2 = new Serializable[4];
    current2[0] = old2[0];
    current2[1] = old2[1];
    current2[2] = 31;
    current2[3] = old2[3];

    Map.Entry<Serializable[], Serializable[]> change = new AbstractMap.SimpleEntry<>(old, current);
    Map.Entry<Serializable[], Serializable[]> change2 =
        new AbstractMap.SimpleEntry<>(old2, current2);
    BinlogEvent event =
        new UpdateEvent(
            TABLE_ID, SERVER_ID, TIMESTAMP, BINLOG_FILE_POS, ImmutableList.of(change, change2));
    List<? extends Mutation> mutations = eventMapper.map(event);

    assertEquals(2, mutations.size());
    assertTrue(mutations.get(0) instanceof MysqlUpdateMutation);

    MysqlUpdateMutation mutation = (MysqlUpdateMutation) mutations.get(0);
    validateMetadata(mutation, 0);

    Row oldRow = mutation.getPreviousRow();
    Row newRow = mutation.getRow();

    assertEquals(12131L, oldRow.getColumns().get("id").getValue());
    assertEquals("test_user", oldRow.getColumns().get("name").getValue());
    assertEquals(25, oldRow.getColumns().get("age").getValue());
    assertEquals(0, oldRow.getColumns().get("sex").getValue());

    assertEquals(12131L, newRow.getColumns().get("id").getValue());
    assertEquals("test_user", newRow.getColumns().get("name").getValue());
    assertEquals(26, newRow.getColumns().get("age").getValue());
    assertEquals(0, newRow.getColumns().get("sex").getValue());

    assertTrue(mutations.get(1) instanceof MysqlUpdateMutation);
    mutation = (MysqlUpdateMutation) mutations.get(1);
    validateMetadata(mutation, 1);

    oldRow = mutation.getPreviousRow();
    newRow = mutation.getRow();

    assertEquals(12334L, oldRow.getColumns().get("id").getValue());
    assertEquals("test_user2", oldRow.getColumns().get("name").getValue());
    assertEquals(30, oldRow.getColumns().get("age").getValue());
    assertEquals(1, oldRow.getColumns().get("sex").getValue());

    assertEquals(12334L, newRow.getColumns().get("id").getValue());
    assertEquals("test_user2", newRow.getColumns().get("name").getValue());
    assertEquals(31, newRow.getColumns().get("age").getValue());
    assertEquals(1, newRow.getColumns().get("sex").getValue());
  }

  /** A PK change is mapped to a delete (old row) followed by an insert (new row). */
  @Test
  public void testUpdateMutationWithDifferentPK() throws Exception {
    Serializable[] old = new Serializable[4];
    old[0] = 12131L;
    old[1] = "test_user";
    old[2] = 25;
    old[3] = 0;

    Serializable[] current = new Serializable[4];
    current[0] = 12334L;
    current[1] = old[1];
    current[2] = 26;
    current[3] = old[3];

    Map.Entry<Serializable[], Serializable[]> change = new AbstractMap.SimpleEntry<>(old, current);
    BinlogEvent event =
        new UpdateEvent(TABLE_ID, SERVER_ID, TIMESTAMP, BINLOG_FILE_POS, ImmutableList.of(change));
    List<? extends Mutation> mutations = eventMapper.map(event);

    assertEquals(2, mutations.size());
    assertTrue(mutations.get(0) instanceof MysqlDeleteMutation);

    MysqlDeleteMutation deleteMutation = (MysqlDeleteMutation) mutations.get(0);
    validateMetadata(deleteMutation, 0);

    Row row = deleteMutation.getRow();
    assertEquals(12131L, row.getColumns().get("id").getValue());
    assertEquals("test_user", row.getColumns().get("name").getValue());
    assertEquals(25, row.getColumns().get("age").getValue());
    assertEquals(0, row.getColumns().get("sex").getValue());

    assertTrue(mutations.get(1) instanceof MysqlInsertMutation);
    MysqlInsertMutation insertMutation = (MysqlInsertMutation) mutations.get(1);
    // Both mutations originate from the same source row, hence row position 0 for both.
    validateMetadata(insertMutation, 0);

    row = insertMutation.getRow();
    assertEquals(12334L, row.getColumns().get("id").getValue());
    assertEquals("test_user", row.getColumns().get("name").getValue());
    assertEquals(26, row.getColumns().get("age").getValue());
    assertEquals(0, row.getColumns().get("sex").getValue());
  }

  /** A null PK in both images is still treated as an unchanged PK (single update mutation). */
  @Test
  public void testUpdateMutationWithNullPK() throws Exception {
    Serializable[] old = new Serializable[4];
    old[0] = null;
    old[1] = "test_user";
    old[2] = 25;
    old[3] = 0;

    Serializable[] current = new Serializable[4];
    current[0] = null;
    current[1] = old[1];
    current[2] = 26;
    current[3] = old[3];

    Map.Entry<Serializable[], Serializable[]> change = new AbstractMap.SimpleEntry<>(old, current);
    BinlogEvent event =
        new UpdateEvent(TABLE_ID, SERVER_ID, TIMESTAMP, BINLOG_FILE_POS, ImmutableList.of(change));
    List<? extends Mutation> mutations = eventMapper.map(event);

    assertEquals(1, mutations.size());
    assertTrue(mutations.get(0) instanceof MysqlUpdateMutation);

    MysqlUpdateMutation mutation = (MysqlUpdateMutation) mutations.get(0);
    validateMetadata(mutation, 0);

    Row oldRow = mutation.getPreviousRow();
    Row newRow = mutation.getRow();

    assertEquals(null, oldRow.getColumns().get("id").getValue());
    assertEquals("test_user", oldRow.getColumns().get("name").getValue());
    assertEquals(25, oldRow.getColumns().get("age").getValue());
    assertEquals(0, oldRow.getColumns().get("sex").getValue());

    assertEquals(null, newRow.getColumns().get("id").getValue());
    assertEquals("test_user", newRow.getColumns().get("name").getValue());
    assertEquals(26, newRow.getColumns().get("age").getValue());
    assertEquals(0, newRow.getColumns().get("sex").getValue());
  }

  /** A DeleteEvent with two rows maps to two delete mutations. */
  @Test
  public void testDeleteMutation() throws Exception {
    Serializable[] change = new Serializable[4];
    change[0] = 12131L;
    change[1] = "test_user";
    change[2] = 25;
    change[3] = 0;

    Serializable[] change2 = new Serializable[4];
    change2[0] = 12334L;
    change2[1] = "test_user2";
    change2[2] = 12;
    change2[3] = 1;

    BinlogEvent event =
        new DeleteEvent(
            TABLE_ID, SERVER_ID, TIMESTAMP, BINLOG_FILE_POS, ImmutableList.of(change, change2));
    List<? extends Mutation> mutations = eventMapper.map(event);

    assertEquals(2, mutations.size());
    assertTrue(mutations.get(0) instanceof MysqlDeleteMutation);

    MysqlDeleteMutation mutation = (MysqlDeleteMutation) mutations.get(0);
    validateMetadata(mutation, 0);

    Row row = mutation.getEntity();
    assertEquals(12131L, row.getColumns().get("id").getValue());
    assertEquals("test_user", row.getColumns().get("name").getValue());
    assertEquals(25, row.getColumns().get("age").getValue());
    assertEquals(0, row.getColumns().get("sex").getValue());

    assertTrue(mutations.get(1) instanceof MysqlDeleteMutation);
    mutation = (MysqlDeleteMutation) mutations.get(1);
    validateMetadata(mutation, 1);

    row = mutation.getEntity();
    assertEquals(12334L, row.getColumns().get("id").getValue());
    assertEquals("test_user2", row.getColumns().get("name").getValue());
    assertEquals(12, row.getColumns().get("age").getValue());
    assertEquals(1, row.getColumns().get("sex").getValue());
  }

  /** A TableMapEvent produces no mutations but refreshes the table cache. */
  @Test
  public void testTableMap() throws Exception {
    TableMapEvent event =
        new TableMapEvent(
            TABLE_ID, 0l, 0l, BINLOG_FILE_POS, DATABASE_NAME, TABLE_NAME, COLUMN_TYPES);
    List<? extends Mutation> mutations = eventMapper.map(event);

    assertTrue(mutations.isEmpty());
    verify(tableCache, times(1))
        .addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, event.getColumnTypes());
  }

  /** An XID (commit) event updates lastTransaction and increments the transaction metric. */
  @Test
  public void testXid() throws Exception {
    XidEvent xidEvent = new XidEvent(SERVER_ID, 15l, new BinlogFilePos("test.200", 18, 130), 0l);
    List<? extends Mutation> mutations = eventMapper.map(xidEvent);

    assertTrue(mutations.isEmpty());
    assertEquals(15L, lastTransaction.get().getTimestamp());
    verify(metrics, times(1)).transactionReceived();
  }

  /** Only "BEGIN" query events advance beginTransaction; other queries are ignored. */
  @Test
  public void testQuery() throws Exception {
    QueryEvent queryEvent =
        new QueryEvent(SERVER_ID, 15l, BINLOG_FILE_POS, DATABASE_NAME, "BEGIN");
    List<? extends Mutation> mutations = eventMapper.map(queryEvent);

    assertTrue(mutations.isEmpty());
    assertEquals(15L, beginTransaction.get().getTimestamp());

    queryEvent = new QueryEvent(SERVER_ID, 30l, BINLOG_FILE_POS, DATABASE_NAME, "");
    mutations = eventMapper.map(queryEvent);

    assertTrue(mutations.isEmpty());
    // Non-BEGIN query must not move the begin-transaction marker.
    assertEquals(15L, beginTransaction.get().getTimestamp());

    queryEvent = new QueryEvent(SERVER_ID, 30l, BINLOG_FILE_POS, DATABASE_NAME, "BEGIN");
    mutations = eventMapper.map(queryEvent);

    assertTrue(mutations.isEmpty());
    assertEquals(30L, beginTransaction.get().getTimestamp());
  }

  /** A StartEvent (new binlog file) clears the table cache. */
  @Test
  public void testStart() throws Exception {
    StartEvent event = new StartEvent(0l, 0l, BINLOG_FILE_POS);
    List<? extends Mutation> mutations = eventMapper.map(event);

    assertTrue(mutations.isEmpty());
    verify(tableCache, times(1)).clear();
  }

  /** Unknown event types are rejected rather than silently dropped. */
  @Test(expected = IllegalStateException.class)
  public void testNoMutationMapping() throws Exception {
    eventMapper.map(mock(BinlogEvent.class));
  }

  /** Asserts every metadata field the mapper is expected to stamp onto a mutation. */
  private void validateMetadata(Mutation mutation, int rowPosition) {
    MysqlMutationMetadata metadata = (MysqlMutationMetadata) mutation.getMetadata();

    assertEquals(DATA_SOURCE, metadata.getDataSource());
    assertEquals(BINLOG_FILE_POS, metadata.getFilePos());
    assertEquals(TEST_TABLE, metadata.getTable());
    assertEquals(SERVER_ID, metadata.getServerId());
    assertEquals(TIMESTAMP, metadata.getTimestamp());
    assertEquals(beginTransaction.get(), metadata.getBeginTransaction());
    assertEquals(lastTransaction.get(), metadata.getLastTransaction());
    assertEquals(leaderEpoch.get(), metadata.getLeaderEpoch());
    assertEquals(rowPosition, metadata.getEventRowPosition());
  }
}
1,941
0
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/test/java/com/airbnb/spinaltap/mysql/event/filter/MysqlEventFilterTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.filter;

import static org.junit.Assert.*;
import static org.mockito.Mockito.*;

import com.airbnb.spinaltap.common.source.MysqlSourceState;
import com.airbnb.spinaltap.common.util.Filter;
import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.TableCache;
import com.airbnb.spinaltap.mysql.event.BinlogEvent;
import com.airbnb.spinaltap.mysql.event.DeleteEvent;
import com.airbnb.spinaltap.mysql.event.QueryEvent;
import com.airbnb.spinaltap.mysql.event.StartEvent;
import com.airbnb.spinaltap.mysql.event.TableMapEvent;
import com.airbnb.spinaltap.mysql.event.UpdateEvent;
import com.airbnb.spinaltap.mysql.event.WriteEvent;
import com.airbnb.spinaltap.mysql.event.XidEvent;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import com.google.common.collect.Sets;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Test;

/**
 * Unit tests for {@link MysqlEventFilter}: events are accepted only when they belong to a tracked
 * table, the table is present in the cache (for row events), and their binlog offset is past the
 * last checkpointed state.
 */
public class MysqlEventFilterTest {
  private static final String DATABASE_NAME = "db";
  private static final String TABLE_NAME = "users";
  private static final long TABLE_ID = 1l;
  private static final Set<String> TABLE_NAMES =
      Sets.newHashSet(Table.canonicalNameOf(DATABASE_NAME, TABLE_NAME));
  private static final BinlogFilePos BINLOG_FILE_POS = new BinlogFilePos("test.123", 14, 100);

  @Test
  public void testEventFilter() throws Exception {
    TableCache tableCache = mock(TableCache.class);
    // State is checkpointed at BINLOG_FILE_POS; only events at a later position should pass.
    BinlogEvent lastEvent = new XidEvent(0l, 0l, BINLOG_FILE_POS, 0l);
    BinlogFilePos nextPosition = new BinlogFilePos("test.123", 15, 100);
    MysqlSourceState state = new MysqlSourceState(0l, lastEvent.getOffset(), 0l, BINLOG_FILE_POS);
    // NOTE(review): raw AtomicReference — could be new AtomicReference<>(state) to avoid the
    // unchecked warning.
    Filter<BinlogEvent> filter =
        MysqlEventFilter.create(tableCache, TABLE_NAMES, new AtomicReference(state));

    when(tableCache.contains(TABLE_ID)).thenReturn(true);

    // Events past the checkpoint, on a tracked/cached table, are accepted.
    assertTrue(
        filter.apply(
            new TableMapEvent(
                TABLE_ID, 0l, 0l, nextPosition, DATABASE_NAME, TABLE_NAME, new byte[1])));
    assertTrue(
        filter.apply(new WriteEvent(TABLE_ID, 0l, 0l, nextPosition, Collections.emptyList())));
    assertTrue(
        filter.apply(new DeleteEvent(TABLE_ID, 0l, 0l, nextPosition, Collections.emptyList())));
    assertTrue(
        filter.apply(new UpdateEvent(TABLE_ID, 0l, 0l, nextPosition, Collections.emptyList())));
    // Transaction-boundary events are always accepted regardless of position/table.
    assertTrue(filter.apply(new XidEvent(0l, 0l, BINLOG_FILE_POS, 12l)));
    assertTrue(filter.apply(new QueryEvent(0l, 0l, BINLOG_FILE_POS, DATABASE_NAME, "")));
    assertTrue(filter.apply(new StartEvent(0l, 0l, BINLOG_FILE_POS)));

    // Untracked table name is rejected.
    assertFalse(
        filter.apply(new TableMapEvent(TABLE_ID, 0l, 0l, BINLOG_FILE_POS, "", "", new byte[1])));
    // Table id not present in the cache is rejected.
    assertFalse(filter.apply(new WriteEvent(0l, 0l, 0l, BINLOG_FILE_POS, Collections.emptyList())));
    // Event at (not past) the checkpointed position is rejected.
    assertFalse(
        filter.apply(new WriteEvent(TABLE_ID, 0l, 0l, BINLOG_FILE_POS, Collections.emptyList())));
    // Unknown event types are rejected.
    assertFalse(filter.apply(mock(BinlogEvent.class)));
  }
}
1,942
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/ColumnSerializationUtil.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql; import com.airbnb.spinaltap.mysql.mutation.schema.Column; import java.io.Serializable; import java.nio.ByteBuffer; import java.util.Map; import lombok.NonNull; import lombok.experimental.UtilityClass; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.SerializationUtils; import org.apache.zookeeper.server.ByteBufferInputStream; /** A utility class for MySQL {@link Column} SerDe supoort. */ @Slf4j @UtilityClass public class ColumnSerializationUtil { public static byte[] serializeColumn(@NonNull final Column oldColumn) { return SerializationUtils.serialize(oldColumn.getValue()); } /** * mapping between column type to java type BIT => BitSet ENUM, YEAR TINY, SHORT, INT24, LONG => * int SET, LONGLONG => long FLOAT => float DOUBLE => value NEWDECIMAL => BigDecimal DATE => Date * TIME, TIME_V2 => Time TIMESTAMP, TIMESTAMP_V2 => Timestmap DATETIME, DATETIME_V2 => Date case * YEAR: STRING, VARCHAR, VAR_STRING => String BLOB => byte[] */ public static Serializable deserializeColumn( @NonNull final Map<String, ByteBuffer> entity, @NonNull final String column) { final ByteBuffer byteBuffer = entity.get(column); if (byteBuffer == null) { return null; } final ByteBufferInputStream inputStream = new ByteBufferInputStream(byteBuffer); return (Serializable) SerializationUtils.deserialize(inputStream); } }
1,943
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/MysqlDestinationMetrics.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql; import com.airbnb.common.metrics.TaggedMetricRegistry; import com.airbnb.spinaltap.Mutation; import com.airbnb.spinaltap.common.destination.DestinationMetrics; import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata; import com.google.common.base.Preconditions; import java.util.HashMap; import java.util.Map; import lombok.NonNull; /** * Responsible for metrics collection on operations for {@link * com.airbnb.spinaltap.common.destination.Destination} and associated components for a given {@link * MysqlSource}. */ public class MysqlDestinationMetrics extends DestinationMetrics { private static final String DATABASE_NAME_TAG = "database_name"; private static final String TABLE_NAME_TAG = "table_name"; public MysqlDestinationMetrics( @NonNull final String sourceName, @NonNull final TaggedMetricRegistry metricRegistry) { this("mysql", sourceName, metricRegistry); } protected MysqlDestinationMetrics( @NonNull final String sourceType, @NonNull final String sourceName, @NonNull final TaggedMetricRegistry metricRegistry) { super(sourceName, sourceType, metricRegistry); } @Override protected Map<String, String> getTags(@NonNull final Mutation.Metadata metadata) { Preconditions.checkState(metadata instanceof MysqlMutationMetadata); MysqlMutationMetadata mysqlMetadata = (MysqlMutationMetadata) metadata; Map<String, String> metadataTags = new HashMap<>(); metadataTags.put(DATABASE_NAME_TAG, mysqlMetadata.getTable().getDatabase()); metadataTags.put(TABLE_NAME_TAG, mysqlMetadata.getTable().getName()); metadataTags.putAll(super.getTags(mysqlMetadata)); return metadataTags; } }
1,944
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/MysqlSourceMetrics.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import com.airbnb.common.metrics.TaggedMetricRegistry;
import com.airbnb.spinaltap.Mutation;
import com.airbnb.spinaltap.common.source.SourceMetrics;
import com.google.common.collect.ImmutableMap;
import java.util.HashMap;
import java.util.Map;

/**
 * Responsible for metrics collection on operations for {@link MysqlSource} and associated
 * components. Each public method is a thin counter increment; failure variants also record the
 * error via {@code incError}. DATABASE_NAME_TAG / TABLE_NAME_TAG are referenced below but not
 * declared here — presumably inherited from {@link SourceMetrics}; confirm against that class.
 */
public class MysqlSourceMetrics extends SourceMetrics {
  private static final String MYSQL_PREFIX = METRIC_PREFIX + ".binlog";

  // Binlog client / stream counters.
  private static final String TRANSACTION_RECEIVED_METRIC = MYSQL_PREFIX + ".transaction.count";
  private static final String DESERIALIZATION_FAILURE_METRIC =
      MYSQL_PREFIX + ".deserialization.failure.count";
  private static final String COMMUNICATION_FAILURE_METRIC = MYSQL_PREFIX + ".comm.failure.count";
  private static final String CLIENT_CONNECTED_METRIC = MYSQL_PREFIX + ".connect.count";
  private static final String CLIENT_DISCONNECTED_METRIC = MYSQL_PREFIX + ".disconnect.count";

  // Schema store / schema database counters.
  private static final String SCHEMA_STORE_GET_SUCCESS_METRIC =
      MYSQL_PREFIX + ".schema_store.get.success.count";
  private static final String SCHEMA_STORE_GET_FAILURE_METRIC =
      MYSQL_PREFIX + ".schema_store.get.failure.count";
  private static final String SCHEMA_STORE_PUT_SUCCESS_METRIC =
      MYSQL_PREFIX + ".schema_store.put.success.count";
  private static final String SCHEMA_STORE_PUT_FAILURE_METRIC =
      MYSQL_PREFIX + ".schema_store.put.failure.count";
  private static final String SCHEMA_DATABASE_APPLY_DDL_SUCCESS_METRIC =
      MYSQL_PREFIX + ".schema_database.apply.ddl.success.count";
  private static final String SCHEMA_DATABASE_APPLY_DDL_FAILURE_METRIC =
      MYSQL_PREFIX + ".schema_database.apply.ddl.failure.count";

  // NOTE(review): the four DDL_HISTORY_STORE_* constants are not referenced by any method in this
  // class — possibly used by a subclass or dead; confirm before removing.
  private static final String DDL_HISTORY_STORE_GET_SUCCESS_METRIC =
      MYSQL_PREFIX + ".ddl_history_store.get.success.count";
  private static final String DDL_HISTORY_STORE_GET_FAILURE_METRIC =
      MYSQL_PREFIX + ".ddl_history_store.get.failure.count";
  private static final String DDL_HISTORY_STORE_PUT_SUCCESS_METRIC =
      MYSQL_PREFIX + ".ddl_history_store.put.success.count";
  private static final String DDL_HISTORY_STORE_PUT_FAILURE_METRIC =
      MYSQL_PREFIX + ".ddl_history_store.put.failure.count";

  // State checkpointing / position-reset counters.
  private static final String INVALID_SCHEMA_METRIC = MYSQL_PREFIX + ".table.invalid_schema.count";
  private static final String BINLOG_FILE_START_METRIC = MYSQL_PREFIX + ".binlog_file.start.count";
  private static final String SAVE_STATE_METRIC = MYSQL_PREFIX + ".state.save.count";
  private static final String READ_STATE_METRIC = MYSQL_PREFIX + ".state.read.count";
  private static final String SAVE_STATE_FAILURE_METRIC =
      MYSQL_PREFIX + ".state.save.failure.count";
  private static final String READ_STATE_FAILURE_METRIC =
      MYSQL_PREFIX + ".state.read.failure.count";
  private static final String RESET_POSITION_METRIC = MYSQL_PREFIX + ".reset.position.count";
  private static final String RESET_EARLIEST_POSITION_METRIC =
      MYSQL_PREFIX + ".reset.earliest_position.count";

  /** Creates metrics for a plain MySQL source (source type {@code "mysql"}). */
  public MysqlSourceMetrics(final String sourceName, final TaggedMetricRegistry metricRegistry) {
    this(sourceName, "mysql", metricRegistry);
  }

  /** Extension point for subclasses reporting a different source type. */
  protected MysqlSourceMetrics(
      String sourceName, String sourceType, TaggedMetricRegistry metricRegistry) {
    super(sourceName, sourceType, metricRegistry);
  }

  public void communicationFailure(Throwable error) {
    incError(COMMUNICATION_FAILURE_METRIC, error);
  }

  public void deserializationFailure(Throwable error) {
    incError(DESERIALIZATION_FAILURE_METRIC, error);
  }

  public void clientConnected() {
    inc(CLIENT_CONNECTED_METRIC);
  }

  public void clientDisconnected() {
    inc(CLIENT_DISCONNECTED_METRIC);
  }

  public void schemaStoreGetSuccess(final String database, final String table) {
    inc(SCHEMA_STORE_GET_SUCCESS_METRIC, getTableTags(database, table));
  }

  public void schemaStoreGetFailure(
      final String database, final String table, final Throwable error) {
    incError(SCHEMA_STORE_GET_FAILURE_METRIC, error, getTableTags(database, table));
  }

  public void schemaStorePutSuccess(final String database, final String table) {
    inc(SCHEMA_STORE_PUT_SUCCESS_METRIC, getTableTags(database, table));
  }

  public void schemaStorePutFailure(
      final String database, final String table, final Throwable error) {
    incError(SCHEMA_STORE_PUT_FAILURE_METRIC, error, getTableTags(database, table));
  }

  public void schemaDatabaseApplyDDLSuccess(final String database) {
    inc(
        SCHEMA_DATABASE_APPLY_DDL_SUCCESS_METRIC,
        ImmutableMap.of(DATABASE_NAME_TAG, database == null ? "" : database));
  }

  public void schemaDatabaseApplyDDLFailure(final String database, final Throwable error) {
    incError(
        SCHEMA_DATABASE_APPLY_DDL_FAILURE_METRIC,
        error,
        ImmutableMap.of(DATABASE_NAME_TAG, database == null ? "" : database));
  }

  public void invalidSchema(final Mutation<?> mutation) {
    inc(INVALID_SCHEMA_METRIC, getTags(mutation));
  }

  public void binlogFileStart() {
    inc(BINLOG_FILE_START_METRIC);
  }

  public void stateSave() {
    inc(SAVE_STATE_METRIC);
  }

  public void stateRead() {
    inc(READ_STATE_METRIC);
  }

  public void stateSaveFailure(Throwable error) {
    incError(SAVE_STATE_FAILURE_METRIC, error);
  }

  public void stateReadFailure(Throwable error) {
    incError(READ_STATE_FAILURE_METRIC, error);
  }

  public void resetSourcePosition() {
    inc(RESET_POSITION_METRIC);
  }

  public void resetEarliestPosition() {
    inc(RESET_EARLIEST_POSITION_METRIC);
  }

  public void transactionReceived() {
    inc(TRANSACTION_RECEIVED_METRIC);
  }

  // Builds the database/table tag map; null names are mapped to empty strings (ImmutableMap is
  // null-hostile, hence the HashMap and explicit guards).
  private Map<String, String> getTableTags(final String database, final String table) {
    Map<String, String> tableTags = new HashMap<>();
    tableTags.put(DATABASE_NAME_TAG, database == null ? "" : database);
    tableTags.put(TABLE_NAME_TAG, table == null ? "" : table);
    return tableTags;
  }
}
1,945
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/StateHistory.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql; import com.airbnb.spinaltap.common.source.SourceState; import com.airbnb.spinaltap.common.util.Repository; import com.google.common.base.Preconditions; import com.google.common.collect.Queues; import java.util.Collection; import java.util.Collections; import java.util.Deque; import javax.validation.constraints.Min; import lombok.AllArgsConstructor; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; /** * Represents a collection of {@link SourceState} checkpoints. This is used to track changes to * state, and enables rolling back to previous checkpoints if needed (ex: in case of erroneous * behavior or data loss). * * <p>The state history is tracked in-memory in a {@link java.util.Deque}. Add and remove operations * are performed on the tail (stack ordering). When capacity is reached, entries are evicted from * the head (queue ordering) . * * <p>The state history is persisted in the {@link Repository} implement provided on construction. * Changes are committed on every add or remove operation to ensure durability. The in-memory * collection is mainly employed as a caching solution to optimize read operations and reduce * request load on the backing storage. 
*/ @Slf4j @AllArgsConstructor public final class StateHistory<S extends SourceState> { private static final int DEFAULT_CAPACITY = 50; @NonNull private final String sourceName; @Min(1) private final int capacity; @NonNull private final Repository<Collection<S>> repository; @NonNull private final MysqlSourceMetrics metrics; @NonNull private final Deque<S> stateHistory; public StateHistory( @NonNull final String sourceName, @NonNull final Repository<Collection<S>> repository, @NonNull final MysqlSourceMetrics metrics) { this(sourceName, DEFAULT_CAPACITY, repository, metrics); } public StateHistory( @NonNull final String sourceName, @Min(1) final int capacity, @NonNull final Repository<Collection<S>> repository, @NonNull final MysqlSourceMetrics metrics) { this.sourceName = sourceName; this.capacity = capacity; this.repository = repository; this.metrics = metrics; this.stateHistory = Queues.newArrayDeque(getPreviousStates()); } /** Adds a new {@link SourceState} entry to the history. */ public void add(final S state) { while (stateHistory.size() >= capacity) { stateHistory.removeFirst(); } stateHistory.addLast(state); save(); } /** Removes the most recently added {@link SourceState} entry from the history. */ public S removeLast() { return removeLast(1); } /** * Removes the last N most recently added {@link StateHistory} entries from the history. * * @param count the number of records to remove. * @return the last removed {@link SourceState}. 
*/ public S removeLast(int count) { Preconditions.checkArgument(count > 0, "Count should be greater than 0"); Preconditions.checkState(!stateHistory.isEmpty(), "The state history is empty"); Preconditions.checkState(stateHistory.size() >= count, "Count is larger than history size"); S state = stateHistory.removeLast(); for (int i = 1; i < count; i++) { state = stateHistory.removeLast(); } save(); return state; } /** Clears the state history */ public void clear() { if (stateHistory.isEmpty()) { return; } stateHistory.clear(); save(); } /** @return {@code True} if the history is empty, else {@code False}. */ public boolean isEmpty() { return stateHistory.isEmpty(); } /** @return the current size of the state history. */ public int size() { return stateHistory.size(); } /** @return a collection representing the {@link SourceState}s currently in the state history. */ private Collection<S> getPreviousStates() { try { return repository.exists() ? repository.get() : Collections.emptyList(); } catch (Exception ex) { log.error("Failed to read state history for source " + sourceName, ex); metrics.stateReadFailure(ex); throw new RuntimeException(ex); } } /** Persists the state history in the backing repository. */ private void save() { try { if (repository.exists()) { repository.set(stateHistory); } else { repository.create(stateHistory); } } catch (Exception ex) { log.error("Failed to save state history for source " + sourceName, ex); metrics.stateSaveFailure(ex); throw new RuntimeException(ex); } } }
1,946
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/MysqlSource.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import com.airbnb.spinaltap.Mutation;
import com.airbnb.spinaltap.common.source.AbstractDataStoreSource;
import com.airbnb.spinaltap.common.source.MysqlSourceState;
import com.airbnb.spinaltap.mysql.event.BinlogEvent;
import com.airbnb.spinaltap.mysql.event.filter.MysqlEventFilter;
import com.airbnb.spinaltap.mysql.event.mapper.MysqlMutationMapper;
import com.airbnb.spinaltap.mysql.exception.InvalidBinlogPositionException;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;

/**
 * Base implementation of a MySQL {@link com.airbnb.spinaltap.common.source.Source} that streams
 * events from a given binlog for a specified database host, and transforms them to {@link
 * Mutation}s.
 */
@Slf4j
public abstract class MysqlSource extends AbstractDataStoreSource<BinlogEvent> {
  /** Represents the latest binlog position in the mysql-binlog-connector client. */
  public static final BinlogFilePos LATEST_BINLOG_POS = new BinlogFilePos(null, 0, 0);

  /**
   * Represents the earliest binlog position in the mysql-binlog-connector client.
   * Offset 4 is the first event after the binlog file's 4-byte magic header.
   */
  public static final BinlogFilePos EARLIEST_BINLOG_POS = new BinlogFilePos("", 4, 4);

  /**
   * The backoff rate when conducting rollback in the {@link StateHistory}: each consecutive
   * rollback doubles the number of checkpoints discarded (see {@link #resetToLastValidState()}).
   */
  private static final int STATE_ROLLBACK_BACKOFF_RATE = 2;

  /** The {@link DataSource} representing the database host the source is streaming events from. */
  @NonNull @Getter private final DataSource dataSource;

  /**
   * The {@link TableCache} tracking {@link com.airbnb.spinaltap.mysql.mutation.schema.Table}
   * metadata for the streamed source events.
   */
  @NonNull private final TableCache tableCache;

  /** The {@link StateRepository} where the {@link MysqlSourceState} is committed to. */
  @NonNull private final StateRepository<MysqlSourceState> stateRepository;

  /** The initial {@link BinlogFilePos} to start streaming from for the source. */
  @NonNull private final BinlogFilePos initialBinlogFilePosition;

  @NonNull protected final MysqlSourceMetrics metrics;

  /** The last checkpointed {@link MysqlSourceState} for the source. */
  @NonNull
  @VisibleForTesting
  @Getter(AccessLevel.PACKAGE)
  private final AtomicReference<MysqlSourceState> lastSavedState;

  /** The last MySQL {@link Transaction} seen so far from the streamed events. */
  @NonNull
  @VisibleForTesting
  @Getter(AccessLevel.PACKAGE)
  private final AtomicReference<Transaction> lastTransaction;

  /** The leader epoch of the current node processing the source stream. */
  @NonNull private final AtomicLong currentLeaderEpoch;

  /** The {@link StateHistory} of checkpointed {@link MysqlSourceState}s. */
  @NonNull
  @VisibleForTesting
  @Getter(AccessLevel.PACKAGE)
  private final StateHistory<MysqlSourceState> stateHistory;

  private final MysqlSchemaManager schemaManager;

  /**
   * The number of {@link MysqlSourceState} entries to remove from {@link StateHistory} on
   * rollback. Starts at 1, doubles on each consecutive rollback, and is reset to 1 by a
   * successful checkpoint ({@link #commitCheckpoint(Mutation)}).
   */
  private final AtomicInteger stateRollbackCount = new AtomicInteger(1);

  public MysqlSource(
      @NonNull final String name,
      @NonNull final DataSource dataSource,
      @NonNull final Set<String> tableNames,
      @NonNull final TableCache tableCache,
      @NonNull final StateRepository<MysqlSourceState> stateRepository,
      @NonNull final StateHistory<MysqlSourceState> stateHistory,
      @NonNull final BinlogFilePos initialBinlogFilePosition,
      @NonNull final MysqlSchemaManager schemaManager,
      @NonNull final MysqlSourceMetrics metrics,
      @NonNull final AtomicLong currentLeaderEpoch,
      @NonNull final AtomicReference<Transaction> lastTransaction,
      @NonNull final AtomicReference<MysqlSourceState> lastSavedState) {
    super(
        name,
        metrics,
        MysqlMutationMapper.create(
            dataSource,
            tableCache,
            schemaManager,
            currentLeaderEpoch,
            new AtomicReference<>(),
            lastTransaction,
            metrics),
        // Only events for the configured tables (and at/after the saved state) pass the filter.
        MysqlEventFilter.create(tableCache, tableNames, lastSavedState));

    this.dataSource = dataSource;
    this.tableCache = tableCache;
    this.stateRepository = stateRepository;
    this.stateHistory = stateHistory;
    this.metrics = metrics;
    this.currentLeaderEpoch = currentLeaderEpoch;
    this.lastTransaction = lastTransaction;
    this.lastSavedState = lastSavedState;
    this.initialBinlogFilePosition = initialBinlogFilePosition;
    this.schemaManager = schemaManager;
  }

  /** Positions the underlying binlog client at the given {@link BinlogFilePos}. */
  public abstract void setPosition(BinlogFilePos pos);

  /**
   * Initializes the source and prepares to start streaming: clears cached table metadata, restores
   * the last saved state (or the initial position when none exists), and seeds the schema manager
   * and binlog client from that position.
   */
  protected void initialize() {
    tableCache.clear();

    MysqlSourceState state = getSavedState();
    lastSavedState.set(state);
    lastTransaction.set(
        new Transaction(state.getLastTimestamp(), state.getLastOffset(), state.getLastPosition()));

    setPosition(state.getLastPosition());
    schemaManager.initialize(state.getLastPosition());
  }

  /**
   * Resets to the last valid {@link MysqlSourceState} recorded in the {@link StateHistory}.
   * On each consecutive call the number of discarded checkpoints doubles (exponential backoff);
   * once the history is exhausted, the source falls back to the earliest binlog position.
   */
  void resetToLastValidState() {
    if (stateHistory.size() >= stateRollbackCount.get()) {
      final MysqlSourceState newState = stateHistory.removeLast(stateRollbackCount.get());
      saveState(newState);

      metrics.resetSourcePosition();
      log.info("Reset source {} position to {}.", name, newState.getLastPosition());

      stateRollbackCount.accumulateAndGet(
          STATE_ROLLBACK_BACKOFF_RATE, (value, rate) -> value * rate);
    } else {
      stateHistory.clear();
      saveState(getEarliestState());

      metrics.resetEarliestPosition();
      log.info("Reset source {} position to earliest.", name);
    }
  }

  /** @return a fresh state pointing at the earliest binlog position for the current epoch. */
  private MysqlSourceState getEarliestState() {
    return new MysqlSourceState(0L, 0L, currentLeaderEpoch.get(), EARLIEST_BINLOG_POS);
  }

  protected void onDeserializationError(final Exception ex) {
    metrics.deserializationFailure(ex);

    // Fail on deserialization errors and restart source from last checkpoint
    throw new RuntimeException(ex);
  }

  protected void onCommunicationError(final Exception ex) {
    metrics.communicationFailure(ex);

    // An invalid position means the checkpoint no longer exists on the server; roll back
    // before rethrowing so the restarted source resumes from a valid position.
    if (ex instanceof InvalidBinlogPositionException) {
      resetToLastValidState();
    }
    throw new RuntimeException(ex);
  }

  /**
   * Checkpoints the {@link MysqlSourceState} for the source at the given {@link Mutation}
   * position. No-op when the mutation would not advance the current watermark. A successful
   * checkpoint resets the rollback backoff counter.
   */
  public void commitCheckpoint(final Mutation<?> mutation) {
    final MysqlSourceState savedState = lastSavedState.get();
    if (mutation == null || savedState == null) {
      return;
    }

    Preconditions.checkState(mutation instanceof MysqlMutation);
    final MysqlMutationMetadata metadata = ((MysqlMutation) mutation).getMetadata();

    // Make sure we are saving at a higher watermark: skip if the offset does not advance
    // (file-position comparison) or the mutation's GTID set is already contained in the
    // saved state's GTID set (GTID comparison).
    BinlogFilePos mutationPosition = metadata.getFilePos();
    BinlogFilePos savedStatePosition = savedState.getLastPosition();
    if ((BinlogFilePos.shouldCompareUsingFilePosition(mutationPosition, savedStatePosition)
            && savedState.getLastOffset() >= metadata.getId())
        || (mutationPosition.getGtidSet() != null
            && mutationPosition.getGtidSet().isContainedWithin(savedStatePosition.getGtidSet()))) {
      return;
    }

    // Checkpoint at the start of the mutation's enclosing transaction, so a restart replays
    // the whole transaction rather than resuming mid-transaction.
    final MysqlSourceState newState =
        new MysqlSourceState(
            metadata.getTimestamp(),
            metadata.getId(),
            currentLeaderEpoch.get(),
            metadata.getLastTransaction().getPosition());

    saveState(newState);

    stateHistory.add(newState);
    stateRollbackCount.set(1);
  }

  /** Persists the given state and records it as the last saved state. */
  void saveState(@NonNull final MysqlSourceState state) {
    stateRepository.save(state);
    lastSavedState.set(state);
  }

  /**
   * @return the persisted {@link MysqlSourceState}, or a fresh state at the configured initial
   *     binlog position when nothing has been saved yet.
   */
  MysqlSourceState getSavedState() {
    return Optional.ofNullable(stateRepository.read())
        .orElse(new MysqlSourceState(0L, 0L, currentLeaderEpoch.get(), initialBinlogFilePosition));
  }
}
1,947
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/StateRepository.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql; import com.airbnb.spinaltap.common.source.SourceState; import com.airbnb.spinaltap.common.util.Repository; import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; /** Represents a repository for a {@link SourceState} record. */ @Slf4j @RequiredArgsConstructor public class StateRepository<S extends SourceState> { @NonNull private final String sourceName; @NonNull private final Repository<S> repository; @NonNull private final MysqlSourceMetrics metrics; /** Saves or updates the {@link SourceState} record in the repository */ public void save(@NonNull final S state) { try { repository.update( state, (currentValue, nextValue) -> { if (currentValue.getCurrentLeaderEpoch() > nextValue.getCurrentLeaderEpoch()) { log.warn("Will not update mysql state: current={}, next={}", currentValue, nextValue); return currentValue; } return nextValue; }); } catch (Exception ex) { log.error("Failed to save state for source " + sourceName, ex); metrics.stateSaveFailure(ex); throw new RuntimeException(ex); } log.info("Saved state for source {}. state={}", sourceName, state); metrics.stateSave(); } /** @return the {@link SourceState} record present in the repository. */ public S read() { S state = null; try { if (repository.exists()) { state = repository.get(); } else { log.info("State does not exist for source {}", sourceName); } } catch (Exception ex) { log.error("Failed to read state for source " + sourceName, ex); metrics.stateReadFailure(ex); throw new RuntimeException(ex); } log.debug("Read state for source {}. state={}", sourceName, state); metrics.stateRead(); return state; } }
1,948
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/MysqlPipeFactory.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import com.airbnb.common.metrics.TaggedMetricRegistry;
import com.airbnb.jitney.event.spinaltap.v1.Mutation;
import com.airbnb.spinaltap.common.config.DestinationConfiguration;
import com.airbnb.spinaltap.common.config.TlsConfiguration;
import com.airbnb.spinaltap.common.destination.Destination;
import com.airbnb.spinaltap.common.destination.DestinationBuilder;
import com.airbnb.spinaltap.common.pipe.AbstractPipeFactory;
import com.airbnb.spinaltap.common.pipe.Pipe;
import com.airbnb.spinaltap.common.pipe.PipeMetrics;
import com.airbnb.spinaltap.common.source.MysqlSourceState;
import com.airbnb.spinaltap.common.source.Source;
import com.airbnb.spinaltap.common.util.StateRepositoryFactory;
import com.airbnb.spinaltap.mysql.config.MysqlConfiguration;
import com.airbnb.spinaltap.mysql.mutation.MysqlKeyProvider;
import com.airbnb.spinaltap.mysql.mutation.mapper.ThriftMutationMapper;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManagerFactory;
import com.google.common.base.Preconditions;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import javax.validation.constraints.Min;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;

/** Represents a factory implementation for {@link Pipe}s streaming from a {@link MysqlSource}. */
@Slf4j
public final class MysqlPipeFactory
    extends AbstractPipeFactory<MysqlSourceState, MysqlConfiguration> {
  public static final String DEFAULT_MYSQL_TOPIC_PREFIX = "spinaltap";

  // Credentials used to connect to every MySQL host this factory builds sources for.
  @NonNull private final String mysqlUser;
  @NonNull private final String mysqlPassword;

  // Base server_id registered with the MySQL master by the binlog client; see createSource()
  // for how it is offset per host role.
  @Min(0)
  private final long mysqlServerId;

  // Maps a destination type name (from DestinationConfiguration) to a builder supplier.
  @NonNull
  private final Map<String, Supplier<DestinationBuilder<Mutation>>> destinationBuilderSupplierMap;

  @NonNull private final MysqlSchemaManagerFactory schemaManagerFactory;

  // May be null; only used when mTLS is enabled for a source.
  private final TlsConfiguration tlsConfiguration;

  public MysqlPipeFactory(
      @NonNull final String mysqlUser,
      @NonNull final String mysqlPassword,
      @Min(0) final long mysqlServerId,
      final TlsConfiguration tlsConfiguration,
      @NonNull
          final Map<String, Supplier<DestinationBuilder<Mutation>>> destinationBuilderSupplierMap,
      final MysqlSchemaManagerFactory schemaManagerFactory,
      @NonNull final TaggedMetricRegistry metricRegistry) {
    super(metricRegistry);
    this.mysqlUser = mysqlUser;
    this.mysqlPassword = mysqlPassword;
    this.mysqlServerId = mysqlServerId;
    this.tlsConfiguration = tlsConfiguration;
    this.destinationBuilderSupplierMap = destinationBuilderSupplierMap;
    this.schemaManagerFactory = schemaManagerFactory;
  }

  /**
   * Creates the list of {@link Pipe}s for the {@link Source} constructed from the given {@link
   * com.airbnb.spinaltap.common.config.SourceConfiguration}.
   *
   * @param sourceConfig The {@link com.airbnb.spinaltap.common.config.SourceConfiguration}.
   * @param partitionName The partition name of the node streaming from the source.
   * @param repositoryFactory The {@link StateRepositoryFactory} to create the source repositories.
   * @param leaderEpoch The leader epoch for the node streaming from the source.
   * @return the resulting {@link List} of {@link Pipe}s for the constructed {@link Source}.
   */
  @Override
  public List<Pipe> createPipes(
      @NonNull final MysqlConfiguration sourceConfig,
      @NonNull final String partitionName,
      @NonNull final StateRepositoryFactory<MysqlSourceState> repositoryFactory,
      @Min(0) final long leaderEpoch)
      throws Exception {
    // One MySQL source maps to exactly one pipe.
    return Collections.singletonList(
        create(sourceConfig, partitionName, repositoryFactory, leaderEpoch));
  }

  /** Wires a single {@link Pipe}: source, destination, and pipe-level metrics. */
  private Pipe create(
      final MysqlConfiguration sourceConfig,
      final String partitionName,
      final StateRepositoryFactory<MysqlSourceState> repositoryFactory,
      final long leaderEpoch)
      throws Exception {
    final Source source = createSource(sourceConfig, repositoryFactory, partitionName, leaderEpoch);
    final DestinationConfiguration destinationConfig = sourceConfig.getDestinationConfiguration();

    // MIGRATION sources must not fan out through a destination pool.
    Preconditions.checkState(
        !(sourceConfig.getHostRole().equals(MysqlConfiguration.HostRole.MIGRATION)
            && destinationConfig.getPoolSize() > 0),
        String.format(
            "Destination pool size is not 0 for MIGRATION source %s", sourceConfig.getName()));
    final Destination destination = createDestination(sourceConfig, destinationConfig);

    return new Pipe(source, destination, new PipeMetrics(source.getName(), metricRegistry));
  }

  /** Builds the {@link MysqlSource} for the given configuration via {@link MysqlSourceFactory}. */
  private Source createSource(
      final MysqlConfiguration configuration,
      final StateRepositoryFactory<MysqlSourceState> repositoryFactory,
      final String partitionName,
      final long leaderEpoch) {
    return MysqlSourceFactory.create(
        configuration,
        mysqlUser,
        mysqlPassword,
        // Use a different server_id for REPLICAS in case the same database is configured as
        // both MASTER and REPLICA
        mysqlServerId + configuration.getHostRole().ordinal() * 100,
        tlsConfiguration,
        repositoryFactory.getStateRepository(configuration.getName(), partitionName),
        repositoryFactory.getStateHistoryRepository(configuration.getName(), partitionName),
        schemaManagerFactory,
        new MysqlSourceMetrics(configuration.getName(), metricRegistry),
        leaderEpoch);
  }

  /**
   * Builds the {@link Destination} by looking up the builder registered for the configured
   * destination type and applying the source/destination settings.
   *
   * @throws NullPointerException if no builder is registered for the destination type.
   */
  private Destination createDestination(
      final MysqlConfiguration sourceConfiguration,
      final DestinationConfiguration destinationConfiguration) {
    Supplier<DestinationBuilder<Mutation>> destinationBuilderSupplier =
        Preconditions.checkNotNull(
            destinationBuilderSupplierMap.get(destinationConfiguration.getType()),
            String.format(
                "destination builder is not found for %s.", destinationConfiguration.getType()));

    return destinationBuilderSupplier
        .get()
        .withName(sourceConfiguration.getName())
        // Topic prefix is derived from the host role (e.g. master vs. replica topics).
        .withTopicNamePrefix(MysqlConfiguration.MYSQL_TOPICS.get(sourceConfiguration.getHostRole()))
        .withMapper(ThriftMutationMapper.create(getHostName()))
        .withMetrics(new MysqlDestinationMetrics(sourceConfiguration.getName(), metricRegistry))
        .withBuffer(destinationConfiguration.getBufferSize())
        .withPool(destinationConfiguration.getPoolSize(), MysqlKeyProvider.INSTANCE)
        .withValidation()
        .withLargeMessage(sourceConfiguration.isLargeMessageEnabled())
        .withDelaySendMs(sourceConfiguration.getDelaySendMs())
        .withProducerConfig(destinationConfiguration.getProducerConfig())
        .build();
  }
}
1,949
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/MysqlSourceFactory.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import com.airbnb.spinaltap.common.config.TlsConfiguration;
import com.airbnb.spinaltap.common.source.MysqlSourceState;
import com.airbnb.spinaltap.common.source.Source;
import com.airbnb.spinaltap.common.util.Repository;
import com.airbnb.spinaltap.common.validator.MutationOrderValidator;
import com.airbnb.spinaltap.mysql.binlog_connector.BinaryLogConnectorSource;
import com.airbnb.spinaltap.mysql.config.MysqlConfiguration;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManager;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManagerFactory;
import com.airbnb.spinaltap.mysql.validator.EventOrderValidator;
import com.airbnb.spinaltap.mysql.validator.MutationSchemaValidator;
import com.github.shyiko.mysql.binlog.BinaryLogClient;
import java.util.Collection;
import java.util.concurrent.atomic.AtomicLong;
import javax.validation.constraints.Min;
import lombok.NonNull;
import lombok.experimental.UtilityClass;

/** Represents a factory for a {@link MysqlSource}. */
@UtilityClass
public class MysqlSourceFactory {
  /**
   * Assembles a fully wired {@link BinaryLogConnectorSource} for the given configuration:
   * binlog client, state repositories, schema manager, table cache, and validators.
   *
   * @param configuration the source configuration (host, port, server_id override, etc.)
   * @param user the MySQL user to connect with
   * @param password the MySQL password to connect with
   * @param serverId the server_id to register with the master unless overridden in config
   * @param tlsConfiguration TLS settings; may be null when mTLS is disabled
   * @param backingStateRepository durable storage for the latest checkpointed state
   * @param stateHistoryRepository durable storage for the checkpoint history
   * @param schemaManagerFactory factory for the schema manager tracking table schemas
   * @param metrics the metrics sink for the source
   * @param leaderEpoch the leader epoch of the node that will stream this source
   * @return the constructed {@link Source}
   */
  public Source create(
      @NonNull final MysqlConfiguration configuration,
      @NonNull final String user,
      @NonNull final String password,
      @Min(0) final long serverId,
      final TlsConfiguration tlsConfiguration,
      @NonNull final Repository<MysqlSourceState> backingStateRepository,
      @NonNull final Repository<Collection<MysqlSourceState>> stateHistoryRepository,
      final MysqlSchemaManagerFactory schemaManagerFactory,
      @NonNull final MysqlSourceMetrics metrics,
      @Min(0) final long leaderEpoch) {
    final String name = configuration.getName();
    final String host = configuration.getHost();
    final int port = configuration.getPort();

    final BinaryLogClient binlogClient = new BinaryLogClient(host, port, user, password);
    /*
     Override the global server_id if it is set in MysqlConfiguration
     Allow each source to use a different server_id
    */
    if (configuration.getServerId() != MysqlConfiguration.DEFAULT_SERVER_ID) {
      binlogClient.setServerId(configuration.getServerId());
    } else {
      binlogClient.setServerId(serverId);
    }

    final StateRepository<MysqlSourceState> stateRepository =
        new StateRepository<>(name, backingStateRepository, metrics);

    final StateHistory<MysqlSourceState> stateHistory =
        new StateHistory<>(name, stateHistoryRepository, metrics);

    // Separate JDBC client used for schema queries (not the binlog stream).
    final MysqlClient mysqlClient =
        MysqlClient.create(
            host, port, user, password, configuration.isMTlsEnabled(), tlsConfiguration);
    final MysqlSchemaManager schemaManager =
        schemaManagerFactory.create(
            name, mysqlClient, configuration.isSchemaVersionEnabled(), metrics);

    final TableCache tableCache =
        new TableCache(schemaManager, configuration.getOverridingDatabase());

    final BinaryLogConnectorSource source =
        new BinaryLogConnectorSource(
            name,
            configuration,
            tlsConfiguration,
            binlogClient,
            mysqlClient,
            tableCache,
            stateRepository,
            stateHistory,
            schemaManager,
            metrics,
            new AtomicLong(leaderEpoch));

    // Validators: out-of-order events/mutations and schema mismatches are reported as metrics.
    source.addEventValidator(new EventOrderValidator(metrics::outOfOrder));
    source.addMutationValidator(new MutationOrderValidator(metrics::outOfOrder));
    source.addMutationValidator(new MutationSchemaValidator(metrics::invalidSchema));

    return source;
  }
}
1,950
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/MysqlClient.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import com.airbnb.spinaltap.common.config.TlsConfiguration;
import com.mysql.jdbc.jdbc2.optional.MysqlConnectionPoolDataSource;
import com.mysql.jdbc.jdbc2.optional.MysqlDataSource;
import java.util.List;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.jdbi.v3.core.Jdbi;

/** Represents a MySQL server connection and context with utility functions */
@Slf4j
@RequiredArgsConstructor
@Getter
public class MysqlClient {
  private final Jdbi jdbi;

  /** Creates a client backed by a pooled Connector/J data source for the given server. */
  public static MysqlClient create(
      String host,
      int port,
      String user,
      String password,
      boolean mTlsEnabled,
      TlsConfiguration tlsConfig) {
    return new MysqlClient(
        Jdbi.create(createMysqlDataSource(host, port, user, password, mTlsEnabled, tlsConfig)));
  }

  /**
   * Builds a {@link MysqlDataSource} for the given server, optionally configured for mutual TLS.
   * Key/trust store settings are only applied when both the path and password are present.
   */
  public static MysqlDataSource createMysqlDataSource(
      String host,
      int port,
      String user,
      String password,
      boolean mTlsEnabled,
      TlsConfiguration tlsConfig) {
    MysqlDataSource dataSource = new MysqlConnectionPoolDataSource();
    dataSource.setUser(user);
    dataSource.setPassword(password);
    dataSource.setServerName(host);
    dataSource.setPort(port);
    // Do not silently truncate out-of-range values; keep pooled connections alive.
    dataSource.setJdbcCompliantTruncation(false);
    dataSource.setAutoReconnectForConnectionPools(true);
    if (mTlsEnabled && tlsConfig != null) {
      dataSource.setUseSSL(true);
      if (tlsConfig.getKeyStoreFilePath() != null && tlsConfig.getKeyStorePassword() != null) {
        dataSource.setClientCertificateKeyStoreUrl("file:" + tlsConfig.getKeyStoreFilePath());
        dataSource.setClientCertificateKeyStorePassword(tlsConfig.getKeyStorePassword());
      }
      if (tlsConfig.getKeyStoreType() != null) {
        dataSource.setClientCertificateKeyStoreType(tlsConfig.getKeyStoreType());
      }
      if (tlsConfig.getTrustStoreFilePath() != null && tlsConfig.getTrustStorePassword() != null) {
        dataSource.setTrustCertificateKeyStoreUrl("file:" + tlsConfig.getTrustStoreFilePath());
        dataSource.setTrustCertificateKeyStorePassword(tlsConfig.getTrustStorePassword());
      }
      if (tlsConfig.getTrustStoreType() != null) {
        dataSource.setTrustCertificateKeyStoreType(tlsConfig.getTrustStoreType());
      }
    }
    return dataSource;
  }

  /**
   * @return the server's current binlog position from {@code SHOW MASTER STATUS}
   *     (columns: File, Position, then optionally Executed_Gtid_Set as the 5th column on
   *     GTID-capable servers), or {@code null} if the statement returns no row.
   */
  public BinlogFilePos getMasterStatus() {
    return jdbi.withHandle(
        handle ->
            handle
                .createQuery("SHOW MASTER STATUS")
                .map(
                    (rs, ctx) -> {
                      BinlogFilePos.Builder builder =
                          BinlogFilePos.builder()
                              .withServerUUID(getServerUUID())
                              .withFileName(rs.getString(1))
                              .withPosition(rs.getLong(2))
                              // Next position is taken equal to the current position here.
                              .withNextPosition(rs.getLong(2));
                      // 5th column (Executed_Gtid_Set) is only present on GTID-capable servers.
                      if (rs.getMetaData().getColumnCount() > 4) {
                        builder.withGtidSet(rs.getString(5));
                      }
                      return builder.build();
                    })
                .findFirst()
                .orElse(null));
  }

  /** @return the server's {@code server_uuid} global variable. */
  public String getServerUUID() {
    return getGlobalVariableValue("server_uuid");
  }

  /** @return whether the {@code gtid_mode} global variable is {@code ON}. */
  public boolean isGtidModeEnabled() {
    return "ON".equalsIgnoreCase(getGlobalVariableValue("gtid_mode"));
  }

  /** @return the names of the binary log files on the server, from {@code SHOW BINARY LOGS}. */
  public List<String> getBinaryLogs() {
    return jdbi.withHandle(
        handle ->
            handle.createQuery("SHOW BINARY LOGS").map((rs, ctx) -> rs.getString(1)).list());
  }

  /**
   * @return the value of the given global variable, or {@code null} if it does not exist.
   *     <p>NOTE(review): the variable name is interpolated into the SQL via String.format; all
   *     current callers pass internal constants, but this would be unsafe for untrusted input —
   *     confirm no external caller can reach this with arbitrary strings.
   */
  public String getGlobalVariableValue(String variableName) {
    return jdbi.withHandle(
        handle ->
            handle
                .createQuery(
                    String.format("SHOW GLOBAL VARIABLES WHERE Variable_name = '%s'", variableName))
                .map((rs, ctx) -> rs.getString(2))
                .findFirst()
                .orElse(null));
  }
}
1,951
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/TableCache.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql;

import com.airbnb.spinaltap.mysql.mutation.schema.ColumnDataType;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import com.airbnb.spinaltap.mysql.schema.MysqlColumn;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManager;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import javax.validation.constraints.Min;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;

/**
 * Represents an in-memory cache for storing table schema and metadata used during the
 * transformation of MySQL binlog events to {@link com.airbnb.spinaltap.Mutation}s.
 */
@Slf4j
@RequiredArgsConstructor
public class TableCache {
  // Source of truth for table column definitions; consulted on cache miss/invalidation.
  private final MysqlSchemaManager schemaManager;
  // If non-null, substituted as the database name on cached Table entries.
  private final String overridingDatabase;
  // Keyed by the binlog's numeric table id; bounded to 200 entries (LRU-style eviction by Guava).
  private final Cache<Long, Table> tableCache = CacheBuilder.newBuilder().maximumSize(200).build();

  /**
   * @return the {@link Table} cache entry for the given table id if present, otherwise {@code null}
   */
  public Table get(@Min(0) final long tableId) {
    return tableCache.getIfPresent(tableId);
  }

  /**
   * @return {@code True} if a cache entry exists for the given table id, otherwise {@code False}.
   */
  public boolean contains(@Min(0) final long tableId) {
    return tableCache.getIfPresent(tableId) != null;
  }

  /**
   * Adds or replaces (if already exists) a {@link Table} entry in the cache for the given table id.
   * The entry is refreshed when missing or when its name/database/column types no longer match
   * (e.g. after a DDL change).
   *
   * @param tableId The table id
   * @param tableName The table name
   * @param database The database name
   * @param columnTypes The list of column data types
   */
  public void addOrUpdate(
      @Min(0) final long tableId,
      @NonNull final String tableName,
      @NonNull final String database,
      @NonNull final List<ColumnDataType> columnTypes)
      throws Exception {
    final Table table = tableCache.getIfPresent(tableId);
    if (table == null || !validTable(table, tableName, database, columnTypes)) {
      tableCache.put(tableId, fetchTable(tableId, database, tableName, columnTypes));
    }
  }

  /** Clears the cache by invalidating all entries. */
  public void clear() {
    tableCache.invalidateAll();
  }

  /** Checks whether the cached entry still matches the event's name, database and column types. */
  private boolean validTable(
      final Table table,
      final String tableName,
      final String databaseName,
      final List<ColumnDataType> columnTypes) {
    return table.getName().equals(tableName)
        && table.getDatabase().equals(databaseName)
        && columnsMatch(table, columnTypes);
  }

  /** Checks whether the {@link Table} schema matches the given column schema (order-sensitive). */
  private boolean columnsMatch(final Table table, final List<ColumnDataType> columnTypes) {
    return table
        .getColumns()
        .values()
        .stream()
        .map(ColumnMetadata::getColType)
        .collect(Collectors.toList())
        .equals(columnTypes);
  }

  /**
   * Builds a fresh {@link Table} by zipping the schema manager's column definitions with the
   * column types reported by the binlog event. On a length mismatch the shorter of the two lists
   * bounds the result (only logged, not thrown — presumably a transient schema race; confirm).
   */
  private Table fetchTable(
      final long tableId,
      final String databaseName,
      final String tableName,
      final List<ColumnDataType> columnTypes)
      throws Exception {
    final List<MysqlColumn> tableSchema = schemaManager.getTableColumns(databaseName, tableName);
    final Iterator<MysqlColumn> schemaIterator = tableSchema.iterator();
    if (tableSchema.size() != columnTypes.size()) {
      log.error(
          "Schema length {} and Column length {} don't match",
          tableSchema.size(),
          columnTypes.size());
    }
    final List<ColumnMetadata> columnMetadata = new ArrayList<>();
    for (int position = 0; position < columnTypes.size() && schemaIterator.hasNext(); position++) {
      MysqlColumn colInfo = schemaIterator.next();
      ColumnMetadata metadata =
          new ColumnMetadata(
              colInfo.getName(), columnTypes.get(position), colInfo.isPrimaryKey(), position);
      metadata.setRawColumnType(colInfo.getColumnType());
      columnMetadata.add(metadata);
    }

    final List<String> primaryColumns =
        tableSchema
            .stream()
            .filter(MysqlColumn::isPrimaryKey)
            .map(MysqlColumn::getName)
            .collect(Collectors.toList());

    return new Table(
        tableId, tableName, databaseName, overridingDatabase, columnMetadata, primaryColumns);
  }
}
1,952
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/validator/EventOrderValidator.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.validator; import com.airbnb.spinaltap.common.util.Validator; import com.airbnb.spinaltap.mysql.event.BinlogEvent; import java.util.function.Consumer; import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; /** * Represents a {@link Validator} that asserts {@link BinlogEvent}s are streamed in order of event * id (offset). The implement assumes {@code validate} is called on events in the order they are * received. */ @Slf4j @RequiredArgsConstructor public class EventOrderValidator implements Validator<BinlogEvent> { /** The handler to call on {@link BinlogEvent}s that are out of order. */ @NonNull private final Consumer<BinlogEvent> handler; private long lastSeenId = -1; @Override public void validate(@NonNull final BinlogEvent event) { long eventId = event.getOffset(); log.debug("Validating order for event with id {}. {}", eventId, event); if (eventId > 0 && lastSeenId > eventId) { log.warn( "Mutation with id {} is out of order and should precede {}. {}", eventId, lastSeenId, event); handler.accept(event); } lastSeenId = eventId; } @Override public void reset() { lastSeenId = -1; } }
1,953
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/validator/MutationSchemaValidator.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.validator; import com.airbnb.spinaltap.Mutation; import com.airbnb.spinaltap.common.util.Validator; import com.airbnb.spinaltap.mysql.mutation.MysqlMutation; import com.airbnb.spinaltap.mysql.mutation.schema.Row; import java.util.Map; import java.util.function.Consumer; import java.util.stream.Collectors; import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; /** * Represents a {@link Validator} that asserts parity of the {@link * com.airbnb.spinaltap.mysql.mutation.schema.Table} schema with the {@link * com.airbnb.spinaltap.mysql.mutation.schema.Column} schema associated with a {@link MysqlMutation} */ @Slf4j @RequiredArgsConstructor public final class MutationSchemaValidator implements Validator<MysqlMutation> { /** The handler to call on {@link Mutation}s that are invalid. */ @NonNull private final Consumer<Mutation<?>> handler; @Override public void validate(@NonNull final MysqlMutation mutation) { log.debug("Validating schema for mutation: {}", mutation); if (!hasValidSchema(mutation.getRow())) { log.warn("Invalid schema detected for mutation: {}", mutation); handler.accept(mutation); } } private boolean hasValidSchema(final Row row) { return row.getColumns() .entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getMetadata())) .equals(row.getTable().getColumns()); } @Override public void reset() {} }
1,954
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/config/MysqlConfiguration.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.config;

import com.airbnb.spinaltap.common.config.DestinationConfiguration;
import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.binlog_connector.BinaryLogConnectorSource;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.github.shyiko.mysql.binlog.network.SSLMode;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import java.util.List;
import java.util.Map;
import javax.validation.constraints.Max;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotNull;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.Setter;
import lombok.ToString;
import org.hibernate.validator.constraints.NotEmpty;

/** Represents the configuration for a {@link com.airbnb.spinaltap.mysql.MysqlSource} */
@Getter
@Setter
@ToString
@EqualsAndHashCode(callSuper = true)
@JsonIgnoreProperties(ignoreUnknown = true)
public class MysqlConfiguration extends AbstractMysqlConfiguration {
  public static final String TYPE = "mySQL";
  public static final String INSTANCE_TAG = TYPE.toLowerCase();
  public static final HostRole DEFAULT_HOST_ROLE = HostRole.MASTER;
  public static final int DEFAULT_SOCKET_TIMEOUT_IN_SECONDS = 90;
  // NOTE(review): 5672 is conventionally the AMQP/RabbitMQ port, not MySQL's 3306 — confirm
  // this default is intentional for this deployment.
  public static final int DEFAULT_PORT = 5672;
  // -1 means "no override"; a non-default server_id overrides the global config (see serverId).
  public static final int DEFAULT_SERVER_ID = -1;
  public static final boolean DEFAULT_SCHEMA_VERSION_ENABLED = false;
  public static final boolean DEFAULT_LARGE_MESSAGE_ENABLED = false;
  public static final long DEFAULT_DELAY_SEND_MS = 0L;

  /** Destination topic name keyed by the role of the host being streamed from. */
  public static final Map<HostRole, String> MYSQL_TOPICS =
      ImmutableMap.of(
          MysqlConfiguration.HostRole.MASTER, "spinaltap",
          MysqlConfiguration.HostRole.REPLICA, "spinaltap_mysql_replica",
          MysqlConfiguration.HostRole.MIGRATION, "spinaltap_mysql_migration");

  /**
   * Creates a configuration with the given connection parameters.
   *
   * <p>Blank/empty {@code hostRole} and {@code sslMode} leave the field defaults in place
   * ({@link #DEFAULT_HOST_ROLE}, {@link SSLMode#DISABLED}); non-empty values are parsed
   * case-insensitively into the corresponding enum.
   *
   * <p>NOTE(review): the constructor accepts {@code @Min(0)} for port while the field is
   * constrained to {@code @Min(1)} — confirm which bound is intended.
   *
   * @param name the source name
   * @param canonicalTableNames the canonical ("db:table") names of tables to stream
   * @param host the MySQL host to connect to
   * @param hostRole the role of the host (master/replica/migration), may be null/empty
   * @param port the MySQL port
   * @param sslMode the SSL mode name, may be null/empty
   * @param destinationConfiguration the destination (sink) configuration
   */
  public MysqlConfiguration(
      @NonNull final String name,
      @NonNull final List<String> canonicalTableNames,
      @NonNull final String host,
      final String hostRole,
      @Min(0) final int port,
      final String sslMode,
      @NonNull final DestinationConfiguration destinationConfiguration) {
    super(name, TYPE, INSTANCE_TAG, destinationConfiguration);

    this.canonicalTableNames = canonicalTableNames;
    this.host = host;
    this.port = port;
    if (!Strings.isNullOrEmpty(hostRole)) {
      this.hostRole = HostRole.valueOf(hostRole.toUpperCase());
    }
    if (!Strings.isNullOrEmpty(sslMode)) {
      this.sslMode = SSLMode.valueOf(sslMode.toUpperCase());
    }
  }

  /** No-arg constructor for JSON/YAML deserialization; all fields keep their defaults. */
  public MysqlConfiguration() {
    super(TYPE, INSTANCE_TAG);
  }

  // Canonical names of the tables to stream.
  @NotEmpty
  @JsonProperty("tables")
  private List<String> canonicalTableNames;

  @NotNull @JsonProperty private String host;

  @JsonProperty("host_role")
  private HostRole hostRole = DEFAULT_HOST_ROLE;

  @Min(1)
  @Max(65535)
  @JsonProperty
  private int port = DEFAULT_PORT;

  /** Setting a non-default server_id overrides the value in the global config */
  @Min(-1)
  @JsonProperty("server_id")
  private int serverId = DEFAULT_SERVER_ID;

  @JsonProperty("socket_timeout_seconds")
  private int socketTimeoutInSeconds = DEFAULT_SOCKET_TIMEOUT_IN_SECONDS;

  @JsonProperty("schema_version_enabled")
  private boolean schemaVersionEnabled = DEFAULT_SCHEMA_VERSION_ENABLED;

  // Where to start streaming when no saved state exists; defaults to the latest position.
  @JsonProperty("initial_binlog_position")
  private BinlogFilePos initialBinlogFilePosition = BinaryLogConnectorSource.LATEST_BINLOG_POS;

  @JsonProperty("large_message_enabled")
  private boolean largeMessageEnabled = DEFAULT_LARGE_MESSAGE_ENABLED;

  @JsonProperty("delay_send_ms")
  private long delaySendMs = DEFAULT_DELAY_SEND_MS;

  // When set, replaces the source database name on emitted mutations.
  @JsonProperty("overriding_database")
  private String overridingDatabase;

  @JsonProperty("ssl_mode")
  private SSLMode sslMode = SSLMode.DISABLED;

  @JsonProperty("mtls_enabled")
  private boolean mTlsEnabled;

  @Override
  public void setPartitions(int partitions) {
    // We only support 1 partition for mysql sources
  }

  /** Role of the MySQL host being streamed from. */
  public enum HostRole {
    MASTER,
    REPLICA,
    MIGRATION
  }
}
1,955
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/config/MysqlSchemaStoreConfiguration.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.config; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; import javax.validation.constraints.Max; import javax.validation.constraints.Min; import lombok.Data; import lombok.NoArgsConstructor; import lombok.NonNull; /** Represents the configuration for a {@link com.airbnb.spinaltap.mysql.schema.MysqlSchemaStore} */ @Data @NoArgsConstructor @JsonIgnoreProperties(ignoreUnknown = true) public class MysqlSchemaStoreConfiguration { @NonNull @JsonProperty private String host; @Min(0) @Max(65535) @JsonProperty private int port; @JsonProperty("mtls_enabled") private boolean mTlsEnabled; @NonNull @JsonProperty private String database = "schema_store"; @NonNull @JsonProperty("archive-database") private String archiveDatabase = "schema_store_archives"; }
1,956
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/config/AbstractMysqlConfiguration.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.config; import com.airbnb.spinaltap.common.config.DestinationConfiguration; import com.airbnb.spinaltap.common.config.SourceConfiguration; import com.airbnb.spinaltap.mysql.MysqlSource; import java.util.List; import lombok.NonNull; /** Represents the base configuration for a {@link MysqlSource}. */ public abstract class AbstractMysqlConfiguration extends SourceConfiguration { public AbstractMysqlConfiguration( @NonNull final String name, final String type, final String instanceTag, @NonNull final DestinationConfiguration destinationConfiguration) { super(name, type, instanceTag, destinationConfiguration); } public AbstractMysqlConfiguration(final String type, final String instanceTag) { super(type, instanceTag); } public abstract String getHost(); public abstract int getPort(); public abstract List<String> getCanonicalTableNames(); public abstract String getOverridingDatabase(); }
1,957
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/mutation/MysqlKeyProvider.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.mutation; import com.airbnb.spinaltap.Mutation; import com.airbnb.spinaltap.common.util.KeyProvider; import com.airbnb.spinaltap.mysql.mutation.schema.Row; import com.airbnb.spinaltap.mysql.mutation.schema.Table; import com.google.common.base.Preconditions; import lombok.AccessLevel; import lombok.NoArgsConstructor; import lombok.NonNull; /** Represents a {@link KeyProvider} for {@link MysqlMutation}s. */ @NoArgsConstructor(access = AccessLevel.PRIVATE) public class MysqlKeyProvider implements KeyProvider<Mutation<?>, String> { public static final MysqlKeyProvider INSTANCE = new MysqlKeyProvider(); /** * @return the key for a {@link MysqlMutation} in the following format: * "[database_name][table_name][primary_key_value]". */ @Override public String get(@NonNull final Mutation<?> mutation) { Preconditions.checkState(mutation instanceof MysqlMutation); final MysqlMutation mysqlMutation = (MysqlMutation) mutation; final Table table = mysqlMutation.getMetadata().getTable(); final Row row = mysqlMutation.getRow(); return String.format( "%s:%s:%s", table.getDatabase(), table.getName(), row.getPrimaryKeyValue()); } }
1,958
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/mutation
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/mutation/mapper/DeleteMutationMapper.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.mutation.mapper; import com.airbnb.jitney.event.spinaltap.v1.Mutation; import com.airbnb.jitney.event.spinaltap.v1.MutationType; import com.airbnb.spinaltap.mysql.mutation.MysqlDeleteMutation; import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata; import lombok.NonNull; /** * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} that maps a {@link * MysqlDeleteMutation} to its corresponding thrift {@link Mutation} form. */ class DeleteMutationMapper extends ThriftMutationMapper<MysqlDeleteMutation> { public DeleteMutationMapper(final String sourceId) { super(sourceId); } public Mutation map(@NonNull final MysqlDeleteMutation mutation) { final MysqlMutationMetadata metadata = mutation.getMetadata(); return new Mutation( MutationType.DELETE, metadata.getTimestamp(), sourceId, metadata.getDataSource().getThriftDataSource(), createBinlogHeader(metadata, mutation.getType().getCode()), metadata.getTable().getThriftTable(), transformToEntity(mutation.getEntity())); } }
1,959
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/mutation
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/mutation/mapper/UpdateMutationMapper.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.mutation.mapper; import com.airbnb.jitney.event.spinaltap.v1.Mutation; import com.airbnb.jitney.event.spinaltap.v1.MutationType; import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata; import com.airbnb.spinaltap.mysql.mutation.MysqlUpdateMutation; import lombok.NonNull; /** * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} that maps a {@link * MysqlUpdateMutation} to its corresponding thrift {@link Mutation} form. */ class UpdateMutationMapper extends ThriftMutationMapper<MysqlUpdateMutation> { public UpdateMutationMapper(final String sourceId) { super(sourceId); } public Mutation map(@NonNull final MysqlUpdateMutation mutation) { final MysqlMutationMetadata metadata = mutation.getMetadata(); final Mutation thriftMutation = new Mutation( MutationType.UPDATE, metadata.getTimestamp(), sourceId, metadata.getDataSource().getThriftDataSource(), createBinlogHeader(metadata, mutation.getType().getCode()), metadata.getTable().getThriftTable(), transformToEntity(mutation.getRow())); thriftMutation.setPreviousEntity(transformToEntity(mutation.getPreviousRow())); return thriftMutation; } }
1,960
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/mutation
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/mutation/mapper/InsertMutationMapper.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.mutation.mapper; import com.airbnb.jitney.event.spinaltap.v1.Mutation; import com.airbnb.jitney.event.spinaltap.v1.MutationType; import com.airbnb.spinaltap.mysql.mutation.MysqlInsertMutation; import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata; import lombok.NonNull; /** * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} that maps a {@link * MysqlInsertMutation} to its corresponding thrift {@link Mutation} form. */ class InsertMutationMapper extends ThriftMutationMapper<MysqlInsertMutation> { public InsertMutationMapper(String sourceId) { super(sourceId); } public Mutation map(@NonNull final MysqlInsertMutation mutation) { final MysqlMutationMetadata metadata = mutation.getMetadata(); return new Mutation( MutationType.INSERT, metadata.getTimestamp(), sourceId, metadata.getDataSource().getThriftDataSource(), createBinlogHeader(metadata, mutation.getType().getCode()), metadata.getTable().getThriftTable(), transformToEntity(mutation.getEntity())); } }
1,961
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/mutation
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/mutation/mapper/ThriftMutationMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.mutation.mapper;

import com.airbnb.jitney.event.spinaltap.v1.BinlogHeader;
import com.airbnb.jitney.event.spinaltap.v1.Mutation;
import com.airbnb.spinaltap.common.util.ClassBasedMapper;
import com.airbnb.spinaltap.common.util.Mapper;
import com.airbnb.spinaltap.mysql.ColumnSerializationUtil;
import com.airbnb.spinaltap.mysql.GtidSet;
import com.airbnb.spinaltap.mysql.mutation.MysqlDeleteMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlInsertMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata;
import com.airbnb.spinaltap.mysql.mutation.MysqlUpdateMutation;
import com.airbnb.spinaltap.mysql.mutation.schema.Column;
import com.airbnb.spinaltap.mysql.mutation.schema.Row;
import com.google.common.collect.ImmutableMap;
import java.nio.ByteBuffer;
import java.util.Map;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;

/**
 * Base {@link Mapper} implement that maps a {@link MysqlMutation} to its corresponding thrift
 * {@link Mutation} form.
 *
 * @param <T> The {@link MysqlMutation} type.
 */
@RequiredArgsConstructor
public abstract class ThriftMutationMapper<T extends MysqlMutation>
    implements Mapper<T, com.airbnb.jitney.event.spinaltap.v1.Mutation> {
  // Identifier of the source, stamped on every produced thrift mutation.
  protected final String sourceId;

  /**
   * Builds a composite mapper that dispatches on the concrete mutation class
   * (insert/update/delete) to the corresponding thrift mutation mapper.
   *
   * @param sourceId the source identifier shared by all three per-type mappers
   */
  public static Mapper<com.airbnb.spinaltap.Mutation<?>, Mutation> create(final String sourceId) {
    return ClassBasedMapper.<com.airbnb.spinaltap.Mutation<?>, Mutation>builder()
        .addMapper(MysqlInsertMutation.class, new InsertMutationMapper(sourceId))
        .addMapper(MysqlUpdateMutation.class, new UpdateMutationMapper(sourceId))
        .addMapper(MysqlDeleteMutation.class, new DeleteMutationMapper(sourceId))
        .build();
  }

  /**
   * Builds the thrift {@link BinlogHeader} from the mutation metadata: binlog file position,
   * server id/uuid, timestamp, event type code, and — when present — the positions and
   * timestamps of the last and begin transactions (including GTID info).
   */
  protected static BinlogHeader createBinlogHeader(
      @NonNull final MysqlMutationMetadata metadata, final byte typeCode) {
    final BinlogHeader header =
        new BinlogHeader(
            metadata.getFilePos().toString(),
            metadata.getServerId(),
            metadata.getTimestamp(),
            typeCode);

    // Optional: only set when the metadata carries a last-transaction reference.
    if (metadata.getLastTransaction() != null) {
      header.setLastTransactionPos(metadata.getLastTransaction().getPosition().toString());
      header.setLastTransactionTimestamp(metadata.getLastTransaction().getTimestamp());
      GtidSet gtidSet = metadata.getLastTransaction().getPosition().getGtidSet();
      if (gtidSet != null) {
        header.setLastTransactionGtidSet(gtidSet.toString());
      }
    }
    // Optional: only set when the metadata carries a begin-transaction reference.
    if (metadata.getBeginTransaction() != null) {
      header.setBeginTransactionPos(metadata.getBeginTransaction().getPosition().toString());
      header.setBeginTransactionTimestamp(metadata.getBeginTransaction().getTimestamp());
      header.setBeginTransactionGtid(metadata.getBeginTransaction().getGtid());
    }

    header.setServerUuid(metadata.getFilePos().getServerUUID());
    header.setLeaderEpoch(metadata.getLeaderEpoch());
    header.setId(metadata.getId());
    header.setEventRowPosition(metadata.getEventRowPosition());

    return header;
  }

  /**
   * Serializes each column of the row (via {@link ColumnSerializationUtil#serializeColumn}) into
   * an immutable column-name → byte-buffer map, the thrift entity representation.
   */
  protected static Map<String, ByteBuffer> transformToEntity(@NonNull final Row row) {
    final ImmutableMap.Builder<String, ByteBuffer> builder = ImmutableMap.builder();

    for (Column column : row.getColumns().values()) {
      builder.put(
          column.getMetadata().getName(),
          ByteBuffer.wrap(ColumnSerializationUtil.serializeColumn(column)));
    }

    return builder.build();
  }
}
1,962
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/binlog_connector/BinaryLogConnectorEventMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.binlog_connector;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.event.BinlogEvent;
import com.airbnb.spinaltap.mysql.event.DeleteEvent;
import com.airbnb.spinaltap.mysql.event.GTIDEvent;
import com.airbnb.spinaltap.mysql.event.QueryEvent;
import com.airbnb.spinaltap.mysql.event.StartEvent;
import com.airbnb.spinaltap.mysql.event.TableMapEvent;
import com.airbnb.spinaltap.mysql.event.UpdateEvent;
import com.airbnb.spinaltap.mysql.event.WriteEvent;
import com.airbnb.spinaltap.mysql.event.XidEvent;
import com.github.shyiko.mysql.binlog.event.DeleteRowsEventData;
import com.github.shyiko.mysql.binlog.event.Event;
import com.github.shyiko.mysql.binlog.event.EventHeaderV4;
import com.github.shyiko.mysql.binlog.event.EventType;
import com.github.shyiko.mysql.binlog.event.GtidEventData;
import com.github.shyiko.mysql.binlog.event.QueryEventData;
import com.github.shyiko.mysql.binlog.event.TableMapEventData;
import com.github.shyiko.mysql.binlog.event.UpdateRowsEventData;
import com.github.shyiko.mysql.binlog.event.WriteRowsEventData;
import com.github.shyiko.mysql.binlog.event.XidEventData;
import java.util.Optional;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import lombok.NonNull;

/**
 * Represents a mapper that maps a {@link com.github.shyiko.mysql.binlog.event.Event} to a {@link
 * com.airbnb.spinaltap.mysql.event.BinlogEvent}.
 */
@NoArgsConstructor(access = AccessLevel.PRIVATE)
public final class BinaryLogConnectorEventMapper {
  // Stateless singleton; constructor is private via Lombok.
  public static final BinaryLogConnectorEventMapper INSTANCE = new BinaryLogConnectorEventMapper();

  /**
   * Translates a raw binlog-connector event into the corresponding spinaltap event.
   *
   * @param event the raw event from the binlog connector library
   * @param position the binlog file position the event was read at
   * @return the mapped {@link BinlogEvent}, or {@link Optional#empty()} for event types that
   *     spinaltap does not consume
   */
  public Optional<BinlogEvent> map(
      @NonNull final Event event, @NonNull final BinlogFilePos position) {
    final EventHeaderV4 header = event.getHeader();
    final EventType eventType = header.getEventType();
    final long serverId = header.getServerId();
    final long timestamp = header.getTimestamp();

    // Row events come in multiple variants (e.g. v1/v2), so match by category first.
    if (EventType.isWrite(eventType)) {
      final WriteRowsEventData data = event.getData();
      return Optional.of(
          new WriteEvent(data.getTableId(), serverId, timestamp, position, data.getRows()));
    } else if (EventType.isUpdate(eventType)) {
      final UpdateRowsEventData data = event.getData();
      return Optional.of(
          new UpdateEvent(data.getTableId(), serverId, timestamp, position, data.getRows()));
    } else if (EventType.isDelete(eventType)) {
      final DeleteRowsEventData data = event.getData();
      return Optional.of(
          new DeleteEvent(data.getTableId(), serverId, timestamp, position, data.getRows()));
    } else {
      // Non-row events are matched by exact type; anything unlisted is dropped.
      switch (eventType) {
        case TABLE_MAP:
          TableMapEventData tableMapData = event.getData();
          return Optional.of(
              new TableMapEvent(
                  tableMapData.getTableId(),
                  serverId,
                  timestamp,
                  position,
                  tableMapData.getDatabase(),
                  tableMapData.getTable(),
                  tableMapData.getColumnTypes()));
        case XID:
          final XidEventData xidData = event.getData();
          return Optional.of(new XidEvent(serverId, timestamp, position, xidData.getXid()));
        case GTID:
          final GtidEventData gtidEventData = event.getData();
          return Optional.of(new GTIDEvent(serverId, timestamp, position, gtidEventData.getGtid()));
        case QUERY:
          final QueryEventData queryData = event.getData();
          return Optional.of(
              new QueryEvent(
                  serverId, timestamp, position, queryData.getDatabase(), queryData.getSql()));
        case FORMAT_DESCRIPTION:
          // Marks the start of a binlog file.
          return Optional.of(new StartEvent(serverId, timestamp, position));
        default:
          return Optional.empty();
      }
    }
  }
}
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/binlog_connector/BinaryLogConnectorSource.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.binlog_connector;

import com.airbnb.spinaltap.common.config.TlsConfiguration;
import com.airbnb.spinaltap.common.source.MysqlSourceState;
import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.DataSource;
import com.airbnb.spinaltap.mysql.MysqlClient;
import com.airbnb.spinaltap.mysql.MysqlSource;
import com.airbnb.spinaltap.mysql.MysqlSourceMetrics;
import com.airbnb.spinaltap.mysql.StateHistory;
import com.airbnb.spinaltap.mysql.StateRepository;
import com.airbnb.spinaltap.mysql.TableCache;
import com.airbnb.spinaltap.mysql.config.MysqlConfiguration;
import com.airbnb.spinaltap.mysql.exception.InvalidBinlogPositionException;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManager;
import com.github.shyiko.mysql.binlog.BinaryLogClient;
import com.github.shyiko.mysql.binlog.event.Event;
import com.github.shyiko.mysql.binlog.event.EventHeaderV4;
import com.github.shyiko.mysql.binlog.network.DefaultSSLSocketFactory;
import com.google.common.base.Preconditions;
import java.net.Socket;
import java.security.GeneralSecurityException;
import java.util.HashSet;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import javax.net.ssl.SSLContext;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;

/**
 * Represents a {@link MysqlSource} implement based on open-source library <a
 * href="https://github.com/shyiko/mysql-binlog-connector-java">.
 */
@Slf4j
public final class BinaryLogConnectorSource extends MysqlSource {
  // MySQL error code prefix reported when the requested binlog position is invalid.
  private static final String INVALID_BINLOG_POSITION_ERROR_CODE = "1236";

  @NonNull private final BinaryLogClient binlogClient;
  @NonNull private final MysqlClient mysqlClient;

  // UUID of the MySQL server, fetched once at construction; used to stamp file positions
  // and to decide whether a saved file/offset is still usable in GTID mode.
  private final String serverUUID;

  public BinaryLogConnectorSource(
      @NonNull final String name,
      @NonNull final MysqlConfiguration config,
      final TlsConfiguration tlsConfig,
      @NonNull final BinaryLogClient binlogClient,
      @NonNull final MysqlClient mysqlClient,
      @NonNull final TableCache tableCache,
      @NonNull final StateRepository<MysqlSourceState> stateRepository,
      @NonNull final StateHistory<MysqlSourceState> stateHistory,
      @NonNull final MysqlSchemaManager schemaManager,
      @NonNull final MysqlSourceMetrics metrics,
      @NonNull final AtomicLong currentLeaderEpoch) {
    super(
        name,
        new DataSource(config.getHost(), config.getPort(), name),
        new HashSet<>(config.getCanonicalTableNames()),
        tableCache,
        stateRepository,
        stateHistory,
        config.getInitialBinlogFilePosition(),
        schemaManager,
        metrics,
        currentLeaderEpoch,
        new AtomicReference<>(),
        new AtomicReference<>());

    this.binlogClient = binlogClient;
    this.mysqlClient = mysqlClient;
    this.serverUUID = mysqlClient.getServerUUID();
    initializeClient(config, tlsConfig);
  }

  /** Initializes the {@link BinaryLogClient}. */
  private void initializeClient(final MysqlConfiguration config, final TlsConfiguration tlsConfig) {
    // Name the client thread after the source and endpoint for easier debugging.
    binlogClient.setThreadFactory(
        runnable ->
            new Thread(
                runnable,
                String.format(
                    "binlog-client-%s-%s-%d",
                    name, getDataSource().getHost(), getDataSource().getPort())));

    // Custom socket factory so a read timeout can be applied when configured (> 0).
    binlogClient.setSocketFactory(
        () -> {
          Socket socket = new Socket();
          try {
            if (config.getSocketTimeoutInSeconds() > 0) {
              socket.setSoTimeout(config.getSocketTimeoutInSeconds() * 1000);
            }
          } catch (Exception ex) {
            throw new RuntimeException(ex);
          }
          return socket;
        });

    binlogClient.setSSLMode(config.getSslMode());
    binlogClient.setKeepAlive(false);
    binlogClient.registerEventListener(new BinlogEventListener());
    binlogClient.registerLifecycleListener(new BinlogClientLifeCycleListener());

    // When mTLS is enabled, initialize the SSL context with the configured key/trust managers.
    if (config.isMTlsEnabled() && tlsConfig != null) {
      binlogClient.setSslSocketFactory(
          new DefaultSSLSocketFactory() {
            @Override
            protected void initSSLContext(SSLContext sc) throws GeneralSecurityException {
              try {
                sc.init(tlsConfig.getKeyManagers(), tlsConfig.getTrustManagers(), null);
              } catch (Exception ex) {
                log.error("Failed to initialize SSL Context for mTLS.", ex);
                throw new RuntimeException(ex);
              }
            }
          });
    }
  }

  @Override
  protected void connect() throws Exception {
    binlogClient.connect();
  }

  @Override
  protected void disconnect() throws Exception {
    binlogClient.disconnect();
  }

  @Override
  protected boolean isConnected() {
    return binlogClient.isConnected();
  }

  /**
   * Positions the binlog client at the given location.
   *
   * <p>Without GTID mode (or when the position carries no GTID set and is an explicit
   * file/offset), the plain binlog filename and offset are used. With GTID mode, the client is
   * positioned by GTID set; the saved file/offset is additionally reused only when it was
   * recorded against this same server UUID.
   */
  @Override
  public void setPosition(@NonNull final BinlogFilePos pos) {
    if (!mysqlClient.isGtidModeEnabled()
        || (pos.getGtidSet() == null
            && pos != MysqlSource.EARLIEST_BINLOG_POS
            && pos != MysqlSource.LATEST_BINLOG_POS)) {
      log.info("Setting binlog position for source {} to {}", name, pos);
      binlogClient.setBinlogFilename(pos.getFileName());
      binlogClient.setBinlogPosition(pos.getNextPosition());
    } else {
      // GTID mode is enabled
      if (pos == MysqlSource.EARLIEST_BINLOG_POS) {
        log.info("Setting binlog position for source {} to earliest available GTIDSet", name);
        // Empty GTID set + fallback-to-purged starts from the earliest the server still has.
        binlogClient.setGtidSet("");
        binlogClient.setGtidSetFallbackToPurged(true);
      } else if (pos == MysqlSource.LATEST_BINLOG_POS) {
        BinlogFilePos currentPos = mysqlClient.getMasterStatus();
        String gtidSet = currentPos.getGtidSet().toString();
        log.info("Setting binlog position for source {} to GTIDSet {}", name, gtidSet);
        binlogClient.setGtidSet(gtidSet);
      } else {
        String gtidSet = pos.getGtidSet().toString();
        log.info("Setting binlog position for source {} to GTIDSet {}", name, gtidSet);
        binlogClient.setGtidSet(gtidSet);
        // Only trust the saved file/offset when it came from this very server instance.
        if (serverUUID != null && serverUUID.equalsIgnoreCase(pos.getServerUUID())) {
          binlogClient.setBinlogFilename(pos.getFileName());
          binlogClient.setBinlogPosition(pos.getNextPosition());
          binlogClient.setUseBinlogFilenamePositionInGtidMode(true);
        }
      }
    }
  }

  /** Receives raw connector events, wraps them with position info, and forwards upstream. */
  private final class BinlogEventListener implements BinaryLogClient.EventListener {
    public void onEvent(Event event) {
      Preconditions.checkState(isStarted(), "Source is not started and should not process events");

      final EventHeaderV4 header = event.getHeader();
      final BinlogFilePos filePos =
          new BinlogFilePos(
              binlogClient.getBinlogFilename(),
              header.getPosition(),
              header.getNextPosition(),
              binlogClient.getGtidSet(),
              serverUUID);

      BinaryLogConnectorEventMapper.INSTANCE
          .map(event, filePos)
          .ifPresent(BinaryLogConnectorSource.super::processEvent);
    }
  }

  /**
   * Lifecycle listener methods are called synchronized in BinaryLogClient. We should not enter
   * critical sections in SpinalTap code path to avoid deadlocks
   */
  private final class BinlogClientLifeCycleListener implements BinaryLogClient.LifecycleListener {
    public void onConnect(BinaryLogClient client) {
      log.info("Connected to source {}.", name);
      metrics.clientConnected();
    }

    public void onCommunicationFailure(BinaryLogClient client, Exception ex) {
      log.error(
          String.format(
              "Communication failure from source %s, binlogFile=%s, binlogPos=%s",
              name, client.getBinlogFilename(), client.getBinlogPosition()),
          ex);

      // Rewrap error 1236 so upstream handling can recover from an invalid position.
      if (ex.getMessage().startsWith(INVALID_BINLOG_POSITION_ERROR_CODE)) {
        ex =
            new InvalidBinlogPositionException(
                String.format(
                    "Invalid position %s in binlog file %s",
                    client.getBinlogPosition(), client.getBinlogFilename()));
      }
      onCommunicationError(ex);
    }

    public void onEventDeserializationFailure(BinaryLogClient client, Exception ex) {
      log.error(
          String.format(
              "Deserialization failure from source %s, BinlogFile=%s, binlogPos=%s",
              name, client.getBinlogFilename(), client.getBinlogPosition()),
          ex);
      onDeserializationError(ex);
    }

    public void onDisconnect(BinaryLogClient client) {
      log.info(
          "Disconnected from source {}. BinlogFile={}, binlogPos={}",
          name,
          client.getBinlogFilename(),
          client.getBinlogPosition());
      metrics.clientDisconnected();
      started.set(false);
    }
  }
}
1,964
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaStore.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.schema;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.GtidSet;
import com.airbnb.spinaltap.mysql.MysqlSourceMetrics;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.collect.Table;
import com.google.common.collect.Tables;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.logging.log4j.util.Strings;
import org.jdbi.v3.core.Handle;
import org.jdbi.v3.core.Jdbi;
import org.jdbi.v3.core.mapper.RowMapper;
import org.jdbi.v3.core.statement.PreparedBatch;
import org.jdbi.v3.core.statement.StatementContext;

/**
 * MySQL-backed store of table schema versions, one row per schema change, keyed by binlog
 * position/GTID. Maintains an in-memory (database, table) -> schema cache of the versions
 * currently in effect.
 */
@Slf4j
@RequiredArgsConstructor
public class MysqlSchemaStore {
  private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
  // DDL for the per-source schema-store table; one such table per source name.
  private static final String CREATE_SCHEMA_STORE_TABLE_QUERY =
      "CREATE TABLE IF NOT EXISTS `%s`.`%s` ("
          + "`id` bigint(20) NOT NULL AUTO_INCREMENT,"
          + "`database` varchar(255),"
          + "`table` varchar(255),"
          + "`binlog_file_position` varchar(255) NOT NULL,"
          + "`server_uuid` varchar(255),"
          + "`gtid_set` text,"
          + "`gtid` varchar(255),"
          + "`columns` text,"
          + "`sql` text,"
          + "`meta_data` text DEFAULT NULL,"
          + "`timestamp` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,"
          + " PRIMARY KEY (`id`),"
          + " KEY `binlog_file_position_index` (`binlog_file_position`),"
          + " KEY `gtid_index` (`gtid`)"
          + ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin";
  private static final String PUT_SCHEMA_QUERY =
      "INSERT INTO `%s`.`%s`"
          + " (`database`, `table`, `binlog_file_position`, `server_uuid`, `gtid_set`, `gtid`, `columns`, `sql`, `meta_data`, `timestamp`)"
          + " VALUES (:database, :table, :binlog_file_position, :server_uuid, :gtid_set, :gtid, :columns, :sql, :meta_data, :timestamp)";

  // Source name; also the name of the backing table inside storeDBName.
  private final String sourceName;
  private final String storeDBName;
  private final String archiveDBName;
  private final Jdbi jdbi;
  private final MysqlSourceMetrics metrics;

  // Schema cache should always reflect the schema we currently need
  @Getter
  private final Table<String, String, MysqlTableSchema> schemaCache =
      Tables.newCustomTable(Maps.newHashMap(), Maps::newHashMap);

  /** @return true when the backing schema-store table for this source already exists. */
  public boolean isCreated() {
    return jdbi.withHandle(
            handle ->
                handle
                    .createQuery(
                        "SELECT TABLE_NAME FROM information_schema.tables WHERE table_schema = :db AND table_name = :table")
                    .bind("db", storeDBName)
                    .bind("table", sourceName)
                    .mapTo(String.class)
                    .findFirst())
        .isPresent();
  }

  /**
   * Rebuilds the in-memory cache by replaying all stored schema versions up to (and including)
   * the given binlog position.
   */
  public void loadSchemaCacheUntil(BinlogFilePos pos) {
    schemaCache.clear();
    for (MysqlTableSchema schema : getAllSchemas()) {
      // Rows are ordered by id ascending; stop at the first schema past the target position.
      if (schema.getBinlogFilePos().compareTo(pos) > 0) {
        break;
      }
      updateSchemaCache(schema);
    }
  }

  /** @return every stored schema version for this source, ordered by insertion id. */
  @VisibleForTesting
  List<MysqlTableSchema> getAllSchemas() {
    return jdbi.withHandle(
        handle ->
            handle
                .createQuery(
                    String.format(
                        "SELECT * FROM `%s`.`%s` ORDER BY id ASC", storeDBName, sourceName))
                .map(MysqlTableSchemaMapper.INSTANCE)
                .list());
  }

  /**
   * Returns the cached schema for the given database/table.
   *
   * @throws RuntimeException when no schema is cached for that table (also recorded in metrics)
   */
  public MysqlTableSchema get(String database, String table) {
    if (schemaCache.contains(database, table)) {
      metrics.schemaStoreGetSuccess(database, table);
      return schemaCache.get(database, table);
    } else {
      RuntimeException ex =
          new RuntimeException(
              String.format("No schema found for database: %s table: %s", database, table));
      metrics.schemaStoreGetFailure(database, table, ex);
      throw ex;
    }
  }

  public void put(MysqlTableSchema schema) {
    try (Handle handle =
jdbi.open()) { MysqlSchemaUtil.VOID_RETRYER.call( () -> { GtidSet gtidSet = schema.getBinlogFilePos().getGtidSet(); long id = handle .createUpdate(String.format(PUT_SCHEMA_QUERY, storeDBName, sourceName)) .bind("database", schema.getDatabase()) .bind("table", schema.getTable()) .bind("binlog_file_position", schema.getBinlogFilePos().toString()) .bind("server_uuid", schema.getBinlogFilePos().getServerUUID()) .bind("gtid_set", gtidSet == null ? null : gtidSet.toString()) .bind("gtid", schema.getGtid()) .bind("columns", OBJECT_MAPPER.writeValueAsString(schema.getColumns())) .bind("sql", schema.getSql()) .bind("meta_data", OBJECT_MAPPER.writeValueAsString(schema.getMetadata())) .bind("timestamp", new Timestamp(schema.getTimestamp())) .executeAndReturnGeneratedKeys("id") .mapTo(Long.class) .one(); // MysqlTableSchema is immutable so we have to create a new one and update cache updateSchemaCache( new MysqlTableSchema( id, schema.getDatabase(), schema.getTable(), schema.getBinlogFilePos(), schema.getGtid(), schema.getSql(), schema.getTimestamp(), schema.getColumns(), schema.getMetadata())); metrics.schemaStorePutSuccess(schema.getDatabase(), schema.getTable()); return null; }); } catch (Exception ex) { log.error("Failed to put table schema {}. 
Exception: {}", schema, ex.toString()); metrics.schemaStorePutFailure(schema.getDatabase(), schema.getTable(), ex); throw new RuntimeException(ex); } } public void bootstrap(List<MysqlTableSchema> schemas) { try (Handle handle = jdbi.open()) { MysqlSchemaUtil.VOID_RETRYER.call( () -> { handle.execute(String.format(CREATE_SCHEMA_STORE_TABLE_QUERY, storeDBName, sourceName)); PreparedBatch batch = handle.prepareBatch(String.format(PUT_SCHEMA_QUERY, storeDBName, sourceName)); for (MysqlTableSchema schema : schemas) { GtidSet gtidSet = schema.getBinlogFilePos().getGtidSet(); batch .bind("database", schema.getDatabase()) .bind("table", schema.getTable()) .bind("binlog_file_position", schema.getBinlogFilePos().toString()) .bind("server_uuid", schema.getBinlogFilePos().getServerUUID()) .bind("gtid_set", gtidSet == null ? null : gtidSet.toString()) .bind("gtid", schema.getGtid()) .bind("columns", OBJECT_MAPPER.writeValueAsString(schema.getColumns())) .bind("sql", schema.getSql()) .bind("meta_data", OBJECT_MAPPER.writeValueAsString(schema.getMetadata())) .bind("timestamp", new Timestamp(schema.getTimestamp())) .add(); } batch.execute(); getAllSchemas().forEach(this::updateSchemaCache); return null; }); } catch (Exception ex) { log.error( String.format("Failed to bootstrap schema store for %s. exception: %s", sourceName, ex)); throw new RuntimeException(ex); } } public List<MysqlTableSchema> queryByBinlogFilePos(BinlogFilePos pos) { Preconditions.checkNotNull(pos, "BinlogFilePos cannot be null"); try (Handle handle = jdbi.open()) { return MysqlSchemaUtil.LIST_TABLE_SCHEMA_RETRYER.call( () -> handle .createQuery( String.format( "SELECT * FROM `%s`.`%s` WHERE binlog_file_position = :pos", storeDBName, sourceName)) .bind("pos", pos.toString()) .map(MysqlTableSchemaMapper.INSTANCE) .list()); } catch (Exception ex) { log.error( String.format("Failed to query table schema by binlog pos: %s. 
Exception: %s", pos, ex)); throw new RuntimeException(ex); } } public List<MysqlTableSchema> queryByGTID(String gtid) { Preconditions.checkArgument(Strings.isNotBlank(gtid), "GTID cannot be empty"); try (Handle handle = jdbi.open()) { return handle .createQuery( String.format("SELECT * FROM `%s`.`%s` WHERE gtid = :gtid", storeDBName, sourceName)) .bind("gtid", gtid) .map(MysqlTableSchemaMapper.INSTANCE) .list(); } catch (Exception ex) { log.error(String.format("Failed to query table schema by GTID: %s. Exception: %s", gtid, ex)); throw new RuntimeException(ex); } } public void archive() { if (!isCreated()) { log.error("Schema store for {} is not created.", sourceName); return; } String archiveTableName = String.format( "%s_%s", sourceName, new SimpleDateFormat("yyyyMMddHHmmss").format(new java.util.Date())); jdbi.useHandle( handle -> handle.execute( String.format( "RENAME TABLE `%s`.`%s` TO `%s`.`%s`", storeDBName, sourceName, archiveDBName, archiveTableName))); schemaCache.clear(); } public void compress(BinlogFilePos earliestPos) { deleteSchemas(getRowIdsToDelete(earliestPos)); } @VisibleForTesting Set<Long> getRowIdsToDelete(BinlogFilePos earliestPos) { Table<String, String, List<MysqlTableSchema>> allSchemas = Tables.newCustomTable(Maps.newHashMap(), Maps::newHashMap); Set<Long> rowIdsToDelete = new HashSet<>(); getAllSchemas() .forEach( schema -> { String database = schema.getDatabase(); String table = schema.getTable(); if (database == null || table == null) { if (schema.getBinlogFilePos().compareTo(earliestPos) < 0) { rowIdsToDelete.add(schema.getId()); } } else { if (!allSchemas.contains(database, table)) { allSchemas.put(database, table, new LinkedList<>()); } allSchemas.get(database, table).add(schema); } }); for (List<MysqlTableSchema> schemas : allSchemas.values()) { for (MysqlTableSchema schema : schemas) { if (schema.getBinlogFilePos().compareTo(earliestPos) >= 0) { break; } if (!schema.equals(schemaCache.get(schema.getDatabase(), schema.getTable()))) 
{ rowIdsToDelete.add(schema.getId()); } } } return rowIdsToDelete; } private void deleteSchemas(Collection<Long> ids) { log.info("Deleting {} rows from schema store. IDS: {}", ids.size(), ids); jdbi.useHandle( handle -> handle .createUpdate( String.format( "DELETE FROM `%s`.`%s` WHERE id IN (<rowIdsToDelete>)", storeDBName, sourceName)) .bindList("rowIdsToDelete", ids) .execute()); } void updateSchemaCache(MysqlTableSchema schema) { String database = schema.getDatabase(); String table = schema.getTable(); if (database == null || table == null) { return; } if (!schema.getColumns().isEmpty()) { schemaCache.put(database, table, schema); } else if (schemaCache.contains(database, table)) { schemaCache.remove(database, table); } } private static class MysqlTableSchemaMapper implements RowMapper<MysqlTableSchema> { public static MysqlTableSchemaMapper INSTANCE = new MysqlTableSchemaMapper(); @Override public MysqlTableSchema map(ResultSet rs, StatementContext ctx) throws SQLException { BinlogFilePos pos = BinlogFilePos.fromString(rs.getString("binlog_file_position")); pos.setServerUUID(rs.getString("server_uuid")); String gtidSet = rs.getString("gtid_set"); if (gtidSet != null) { pos.setGtidSet(new GtidSet(gtidSet)); } List<MysqlColumn> columns = Collections.emptyList(); Map<String, String> metadata = Collections.emptyMap(); String columnsStr = rs.getString("columns"); if (columnsStr != null) { try { columns = OBJECT_MAPPER.readValue(columnsStr, new TypeReference<List<MysqlColumn>>() {}); } catch (IOException ex) { log.error( String.format("Failed to deserialize columns %s. exception: %s", columnsStr, ex)); } } String metadataStr = rs.getString("meta_data"); if (metadataStr != null) { try { metadata = OBJECT_MAPPER.readValue(metadataStr, new TypeReference<Map<String, String>>() {}); } catch (IOException ex) { log.error( String.format("Failed to deserialize metadata %s. 
exception: %s", metadataStr, ex)); throw new RuntimeException(ex); } } return new MysqlTableSchema( rs.getLong("id"), rs.getString("database"), rs.getString("table"), pos, rs.getString("gtid"), rs.getString("sql"), rs.getTimestamp("timestamp").getTime(), columns, metadata); } } }
1,965
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaManagerFactory.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.schema; import com.airbnb.common.metrics.TaggedMetricRegistry; import com.airbnb.spinaltap.common.config.TlsConfiguration; import com.airbnb.spinaltap.mysql.MysqlClient; import com.airbnb.spinaltap.mysql.MysqlSourceMetrics; import com.airbnb.spinaltap.mysql.config.MysqlSchemaStoreConfiguration; import org.jdbi.v3.core.Jdbi; public class MysqlSchemaManagerFactory { private final String username; private final String password; private final MysqlSchemaStoreConfiguration configuration; private final TlsConfiguration tlsConfiguration; private Jdbi jdbi; public MysqlSchemaManagerFactory( final String username, final String password, final MysqlSchemaStoreConfiguration configuration, final TlsConfiguration tlsConfiguration) { this.username = username; this.password = password; this.configuration = configuration; this.tlsConfiguration = tlsConfiguration; if (configuration != null) { jdbi = Jdbi.create( MysqlClient.createMysqlDataSource( configuration.getHost(), configuration.getPort(), username, password, configuration.isMTlsEnabled(), tlsConfiguration)); jdbi.useHandle( handle -> { handle.execute( String.format("CREATE DATABASE IF NOT EXISTS `%s`", configuration.getDatabase())); handle.execute( String.format( "CREATE DATABASE IF NOT EXISTS `%s`", configuration.getArchiveDatabase())); }); } } public MysqlSchemaManager create( String sourceName, MysqlClient mysqlClient, boolean isSchemaVersionEnabled, MysqlSourceMetrics metrics) { MysqlSchemaReader schemaReader = new MysqlSchemaReader(sourceName, mysqlClient.getJdbi(), metrics); if (!isSchemaVersionEnabled) { return new MysqlSchemaManager(sourceName, null, null, schemaReader, mysqlClient, false); } MysqlSchemaStore schemaStore = new MysqlSchemaStore( sourceName, configuration.getDatabase(), configuration.getArchiveDatabase(), jdbi, metrics); MysqlSchemaDatabase 
schemaDatabase = new MysqlSchemaDatabase(sourceName, jdbi, metrics); return new MysqlSchemaManager( sourceName, schemaStore, schemaDatabase, schemaReader, mysqlClient, true); } public MysqlSchemaArchiver createArchiver(String sourceName) { MysqlSourceMetrics metrics = new MysqlSourceMetrics(sourceName, new TaggedMetricRegistry()); Jdbi jdbi = Jdbi.create( MysqlClient.createMysqlDataSource( configuration.getHost(), configuration.getPort(), username, password, configuration.isMTlsEnabled(), tlsConfiguration)); MysqlSchemaStore schemaStore = new MysqlSchemaStore( sourceName, configuration.getDatabase(), configuration.getArchiveDatabase(), jdbi, metrics); MysqlSchemaDatabase schemaDatabase = new MysqlSchemaDatabase(sourceName, jdbi, metrics); return new MysqlSchemaManager(sourceName, schemaStore, schemaDatabase, null, null, true); } }
1,966
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaUtil.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.schema; import com.github.rholder.retry.Retryer; import com.github.rholder.retry.RetryerBuilder; import com.github.rholder.retry.StopStrategies; import com.github.rholder.retry.WaitStrategies; import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; import java.util.List; import java.util.concurrent.TimeUnit; import lombok.NonNull; import lombok.experimental.UtilityClass; import lombok.extern.slf4j.Slf4j; import org.jdbi.v3.core.Handle; import org.joda.time.DateTimeConstants; @Slf4j @UtilityClass public class MysqlSchemaUtil { public final Retryer<Void> VOID_RETRYER = createRetryer(); public final Retryer<List<MysqlColumn>> LIST_COLUMN_RETRYER = createRetryer(); public final Retryer<List<MysqlTableSchema>> LIST_TABLE_SCHEMA_RETRYER = createRetryer(); public final Retryer<List<String>> LIST_STRING_RETRYER = createRetryer(); public void executeWithJdbc( @NonNull final Handle handle, final String database, @NonNull final String sql) throws SQLException { // Use JDBC API to excute raw SQL without any return value and no binding in SQL statement, so // we don't need to escape colon(:) // SQL statement with colon(:) inside needs to be escaped if using JDBI Handle.execute(sql) Connection connection = handle.getConnection(); if (database != null) { connection.setCatalog(database); } Statement statement = connection.createStatement(); statement.execute(sql); } private <T> Retryer<T> createRetryer() { return RetryerBuilder.<T>newBuilder() .retryIfRuntimeException() .withWaitStrategy(WaitStrategies.exponentialWait(2, 30, TimeUnit.SECONDS)) .withStopStrategy(StopStrategies.stopAfterDelay(3 * DateTimeConstants.MILLIS_PER_MINUTE)) .build(); } public String escapeBackQuote(@NonNull final String name) { // MySQL allows backquote in database/table name, but need to escape it in DDL return 
name.replace("`", "``"); } String removeCommentsFromDDL(final String ddl) { return ddl // https://dev.mysql.com/doc/refman/5.7/en/comments.html // Replace MySQL-specific comments (/*! ... */ and /*!50110 ... */) which // are actually executed .replaceAll("/\\*!(?:\\d{5})?(.*?)\\*/", "$1") // Remove block comments // https://stackoverflow.com/questions/13014947/regex-to-match-a-c-style-multiline-comment // line comments and newlines are kept // Note: This does not handle comments in quotes .replaceAll("/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/", " ") // Remove extra spaces .replaceAll("\\h+", " ") .replaceAll("^\\s+", ""); } }
1,967
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaArchiver.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.schema; public interface MysqlSchemaArchiver { void archive(); }
1,968
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaReader.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.schema; import com.airbnb.spinaltap.mysql.MysqlSourceMetrics; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.List; import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.jdbi.v3.core.Handle; import org.jdbi.v3.core.Jdbi; @Slf4j @RequiredArgsConstructor public class MysqlSchemaReader { private final String sourceName; private final Jdbi jdbi; private final MysqlSourceMetrics metrics; List<String> getAllDatabases() { try (Handle handle = jdbi.open()) { return MysqlSchemaUtil.LIST_STRING_RETRYER.call( () -> handle .createQuery("select SCHEMA_NAME from information_schema.SCHEMATA") .mapTo(String.class) .list()); } catch (Exception ex) { log.error(String.format("Failed to get all databases on %s, exception: %s", sourceName, ex)); throw new RuntimeException(ex); } } List<String> getAllTablesIn(String database) { try (Handle handle = jdbi.open()) { return MysqlSchemaUtil.LIST_STRING_RETRYER.call( () -> handle .createQuery( "select TABLE_NAME from information_schema.TABLES where TABLE_SCHEMA = :db and TABLE_TYPE = 'BASE TABLE'") .bind("db", database) .mapTo(String.class) .list()); } catch (Exception ex) { log.error( String.format( "Failed to get all tables in database %s on %s, exception: %s", database, sourceName, ex)); throw new RuntimeException(ex); } } public List<MysqlColumn> getTableColumns(@NonNull String database, @NonNull String table) { try (Handle handle = jdbi.open()) { List<MysqlColumn> columns = MysqlSchemaUtil.LIST_COLUMN_RETRYER.call( () -> handle .createQuery( "select COLUMN_NAME, DATA_TYPE, COLUMN_TYPE, COLUMN_KEY from information_schema.COLUMNS " + "where TABLE_SCHEMA = :db and TABLE_NAME = :table " + "order by ORDINAL_POSITION") .bind("db", database) .bind("table", table) .map( (rs, ctx) -> new 
MysqlColumn( rs.getString("COLUMN_NAME"), rs.getString("DATA_TYPE"), rs.getString("COLUMN_TYPE"), "PRI".equals(rs.getString("COLUMN_KEY")))) .list()); metrics.schemaStoreGetSuccess(database, table); return columns; } catch (Exception ex) { log.error(String.format("Failed to fetch schema for table %s, db %s", table, database), ex); metrics.schemaStoreGetFailure(database, table, ex); throw new RuntimeException(ex); } } String getCreateTableDDL(@NonNull String database, @NonNull String table) { return jdbi.withHandle( handle -> { try { Statement statement = handle.getConnection().createStatement(); statement.execute( String.format( "SHOW CREATE TABLE `%s`.`%s`", MysqlSchemaUtil.escapeBackQuote(database), MysqlSchemaUtil.escapeBackQuote(table))); ResultSet resultSet = statement.getResultSet(); resultSet.first(); return resultSet.getString(2); } catch (SQLException ex) { log.error( String.format("Failed to get DDL for database: %s table: %s.", database, table), ex); throw new RuntimeException(ex); } }); } }
1,969
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaManager.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.schema; import com.airbnb.spinaltap.mysql.BinlogFilePos; import com.airbnb.spinaltap.mysql.GtidSet; import com.airbnb.spinaltap.mysql.MysqlClient; import com.airbnb.spinaltap.mysql.event.QueryEvent; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; import java.util.regex.Pattern; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; @Slf4j @RequiredArgsConstructor public class MysqlSchemaManager implements MysqlSchemaArchiver { private static final Set<String> SYSTEM_DATABASES = ImmutableSet.of("mysql", "information_schema", "performance_schema", "sys"); private static final Pattern DATABASE_DDL_SQL_PATTERN = Pattern.compile("^(CREATE|DROP)\\s+(DATABASE|SCHEMA)", Pattern.CASE_INSENSITIVE); private static final Pattern TABLE_DDL_SQL_PATTERN = Pattern.compile("^(ALTER|CREATE|DROP|RENAME)\\s+TABLE", Pattern.CASE_INSENSITIVE); private static final Pattern INDEX_DDL_SQL_PATTERN = Pattern.compile( "^((CREATE(\\s+(UNIQUE|FULLTEXT|SPATIAL))?)|DROP)\\s+INDEX", Pattern.CASE_INSENSITIVE); private static final Pattern GRANT_DDL_SQL_PATTERN = Pattern.compile("^GRANT\\s+", Pattern.CASE_INSENSITIVE); private final String sourceName; private final MysqlSchemaStore schemaStore; private final MysqlSchemaDatabase schemaDatabase; private final MysqlSchemaReader schemaReader; private final MysqlClient mysqlClient; private final boolean isSchemaVersionEnabled; public List<MysqlColumn> getTableColumns(String database, String table) { return isSchemaVersionEnabled ? 
schemaStore.get(database, table).getColumns() : schemaReader.getTableColumns(database, table); } public void processDDL(QueryEvent event, String gtid) { String sql = event.getSql(); BinlogFilePos pos = event.getBinlogFilePos(); String database = event.getDatabase(); if (!isSchemaVersionEnabled) { if (isDDLGrant(sql)) { log.info("Skip processing a Grant DDL because schema versioning is not enabled."); } else { log.info("Skip processing DDL {} because schema versioning is not enabled.", sql); } return; } if (!shouldProcessDDL(sql)) { if (isDDLGrant(sql)) { log.info("Not processing a Grant DDL because it is not our interest."); } else { log.info("Not processing DDL {} because it is not our interest.", sql); } return; } // Check if this schema change was processed before List<MysqlTableSchema> schemas = gtid == null ? schemaStore.queryByBinlogFilePos(pos) : schemaStore.queryByGTID(gtid); if (!schemas.isEmpty()) { log.info("DDL {} is already processed at BinlogFilePos: {}, GTID: {}", sql, pos, gtid); schemas.forEach(schemaStore::updateSchemaCache); return; } // It could be a new database which has not been created in schema store database, so don't // switch to any database before applying database DDL. schemaDatabase.applyDDL(sql, DATABASE_DDL_SQL_PATTERN.matcher(sql).find() ? 
null : database); // See what changed, check database by database Set<String> databasesInSchemaStore = ImmutableSet.copyOf(schemaStore.getSchemaCache().rowKeySet()); Set<String> databasesInSchemaDatabase = ImmutableSet.copyOf(schemaDatabase.listDatabases()); boolean isTableColumnsChanged = false; for (String newDatabase : Sets.difference(databasesInSchemaDatabase, databasesInSchemaStore)) { boolean isColumnChangedForNewDB = processTableSchemaChanges( newDatabase, event, gtid, Collections.emptyMap(), schemaDatabase.getColumnsForAllTables(newDatabase)); isTableColumnsChanged = isTableColumnsChanged || isColumnChangedForNewDB; } for (String existingDatbase : databasesInSchemaStore) { boolean isColumnChangedForExistingDB = processTableSchemaChanges( existingDatbase, event, gtid, schemaStore.getSchemaCache().row(existingDatbase), schemaDatabase.getColumnsForAllTables(existingDatbase)); isTableColumnsChanged = isTableColumnsChanged || isColumnChangedForExistingDB; } if (!isTableColumnsChanged) { // if the schema store is not updated, most likely the DDL does not change table columns. 
// we need to update schema store here to keep a record, so the DDL won't be processed again schemaStore.put( new MysqlTableSchema( 0, database, null, pos, gtid, sql, event.getTimestamp(), Collections.emptyList(), Collections.emptyMap())); } } private boolean processTableSchemaChanges( String database, QueryEvent event, String gtid, Map<String, MysqlTableSchema> tableSchemaMapInSchemaStore, Map<String, List<MysqlColumn>> tableColumnsInSchemaDatabase) { boolean isTableColumnChanged = false; Set<String> deletedTables = Sets.difference(tableSchemaMapInSchemaStore.keySet(), tableColumnsInSchemaDatabase.keySet()) .immutableCopy(); for (String deletedTable : deletedTables) { schemaStore.put( new MysqlTableSchema( 0, database, deletedTable, event.getBinlogFilePos(), gtid, event.getSql(), event.getTimestamp(), Collections.emptyList(), Collections.emptyMap())); isTableColumnChanged = true; } for (Map.Entry<String, List<MysqlColumn>> tableColumns : tableColumnsInSchemaDatabase.entrySet()) { String table = tableColumns.getKey(); List<MysqlColumn> columns = tableColumns.getValue(); if (!tableSchemaMapInSchemaStore.containsKey(table) || !columns.equals(tableSchemaMapInSchemaStore.get(table).getColumns())) { schemaStore.put( new MysqlTableSchema( 0, database, table, event.getBinlogFilePos(), gtid, event.getSql(), event.getTimestamp(), columns, Collections.emptyMap())); isTableColumnChanged = true; } } return isTableColumnChanged; } public synchronized void initialize(BinlogFilePos pos) { if (!isSchemaVersionEnabled) { log.info("Schema versioning is not enabled for {}", sourceName); return; } if (schemaStore.isCreated()) { log.info( "Schema store for {} is already bootstrapped. 
Loading schemas to store till {}, GTID Set: {}", sourceName, pos, pos.getGtidSet()); schemaStore.loadSchemaCacheUntil(pos); return; } log.info("Bootstrapping schema store for {}...", sourceName); BinlogFilePos earliestPos = new BinlogFilePos(mysqlClient.getBinaryLogs().get(0)); earliestPos.setServerUUID(mysqlClient.getServerUUID()); if (mysqlClient.isGtidModeEnabled()) { earliestPos.setGtidSet(new GtidSet(mysqlClient.getGlobalVariableValue("gtid_purged"))); } List<MysqlTableSchema> allTableSchemas = new ArrayList<>(); for (String database : schemaReader.getAllDatabases()) { if (SYSTEM_DATABASES.contains(database)) { log.info("Skipping tables for system database: {}", database); continue; } log.info("Bootstrapping table schemas for database {}", database); schemaDatabase.createDatabase(database); for (String table : schemaReader.getAllTablesIn(database)) { String createTableDDL = schemaReader.getCreateTableDDL(database, table); schemaDatabase.applyDDL(createTableDDL, database); allTableSchemas.add( new MysqlTableSchema( 0, database, table, earliestPos, null, createTableDDL, System.currentTimeMillis(), schemaReader.getTableColumns(database, table), Collections.emptyMap())); } } schemaStore.bootstrap(allTableSchemas); } @Override public synchronized void archive() { if (!isSchemaVersionEnabled) { log.info("Schema versioning is not enabled for {}", sourceName); return; } schemaStore.archive(); schemaDatabase.dropDatabases(); } public void compress() { if (!isSchemaVersionEnabled) { log.info("Schema versioning is not enabled for {}", sourceName); return; } String purgedGTID = mysqlClient.getGlobalVariableValue("gtid_purged"); BinlogFilePos earliestPosition = new BinlogFilePos(mysqlClient.getBinaryLogs().get(0)); earliestPosition.setServerUUID(mysqlClient.getServerUUID()); if (mysqlClient.isGtidModeEnabled()) { earliestPosition.setGtidSet(new GtidSet(purgedGTID)); } schemaStore.compress(earliestPosition); } private static boolean shouldProcessDDL(final String sql) { 
return TABLE_DDL_SQL_PATTERN.matcher(sql).find() || INDEX_DDL_SQL_PATTERN.matcher(sql).find() || DATABASE_DDL_SQL_PATTERN.matcher(sql).find(); } private static boolean isDDLGrant(final String sql) { return GRANT_DDL_SQL_PATTERN.matcher(sql).find(); } }
1,970
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/schema/MysqlColumn.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.schema; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; import lombok.Builder; import lombok.RequiredArgsConstructor; import lombok.Value; @Value @Builder @RequiredArgsConstructor @JsonDeserialize(builder = MysqlColumn.MysqlColumnBuilder.class) public class MysqlColumn { String name; String dataType; String columnType; boolean primaryKey; @JsonPOJOBuilder(withPrefix = "") static class MysqlColumnBuilder {} }
1,971
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/schema/MysqlSchemaDatabase.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.schema; import com.airbnb.spinaltap.mysql.MysqlSourceMetrics; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import javax.validation.constraints.NotNull; import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.CharStreams; import org.antlr.v4.runtime.CommonTokenStream; import org.antlr.v4.runtime.Token; import org.antlr.v4.runtime.TokenStream; import org.antlr.v4.runtime.TokenStreamRewriter; import org.antlr.v4.runtime.tree.ParseTree; import org.antlr.v4.runtime.tree.ParseTreeWalker; import org.jdbi.v3.core.Handle; import org.jdbi.v3.core.Jdbi; @Slf4j @RequiredArgsConstructor public class MysqlSchemaDatabase { private static final char DELIMITER = '/'; private final String sourceName; private final Jdbi jdbi; private final MysqlSourceMetrics metrics; void applyDDL(@NonNull final String sql, final String database) { log.info(String.format("Applying DDL statement: %s (Database selected: %s)", sql, database)); try (Handle handle = jdbi.open()) { handle.execute("SET foreign_key_checks=0"); MysqlSchemaUtil.VOID_RETRYER.call( () -> { MysqlSchemaUtil.executeWithJdbc( handle, getSchemaDatabaseName(sourceName, database), addSourcePrefix(sql)); return null; }); metrics.schemaDatabaseApplyDDLSuccess(database); } catch (Exception ex) { log.error( String.format( "Failed to apply DDL Statement to source: %s database: %s. (SQL: %s. 
Exception: %s)", sourceName, database, sql, ex)); metrics.schemaDatabaseApplyDDLFailure(database, ex); throw new RuntimeException(ex); } } List<String> listDatabases() { try (Handle handle = jdbi.open()) { return MysqlSchemaUtil.LIST_STRING_RETRYER.call( () -> handle .createQuery( String.format( "select SCHEMA_NAME from information_schema.SCHEMATA " + "where SCHEMA_NAME LIKE '%s%s%%'", sourceName, DELIMITER)) .mapTo(String.class) .map( database -> database.replaceFirst(String.format("^%s%s", sourceName, DELIMITER), "")) .list()); } catch (Exception ex) { log.error( String.format("Failed to list databases for source: %s (Exception: %s)", sourceName, ex)); throw new RuntimeException(ex); } } void createDatabase(@NonNull final String database) { log.info("Creating database: {}", database); try (Handle handle = jdbi.open()) { MysqlSchemaUtil.VOID_RETRYER.call( () -> { MysqlSchemaUtil.executeWithJdbc( handle, null, String.format( "CREATE DATABASE `%s`", getSchemaDatabaseName(sourceName, MysqlSchemaUtil.escapeBackQuote(database)))); return null; }); } catch (Exception ex) { log.error( String.format( "Failed to create database %s (Exception: %s)", getSchemaDatabaseName(sourceName, database), ex)); throw new RuntimeException(ex); } } void dropDatabases() { listDatabases().forEach(this::dropDatabase); } void dropDatabase(@NonNull final String database) { log.info("Dropping database: {}", database); try (Handle handle = jdbi.open()) { handle.execute("SET foreign_key_checks=0"); MysqlSchemaUtil.VOID_RETRYER.call( () -> { MysqlSchemaUtil.executeWithJdbc( handle, null, String.format( "DROP DATABASE IF EXISTS `%s`", MysqlSchemaUtil.escapeBackQuote(getSchemaDatabaseName(sourceName, database)))); return null; }); } catch (Exception ex) { log.error(String.format("Failed to drop database %s. 
(Exception: %s)", database, ex)); throw new RuntimeException(ex); } } Map<String, List<MysqlColumn>> getColumnsForAllTables(@NonNull String database) { try (Handle handle = jdbi.open()) { Map<String, List<MysqlColumn>> tableColumnsMap = new HashMap<>(); MysqlSchemaUtil.VOID_RETRYER.call( () -> { handle .createQuery( "select TABLE_NAME, COLUMN_NAME, DATA_TYPE, COLUMN_TYPE, COLUMN_KEY from information_schema.COLUMNS " + "where TABLE_SCHEMA = :db " + "order by ORDINAL_POSITION") .bind("db", getSchemaDatabaseName(sourceName, database)) .mapToMap(String.class) .forEach( row -> { String table = row.get("table_name"); tableColumnsMap.putIfAbsent(table, new LinkedList<>()); tableColumnsMap .get(table) .add( new MysqlColumn( row.get("column_name"), row.get("data_type"), row.get("column_type"), "PRI".equals(row.get("column_key")))); }); return null; }); return tableColumnsMap; } catch (Exception ex) { log.error(String.format("Failed to fetch table columns for database: %s", database), ex); throw new RuntimeException(ex); } } @VisibleForTesting String addSourcePrefix(@NotNull final String sql) { CharStream charStream = CharStreams.fromString(sql); MySQLLexer lexer = new MySQLLexer(charStream); CommonTokenStream tokens = new CommonTokenStream(lexer); MySQLParser parser = new MySQLParser(tokens); ParseTree tree = parser.root(); ParseTreeWalker walker = new ParseTreeWalker(); MySQLDBNamePrefixAdder prefixAdder = new com.airbnb.spinaltap.mysql.schema.MysqlSchemaDatabase.MySQLDBNamePrefixAdder(tokens); walker.walk(prefixAdder, tree); return prefixAdder.rewriter.getText(); } private static String getSchemaDatabaseName(@NonNull final String source, final String database) { if (Strings.isNullOrEmpty(database)) { return null; } return String.format("%s%s%s", source, DELIMITER, database); } private class MySQLDBNamePrefixAdder extends MySQLBaseListener { final TokenStreamRewriter rewriter; MySQLDBNamePrefixAdder(TokenStream tokens) { rewriter = new TokenStreamRewriter(tokens); } 
@Override public void enterTable_name(MySQLParser.Table_nameContext ctx) { // If table name starts with dot(.), database name is not specified. // children.size() == 1 means no database name before table name if (!ctx.getText().startsWith(".") && ctx.children.size() != 1) { // The first child will be database name addPrefix(ctx.getChild(0).getText(), ctx.start); /* Add quotes around table name for a corner case: The database name is quoted but table name is not, and table name starts with a digit: Example: RENAME TABLE airbed3_production.20170810023312170_reservation2s to tmp.20170810023312170_reservation2s will be transformed to RENAME TABLE `source/airbed3_production`.20170810023312170_reservation2s to `source/tmp`.20170810023312170_reservation2s if we don't add quotes around table name, which is an invalid SQL statement in MySQL. */ // DOT_ID will be null if there is already quotes around table name, _id(3) will be set in // this case. if (ctx.DOT_ID() != null) { rewriter.replace(ctx.stop, String.format(".`%s`", ctx.DOT_ID().getText().substring(1))); } } } @Override public void enterCreate_database(MySQLParser.Create_databaseContext ctx) { addPrefix(ctx.id_().getText(), ctx.id_().start); } @Override public void enterDrop_database(MySQLParser.Drop_databaseContext ctx) { addPrefix(ctx.id_().getText(), ctx.id_().start); } private void addPrefix(@NotNull final String name, @NotNull final Token indexToken) { if (!name.startsWith("`")) { rewriter.replace(indexToken, String.format("`%s%s%s`", sourceName, DELIMITER, name)); } else { rewriter.replace( indexToken, String.format("`%s%s%s", sourceName, DELIMITER, name.substring(1))); } } } }
1,972
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/schema/MysqlTableSchema.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.schema;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import java.util.List;
import java.util.Map;
import lombok.Value;

/**
 * Immutable value object describing the schema of one MySQL table as captured at a specific
 * binlog position. Lombok's {@code @Value} generates the all-args constructor (in field
 * declaration order), getters, {@code equals}/{@code hashCode} and {@code toString}.
 */
@Value
public class MysqlTableSchema {
  long id; // schema version identifier — presumably monotonically increasing; TODO confirm with the schema store
  String database;
  String table;
  BinlogFilePos binlogFilePos; // binlog position at which this schema snapshot was taken
  String gtid; // GTID of the DDL transaction — NOTE(review): may be null when GTID mode is off; confirm
  String sql; // the DDL statement that produced this schema version
  long timestamp;
  List<MysqlColumn> columns; // column definitions — assumed to be in ordinal order; verify producer
  Map<String, String> metadata; // free-form key/value metadata; semantics defined by the writer
}
1,973
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/exception/InvalidBinlogPositionException.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.exception;

import com.airbnb.spinaltap.common.exception.SpinaltapException;

/**
 * Reflects that the binlog position set in the {@link com.airbnb.spinaltap.mysql.MysqlSource}
 * client is invalid.
 */
public class InvalidBinlogPositionException extends SpinaltapException {
  private static final long serialVersionUID = 9187451138457311547L;

  /** @param message detail describing why the binlog position is considered invalid */
  public InvalidBinlogPositionException(final String message) {
    super(message);
  }
}
1,974
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/TableMapEvent.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnDataType;
import java.util.ArrayList;
import java.util.List;
import lombok.Getter;

/**
 * Binlog TABLE_MAP event: associates a numeric table id with a (database, table) pair and the
 * column data types of that table. Subsequent row events reference the table id only.
 */
@Getter
public final class TableMapEvent extends BinlogEvent {
  private final String database;
  private final String table;
  private final List<ColumnDataType> columnTypes;

  /**
   * @param columnTypeCodes raw MySQL column type codes; each is decoded via
   *     {@link ColumnDataType#byCode}, one list entry per code
   */
  public TableMapEvent(
      long tableId,
      long serverId,
      long timestamp,
      BinlogFilePos filePos,
      String database,
      String table,
      byte[] columnTypeCodes) {
    super(tableId, serverId, timestamp, filePos);
    this.database = database;
    this.table = table;
    // Presize: the list holds exactly one entry per type code, so avoid intermediate grows.
    this.columnTypes = new ArrayList<>(columnTypeCodes.length);
    for (byte code : columnTypeCodes) {
      columnTypes.add(ColumnDataType.byCode(code));
    }
  }
}
1,975
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/GTIDEvent.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import lombok.Getter;

/**
 * Binlog GTID event carrying the global transaction identifier of the transaction that follows
 * it in the stream. Not tied to any table, so the table id is fixed to 0.
 */
@Getter
public class GTIDEvent extends BinlogEvent {
  private final String gtid;

  public GTIDEvent(long serverId, long timestamp, BinlogFilePos filePos, String gtid) {
    // tableId is 0: GTID events are transaction-scoped, not table-scoped.
    super(0, serverId, timestamp, filePos);
    this.gtid = gtid;
  }
}
1,976
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/DeleteEvent.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import java.io.Serializable;
import java.util.List;
import lombok.Getter;

/**
 * Binlog DELETE row event. Each element of {@code rows} is the column-value array of one
 * deleted row (the before-image).
 */
@Getter
public final class DeleteEvent extends BinlogEvent {
  private final List<Serializable[]> rows;

  public DeleteEvent(
      long tableId,
      long serverId,
      long timestamp,
      BinlogFilePos filePos,
      List<Serializable[]> rows) {
    super(tableId, serverId, timestamp, filePos);
    this.rows = rows;
  }

  /** @return the number of rows deleted by this event */
  @Override
  public int size() {
    return rows.size();
  }
}
1,977
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/WriteEvent.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import java.io.Serializable;
import java.util.List;
import lombok.Getter;

/**
 * Binlog WRITE (insert) row event. Each element of {@code rows} is the column-value array of
 * one inserted row.
 */
@Getter
public final class WriteEvent extends BinlogEvent {
  private final List<Serializable[]> rows;

  public WriteEvent(
      long tableId,
      long serverId,
      long timestamp,
      BinlogFilePos filePos,
      List<Serializable[]> rows) {
    super(tableId, serverId, timestamp, filePos);
    this.rows = rows;
  }

  /** @return the number of rows inserted by this event */
  @Override
  public int size() {
    return rows.size();
  }
}
1,978
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/StartEvent.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event;

import com.airbnb.spinaltap.mysql.BinlogFilePos;

/**
 * Marks the start of a binlog file. Carries no table information (table id fixed to 0);
 * consumers use it to reset per-file state (see StartMapper).
 */
public class StartEvent extends BinlogEvent {
  public StartEvent(long serverId, long timestamp, BinlogFilePos filePos) {
    super(0L, serverId, timestamp, filePos);
  }
}
1,979
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/BinlogEvent.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event;

import com.airbnb.spinaltap.common.source.SourceEvent;
import com.airbnb.spinaltap.mysql.BinlogFilePos;
import lombok.Getter;
import lombok.ToString;

/** Represents a Binlog event */
@Getter
@ToString
public abstract class BinlogEvent extends SourceEvent {
  private final long tableId;
  private final long serverId;
  private final BinlogFilePos binlogFilePos;

  public BinlogEvent(long tableId, long serverId, long timestamp, BinlogFilePos binlogFilePos) {
    super(timestamp);
    this.tableId = tableId;
    this.serverId = serverId;
    this.binlogFilePos = binlogFilePos;
  }

  /**
   * Encodes the event position as a single comparable offset: binlog file number in the high
   * bits, byte position in the low 32 bits.
   *
   * <p>NOTE(review): assumes {@code getFileNumber()} returns {@code long} (if it returned
   * {@code int}, {@code << 32} would be a no-op in Java) and that {@code getPosition()} fits in
   * 32 bits — confirm against {@link BinlogFilePos}.
   */
  public long getOffset() {
    return (binlogFilePos.getFileNumber() << 32) | binlogFilePos.getPosition();
  }

  /** @return true iff this event represents a row change (insert, delete, or update). */
  public boolean isMutation() {
    return this instanceof WriteEvent || this instanceof DeleteEvent || this instanceof UpdateEvent;
  }
}
1,980
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/UpdateEvent.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import java.io.Serializable;
import java.util.List;
import java.util.Map;
import lombok.Getter;

/**
 * Binlog UPDATE row event. Each entry in {@code rows} pairs a row's before-image (key) with
 * its after-image (value).
 */
@Getter
public class UpdateEvent extends BinlogEvent {
  private final List<Map.Entry<Serializable[], Serializable[]>> rows;

  public UpdateEvent(
      long tableId,
      long serverId,
      long timestamp,
      BinlogFilePos filePos,
      List<Map.Entry<Serializable[], Serializable[]>> rows) {
    super(tableId, serverId, timestamp, filePos);
    this.rows = rows;
  }

  /** @return the number of rows updated by this event */
  @Override
  public int size() {
    return rows.size();
  }
}
1,981
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/XidEvent.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import lombok.Getter;

/**
 * Binlog XID event, written at the commit of a transaction. Carries the transaction id; not
 * tied to any table, so the table id is fixed to 0.
 */
@Getter
public class XidEvent extends BinlogEvent {
  private final long xid;

  public XidEvent(long serverId, long timestamp, BinlogFilePos filePos, long xid) {
    // 0L (was lowercase `0l`, easily misread as `01`): XID events are transaction-scoped.
    super(0L, serverId, timestamp, filePos);
    this.xid = xid;
  }
}
1,982
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/QueryEvent.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event;

import com.airbnb.spinaltap.mysql.BinlogFilePos;
import lombok.Getter;

/**
 * Binlog QUERY event carrying a raw SQL statement (DDL, BEGIN, COMMIT, ...) and the default
 * database it was executed against. Not tied to any table, so the table id is fixed to 0.
 */
@Getter
public class QueryEvent extends BinlogEvent {
  private final String database;
  private final String sql;

  public QueryEvent(
      long serverId, long timestamp, BinlogFilePos filePos, String database, String sql) {
    // 0L (was lowercase `0l`, easily misread as `01`): QUERY events are statement-scoped.
    super(0L, serverId, timestamp, filePos);
    this.database = database;
    this.sql = sql;
  }
}
1,983
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/mapper/DeleteMutationMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.mapper;

import com.airbnb.spinaltap.mysql.DataSource;
import com.airbnb.spinaltap.mysql.TableCache;
import com.airbnb.spinaltap.mysql.Transaction;
import com.airbnb.spinaltap.mysql.event.DeleteEvent;
import com.airbnb.spinaltap.mysql.mutation.MysqlDeleteMutation;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Row;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import lombok.NonNull;

/**
 * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} of {@link DeleteEvent}s to the
 * corresponding list of {@link com.airbnb.spinaltap.mysql.mutation.MysqlMutation}s, one per row
 * change in the event.
 */
final class DeleteMutationMapper extends MysqlMutationMapper<DeleteEvent, MysqlDeleteMutation> {
  DeleteMutationMapper(
      @NonNull final DataSource dataSource,
      @NonNull final TableCache tableCache,
      @NonNull final AtomicReference<Transaction> beginTransaction,
      @NonNull final AtomicReference<Transaction> lastTransaction,
      @NonNull final AtomicLong leaderEpoch) {
    super(dataSource, tableCache, beginTransaction, lastTransaction, leaderEpoch);
  }

  /** Builds one {@link MysqlDeleteMutation} per deleted row, preserving row order. */
  @Override
  protected List<MysqlDeleteMutation> mapEvent(
      @NonNull final Table table, @NonNull final DeleteEvent event) {
    final Collection<ColumnMetadata> cols = table.getColumns().values();
    final List<Serializable[]> rows = event.getRows();
    // Presize: exactly one mutation per row.
    final List<MysqlDeleteMutation> mutations = new ArrayList<>(rows.size());

    for (int position = 0; position < rows.size(); position++) {
      // `position` disambiguates multiple rows sharing the same binlog offset.
      mutations.add(
          new MysqlDeleteMutation(
              createMetadata(table, event, position),
              new Row(table, zip(rows.get(position), cols))));
    }

    return mutations;
  }
}
1,984
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/mapper/XidMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.mapper;

import com.airbnb.spinaltap.common.util.Mapper;
import com.airbnb.spinaltap.mysql.MysqlSourceMetrics;
import com.airbnb.spinaltap.mysql.Transaction;
import com.airbnb.spinaltap.mysql.event.XidEvent;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;

/**
 * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} that keeps track of {@link
 * Transaction} end information from {@link XidEvent}s, which will be appended as metadata to
 * streamed {@link MysqlMutation}s.
 */
@Slf4j
@RequiredArgsConstructor
final class XidMapper implements Mapper<XidEvent, List<MysqlMutation>> {
  @NonNull private final AtomicReference<Transaction> endTransaction;
  @NonNull private final AtomicReference<String> gtid;
  @NonNull private final MysqlSourceMetrics metrics;

  /**
   * Records the commit position as the end of the current transaction and emits no mutations
   * (XID events carry no row changes).
   */
  @Override
  public List<MysqlMutation> map(@NonNull final XidEvent event) {
    endTransaction.set(
        new Transaction(
            event.getTimestamp(), event.getOffset(), event.getBinlogFilePos(), gtid.get()));
    metrics.transactionReceived();
    return Collections.emptyList();
  }
}
1,985
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/mapper/GTIDMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.mapper;

import com.airbnb.spinaltap.common.util.Mapper;
import com.airbnb.spinaltap.mysql.event.GTIDEvent;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;

/**
 * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} that keeps track of {@link
 * GTIDEvent}s, which will be included in {@link com.airbnb.spinaltap.mysql.Transaction}
 */
@RequiredArgsConstructor
final class GTIDMapper implements Mapper<GTIDEvent, List<MysqlMutation>> {
  // Shared holder: other mappers (e.g. QueryMapper, XidMapper) read the latest GTID from here.
  private final AtomicReference<String> gtid;

  /** Records the event's GTID for downstream mappers; GTID events themselves emit no mutations. */
  @Override
  public List<MysqlMutation> map(@NonNull final GTIDEvent event) {
    gtid.set(event.getGtid());
    return Collections.emptyList();
  }
}
1,986
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/mapper/UpdateMutationMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.mapper;

import com.airbnb.spinaltap.mysql.DataSource;
import com.airbnb.spinaltap.mysql.TableCache;
import com.airbnb.spinaltap.mysql.Transaction;
import com.airbnb.spinaltap.mysql.event.UpdateEvent;
import com.airbnb.spinaltap.mysql.mutation.MysqlDeleteMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlInsertMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata;
import com.airbnb.spinaltap.mysql.mutation.MysqlUpdateMutation;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Row;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import com.google.common.collect.Lists;
import java.io.Serializable;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import lombok.NonNull;

/**
 * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} of {@link UpdateEvent}s to the
 * corresponding list of {@link com.airbnb.spinaltap.mysql.mutation.MysqlMutation}s, one or two
 * per row change in the event (see PK-change handling below).
 */
final class UpdateMutationMapper extends MysqlMutationMapper<UpdateEvent, MysqlMutation> {
  UpdateMutationMapper(
      @NonNull final DataSource dataSource,
      @NonNull final TableCache tableCache,
      @NonNull final AtomicReference<Transaction> beginTransaction,
      @NonNull final AtomicReference<Transaction> lastTransaction,
      @NonNull final AtomicLong leaderEpoch) {
    super(dataSource, tableCache, beginTransaction, lastTransaction, leaderEpoch);
  }

  /**
   * Maps each (before, after) row pair to mutations. An update that changes the primary key
   * value is split into a delete of the before-image plus an insert of the after-image, so
   * each emitted mutation always refers to a single PK.
   */
  @Override
  protected List<MysqlMutation> mapEvent(
      @NonNull final Table table, @NonNull final UpdateEvent event) {
    final Collection<ColumnMetadata> cols = table.getColumns().values();
    final List<Map.Entry<Serializable[], Serializable[]>> rows = event.getRows();
    // Presize to the common case (one mutation per row); PK-change splits grow it as needed.
    final List<MysqlMutation> mutations = Lists.newArrayListWithCapacity(rows.size());

    for (int position = 0; position < rows.size(); position++) {
      MysqlMutationMetadata metadata = createMetadata(table, event, position);

      final Row previousRow = new Row(table, zip(rows.get(position).getKey(), cols));
      final Row newRow = new Row(table, zip(rows.get(position).getValue(), cols));

      // If PK value has changed, then delete before image and insert new image
      // to retain invariant that a mutation captures changes to a single PK
      if (table.getPrimaryKey().isPresent()
          && !previousRow.getPrimaryKeyValue().equals(newRow.getPrimaryKeyValue())) {
        mutations.add(new MysqlDeleteMutation(metadata, previousRow));
        mutations.add(new MysqlInsertMutation(metadata, newRow));
      } else {
        mutations.add(new MysqlUpdateMutation(metadata, previousRow, newRow));
      }
    }

    return mutations;
  }
}
1,987
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/mapper/InsertMutationMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.mapper;

import com.airbnb.spinaltap.mysql.DataSource;
import com.airbnb.spinaltap.mysql.TableCache;
import com.airbnb.spinaltap.mysql.Transaction;
import com.airbnb.spinaltap.mysql.event.WriteEvent;
import com.airbnb.spinaltap.mysql.mutation.MysqlInsertMutation;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Row;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import lombok.NonNull;

/**
 * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} of a {@link WriteEvent} to a
 * list of {@link com.airbnb.spinaltap.mysql.mutation.MysqlMutation}s, one per row change in
 * the event.
 */
class InsertMutationMapper extends MysqlMutationMapper<WriteEvent, MysqlInsertMutation> {
  InsertMutationMapper(
      @NonNull final DataSource dataSource,
      @NonNull final TableCache tableCache,
      @NonNull final AtomicReference<Transaction> beginTransaction,
      @NonNull final AtomicReference<Transaction> lastTransaction,
      @NonNull final AtomicLong leaderEpoch) {
    super(dataSource, tableCache, beginTransaction, lastTransaction, leaderEpoch);
  }

  /** Builds one {@link MysqlInsertMutation} per inserted row, preserving row order. */
  @Override
  protected List<MysqlInsertMutation> mapEvent(
      @NonNull final Table table, @NonNull final WriteEvent event) {
    final List<Serializable[]> rows = event.getRows();
    final Collection<ColumnMetadata> cols = table.getColumns().values();
    // Presize: exactly one mutation per row.
    final List<MysqlInsertMutation> mutations = new ArrayList<>(rows.size());

    for (int position = 0; position < rows.size(); position++) {
      // `position` disambiguates multiple rows sharing the same binlog offset.
      mutations.add(
          new MysqlInsertMutation(
              createMetadata(table, event, position),
              new Row(table, zip(rows.get(position), cols))));
    }

    return mutations;
  }
}
1,988
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/mapper/QueryMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.mapper;

import com.airbnb.spinaltap.common.util.Mapper;
import com.airbnb.spinaltap.mysql.Transaction;
import com.airbnb.spinaltap.mysql.event.QueryEvent;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManager;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;

/**
 * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} that keeps track of {@link
 * QueryEvent}s. This is used to detect schema changes from DDL statements, and mark BEGIN
 * statements.
 */
@Slf4j
@RequiredArgsConstructor
final class QueryMapper implements Mapper<QueryEvent, List<MysqlMutation>> {
  private static final String BEGIN_STATEMENT = "BEGIN";
  private static final String COMMIT_STATEMENT = "COMMIT";

  private final AtomicReference<Transaction> beginTransaction;
  private final AtomicReference<Transaction> lastTransaction;
  private final AtomicReference<String> gtid;
  private final MysqlSchemaManager schemaManager;

  /**
   * Marks transaction boundaries from the query text and forwards DDL statements (anything
   * that is neither BEGIN nor COMMIT) to the schema manager. Query events never emit
   * mutations themselves.
   */
  @Override
  public List<MysqlMutation> map(@NonNull final QueryEvent event) {
    Transaction transaction =
        new Transaction(
            event.getTimestamp(), event.getOffset(), event.getBinlogFilePos(), gtid.get());
    if (isTransactionBegin(event)) {
      beginTransaction.set(transaction);
    } else {
      // DDL is also a transaction
      lastTransaction.set(transaction);
      if (!isTransactionEnd(event)) {
        schemaManager.processDDL(event, gtid.get());
      }
    }
    return Collections.emptyList();
  }

  /** True iff the statement is exactly "BEGIN" (binlog emits it verbatim in upper case). */
  private boolean isTransactionBegin(final QueryEvent event) {
    return event.getSql().equals(BEGIN_STATEMENT);
  }

  /** True iff the statement is exactly "COMMIT". */
  private boolean isTransactionEnd(final QueryEvent event) {
    return event.getSql().equals(COMMIT_STATEMENT);
  }
}
1,989
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/mapper/MysqlMutationMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.mapper;

import com.airbnb.spinaltap.Mutation;
import com.airbnb.spinaltap.common.util.ClassBasedMapper;
import com.airbnb.spinaltap.common.util.Mapper;
import com.airbnb.spinaltap.mysql.DataSource;
import com.airbnb.spinaltap.mysql.MysqlSourceMetrics;
import com.airbnb.spinaltap.mysql.TableCache;
import com.airbnb.spinaltap.mysql.Transaction;
import com.airbnb.spinaltap.mysql.event.BinlogEvent;
import com.airbnb.spinaltap.mysql.event.DeleteEvent;
import com.airbnb.spinaltap.mysql.event.GTIDEvent;
import com.airbnb.spinaltap.mysql.event.QueryEvent;
import com.airbnb.spinaltap.mysql.event.StartEvent;
import com.airbnb.spinaltap.mysql.event.TableMapEvent;
import com.airbnb.spinaltap.mysql.event.UpdateEvent;
import com.airbnb.spinaltap.mysql.event.WriteEvent;
import com.airbnb.spinaltap.mysql.event.XidEvent;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutationMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Column;
import com.airbnb.spinaltap.mysql.mutation.schema.ColumnMetadata;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import com.airbnb.spinaltap.mysql.schema.MysqlSchemaManager;
import com.google.common.collect.ImmutableMap;
import java.io.Serializable;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;

/**
 * Base {@link com.airbnb.spinaltap.common.util.Mapper} implementation that maps {@link
 * BinlogEvent}s to their corresponding {@link
 * com.airbnb.spinaltap.mysql.mutation.MysqlMutation}s.
 */
@Slf4j
@RequiredArgsConstructor
public abstract class MysqlMutationMapper<R extends BinlogEvent, T extends MysqlMutation>
    implements Mapper<R, List<T>> {
  @NonNull private final DataSource dataSource;
  @NonNull private final TableCache tableCache;
  @NonNull private final AtomicReference<Transaction> beginTransaction;
  @NonNull private final AtomicReference<Transaction> lastTransaction;
  @NonNull private final AtomicLong leaderEpoch;

  /**
   * Builds the composite mapper that dispatches every supported binlog event type to its
   * dedicated sub-mapper. Transaction/GTID state is shared between the sub-mappers via the
   * atomic references captured here.
   */
  public static Mapper<BinlogEvent, List<? extends Mutation<?>>> create(
      @NonNull final DataSource dataSource,
      @NonNull final TableCache tableCache,
      @NonNull final MysqlSchemaManager schemaManager,
      @NonNull final AtomicLong leaderEpoch,
      @NonNull final AtomicReference<Transaction> beginTransaction,
      @NonNull final AtomicReference<Transaction> lastTransaction,
      @NonNull final MysqlSourceMetrics metrics) {
    // Shared holder: GTIDMapper writes it, QueryMapper/XidMapper read it.
    final AtomicReference<String> gtid = new AtomicReference<>();
    return ClassBasedMapper.<BinlogEvent, List<? extends Mutation<?>>>builder()
        .addMapper(TableMapEvent.class, new TableMapMapper(tableCache))
        .addMapper(GTIDEvent.class, new GTIDMapper(gtid))
        .addMapper(
            QueryEvent.class,
            new QueryMapper(beginTransaction, lastTransaction, gtid, schemaManager))
        .addMapper(XidEvent.class, new XidMapper(lastTransaction, gtid, metrics))
        .addMapper(StartEvent.class, new StartMapper(dataSource, tableCache, metrics))
        .addMapper(
            UpdateEvent.class,
            new UpdateMutationMapper(
                dataSource, tableCache, beginTransaction, lastTransaction, leaderEpoch))
        .addMapper(
            WriteEvent.class,
            new InsertMutationMapper(
                dataSource, tableCache, beginTransaction, lastTransaction, leaderEpoch))
        .addMapper(
            DeleteEvent.class,
            new DeleteMutationMapper(
                dataSource, tableCache, beginTransaction, lastTransaction, leaderEpoch))
        .build();
  }

  /** Maps a single event against the resolved {@link Table} to its mutations. */
  protected abstract List<T> mapEvent(@NonNull final Table table, @NonNull final R event);

  /**
   * Resolves the event's table from the cache (populated by preceding TABLE_MAP events) and
   * delegates to {@link #mapEvent}.
   */
  @Override
  public List<T> map(@NonNull final R event) {
    Table table = tableCache.get(event.getTableId());
    return mapEvent(table, event);
  }

  /** Assembles per-mutation metadata snapshotting current transaction and leadership state. */
  MysqlMutationMetadata createMetadata(
      @NonNull final Table table, @NonNull final BinlogEvent event, final int eventPosition) {
    return new MysqlMutationMetadata(
        dataSource,
        event.getBinlogFilePos(),
        table,
        event.getServerId(),
        event.getOffset(),
        event.getTimestamp(),
        beginTransaction.get(),
        lastTransaction.get(),
        leaderEpoch.get(),
        eventPosition);
  }

  /**
   * Zips raw row values with column metadata (positional) into a name-to-column map. A length
   * mismatch is logged but tolerated: pairing stops at the shorter of the two, which can
   * legitimately happen when columns were added/dropped mid-stream.
   */
  static ImmutableMap<String, Column> zip(
      @NonNull final Serializable[] row, @NonNull final Collection<ColumnMetadata> columns) {
    if (row.length != columns.size()) {
      log.error("Row length {} and column length {} don't match", row.length, columns.size());
    }

    final ImmutableMap.Builder<String, Column> builder = ImmutableMap.builder();
    final Iterator<ColumnMetadata> columnIterator = columns.iterator();

    for (int position = 0; position < row.length && columnIterator.hasNext(); position++) {
      final ColumnMetadata col = columnIterator.next();
      builder.put(col.getName(), new Column(col, row[position]));
    }

    return builder.build();
  }
}
1,990
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/mapper/StartMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.mapper;

import com.airbnb.spinaltap.common.util.Mapper;
import com.airbnb.spinaltap.mysql.DataSource;
import com.airbnb.spinaltap.mysql.MysqlSourceMetrics;
import com.airbnb.spinaltap.mysql.TableCache;
import com.airbnb.spinaltap.mysql.event.StartEvent;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import java.util.Collections;
import java.util.List;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;

/**
 * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} that keeps track of binlog file
 * starts detected on {@link StartEvent}s. This is used to clear the {@link TableCache}, to
 * ensure table to tableId mapping remains consistent.
 */
@Slf4j
@RequiredArgsConstructor
final class StartMapper implements Mapper<StartEvent, List<MysqlMutation>> {
  @NonNull private final DataSource dataSource;
  @NonNull private final TableCache tableCache;
  @NonNull private final MysqlSourceMetrics metrics;

  /**
   * Logs the new binlog file, records the metric, and clears the table cache — table ids are
   * only stable within a single binlog file. Start events emit no mutations.
   */
  @Override
  public List<MysqlMutation> map(@NonNull final StartEvent event) {
    log.info(
        "Started processing binlog file {} for host {} at {}",
        event.getBinlogFilePos().getFileName(),
        dataSource.getHost(),
        new DateTime(event.getTimestamp()));

    metrics.binlogFileStart();
    tableCache.clear();
    return Collections.emptyList();
  }
}
1,991
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/mapper/TableMapMapper.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.mapper;

import com.airbnb.spinaltap.common.util.Mapper;
import com.airbnb.spinaltap.mysql.TableCache;
import com.airbnb.spinaltap.mysql.event.TableMapEvent;
import com.airbnb.spinaltap.mysql.mutation.MysqlMutation;
import java.util.Collections;
import java.util.List;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;

/**
 * Represents a {@link com.airbnb.spinaltap.common.util.Mapper} that keeps track of {@link
 * com.airbnb.spinaltap.mysql.mutation.schema.Table} information from {@link TableMapEvent}s,
 * which will be appended as metadata to streamed {@link MysqlMutation}s.
 */
@Slf4j
@RequiredArgsConstructor
final class TableMapMapper implements Mapper<TableMapEvent, List<MysqlMutation>> {
  @NonNull private final TableCache tableCache;

  /**
   * Updates the {@link TableCache} with {@link com.airbnb.spinaltap.mysql.mutation.schema.Table}
   * information corresponding to the {@link TableMapEvent}. To maintain consistency, any errors
   * will be propagated if the cache update fails. Table-map events emit no mutations.
   */
  @Override
  public List<MysqlMutation> map(@NonNull final TableMapEvent event) {
    try {
      tableCache.addOrUpdate(
          event.getTableId(), event.getTable(), event.getDatabase(), event.getColumnTypes());
    } catch (Exception ex) {
      log.error("Failed to process table map event: " + event, ex);
      // Rethrow: a stale table id -> table mapping would corrupt all subsequent row mutations.
      throw new RuntimeException(ex);
    }
    return Collections.emptyList();
  }
}
1,992
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/filter/TableFilter.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.filter;

import com.airbnb.spinaltap.mysql.TableCache;
import com.airbnb.spinaltap.mysql.event.BinlogEvent;
import com.airbnb.spinaltap.mysql.event.TableMapEvent;
import com.airbnb.spinaltap.mysql.mutation.schema.Table;
import java.util.Set;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;

/**
 * Represents a {@link com.airbnb.spinaltap.common.util.Filter} for {@link BinlogEvent}s based
 * on the database table they belong to. This is used to ensure that mutations are propagated
 * only for events for the table the {@link com.airbnb.spinaltap.common.source.Source} is
 * subscribed to.
 */
@RequiredArgsConstructor
final class TableFilter extends MysqlEventFilter {
  @NonNull private final TableCache tableCache;
  @NonNull private final Set<String> tableNames; // canonical "database:table" names — see Table.canonicalNameOf

  /**
   * Keeps table-map events only for subscribed tables (so the cache stays scoped), keeps
   * mutation events only when their table id is cached, and passes all other event types
   * through untouched.
   */
  @Override
  public boolean apply(@NonNull final BinlogEvent event) {
    if (event instanceof TableMapEvent) {
      TableMapEvent tableMap = (TableMapEvent) event;
      return tableNames.contains(
          Table.canonicalNameOf(tableMap.getDatabase(), tableMap.getTable()));
    } else if (event.isMutation()) {
      return tableCache.contains(event.getTableId());
    }
    return true;
  }
}
1,993
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/filter/DuplicateFilter.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap.mysql.event.filter;

import com.airbnb.spinaltap.common.source.MysqlSourceState;
import com.airbnb.spinaltap.mysql.BinlogFilePos;
import com.airbnb.spinaltap.mysql.GtidSet;
import com.airbnb.spinaltap.mysql.event.BinlogEvent;
import java.util.concurrent.atomic.AtomicReference;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;

/**
 * Represents a {@link com.airbnb.spinaltap.common.util.Filter} for duplicate {@link BinlogEvent}s
 * that have already been streamed. This is used for server-side de-duplication, by comparing
 * against the offset of the last marked {@link MysqlSourceState} checkpoint and disregarding any
 * events that are received with an offset before that watermark.
 */
@RequiredArgsConstructor
public final class DuplicateFilter extends MysqlEventFilter {
  @NonNull private final AtomicReference<MysqlSourceState> state;

  /**
   * @return true if the (mutation) event has not been streamed yet according to the saved
   *     checkpoint; non-mutation events always pass.
   */
  @Override
  public boolean apply(@NonNull final BinlogEvent event) {
    // Only applies to mutation events
    if (!event.isMutation()) {
      return true;
    }

    // We need to tell if position in `event` and in `state` are from the same source
    // MySQL server, because a failover may have happened and we are currently streaming
    // from the new master.
    // If they are from the same source server, we can just use the binlog filename and
    // position (offset) to tell whether we should skip this event.
    BinlogFilePos eventBinlogPos = event.getBinlogFilePos();
    BinlogFilePos savedBinlogPos = state.get().getLastPosition();

    // Use the same logic in BinlogFilePos.compareTo() here...
    if (BinlogFilePos.shouldCompareUsingFilePosition(eventBinlogPos, savedBinlogPos)) {
      return event.getOffset() > state.get().getLastOffset();
    }

    // If this point is reached, a master failover might have happened.
    // We can only use GTIDSet to tell whether this event should be skipped.
    // We should only skip this event if GTIDSet in event is a "proper subset" of the GTIDSet
    // in saved state, because it is possible that the last transaction we streamed before the
    // failover is in the middle of a transaction.
    GtidSet eventGtidSet = eventBinlogPos.getGtidSet();
    GtidSet savedGtidSet = savedBinlogPos.getGtidSet();
    return !eventGtidSet.isContainedWithin(savedGtidSet) && !eventGtidSet.equals(savedGtidSet);
  }
}
1,994
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/filter/MysqlEventFilter.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.event.filter; import com.airbnb.spinaltap.common.source.MysqlSourceState; import com.airbnb.spinaltap.common.util.ChainedFilter; import com.airbnb.spinaltap.common.util.Filter; import com.airbnb.spinaltap.mysql.TableCache; import com.airbnb.spinaltap.mysql.event.BinlogEvent; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import lombok.NonNull; /** Base {@link com.airbnb.spinaltap.common.util.Filter} implement for MySQL {@link BinlogEvent}s */ public abstract class MysqlEventFilter implements Filter<BinlogEvent> { public static Filter<BinlogEvent> create( @NonNull final TableCache tableCache, @NonNull final Set<String> tableNames, @NonNull final AtomicReference<MysqlSourceState> state) { return ChainedFilter.<BinlogEvent>builder() .addFilter(new EventTypeFilter()) .addFilter(new TableFilter(tableCache, tableNames)) .addFilter(new DuplicateFilter(state)) .build(); } }
1,995
0
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event
Create_ds/SpinalTap/spinaltap-mysql/src/main/java/com/airbnb/spinaltap/mysql/event/filter/EventTypeFilter.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql.event.filter; import com.airbnb.spinaltap.mysql.event.BinlogEvent; import com.airbnb.spinaltap.mysql.event.DeleteEvent; import com.airbnb.spinaltap.mysql.event.GTIDEvent; import com.airbnb.spinaltap.mysql.event.QueryEvent; import com.airbnb.spinaltap.mysql.event.StartEvent; import com.airbnb.spinaltap.mysql.event.TableMapEvent; import com.airbnb.spinaltap.mysql.event.UpdateEvent; import com.airbnb.spinaltap.mysql.event.WriteEvent; import com.airbnb.spinaltap.mysql.event.XidEvent; import com.google.common.collect.ImmutableSet; import java.util.Set; import lombok.NonNull; import lombok.RequiredArgsConstructor; /** * Represents a {@link com.airbnb.spinaltap.common.util.Filter} for {@link BinlogEvent}s based on a * predefined whitelist of event class types. */ @RequiredArgsConstructor final class EventTypeFilter extends MysqlEventFilter { @SuppressWarnings("unchecked") private static final Set<Class<? extends BinlogEvent>> WHITELISTED_EVENT_TYPES = ImmutableSet.of( TableMapEvent.class, WriteEvent.class, UpdateEvent.class, DeleteEvent.class, XidEvent.class, QueryEvent.class, StartEvent.class, GTIDEvent.class); public boolean apply(@NonNull final BinlogEvent event) { return WHITELISTED_EVENT_TYPES.contains(event.getClass()); } }
1,996
0
Create_ds/SpinalTap/spinaltap-model/src/test/java/com/airbnb
Create_ds/SpinalTap/spinaltap-model/src/test/java/com/airbnb/spinaltap/MutationTest.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap; import static org.junit.Assert.assertEquals; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import java.util.Map; import org.junit.Test; public class MutationTest { private static final String SHARED_KEY = "sharedKey"; @Test public void testAddAndRemoveScalarColumns() { String removedKey = "removedKey"; String addedKey = "addedKey"; Map<String, String> previousColumns = ImmutableMap.of( removedKey, "removedValue", SHARED_KEY, "sharedValue"); Map<String, String> currentColumns = ImmutableMap.of( SHARED_KEY, "sharedValue", addedKey, "addedValue"); assertEquals( ImmutableSet.of(removedKey, addedKey), Mutation.getUpdatedColumns(previousColumns, currentColumns)); } @Test public void testUpdateScalarColumnValues() { String updatedKey = "updatedKey"; Map<String, String> previousColumns = ImmutableMap.of( SHARED_KEY, "sharedValue", updatedKey, "previousValue"); Map<String, String> currentColumns = ImmutableMap.of( SHARED_KEY, "sharedValue", updatedKey, "currentValue"); assertEquals( ImmutableSet.of(updatedKey), Mutation.getUpdatedColumns(previousColumns, currentColumns)); } @Test public void testUpdateArrayColumnValues() { String updatedKey = "updatedKey"; Map<String, Object> previousColumns = ImmutableMap.of( SHARED_KEY, new byte[] {0x00, 0x01}, updatedKey, new byte[] {0x02, 0x03}); Map<String, Object> currentColumns = ImmutableMap.of( SHARED_KEY, new byte[] {0x00, 0x01}, updatedKey, new byte[] {0x04, 0x05}); assertEquals( ImmutableSet.of(updatedKey), Mutation.getUpdatedColumns(previousColumns, currentColumns)); } }
1,997
0
Create_ds/SpinalTap/spinaltap-model/src/test/java/com/airbnb
Create_ds/SpinalTap/spinaltap-model/src/test/java/com/airbnb/spinaltap/GtidSetTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license
 * information.
 */
package com.airbnb.spinaltap;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;

import com.airbnb.spinaltap.mysql.GtidSet;
import org.junit.Test;

/** Unit tests for {@link GtidSet} parsing, normalization, equality, and subset semantics. */
public class GtidSetTest {
  private static final String SERVER_UUID_1 = "665ef2f4-b008-4440-b78c-26ba7ce500e6";
  private static final String SERVER_UUID_2 = "eeb24231-ff9d-4051-b9b1-bf40bf33b2be";

  // An empty GTID-set string round-trips to the empty string.
  @Test
  public void testEmptySet() {
    assertEquals(new GtidSet("").toString(), "");
  }

  // Equality is order-independent across server UUIDs and treats null like the empty set;
  // differing intervals or differing server UUIDs break equality.
  @Test
  public void testEquals() {
    assertEquals(new GtidSet(""), new GtidSet(""));
    assertEquals(new GtidSet(""), new GtidSet(null));
    assertEquals(new GtidSet(SERVER_UUID_1 + ":1-888"), new GtidSet(SERVER_UUID_1 + ":1-888"));
    GtidSet gtidSet1 =
        new GtidSet(String.format("%s:1-1023,%s:1-888", SERVER_UUID_1, SERVER_UUID_2));
    GtidSet gtidSet2 =
        new GtidSet(String.format("%s:1-888,%s:1-1023", SERVER_UUID_2, SERVER_UUID_1));
    assertEquals(gtidSet1, gtidSet2);
    assertEquals(gtidSet1.toString(), gtidSet2.toString());
    assertNotEquals(
        new GtidSet(SERVER_UUID_1 + ":1-888"), new GtidSet(SERVER_UUID_1 + ":1-100:102-888"));
    assertNotEquals(new GtidSet(SERVER_UUID_1 + ":1-888"), new GtidSet(SERVER_UUID_2 + ":1-888"));
  }

  // Adjacent and overlapping transaction-id intervals are merged into canonical,
  // minimal intervals regardless of the order they appear in the input string.
  @Test
  public void testCollapseIntervals() {
    GtidSet gtidSet = new GtidSet(SERVER_UUID_1 + ":1-123:124:125-200");
    assertEquals(gtidSet, new GtidSet(SERVER_UUID_1 + ":1-200"));
    assertEquals(gtidSet.toString(), SERVER_UUID_1 + ":1-200");
    gtidSet = new GtidSet(SERVER_UUID_1 + ":1-201:202-211:239-244:245-300:400-409");
    assertEquals(gtidSet, new GtidSet(SERVER_UUID_1 + ":1-211:239-300:400-409"));
    assertEquals(gtidSet.toString(), SERVER_UUID_1 + ":1-211:239-300:400-409");
    // Unordered, heavily overlapping intervals still collapse to the canonical form.
    gtidSet = new GtidSet(SERVER_UUID_1 + ":1-200:100-123:40-255:40-100:60-100:280-290:270-279");
    assertEquals(gtidSet.toString(), SERVER_UUID_1 + ":1-255:270-290");
  }

  // Server UUIDs are case-insensitive: upper- and lower-case spellings of the same UUID
  // must merge into a single entry.
  @Test
  public void testMixedCaseServerUUID() {
    String upperCaseServerUUID1 = SERVER_UUID_1.toUpperCase();
    GtidSet gtidSet =
        new GtidSet(
            String.format(
                "%s:1-24,%s:25-706,%s:1-23", upperCaseServerUUID1, SERVER_UUID_1, SERVER_UUID_2));
    assertEquals(
        new GtidSet(String.format("%s:1-706,%s:1-23", SERVER_UUID_1, SERVER_UUID_2)), gtidSet);
  }

  // Exhaustive pairwise check of isContainedWithin(): subsetMatrix[i][j] == 1 iff
  // set[i] is contained within set[j] (non-strict subset; the empty set is in everything).
  @Test
  public void testSubsetOf() {
    GtidSet[] set = {
      new GtidSet(""),
      new GtidSet(SERVER_UUID_1 + ":1-191"),
      new GtidSet(SERVER_UUID_1 + ":192-199"),
      new GtidSet(SERVER_UUID_1 + ":1-191:192-199"),
      new GtidSet(SERVER_UUID_1 + ":1-191:193-199"),
      new GtidSet(SERVER_UUID_1 + ":2-199"),
      new GtidSet(SERVER_UUID_1 + ":1-200")
    };
    byte[][] subsetMatrix = {
      {1, 1, 1, 1, 1, 1, 1},
      {0, 1, 0, 1, 1, 0, 1},
      {0, 0, 1, 1, 0, 1, 1},
      {0, 0, 0, 1, 0, 0, 1},
      {0, 0, 0, 1, 1, 0, 1},
      {0, 0, 0, 1, 0, 1, 1},
      {0, 0, 0, 0, 0, 0, 1},
    };
    for (int i = 0; i < subsetMatrix.length; i++) {
      byte[] subset = subsetMatrix[i];
      for (int j = 0; j < subset.length; j++) {
        assertEquals(set[i].isContainedWithin(set[j]), subset[j] == 1);
      }
    }
  }
}
1,998
0
Create_ds/SpinalTap/spinaltap-model/src/test/java/com/airbnb/spinaltap
Create_ds/SpinalTap/spinaltap-model/src/test/java/com/airbnb/spinaltap/mysql/BinlogFilePosTest.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See License in the project root for license * information. */ package com.airbnb.spinaltap.mysql; import static org.junit.Assert.*; import org.junit.Test; public class BinlogFilePosTest { private static final String UUID1 = "07592619-e257-4033-a30f-7fe9fcfbf229"; private static final String UUID2 = "4a4ac150-fe5b-4093-a1ef-a8876011adaa"; @Test public void testCompare() throws Exception { BinlogFilePos first = BinlogFilePos.fromString("mysql-bin-changelog.218:14:6"); BinlogFilePos second = BinlogFilePos.fromString("mysql-bin-changelog.218:27:12"); BinlogFilePos third = BinlogFilePos.fromString("mysql-bin-changelog.219:11:92"); BinlogFilePos fourth = BinlogFilePos.fromString("mysql-bin-changelog.219:11:104"); assertTrue(first.compareTo(second) < 0); assertTrue(third.compareTo(second) > 0); assertTrue(third.compareTo(fourth) == 0); } @Test public void testCompareWithGTID() { String gtid1 = UUID1 + ":1-200"; String gtid2 = UUID1 + ":1-300"; String gtid3 = UUID1 + ":1-200," + UUID2 + ":1-456"; BinlogFilePos first = new BinlogFilePos("mysql-bin-changelog.218", 123, 456, gtid1, UUID1); BinlogFilePos second = new BinlogFilePos("mysql-bin-changelog.218", 456, 789, gtid2, UUID1); BinlogFilePos third = new BinlogFilePos("mysql-bin-changelog.100", 10, 24, gtid1, UUID2); BinlogFilePos fourth = new BinlogFilePos("mysql-bin-changelog.100", 20, 24, gtid3, UUID2); // server_uuid matches, compare binlog file number and position assertTrue(first.compareTo(second) < 0); // server_uuid doesn't match, compare GTID assertEquals(0, first.compareTo(third)); assertTrue(first.compareTo(fourth) < 0); assertTrue(second.compareTo(third) > 0); } @Test public void testConstructor() throws Exception { assertEquals( BinlogFilePos.fromString("mysql-bin-changelog.000218:14:6"), new BinlogFilePos("mysql-bin-changelog.000218", 14, 6)); assertEquals(new BinlogFilePos(80887L), new BinlogFilePos("mysql-bin-changelog.080887")); assertEquals(new 
BinlogFilePos(1080887L), new BinlogFilePos("mysql-bin-changelog.1080887")); } }
1,999