index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/config/JwtAuthConfigMpConfigImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.config;
import javax.enterprise.inject.Vetoed;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
@Vetoed
class JwtAuthConfigMpConfigImpl implements GeronimoJwtAuthConfig {
    // MicroProfile Config backend, resolved once when this instance is built.
    private final Config config;

    JwtAuthConfigMpConfigImpl() {
        this.config = ConfigProvider.getConfig();
    }

    /**
     * Looks {@code key} up through MicroProfile Config and falls back to
     * {@code def} when no value is configured.
     */
    @Override
    public String read(final String key, final String def) {
        final java.util.Optional<String> configured = config.getOptionalValue(key, String.class);
        return configured.orElse(def);
    }
}
| 5,700 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/config/GeronimoJwtAuthConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.config;
import java.util.function.Supplier;
@FunctionalInterface
public interface GeronimoJwtAuthConfig {
    /**
     * Reads a configuration entry.
     *
     * @param value the configuration key to look up.
     * @param def the fallback returned when the key has no configured value.
     * @return the configured value or {@code def}.
     */
    String read(String value, String def);

    /**
     * Builds the runtime configuration: backed by MicroProfile Config when it is
     * present on the classpath, otherwise by the properties/system-properties
     * fallback. The result is wrapped so plain keys get the Geronimo prefix.
     */
    static GeronimoJwtAuthConfig create() {
        GeronimoJwtAuthConfig backend;
        try {
            backend = new JwtAuthConfigMpConfigImpl();
        } catch (final NoClassDefFoundError | ExceptionInInitializerError missingMpConfig) {
            // MP Config API absent or failed to bootstrap -> plain properties fallback
            backend = new DefaultJwtAuthConfig();
        }
        return new PrefixedConfig(backend);
    }
}
| 5,701 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/config/DefaultJwtAuthConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.config;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import javax.enterprise.inject.Vetoed;
@Vetoed
class DefaultJwtAuthConfig implements GeronimoJwtAuthConfig {
    // System properties, overridden by the optional
    // META-INF/geronimo/microprofile/jwt-auth.properties classpath resource.
    private final Map<String, String> configuration = new HashMap<>();

    DefaultJwtAuthConfig() {
        // snapshot system properties first so the resource can override them
        final Properties systemProperties = System.getProperties();
        for (final String key : systemProperties.stringPropertyNames()) {
            configuration.put(key, System.getProperty(key));
        }
        try (final InputStream stream = Thread.currentThread().getContextClassLoader().getResourceAsStream("META-INF/geronimo/microprofile/jwt-auth.properties")) {
            if (stream != null) {
                final Properties resource = new Properties();
                resource.load(stream);
                for (final String key : resource.stringPropertyNames()) {
                    configuration.put(key, resource.getProperty(key));
                }
            }
        } catch (final IOException e) {
            throw new IllegalStateException(e);
        }
    }

    /**
     * Returns the value captured at construction time for {@code value},
     * or {@code def} when unknown.
     */
    @Override
    public String read(final String value, final String def) {
        return configuration.getOrDefault(value, def);
    }
}
| 5,702 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/config/PrefixedConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.config;
import javax.enterprise.inject.Vetoed;
@Vetoed
class PrefixedConfig implements GeronimoJwtAuthConfig {
    // Namespaces lookups under the Geronimo prefix; MicroProfile spec keys
    // ("mp.*") are forwarded untouched.
    private final GeronimoJwtAuthConfig delegate;

    PrefixedConfig(final GeronimoJwtAuthConfig geronimoJwtAuthConfig) {
        this.delegate = geronimoJwtAuthConfig;
    }

    @Override
    public String read(final String value, final String def) {
        final String key = value.startsWith("mp.") ? value : "geronimo.jwt-auth." + value;
        return delegate.read(key, def);
    }
}
| 5,703 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/io/PropertiesLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.io;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.Properties;
public final class PropertiesLoader {

    private PropertiesLoader() {
        // utility class, never instantiated
    }

    /**
     * Parses a string in {@code java.util.Properties} syntax (key=value lines).
     *
     * @param value the raw properties text.
     * @return the parsed properties, empty when {@code value} holds no entries.
     * @throws IllegalArgumentException if the content cannot be parsed.
     */
    public static Properties load(final String value) {
        final Properties parsed = new Properties();
        try (final Reader source = new StringReader(value)) {
            parsed.load(source);
        } catch (final IOException ioe) {
            throw new IllegalArgumentException(ioe);
        }
        return parsed;
    }
}
| 5,704 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/cdi/GeronimoJwtAuthExtension.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.cdi;
import static java.util.Objects.requireNonNull;
import static java.util.Optional.empty;
import static java.util.Optional.of;
import static java.util.Optional.ofNullable;
import static java.util.function.Function.identity;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collector;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.context.Dependent;
import javax.enterprise.context.RequestScoped;
import javax.enterprise.event.Observes;
import javax.enterprise.inject.Any;
import javax.enterprise.inject.Default;
import javax.enterprise.inject.Instance;
import javax.enterprise.inject.Vetoed;
import javax.enterprise.inject.spi.AfterBeanDiscovery;
import javax.enterprise.inject.spi.AfterDeploymentValidation;
import javax.enterprise.inject.spi.BeforeBeanDiscovery;
import javax.enterprise.inject.spi.Extension;
import javax.enterprise.inject.spi.InjectionPoint;
import javax.enterprise.inject.spi.ProcessInjectionPoint;
import javax.enterprise.util.AnnotationLiteral;
import javax.enterprise.util.Nonbinding;
import javax.inject.Provider;
import javax.json.JsonArray;
import javax.json.JsonArrayBuilder;
import javax.json.JsonNumber;
import javax.json.JsonObject;
import javax.json.JsonString;
import javax.json.JsonValue;
import javax.json.spi.JsonProvider;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import org.apache.geronimo.microprofile.impl.jwtauth.config.GeronimoJwtAuthConfig;
import org.apache.geronimo.microprofile.impl.jwtauth.jwt.ContextualJsonWebToken;
import org.apache.geronimo.microprofile.impl.jwtauth.servlet.TokenAccessor;
import org.apache.geronimo.microprofile.impl.jwtauth.servlet.JwtRequest;
import org.eclipse.microprofile.jwt.Claim;
import org.eclipse.microprofile.jwt.ClaimValue;
import org.eclipse.microprofile.jwt.Claims;
import org.eclipse.microprofile.jwt.JsonWebToken;
public class GeronimoJwtAuthExtension implements Extension {
    // Current request's token accessor; set/cleared by execute(...) so the
    // JsonWebToken bean and @Claim beans can resolve lazily per request.
    private final ThreadLocal<TokenAccessor> request = new ThreadLocal<>();
    // @Claim injection points discovered during bean discovery; drained in addClaimBeans.
    private final Collection<Injection> injectionPoints = new HashSet<>(8);
    // Unsupported @Claim types, reported as deployment problems in afterDeployment.
    private final Collection<Throwable> errors = new ArrayList<>();
    private JsonProvider json;

    // Makes @Claim members binding (the spec declares them @Nonbinding) so each
    // distinct (value, standard) pair resolves to its own registered bean.
    public void setClaimMethodsBinding(@Observes final BeforeBeanDiscovery beforeBeanDiscovery) {
        beforeBeanDiscovery.configureQualifier(Claim.class)
                .methods().forEach(m -> m.remove(it -> it.annotationType() == Nonbinding.class));
        json = JsonProvider.provider();
    }

    // Records every @Claim injection point so a matching bean can be added later.
    public void captureInjections(@Observes final ProcessInjectionPoint<?, ?> processInjectionPoint) {
        final InjectionPoint injectionPoint = processInjectionPoint.getInjectionPoint();
        ofNullable(injectionPoint.getAnnotated().getAnnotation(Claim.class))
                .flatMap(claim -> createInjection(claim, injectionPoint.getType()))
                .ifPresent(injectionPoints::add);
    }

    // Registers the config bean, the request-backed JsonWebToken bean, and one
    // bean per captured @Claim injection point.
    public void addClaimBeans(@Observes final AfterBeanDiscovery afterBeanDiscovery) {
        // it is another instance than the one used in our initializer but it should be backed by the same impl
        afterBeanDiscovery.addBean()
                .id(GeronimoJwtAuthExtension.class.getName() + "#" + GeronimoJwtAuthConfig.class.getName())
                .beanClass(GeronimoJwtAuthConfig.class)
                .types(GeronimoJwtAuthConfig.class, Object.class)
                .qualifiers(Default.Literal.INSTANCE, Any.Literal.INSTANCE)
                .scope(ApplicationScoped.class)
                .createWith(ctx -> GeronimoJwtAuthConfig.create());
        // application-scoped facade that resolves the actual token from the
        // current request's ThreadLocal on every access
        afterBeanDiscovery.addBean()
                .id(GeronimoJwtAuthExtension.class.getName() + "#" + JsonWebToken.class.getName())
                .beanClass(JsonWebToken.class)
                .types(JsonWebToken.class, Object.class)
                .qualifiers(Default.Literal.INSTANCE, Any.Literal.INSTANCE)
                .scope(ApplicationScoped.class)
                .createWith(ctx -> new ContextualJsonWebToken(() -> {
                    final TokenAccessor request = this.request.get();
                    if (request == null) {
                        throw new IllegalStateException("No JWT in this request");
                    }
                    return request.getToken();
                }));
        injectionPoints.forEach(injection ->
                afterBeanDiscovery.addBean()
                        .id(GeronimoJwtAuthExtension.class.getName() + "#" + injection.getId())
                        .beanClass(injection.findClass())
                        .qualifiers(injection.literal(), Any.Literal.INSTANCE)
                        .scope(injection.findScope())
                        .types(injection.type, Object.class)
                        .createWith(ctx -> injection.createInstance(request.get())));
        injectionPoints.clear();
    }

    // Surfaces every unsupported @Claim type collected during discovery.
    public void afterDeployment(@Observes final AfterDeploymentValidation afterDeploymentValidation) {
        errors.forEach(afterDeploymentValidation::addDeploymentProblem);
    }

    // Maps a @Claim injection point type to an Injection strategy. Handles
    // Provider/Instance/Optional/ClaimValue wrappers recursively, Collection,
    // the JsonValue subtypes, and String/Long/Integer/Double (incl. primitives).
    // Anything else is recorded as a deployment error and yields empty().
    private Optional<Injection> createInjection(final Claim claim, final Type type) {
        if (ParameterizedType.class.isInstance(type)) {
            final ParameterizedType pt = ParameterizedType.class.cast(type);
            if (pt.getActualTypeArguments().length == 1) {
                final Type raw = pt.getRawType();
                final Type arg = pt.getActualTypeArguments()[0];
                if (raw == Provider.class || raw == Instance.class) {
                    // unwrap: the nested injection resolves lazily anyway
                    return createInjection(claim, arg);
                }
                if (raw == Optional.class) {
                    return createInjection(claim, arg)
                            .map(it -> new Injection(claim.value(), claim.standard(), type) {
                                @Override
                                Object createInstance(final TokenAccessor jwtRequest) {
                                    return ofNullable(it.createInstance(jwtRequest));
                                }
                            });
                }
                if (raw == ClaimValue.class) {
                    final String name = getClaimName(claim);
                    return createInjection(claim, arg)
                            .map(it -> new Injection(claim.value(), claim.standard(), type) {
                                @Override
                                Object createInstance(final TokenAccessor jwtRequest) {
                                    // ClaimValue re-reads the token on each getValue() call
                                    return new ClaimValue<Object>() {
                                        @Override
                                        public String getName() {
                                            return name;
                                        }
                                        @Override
                                        public Object getValue() {
                                            return it.createInstance(jwtRequest);
                                        }
                                    };
                                }
                            });
                }
                if (Class.class.isInstance(raw) && Collection.class.isAssignableFrom(Class.class.cast(raw))) {
                    return of(new Injection(claim.value(), claim.standard(), type));
                }
            }
        } else if (Class.class.isInstance(type)) {
            final Class<?> clazz = Class.class.cast(type);
            if (JsonValue.class.isAssignableFrom(clazz)) {
                // JSON-P views: coerce the raw claim value into the requested JsonValue subtype
                if (JsonString.class.isAssignableFrom(clazz)) {
                    return of(new Injection(claim.value(), claim.standard(), clazz) {
                        @Override
                        Object createInstance(final TokenAccessor jwtRequest) {
                            final Object instance = super.createInstance(jwtRequest);
                            if (JsonString.class.isInstance(instance)) {
                                return instance;
                            }
                            return json.createValue(String.class.cast(instance));
                        }
                    });
                }
                if (JsonNumber.class.isAssignableFrom(clazz)) {
                    return of(new Injection(claim.value(), claim.standard(), clazz) {
                        @Override
                        Object createInstance(final TokenAccessor jwtRequest) {
                            final Object instance = super.createInstance(jwtRequest);
                            if (JsonNumber.class.isInstance(instance)) {
                                return instance;
                            }
                            return json.createValue(Number.class.cast(instance).doubleValue());
                        }
                    });
                }
                if (JsonObject.class.isAssignableFrom(clazz)) {
                    return of(new Injection(claim.value(), claim.standard(), clazz));
                }
                if (JsonArray.class.isAssignableFrom(clazz)) {
                    return of(new Injection(claim.value(), claim.standard(), clazz) {
                        @Override
                        Object createInstance(final TokenAccessor jwtRequest) {
                            final Object instance = super.createInstance(jwtRequest);
                            if (instance == null) {
                                return null;
                            }
                            if (JsonArray.class.isInstance(instance)) {
                                return instance;
                            }
                            if (Set.class.isInstance(instance)) {
                                // assumes set elements are strings (e.g. groups claim) — TODO confirm
                                return ((Set<String>) instance).stream()
                                        .collect(Collector.of(
                                                json::createArrayBuilder,
                                                JsonArrayBuilder::add,
                                                JsonArrayBuilder::addAll,
                                                JsonArrayBuilder::build));
                            }
                            throw new IllegalArgumentException("Unsupported value: " + instance);
                        }
                    });
                }
            } else {
                final Class<?> objectType = wrapPrimitives(clazz);
                if (CharSequence.class.isAssignableFrom(clazz) || Double.class.isAssignableFrom(objectType) ||
                        Long.class.isAssignableFrom(objectType) || Integer.class.isAssignableFrom(objectType)) {
                    return of(new Injection(claim.value(), claim.standard(), objectType));
                }
            }
        }
        errors.add(new IllegalArgumentException(type + " not supported by JWT-Auth implementation"));
        return empty();
    }

    // Boxes the primitive types supported for @Claim injection.
    private Class<?> wrapPrimitives(final Class<?> type) {
        if (long.class == type) {
            return Long.class;
        }
        if (int.class == type) {
            return Integer.class;
        }
        if (double.class == type) {
            return Double.class;
        }
        return type;
    }

    private static String getClaimName(final Claim claim) {
        return getClaimName(claim.value(), claim.standard());
    }

    // Effective claim name: explicit value() wins, else the standard() enum name.
    private static String getClaimName(final String name, final Claims val) {
        return of(name).filter(s -> !s.isEmpty()).orElse(val.name());
    }

    // Convenience overload: extracts the JwtRequest from the servlet request
    // (direct wrapper or request attribute) before delegating.
    public void execute(final HttpServletRequest req, final ServletRunnable task) {
        try {
            final TokenAccessor jwtRequest = requireNonNull(JwtRequest.class.isInstance(req) ?
                    JwtRequest.class.cast(req) : JwtRequest.class.cast(req.getAttribute(JwtRequest.class.getName())),
                    "No JwtRequest");
            execute(jwtRequest, task);
        } catch (final IOException | ServletException e) {
            throw new IllegalStateException(e);
        }
    }

    // Runs task with req bound to the current thread so claim beans can resolve.
    public void execute(final TokenAccessor req, final ServletRunnable task) throws ServletException, IOException {
        request.set(req); // we want to track it ourself to support propagation properly when needed
        try {
            task.run();
        } finally {
            request.remove();
        }
    }

    /** A servlet-dispatch body that may throw the usual servlet exceptions. */
    @FunctionalInterface
    public interface ServletRunnable {
        void run() throws ServletException, IOException;
    }

    // One @Claim injection point: knows the claim name, target type, and how to
    // extract/convert the claim value from the current token.
    private static class Injection {
        private final String name;
        private final Claims claims;
        private final Type type;
        private final int hash;
        // Converts the raw claim value to the target type (identity for
        // standard claims; JsonString/JsonNumber unwrap for custom claims).
        private final Function<Object, Object> transformer;
        private final String runtimeName;

        private Injection(final String name, final Claims claims, final Type type) {
            this.name = name;
            this.claims = claims;
            this.type = type;
            Function<Object, Object> transformer;
            try {
                // standard claims are assumed pre-converted by the token impl — TODO confirm
                Claims.valueOf(getClaimName(name, claims));
                transformer = identity();
            } catch (final IllegalArgumentException iae) {
                // custom claim: raw value is a JSON-P type, unwrap for String/Long targets
                if (type == String.class) {
                    transformer = val -> val == null ? null : JsonString.class.cast(val).getString();
                } else if (type == Long.class) {
                    transformer = val -> val == null ? null : JsonNumber.class.cast(val).longValue();
                } else {
                    transformer = identity();
                }
            }
            this.transformer = transformer;
            this.runtimeName = getClaimName(name, claims);
            {
                // NOTE(review): hash uses (name, claims, type) while equals compares
                // (runtimeName, type); equal instances could hash differently when the
                // same claim is referenced via value() vs standard() — verify intent.
                int result = name.hashCode();
                result = 31 * result + claims.hashCode();
                hash = 31 * result + type.hashCode();
            }
        }

        private String getId() {
            return name + "/" + claims + "/" + type;
        }

        // Raw class of the injection target (unwraps parameterized types).
        private Class<?> findClass() {
            if (Class.class.isInstance(type)) {
                return Class.class.cast(type);
            }
            if (ParameterizedType.class.isInstance(type)) {
                ParameterizedType current = ParameterizedType.class.cast(type);
                while (!Class.class.isInstance(current.getRawType())) {
                    current = ParameterizedType.class.cast(current.getRawType());
                }
                return Class.class.cast(current.getRawType());
            }
            throw new IllegalArgumentException("Can't find a class from " + type);
        }

        // ClaimValue wrappers are request scoped; plain values are dependent.
        private Class<? extends Annotation> findScope() {
            if (ClaimValue.class == findClass()) {
                return RequestScoped.class;
            }
            return Dependent.class;
        }

        private Annotation literal() {
            return new ClaimLiteral(name, claims);
        }

        // Reads the claim from the current token and applies the type transformer.
        Object createInstance(final TokenAccessor jwtRequest) {
            return transformer.apply(jwtRequest.getToken().getClaim(runtimeName));
        }

        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            final Injection injection = Injection.class.cast(o);
            return runtimeName.equals(injection.runtimeName) && type.equals(injection.type);
        }

        @Override
        public int hashCode() {
            return hash;
        }

        @Override
        public String toString() {
            return "Injection{claim='" + runtimeName + "', type=" + type + '}';
        }
    }

    // Qualifier literal used to register one bean per distinct @Claim usage.
    @Vetoed
    private static class ClaimLiteral extends AnnotationLiteral<Claim> implements Claim {
        private final String name;
        private final Claims claims;

        private ClaimLiteral(final String name, final Claims claims) {
            this.name = name;
            this.claims = claims;
        }

        @Override
        public String value() {
            return name;
        }

        @Override
        public Claims standard() {
            return claims;
        }
    }
}
| 5,705 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/jaxrs/RolesAllowedFeature.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.jaxrs;
import static java.util.Collections.emptyList;
import static java.util.Optional.ofNullable;
import static java.util.function.Function.identity;
import static java.util.stream.Collectors.toMap;
import static java.util.stream.Collectors.toSet;
import java.lang.annotation.Annotation;
import java.lang.reflect.AnnotatedElement;
import java.util.Collection;
import java.util.Map;
import java.util.stream.Stream;
import javax.annotation.security.DenyAll;
import javax.annotation.security.PermitAll;
import javax.annotation.security.RolesAllowed;
import javax.enterprise.context.Dependent;
import javax.inject.Inject;
import javax.ws.rs.container.DynamicFeature;
import javax.ws.rs.container.ResourceInfo;
import javax.ws.rs.core.FeatureContext;
import javax.ws.rs.ext.Provider;
// Security handling is done as a JAX-RS DynamicFeature rather than a CDI
// interceptor because many apps already activate CDI interceptors and we
// would break them too easily.
// todo: probably make it configurable
@Provider
@Dependent
public class RolesAllowedFeature implements DynamicFeature {
    @Inject
    private GroupMapper mapper;

    /**
     * Inspects each resource method for the common-annotations security
     * annotations (method level wins over class level, at most one of
     * DenyAll/PermitAll/RolesAllowed per level) and registers a request
     * filter enforcing them. Does nothing when no annotation is present.
     */
    @Override
    public void configure(final ResourceInfo resourceInfo, final FeatureContext featureContext) {
        final Map<Class<?>, Annotation> methodAnnotations = collectConfig(resourceInfo.getResourceMethod());
        if (methodAnnotations.size() > 1) {
            throw new IllegalArgumentException("Ambiguous configuration for " + resourceInfo.getResourceMethod() + ": " + methodAnnotations);
        }
        final Map<Class<?>, Annotation> classAnnotations = collectConfig(unwrapClazz(resourceInfo.getResourceClass()));
        if (classAnnotations.size() > 1) {
            throw new IllegalArgumentException("Ambiguous configuration for " + resourceInfo.getResourceClass() + ": " + classAnnotations);
        }
        if (classAnnotations.isEmpty() && methodAnnotations.isEmpty()) {
            return;
        }
        // NOTE(review): guard against common-annotations being absent at runtime;
        // looks redundant since the annotation classes were already touched above
        // without error — confirm whether a container scenario needs this.
        try {
            ofNullable(RolesAllowedFeature.class.getClassLoader())
                    .orElseGet(ClassLoader::getSystemClassLoader)
                    .loadClass("javax.annotation.security.PermitAll");
        } catch (final ClassNotFoundException cnfe) {
            return;
        }
        // precedence: method-level annotations override class-level ones entirely
        final boolean denyAll = methodAnnotations.containsKey(DenyAll.class) || (methodAnnotations.isEmpty() && classAnnotations.containsKey(DenyAll.class));
        final boolean permitAll = !denyAll && (methodAnnotations.containsKey(PermitAll.class) || (methodAnnotations.isEmpty() && classAnnotations.containsKey(PermitAll.class)));
        // expand declared roles through the configured group mapping
        final Collection<String> roles = denyAll || permitAll ?
                emptyList() :
                Stream.of(RolesAllowed.class.cast(ofNullable(methodAnnotations.get(RolesAllowed.class)).orElseGet(() -> classAnnotations.get(RolesAllowed.class))).value())
                        .flatMap(it -> mapper.map(it).stream())
                        .collect(toSet());
        featureContext.register(new RolesAllowedRequestFilter(denyAll, permitAll, roles));
    }

    // Collects whichever of the three security annotations are present on the element.
    private Map<Class<?>, Annotation> collectConfig(final AnnotatedElement annotatedElement) {
        return Stream.of(DenyAll.class, PermitAll.class, RolesAllowed.class)
                .filter(annotatedElement::isAnnotationPresent)
                .map(annotatedElement::getAnnotation)
                .collect(toMap(Annotation::annotationType, identity()));
    }

    // Skips proxy subclasses (name contains "$$") to read annotations from the user class.
    private AnnotatedElement unwrapClazz(final Class<?> resourceClass) {
        Class<?> current = resourceClass;
        while (current.getName().contains("$$") && current.getSuperclass() != null) {
            current = current.getSuperclass();
        }
        return current;
    }
}
| 5,706 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/jaxrs/GroupMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.jaxrs;
import static java.util.Collections.singletonList;
import static java.util.Optional.ofNullable;
import static java.util.stream.Collectors.toSet;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Stream;
import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.inject.Inject;
import org.apache.geronimo.microprofile.impl.jwtauth.config.GeronimoJwtAuthConfig;
import org.apache.geronimo.microprofile.impl.jwtauth.io.PropertiesLoader;
@ApplicationScoped
public class GroupMapper {
    @Inject
    private GeronimoJwtAuthConfig config;

    // token group name -> application roles, parsed from "groups.mapping"
    private final Map<String, Collection<String>> mapping = new HashMap<>();

    @PostConstruct
    private void init() {
        final String raw = config.read("groups.mapping", null);
        if (raw == null) {
            return;
        }
        final String trimmed = raw.trim();
        if (trimmed.isEmpty()) {
            return;
        }
        // properties syntax: tokenGroup = role1,role2,...
        final java.util.Properties props = PropertiesLoader.load(trimmed);
        for (final String key : props.stringPropertyNames()) {
            mapping.put(key, Stream.of(props.getProperty(key).split(","))
                    .map(String::trim)
                    .collect(toSet()));
        }
    }

    /**
     * Maps a token group to the configured application roles, or to itself
     * (singleton) when no mapping is configured.
     */
    public Collection<String> map(final String tokenName) {
        final Collection<String> mapped = mapping.get(tokenName);
        return mapped == null ? singletonList(tokenName) : mapped;
    }
}
| 5,707 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/jaxrs/JAXRSRequestForwarder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.jaxrs;
import javax.annotation.Priority;
import javax.enterprise.context.Dependent;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.Priorities;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.core.Context;
import javax.ws.rs.ext.Provider;
import org.apache.geronimo.microprofile.impl.jwtauth.servlet.JwtRequest;
@Provider
@Dependent
@Priority(Priorities.AUTHENTICATION - 1)
public class JAXRSRequestForwarder implements ContainerRequestFilter {
    @Context
    private HttpServletRequest request;

    /**
     * Copies the JWT header value seen at the JAX-RS layer onto the underlying
     * {@link JwtRequest} attribute so both layers agree on the token source.
     * No-op when the servlet filter did not install a JwtRequest.
     */
    @Override
    public void filter(final ContainerRequestContext requestContext) {
        final JwtRequest jwtRequest = JwtRequest.class.cast(request.getAttribute(JwtRequest.class.getName()));
        if (jwtRequest == null) {
            return;
        }
        final String headerValue = requestContext.getHeaders().getFirst(jwtRequest.getHeaderName());
        if (headerValue == null) {
            return;
        }
        jwtRequest.setAttribute(JAXRSRequestForwarder.class.getName() + ".header", headerValue);
    }
}
| 5,708 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/jaxrs/RolesAllowedRequestFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.jaxrs;
import static java.util.Collections.emptyMap;
import java.io.IOException;
import java.util.Collection;
import javax.json.Json;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.SecurityContext;
class RolesAllowedRequestFilter implements ContainerRequestFilter {
    // 403 payload built once; the entity is an immutable JsonObject.
    // NOTE(review): the same Response instance is reused across requests — verify
    // the JAX-RS runtime tolerates re-sending it (entity is not a stream here).
    private final Response forbidden = Response
            .status(Response.Status.FORBIDDEN)
            .entity(Json.createObjectBuilder(emptyMap()).add("message", "you are not allowed to access that endpoint").build())
            .build();

    private final boolean denyAll;
    private final boolean permitAll;
    private final Collection<String> roles;

    RolesAllowedRequestFilter(final boolean denyAll, final boolean permitAll, final Collection<String> roles) {
        this.denyAll = denyAll;
        this.permitAll = permitAll;
        this.roles = roles;
    }

    /**
     * Enforces the resolved security annotations: DenyAll always aborts,
     * PermitAll always passes, otherwise the caller must hold at least one
     * of the allowed roles.
     */
    @Override
    public void filter(final ContainerRequestContext context) throws IOException {
        if (denyAll) {
            context.abortWith(forbidden);
            return;
        }
        if (permitAll) {
            return;
        }
        final SecurityContext securityContext = context.getSecurityContext();
        final boolean allowed = securityContext != null && roles.stream().anyMatch(securityContext::isUserInRole);
        if (!allowed) {
            context.abortWith(forbidden);
        }
    }
}
| 5,709 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/jaxrs/ResponseBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.jaxrs;
import static java.util.Collections.emptyMap;
import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.json.Json;
import javax.json.JsonBuilderFactory;
import javax.json.JsonObject;
import org.apache.geronimo.microprofile.impl.jwtauth.JwtException;
@ApplicationScoped
public class ResponseBuilder {

    // Created once per application; reused for every error payload.
    private JsonBuilderFactory factory;

    @PostConstruct
    private void createBuilderFactory() {
        factory = Json.createBuilderFactory(emptyMap());
    }

    /**
     * Maps a {@link JwtException} to the JSON payload returned to the client.
     *
     * @param exception the authentication failure to report, never {@code null}
     * @return a JSON object of the shape {@code {"message": "..."}}
     */
    public JsonObject toObject(final JwtException exception) {
        // JsonObjectBuilder#add(String, String) throws NullPointerException for a null
        // value and Throwable#getMessage() may legitimately be null, so fall back to "".
        final String message = exception.getMessage();
        return factory.createObjectBuilder()
                .add("message", message == null ? "" : message)
                .build();
    }
}
| 5,710 |
0 | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth | Create_ds/geronimo-jwt-auth/src/main/java/org/apache/geronimo/microprofile/impl/jwtauth/jaxrs/GeronimoJwtAuthExceptionMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.microprofile.impl.jwtauth.jaxrs;
import static javax.ws.rs.core.MediaType.APPLICATION_JSON_TYPE;
import javax.enterprise.context.Dependent;
import javax.inject.Inject;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import javax.ws.rs.ext.Provider;
import org.apache.geronimo.microprofile.impl.jwtauth.JwtException;
@Provider
@Dependent
public class GeronimoJwtAuthExceptionMapper implements ExceptionMapper<JwtException> {

    @Inject
    private ResponseBuilder builder;

    /** Translates a JWT authentication failure into a JSON error response. */
    @Override
    public Response toResponse(final JwtException exception) {
        final Object payload = builder.toObject(exception);
        return Response.status(exception.getStatus())
                .type(APPLICATION_JSON_TYPE)
                .entity(payload)
                .build();
    }
}
| 5,711 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch7/ElasticsearchSinkITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch7;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.elasticsearch.ElasticsearchUtil;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkTestBase;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.junit.ClassRule;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import java.util.ArrayList;
import java.util.List;
/** IT cases for the {@link ElasticsearchSink}. */
public class ElasticsearchSinkITCase
        extends ElasticsearchSinkTestBase<RestHighLevelClient, HttpHost> {

    private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchSinkITCase.class);

    /** Single Elasticsearch container shared by every test in this class. */
    @ClassRule
    public static ElasticsearchContainer elasticsearchContainer =
            ElasticsearchUtil.createElasticsearchContainer(
                    DockerImageVersions.ELASTICSEARCH_7, LOG);

    @Override
    protected final RestHighLevelClient getClient() {
        return new RestHighLevelClient(
                RestClient.builder(HttpHost.create(elasticsearchContainer.getHttpHostAddress())));
    }

    @Test
    public void testElasticsearchSink() throws Exception {
        runElasticsearchSinkTest();
    }

    @Test
    public void testElasticsearchSinkWithSmile() throws Exception {
        runElasticsearchSinkSmileTest();
    }

    @Test
    public void testNullAddresses() {
        runNullAddressesTest();
    }

    @Test
    public void testEmptyAddresses() {
        runEmptyAddressesTest();
    }

    @Test
    public void testInvalidElasticsearchCluster() throws Exception {
        runInvalidElasticsearchClusterTest();
    }

    @Override
    protected ElasticsearchSinkBase<Tuple2<Integer, String>, RestHighLevelClient>
            createElasticsearchSink(
                    int bulkFlushMaxActions,
                    List<HttpHost> httpHosts,
                    ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) {
        return buildSink(bulkFlushMaxActions, httpHosts, elasticsearchSinkFunction);
    }

    @Override
    protected ElasticsearchSinkBase<Tuple2<Integer, String>, RestHighLevelClient>
            createElasticsearchSinkForEmbeddedNode(
                    int bulkFlushMaxActions,
                    ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) {
        return createElasticsearchSinkForNode(
                bulkFlushMaxActions,
                elasticsearchSinkFunction,
                elasticsearchContainer.getHttpHostAddress());
    }

    @Override
    protected ElasticsearchSinkBase<Tuple2<Integer, String>, RestHighLevelClient>
            createElasticsearchSinkForNode(
                    int bulkFlushMaxActions,
                    ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction,
                    String hostAddress) {
        ArrayList<HttpHost> httpHosts = new ArrayList<>();
        httpHosts.add(HttpHost.create(hostAddress));
        return buildSink(bulkFlushMaxActions, httpHosts, elasticsearchSinkFunction);
    }

    /**
     * Builds the sink under test; extracted so the factory methods above share one
     * builder-configuration path instead of duplicating it.
     */
    private ElasticsearchSinkBase<Tuple2<Integer, String>, RestHighLevelClient> buildSink(
            int bulkFlushMaxActions,
            List<HttpHost> httpHosts,
            ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) {
        ElasticsearchSink.Builder<Tuple2<Integer, String>> builder =
                new ElasticsearchSink.Builder<>(httpHosts, elasticsearchSinkFunction);
        builder.setBulkFlushMaxActions(bulkFlushMaxActions);
        return builder.build();
    }
}
| 5,712 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7DynamicTableFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.api.common.typeutils.base.VoidSerializer;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;
import org.apache.flink.util.TestLogger;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.util.Arrays;
import java.util.Collections;
import static org.apache.flink.streaming.connectors.elasticsearch.table.TestContext.context;
/** Tests for validation in {@link Elasticsearch7DynamicTableFactory}. */
public class Elasticsearch7DynamicTableFactoryTest extends TestLogger {
@Rule public ExpectedException thrown = ExpectedException.none();
@Test
public void validateEmptyConfiguration() {
Elasticsearch7DynamicTableFactory factory = new Elasticsearch7DynamicTableFactory();
thrown.expect(ValidationException.class);
thrown.expectMessage(
"One or more required options are missing.\n"
+ "\n"
+ "Missing required options are:\n"
+ "\n"
+ "hosts\n"
+ "index");
factory.createDynamicTableSink(context().build());
}
@Test
public void validateWrongIndex() {
Elasticsearch7DynamicTableFactory factory = new Elasticsearch7DynamicTableFactory();
thrown.expect(ValidationException.class);
thrown.expectMessage("'index' must not be empty");
factory.createDynamicTableSink(
context()
.withOption("index", "")
.withOption("hosts", "http://localhost:12345")
.build());
}
@Test
public void validateWrongHosts() {
Elasticsearch7DynamicTableFactory factory = new Elasticsearch7DynamicTableFactory();
thrown.expect(ValidationException.class);
thrown.expectMessage(
"Could not parse host 'wrong-host' in option 'hosts'. It should follow the format 'http://host_name:port'.");
factory.createDynamicTableSink(
context().withOption("index", "MyIndex").withOption("hosts", "wrong-host").build());
}
@Test
public void validateWrongFlushSize() {
Elasticsearch7DynamicTableFactory factory = new Elasticsearch7DynamicTableFactory();
thrown.expect(ValidationException.class);
thrown.expectMessage(
"'sink.bulk-flush.max-size' must be in MB granularity. Got: 1024 bytes");
factory.createDynamicTableSink(
context()
.withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
.withOption(
ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
"http://localhost:1234")
.withOption(
ElasticsearchConnectorOptions.BULK_FLASH_MAX_SIZE_OPTION.key(),
"1kb")
.build());
}
@Test
public void validateWrongRetries() {
Elasticsearch7DynamicTableFactory factory = new Elasticsearch7DynamicTableFactory();
thrown.expect(ValidationException.class);
thrown.expectMessage("'sink.bulk-flush.backoff.max-retries' must be at least 1. Got: 0");
factory.createDynamicTableSink(
context()
.withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
.withOption(
ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
"http://localhost:1234")
.withOption(
ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION
.key(),
"0")
.build());
}
@Test
public void validateWrongMaxActions() {
Elasticsearch7DynamicTableFactory factory = new Elasticsearch7DynamicTableFactory();
thrown.expect(ValidationException.class);
thrown.expectMessage("'sink.bulk-flush.max-actions' must be at least 1. Got: -2");
factory.createDynamicTableSink(
context()
.withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
.withOption(
ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
"http://localhost:1234")
.withOption(
ElasticsearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION.key(),
"-2")
.build());
}
@Test
public void validateWrongBackoffDelay() {
Elasticsearch7DynamicTableFactory factory = new Elasticsearch7DynamicTableFactory();
thrown.expect(ValidationException.class);
thrown.expectMessage("Invalid value for option 'sink.bulk-flush.backoff.delay'.");
factory.createDynamicTableSink(
context()
.withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
.withOption(
ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
"http://localhost:1234")
.withOption(
ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION.key(),
"-1s")
.build());
}
@Test
public void validatePrimaryKeyOnIllegalColumn() {
Elasticsearch7DynamicTableFactory factory = new Elasticsearch7DynamicTableFactory();
thrown.expect(ValidationException.class);
thrown.expectMessage(
"The table has a primary key on columns of illegal types: "
+ "[ARRAY, MAP, MULTISET, ROW, RAW, VARBINARY].\n"
+ " Elasticsearch sink does not support primary keys on columns of types: "
+ "[ARRAY, MAP, MULTISET, STRUCTURED_TYPE, ROW, RAW, BINARY, VARBINARY].");
factory.createDynamicTableSink(
context()
.withSchema(
new ResolvedSchema(
Arrays.asList(
Column.physical("a", DataTypes.BIGINT().notNull()),
Column.physical(
"b",
DataTypes.ARRAY(
DataTypes.BIGINT()
.notNull())
.notNull()),
Column.physical(
"c",
DataTypes.MAP(
DataTypes.BIGINT(),
DataTypes.STRING())
.notNull()),
Column.physical(
"d",
DataTypes.MULTISET(
DataTypes.BIGINT()
.notNull())
.notNull()),
Column.physical(
"e",
DataTypes.ROW(
DataTypes.FIELD(
"a",
DataTypes.BIGINT()))
.notNull()),
Column.physical(
"f",
DataTypes.RAW(
Void.class,
VoidSerializer.INSTANCE)
.notNull()),
Column.physical("g", DataTypes.BYTES().notNull())),
Collections.emptyList(),
UniqueConstraint.primaryKey(
"name",
Arrays.asList("a", "b", "c", "d", "e", "f", "g"))))
.withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
.withOption(
ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
"http://localhost:1234")
.withOption(
ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION.key(),
"1s")
.build());
}
@Test
public void validateWrongCredential() {
Elasticsearch7DynamicTableFactory factory = new Elasticsearch7DynamicTableFactory();
thrown.expect(ValidationException.class);
thrown.expectMessage(
"'username' and 'password' must be set at the same time. Got: username 'username' and password ''");
factory.createDynamicTableSink(
context()
.withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
.withOption(
ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
"http://localhost:1234")
.withOption(ElasticsearchConnectorOptions.USERNAME_OPTION.key(), "username")
.withOption(ElasticsearchConnectorOptions.PASSWORD_OPTION.key(), "")
.build());
}
}
| 5,713 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7DynamicSinkTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch7.ElasticsearchSink;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.util.TestLogger;
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionRequest;
import org.junit.Test;
import org.mockito.Mockito;
import java.time.ZoneId;
import java.util.List;
import java.util.Optional;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
/** Tests for {@link Elasticsearch7DynamicSink} parameters. */
public class Elasticsearch7DynamicSinkTest extends TestLogger {

    // Field names of the table schema built by createTestSchema().
    private static final String FIELD_KEY = "key";
    private static final String FIELD_FRUIT_NAME = "fruit_name";
    private static final String FIELD_COUNT = "count";
    private static final String FIELD_TS = "ts";

    // Connection coordinates used to assemble the 'hosts' option value.
    private static final String HOSTNAME = "host1";
    private static final int PORT = 1234;
    private static final String SCHEMA = "https";
    private static final String INDEX = "MyIndex";
    private static final String DOC_TYPE = "MyType";
    private static final String USERNAME = "username";
    private static final String PASSWORD = "password";

    /**
     * Verifies that every option populated in {@link #getConfig()} is forwarded to the
     * underlying {@code ElasticsearchSink.Builder} when the sink function is created.
     */
    @Test
    public void testBuilder() {
        final TableSchema schema = createTestSchema();

        BuilderProvider provider = new BuilderProvider();
        final Elasticsearch7DynamicSink testSink =
                new Elasticsearch7DynamicSink(
                        new DummyEncodingFormat(),
                        new Elasticsearch7Configuration(
                                getConfig(), this.getClass().getClassLoader()),
                        schema,
                        ZoneId.systemDefault(),
                        provider);

        // Creating the sink function triggers the builder calls recorded by the spies.
        testSink.getSinkRuntimeProvider(new MockSinkContext()).createSinkFunction();

        verify(provider.builderSpy).setFailureHandler(new DummyFailureHandler());
        verify(provider.builderSpy).setBulkFlushBackoff(true);
        verify(provider.builderSpy)
                .setBulkFlushBackoffType(ElasticsearchSinkBase.FlushBackoffType.EXPONENTIAL);
        verify(provider.builderSpy).setBulkFlushBackoffDelay(123);
        verify(provider.builderSpy).setBulkFlushBackoffRetries(3);
        verify(provider.builderSpy).setBulkFlushInterval(100);
        verify(provider.builderSpy).setBulkFlushMaxActions(1000);
        verify(provider.builderSpy).setBulkFlushMaxSizeMb(1);
        verify(provider.builderSpy)
                .setRestClientFactory(
                        new Elasticsearch7DynamicSink.DefaultRestClientFactory("/myapp"));
        // getConfig() sets 'sink.flush-on-checkpoint' to false, so flushing is disabled.
        verify(provider.sinkSpy).disableFlushOnCheckpoint();
    }

    /**
     * Verifies the builder defaults applied when only the required options
     * (index, document type, hosts) are configured.
     */
    @Test
    public void testDefaultConfig() {
        final TableSchema schema = createTestSchema();
        Configuration configuration = new Configuration();
        configuration.setString(ElasticsearchConnectorOptions.INDEX_OPTION.key(), INDEX);
        configuration.setString(ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), DOC_TYPE);
        configuration.setString(
                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                SCHEMA + "://" + HOSTNAME + ":" + PORT);

        BuilderProvider provider = new BuilderProvider();
        final Elasticsearch7DynamicSink testSink =
                new Elasticsearch7DynamicSink(
                        new DummyEncodingFormat(),
                        new Elasticsearch7Configuration(
                                configuration, this.getClass().getClassLoader()),
                        schema,
                        ZoneId.systemDefault(),
                        provider);

        testSink.getSinkRuntimeProvider(new MockSinkContext()).createSinkFunction();

        verify(provider.builderSpy).setFailureHandler(new NoOpFailureHandler());
        verify(provider.builderSpy).setBulkFlushBackoff(false);
        verify(provider.builderSpy).setBulkFlushInterval(1000);
        verify(provider.builderSpy).setBulkFlushMaxActions(1000);
        verify(provider.builderSpy).setBulkFlushMaxSizeMb(2);
        verify(provider.builderSpy)
                .setRestClientFactory(new Elasticsearch7DynamicSink.DefaultRestClientFactory(null));
        // Flush-on-checkpoint defaults to enabled, so it must never be disabled here.
        verify(provider.sinkSpy, never()).disableFlushOnCheckpoint();
    }

    /**
     * Verifies that configuring username/password switches the sink to the
     * authenticating REST client factory.
     */
    @Test
    public void testAuthConfig() {
        final TableSchema schema = createTestSchema();
        Configuration configuration = new Configuration();
        configuration.setString(ElasticsearchConnectorOptions.INDEX_OPTION.key(), INDEX);
        configuration.setString(ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), DOC_TYPE);
        configuration.setString(
                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                SCHEMA + "://" + HOSTNAME + ":" + PORT);
        configuration.setString(ElasticsearchConnectorOptions.USERNAME_OPTION.key(), USERNAME);
        configuration.setString(ElasticsearchConnectorOptions.PASSWORD_OPTION.key(), PASSWORD);

        BuilderProvider provider = new BuilderProvider();
        final Elasticsearch7DynamicSink testSink =
                new Elasticsearch7DynamicSink(
                        new DummyEncodingFormat(),
                        new Elasticsearch7Configuration(
                                configuration, this.getClass().getClassLoader()),
                        schema,
                        ZoneId.systemDefault(),
                        provider);

        testSink.getSinkRuntimeProvider(new MockSinkContext()).createSinkFunction();

        verify(provider.builderSpy).setFailureHandler(new NoOpFailureHandler());
        verify(provider.builderSpy).setBulkFlushBackoff(false);
        verify(provider.builderSpy).setBulkFlushInterval(1000);
        verify(provider.builderSpy).setBulkFlushMaxActions(1000);
        verify(provider.builderSpy).setBulkFlushMaxSizeMb(2);
        verify(provider.builderSpy)
                .setRestClientFactory(
                        new Elasticsearch7DynamicSink.AuthRestClientFactory(
                                null, USERNAME, PASSWORD));
        verify(provider.sinkSpy, never()).disableFlushOnCheckpoint();
    }

    /**
     * Builds a configuration populating every supported sink option, so that
     * {@link #testBuilder()} can verify each one is propagated to the builder.
     */
    private Configuration getConfig() {
        Configuration configuration = new Configuration();
        configuration.setString(ElasticsearchConnectorOptions.INDEX_OPTION.key(), INDEX);
        configuration.setString(ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), DOC_TYPE);
        configuration.setString(
                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                SCHEMA + "://" + HOSTNAME + ":" + PORT);
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_TYPE_OPTION.key(), "exponential");
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION.key(), "123");
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION.key(), "3");
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLUSH_INTERVAL_OPTION.key(), "100");
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION.key(), "1000");
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLASH_MAX_SIZE_OPTION.key(), "1mb");
        configuration.setString(
                ElasticsearchConnectorOptions.CONNECTION_PATH_PREFIX.key(), "/myapp");
        configuration.setString(
                ElasticsearchConnectorOptions.FAILURE_HANDLER_OPTION.key(),
                DummyFailureHandler.class.getName());
        configuration.setString(
                ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION.key(), "false");
        return configuration;
    }

    /**
     * Intercepts sink construction so tests can verify interactions on both the builder
     * and the built sink through Mockito spies captured in public fields.
     */
    private static class BuilderProvider
            implements Elasticsearch7DynamicSink.ElasticSearchBuilderProvider {
        public ElasticsearchSink.Builder<RowData> builderSpy;
        public ElasticsearchSink<RowData> sinkSpy;

        @Override
        public ElasticsearchSink.Builder<RowData> createBuilder(
                List<HttpHost> httpHosts, RowElasticsearchSinkFunction upsertSinkFunction) {
            builderSpy =
                    Mockito.spy(new ElasticsearchSink.Builder<>(httpHosts, upsertSinkFunction));
            // Wrap the sink produced by build() in a spy so calls on it can be verified too.
            doAnswer(
                            invocation -> {
                                sinkSpy =
                                        Mockito.spy(
                                                (ElasticsearchSink<RowData>)
                                                        invocation.callRealMethod());
                                return sinkSpy;
                            })
                    .when(builderSpy)
                    .build();
            return builderSpy;
        }
    }

    // Four-column schema covering integral, string, decimal and timestamp types.
    private TableSchema createTestSchema() {
        return TableSchema.builder()
                .field(FIELD_KEY, DataTypes.BIGINT())
                .field(FIELD_FRUIT_NAME, DataTypes.STRING())
                .field(FIELD_COUNT, DataTypes.DECIMAL(10, 4))
                .field(FIELD_TS, DataTypes.TIMESTAMP(3))
                .build();
    }

    // Serializer stub: the tests only exercise configuration, never actual encoding.
    private static class DummySerializationSchema implements SerializationSchema<RowData> {

        private static final DummySerializationSchema INSTANCE = new DummySerializationSchema();

        @Override
        public byte[] serialize(RowData element) {
            return new byte[0];
        }
    }

    // Format stub that always hands out the no-op serializer above.
    private static class DummyEncodingFormat
            implements EncodingFormat<SerializationSchema<RowData>> {
        @Override
        public SerializationSchema<RowData> createRuntimeEncoder(
                DynamicTableSink.Context context, DataType consumedDataType) {
            return DummySerializationSchema.INSTANCE;
        }

        @Override
        public ChangelogMode getChangelogMode() {
            return null;
        }
    }

    // Minimal DynamicTableSink.Context; the sink under test does not use its return values.
    private static class MockSinkContext implements DynamicTableSink.Context {
        @Override
        public boolean isBounded() {
            return false;
        }

        @Override
        public TypeInformation<?> createTypeInformation(DataType consumedDataType) {
            return null;
        }

        @Override
        public TypeInformation<?> createTypeInformation(LogicalType consumedLogicalType) {
            return null;
        }

        @Override
        public DynamicTableSink.DataStructureConverter createDataStructureConverter(
                DataType consumedDataType) {
            return null;
        }

        // NOTE(review): unlike the methods above this one carries no @Override -
        // presumably it implements a Context method added in a newer Flink version;
        // confirm against the Flink dependency in use.
        public Optional<int[][]> getTargetColumns() {
            return Optional.empty();
        }
    }

    /** Custom failure handler for testing. */
    public static class DummyFailureHandler implements ActionRequestFailureHandler {

        @Override
        public void onFailure(
                ActionRequest action,
                Throwable failure,
                int restStatusCode,
                RequestIndexer indexer) {
            // do nothing
        }

        // equals/hashCode are type-based so Mockito's verify() can match the instance
        // created from the configured class name.
        @Override
        public boolean equals(Object o) {
            return o instanceof DummyFailureHandler;
        }

        @Override
        public int hashCode() {
            return DummyFailureHandler.class.hashCode();
        }
    }
}
| 5,714 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7DynamicSinkITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.api.common.time.Deadline;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.TestLogger;
import org.apache.http.HttpHost;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.search.SearchHits;
import org.junit.ClassRule;
import org.junit.Test;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.utility.DockerImageName;
import java.time.Duration;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import static org.apache.flink.streaming.connectors.elasticsearch.table.TestContext.context;
import static org.apache.flink.table.api.Expressions.row;
import static org.assertj.core.api.Assertions.assertThat;
/** IT tests for {@link Elasticsearch7DynamicSink}. */
public class Elasticsearch7DynamicSinkITCase extends TestLogger {
@ClassRule
public static ElasticsearchContainer elasticsearchContainer =
new ElasticsearchContainer(DockerImageName.parse(DockerImageVersions.ELASTICSEARCH_7));
@SuppressWarnings("deprecation")
protected final RestHighLevelClient getClient() {
return new RestHighLevelClient(
RestClient.builder(HttpHost.create(elasticsearchContainer.getHttpHostAddress())));
}
    /**
     * Writes a single UPDATE_AFTER row through a sink created directly from the table
     * factory and checks the resulting Elasticsearch document field by field.
     */
    @Test
    public void testWritingDocuments() throws Exception {
        ResolvedSchema schema =
                new ResolvedSchema(
                        Arrays.asList(
                                Column.physical("a", DataTypes.BIGINT().notNull()),
                                Column.physical("b", DataTypes.TIME()),
                                Column.physical("c", DataTypes.STRING().notNull()),
                                Column.physical("d", DataTypes.FLOAT()),
                                Column.physical("e", DataTypes.TINYINT().notNull()),
                                Column.physical("f", DataTypes.DATE()),
                                Column.physical("g", DataTypes.TIMESTAMP().notNull())),
                        Collections.emptyList(),
                        // Primary key (a, g) determines the document id (see the
                        // GetRequest id "1_2012-12-12T12:12:12" below).
                        UniqueConstraint.primaryKey("name", Arrays.asList("a", "g")));
        GenericRowData rowData =
                GenericRowData.of(
                        1L,
                        12345,
                        StringData.fromString("ABCDE"),
                        12.12f,
                        (byte) 2,
                        12345,
                        TimestampData.fromLocalDateTime(
                                LocalDateTime.parse("2012-12-12T12:12:12")));

        String index = "writing-documents";
        Elasticsearch7DynamicTableFactory factory = new Elasticsearch7DynamicTableFactory();
        SinkFunctionProvider sinkRuntimeProvider =
                (SinkFunctionProvider)
                        factory.createDynamicTableSink(
                                        context()
                                                .withSchema(schema)
                                                .withOption(
                                                        ElasticsearchConnectorOptions.INDEX_OPTION
                                                                .key(),
                                                        index)
                                                .withOption(
                                                        ElasticsearchConnectorOptions.HOSTS_OPTION
                                                                .key(),
                                                        elasticsearchContainer.getHttpHostAddress())
                                                .withOption(
                                                        ElasticsearchConnectorOptions
                                                                .FLUSH_ON_CHECKPOINT_OPTION
                                                                .key(),
                                                        "false")
                                                .build())
                                .getSinkRuntimeProvider(new MockContext());

        SinkFunction<RowData> sinkFunction = sinkRuntimeProvider.createSinkFunction();
        StreamExecutionEnvironment environment =
                StreamExecutionEnvironment.getExecutionEnvironment();
        environment.setParallelism(4);

        rowData.setRowKind(RowKind.UPDATE_AFTER);
        environment.<RowData>fromElements(rowData).addSink(sinkFunction);
        environment.execute();

        // NOTE(review): the client is never closed in this test - consider
        // try-with-resources if connection leaks become a problem.
        RestHighLevelClient client = getClient();
        Map<String, Object> response =
                client.get(new GetRequest(index, "1_2012-12-12T12:12:12"), RequestOptions.DEFAULT)
                        .getSource();
        Map<Object, Object> expectedMap = new HashMap<>();
        expectedMap.put("a", 1);
        // TIME value 12345 (ms of day) is rendered as "00:00:12" in the document.
        expectedMap.put("b", "00:00:12");
        expectedMap.put("c", "ABCDE");
        expectedMap.put("d", 12.12d);
        expectedMap.put("e", 2);
        // DATE value 12345 (days since epoch) corresponds to 2003-10-20.
        expectedMap.put("f", "2003-10-20");
        expectedMap.put("g", "2012-12-12 12:12:12");
        assertThat(response).isEqualTo(expectedMap);
    }
@Test
public void testWritingDocumentsFromTableApi() throws Exception {
    // End-to-end: declare the ES sink with SQL DDL, insert one row through the
    // Table API, then fetch the document back with the plain REST client.
    TableEnvironment tableEnvironment =
            TableEnvironment.create(EnvironmentSettings.inStreamingMode());

    String index = "table-api";
    tableEnvironment.executeSql(
            "CREATE TABLE esTable ("
                    + "a BIGINT NOT NULL,\n"
                    + "b TIME,\n"
                    + "c STRING NOT NULL,\n"
                    + "d FLOAT,\n"
                    + "e TINYINT NOT NULL,\n"
                    + "f DATE,\n"
                    + "g TIMESTAMP NOT NULL,"
                    // Computed column: not physical, so it must not appear in the
                    // written document (the expected map below has no "h" entry).
                    + "h as a + 2,\n"
                    + "PRIMARY KEY (a, g) NOT ENFORCED\n"
                    + ")\n"
                    + "WITH (\n"
                    + String.format("'%s'='%s',\n", "connector", "elasticsearch-7")
                    + String.format(
                            "'%s'='%s',\n",
                            ElasticsearchConnectorOptions.INDEX_OPTION.key(), index)
                    + String.format(
                            "'%s'='%s',\n",
                            ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                            elasticsearchContainer.getHttpHostAddress())
                    // Flush eagerly rather than on checkpoints so the insert is
                    // visible as soon as executeInsert(...).await() returns.
                    + String.format(
                            "'%s'='%s'\n",
                            ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION.key(),
                            "false")
                    + ")");

    tableEnvironment
            .fromValues(
                    row(
                            1L,
                            LocalTime.ofNanoOfDay(12345L * 1_000_000L), // 00:00:12.345
                            "ABCDE",
                            12.12f,
                            (byte) 2,
                            LocalDate.ofEpochDay(12345), // 2003-10-20
                            LocalDateTime.parse("2012-12-12T12:12:12")))
            .executeInsert("esTable")
            .await();

    // The document id is derived from the PRIMARY KEY fields (a, g) joined by "_".
    RestHighLevelClient client = getClient();
    Map<String, Object> response =
            client.get(new GetRequest(index, "1_2012-12-12T12:12:12"), RequestOptions.DEFAULT)
                    .getSource();
    Map<Object, Object> expectedMap = new HashMap<>();
    expectedMap.put("a", 1);
    expectedMap.put("b", "00:00:12");
    expectedMap.put("c", "ABCDE");
    expectedMap.put("d", 12.12d);
    expectedMap.put("e", 2);
    expectedMap.put("f", "2003-10-20");
    expectedMap.put("g", "2012-12-12 12:12:12");
    assertThat(response).isEqualTo(expectedMap);
}
@Test
public void testWritingDocumentsNoPrimaryKey() throws Exception {
    // Without a PRIMARY KEY the sink appends documents with Elasticsearch-generated
    // ids, so the results are collected through a search instead of id lookups.
    TableEnvironment tableEnvironment =
            TableEnvironment.create(EnvironmentSettings.inStreamingMode());

    String index = "no-primary-key";
    tableEnvironment.executeSql(
            "CREATE TABLE esTable ("
                    + "a BIGINT NOT NULL,\n"
                    + "b TIME,\n"
                    + "c STRING NOT NULL,\n"
                    + "d FLOAT,\n"
                    + "e TINYINT NOT NULL,\n"
                    + "f DATE,\n"
                    + "g TIMESTAMP NOT NULL\n"
                    + ")\n"
                    + "WITH (\n"
                    + String.format("'%s'='%s',\n", "connector", "elasticsearch-7")
                    + String.format(
                            "'%s'='%s',\n",
                            ElasticsearchConnectorOptions.INDEX_OPTION.key(), index)
                    + String.format(
                            "'%s'='%s',\n",
                            ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                            elasticsearchContainer.getHttpHostAddress())
                    // Flush eagerly so both rows are indexed once the insert completes.
                    + String.format(
                            "'%s'='%s'\n",
                            ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION.key(),
                            "false")
                    + ")");

    tableEnvironment
            .fromValues(
                    row(
                            1L,
                            LocalTime.ofNanoOfDay(12345L * 1_000_000L),
                            "ABCDE",
                            12.12f,
                            (byte) 2,
                            LocalDate.ofEpochDay(12345),
                            LocalDateTime.parse("2012-12-12T12:12:12")),
                    row(
                            2L,
                            LocalTime.ofNanoOfDay(12345L * 1_000_000L),
                            "FGHIJK",
                            13.13f,
                            (byte) 4,
                            LocalDate.ofEpochDay(12345),
                            LocalDateTime.parse("2013-12-12T13:13:13")))
            .executeInsert("esTable")
            .await();

    RestHighLevelClient client = getClient();

    // search API does not return documents that were not indexed, we might need to query
    // the index a few times
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(30));
    SearchHits hits;
    do {
        hits = client.search(new SearchRequest(index), RequestOptions.DEFAULT).getHits();
        if (hits.getTotalHits().value < 2) {
            Thread.sleep(200); // back off briefly before polling again
        }
    } while (hits.getTotalHits().value < 2 && deadline.hasTimeLeft());

    if (hits.getTotalHits().value < 2) {
        throw new AssertionError("Could not retrieve results from Elasticsearch.");
    }

    // Hit order is unspecified, so compare the two documents as a set.
    HashSet<Map<String, Object>> resultSet = new HashSet<>();
    resultSet.add(hits.getAt(0).getSourceAsMap());
    resultSet.add(hits.getAt(1).getSourceAsMap());
    Map<Object, Object> expectedMap1 = new HashMap<>();
    expectedMap1.put("a", 1);
    expectedMap1.put("b", "00:00:12");
    expectedMap1.put("c", "ABCDE");
    expectedMap1.put("d", 12.12d);
    expectedMap1.put("e", 2);
    expectedMap1.put("f", "2003-10-20");
    expectedMap1.put("g", "2012-12-12 12:12:12");
    Map<Object, Object> expectedMap2 = new HashMap<>();
    expectedMap2.put("a", 2);
    expectedMap2.put("b", "00:00:12");
    expectedMap2.put("c", "FGHIJK");
    expectedMap2.put("d", 13.13d);
    expectedMap2.put("e", 4);
    expectedMap2.put("f", "2003-10-20");
    expectedMap2.put("g", "2013-12-12 13:13:13");
    HashSet<Map<Object, Object>> expectedSet = new HashSet<>();
    expectedSet.add(expectedMap1);
    expectedSet.add(expectedMap2);
    assertThat(resultSet).isEqualTo(expectedSet);
}
@Test
public void testWritingDocumentsWithDynamicIndex() throws Exception {
    // The index option carries the pattern {b|yyyy-MM-dd}: the target index name is
    // derived per record from field "b" formatted with that date pattern, which is
    // why the document is looked up under "dynamic-index-2012-12-12" below.
    TableEnvironment tableEnvironment =
            TableEnvironment.create(EnvironmentSettings.inStreamingMode());

    String index = "dynamic-index-{b|yyyy-MM-dd}";
    tableEnvironment.executeSql(
            "CREATE TABLE esTable ("
                    + "a BIGINT NOT NULL,\n"
                    + "b TIMESTAMP NOT NULL,\n"
                    + "PRIMARY KEY (a) NOT ENFORCED\n"
                    + ")\n"
                    + "WITH (\n"
                    + String.format("'%s'='%s',\n", "connector", "elasticsearch-7")
                    + String.format(
                            "'%s'='%s',\n",
                            ElasticsearchConnectorOptions.INDEX_OPTION.key(), index)
                    + String.format(
                            "'%s'='%s',\n",
                            ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                            elasticsearchContainer.getHttpHostAddress())
                    // Flush eagerly so the row is visible right after the insert.
                    + String.format(
                            "'%s'='%s'\n",
                            ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION.key(),
                            "false")
                    + ")");

    tableEnvironment
            .fromValues(row(1L, LocalDateTime.parse("2012-12-12T12:12:12")))
            .executeInsert("esTable")
            .await();

    // Single-field PRIMARY KEY (a), so the document id is just "1".
    RestHighLevelClient client = getClient();
    Map<String, Object> response =
            client.get(new GetRequest("dynamic-index-2012-12-12", "1"), RequestOptions.DEFAULT)
                    .getSource();
    Map<Object, Object> expectedMap = new HashMap<>();
    expectedMap.put("a", 1);
    expectedMap.put("b", "2012-12-12 12:12:12");
    assertThat(response).isEqualTo(expectedMap);
}
/**
 * Minimal {@link DynamicTableSink.Context} stub used to obtain a sink runtime provider in
 * tests; all members return fixed or {@code null} values because the sink under test does
 * not use them.
 */
private static class MockContext implements DynamicTableSink.Context {
    @Override
    public boolean isBounded() {
        // Treat the pipeline as unbounded/streaming.
        return false;
    }

    @Override
    public TypeInformation<?> createTypeInformation(DataType consumedDataType) {
        return null;
    }

    @Override
    public TypeInformation<?> createTypeInformation(LogicalType consumedLogicalType) {
        return null;
    }

    @Override
    public DynamicTableSink.DataStructureConverter createDataStructureConverter(
            DataType consumedDataType) {
        return null;
    }

    // NOTE(review): intentionally no @Override here — presumably getTargetColumns() is
    // not declared on DynamicTableSink.Context in all supported Flink versions; confirm.
    public Optional<int[][]> getTargetColumns() {
        return Optional.empty();
    }
}
}
| 5,715 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/architecture/ProductionCodeArchitectureTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.architecture;
import org.apache.flink.architecture.common.ImportOptions;
import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;
/** Architecture tests for the production (non-test) code of this connector. */
@AnalyzeClasses(
        packages = "org.apache.flink.connector",
        importOptions = {
            // Analyze only production classes; exclude tests, archives, Scala and
            // shaded artifacts from the imported classpath.
            ImportOption.DoNotIncludeTests.class,
            ImportOption.DoNotIncludeArchives.class,
            ImportOptions.ExcludeScalaImportOption.class,
            ImportOptions.ExcludeShadedImportOption.class
        })
public class ProductionCodeArchitectureTest {

    // Runs the shared production-code rule set from ProductionCodeArchitectureBase.
    @ArchTest
    public static final ArchTests COMMON_TESTS = ArchTests.in(ProductionCodeArchitectureBase.class);
}
| 5,716 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.architecture;
import org.apache.flink.architecture.common.ImportOptions;
import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.core.importer.Location;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;
import java.util.regex.Pattern;
/** Architecture tests for test code. */
@AnalyzeClasses(
        packages = {
            "org.apache.flink.connector.elasticsearch",
            "org.apache.flink.streaming.connectors.elasticsearch7"
        },
        importOptions = {
            // Analyze only test classes, and only those on ES7-related locations.
            ImportOption.OnlyIncludeTests.class,
            TestCodeArchitectureTest.IncludeES7ImportOption.class,
            ImportOptions.ExcludeScalaImportOption.class,
            ImportOptions.ExcludeShadedImportOption.class
        })
public class TestCodeArchitectureTest {

    // Runs the shared test-code rule set from TestCodeArchitectureTestBase.
    @ArchTest
    public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class);

    /** Only include ES7 related locations. */
    public static final class IncludeES7ImportOption implements ImportOption {
        // Compiled once; matches any class location whose path contains "elasticsearch7".
        private static final Pattern ES7 = Pattern.compile(".*elasticsearch7.*");

        @Override
        public boolean includes(Location location) {
            return location.matches(ES7);
        }
    }
}
| 5,717 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch/sink/Elasticsearch7SinkBuilderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.http.HttpHost;
/** Unit tests for {@link Elasticsearch7SinkBuilder}. */
class Elasticsearch7SinkBuilderTest
        extends ElasticsearchSinkBuilderBaseTest<Elasticsearch7SinkBuilder<Object>> {

    @Override
    Elasticsearch7SinkBuilder<Object> createEmptyBuilder() {
        // A builder with no configuration applied at all.
        return new Elasticsearch7SinkBuilder<>();
    }

    @Override
    Elasticsearch7SinkBuilder<Object> createMinimalBuilder() {
        // Only the two mandatory settings: at least one host and a (no-op) emitter.
        return new Elasticsearch7SinkBuilder<>()
                .setHosts(new HttpHost("localhost:3000"))
                .setEmitter((element, indexer, context) -> {});
    }
}
| 5,718 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch/sink/Elasticsearch7SinkITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.elasticsearch.ElasticsearchUtil;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.elasticsearch.client.RestHighLevelClient;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
/** Tests for {@link ElasticsearchSink}. */
@Testcontainers
class Elasticsearch7SinkITCase extends ElasticsearchSinkBaseITCase {

    // Elasticsearch 7 test container, secured with the password inherited from the
    // base test class so the shared test client can authenticate.
    @Container
    private static final ElasticsearchContainer ES_CONTAINER =
            ElasticsearchUtil.createElasticsearchContainer(DockerImageVersions.ELASTICSEARCH_7, LOG)
                    .withPassword(ELASTICSEARCH_PASSWORD);

    @Override
    String getElasticsearchHttpHostAddress() {
        return ES_CONTAINER.getHttpHostAddress();
    }

    @Override
    TestClientBase createTestClient(RestHighLevelClient client) {
        return new Elasticsearch7TestClient(client);
    }

    @Override
    ElasticsearchSinkBuilderBase<Tuple2<Integer, String>, ? extends ElasticsearchSinkBuilderBase>
            getSinkBuilder() {
        return new Elasticsearch7SinkBuilder<>();
    }
}
| 5,719 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch/sink/Elasticsearch7TestClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import java.io.IOException;
/** {@link TestClientBase} implementation that performs GET lookups against Elasticsearch 7. */
class Elasticsearch7TestClient extends TestClientBase {

    Elasticsearch7TestClient(RestHighLevelClient client) {
        super(client);
    }

    @Override
    GetResponse getResponse(String index, int id) throws IOException {
        // Document ids are stored as strings, so render the numeric id first.
        GetRequest request = new GetRequest(index, String.valueOf(id));
        return client.get(request, RequestOptions.DEFAULT);
    }
}
| 5,720 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch7DynamicTableFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.table.api.ValidationException;
import org.junit.jupiter.api.Test;
import static org.apache.flink.connector.elasticsearch.table.TestContext.context;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
/** Tests for validation in {@link Elasticsearch7DynamicSinkFactory}. */
public class Elasticsearch7DynamicTableFactoryTest extends ElasticsearchDynamicSinkFactoryBaseTest {

    @Override
    ElasticsearchDynamicSinkFactoryBase createSinkFactory() {
        return new Elasticsearch7DynamicSinkFactory();
    }

    @Override
    TestContext createPrefilledTestContext() {
        // Pre-populate the two options that are mandatory for the ES7 sink.
        TestContext prefilled =
                context().withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex");
        return prefilled.withOption(
                ElasticsearchConnectorOptions.HOSTS_OPTION.key(), "http://localhost:12345");
    }

    @Test
    public void validateEmptyConfiguration() {
        // With no options set at all, validation must report both missing required keys.
        ElasticsearchDynamicSinkFactoryBase factory = createSinkFactory();
        String expectedError =
                "One or more required options are missing.\n"
                        + "\n"
                        + "Missing required options are:\n"
                        + "\n"
                        + "hosts\n"
                        + "index";
        assertThatThrownBy(() -> factory.createDynamicTableSink(context().build()))
                .isInstanceOf(ValidationException.class)
                .hasMessage(expectedError);
    }
}
| 5,721 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch7DynamicSinkITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.connector.elasticsearch.ElasticsearchUtil;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.search.SearchHits;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import java.io.IOException;
import java.util.Map;
import static org.apache.flink.connector.elasticsearch.table.TestContext.context;
/** IT tests for {@link ElasticsearchDynamicSink} against an Elasticsearch 7 container. */
@Testcontainers
public class Elasticsearch7DynamicSinkITCase extends ElasticsearchDynamicSinkBaseITCase {

    private static final Logger LOG =
            LoggerFactory.getLogger(Elasticsearch7DynamicSinkITCase.class);

    @Container
    private static final ElasticsearchContainer ES_CONTAINER =
            ElasticsearchUtil.createElasticsearchContainer(
                    DockerImageVersions.ELASTICSEARCH_7, LOG);

    @Override
    String getElasticsearchHttpHostAddress() {
        return ES_CONTAINER.getHttpHostAddress();
    }

    @Override
    ElasticsearchDynamicSinkFactoryBase getDynamicSinkFactory() {
        return new Elasticsearch7DynamicSinkFactory();
    }

    @Override
    Map<String, Object> makeGetRequest(RestHighLevelClient client, String index, String id)
            throws IOException {
        GetRequest request = new GetRequest(index, id);
        return client.get(request, RequestOptions.DEFAULT).getSource();
    }

    @Override
    SearchHits makeSearchRequest(RestHighLevelClient client, String index) throws IOException {
        SearchRequest request = new SearchRequest(index);
        return client.search(request, RequestOptions.DEFAULT).getHits();
    }

    @Override
    long getTotalSearchHits(SearchHits searchHits) {
        return searchHits.getTotalHits().value;
    }

    @Override
    TestContext getPrefilledTestContext(String index) {
        // Pre-populate the mandatory index and hosts options for the running container.
        TestContext prefilled =
                context().withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), index);
        return prefilled.withOption(
                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                ES_CONTAINER.getHttpHostAddress());
    }

    @Override
    String getConnectorSql(String index) {
        // The last WITH option must not carry a trailing comma.
        String connectorOption = String.format("'%s'='%s',\n", "connector", "elasticsearch-7");
        String indexOption =
                String.format(
                        "'%s'='%s',\n", ElasticsearchConnectorOptions.INDEX_OPTION.key(), index);
        String hostsOption =
                String.format(
                        "'%s'='%s'\n",
                        ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                        ES_CONTAINER.getHttpHostAddress());
        return connectorOption + indexOption + hostsOption;
    }
}
| 5,722 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch7/ElasticsearchSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch7;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
import org.apache.flink.util.Preconditions;
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.client.RestHighLevelClient;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
 * Elasticsearch 7.x sink that requests multiple {@link ActionRequest ActionRequests} against a
 * cluster for each incoming element.
 *
 * <p>The sink internally uses a {@link RestHighLevelClient} to communicate with an Elasticsearch
 * cluster. The sink will fail if no cluster can be connected to using the provided transport
 * addresses passed to the constructor.
 *
 * <p>Internally, the sink will use a {@link BulkProcessor} to send {@link ActionRequest
 * ActionRequests}. This will buffer elements before sending a request to the cluster. The behaviour
 * of the {@code BulkProcessor} can be configured using these config keys:
 *
 * <ul>
 *   <li>{@code bulk.flush.max.actions}: Maximum amount of elements to buffer
 *   <li>{@code bulk.flush.max.size.mb}: Maximum amount of data (in megabytes) to buffer
 *   <li>{@code bulk.flush.interval.ms}: Interval at which to flush data regardless of the other two
 *       settings in milliseconds
 * </ul>
 *
 * <p>You also have to provide an {@link ElasticsearchSinkFunction}. This is used to create multiple
 * {@link ActionRequest ActionRequests} for each incoming element. See the class level documentation
 * of {@link ElasticsearchSinkFunction} for an example.
 *
 * @param <T> Type of the elements handled by this sink
 * @deprecated This sink has been deprecated in favor of {@link
 *     org.apache.flink.connector.elasticsearch.sink.ElasticsearchSink}
 */
@Deprecated
@PublicEvolving
public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T, RestHighLevelClient> {

    private static final long serialVersionUID = 1L;

    private ElasticsearchSink(
            Map<String, String> bulkRequestsConfig,
            List<HttpHost> httpHosts,
            ElasticsearchSinkFunction<T> elasticsearchSinkFunction,
            ActionRequestFailureHandler failureHandler,
            RestClientFactory restClientFactory) {
        super(
                new Elasticsearch7ApiCallBridge(httpHosts, restClientFactory),
                bulkRequestsConfig,
                elasticsearchSinkFunction,
                failureHandler);
    }

    /**
     * A builder for creating an {@link ElasticsearchSink}.
     *
     * @param <T> Type of the elements handled by the sink this builder creates.
     * @deprecated This has been deprecated, please use {@link
     *     org.apache.flink.connector.elasticsearch.sink.Elasticsearch7SinkBuilder}.
     */
    @Deprecated
    @PublicEvolving
    public static class Builder<T> {

        private final List<HttpHost> httpHosts;
        private final ElasticsearchSinkFunction<T> elasticsearchSinkFunction;

        // Only mutated in place via put(...) by the setters below, never reassigned.
        private final Map<String, String> bulkRequestsConfig = new HashMap<>();
        private ActionRequestFailureHandler failureHandler = new NoOpFailureHandler();
        private RestClientFactory restClientFactory = restClientBuilder -> {};

        /**
         * Creates a new {@code ElasticsearchSink} that connects to the cluster using a {@link
         * RestHighLevelClient}.
         *
         * @param httpHosts The list of {@link HttpHost} to which the {@link RestHighLevelClient}
         *     connects to.
         * @param elasticsearchSinkFunction This is used to generate multiple {@link ActionRequest}
         *     from the incoming element.
         */
        public Builder(
                List<HttpHost> httpHosts, ElasticsearchSinkFunction<T> elasticsearchSinkFunction) {
            this.httpHosts = Preconditions.checkNotNull(httpHosts);
            this.elasticsearchSinkFunction = Preconditions.checkNotNull(elasticsearchSinkFunction);
        }

        /**
         * Sets the maximum number of actions to buffer for each bulk request. You can pass -1 to
         * disable it.
         *
         * @param numMaxActions the maximum number of actions to buffer per bulk request.
         */
        public void setBulkFlushMaxActions(int numMaxActions) {
            Preconditions.checkArgument(
                    numMaxActions == -1 || numMaxActions > 0,
                    "Max number of buffered actions must be larger than 0.");
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, String.valueOf(numMaxActions));
        }

        /**
         * Sets the maximum size of buffered actions, in mb, per bulk request. You can pass -1 to
         * disable it.
         *
         * @param maxSizeMb the maximum size of buffered actions, in mb.
         */
        public void setBulkFlushMaxSizeMb(int maxSizeMb) {
            Preconditions.checkArgument(
                    maxSizeMb == -1 || maxSizeMb > 0,
                    "Max size of buffered actions must be larger than 0.");
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB, String.valueOf(maxSizeMb));
        }

        /**
         * Sets the bulk flush interval, in milliseconds. You can pass -1 to disable it.
         *
         * @param intervalMillis the bulk flush interval, in milliseconds.
         */
        public void setBulkFlushInterval(long intervalMillis) {
            Preconditions.checkArgument(
                    intervalMillis == -1 || intervalMillis >= 0,
                    "Interval (in milliseconds) between each flush must be larger than or equal to 0.");
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_INTERVAL_MS, String.valueOf(intervalMillis));
        }

        /**
         * Sets whether or not to enable bulk flush backoff behaviour.
         *
         * @param enabled whether or not to enable backoffs.
         */
        public void setBulkFlushBackoff(boolean enabled) {
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE, String.valueOf(enabled));
        }

        /**
         * Sets the type of back of to use when flushing bulk requests.
         *
         * @param flushBackoffType the backoff type to use.
         */
        public void setBulkFlushBackoffType(FlushBackoffType flushBackoffType) {
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE,
                    Preconditions.checkNotNull(flushBackoffType).toString());
        }

        /**
         * Sets the maximum number of retries for a backoff attempt when flushing bulk requests.
         *
         * @param maxRetries the maximum number of retries for a backoff attempt when flushing bulk
         *     requests
         */
        public void setBulkFlushBackoffRetries(int maxRetries) {
            Preconditions.checkArgument(
                    maxRetries > 0, "Max number of backoff attempts must be larger than 0.");
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES, String.valueOf(maxRetries));
        }

        /**
         * Sets the amount of delay between each backoff attempt when flushing bulk requests, in
         * milliseconds.
         *
         * @param delayMillis the amount of delay between each backoff attempt when flushing bulk
         *     requests, in milliseconds.
         */
        public void setBulkFlushBackoffDelay(long delayMillis) {
            Preconditions.checkArgument(
                    delayMillis >= 0,
                    "Delay (in milliseconds) between each backoff attempt must be larger than or equal to 0.");
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY, String.valueOf(delayMillis));
        }

        /**
         * Sets a failure handler for action requests.
         *
         * @param failureHandler This is used to handle failed {@link ActionRequest}.
         */
        public void setFailureHandler(ActionRequestFailureHandler failureHandler) {
            this.failureHandler = Preconditions.checkNotNull(failureHandler);
        }

        /**
         * Sets a REST client factory for custom client configuration.
         *
         * @param restClientFactory the factory that configures the rest client.
         */
        public void setRestClientFactory(RestClientFactory restClientFactory) {
            this.restClientFactory = Preconditions.checkNotNull(restClientFactory);
        }

        /**
         * Creates the Elasticsearch sink.
         *
         * @return the created Elasticsearch sink.
         */
        public ElasticsearchSink<T> build() {
            return new ElasticsearchSink<>(
                    bulkRequestsConfig,
                    httpHosts,
                    elasticsearchSinkFunction,
                    failureHandler,
                    restClientFactory);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            Builder<?> builder = (Builder<?>) o;
            return Objects.equals(httpHosts, builder.httpHosts)
                    && Objects.equals(elasticsearchSinkFunction, builder.elasticsearchSinkFunction)
                    && Objects.equals(bulkRequestsConfig, builder.bulkRequestsConfig)
                    && Objects.equals(failureHandler, builder.failureHandler)
                    && Objects.equals(restClientFactory, builder.restClientFactory);
        }

        @Override
        public int hashCode() {
            return Objects.hash(
                    httpHosts,
                    elasticsearchSinkFunction,
                    bulkRequestsConfig,
                    failureHandler,
                    restClientFactory);
        }
    }
}
| 5,723 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch7/Elasticsearch7ApiCallBridge.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch7;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchApiCallBridge;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.apache.flink.util.Preconditions;
import org.apache.http.HttpHost;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.SearchHit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Stream;
/** Implementation of {@link ElasticsearchApiCallBridge} for Elasticsearch 7 and later versions. */
@Internal
public class Elasticsearch7ApiCallBridge
        implements ElasticsearchApiCallBridge<RestHighLevelClient> {

    private static final long serialVersionUID = -5222683870097809633L;

    private static final Logger LOG = LoggerFactory.getLogger(Elasticsearch7ApiCallBridge.class);

    /** User-provided HTTP Hosts. */
    private final List<HttpHost> httpHosts;

    /** The factory to configure the rest client. */
    private final RestClientFactory restClientFactory;

    /**
     * Creates a call bridge for the given cluster hosts.
     *
     * @param httpHosts the Elasticsearch hosts to connect to; must be non-null and non-empty.
     * @param restClientFactory factory used to customize the {@link RestClientBuilder}; must not
     *     be null.
     */
    public Elasticsearch7ApiCallBridge(
            List<HttpHost> httpHosts, RestClientFactory restClientFactory) {
        Preconditions.checkArgument(httpHosts != null && !httpHosts.isEmpty());
        this.httpHosts = httpHosts;
        this.restClientFactory = Preconditions.checkNotNull(restClientFactory);
    }

    @Override
    public RestHighLevelClient createClient() {
        // toArray(new HttpHost[0]) is the idiomatic (and on modern JVMs fastest) form.
        RestClientBuilder builder = RestClient.builder(httpHosts.toArray(new HttpHost[0]));
        restClientFactory.configureRestClientBuilder(builder);
        return new RestHighLevelClient(builder);
    }

    @Override
    public BulkProcessor.Builder createBulkProcessorBuilder(
            RestHighLevelClient client, BulkProcessor.Listener listener) {
        return BulkProcessor.builder(
                (request, bulkListener) ->
                        client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
                listener);
    }

    /**
     * Executes a search and returns the scroll id together with the hits' JSON sources.
     *
     * @throws IOException if the request to the cluster fails.
     */
    @Override
    public Tuple2<String, String[]> search(RestHighLevelClient client, SearchRequest searchRequest)
            throws IOException {
        SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);
        SearchHit[] searchHits = searchResponse.getHits().getHits();
        return new Tuple2<>(
                searchResponse.getScrollId(),
                Stream.of(searchHits).map(SearchHit::getSourceAsString).toArray(String[]::new));
    }

    @Override
    public void close(RestHighLevelClient client) throws IOException {
        client.close();
    }

    /** Returns the failure cause of a bulk item, or {@code null} if the item succeeded. */
    @Override
    public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
        if (!bulkItemResponse.isFailed()) {
            return null;
        } else {
            return bulkItemResponse.getFailure().getCause();
        }
    }

    @Override
    public void configureBulkProcessorFlushInterval(
            BulkProcessor.Builder builder, long flushIntervalMillis) {
        builder.setFlushInterval(TimeValue.timeValueMillis(flushIntervalMillis));
    }

    @Override
    public void configureBulkProcessorBackoff(
            BulkProcessor.Builder builder,
            @Nullable ElasticsearchSinkBase.BulkFlushBackoffPolicy flushBackoffPolicy) {
        BackoffPolicy backoffPolicy;
        if (flushBackoffPolicy != null) {
            switch (flushBackoffPolicy.getBackoffType()) {
                case CONSTANT:
                    backoffPolicy =
                            BackoffPolicy.constantBackoff(
                                    new TimeValue(flushBackoffPolicy.getDelayMillis()),
                                    flushBackoffPolicy.getMaxRetryCount());
                    break;
                case EXPONENTIAL:
                default:
                    // EXPONENTIAL deliberately shares the default arm.
                    backoffPolicy =
                            BackoffPolicy.exponentialBackoff(
                                    new TimeValue(flushBackoffPolicy.getDelayMillis()),
                                    flushBackoffPolicy.getMaxRetryCount());
            }
        } else {
            backoffPolicy = BackoffPolicy.noBackoff();
        }
        builder.setBackoffPolicy(backoffPolicy);
    }

    @Override
    public RequestIndexer createBulkProcessorIndexer(
            BulkProcessor bulkProcessor,
            boolean flushOnCheckpoint,
            AtomicLong numPendingRequestsRef) {
        return new Elasticsearch7BulkProcessorIndexer(
                bulkProcessor, flushOnCheckpoint, numPendingRequestsRef);
    }

    /**
     * Pings the cluster and fails fast if no node is reachable.
     *
     * @throws IOException if the ping request itself fails.
     * @throws RuntimeException if the cluster responds but no node is reachable.
     */
    @Override
    public void verifyClientConnection(RestHighLevelClient client) throws IOException {
        if (LOG.isInfoEnabled()) {
            LOG.info("Pinging Elasticsearch cluster via hosts {} ...", httpHosts);
        }
        if (!client.ping(RequestOptions.DEFAULT)) {
            throw new RuntimeException("There are no reachable Elasticsearch nodes!");
        }
        if (LOG.isInfoEnabled()) {
            LOG.info("Elasticsearch RestHighLevelClient is connected to {}", httpHosts.toString());
        }
    }
}
| 5,724 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch7/Elasticsearch7BulkProcessorIndexer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch7;
import org.apache.flink.annotation.Internal;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import java.util.concurrent.atomic.AtomicLong;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * Implementation of a {@link RequestIndexer}, using a {@link BulkProcessor}. {@link ActionRequest
 * ActionRequests} will be buffered before sending a bulk request to the Elasticsearch cluster.
 *
 * <p>Note: This class is binary compatible to Elasticsearch 7.
 */
@Internal
class Elasticsearch7BulkProcessorIndexer implements RequestIndexer {

    private final BulkProcessor bulkProcessor;
    private final boolean flushOnCheckpoint;
    private final AtomicLong numPendingRequestsRef;

    Elasticsearch7BulkProcessorIndexer(
            BulkProcessor bulkProcessor,
            boolean flushOnCheckpoint,
            AtomicLong numPendingRequestsRef) {
        this.bulkProcessor = checkNotNull(bulkProcessor);
        this.flushOnCheckpoint = flushOnCheckpoint;
        this.numPendingRequestsRef = checkNotNull(numPendingRequestsRef);
    }

    /**
     * Bumps the shared pending-request counter when flush-on-checkpoint is enabled, so that
     * checkpoints wait for in-flight bulk requests.
     */
    private void trackPendingRequest() {
        if (flushOnCheckpoint) {
            numPendingRequestsRef.getAndIncrement();
        }
    }

    @Override
    public void add(DeleteRequest... deleteRequests) {
        for (DeleteRequest request : deleteRequests) {
            trackPendingRequest();
            this.bulkProcessor.add(request);
        }
    }

    @Override
    public void add(IndexRequest... indexRequests) {
        for (IndexRequest request : indexRequests) {
            trackPendingRequest();
            this.bulkProcessor.add(request);
        }
    }

    @Override
    public void add(UpdateRequest... updateRequests) {
        for (UpdateRequest request : updateRequests) {
            trackPendingRequest();
            this.bulkProcessor.add(request);
        }
    }
}
| 5,725 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch7/RestClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch7;
import org.apache.flink.annotation.PublicEvolving;
import org.elasticsearch.client.RestClientBuilder;
import java.io.Serializable;
/**
 * A factory that is used to configure the {@link org.elasticsearch.client.RestHighLevelClient}
 * internally used in the {@link ElasticsearchSink}.
 *
 * <p>Implementations must be serializable, since instances are shipped with the sink to the
 * cluster ({@code Serializable} is part of this interface's contract).
 *
 * @deprecated This has been deprecated and will be removed in the future.
 */
@Deprecated
@PublicEvolving
public interface RestClientFactory extends Serializable {

    /**
     * Configures the rest client builder.
     *
     * @param restClientBuilder the configured rest client builder.
     */
    void configureRestClientBuilder(RestClientBuilder restClientBuilder);
}
| 5,726 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7DynamicTableFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.api.config.TableConfigOptions;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.lookup.LookupOptions;
import org.apache.flink.table.connector.source.lookup.cache.DefaultLookupCache;
import org.apache.flink.table.connector.source.lookup.cache.LookupCache;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.factories.DeserializationFormatFactory;
import org.apache.flink.table.factories.DynamicTableFactory;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.factories.SerializationFormatFactory;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.utils.TableSchemaUtils;
import org.apache.flink.util.StringUtils;
import javax.annotation.Nullable;
import java.time.ZoneId;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLASH_MAX_SIZE_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_TYPE_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_INTERVAL_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_PATH_PREFIX;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.FAILURE_HANDLER_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.FORMAT_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.HOSTS_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.INDEX_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.KEY_DELIMITER_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.PASSWORD_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.USERNAME_OPTION;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.CACHE_TYPE;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.MAX_RETRIES;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.PARTIAL_CACHE_CACHE_MISSING_KEY;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.PARTIAL_CACHE_EXPIRE_AFTER_ACCESS;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.PARTIAL_CACHE_EXPIRE_AFTER_WRITE;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.PARTIAL_CACHE_MAX_ROWS;
/**
 * A {@link DynamicTableFactory} for discovering {@link Elasticsearch7DynamicSource} and {@link
 * Elasticsearch7DynamicSink}.
 */
@Internal
public class Elasticsearch7DynamicTableFactory
        implements DynamicTableSourceFactory, DynamicTableSinkFactory {

    /** Options that must be present in the table definition. */
    private static final Set<ConfigOption<?>> requiredOptions =
            Stream.of(HOSTS_OPTION, INDEX_OPTION).collect(Collectors.toSet());

    /** Options that may be present in the table definition. */
    private static final Set<ConfigOption<?>> optionalOptions =
            Stream.of(
                            KEY_DELIMITER_OPTION,
                            FAILURE_HANDLER_OPTION,
                            FLUSH_ON_CHECKPOINT_OPTION,
                            BULK_FLASH_MAX_SIZE_OPTION,
                            BULK_FLUSH_MAX_ACTIONS_OPTION,
                            BULK_FLUSH_INTERVAL_OPTION,
                            BULK_FLUSH_BACKOFF_TYPE_OPTION,
                            BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION,
                            BULK_FLUSH_BACKOFF_DELAY_OPTION,
                            CONNECTION_PATH_PREFIX,
                            FORMAT_OPTION,
                            PASSWORD_OPTION,
                            USERNAME_OPTION,
                            CACHE_TYPE,
                            PARTIAL_CACHE_EXPIRE_AFTER_ACCESS,
                            PARTIAL_CACHE_EXPIRE_AFTER_WRITE,
                            PARTIAL_CACHE_MAX_ROWS,
                            PARTIAL_CACHE_CACHE_MISSING_KEY,
                            MAX_RETRIES)
                    .collect(Collectors.toSet());

    @Override
    public DynamicTableSource createDynamicTableSource(Context context) {
        DataType physicalRowDataType = context.getPhysicalRowDataType();
        final FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);
        final ReadableConfig options = helper.getOptions();
        // NOTE(review): the source path resolves FORMAT_OPTION from the
        // connector.elasticsearch.table package while the sink path uses this package's
        // FORMAT_OPTION — presumably the same 'format' key; confirm before unifying.
        final DecodingFormat<DeserializationSchema<RowData>> format =
                helper.discoverDecodingFormat(
                        DeserializationFormatFactory.class,
                        org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions
                                .FORMAT_OPTION);
        helper.validate();
        Configuration configuration = new Configuration();
        context.getCatalogTable().getOptions().forEach(configuration::setString);
        Elasticsearch7Configuration config =
                new Elasticsearch7Configuration(configuration, context.getClassLoader());
        return new Elasticsearch7DynamicSource(
                format,
                config,
                physicalRowDataType,
                options.get(MAX_RETRIES),
                getLookupCache(options));
    }

    @Override
    public DynamicTableSink createDynamicTableSink(Context context) {
        TableSchema tableSchema = context.getCatalogTable().getSchema();
        ElasticsearchValidationUtils.validatePrimaryKey(tableSchema);
        final FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);

        final EncodingFormat<SerializationSchema<RowData>> format =
                helper.discoverEncodingFormat(SerializationFormatFactory.class, FORMAT_OPTION);

        helper.validate();
        Configuration configuration = new Configuration();
        context.getCatalogTable().getOptions().forEach(configuration::setString);
        Elasticsearch7Configuration config =
                new Elasticsearch7Configuration(configuration, context.getClassLoader());

        validate(config, configuration);

        return new Elasticsearch7DynamicSink(
                format,
                config,
                TableSchemaUtils.getPhysicalSchema(tableSchema),
                getLocalTimeZoneId(context.getConfiguration()));
    }

    /** Returns a partial lookup cache when configured, or {@code null} for no caching. */
    @Nullable
    private LookupCache getLookupCache(ReadableConfig tableOptions) {
        LookupCache cache = null;
        if (tableOptions
                .get(LookupOptions.CACHE_TYPE)
                .equals(LookupOptions.LookupCacheType.PARTIAL)) {
            cache = DefaultLookupCache.fromConfig(tableOptions);
        }
        return cache;
    }

    /** Resolves the session time zone, mapping the "default" sentinel to the system zone. */
    ZoneId getLocalTimeZoneId(ReadableConfig readableConfig) {
        final String zone = readableConfig.get(TableConfigOptions.LOCAL_TIME_ZONE);
        final ZoneId zoneId =
                TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
                        ? ZoneId.systemDefault()
                        : ZoneId.of(zone);
        return zoneId;
    }

    /**
     * Validates sink-specific options and fails with a descriptive {@link ValidationException}.
     */
    private void validate(Elasticsearch7Configuration config, Configuration originalConfiguration) {
        config.getFailureHandler(); // checks if we can instantiate the custom failure handler
        config.getHosts(); // validate hosts
        validate(
                !config.getIndex().isEmpty(),
                () -> String.format("'%s' must not be empty", INDEX_OPTION.key()));
        int maxActions = config.getBulkFlushMaxActions();
        // -1 is the "disabled" sentinel; any positive value is allowed.
        validate(
                maxActions == -1 || maxActions >= 1,
                () ->
                        String.format(
                                "'%s' must be at least 1. Got: %s",
                                BULK_FLUSH_MAX_ACTIONS_OPTION.key(), maxActions));
        long maxSize = config.getBulkFlushMaxByteSize();
        long mb1 = 1024 * 1024;
        validate(
                maxSize == -1 || (maxSize >= mb1 && maxSize % mb1 == 0),
                () ->
                        String.format(
                                "'%s' must be in MB granularity. Got: %s",
                                BULK_FLASH_MAX_SIZE_OPTION.key(),
                                originalConfiguration
                                        .get(BULK_FLASH_MAX_SIZE_OPTION)
                                        .toHumanReadableString()));
        validate(
                config.getBulkFlushBackoffRetries().map(retries -> retries >= 1).orElse(true),
                () ->
                        String.format(
                                "'%s' must be at least 1. Got: %s",
                                BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION.key(),
                                config.getBulkFlushBackoffRetries().get()));
        // Username and password must be configured together.
        if (config.getUsername().isPresent()
                && !StringUtils.isNullOrWhitespaceOnly(config.getUsername().get())) {
            validate(
                    config.getPassword().isPresent()
                            && !StringUtils.isNullOrWhitespaceOnly(config.getPassword().get()),
                    () ->
                            String.format(
                                    "'%s' and '%s' must be set at the same time. Got: username '%s' and password '%s'",
                                    USERNAME_OPTION.key(),
                                    PASSWORD_OPTION.key(),
                                    config.getUsername().get(),
                                    config.getPassword().orElse("")));
        }
    }

    private static void validate(boolean condition, Supplier<String> message) {
        if (!condition) {
            throw new ValidationException(message.get());
        }
    }

    @Override
    public String factoryIdentifier() {
        return "elasticsearch-7";
    }

    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        return requiredOptions;
    }

    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        return optionalOptions;
    }
}
| 5,727 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7Configuration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.api.ValidationException;
import org.apache.http.HttpHost;
import java.util.List;
import java.util.stream.Collectors;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.HOSTS_OPTION;
/** Elasticsearch 7 specific configuration. */
@Internal
final class Elasticsearch7Configuration extends ElasticsearchConfiguration {

    Elasticsearch7Configuration(ReadableConfig config, ClassLoader classLoader) {
        super(config, classLoader);
    }

    /**
     * Parses the configured hosts option into {@link HttpHost}s.
     *
     * @return one {@link HttpHost} per configured host string.
     * @throws ValidationException if any host string is malformed or lacks a port or scheme.
     */
    public List<HttpHost> getHosts() {
        return config.get(HOSTS_OPTION).stream()
                .map(Elasticsearch7Configuration::validateAndParseHostsString)
                .collect(Collectors.toList());
    }

    private static HttpHost validateAndParseHostsString(String host) {
        final HttpHost httpHost;
        try {
            httpHost = HttpHost.create(host);
        } catch (Exception e) {
            throw new ValidationException(
                    String.format(
                            "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'.",
                            host, HOSTS_OPTION.key()),
                    e);
        }
        // Validate OUTSIDE the try block: previously these specific ValidationExceptions were
        // caught by the generic catch above and re-wrapped, hiding the precise message.
        if (httpHost.getPort() < 0) {
            throw new ValidationException(
                    String.format(
                            "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'. Missing port.",
                            host, HOSTS_OPTION.key()));
        }
        if (httpHost.getSchemeName() == null) {
            throw new ValidationException(
                    String.format(
                            "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'. Missing scheme.",
                            host, HOSTS_OPTION.key()));
        }
        return httpHost;
    }
}
| 5,728 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7DynamicSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.streaming.connectors.elasticsearch7.ElasticsearchSink;
import org.apache.flink.streaming.connectors.elasticsearch7.RestClientFactory;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.data.RowData;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.StringUtils;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import javax.annotation.Nullable;
import java.time.ZoneId;
import java.util.List;
import java.util.Objects;
/**
* A {@link DynamicTableSink} that describes how to create a {@link ElasticsearchSink} from a
* logical description.
*/
@Internal
final class Elasticsearch7DynamicSink implements DynamicTableSink {
@VisibleForTesting
static final Elasticsearch7RequestFactory REQUEST_FACTORY = new Elasticsearch7RequestFactory();
private final EncodingFormat<SerializationSchema<RowData>> format;
private final TableSchema schema;
private final Elasticsearch7Configuration config;
private final ZoneId localTimeZoneId;
private final boolean isDynamicIndexWithSystemTime;
/**
 * Creates the sink with the production {@link ElasticsearchSink.Builder} provider.
 *
 * @param format encoder for rows written to Elasticsearch.
 * @param config parsed connector configuration.
 * @param schema physical table schema.
 * @param localTimeZoneId session time zone used for dynamic time-based index patterns.
 */
public Elasticsearch7DynamicSink(
        EncodingFormat<SerializationSchema<RowData>> format,
        Elasticsearch7Configuration config,
        TableSchema schema,
        ZoneId localTimeZoneId) {
    // Delegates to the testing constructor, injecting the real builder factory.
    this(format, config, schema, localTimeZoneId, (ElasticsearchSink.Builder::new));
}
// --------------------------------------------------------------
// Hack to make configuration testing possible.
//
// The code in this block should never be used outside of tests.
// Having a way to inject a builder we can assert the builder in
// the test. We can not assert everything though, e.g. it is not
// possible to assert flushing on checkpoint, as it is configured
// on the sink itself.
// --------------------------------------------------------------
private final ElasticSearchBuilderProvider builderProvider;
@FunctionalInterface
interface ElasticSearchBuilderProvider {
    /** Creates the sink builder; replaceable in tests to inspect the configured builder. */
    ElasticsearchSink.Builder<RowData> createBuilder(
            List<HttpHost> httpHosts, RowElasticsearchSinkFunction upsertSinkFunction);
}
/**
 * Testing constructor that allows injecting the sink-builder factory (see the hack note above
 * in the original file's comment block).
 */
Elasticsearch7DynamicSink(
        EncodingFormat<SerializationSchema<RowData>> format,
        Elasticsearch7Configuration config,
        TableSchema schema,
        ZoneId localTimeZoneId,
        ElasticSearchBuilderProvider builderProvider) {
    this.format = format;
    this.schema = schema;
    // 'config' must be assigned before isDynamicIndexWithSystemTime(), which reads it.
    this.config = config;
    this.localTimeZoneId = localTimeZoneId;
    this.isDynamicIndexWithSystemTime = isDynamicIndexWithSystemTime();
    this.builderProvider = builderProvider;
}
// --------------------------------------------------------------
// End of hack to make configuration testing possible
// --------------------------------------------------------------
/** Returns whether the configured index pattern is dynamic and based on system time. */
public boolean isDynamicIndexWithSystemTime() {
    return new IndexGeneratorFactory.IndexHelper()
            .checkIsDynamicIndexWithSystemTimeFormat(config.getIndex());
}
@Override
public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
    // Accept everything except UPDATE_BEFORE, which is folded into upserts.
    final ChangelogMode.Builder modeBuilder = ChangelogMode.newBuilder();
    requestedMode.getContainedKinds().stream()
            .filter(kind -> kind != RowKind.UPDATE_BEFORE)
            .forEach(modeBuilder::addContainedKind);
    if (isDynamicIndexWithSystemTime && !requestedMode.containsOnly(RowKind.INSERT)) {
        throw new ValidationException(
                "Dynamic indexing based on system time only works on append only stream.");
    }
    return modeBuilder.build();
}
/**
 * Builds the runtime sink: a {@link RowElasticsearchSinkFunction} wrapped in an
 * {@link ElasticsearchSink}, configured from the connector options.
 */
@Override
public SinkFunctionProvider getSinkRuntimeProvider(Context context) {
    return () -> {
        // Runtime encoder turning RowData into the serialized document bytes.
        SerializationSchema<RowData> format =
                this.format.createRuntimeEncoder(context, schema.toRowDataType());
        final RowElasticsearchSinkFunction upsertFunction =
                new RowElasticsearchSinkFunction(
                        IndexGeneratorFactory.createIndexGenerator(
                                config.getIndex(), schema, localTimeZoneId),
                        null, // this is deprecated in es 7+
                        format,
                        XContentType.JSON,
                        REQUEST_FACTORY,
                        KeyExtractor.createKeyExtractor(schema, config.getKeyDelimiter()));
        final ElasticsearchSink.Builder<RowData> builder =
                builderProvider.createBuilder(config.getHosts(), upsertFunction);
        builder.setFailureHandler(config.getFailureHandler());
        builder.setBulkFlushMaxActions(config.getBulkFlushMaxActions());
        // Byte size is configured in bytes; the builder takes megabytes (>> 20 == / 1 MiB).
        builder.setBulkFlushMaxSizeMb((int) (config.getBulkFlushMaxByteSize() >> 20));
        builder.setBulkFlushInterval(config.getBulkFlushInterval());
        builder.setBulkFlushBackoff(config.isBulkFlushBackoffEnabled());
        config.getBulkFlushBackoffType().ifPresent(builder::setBulkFlushBackoffType);
        config.getBulkFlushBackoffRetries().ifPresent(builder::setBulkFlushBackoffRetries);
        config.getBulkFlushBackoffDelay().ifPresent(builder::setBulkFlushBackoffDelay);
        // we must overwrite the default factory which is defined with a lambda because of a bug
        // in shading lambda serialization shading see FLINK-18006
        if (config.getUsername().isPresent()
                && config.getPassword().isPresent()
                && !StringUtils.isNullOrWhitespaceOnly(config.getUsername().get())
                && !StringUtils.isNullOrWhitespaceOnly(config.getPassword().get())) {
            builder.setRestClientFactory(
                    new AuthRestClientFactory(
                            config.getPathPrefix().orElse(null),
                            config.getUsername().get(),
                            config.getPassword().get()));
        } else {
            builder.setRestClientFactory(
                    new DefaultRestClientFactory(config.getPathPrefix().orElse(null)));
        }
        final ElasticsearchSink<RowData> sink = builder.build();
        if (config.isDisableFlushOnCheckpoint()) {
            sink.disableFlushOnCheckpoint();
        }
        return sink;
    };
}
/**
 * Returns a copy of this sink. {@code DynamicTableSink#copy} is expected to produce a new
 * instance so the planner can mutate copies independently; returning {@code this} violated
 * that contract. All fields are final, so a shallow copy is sufficient.
 */
@Override
public DynamicTableSink copy() {
    return new Elasticsearch7DynamicSink(
            format, config, schema, localTimeZoneId, builderProvider);
}
/** Human-readable connector name shown in plans and EXPLAIN output. */
@Override
public String asSummaryString() {
    return "Elasticsearch7";
}
/** Serializable {@link RestClientFactory} used by the sink. */
@VisibleForTesting
static class DefaultRestClientFactory implements RestClientFactory {

    private final String pathPrefix;

    public DefaultRestClientFactory(@Nullable String pathPrefix) {
        this.pathPrefix = pathPrefix;
    }

    @Override
    public void configureRestClientBuilder(RestClientBuilder restClientBuilder) {
        // Only apply a path prefix when one was configured.
        if (pathPrefix != null) {
            restClientBuilder.setPathPrefix(pathPrefix);
        }
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        final DefaultRestClientFactory other = (DefaultRestClientFactory) o;
        return Objects.equals(pathPrefix, other.pathPrefix);
    }

    @Override
    public int hashCode() {
        return Objects.hash(pathPrefix);
    }
}
/** Serializable {@link RestClientFactory} used by the sink which enable authentication. */
@VisibleForTesting
static class AuthRestClientFactory implements RestClientFactory {

    private final String pathPrefix;
    private final String username;
    private final String password;

    // Lazily created after deserialization; CredentialsProvider itself is not serializable.
    // NOTE(review): lazy init is not synchronized — presumably configured from a single
    // thread per subtask; confirm before relying on concurrent use.
    private transient CredentialsProvider credentialsProvider;

    public AuthRestClientFactory(
            @Nullable String pathPrefix, String username, String password) {
        this.pathPrefix = pathPrefix;
        this.password = password;
        this.username = username;
    }

    @Override
    public void configureRestClientBuilder(RestClientBuilder restClientBuilder) {
        if (pathPrefix != null) {
            restClientBuilder.setPathPrefix(pathPrefix);
        }
        if (credentialsProvider == null) {
            credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(
                    AuthScope.ANY, new UsernamePasswordCredentials(username, password));
        }
        restClientBuilder.setHttpClientConfigCallback(
                httpAsyncClientBuilder ->
                        httpAsyncClientBuilder.setDefaultCredentialsProvider(
                                credentialsProvider));
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        final AuthRestClientFactory other = (AuthRestClientFactory) o;
        return Objects.equals(pathPrefix, other.pathPrefix)
                && Objects.equals(username, other.username)
                && Objects.equals(password, other.password);
    }

    @Override
    public int hashCode() {
        return Objects.hash(pathPrefix, password, username);
    }
}
    /**
     * Version-specific creation of {@link org.elasticsearch.action.ActionRequest}s used by the
     * sink.
     *
     * <p>Note: Elasticsearch 7 removed mapping types, so the {@code docType} parameter is kept
     * only for interface compatibility and is ignored by every request built here.
     */
    private static class Elasticsearch7RequestFactory implements RequestFactory {
        // Upsert semantics: update the document if it exists, otherwise insert it.
        @Override
        public UpdateRequest createUpdateRequest(
                String index,
                String docType,
                String key,
                XContentType contentType,
                byte[] document) {
            return new UpdateRequest(index, key)
                    .doc(document, contentType)
                    .upsert(document, contentType);
        }
        // Insert-or-replace of the document identified by {@code key}.
        @Override
        public IndexRequest createIndexRequest(
                String index,
                String docType,
                String key,
                XContentType contentType,
                byte[] document) {
            return new IndexRequest(index).id(key).source(document, contentType);
        }
        // Deletes the document identified by {@code key} from {@code index}.
        @Override
        public DeleteRequest createDeleteRequest(String index, String docType, String key) {
            return new DeleteRequest(index, key);
        }
    }
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Elasticsearch7DynamicSink that = (Elasticsearch7DynamicSink) o;
return Objects.equals(format, that.format)
&& Objects.equals(schema, that.schema)
&& Objects.equals(config, that.config)
&& Objects.equals(builderProvider, that.builderProvider);
}
    @Override
    public int hashCode() {
        // Must stay consistent with equals(): hashes exactly the four fields compared there.
        return Objects.hash(format, schema, config, builderProvider);
    }
}
| 5,729 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch7DynamicSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.streaming.connectors.elasticsearch7.Elasticsearch7ApiCallBridge;
import org.apache.flink.streaming.connectors.elasticsearch7.RestClientFactory;
import org.apache.flink.table.connector.Projection;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.LookupTableSource;
import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
import org.apache.flink.table.connector.source.lookup.LookupFunctionProvider;
import org.apache.flink.table.connector.source.lookup.PartialCachingLookupProvider;
import org.apache.flink.table.connector.source.lookup.cache.LookupCache;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.util.Preconditions;
import org.apache.flink.util.StringUtils;
import org.elasticsearch.client.RestHighLevelClient;
import javax.annotation.Nullable;
/**
 * A {@link DynamicTableSource} for Elasticsearch 7 that serves point lookups (joins) against an
 * index and supports projection push-down of the physical columns.
 *
 * <p>Authentication is enabled only when both username and password are configured and non-blank.
 */
@Internal
public class Elasticsearch7DynamicSource implements LookupTableSource, SupportsProjectionPushDown {
    private final DecodingFormat<DeserializationSchema<RowData>> format;
    private final Elasticsearch7Configuration config;
    private final int lookupMaxRetryTimes;
    private final LookupCache lookupCache;
    // Mutable on purpose: narrowed by applyProjection() during planning.
    private DataType physicalRowDataType;
    public Elasticsearch7DynamicSource(
            DecodingFormat<DeserializationSchema<RowData>> format,
            Elasticsearch7Configuration config,
            DataType physicalRowDataType,
            int lookupMaxRetryTimes,
            @Nullable LookupCache lookupCache) {
        this.format = format;
        this.config = config;
        this.physicalRowDataType = physicalRowDataType;
        this.lookupMaxRetryTimes = lookupMaxRetryTimes;
        this.lookupCache = lookupCache;
    }
    @Override
    public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) {
        // Choose an authenticated client factory only when both credentials are present and
        // non-blank; otherwise fall back to the unauthenticated default.
        final RestClientFactory restClientFactory;
        if (config.getUsername().isPresent()
                && config.getPassword().isPresent()
                && !StringUtils.isNullOrWhitespaceOnly(config.getUsername().get())
                && !StringUtils.isNullOrWhitespaceOnly(config.getPassword().get())) {
            restClientFactory =
                    new Elasticsearch7DynamicSink.AuthRestClientFactory(
                            config.getPathPrefix().orElse(null),
                            config.getUsername().get(),
                            config.getPassword().get());
        } else {
            restClientFactory =
                    new Elasticsearch7DynamicSink.DefaultRestClientFactory(
                            config.getPathPrefix().orElse(null));
        }
        Elasticsearch7ApiCallBridge elasticsearch7ApiCallBridge =
                new Elasticsearch7ApiCallBridge(config.getHosts(), restClientFactory);
        // Elasticsearch only supports non-nested lookup keys: resolve each top-level key index
        // to its field name and reject nested key paths.
        String[] keyNames = new String[context.getKeys().length];
        for (int i = 0; i < keyNames.length; i++) {
            int[] innerKeyArr = context.getKeys()[i];
            Preconditions.checkArgument(
                    innerKeyArr.length == 1, "Elasticsearch only support non-nested look up keys");
            keyNames[i] = DataType.getFieldNames(physicalRowDataType).get(innerKeyArr[0]);
        }
        ElasticsearchRowDataLookupFunction<RestHighLevelClient> lookupFunction =
                new ElasticsearchRowDataLookupFunction<>(
                        this.format.createRuntimeDecoder(context, physicalRowDataType),
                        lookupMaxRetryTimes,
                        config.getIndex(),
                        config.getDocumentType(),
                        DataType.getFieldNames(physicalRowDataType).toArray(new String[0]),
                        DataType.getFieldDataTypes(physicalRowDataType).toArray(new DataType[0]),
                        keyNames,
                        elasticsearch7ApiCallBridge);
        // Wrap with partial caching only when a lookup cache was configured.
        if (lookupCache != null) {
            return PartialCachingLookupProvider.of(lookupFunction, lookupCache);
        } else {
            return LookupFunctionProvider.of(lookupFunction);
        }
    }
    @Override
    public DynamicTableSource copy() {
        // Fresh instance so later projection push-down on the copy cannot mutate this one.
        return new Elasticsearch7DynamicSource(
                format, config, physicalRowDataType, lookupMaxRetryTimes, lookupCache);
    }
    @Override
    public String asSummaryString() {
        // Connector name shown in plan summaries / EXPLAIN output.
        return "Elasticsearch7";
    }
    @Override
    public boolean supportsNestedProjection() {
        // Only top-level fields may be projected; nested projection is not supported.
        return false;
    }
    @Override
    public void applyProjection(int[][] projectedFields, DataType type) {
        // Narrow the produced row type to the projected physical columns.
        this.physicalRowDataType = Projection.of(projectedFields).project(type);
    }
}
| 5,730 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/connector/elasticsearch/sink/Elasticsearch7SinkBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.annotation.PublicEvolving;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
/**
 * Builder to construct an Elasticsearch 7 compatible {@link ElasticsearchSink}.
 *
 * <p>The following example shows the minimal setup to create a ElasticsearchSink that submits
 * actions on checkpoint or the default number of actions was buffered (1000).
 *
 * <pre>{@code
 * ElasticsearchSink<String> sink = new Elasticsearch7SinkBuilder<String>()
 *     .setHosts(new HttpHost("localhost", 9200))
 *     .setEmitter((element, context, indexer) -> {
 *         indexer.add(
 *             new IndexRequest("my-index")
 *                 .id(element.f0.toString())
 *                 .source(element.f1)
 *         );
 *     })
 *     .build();
 * }</pre>
 *
 * @param <IN> type of the records converted to Elasticsearch actions
 */
@PublicEvolving
public class Elasticsearch7SinkBuilder<IN>
        extends ElasticsearchSinkBuilderBase<IN, Elasticsearch7SinkBuilder<IN>> {
    public Elasticsearch7SinkBuilder() {}
    /** Sets the emitter, narrowing the builder's element type to {@code T}. */
    @Override
    public <T extends IN> Elasticsearch7SinkBuilder<T> setEmitter(
            ElasticsearchEmitter<? super T> emitter) {
        super.<T>setEmitter(emitter);
        return self();
    }
    /**
     * Creates the ES7-specific {@link BulkProcessor.Builder} factory, translating the generic
     * {@link BulkProcessorConfig} (flush thresholds, interval, backoff) into ES7 client settings.
     */
    @Override
    protected BulkProcessorBuilderFactory getBulkProcessorBuilderFactory() {
        return new BulkProcessorBuilderFactory() {
            @Override
            public BulkProcessor.Builder apply(
                    RestHighLevelClient client,
                    BulkProcessorConfig bulkProcessorConfig,
                    BulkProcessor.Listener listener) {
                BulkProcessor.Builder builder =
                        BulkProcessor.builder(
                                new BulkRequestConsumerFactory() { // This cannot be inlined as a
                                    // lambda because then
                                    // deserialization fails
                                    @Override
                                    public void accept(
                                            BulkRequest bulkRequest,
                                            ActionListener<BulkResponse>
                                                    bulkResponseActionListener) {
                                        client.bulkAsync(
                                                bulkRequest,
                                                RequestOptions.DEFAULT,
                                                bulkResponseActionListener);
                                    }
                                },
                                listener);
                // -1 means "not configured" for each of the three flush thresholds below.
                if (bulkProcessorConfig.getBulkFlushMaxActions() != -1) {
                    builder.setBulkActions(bulkProcessorConfig.getBulkFlushMaxActions());
                }
                if (bulkProcessorConfig.getBulkFlushMaxMb() != -1) {
                    builder.setBulkSize(
                            new ByteSizeValue(
                                    bulkProcessorConfig.getBulkFlushMaxMb(), ByteSizeUnit.MB));
                }
                if (bulkProcessorConfig.getBulkFlushInterval() != -1) {
                    builder.setFlushInterval(
                            new TimeValue(bulkProcessorConfig.getBulkFlushInterval()));
                }
                // Map the connector's backoff strategy onto the ES client's BackoffPolicy.
                BackoffPolicy backoffPolicy;
                final TimeValue backoffDelay =
                        new TimeValue(bulkProcessorConfig.getBulkFlushBackOffDelay());
                final int maxRetryCount = bulkProcessorConfig.getBulkFlushBackoffRetries();
                switch (bulkProcessorConfig.getFlushBackoffType()) {
                    case CONSTANT:
                        backoffPolicy = BackoffPolicy.constantBackoff(backoffDelay, maxRetryCount);
                        break;
                    case EXPONENTIAL:
                        backoffPolicy =
                                BackoffPolicy.exponentialBackoff(backoffDelay, maxRetryCount);
                        break;
                    case NONE:
                        backoffPolicy = BackoffPolicy.noBackoff();
                        break;
                    default:
                        throw new IllegalArgumentException(
                                "Received unknown backoff policy type "
                                        + bulkProcessorConfig.getFlushBackoffType());
                }
                builder.setBackoffPolicy(backoffPolicy);
                return builder;
            }
        };
    }
}
| 5,731 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch7/src/main/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch7DynamicSinkFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.connector.elasticsearch.sink.Elasticsearch7SinkBuilder;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
/** A {@link DynamicTableSinkFactory} for discovering {@link ElasticsearchDynamicSink}. */
@Internal
public class Elasticsearch7DynamicSinkFactory extends ElasticsearchDynamicSinkFactoryBase {
    // Value users put in the 'connector' option to select this factory.
    private static final String FACTORY_IDENTIFIER = "elasticsearch-7";
    public Elasticsearch7DynamicSinkFactory() {
        // Delegates all behavior to the shared base, parameterized with the ES7 sink builder.
        super(FACTORY_IDENTIFIER, Elasticsearch7SinkBuilder::new);
    }
}
| 5,732 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSinkITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch6;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.elasticsearch.ElasticsearchUtil;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkTestBase;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.junit.ClassRule;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import java.util.ArrayList;
import java.util.List;
/** IT cases for the {@link ElasticsearchSink}, run against an Elasticsearch 6 Testcontainer. */
public class ElasticsearchSinkITCase
        extends ElasticsearchSinkTestBase<RestHighLevelClient, HttpHost> {
    private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchSinkITCase.class);
    // Shared ES6 container for every test in this class (started once via @ClassRule).
    @ClassRule
    public static ElasticsearchContainer elasticsearchContainer =
            ElasticsearchUtil.createElasticsearchContainer(
                    DockerImageVersions.ELASTICSEARCH_6, LOG);
    /** Returns a high-level REST client connected to the test container. */
    @Override
    protected final RestHighLevelClient getClient() {
        return new RestHighLevelClient(
                RestClient.builder(HttpHost.create(elasticsearchContainer.getHttpHostAddress())));
    }
    @Test
    public void testElasticsearchSink() throws Exception {
        runElasticsearchSinkTest();
    }
    @Test
    public void testElasticsearchSinkWithSmile() throws Exception {
        runElasticsearchSinkSmileTest();
    }
    @Test
    public void testNullAddresses() {
        runNullAddressesTest();
    }
    @Test
    public void testEmptyAddresses() {
        runEmptyAddressesTest();
    }
    @Test
    public void testInvalidElasticsearchCluster() throws Exception {
        runInvalidElasticsearchClusterTest();
    }
    /** Builds a sink for the given hosts with the requested bulk-flush threshold. */
    @Override
    protected ElasticsearchSinkBase<Tuple2<Integer, String>, RestHighLevelClient>
            createElasticsearchSink(
                    int bulkFlushMaxActions,
                    List<HttpHost> httpHosts,
                    ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) {
        ElasticsearchSink.Builder<Tuple2<Integer, String>> builder =
                new ElasticsearchSink.Builder<>(httpHosts, elasticsearchSinkFunction);
        builder.setBulkFlushMaxActions(bulkFlushMaxActions);
        return builder.build();
    }
    /** Builds a sink targeting the shared test container. */
    @Override
    protected ElasticsearchSinkBase<Tuple2<Integer, String>, RestHighLevelClient>
            createElasticsearchSinkForEmbeddedNode(
                    int bulkFlushMaxActions,
                    ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) {
        return createElasticsearchSinkForNode(
                bulkFlushMaxActions,
                elasticsearchSinkFunction,
                elasticsearchContainer.getHttpHostAddress());
    }
    /** Builds a sink targeting a single node at {@code hostAddress}. */
    @Override
    protected ElasticsearchSinkBase<Tuple2<Integer, String>, RestHighLevelClient>
            createElasticsearchSinkForNode(
                    int bulkFlushMaxActions,
                    ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction,
                    String hostAddress) {
        // Delegate to createElasticsearchSink instead of duplicating the builder setup.
        List<HttpHost> httpHosts = new ArrayList<>();
        httpHosts.add(HttpHost.create(hostAddress));
        return createElasticsearchSink(
                bulkFlushMaxActions, httpHosts, elasticsearchSinkFunction);
    }
}
| 5,733 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6DynamicTableFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.api.common.typeutils.base.VoidSerializer;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;
import org.apache.flink.util.TestLogger;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.util.Arrays;
import java.util.Collections;
import static org.apache.flink.streaming.connectors.elasticsearch.table.TestContext.context;
/** Tests for validation in {@link Elasticsearch6DynamicTableFactory}. */
public class Elasticsearch6DynamicTableFactoryTest extends TestLogger {
    // NOTE(review): ExpectedException is deprecated since JUnit 4.13; consider migrating these
    // tests to Assert.assertThrows when this class is next touched.
    @Rule public ExpectedException thrown = ExpectedException.none();
    // All three required options missing -> one aggregated validation error listing them.
    @Test
    public void validateEmptyConfiguration() {
        Elasticsearch6DynamicTableFactory factory = new Elasticsearch6DynamicTableFactory();
        thrown.expect(ValidationException.class);
        thrown.expectMessage(
                "One or more required options are missing.\n"
                        + "\n"
                        + "Missing required options are:\n"
                        + "\n"
                        + "document-type\n"
                        + "hosts\n"
                        + "index");
        factory.createDynamicTableSink(context().build());
    }
    // An empty 'index' value is rejected even though the key itself is present.
    @Test
    public void validateWrongIndex() {
        Elasticsearch6DynamicTableFactory factory = new Elasticsearch6DynamicTableFactory();
        thrown.expect(ValidationException.class);
        thrown.expectMessage("'index' must not be empty");
        factory.createDynamicTableSink(
                context()
                        .withOption("index", "")
                        .withOption("document-type", "MyType")
                        .withOption("hosts", "http://localhost:12345")
                        .build());
    }
    // Hosts must follow the 'http://host_name:port' format.
    @Test
    public void validateWrongHosts() {
        Elasticsearch6DynamicTableFactory factory = new Elasticsearch6DynamicTableFactory();
        thrown.expect(ValidationException.class);
        thrown.expectMessage(
                "Could not parse host 'wrong-host' in option 'hosts'. It should follow the format 'http://host_name:port'.");
        factory.createDynamicTableSink(
                context()
                        .withOption("index", "MyIndex")
                        .withOption("document-type", "MyType")
                        .withOption("hosts", "wrong-host")
                        .build());
    }
    // Bulk flush size must be expressed in whole megabytes.
    @Test
    public void validateWrongFlushSize() {
        Elasticsearch6DynamicTableFactory factory = new Elasticsearch6DynamicTableFactory();
        thrown.expect(ValidationException.class);
        thrown.expectMessage(
                "'sink.bulk-flush.max-size' must be in MB granularity. Got: 1024 bytes");
        factory.createDynamicTableSink(
                context()
                        .withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
                        .withOption(
                                ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), "MyType")
                        .withOption(
                                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                                "http://localhost:1234")
                        .withOption(
                                ElasticsearchConnectorOptions.BULK_FLASH_MAX_SIZE_OPTION.key(),
                                "1kb")
                        .build());
    }
    // Backoff retry count below 1 is invalid.
    @Test
    public void validateWrongRetries() {
        Elasticsearch6DynamicTableFactory factory = new Elasticsearch6DynamicTableFactory();
        thrown.expect(ValidationException.class);
        thrown.expectMessage("'sink.bulk-flush.backoff.max-retries' must be at least 1. Got: 0");
        factory.createDynamicTableSink(
                context()
                        .withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
                        .withOption(
                                ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), "MyType")
                        .withOption(
                                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                                "http://localhost:1234")
                        .withOption(
                                ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION
                                        .key(),
                                "0")
                        .build());
    }
    // Negative max-actions (other than the -1 "disabled" sentinel) is invalid.
    @Test
    public void validateWrongMaxActions() {
        Elasticsearch6DynamicTableFactory factory = new Elasticsearch6DynamicTableFactory();
        thrown.expect(ValidationException.class);
        thrown.expectMessage("'sink.bulk-flush.max-actions' must be at least 1. Got: -2");
        factory.createDynamicTableSink(
                context()
                        .withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
                        .withOption(
                                ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), "MyType")
                        .withOption(
                                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                                "http://localhost:1234")
                        .withOption(
                                ElasticsearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION.key(),
                                "-2")
                        .build());
    }
    // Negative backoff delay fails duration parsing.
    @Test
    public void validateWrongBackoffDelay() {
        Elasticsearch6DynamicTableFactory factory = new Elasticsearch6DynamicTableFactory();
        thrown.expect(ValidationException.class);
        thrown.expectMessage("Invalid value for option 'sink.bulk-flush.backoff.delay'.");
        factory.createDynamicTableSink(
                context()
                        .withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
                        .withOption(
                                ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), "MyType")
                        .withOption(
                                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                                "http://localhost:1234")
                        .withOption(
                                ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION.key(),
                                "-1s")
                        .build());
    }
    // Primary keys on complex/raw/binary columns are rejected; the schema below covers every
    // illegal type in one go.
    @Test
    public void validatePrimaryKeyOnIllegalColumn() {
        Elasticsearch6DynamicTableFactory factory = new Elasticsearch6DynamicTableFactory();
        thrown.expect(ValidationException.class);
        thrown.expectMessage(
                "The table has a primary key on columns of illegal types: "
                        + "[ARRAY, MAP, MULTISET, ROW, RAW, VARBINARY].\n"
                        + " Elasticsearch sink does not support primary keys on columns of types: "
                        + "[ARRAY, MAP, MULTISET, STRUCTURED_TYPE, ROW, RAW, BINARY, VARBINARY].");
        factory.createDynamicTableSink(
                context()
                        .withSchema(
                                new ResolvedSchema(
                                        Arrays.asList(
                                                Column.physical("a", DataTypes.BIGINT().notNull()),
                                                Column.physical(
                                                        "b",
                                                        DataTypes.ARRAY(
                                                                        DataTypes.BIGINT()
                                                                                .notNull())
                                                                .notNull()),
                                                Column.physical(
                                                        "c",
                                                        DataTypes.MAP(
                                                                        DataTypes.BIGINT(),
                                                                        DataTypes.STRING())
                                                                .notNull()),
                                                Column.physical(
                                                        "d",
                                                        DataTypes.MULTISET(
                                                                        DataTypes.BIGINT()
                                                                                .notNull())
                                                                .notNull()),
                                                Column.physical(
                                                        "e",
                                                        DataTypes.ROW(
                                                                        DataTypes.FIELD(
                                                                                "a",
                                                                                DataTypes.BIGINT()))
                                                                .notNull()),
                                                Column.physical(
                                                        "f",
                                                        DataTypes.RAW(
                                                                        Void.class,
                                                                        VoidSerializer.INSTANCE)
                                                                .notNull()),
                                                Column.physical("g", DataTypes.BYTES().notNull())),
                                        Collections.emptyList(),
                                        UniqueConstraint.primaryKey(
                                                "name",
                                                Arrays.asList("a", "b", "c", "d", "e", "f", "g"))))
                        .withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
                        .withOption(
                                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                                "http://localhost:1234")
                        .withOption(
                                ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION.key(),
                                "1s")
                        .build());
    }
    // Username without a password (or vice versa) must be rejected as inconsistent credentials.
    @Test
    public void validateWrongCredential() {
        Elasticsearch6DynamicTableFactory factory = new Elasticsearch6DynamicTableFactory();
        thrown.expect(ValidationException.class);
        thrown.expectMessage(
                "'username' and 'password' must be set at the same time. Got: username 'username' and password ''");
        factory.createDynamicTableSink(
                context()
                        .withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
                        .withOption(
                                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                                "http://localhost:1234")
                        .withOption(
                                ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), "MyType")
                        .withOption(ElasticsearchConnectorOptions.USERNAME_OPTION.key(), "username")
                        .withOption(ElasticsearchConnectorOptions.PASSWORD_OPTION.key(), "")
                        .build());
    }
}
| 5,734 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6DynamicSinkITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.api.common.time.Deadline;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.TestLogger;
import org.apache.http.HttpHost;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.search.SearchHits;
import org.junit.ClassRule;
import org.junit.Test;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.utility.DockerImageName;
import java.time.Duration;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import static org.apache.flink.streaming.connectors.elasticsearch.table.TestContext.context;
import static org.apache.flink.table.api.Expressions.row;
import static org.assertj.core.api.Assertions.assertThat;
import static org.testcontainers.containers.wait.strategy.Wait.forHttp;
/** IT tests for {@link Elasticsearch6DynamicSink}. */
public class Elasticsearch6DynamicSinkITCase extends TestLogger {
@ClassRule
public static ElasticsearchContainer elasticsearchContainer =
new ElasticsearchContainer(DockerImageName.parse(DockerImageVersions.ELASTICSEARCH_6))
.waitingFor(
forHttp("/")
.withMethod("HEAD")
.forStatusCode(200)
.forPort(9200)
.withStartupTimeout(Duration.ofMinutes(2)));
    // Returns a high-level REST client connected to the shared test container. Deprecation is
    // suppressed because this RestHighLevelClient constructor is deprecated in newer ES clients.
    @SuppressWarnings("deprecation")
    protected final RestHighLevelClient getClient() {
        return new RestHighLevelClient(
                RestClient.builder(HttpHost.create(elasticsearchContainer.getHttpHostAddress())));
    }
    // Writes one upsert row through the sink created directly from the factory and verifies the
    // document content in Elasticsearch, including the "<pk1>_<pk2>" document-id derivation.
    @Test
    public void testWritingDocuments() throws Exception {
        ResolvedSchema schema =
                new ResolvedSchema(
                        Arrays.asList(
                                Column.physical("a", DataTypes.BIGINT().notNull()),
                                Column.physical("b", DataTypes.TIME()),
                                Column.physical("c", DataTypes.STRING().notNull()),
                                Column.physical("d", DataTypes.FLOAT()),
                                Column.physical("e", DataTypes.TINYINT().notNull()),
                                Column.physical("f", DataTypes.DATE()),
                                Column.physical("g", DataTypes.TIMESTAMP().notNull())),
                        Collections.emptyList(),
                        UniqueConstraint.primaryKey("name", Arrays.asList("a", "g")));
        // Internal row: TIME as millis-of-day (12345 -> 00:00:12), DATE as epoch days
        // (12345 -> 2003-10-20).
        GenericRowData rowData =
                GenericRowData.of(
                        1L,
                        12345,
                        StringData.fromString("ABCDE"),
                        12.12f,
                        (byte) 2,
                        12345,
                        TimestampData.fromLocalDateTime(
                                LocalDateTime.parse("2012-12-12T12:12:12")));
        String index = "writing-documents";
        String myType = "MyType";
        Elasticsearch6DynamicTableFactory factory = new Elasticsearch6DynamicTableFactory();
        // Flush-on-checkpoint is disabled so the batch job flushes eagerly on close.
        SinkFunctionProvider sinkRuntimeProvider =
                (SinkFunctionProvider)
                        factory.createDynamicTableSink(
                                        context()
                                                .withSchema(schema)
                                                .withOption(
                                                        ElasticsearchConnectorOptions.INDEX_OPTION
                                                                .key(),
                                                        index)
                                                .withOption(
                                                        ElasticsearchConnectorOptions
                                                                .DOCUMENT_TYPE_OPTION
                                                                .key(),
                                                        myType)
                                                .withOption(
                                                        ElasticsearchConnectorOptions.HOSTS_OPTION
                                                                .key(),
                                                        elasticsearchContainer.getHttpHostAddress())
                                                .withOption(
                                                        ElasticsearchConnectorOptions
                                                                .FLUSH_ON_CHECKPOINT_OPTION
                                                                .key(),
                                                        "false")
                                                .build())
                                .getSinkRuntimeProvider(new MockContext());
        SinkFunction<RowData> sinkFunction = sinkRuntimeProvider.createSinkFunction();
        StreamExecutionEnvironment environment =
                StreamExecutionEnvironment.getExecutionEnvironment();
        environment.setParallelism(4);
        rowData.setRowKind(RowKind.UPDATE_AFTER);
        environment.<RowData>fromElements(rowData).addSink(sinkFunction);
        environment.execute();
        RestHighLevelClient client = getClient();
        // Document id is "<a>_<g>" because (a, g) is the primary key.
        Map<String, Object> response =
                client.get(
                                new GetRequest(index, myType, "1_2012-12-12T12:12:12"),
                                RequestOptions.DEFAULT)
                        .getSource();
        Map<Object, Object> expectedMap = new HashMap<>();
        expectedMap.put("a", 1);
        expectedMap.put("b", "00:00:12");
        expectedMap.put("c", "ABCDE");
        expectedMap.put("d", 12.12d);
        expectedMap.put("e", 2);
        expectedMap.put("f", "2003-10-20");
        expectedMap.put("g", "2012-12-12 12:12:12");
        assertThat(response).isEqualTo(expectedMap);
    }
    @Test
    public void testWritingDocumentsFromTableApi() throws Exception {
        // End-to-end round trip through the Table API: define an ES6 sink table via
        // SQL DDL, insert one row, then fetch the indexed document back with the
        // REST client and compare it field by field.
        TableEnvironment tableEnvironment =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        String index = "table-api";
        String myType = "MyType";
        // DDL includes a computed column (h) and a composite PRIMARY KEY (a, g);
        // the key determines the Elasticsearch document id (see GetRequest below).
        tableEnvironment.executeSql(
                "CREATE TABLE esTable ("
                        + "a BIGINT NOT NULL,\n"
                        + "b TIME,\n"
                        + "c STRING NOT NULL,\n"
                        + "d FLOAT,\n"
                        + "e TINYINT NOT NULL,\n"
                        + "f DATE,\n"
                        + "g TIMESTAMP NOT NULL,\n"
                        + "h as a + 2,\n"
                        + "PRIMARY KEY (a, g) NOT ENFORCED\n"
                        + ")\n"
                        + "WITH (\n"
                        + String.format("'%s'='%s',\n", "connector", "elasticsearch-6")
                        + String.format(
                                "'%s'='%s',\n",
                                ElasticsearchConnectorOptions.INDEX_OPTION.key(), index)
                        + String.format(
                                "'%s'='%s',\n",
                                ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), myType)
                        + String.format(
                                "'%s'='%s',\n",
                                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                                elasticsearchContainer.getHttpHostAddress())
                        // flush-on-checkpoint disabled so the bounded insert completes
                        // without requiring a checkpoint.
                        + String.format(
                                "'%s'='%s'\n",
                                ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION.key(),
                                "false")
                        + ")");
        tableEnvironment
                .fromValues(
                        row(
                                1L,
                                LocalTime.ofNanoOfDay(12345L * 1_000_000L),
                                "ABCDE",
                                12.12f,
                                (byte) 2,
                                LocalDate.ofEpochDay(12345),
                                LocalDateTime.parse("2012-12-12T12:12:12")))
                .executeInsert("esTable")
                .await();
        RestHighLevelClient client = getClient();
        // Document id "1_2012-12-12T12:12:12" is derived from the (a, g) key values.
        Map<String, Object> response =
                client.get(
                                new GetRequest(index, myType, "1_2012-12-12T12:12:12"),
                                RequestOptions.DEFAULT)
                        .getSource();
        // Expected JSON source: temporal columns are serialized as formatted strings,
        // FLOAT comes back as a double from the JSON parser.
        Map<Object, Object> expectedMap = new HashMap<>();
        expectedMap.put("a", 1);
        expectedMap.put("b", "00:00:12");
        expectedMap.put("c", "ABCDE");
        expectedMap.put("d", 12.12d);
        expectedMap.put("e", 2);
        expectedMap.put("f", "2003-10-20");
        expectedMap.put("g", "2012-12-12 12:12:12");
        assertThat(response).isEqualTo(expectedMap);
    }
    @Test
    public void testWritingDocumentsNoPrimaryKey() throws Exception {
        // Without a PRIMARY KEY the sink works in append mode with
        // Elasticsearch-generated document ids, so the test uses the search API
        // (instead of a keyed GET) and compares results as an unordered set.
        TableEnvironment tableEnvironment =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode())
        String index = "no-primary-key";
        String myType = "MyType";
        tableEnvironment.executeSql(
                "CREATE TABLE esTable ("
                        + "a BIGINT NOT NULL,\n"
                        + "b TIME,\n"
                        + "c STRING NOT NULL,\n"
                        + "d FLOAT,\n"
                        + "e TINYINT NOT NULL,\n"
                        + "f DATE,\n"
                        + "g TIMESTAMP NOT NULL\n"
                        + ")\n"
                        + "WITH (\n"
                        + String.format("'%s'='%s',\n", "connector", "elasticsearch-6")
                        + String.format(
                                "'%s'='%s',\n",
                                ElasticsearchConnectorOptions.INDEX_OPTION.key(), index)
                        + String.format(
                                "'%s'='%s',\n",
                                ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), myType)
                        + String.format(
                                "'%s'='%s',\n",
                                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                                elasticsearchContainer.getHttpHostAddress())
                        + String.format(
                                "'%s'='%s'\n",
                                ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION.key(),
                                "false")
                        + ")");
        // Two distinct rows; both must be indexed as separate documents.
        tableEnvironment
                .fromValues(
                        row(
                                1L,
                                LocalTime.ofNanoOfDay(12345L * 1_000_000L),
                                "ABCDE",
                                12.12f,
                                (byte) 2,
                                LocalDate.ofEpochDay(12345),
                                LocalDateTime.parse("2012-12-12T12:12:12")),
                        row(
                                2L,
                                LocalTime.ofNanoOfDay(12345L * 1_000_000L),
                                "FGHIJK",
                                13.13f,
                                (byte) 4,
                                LocalDate.ofEpochDay(12345),
                                LocalDateTime.parse("2013-12-12T13:13:13")))
                .executeInsert("esTable")
                .await();
        RestHighLevelClient client = getClient();
        // search API does not return documents that were not indexed, we might need to query
        // the index a few times
        Deadline deadline = Deadline.fromNow(Duration.ofSeconds(30));
        SearchHits hits;
        do {
            // Poll with a short back-off until both documents become searchable
            // (Elasticsearch makes writes visible only after a refresh).
            hits = client.search(new SearchRequest(index), RequestOptions.DEFAULT).getHits();
            if (hits.getTotalHits() < 2) {
                Thread.sleep(200);
            }
        } while (hits.getTotalHits() < 2 && deadline.hasTimeLeft());
        if (hits.getTotalHits() < 2) {
            throw new AssertionError("Could not retrieve results from Elasticsearch.");
        }
        // Hit order is unspecified, so collect both sources into a set for comparison.
        HashSet<Map<String, Object>> resultSet = new HashSet<>();
        resultSet.add(hits.getAt(0).getSourceAsMap());
        resultSet.add(hits.getAt(1).getSourceAsMap());
        Map<Object, Object> expectedMap1 = new HashMap<>();
        expectedMap1.put("a", 1);
        expectedMap1.put("b", "00:00:12");
        expectedMap1.put("c", "ABCDE");
        expectedMap1.put("d", 12.12d);
        expectedMap1.put("e", 2);
        expectedMap1.put("f", "2003-10-20");
        expectedMap1.put("g", "2012-12-12 12:12:12");
        Map<Object, Object> expectedMap2 = new HashMap<>();
        expectedMap2.put("a", 2);
        expectedMap2.put("b", "00:00:12");
        expectedMap2.put("c", "FGHIJK");
        expectedMap2.put("d", 13.13d);
        expectedMap2.put("e", 4);
        expectedMap2.put("f", "2003-10-20");
        expectedMap2.put("g", "2013-12-12 13:13:13");
        HashSet<Map<Object, Object>> expectedSet = new HashSet<>();
        expectedSet.add(expectedMap1);
        expectedSet.add(expectedMap2);
        assertThat(resultSet).isEqualTo(expectedSet);
    }
    @Test
    public void testWritingDocumentsWithDynamicIndex() throws Exception {
        // Verifies dynamic index routing: the index name pattern contains a
        // per-record placeholder ({b|yyyy-MM-dd}) that is resolved from the value
        // of timestamp column b.
        TableEnvironment tableEnvironment =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        String index = "dynamic-index-{b|yyyy-MM-dd}";
        String myType = "MyType";
        tableEnvironment.executeSql(
                "CREATE TABLE esTable ("
                        + "a BIGINT NOT NULL,\n"
                        + "b TIMESTAMP NOT NULL,\n"
                        + "PRIMARY KEY (a) NOT ENFORCED\n"
                        + ")\n"
                        + "WITH (\n"
                        + String.format("'%s'='%s',\n", "connector", "elasticsearch-6")
                        + String.format(
                                "'%s'='%s',\n",
                                ElasticsearchConnectorOptions.INDEX_OPTION.key(), index)
                        + String.format(
                                "'%s'='%s',\n",
                                ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), myType)
                        + String.format(
                                "'%s'='%s',\n",
                                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                                elasticsearchContainer.getHttpHostAddress())
                        + String.format(
                                "'%s'='%s'\n",
                                ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION.key(),
                                "false")
                        + ")");
        tableEnvironment
                .fromValues(row(1L, LocalDateTime.parse("2012-12-12T12:12:12")))
                .executeInsert("esTable")
                .await();
        RestHighLevelClient client = getClient();
        // The row's timestamp (2012-12-12) must have routed the document into
        // the resolved index "dynamic-index-2012-12-12"; id "1" comes from key a.
        Map<String, Object> response =
                client.get(
                                new GetRequest("dynamic-index-2012-12-12", myType, "1"),
                                RequestOptions.DEFAULT)
                        .getSource();
        Map<Object, Object> expectedMap = new HashMap<>();
        expectedMap.put("a", 1);
        expectedMap.put("b", "2012-12-12 12:12:12");
        assertThat(response).isEqualTo(expectedMap);
    }
    /**
     * Minimal {@link DynamicTableSink.Context} stub for obtaining a sink runtime
     * provider in tests. Only {@link #isBounded()} matters here; the type/converter
     * factory methods are never exercised and simply return {@code null}.
     */
    private static class MockContext implements DynamicTableSink.Context {
        @Override
        public boolean isBounded() {
            // Unbounded => the sink is created for streaming execution.
            return false;
        }
        @Override
        public TypeInformation<?> createTypeInformation(DataType consumedDataType) {
            return null;
        }
        @Override
        public TypeInformation<?> createTypeInformation(LogicalType consumedLogicalType) {
            return null;
        }
        @Override
        public DynamicTableSink.DataStructureConverter createDataStructureConverter(
                DataType consumedDataType) {
            return null;
        }
        // NOTE(review): no @Override here — presumably this implements a default
        // method added to DynamicTableSink.Context in a newer Flink version;
        // confirm against the interface version on the classpath.
        public Optional<int[][]> getTargetColumns() {
            return Optional.empty();
        }
    }
}
| 5,735 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6DynamicSinkTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.util.TestLogger;
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionRequest;
import org.junit.Test;
import org.mockito.Mockito;
import java.time.ZoneId;
import java.util.List;
import java.util.Optional;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
/**
 * Tests for {@link Elasticsearch6DynamicSink} parameters.
 *
 * <p>No real cluster is contacted: {@link BuilderProvider} wraps the underlying
 * {@link ElasticsearchSink.Builder} and the produced sink in Mockito spies so the
 * tests can verify which configuration calls the dynamic sink forwards.
 */
public class Elasticsearch6DynamicSinkTest extends TestLogger {
    // Column names used by createTestSchema().
    private static final String FIELD_KEY = "key";
    private static final String FIELD_FRUIT_NAME = "fruit_name";
    private static final String FIELD_COUNT = "count";
    private static final String FIELD_TS = "ts";
    // Connection/option values shared by all tests.
    private static final String HOSTNAME = "host1";
    private static final int PORT = 1234;
    private static final String SCHEMA = "https";
    private static final String INDEX = "MyIndex";
    private static final String DOC_TYPE = "MyType";
    private static final String USERNAME = "username";
    private static final String PASSWORD = "password";
    /**
     * Every option set in {@link #getConfig()} must be forwarded to the
     * underlying {@link ElasticsearchSink.Builder} / sink.
     */
    @Test
    public void testBuilder() {
        final TableSchema schema = createTestSchema();
        BuilderProvider provider = new BuilderProvider();
        final Elasticsearch6DynamicSink testSink =
                new Elasticsearch6DynamicSink(
                        new DummyEncodingFormat(),
                        new Elasticsearch6Configuration(
                                getConfig(), this.getClass().getClassLoader()),
                        schema,
                        ZoneId.systemDefault(),
                        provider);
        // Creating the sink function triggers all builder configuration calls.
        testSink.getSinkRuntimeProvider(new MockSinkContext()).createSinkFunction();
        verify(provider.builderSpy).setFailureHandler(new DummyFailureHandler());
        verify(provider.builderSpy).setBulkFlushBackoff(true);
        verify(provider.builderSpy)
                .setBulkFlushBackoffType(ElasticsearchSinkBase.FlushBackoffType.EXPONENTIAL);
        verify(provider.builderSpy).setBulkFlushBackoffDelay(123);
        verify(provider.builderSpy).setBulkFlushBackoffRetries(3);
        verify(provider.builderSpy).setBulkFlushInterval(100);
        verify(provider.builderSpy).setBulkFlushMaxActions(1000);
        verify(provider.builderSpy).setBulkFlushMaxSizeMb(1);
        verify(provider.builderSpy)
                .setRestClientFactory(
                        new Elasticsearch6DynamicSink.DefaultRestClientFactory("/myapp"));
        // sink-level call: flush-on-checkpoint was configured off.
        verify(provider.sinkSpy).disableFlushOnCheckpoint();
    }
    /**
     * With only the required options set, the documented defaults must be
     * applied and flush-on-checkpoint must stay enabled.
     */
    @Test
    public void testDefaultConfig() {
        final TableSchema schema = createTestSchema();
        Configuration configuration = new Configuration();
        configuration.setString(ElasticsearchConnectorOptions.INDEX_OPTION.key(), INDEX)
        configuration.setString(ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), DOC_TYPE);
        configuration.setString(
                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                SCHEMA + "://" + HOSTNAME + ":" + PORT);
        BuilderProvider provider = new BuilderProvider();
        final Elasticsearch6DynamicSink testSink =
                new Elasticsearch6DynamicSink(
                        new DummyEncodingFormat(),
                        new Elasticsearch6Configuration(
                                configuration, this.getClass().getClassLoader()),
                        schema,
                        ZoneId.systemDefault(),
                        provider);
        testSink.getSinkRuntimeProvider(new MockSinkContext()).createSinkFunction();
        verify(provider.builderSpy).setFailureHandler(new NoOpFailureHandler());
        verify(provider.builderSpy).setBulkFlushBackoff(false);
        verify(provider.builderSpy).setBulkFlushInterval(1000);
        verify(provider.builderSpy).setBulkFlushMaxActions(1000);
        verify(provider.builderSpy).setBulkFlushMaxSizeMb(2);
        verify(provider.builderSpy)
                .setRestClientFactory(new Elasticsearch6DynamicSink.DefaultRestClientFactory(null));
        verify(provider.sinkSpy, never()).disableFlushOnCheckpoint();
    }
    /**
     * When username/password are configured, the sink must install the
     * authenticating REST client factory instead of the default one.
     */
    @Test
    public void testAuthConfig() {
        final TableSchema schema = createTestSchema();
        Configuration configuration = new Configuration();
        configuration.setString(ElasticsearchConnectorOptions.INDEX_OPTION.key(), INDEX);
        configuration.setString(ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), DOC_TYPE);
        configuration.setString(
                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                SCHEMA + "://" + HOSTNAME + ":" + PORT);
        configuration.setString(ElasticsearchConnectorOptions.USERNAME_OPTION.key(), USERNAME);
        configuration.setString(ElasticsearchConnectorOptions.PASSWORD_OPTION.key(), PASSWORD);
        BuilderProvider provider = new BuilderProvider();
        final Elasticsearch6DynamicSink testSink =
                new Elasticsearch6DynamicSink(
                        new DummyEncodingFormat(),
                        new Elasticsearch6Configuration(
                                configuration, this.getClass().getClassLoader()),
                        schema,
                        ZoneId.systemDefault(),
                        provider);
        testSink.getSinkRuntimeProvider(new MockSinkContext()).createSinkFunction();
        verify(provider.builderSpy).setFailureHandler(new NoOpFailureHandler());
        verify(provider.builderSpy).setBulkFlushBackoff(false);
        verify(provider.builderSpy).setBulkFlushInterval(1000);
        verify(provider.builderSpy).setBulkFlushMaxActions(1000);
        verify(provider.builderSpy).setBulkFlushMaxSizeMb(2);
        verify(provider.builderSpy)
                .setRestClientFactory(
                        new Elasticsearch6DynamicSink.AuthRestClientFactory(
                                null, USERNAME, PASSWORD));
        verify(provider.sinkSpy, never()).disableFlushOnCheckpoint();
    }
    /** Configuration with every supported option set to a non-default value. */
    private Configuration getConfig() {
        Configuration configuration = new Configuration();
        configuration.setString(ElasticsearchConnectorOptions.INDEX_OPTION.key(), INDEX);
        configuration.setString(ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION.key(), DOC_TYPE);
        configuration.setString(
                ElasticsearchConnectorOptions.HOSTS_OPTION.key(),
                SCHEMA + "://" + HOSTNAME + ":" + PORT);
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_TYPE_OPTION.key(), "exponential");
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION.key(), "123");
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION.key(), "3");
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLUSH_INTERVAL_OPTION.key(), "100");
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION.key(), "1000");
        configuration.setString(
                ElasticsearchConnectorOptions.BULK_FLASH_MAX_SIZE_OPTION.key(), "1mb");
        configuration.setString(
                ElasticsearchConnectorOptions.CONNECTION_PATH_PREFIX.key(), "/myapp");
        configuration.setString(
                ElasticsearchConnectorOptions.FAILURE_HANDLER_OPTION.key(),
                DummyFailureHandler.class.getName());
        configuration.setString(
                ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION.key(), "false");
        return configuration;
    }
    /**
     * Builder provider that wraps the real builder and the sink it produces in
     * Mockito spies, exposing them as fields so tests can verify interactions.
     */
    private static class BuilderProvider
            implements Elasticsearch6DynamicSink.ElasticSearchBuilderProvider {
        public ElasticsearchSink.Builder<RowData> builderSpy;
        public ElasticsearchSink<RowData> sinkSpy;
        @Override
        public ElasticsearchSink.Builder<RowData> createBuilder(
                List<HttpHost> httpHosts, RowElasticsearchSinkFunction upsertSinkFunction) {
            builderSpy =
                    Mockito.spy(new ElasticsearchSink.Builder<>(httpHosts, upsertSinkFunction));
            // Intercept build() so the produced sink is also captured as a spy.
            doAnswer(
                            invocation -> {
                                sinkSpy =
                                        Mockito.spy(
                                                (ElasticsearchSink<RowData>)
                                                        invocation.callRealMethod());
                                return sinkSpy;
                            })
                    .when(builderSpy)
                    .build();
            return builderSpy;
        }
    }
    /** Four-column schema covering key, string, decimal and timestamp fields. */
    private TableSchema createTestSchema() {
        return TableSchema.builder()
                .field(FIELD_KEY, DataTypes.BIGINT())
                .field(FIELD_FRUIT_NAME, DataTypes.STRING())
                .field(FIELD_COUNT, DataTypes.DECIMAL(10, 4))
                .field(FIELD_TS, DataTypes.TIMESTAMP(3))
                .build();
    }
    /** Serialization stub; the payload content is irrelevant to these tests. */
    private static class DummySerializationSchema implements SerializationSchema<RowData> {
        private static final DummySerializationSchema INSTANCE = new DummySerializationSchema();
        @Override
        public byte[] serialize(RowData element) {
            return new byte[0];
        }
    }
    /** Encoding format stub returning the shared {@link DummySerializationSchema}. */
    private static class DummyEncodingFormat
            implements EncodingFormat<SerializationSchema<RowData>> {
        @Override
        public SerializationSchema<RowData> createRuntimeEncoder(
                DynamicTableSink.Context context, DataType consumedDataType) {
            return DummySerializationSchema.INSTANCE;
        }
        @Override
        public ChangelogMode getChangelogMode() {
            return null;
        }
    }
    /** Minimal sink context stub; only the unbounded flag is consulted. */
    private static class MockSinkContext implements DynamicTableSink.Context {
        @Override
        public boolean isBounded() {
            return false;
        }
        @Override
        public TypeInformation<?> createTypeInformation(DataType consumedDataType) {
            return null;
        }
        @Override
        public TypeInformation<?> createTypeInformation(LogicalType consumedLogicalType) {
            return null;
        }
        @Override
        public DynamicTableSink.DataStructureConverter createDataStructureConverter(
                DataType consumedDataType) {
            return null;
        }
        // NOTE(review): no @Override — presumably implements a default method added
        // to DynamicTableSink.Context in a newer Flink version; confirm.
        public Optional<int[][]> getTargetColumns() {
            return Optional.empty();
        }
    }
    /** Custom failure handler for testing. */
    public static class DummyFailureHandler implements ActionRequestFailureHandler {
        @Override
        public void onFailure(
                ActionRequest action,
                Throwable failure,
                int restStatusCode,
                RequestIndexer indexer) {
            // do nothing
        }
        // equals/hashCode treat all instances as equal so Mockito's
        // verify(...).setFailureHandler(new DummyFailureHandler()) matches.
        @Override
        public boolean equals(Object o) {
            return o instanceof DummyFailureHandler;
        }
        @Override
        public int hashCode() {
            return DummyFailureHandler.class.hashCode();
        }
    }
}
| 5,736 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/architecture/ProductionCodeArchitectureTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.architecture;
import org.apache.flink.architecture.common.ImportOptions;
import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;
/** Architecture tests for production code. */
@AnalyzeClasses(
        packages = "org.apache.flink.connector",
        importOptions = {
            // Analyze only production classes: skip tests, archives, Scala and
            // shaded classes.
            ImportOption.DoNotIncludeTests.class,
            ImportOption.DoNotIncludeArchives.class,
            ImportOptions.ExcludeScalaImportOption.class,
            ImportOptions.ExcludeShadedImportOption.class
        })
public class ProductionCodeArchitectureTest {
    // Runs the shared production-code ArchUnit rule set against this module.
    @ArchTest
    public static final ArchTests COMMON_TESTS = ArchTests.in(ProductionCodeArchitectureBase.class);
}
| 5,737 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.architecture;
import org.apache.flink.architecture.common.ImportOptions;
import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.core.importer.Location;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;
import java.util.regex.Pattern;
/** Architecture tests for test code. */
@AnalyzeClasses(
        packages = {
            "org.apache.flink.connector.elasticsearch",
            "org.apache.flink.streaming.connectors.elasticsearch6"
        },
        importOptions = {
            // Analyze only test classes from ES6-related locations; exclude Scala
            // and shaded classes.
            ImportOption.OnlyIncludeTests.class,
            TestCodeArchitectureTest.IncludeES6ImportOption.class,
            ImportOptions.ExcludeScalaImportOption.class,
            ImportOptions.ExcludeShadedImportOption.class
        })
public class TestCodeArchitectureTest {
    // Runs the shared test-code ArchUnit rule set against this module.
    @ArchTest
    public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class);
    /** Only include ES6 related locations. */
    public static final class IncludeES6ImportOption implements ImportOption {
        // Matches any class-file location whose path contains "elasticsearch6".
        private static final Pattern ES6 = Pattern.compile(".*elasticsearch6.*");
        @Override
        public boolean includes(Location location) {
            return location.matches(ES6);
        }
    }
}
| 5,738 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch/sink/Elasticsearch6SinkITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.elasticsearch.ElasticsearchUtil;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.elasticsearch.client.RestHighLevelClient;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
/**
 * Integration tests for {@link ElasticsearchSink} against an Elasticsearch 6
 * container; the shared test logic lives in {@link ElasticsearchSinkBaseITCase}.
 */
@Testcontainers
class Elasticsearch6SinkITCase extends ElasticsearchSinkBaseITCase {
    // One password-protected ES6 container shared by all tests in this class.
    @Container
    private static final ElasticsearchContainer ELASTICSEARCH_CONTAINER =
            ElasticsearchUtil.createElasticsearchContainer(DockerImageVersions.ELASTICSEARCH_6, LOG)
                    .withPassword(ELASTICSEARCH_PASSWORD);
    @Override
    Elasticsearch6SinkBuilder<Tuple2<Integer, String>> getSinkBuilder() {
        return new Elasticsearch6SinkBuilder<>();
    }
    @Override
    TestClientBase createTestClient(RestHighLevelClient client) {
        return new Elasticsearch6TestClient(client);
    }
    @Override
    String getElasticsearchHttpHostAddress() {
        return ELASTICSEARCH_CONTAINER.getHttpHostAddress();
    }
}
| 5,739 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch/sink/Elasticsearch6SinkBuilderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.http.HttpHost;
/**
 * Tests for {@link Elasticsearch6SinkBuilder}; the actual assertions come from
 * {@link ElasticsearchSinkBuilderBaseTest}.
 */
class Elasticsearch6SinkBuilderTest
        extends ElasticsearchSinkBuilderBaseTest<Elasticsearch6SinkBuilder<Object>> {
    @Override
    Elasticsearch6SinkBuilder<Object> createEmptyBuilder() {
        // Nothing configured at all.
        return new Elasticsearch6SinkBuilder<>();
    }
    @Override
    Elasticsearch6SinkBuilder<Object> createMinimalBuilder() {
        // Just the two mandatory settings: a (no-op) emitter and one host.
        return new Elasticsearch6SinkBuilder<Object>()
                .setHosts(new HttpHost("localhost:3000"))
                .setEmitter((record, indexer, ctx) -> {});
    }
}
| 5,740 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch/sink/Elasticsearch6TestClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.RestHighLevelClient;
import java.io.IOException;
/** {@link TestClientBase} variant that reads documents via the typed ES6 GET API. */
class Elasticsearch6TestClient extends TestClientBase {
    Elasticsearch6TestClient(RestHighLevelClient client) {
        super(client);
    }
    @Override
    GetResponse getResponse(String index, int id) throws IOException {
        // ES6 still requires the mapping type on document lookups.
        GetRequest request = new GetRequest(index, DOCUMENT_TYPE, String.valueOf(id));
        return client.get(request);
    }
}
| 5,741 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch6DynamicTableFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.table.api.ValidationException;
import org.junit.jupiter.api.Test;
import static org.apache.flink.connector.elasticsearch.table.TestContext.context;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
/** Validation tests for {@link Elasticsearch6DynamicSinkFactory}. */
public class Elasticsearch6DynamicTableFactoryTest extends ElasticsearchDynamicSinkFactoryBaseTest {
    @Override
    ElasticsearchDynamicSinkFactoryBase createSinkFactory() {
        return new Elasticsearch6DynamicSinkFactory();
    }
    @Override
    TestContext createPrefilledTestContext() {
        // All three required options (index, document-type, hosts) pre-populated.
        return context()
                .withOption(ElasticsearchConnectorOptions.INDEX_OPTION.key(), "MyIndex")
                .withOption(Elasticsearch6ConnectorOptions.DOCUMENT_TYPE_OPTION.key(), "MyType")
                .withOption(
                        ElasticsearchConnectorOptions.HOSTS_OPTION.key(), "http://localhost:12345");
    }
    @Test
    public void validateEmptyConfiguration() {
        // With no options at all, the factory must report every required option.
        final ElasticsearchDynamicSinkFactoryBase factory = createSinkFactory();
        final String expectedMessage =
                "One or more required options are missing.\n"
                        + "\n"
                        + "Missing required options are:\n"
                        + "\n"
                        + "document-type\n"
                        + "hosts\n"
                        + "index";
        assertThatThrownBy(() -> factory.createDynamicTableSink(context().build()))
                .isInstanceOf(ValidationException.class)
                .hasMessage(expectedMessage);
    }
}
| 5,742 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch6DynamicSinkITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.connector.elasticsearch.ElasticsearchUtil;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.search.SearchHits;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import java.io.IOException;
import java.util.Map;
import static org.apache.flink.connector.elasticsearch.table.TestContext.context;
/** Integration tests running {@link ElasticsearchDynamicSink} against Elasticsearch 6. */
@Testcontainers
public class Elasticsearch6DynamicSinkITCase extends ElasticsearchDynamicSinkBaseITCase {
    private static final Logger LOG =
            LoggerFactory.getLogger(Elasticsearch6DynamicSinkITCase.class);
    // ES6 indexes are typed; every request issued by these tests uses this type.
    private static final String DOCUMENT_TYPE = "MyType";
    // One ES6 container shared by all tests in this class.
    @Container
    private static final ElasticsearchContainer ES_CONTAINER =
            ElasticsearchUtil.createElasticsearchContainer(
                    DockerImageVersions.ELASTICSEARCH_6, LOG);
    @Override
    String getElasticsearchHttpHostAddress() {
        return ES_CONTAINER.getHttpHostAddress();
    }
    @Override
    ElasticsearchDynamicSinkFactoryBase getDynamicSinkFactory() {
        return new Elasticsearch6DynamicSinkFactory();
    }
    @Override
    Map<String, Object> makeGetRequest(RestHighLevelClient client, String index, String id)
            throws IOException {
        GetRequest request = new GetRequest(index, DOCUMENT_TYPE, id);
        return client.get(request).getSource();
    }
    @Override
    SearchHits makeSearchRequest(RestHighLevelClient client, String index) throws IOException {
        SearchRequest request = new SearchRequest(index);
        return client.search(request).getHits();
    }
    @Override
    long getTotalSearchHits(SearchHits hits) {
        // In the ES6 client getTotalHits() already yields a plain long.
        return hits.getTotalHits();
    }
    @Override
    TestContext getPrefilledTestContext(String index) {
        return context()
                .withOption(Elasticsearch6ConnectorOptions.INDEX_OPTION.key(), index)
                .withOption(
                        Elasticsearch6ConnectorOptions.DOCUMENT_TYPE_OPTION.key(), DOCUMENT_TYPE)
                .withOption(
                        Elasticsearch6ConnectorOptions.HOSTS_OPTION.key(),
                        ES_CONTAINER.getHttpHostAddress());
    }
    @Override
    String getConnectorSql(String index) {
        // Build the WITH-clause body line by line; only the last line omits the comma.
        String connectorLine = String.format("'%s'='%s',\n", "connector", "elasticsearch-6");
        String indexLine =
                String.format(
                        "'%s'='%s',\n", Elasticsearch6ConnectorOptions.INDEX_OPTION.key(), index);
        String typeLine =
                String.format(
                        "'%s'='%s',\n",
                        Elasticsearch6ConnectorOptions.DOCUMENT_TYPE_OPTION.key(), DOCUMENT_TYPE);
        String hostsLine =
                String.format(
                        "'%s'='%s'\n",
                        Elasticsearch6ConnectorOptions.HOSTS_OPTION.key(),
                        ES_CONTAINER.getHttpHostAddress());
        return connectorLine + indexLine + typeLine + hostsLine;
    }
}
| 5,743 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/Elasticsearch6BulkProcessorIndexer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch6;
import org.apache.flink.annotation.Internal;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;

import java.util.concurrent.atomic.AtomicLong;

import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * Implementation of a {@link RequestIndexer}, using a {@link BulkProcessor}. {@link ActionRequest
 * ActionRequests} will be buffered before sending a bulk request to the Elasticsearch cluster.
 *
 * <p>Note: This class is binary compatible to Elasticsearch 6.
 */
@Internal
class Elasticsearch6BulkProcessorIndexer implements RequestIndexer {

    private final BulkProcessor bulkProcessor;

    /** Whether pending requests must be flushed before a checkpoint may complete. */
    private final boolean flushOnCheckpoint;

    /** Shared counter of requests handed to the bulk processor but not yet acknowledged. */
    private final AtomicLong numPendingRequestsRef;

    Elasticsearch6BulkProcessorIndexer(
            BulkProcessor bulkProcessor,
            boolean flushOnCheckpoint,
            AtomicLong numPendingRequestsRef) {
        this.bulkProcessor = checkNotNull(bulkProcessor);
        this.flushOnCheckpoint = flushOnCheckpoint;
        this.numPendingRequestsRef = checkNotNull(numPendingRequestsRef);
    }

    @Override
    public void add(DeleteRequest... deleteRequests) {
        for (DeleteRequest deleteRequest : deleteRequests) {
            addRequest(deleteRequest);
        }
    }

    @Override
    public void add(IndexRequest... indexRequests) {
        for (IndexRequest indexRequest : indexRequests) {
            addRequest(indexRequest);
        }
    }

    @Override
    public void add(UpdateRequest... updateRequests) {
        for (UpdateRequest updateRequest : updateRequests) {
            addRequest(updateRequest);
        }
    }

    /**
     * Buffers a single request in the bulk processor, counting it as pending when
     * flush-on-checkpoint is enabled. {@link DeleteRequest}, {@link IndexRequest} and {@link
     * UpdateRequest} all implement {@link DocWriteRequest}, so the three {@code add} overloads
     * share this logic.
     */
    private void addRequest(DocWriteRequest<?> request) {
        if (flushOnCheckpoint) {
            numPendingRequestsRef.getAndIncrement();
        }
        this.bulkProcessor.add(request);
    }
}
| 5,744 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch6;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
import org.apache.flink.util.Preconditions;
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.client.RestHighLevelClient;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
 * Elasticsearch 6.x sink that requests multiple {@link ActionRequest ActionRequests} against a
 * cluster for each incoming element.
 *
 * <p>The sink internally uses a {@link RestHighLevelClient} to communicate with an Elasticsearch
 * cluster. The sink will fail if no cluster can be connected to using the provided transport
 * addresses passed to the constructor.
 *
 * <p>Internally, the sink will use a {@link BulkProcessor} to send {@link ActionRequest
 * ActionRequests}. This will buffer elements before sending a request to the cluster. The behaviour
 * of the {@code BulkProcessor} can be configured using these config keys:
 *
 * <ul>
 *   <li>{@code bulk.flush.max.actions}: Maximum amount of elements to buffer
 *   <li>{@code bulk.flush.max.size.mb}: Maximum amount of data (in megabytes) to buffer
 *   <li>{@code bulk.flush.interval.ms}: Interval at which to flush data regardless of the other two
 *       settings in milliseconds
 * </ul>
 *
 * <p>You also have to provide an {@link ElasticsearchSinkFunction}. This is used to create multiple
 * {@link ActionRequest ActionRequests} for each incoming element. See the class level documentation
 * of {@link ElasticsearchSinkFunction} for an example.
 *
 * @param <T> Type of the elements handled by this sink
 * @deprecated This sink has been deprecated in favor of {@link
 *     org.apache.flink.connector.elasticsearch.sink.ElasticsearchSink}
 */
@Deprecated
@PublicEvolving
public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T, RestHighLevelClient> {

    private static final long serialVersionUID = 1L;

    /** Instances are only created through {@link Builder#build()}. */
    private ElasticsearchSink(
            Map<String, String> bulkRequestsConfig,
            List<HttpHost> httpHosts,
            ElasticsearchSinkFunction<T> elasticsearchSinkFunction,
            ActionRequestFailureHandler failureHandler,
            RestClientFactory restClientFactory) {
        super(
                new Elasticsearch6ApiCallBridge(httpHosts, restClientFactory),
                bulkRequestsConfig,
                elasticsearchSinkFunction,
                failureHandler);
    }

    /**
     * A builder for creating an {@link ElasticsearchSink}.
     *
     * @param <T> Type of the elements handled by the sink this builder creates.
     * @deprecated This has been deprecated, please use {@link
     *     org.apache.flink.connector.elasticsearch.sink.Elasticsearch6SinkBuilder}.
     */
    @Deprecated
    @PublicEvolving
    public static class Builder<T> {

        private final List<HttpHost> httpHosts;
        private final ElasticsearchSinkFunction<T> elasticsearchSinkFunction;

        // The reference is never reassigned (only mutated by the setters below), so it is final;
        // failureHandler and restClientFactory are replaced wholesale by their setters and cannot
        // be.
        private final Map<String, String> bulkRequestsConfig = new HashMap<>();
        private ActionRequestFailureHandler failureHandler = new NoOpFailureHandler();
        private RestClientFactory restClientFactory = restClientBuilder -> {};

        /**
         * Creates a new {@code ElasticsearchSink} that connects to the cluster using a {@link
         * RestHighLevelClient}.
         *
         * @param httpHosts The list of {@link HttpHost} to which the {@link RestHighLevelClient}
         *     connects to.
         * @param elasticsearchSinkFunction This is used to generate multiple {@link ActionRequest}
         *     from the incoming element.
         */
        public Builder(
                List<HttpHost> httpHosts, ElasticsearchSinkFunction<T> elasticsearchSinkFunction) {
            this.httpHosts = Preconditions.checkNotNull(httpHosts);
            this.elasticsearchSinkFunction = Preconditions.checkNotNull(elasticsearchSinkFunction);
        }

        /**
         * Sets the maximum number of actions to buffer for each bulk request. You can pass -1 to
         * disable it.
         *
         * @param numMaxActions the maximum number of actions to buffer per bulk request.
         */
        public void setBulkFlushMaxActions(int numMaxActions) {
            Preconditions.checkArgument(
                    numMaxActions == -1 || numMaxActions > 0,
                    "Max number of buffered actions must be larger than 0.");
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, String.valueOf(numMaxActions));
        }

        /**
         * Sets the maximum size of buffered actions, in mb, per bulk request. You can pass -1 to
         * disable it.
         *
         * @param maxSizeMb the maximum size of buffered actions, in mb.
         */
        public void setBulkFlushMaxSizeMb(int maxSizeMb) {
            Preconditions.checkArgument(
                    maxSizeMb == -1 || maxSizeMb > 0,
                    "Max size of buffered actions must be larger than 0.");
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB, String.valueOf(maxSizeMb));
        }

        /**
         * Sets the bulk flush interval, in milliseconds. You can pass -1 to disable it.
         *
         * @param intervalMillis the bulk flush interval, in milliseconds.
         */
        public void setBulkFlushInterval(long intervalMillis) {
            Preconditions.checkArgument(
                    intervalMillis == -1 || intervalMillis >= 0,
                    "Interval (in milliseconds) between each flush must be larger than or equal to 0.");
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_INTERVAL_MS, String.valueOf(intervalMillis));
        }

        /**
         * Sets whether or not to enable bulk flush backoff behaviour.
         *
         * @param enabled whether or not to enable backoffs.
         */
        public void setBulkFlushBackoff(boolean enabled) {
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE, String.valueOf(enabled));
        }

        /**
         * Sets the type of back of to use when flushing bulk requests.
         *
         * @param flushBackoffType the backoff type to use.
         */
        public void setBulkFlushBackoffType(FlushBackoffType flushBackoffType) {
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE,
                    Preconditions.checkNotNull(flushBackoffType).toString());
        }

        /**
         * Sets the maximum number of retries for a backoff attempt when flushing bulk requests.
         *
         * @param maxRetries the maximum number of retries for a backoff attempt when flushing bulk
         *     requests
         */
        public void setBulkFlushBackoffRetries(int maxRetries) {
            Preconditions.checkArgument(
                    maxRetries > 0, "Max number of backoff attempts must be larger than 0.");
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES, String.valueOf(maxRetries));
        }

        /**
         * Sets the amount of delay between each backoff attempt when flushing bulk requests, in
         * milliseconds.
         *
         * @param delayMillis the amount of delay between each backoff attempt when flushing bulk
         *     requests, in milliseconds.
         */
        public void setBulkFlushBackoffDelay(long delayMillis) {
            Preconditions.checkArgument(
                    delayMillis >= 0,
                    "Delay (in milliseconds) between each backoff attempt must be larger than or equal to 0.");
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY, String.valueOf(delayMillis));
        }

        /**
         * Sets a failure handler for action requests.
         *
         * @param failureHandler This is used to handle failed {@link ActionRequest}.
         */
        public void setFailureHandler(ActionRequestFailureHandler failureHandler) {
            this.failureHandler = Preconditions.checkNotNull(failureHandler);
        }

        /**
         * Sets a REST client factory for custom client configuration.
         *
         * @param restClientFactory the factory that configures the rest client.
         */
        public void setRestClientFactory(RestClientFactory restClientFactory) {
            this.restClientFactory = Preconditions.checkNotNull(restClientFactory);
        }

        /**
         * Creates the Elasticsearch sink.
         *
         * @return the created Elasticsearch sink.
         */
        public ElasticsearchSink<T> build() {
            return new ElasticsearchSink<>(
                    bulkRequestsConfig,
                    httpHosts,
                    elasticsearchSinkFunction,
                    failureHandler,
                    restClientFactory);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            Builder<?> builder = (Builder<?>) o;
            return Objects.equals(httpHosts, builder.httpHosts)
                    && Objects.equals(elasticsearchSinkFunction, builder.elasticsearchSinkFunction)
                    && Objects.equals(bulkRequestsConfig, builder.bulkRequestsConfig)
                    && Objects.equals(failureHandler, builder.failureHandler)
                    && Objects.equals(restClientFactory, builder.restClientFactory);
        }

        @Override
        public int hashCode() {
            return Objects.hash(
                    httpHosts,
                    elasticsearchSinkFunction,
                    bulkRequestsConfig,
                    failureHandler,
                    restClientFactory);
        }
    }
}
| 5,745 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/Elasticsearch6ApiCallBridge.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch6;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchApiCallBridge;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.apache.flink.util.Preconditions;
import org.apache.http.HttpHost;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.SearchHit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Stream;
/** Implementation of {@link ElasticsearchApiCallBridge} for Elasticsearch 6 and later versions. */
@Internal
public class Elasticsearch6ApiCallBridge
        implements ElasticsearchApiCallBridge<RestHighLevelClient> {

    private static final long serialVersionUID = -5222683870097809633L;

    private static final Logger LOG = LoggerFactory.getLogger(Elasticsearch6ApiCallBridge.class);

    /** User-provided HTTP Host. */
    private final List<HttpHost> httpHosts;

    /** The factory to configure the rest client. */
    private final RestClientFactory restClientFactory;

    public Elasticsearch6ApiCallBridge(
            List<HttpHost> httpHosts, RestClientFactory restClientFactory) {
        Preconditions.checkArgument(httpHosts != null && !httpHosts.isEmpty());
        this.httpHosts = httpHosts;
        this.restClientFactory = Preconditions.checkNotNull(restClientFactory);
    }

    /** Creates a {@link RestHighLevelClient} for the configured hosts, after letting the
     * user-provided factory customize the underlying builder. */
    @Override
    public RestHighLevelClient createClient() {
        // toArray(new HttpHost[0]) is the idiomatic (and on modern JVMs fastest) form.
        RestClientBuilder builder = RestClient.builder(httpHosts.toArray(new HttpHost[0]));
        restClientFactory.configureRestClientBuilder(builder);
        return new RestHighLevelClient(builder);
    }

    @Override
    public BulkProcessor.Builder createBulkProcessorBuilder(
            RestHighLevelClient client, BulkProcessor.Listener listener) {
        return BulkProcessor.builder(client::bulkAsync, listener);
    }

    /**
     * Executes a search request and returns the scroll id together with the hits' JSON sources.
     */
    @Override
    public Tuple2<String, String[]> search(RestHighLevelClient client, SearchRequest searchRequest)
            throws IOException {
        SearchResponse searchResponse = client.search(searchRequest);
        SearchHit[] searchHits = searchResponse.getHits().getHits();
        return new Tuple2<>(
                searchResponse.getScrollId(),
                Stream.of(searchHits).map(SearchHit::getSourceAsString).toArray(String[]::new));
    }

    @Override
    public void close(RestHighLevelClient client) throws IOException {
        client.close();
    }

    /** Returns {@code null} for successful items; otherwise the failure cause. */
    @Override
    public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
        if (!bulkItemResponse.isFailed()) {
            return null;
        } else {
            return bulkItemResponse.getFailure().getCause();
        }
    }

    @Override
    public void configureBulkProcessorFlushInterval(
            BulkProcessor.Builder builder, long flushIntervalMillis) {
        builder.setFlushInterval(TimeValue.timeValueMillis(flushIntervalMillis));
    }

    /**
     * Translates Flink's backoff policy into the Elasticsearch {@link BackoffPolicy}; a {@code
     * null} policy disables backoff entirely.
     */
    @Override
    public void configureBulkProcessorBackoff(
            BulkProcessor.Builder builder,
            @Nullable ElasticsearchSinkBase.BulkFlushBackoffPolicy flushBackoffPolicy) {
        BackoffPolicy backoffPolicy;
        if (flushBackoffPolicy != null) {
            switch (flushBackoffPolicy.getBackoffType()) {
                case CONSTANT:
                    backoffPolicy =
                            BackoffPolicy.constantBackoff(
                                    new TimeValue(flushBackoffPolicy.getDelayMillis()),
                                    flushBackoffPolicy.getMaxRetryCount());
                    break;
                case EXPONENTIAL:
                default:
                    // Exponential backoff doubles as the fallback for unknown types.
                    backoffPolicy =
                            BackoffPolicy.exponentialBackoff(
                                    new TimeValue(flushBackoffPolicy.getDelayMillis()),
                                    flushBackoffPolicy.getMaxRetryCount());
            }
        } else {
            backoffPolicy = BackoffPolicy.noBackoff();
        }
        builder.setBackoffPolicy(backoffPolicy);
    }

    @Override
    public RequestIndexer createBulkProcessorIndexer(
            BulkProcessor bulkProcessor,
            boolean flushOnCheckpoint,
            AtomicLong numPendingRequestsRef) {
        return new Elasticsearch6BulkProcessorIndexer(
                bulkProcessor, flushOnCheckpoint, numPendingRequestsRef);
    }

    /**
     * Pings the cluster and fails fast with a {@link RuntimeException} when no node is reachable.
     */
    @Override
    public void verifyClientConnection(RestHighLevelClient client) throws IOException {
        if (LOG.isInfoEnabled()) {
            LOG.info("Pinging Elasticsearch cluster via hosts {} ...", httpHosts);
        }

        if (!client.ping()) {
            throw new RuntimeException("There are no reachable Elasticsearch nodes!");
        }

        if (LOG.isInfoEnabled()) {
            LOG.info("Elasticsearch RestHighLevelClient is connected to {}", httpHosts.toString());
        }
    }
}
| 5,746 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/RestClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch6;
import org.apache.flink.annotation.PublicEvolving;
import org.elasticsearch.client.RestClientBuilder;
import java.io.Serializable;
/**
 * A factory that is used to configure the {@link org.elasticsearch.client.RestHighLevelClient}
 * internally used in the {@link ElasticsearchSink}.
 *
 * <p>Implementations must be {@link Serializable}, since instances are shipped with the sink to
 * the cluster; customizations (auth, path prefix, timeouts, ...) are applied via the builder
 * callback below.
 *
 * @deprecated This has been deprecated and will be removed in the future.
 */
@Deprecated
@PublicEvolving
public interface RestClientFactory extends Serializable {

    /**
     * Configures the rest client builder. Called once per client creation, before the
     * {@link org.elasticsearch.client.RestHighLevelClient} is built from the builder.
     *
     * @param restClientBuilder the configured rest client builder.
     */
    void configureRestClientBuilder(RestClientBuilder restClientBuilder);
}
| 5,747 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6DynamicSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink;
import org.apache.flink.streaming.connectors.elasticsearch6.RestClientFactory;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.data.RowData;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.StringUtils;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import javax.annotation.Nullable;
import java.time.ZoneId;
import java.util.List;
import java.util.Objects;
/**
 * A {@link DynamicTableSink} that describes how to create a {@link ElasticsearchSink} from a
 * logical description.
 *
 * @deprecated Please use {@link
 *     org.apache.flink.connector.elasticsearch.sink.Elasticsearch6SinkBuilder} to build a {@link
 *     org.apache.flink.connector.elasticsearch.sink.ElasticsearchSink} instead.
 */
@Deprecated
@PublicEvolving
final class Elasticsearch6DynamicSink implements DynamicTableSink {
    @VisibleForTesting
    static final Elasticsearch6RequestFactory REQUEST_FACTORY = new Elasticsearch6RequestFactory();

    private final EncodingFormat<SerializationSchema<RowData>> format;
    private final TableSchema schema;
    private final Elasticsearch6Configuration config;
    /** Session time zone used when resolving dynamic index patterns with time components. */
    private final ZoneId localTimeZoneId;
    private final boolean isDynamicIndexWithSystemTime;

    public Elasticsearch6DynamicSink(
            EncodingFormat<SerializationSchema<RowData>> format,
            Elasticsearch6Configuration config,
            TableSchema schema,
            ZoneId localTimeZoneId) {
        this(format, config, schema, localTimeZoneId, (ElasticsearchSink.Builder::new));
    }

    // --------------------------------------------------------------
    // Hack to make configuration testing possible.
    //
    // The code in this block should never be used outside of tests.
    // Having a way to inject a builder we can assert the builder in
    // the test. We can not assert everything though, e.g. it is not
    // possible to assert flushing on checkpoint, as it is configured
    // on the sink itself.
    // --------------------------------------------------------------

    private final ElasticSearchBuilderProvider builderProvider;

    @FunctionalInterface
    interface ElasticSearchBuilderProvider {
        ElasticsearchSink.Builder<RowData> createBuilder(
                List<HttpHost> httpHosts, RowElasticsearchSinkFunction upsertSinkFunction);
    }

    Elasticsearch6DynamicSink(
            EncodingFormat<SerializationSchema<RowData>> format,
            Elasticsearch6Configuration config,
            TableSchema schema,
            ZoneId localTimeZoneId,
            ElasticSearchBuilderProvider builderProvider) {
        this.format = format;
        this.schema = schema;
        this.config = config;
        this.localTimeZoneId = localTimeZoneId;
        this.isDynamicIndexWithSystemTime = isDynamicIndexWithSystemTime();
        this.builderProvider = builderProvider;
    }

    // --------------------------------------------------------------
    // End of hack to make configuration testing possible
    // --------------------------------------------------------------

    public boolean isDynamicIndexWithSystemTime() {
        IndexGeneratorFactory.IndexHelper indexHelper = new IndexGeneratorFactory.IndexHelper();
        return indexHelper.checkIsDynamicIndexWithSystemTimeFormat(config.getIndex());
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        // UPDATE_BEFORE rows are dropped; upserts against Elasticsearch only need the new value.
        ChangelogMode.Builder builder = ChangelogMode.newBuilder();
        for (RowKind kind : requestedMode.getContainedKinds()) {
            if (kind != RowKind.UPDATE_BEFORE) {
                builder.addContainedKind(kind);
            }
        }
        if (isDynamicIndexWithSystemTime && !requestedMode.containsOnly(RowKind.INSERT)) {
            throw new ValidationException(
                    "Dynamic indexing based on system time only works on append only stream.");
        }
        return builder.build();
    }

    @Override
    public SinkFunctionProvider getSinkRuntimeProvider(Context context) {
        return () -> {
            SerializationSchema<RowData> format =
                    this.format.createRuntimeEncoder(context, schema.toRowDataType());

            final RowElasticsearchSinkFunction upsertFunction =
                    new RowElasticsearchSinkFunction(
                            IndexGeneratorFactory.createIndexGenerator(
                                    config.getIndex(), schema, localTimeZoneId),
                            config.getDocumentType(),
                            format,
                            XContentType.JSON,
                            REQUEST_FACTORY,
                            KeyExtractor.createKeyExtractor(schema, config.getKeyDelimiter()));

            final ElasticsearchSink.Builder<RowData> builder =
                    builderProvider.createBuilder(config.getHosts(), upsertFunction);

            builder.setFailureHandler(config.getFailureHandler());
            builder.setBulkFlushMaxActions(config.getBulkFlushMaxActions());
            // Config stores bytes; the builder API takes megabytes.
            builder.setBulkFlushMaxSizeMb((int) (config.getBulkFlushMaxByteSize() >> 20));
            builder.setBulkFlushInterval(config.getBulkFlushInterval());
            builder.setBulkFlushBackoff(config.isBulkFlushBackoffEnabled());
            config.getBulkFlushBackoffType().ifPresent(builder::setBulkFlushBackoffType);
            config.getBulkFlushBackoffRetries().ifPresent(builder::setBulkFlushBackoffRetries);
            config.getBulkFlushBackoffDelay().ifPresent(builder::setBulkFlushBackoffDelay);

            // we must overwrite the default factory which is defined with a lambda because of a bug
            // in shading lambda serialization shading see FLINK-18006
            if (config.getUsername().isPresent()
                    && config.getPassword().isPresent()
                    && !StringUtils.isNullOrWhitespaceOnly(config.getUsername().get())
                    && !StringUtils.isNullOrWhitespaceOnly(config.getPassword().get())) {
                builder.setRestClientFactory(
                        new AuthRestClientFactory(
                                config.getPathPrefix().orElse(null),
                                config.getUsername().get(),
                                config.getPassword().get()));
            } else {
                builder.setRestClientFactory(
                        new DefaultRestClientFactory(config.getPathPrefix().orElse(null)));
            }

            final ElasticsearchSink<RowData> sink = builder.build();

            if (config.isDisableFlushOnCheckpoint()) {
                sink.disableFlushOnCheckpoint();
            }

            return sink;
        };
    }

    @Override
    public DynamicTableSink copy() {
        return this;
    }

    @Override
    public String asSummaryString() {
        return "Elasticsearch6";
    }

    /** Serializable {@link RestClientFactory} used by the sink. */
    @VisibleForTesting
    static class DefaultRestClientFactory implements RestClientFactory {

        private final String pathPrefix;

        public DefaultRestClientFactory(@Nullable String pathPrefix) {
            this.pathPrefix = pathPrefix;
        }

        @Override
        public void configureRestClientBuilder(RestClientBuilder restClientBuilder) {
            if (pathPrefix != null) {
                restClientBuilder.setPathPrefix(pathPrefix);
            }
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            DefaultRestClientFactory that = (DefaultRestClientFactory) o;
            return Objects.equals(pathPrefix, that.pathPrefix);
        }

        @Override
        public int hashCode() {
            return Objects.hash(pathPrefix);
        }
    }

    /** Serializable {@link RestClientFactory} used by the sink which enable authentication. */
    @VisibleForTesting
    static class AuthRestClientFactory implements RestClientFactory {

        private final String pathPrefix;
        private final String username;
        private final String password;
        // Rebuilt lazily after deserialization; CredentialsProvider is not serializable.
        private transient CredentialsProvider credentialsProvider;

        public AuthRestClientFactory(
                @Nullable String pathPrefix, String username, String password) {
            this.pathPrefix = pathPrefix;
            this.password = password;
            this.username = username;
        }

        @Override
        public void configureRestClientBuilder(RestClientBuilder restClientBuilder) {
            if (pathPrefix != null) {
                restClientBuilder.setPathPrefix(pathPrefix);
            }
            if (credentialsProvider == null) {
                credentialsProvider = new BasicCredentialsProvider();
                credentialsProvider.setCredentials(
                        AuthScope.ANY, new UsernamePasswordCredentials(username, password));
            }
            restClientBuilder.setHttpClientConfigCallback(
                    httpAsyncClientBuilder ->
                            httpAsyncClientBuilder.setDefaultCredentialsProvider(
                                    credentialsProvider));
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            AuthRestClientFactory that = (AuthRestClientFactory) o;
            return Objects.equals(pathPrefix, that.pathPrefix)
                    && Objects.equals(username, that.username)
                    && Objects.equals(password, that.password);
        }

        @Override
        public int hashCode() {
            return Objects.hash(pathPrefix, username, password);
        }
    }

    /**
     * Version-specific creation of {@link org.elasticsearch.action.ActionRequest}s used by the
     * sink.
     */
    private static class Elasticsearch6RequestFactory implements RequestFactory {
        @Override
        public UpdateRequest createUpdateRequest(
                String index,
                String docType,
                String key,
                XContentType contentType,
                byte[] document) {
            return new UpdateRequest(index, docType, key)
                    .doc(document, contentType)
                    .upsert(document, contentType);
        }

        @Override
        public IndexRequest createIndexRequest(
                String index,
                String docType,
                String key,
                XContentType contentType,
                byte[] document) {
            return new IndexRequest(index, docType, key).source(document, contentType);
        }

        @Override
        public DeleteRequest createDeleteRequest(String index, String docType, String key) {
            return new DeleteRequest(index, docType, key);
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        Elasticsearch6DynamicSink that = (Elasticsearch6DynamicSink) o;
        // localTimeZoneId participates: it changes index generation at runtime, so two sinks
        // differing only in time zone must not compare equal.
        return Objects.equals(format, that.format)
                && Objects.equals(schema, that.schema)
                && Objects.equals(config, that.config)
                && Objects.equals(localTimeZoneId, that.localTimeZoneId)
                && Objects.equals(builderProvider, that.builderProvider);
    }

    @Override
    public int hashCode() {
        // Keep in sync with equals(): include localTimeZoneId.
        return Objects.hash(format, schema, config, localTimeZoneId, builderProvider);
    }
}
| 5,748 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6Configuration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.api.ValidationException;
import org.apache.http.HttpHost;
import java.util.List;
import java.util.stream.Collectors;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.HOSTS_OPTION;
/**
 * Elasticsearch 6 specific configuration.
 *
 * <p>Extends the version-agnostic configuration with parsing and validation of the configured
 * hosts into {@link HttpHost} instances.
 */
@Internal
final class Elasticsearch6Configuration extends ElasticsearchConfiguration {
    Elasticsearch6Configuration(ReadableConfig config, ClassLoader classLoader) {
        super(config, classLoader);
    }

    /**
     * Returns the configured hosts, each validated and parsed into an {@link HttpHost}.
     *
     * @throws ValidationException if any host entry cannot be parsed or has no explicit port
     */
    public List<HttpHost> getHosts() {
        return config.get(HOSTS_OPTION).stream()
                .map(Elasticsearch6Configuration::validateAndParseHostsString)
                .collect(Collectors.toList());
    }

    /**
     * Parse Hosts String to list.
     *
     * <p>Hosts String format was given as following:
     *
     * <pre>
     * connector.hosts = http://host_name:9092;http://host_name:9093
     * </pre>
     *
     * @throws ValidationException if {@code host} cannot be parsed or is missing port/scheme
     */
    private static HttpHost validateAndParseHostsString(String host) {
        try {
            HttpHost httpHost = HttpHost.create(host);
            if (httpHost.getPort() < 0) {
                // Negative port means no explicit port was given, e.g. "http://host_name".
                throw new ValidationException(
                        String.format(
                                "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'. Missing port.",
                                host, HOSTS_OPTION.key()));
            }
            if (httpHost.getSchemeName() == null) {
                throw new ValidationException(
                        String.format(
                                "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'. Missing scheme.",
                                host, HOSTS_OPTION.key()));
            }
            return httpHost;
        } catch (Exception e) {
            // NOTE(review): the two ValidationExceptions thrown above are also caught here and
            // re-wrapped, so callers always see the generic message with the specific
            // "Missing port"/"Missing scheme" exception attached as the cause.
            throw new ValidationException(
                    String.format(
                            "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'.",
                            host, HOSTS_OPTION.key()),
                    e);
        }
    }
}
| 5,749 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6DynamicSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.streaming.connectors.elasticsearch6.Elasticsearch6ApiCallBridge;
import org.apache.flink.streaming.connectors.elasticsearch6.RestClientFactory;
import org.apache.flink.table.connector.Projection;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.LookupTableSource;
import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
import org.apache.flink.table.connector.source.lookup.LookupFunctionProvider;
import org.apache.flink.table.connector.source.lookup.PartialCachingLookupProvider;
import org.apache.flink.table.connector.source.lookup.cache.LookupCache;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.util.Preconditions;
import org.apache.flink.util.StringUtils;
import org.elasticsearch.client.RestHighLevelClient;
import javax.annotation.Nullable;
/**
 * A {@link DynamicTableSource} for Elasticsearch 6 that serves rows via point lookups
 * ({@link LookupTableSource}, e.g. for temporal joins) and supports projection push-down.
 */
@Internal
public class Elasticsearch6DynamicSource implements LookupTableSource, SupportsProjectionPushDown {
    // Format used to decode Elasticsearch documents into RowData.
    private final DecodingFormat<DeserializationSchema<RowData>> format;
    // Connector options (hosts, index, document type, credentials, path prefix, ...).
    private final Elasticsearch6Configuration config;
    // Maximum number of retries for a single lookup request.
    private final int lookupMaxRetryTimes;
    // Optional partial cache for lookup results; null disables caching.
    private final LookupCache lookupCache;
    // Produced row type; narrowed by applyProjection, hence not final.
    private DataType physicalRowDataType;

    public Elasticsearch6DynamicSource(
            DecodingFormat<DeserializationSchema<RowData>> format,
            Elasticsearch6Configuration config,
            DataType physicalRowDataType,
            int lookupMaxRetryTimes,
            @Nullable LookupCache lookupCache) {
        this.format = format;
        this.config = config;
        this.physicalRowDataType = physicalRowDataType;
        this.lookupMaxRetryTimes = lookupMaxRetryTimes;
        this.lookupCache = lookupCache;
    }

    @Override
    public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) {
        // Use the basic-auth client factory only when both username and password are present
        // and non-blank; otherwise fall back to the default factory (optional path prefix only).
        RestClientFactory restClientFactory = null;
        if (config.getUsername().isPresent()
                && config.getPassword().isPresent()
                && !StringUtils.isNullOrWhitespaceOnly(config.getUsername().get())
                && !StringUtils.isNullOrWhitespaceOnly(config.getPassword().get())) {
            restClientFactory =
                    new Elasticsearch6DynamicSink.AuthRestClientFactory(
                            config.getPathPrefix().orElse(null),
                            config.getUsername().get(),
                            config.getPassword().get());
        } else {
            restClientFactory =
                    new Elasticsearch6DynamicSink.DefaultRestClientFactory(
                            config.getPathPrefix().orElse(null));
        }
        Elasticsearch6ApiCallBridge elasticsearch6ApiCallBridge =
                new Elasticsearch6ApiCallBridge(config.getHosts(), restClientFactory);
        // Elasticsearch only support non-nested look up keys: map each (single-element) key
        // index supplied by the planner to its top-level field name.
        String[] keyNames = new String[context.getKeys().length];
        for (int i = 0; i < keyNames.length; i++) {
            int[] innerKeyArr = context.getKeys()[i];
            Preconditions.checkArgument(
                    innerKeyArr.length == 1, "Elasticsearch only support non-nested look up keys");
            keyNames[i] = DataType.getFieldNames(physicalRowDataType).get(innerKeyArr[0]);
        }
        ElasticsearchRowDataLookupFunction<RestHighLevelClient> lookupFunction =
                new ElasticsearchRowDataLookupFunction<>(
                        this.format.createRuntimeDecoder(context, physicalRowDataType),
                        lookupMaxRetryTimes,
                        config.getIndex(),
                        config.getDocumentType(),
                        DataType.getFieldNames(physicalRowDataType).toArray(new String[0]),
                        DataType.getFieldDataTypes(physicalRowDataType).toArray(new DataType[0]),
                        keyNames,
                        elasticsearch6ApiCallBridge);
        if (lookupCache != null) {
            // Wrap the lookup function with the planner-managed partial cache.
            return PartialCachingLookupProvider.of(lookupFunction, lookupCache);
        } else {
            return LookupFunctionProvider.of(lookupFunction);
        }
    }

    @Override
    public DynamicTableSource copy() {
        return new Elasticsearch6DynamicSource(
                format, config, physicalRowDataType, lookupMaxRetryTimes, lookupCache);
    }

    @Override
    public String asSummaryString() {
        return "Elasticsearch6";
    }

    @Override
    public boolean supportsNestedProjection() {
        // Nested projection push-down is not supported by this source.
        return false;
    }

    @Override
    public void applyProjection(int[][] projectedFields, DataType type) {
        // Narrow the produced row type to the projected fields.
        this.physicalRowDataType = Projection.of(projectedFields).project(type);
    }
}
| 5,750 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/Elasticsearch6DynamicTableFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.api.config.TableConfigOptions;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.lookup.LookupOptions;
import org.apache.flink.table.connector.source.lookup.cache.DefaultLookupCache;
import org.apache.flink.table.connector.source.lookup.cache.LookupCache;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.factories.DeserializationFormatFactory;
import org.apache.flink.table.factories.DynamicTableFactory;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.factories.SerializationFormatFactory;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.utils.TableSchemaUtils;
import org.apache.flink.util.StringUtils;
import javax.annotation.Nullable;
import java.time.ZoneId;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLASH_MAX_SIZE_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_TYPE_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_INTERVAL_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_PATH_PREFIX;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.FAILURE_HANDLER_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.FORMAT_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.HOSTS_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.INDEX_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.KEY_DELIMITER_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.PASSWORD_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.USERNAME_OPTION;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.CACHE_TYPE;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.MAX_RETRIES;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.PARTIAL_CACHE_CACHE_MISSING_KEY;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.PARTIAL_CACHE_EXPIRE_AFTER_ACCESS;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.PARTIAL_CACHE_EXPIRE_AFTER_WRITE;
import static org.apache.flink.table.connector.source.lookup.LookupOptions.PARTIAL_CACHE_MAX_ROWS;
/**
 * A {@link DynamicTableFactory} for discovering {@link Elasticsearch6DynamicSource} and {@link
 * Elasticsearch6DynamicSink}.
 */
@Internal
public class Elasticsearch6DynamicTableFactory
        implements DynamicTableSourceFactory, DynamicTableSinkFactory {
    // Options every table definition must provide.
    private static final Set<ConfigOption<?>> requiredOptions =
            Stream.of(HOSTS_OPTION, INDEX_OPTION, DOCUMENT_TYPE_OPTION).collect(Collectors.toSet());
    // Optional options: sink tuning (bulk flush, failure handling), auth, format, lookup cache.
    private static final Set<ConfigOption<?>> optionalOptions =
            Stream.of(
                            KEY_DELIMITER_OPTION,
                            FAILURE_HANDLER_OPTION,
                            FLUSH_ON_CHECKPOINT_OPTION,
                            BULK_FLASH_MAX_SIZE_OPTION,
                            BULK_FLUSH_MAX_ACTIONS_OPTION,
                            BULK_FLUSH_INTERVAL_OPTION,
                            BULK_FLUSH_BACKOFF_TYPE_OPTION,
                            BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION,
                            BULK_FLUSH_BACKOFF_DELAY_OPTION,
                            CONNECTION_PATH_PREFIX,
                            FORMAT_OPTION,
                            PASSWORD_OPTION,
                            USERNAME_OPTION,
                            CACHE_TYPE,
                            PARTIAL_CACHE_EXPIRE_AFTER_ACCESS,
                            PARTIAL_CACHE_EXPIRE_AFTER_WRITE,
                            PARTIAL_CACHE_MAX_ROWS,
                            PARTIAL_CACHE_CACHE_MISSING_KEY,
                            MAX_RETRIES)
                    .collect(Collectors.toSet());

    /** Creates the lookup source used for point queries / temporal joins. */
    @Override
    public DynamicTableSource createDynamicTableSource(Context context) {
        DataType physicalRowDataType = context.getPhysicalRowDataType();
        final FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);
        final ReadableConfig options = helper.getOptions();
        final DecodingFormat<DeserializationSchema<RowData>> format =
                helper.discoverDecodingFormat(
                        DeserializationFormatFactory.class,
                        // NOTE(review): this references the FORMAT_OPTION from the
                        // 'connector.elasticsearch.table' package while the sink path below uses
                        // the statically imported one from this package - presumably both share
                        // the same key; confirm.
                        org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions
                                .FORMAT_OPTION);
        helper.validate();
        Configuration configuration = new Configuration();
        context.getCatalogTable().getOptions().forEach(configuration::setString);
        Elasticsearch6Configuration config =
                new Elasticsearch6Configuration(configuration, context.getClassLoader());
        return new Elasticsearch6DynamicSource(
                format,
                config,
                physicalRowDataType,
                options.get(MAX_RETRIES),
                getLookupCache(options));
    }

    /** Creates the sink; validates the primary key and all sink-related options first. */
    @Override
    public DynamicTableSink createDynamicTableSink(Context context) {
        TableSchema tableSchema = context.getCatalogTable().getSchema();
        ElasticsearchValidationUtils.validatePrimaryKey(tableSchema);
        final FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);
        final EncodingFormat<SerializationSchema<RowData>> format =
                helper.discoverEncodingFormat(SerializationFormatFactory.class, FORMAT_OPTION);
        helper.validate();
        Configuration configuration = new Configuration();
        context.getCatalogTable().getOptions().forEach(configuration::setString);
        Elasticsearch6Configuration config =
                new Elasticsearch6Configuration(configuration, context.getClassLoader());
        validate(config, configuration);
        return new Elasticsearch6DynamicSink(
                format,
                config,
                TableSchemaUtils.getPhysicalSchema(tableSchema),
                getLocalTimeZoneId(context.getConfiguration()));
    }

    /** Returns a partial lookup cache when configured, otherwise {@code null} (no caching). */
    @Nullable
    private LookupCache getLookupCache(ReadableConfig tableOptions) {
        LookupCache cache = null;
        if (tableOptions
                .get(LookupOptions.CACHE_TYPE)
                .equals(LookupOptions.LookupCacheType.PARTIAL)) {
            cache = DefaultLookupCache.fromConfig(tableOptions);
        }
        return cache;
    }

    /**
     * Resolves the session time zone: the system default when the config still holds the option's
     * default value, otherwise the explicitly configured zone id.
     */
    ZoneId getLocalTimeZoneId(ReadableConfig readableConfig) {
        final String zone = readableConfig.get(TableConfigOptions.LOCAL_TIME_ZONE);
        final ZoneId zoneId =
                TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
                        ? ZoneId.systemDefault()
                        : ZoneId.of(zone);
        return zoneId;
    }

    /**
     * Validates option combinations that cannot be expressed via the required/optional option
     * sets alone.
     *
     * @throws ValidationException if any constraint is violated
     */
    private void validate(Elasticsearch6Configuration config, Configuration originalConfiguration) {
        config.getFailureHandler(); // checks if we can instantiate the custom failure handler
        config.getHosts(); // validate hosts
        validate(
                config.getIndex().length() >= 1,
                () -> String.format("'%s' must not be empty", INDEX_OPTION.key()));
        // -1 disables the max-actions flush trigger; any positive value is a real threshold.
        int maxActions = config.getBulkFlushMaxActions();
        validate(
                maxActions == -1 || maxActions >= 1,
                () ->
                        String.format(
                                "'%s' must be at least 1. Got: %s",
                                BULK_FLUSH_MAX_ACTIONS_OPTION.key(), maxActions));
        // The max flush size must be a whole number of megabytes (or -1 to disable).
        long maxSize = config.getBulkFlushMaxByteSize();
        long mb1 = 1024 * 1024;
        validate(
                maxSize == -1 || (maxSize >= mb1 && maxSize % mb1 == 0),
                () ->
                        String.format(
                                "'%s' must be in MB granularity. Got: %s",
                                BULK_FLASH_MAX_SIZE_OPTION.key(),
                                originalConfiguration
                                        .get(BULK_FLASH_MAX_SIZE_OPTION)
                                        .toHumanReadableString()));
        validate(
                config.getBulkFlushBackoffRetries().map(retries -> retries >= 1).orElse(true),
                () ->
                        String.format(
                                "'%s' must be at least 1. Got: %s",
                                BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION.key(),
                                config.getBulkFlushBackoffRetries().get()));
        // Username and password must be provided together.
        if (config.getUsername().isPresent()
                && !StringUtils.isNullOrWhitespaceOnly(config.getUsername().get())) {
            validate(
                    config.getPassword().isPresent()
                            && !StringUtils.isNullOrWhitespaceOnly(config.getPassword().get()),
                    () ->
                            String.format(
                                    "'%s' and '%s' must be set at the same time. Got: username '%s' and password '%s'",
                                    USERNAME_OPTION.key(),
                                    PASSWORD_OPTION.key(),
                                    config.getUsername().get(),
                                    config.getPassword().orElse("")));
        }
    }

    /** Throws a {@link ValidationException} with the lazily built message if the check fails. */
    private static void validate(boolean condition, Supplier<String> message) {
        if (!condition) {
            throw new ValidationException(message.get());
        }
    }

    @Override
    public String factoryIdentifier() {
        return "elasticsearch-6";
    }

    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        return requiredOptions;
    }

    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        return optionalOptions;
    }
}
| 5,751 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/connector/elasticsearch/sink/Elasticsearch6SinkBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.annotation.PublicEvolving;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
/**
 * Builder to construct an Elasticsearch 6 compatible {@link ElasticsearchSink}.
 *
 * <p>The following example shows the minimal setup to create a ElasticsearchSink that submits
 * actions on checkpoint or the default number of actions was buffered (1000).
 *
 * <pre>{@code
 * ElasticsearchSink<String> sink = new Elasticsearch6SinkBuilder<String>()
 *     .setHosts(new HttpHost("localhost", 9200))
 *     .setEmitter((element, context, indexer) -> {
 *         indexer.add(
 *             new IndexRequest("my-index","my-type")
 *                 .id(element.f0.toString())
 *                 .source(element.f1)
 *         );
 *     })
 *     .build();
 * }</pre>
 *
 * @param <IN> type of the records converted to Elasticsearch actions
 */
@PublicEvolving
public class Elasticsearch6SinkBuilder<IN>
        extends ElasticsearchSinkBuilderBase<IN, Elasticsearch6SinkBuilder<IN>> {
    public Elasticsearch6SinkBuilder() {}

    @Override
    public <T extends IN> Elasticsearch6SinkBuilder<T> setEmitter(
            ElasticsearchEmitter<? super T> emitter) {
        // Delegate to the base builder, then narrow the return type to this ES6 builder.
        super.<T>setEmitter(emitter);
        return self();
    }

    /**
     * Creates the factory that translates the version-agnostic {@code BulkProcessorConfig} into
     * an Elasticsearch 6 {@link BulkProcessor.Builder}.
     */
    @Override
    protected BulkProcessorBuilderFactory getBulkProcessorBuilderFactory() {
        return new BulkProcessorBuilderFactory() {
            @Override
            public BulkProcessor.Builder apply(
                    RestHighLevelClient client,
                    BulkProcessorConfig bulkProcessorConfig,
                    BulkProcessor.Listener listener) {
                BulkProcessor.Builder builder =
                        BulkProcessor.builder(
                                new BulkRequestConsumerFactory() { // This cannot be inlined as a
                                    // lambda because then
                                    // deserialization fails
                                    @Override
                                    public void accept(
                                            BulkRequest bulkRequest,
                                            ActionListener<BulkResponse>
                                                    bulkResponseActionListener) {
                                        client.bulkAsync(
                                                bulkRequest,
                                                RequestOptions.DEFAULT,
                                                bulkResponseActionListener);
                                    }
                                },
                                listener);
                // For each flush trigger below, -1 means "leave the BulkProcessor default".
                if (bulkProcessorConfig.getBulkFlushMaxActions() != -1) {
                    builder.setBulkActions(bulkProcessorConfig.getBulkFlushMaxActions());
                }
                if (bulkProcessorConfig.getBulkFlushMaxMb() != -1) {
                    builder.setBulkSize(
                            new ByteSizeValue(
                                    bulkProcessorConfig.getBulkFlushMaxMb(), ByteSizeUnit.MB));
                }
                if (bulkProcessorConfig.getBulkFlushInterval() != -1) {
                    builder.setFlushInterval(
                            new TimeValue(bulkProcessorConfig.getBulkFlushInterval()));
                }
                // Map the connector's backoff type onto Elasticsearch's BackoffPolicy.
                BackoffPolicy backoffPolicy;
                final TimeValue backoffDelay =
                        new TimeValue(bulkProcessorConfig.getBulkFlushBackOffDelay());
                final int maxRetryCount = bulkProcessorConfig.getBulkFlushBackoffRetries();
                switch (bulkProcessorConfig.getFlushBackoffType()) {
                    case CONSTANT:
                        backoffPolicy = BackoffPolicy.constantBackoff(backoffDelay, maxRetryCount);
                        break;
                    case EXPONENTIAL:
                        backoffPolicy =
                                BackoffPolicy.exponentialBackoff(backoffDelay, maxRetryCount);
                        break;
                    case NONE:
                        backoffPolicy = BackoffPolicy.noBackoff();
                        break;
                    default:
                        throw new IllegalArgumentException(
                                "Received unknown backoff policy type "
                                        + bulkProcessorConfig.getFlushBackoffType());
                }
                builder.setBackoffPolicy(backoffPolicy);
                return builder;
            }
        };
    }
}
| 5,752 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch6DynamicSinkFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.connector.elasticsearch.sink.Elasticsearch6SinkBuilder;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.util.StringUtils;
import javax.annotation.Nullable;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.flink.connector.elasticsearch.table.Elasticsearch6ConnectorOptions.DOCUMENT_TYPE_OPTION;
/** A {@link DynamicTableSinkFactory} for discovering {@link ElasticsearchDynamicSink}. */
@Internal
public class Elasticsearch6DynamicSinkFactory extends ElasticsearchDynamicSinkFactoryBase {
    private static final String FACTORY_IDENTIFIER = "elasticsearch-6";

    public Elasticsearch6DynamicSinkFactory() {
        super(FACTORY_IDENTIFIER, Elasticsearch6SinkBuilder::new);
    }

    @Override
    ElasticsearchConfiguration getConfiguration(FactoryUtil.TableFactoryHelper helper) {
        return new Elasticsearch6Configuration(helper.getOptions());
    }

    @Nullable
    @Override
    String getDocumentType(ElasticsearchConfiguration configuration) {
        // Safe cast: getConfiguration above always produces an Elasticsearch6Configuration.
        return ((Elasticsearch6Configuration) configuration).getDocumentType();
    }

    @Override
    void validateConfiguration(ElasticsearchConfiguration config) {
        super.validateConfiguration(config);
        final Elasticsearch6Configuration es6Config = (Elasticsearch6Configuration) config;
        // ES 6 additionally requires a non-blank document type.
        validate(
                !StringUtils.isNullOrWhitespaceOnly(es6Config.getDocumentType()),
                () -> String.format("'%s' must not be empty", DOCUMENT_TYPE_OPTION.key()));
    }

    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        final Set<ConfigOption<?>> options = super.requiredOptions();
        options.add(DOCUMENT_TYPE_OPTION);
        return options;
    }

    @Override
    public Set<ConfigOption<?>> forwardOptions() {
        return Stream.concat(Stream.of(DOCUMENT_TYPE_OPTION), super.forwardOptions().stream())
                .collect(Collectors.toSet());
    }
}
| 5,753 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch6Configuration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.ReadableConfig;
import static org.apache.flink.connector.elasticsearch.table.Elasticsearch6ConnectorOptions.DOCUMENT_TYPE_OPTION;
/** Elasticsearch 6 specific configuration. */
@Internal
final class Elasticsearch6Configuration extends ElasticsearchConfiguration {
    Elasticsearch6Configuration(ReadableConfig config) {
        super(config);
    }

    /**
     * Returns the configured Elasticsearch document type. The option has no default value, so
     * this is {@code null} if the option was not set (the sink factory validates it is present).
     */
    public String getDocumentType() {
        return config.get(DOCUMENT_TYPE_OPTION);
    }
}
| 5,754 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch6/src/main/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch6ConnectorOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
/**
 * Options specific for the Elasticsearch 6 connector. Public so that the {@link
 * org.apache.flink.table.api.TableDescriptor} can access it.
 */
@PublicEvolving
public final class Elasticsearch6ConnectorOptions extends ElasticsearchConnectorOptions {
    // Pure options holder; never instantiated.
    private Elasticsearch6ConnectorOptions() {}

    /** Document type of the Elasticsearch documents; no default, must be supplied by the user. */
    public static final ConfigOption<String> DOCUMENT_TYPE_OPTION =
            ConfigOptions.key("document-type")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Elasticsearch document type.");
}
| 5,755 |
0 | Create_ds/flink-connector-elasticsearch/flink-sql-connector-elasticsearch6/src/test/java/org/apache/flink/connectors | Create_ds/flink-connector-elasticsearch/flink-sql-connector-elasticsearch6/src/test/java/org/apache/flink/connectors/elasticsearch/PackagingITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connectors.elasticsearch;
import org.apache.flink.packaging.PackagingTestUtils;
import org.apache.flink.table.factories.Factory;
import org.apache.flink.test.resources.ResourceTestUtils;
import org.junit.jupiter.api.Test;
import java.nio.file.Path;
import java.util.Arrays;
class PackagingITCase {
    /**
     * Checks the shaded sql-connector jar: it must contain only the listed package prefixes and
     * must declare the table {@link Factory} service-loader entry so the connector is
     * discoverable by the planner.
     */
    @Test
    void testPackaging() throws Exception {
        final Path jar =
                ResourceTestUtils.getResource(".*/flink-sql-connector-elasticsearch6-[^/]*\\.jar");
        PackagingTestUtils.assertJarContainsOnlyFilesMatching(
                jar,
                Arrays.asList(
                        "META-INF/",
                        "org/apache/flink/connector/base/",
                        "org/apache/flink/connector/elasticsearch/",
                        "org/apache/flink/elasticsearch6/",
                        "org/apache/flink/streaming/connectors/elasticsearch/",
                        "org/apache/flink/streaming/connectors/elasticsearch6/"));
        PackagingTestUtils.assertJarContainsServiceEntry(jar, Factory.class);
    }
}
| 5,756 |
0 | Create_ds/flink-connector-elasticsearch/flink-sql-connector-elasticsearch7/src/test/java/org/apache/flink/connectors | Create_ds/flink-connector-elasticsearch/flink-sql-connector-elasticsearch7/src/test/java/org/apache/flink/connectors/elasticsearch/PackagingITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connectors.elasticsearch;
import org.apache.flink.packaging.PackagingTestUtils;
import org.apache.flink.table.factories.Factory;
import org.apache.flink.test.resources.ResourceTestUtils;
import org.junit.jupiter.api.Test;
import java.nio.file.Path;
import java.util.Arrays;
class PackagingITCase {

    @Test
    void testPackaging() throws Exception {
        // Locate the shaded sql-connector jar produced by this module's build.
        final String jarNamePattern = ".*/flink-sql-connector-elasticsearch7-[^/]*\\.jar";
        final Path connectorJar = ResourceTestUtils.getResource(jarNamePattern);

        // The shaded jar must only ship the expected (relocated/bundled) packages ...
        PackagingTestUtils.assertJarContainsOnlyFilesMatching(
                connectorJar,
                Arrays.asList(
                        "META-INF/",
                        "org/apache/flink/connector/base/",
                        "org/apache/flink/connector/elasticsearch/",
                        "org/apache/flink/elasticsearch7/",
                        "org/apache/flink/streaming/connectors/elasticsearch/",
                        "org/apache/flink/streaming/connectors/elasticsearch7/"));

        // ... and must still advertise the table factory through its SPI service entry.
        PackagingTestUtils.assertJarContainsServiceEntry(connectorJar, Factory.class);
    }
}
| 5,757 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.testutils.CheckedThread;
import org.apache.flink.core.testutils.MultiShotLatch;
import org.apache.flink.streaming.api.operators.StreamSink;
import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.streaming.util.MockStreamingRuntimeContext;
import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Suite of tests for {@link ElasticsearchSinkBase}. The tests use a mocked {@link BulkProcessor}
 * (see {@link DummyElasticsearchSink}) so that flushing, item failures, and whole-bulk failures
 * can be triggered deterministically without a real Elasticsearch cluster.
 */
public class ElasticsearchSinkBaseTest {

    /** Verifies that the collection given to the sink is not modified. */
    @Test
    public void testCollectionArgumentNotModified() {
        Map<String, String> userConfig = new HashMap<>();
        userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY, "1");
        userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE, "true");
        userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES, "1");
        userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE, "CONSTANT");
        userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_INTERVAL_MS, "1");
        userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
        userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB, "1");

        // the sink constructor consumes the config; it must not mutate this unmodifiable view
        new DummyElasticsearchSink<>(
                Collections.unmodifiableMap(userConfig),
                new SimpleSinkFunction<String>(),
                new NoOpFailureHandler());
    }

    /**
     * Tests that any item failure in the listener callbacks is rethrown on an immediately following
     * invoke call.
     */
    @Test
    public void testItemFailureRethrownOnInvoke() throws Throwable {
        final DummyElasticsearchSink<String> sink =
                new DummyElasticsearchSink<>(
                        new HashMap<String, String>(),
                        new SimpleSinkFunction<String>(),
                        new NoOpFailureHandler());

        final OneInputStreamOperatorTestHarness<String, Object> testHarness =
                new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

        testHarness.open();

        // setup the next bulk request, and its mock item failures
        sink.setMockItemFailuresListForNextBulkItemResponses(
                Collections.singletonList(new Exception("artificial failure for record")));

        testHarness.processElement(new StreamRecord<>("msg"));
        verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

        // manually execute the next bulk request
        sink.manualBulkRequestWithAllPendingRequests();

        assertThatThrownBy(() -> testHarness.processElement(new StreamRecord<>("next msg")))
                .getCause()
                .hasMessageContaining("artificial failure for record");
    }

    /**
     * Tests that any item failure in the listener callbacks is rethrown on an immediately following
     * checkpoint.
     */
    @Test
    public void testItemFailureRethrownOnCheckpoint() throws Throwable {
        final DummyElasticsearchSink<String> sink =
                new DummyElasticsearchSink<>(
                        new HashMap<String, String>(),
                        new SimpleSinkFunction<String>(),
                        new NoOpFailureHandler());

        final OneInputStreamOperatorTestHarness<String, Object> testHarness =
                new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

        testHarness.open();

        // setup the next bulk request, and its mock item failures
        sink.setMockItemFailuresListForNextBulkItemResponses(
                Collections.singletonList(new Exception("artificial failure for record")));

        testHarness.processElement(new StreamRecord<>("msg"));
        verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

        // manually execute the next bulk request
        sink.manualBulkRequestWithAllPendingRequests();

        assertThatThrownBy(() -> testHarness.snapshot(1L, 1000L))
                .getCause()
                .getCause()
                .hasMessageContaining("artificial failure for record");
    }

    /**
     * Tests that any item failure in the listener callbacks due to flushing on an immediately
     * following checkpoint is rethrown; we set a timeout because the test will not finish if the
     * logic is broken.
     */
    @Test(timeout = 5000)
    public void testItemFailureRethrownOnCheckpointAfterFlush() throws Throwable {
        final DummyElasticsearchSink<String> sink =
                new DummyElasticsearchSink<>(
                        new HashMap<String, String>(),
                        new SimpleSinkFunction<String>(),
                        new NoOpFailureHandler());

        final OneInputStreamOperatorTestHarness<String, Object> testHarness =
                new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

        testHarness.open();

        // setup the next bulk request, and its mock item failures
        List<Exception> mockResponsesList = new ArrayList<>(2);
        mockResponsesList.add(null); // the first request in a bulk will succeed
        mockResponsesList.add(
                new Exception(
                        "artificial failure for record")); // the second request in a bulk will fail

        sink.setMockItemFailuresListForNextBulkItemResponses(mockResponsesList);

        testHarness.processElement(new StreamRecord<>("msg-1"));
        verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

        // manually execute the next bulk request (1 request only, thus should succeed)
        sink.manualBulkRequestWithAllPendingRequests();

        // setup the requests to be flushed in the snapshot
        testHarness.processElement(new StreamRecord<>("msg-2"));
        testHarness.processElement(new StreamRecord<>("msg-3"));
        verify(sink.getMockBulkProcessor(), times(3)).add(any(IndexRequest.class));

        CheckedThread snapshotThread =
                new CheckedThread() {
                    @Override
                    public void go() throws Exception {
                        testHarness.snapshot(1L, 1000L);
                    }
                };
        snapshotThread.start();

        // the snapshot should eventually be blocked before snapshot triggers flushing
        while (snapshotThread.getState() != Thread.State.WAITING) {
            Thread.sleep(10);
        }

        // let the snapshot-triggered flush continue (2 records in the bulk, so the 2nd one should
        // fail)
        sink.continueFlush();

        assertThatThrownBy(snapshotThread::sync)
                .getCause()
                .getCause()
                .hasMessageContaining("artificial failure for record");
    }

    /**
     * Tests that any bulk failure in the listener callbacks is rethrown on an immediately following
     * invoke call.
     */
    @Test
    public void testBulkFailureRethrownOnInvoke() throws Throwable {
        final DummyElasticsearchSink<String> sink =
                new DummyElasticsearchSink<>(
                        new HashMap<String, String>(),
                        new SimpleSinkFunction<String>(),
                        new NoOpFailureHandler());

        final OneInputStreamOperatorTestHarness<String, Object> testHarness =
                new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

        testHarness.open();

        // setup the next bulk request, and let the whole bulk request fail
        sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));

        testHarness.processElement(new StreamRecord<>("msg"));
        verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

        // manually execute the next bulk request
        sink.manualBulkRequestWithAllPendingRequests();

        assertThatThrownBy(() -> testHarness.processElement(new StreamRecord<>("next msg")))
                .getCause()
                .hasMessageContaining("artificial failure for bulk request");
    }

    /**
     * Tests that any bulk failure in the listener callbacks is rethrown on an immediately following
     * checkpoint.
     */
    @Test
    public void testBulkFailureRethrownOnCheckpoint() throws Throwable {
        final DummyElasticsearchSink<String> sink =
                new DummyElasticsearchSink<>(
                        new HashMap<String, String>(),
                        new SimpleSinkFunction<String>(),
                        new NoOpFailureHandler());

        final OneInputStreamOperatorTestHarness<String, Object> testHarness =
                new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

        testHarness.open();

        // setup the next bulk request, and let the whole bulk request fail
        sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));

        testHarness.processElement(new StreamRecord<>("msg"));
        verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

        // manually execute the next bulk request
        sink.manualBulkRequestWithAllPendingRequests();

        assertThatThrownBy(() -> testHarness.snapshot(1L, 1000L))
                .getCause()
                .getCause()
                .hasMessageContaining("artificial failure for bulk request");
    }

    /**
     * Tests that any bulk failure in the listener callbacks due to flushing on an immediately
     * following checkpoint is rethrown; we set a timeout because the test will not finish if the
     * logic is broken.
     */
    @Test(timeout = 5000)
    public void testBulkFailureRethrownOnOnCheckpointAfterFlush() throws Throwable {
        final DummyElasticsearchSink<String> sink =
                new DummyElasticsearchSink<>(
                        new HashMap<String, String>(),
                        new SimpleSinkFunction<String>(),
                        new NoOpFailureHandler());

        final OneInputStreamOperatorTestHarness<String, Object> testHarness =
                new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

        testHarness.open();

        // setup the next bulk request, and let bulk request succeed
        sink.setMockItemFailuresListForNextBulkItemResponses(
                Collections.singletonList((Exception) null));

        testHarness.processElement(new StreamRecord<>("msg-1"));
        verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

        // manually execute the next bulk request
        sink.manualBulkRequestWithAllPendingRequests();

        // setup the requests to be flushed in the snapshot
        testHarness.processElement(new StreamRecord<>("msg-2"));
        testHarness.processElement(new StreamRecord<>("msg-3"));
        verify(sink.getMockBulkProcessor(), times(3)).add(any(IndexRequest.class));

        CheckedThread snapshotThread =
                new CheckedThread() {
                    @Override
                    public void go() throws Exception {
                        testHarness.snapshot(1L, 1000L);
                    }
                };
        snapshotThread.start();

        // the snapshot should eventually be blocked before snapshot triggers flushing
        while (snapshotThread.getState() != Thread.State.WAITING) {
            Thread.sleep(10);
        }

        // for the snapshot-triggered flush, we let the bulk request fail completely
        sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));

        // let the snapshot-triggered flush continue (bulk request should fail completely)
        sink.continueFlush();

        assertThatThrownBy(snapshotThread::sync)
                .getCause()
                .getCause()
                .hasMessageContaining("artificial failure for bulk request");
    }

    /**
     * Tests that the sink correctly waits for pending requests (including re-added requests) on
     * checkpoints; we set a timeout because the test will not finish if the logic is broken.
     */
    @Test(timeout = 5000)
    public void testAtLeastOnceSink() throws Throwable {
        // use a failure handler that simply re-adds requests, to exercise at-least-once retries
        final DummyElasticsearchSink<String> sink =
                new DummyElasticsearchSink<>(
                        new HashMap<String, String>(),
                        new SimpleSinkFunction<String>(),
                        new DummyRetryFailureHandler()); // use a failure handler that simply
        // re-adds requests

        final OneInputStreamOperatorTestHarness<String, Object> testHarness =
                new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

        testHarness.open();

        // setup the next bulk request, and its mock item failures;
        // it contains 1 request, which will fail and re-added to the next bulk request
        sink.setMockItemFailuresListForNextBulkItemResponses(
                Collections.singletonList(new Exception("artificial failure for record")));

        testHarness.processElement(new StreamRecord<>("msg"));
        verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

        CheckedThread snapshotThread =
                new CheckedThread() {
                    @Override
                    public void go() throws Exception {
                        testHarness.snapshot(1L, 1000L);
                    }
                };
        snapshotThread.start();

        // the snapshot should eventually be blocked before snapshot triggers flushing
        while (snapshotThread.getState() != Thread.State.WAITING) {
            Thread.sleep(10);
        }

        sink.continueFlush();

        // since the previous flush should have resulted in a request re-add from the failure
        // handler,
        // we should have flushed again, and eventually be blocked before snapshot triggers the 2nd
        // flush
        while (snapshotThread.getState() != Thread.State.WAITING) {
            Thread.sleep(10);
        }

        // current number of pending request should be 1 due to the re-add
        assertThat(sink.getNumPendingRequests()).isEqualTo(1);

        // this time, let the bulk request succeed, so no-more requests are re-added
        sink.setMockItemFailuresListForNextBulkItemResponses(
                Collections.singletonList((Exception) null));

        sink.continueFlush();

        // the snapshot should finish with no exceptions
        snapshotThread.sync();

        testHarness.close();
    }

    /**
     * This test is meant to assure that testAtLeastOnceSink is valid by testing that if flushing is
     * disabled, the snapshot method does indeed finishes without waiting for pending requests; we
     * set a timeout because the test will not finish if the logic is broken.
     */
    @Test(timeout = 5000)
    public void testDoesNotWaitForPendingRequestsIfFlushingDisabled() throws Exception {
        final DummyElasticsearchSink<String> sink =
                new DummyElasticsearchSink<>(
                        new HashMap<String, String>(),
                        new SimpleSinkFunction<String>(),
                        new DummyRetryFailureHandler());
        sink.disableFlushOnCheckpoint(); // disable flushing

        final OneInputStreamOperatorTestHarness<String, Object> testHarness =
                new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

        testHarness.open();

        // setup the next bulk request, and let bulk request succeed
        sink.setMockItemFailuresListForNextBulkItemResponses(
                Collections.singletonList(new Exception("artificial failure for record")));

        testHarness.processElement(new StreamRecord<>("msg-1"));
        verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

        // the snapshot should not block even though we haven't flushed the bulk request
        testHarness.snapshot(1L, 1000L);

        testHarness.close();
    }

    /** Verifies that open() and close() of the user-provided sink function are invoked. */
    @Test
    public void testOpenAndCloseInSinkFunction() throws Exception {
        SimpleClosableSinkFunction<String> sinkFunction = new SimpleClosableSinkFunction<>();
        final DummyElasticsearchSink<String> sink =
                new DummyElasticsearchSink<>(
                        new HashMap<>(), sinkFunction, new DummyRetryFailureHandler());
        sink.setRuntimeContext(new MockStreamingRuntimeContext(false, 1, 0));

        sink.open(new Configuration());
        sink.close();

        assertThat(sinkFunction.openCalled).isTrue();
        assertThat(sinkFunction.closeCalled).isTrue();
    }

    /**
     * Test sink that replaces the real {@link BulkProcessor} with a mock whose add()/flush()
     * behavior the tests control directly (manual flushing, per-item failures, whole-bulk
     * failures).
     */
    private static class DummyElasticsearchSink<T> extends ElasticsearchSinkBase<T, Client> {

        private static final long serialVersionUID = 5051907841570096991L;

        /** Mocked processor; its add()/flush() calls are intercepted below. */
        private transient BulkProcessor mockBulkProcessor;

        /** Accumulates intercepted requests until the next (mock) flush. */
        private transient BulkRequest nextBulkRequest = new BulkRequest();

        /** Gates each mock flush so tests decide exactly when flushing may proceed. */
        private transient MultiShotLatch flushLatch = new MultiShotLatch();

        /** Per-item failures for the next bulk; a {@code null} entry means the item succeeds. */
        private List<? extends Exception> mockItemFailuresList;

        /** If set, the next bulk request fails completely with this throwable. */
        private Throwable nextBulkFailure;

        public DummyElasticsearchSink(
                Map<String, String> userConfig,
                ElasticsearchSinkFunction<T> sinkFunction,
                ActionRequestFailureHandler failureHandler) {
            super(new DummyElasticsearchApiCallBridge(), userConfig, sinkFunction, failureHandler);
        }

        /**
         * This method is used to mimic a scheduled bulk request; we need to do this manually
         * because we are mocking the BulkProcessor.
         */
        public void manualBulkRequestWithAllPendingRequests() {
            flushLatch.trigger(); // let the flush
            mockBulkProcessor.flush();
        }

        /**
         * On non-manual flushes, i.e. when flush is called in the snapshot method implementation,
         * usages need to explicitly call this to allow the flush to continue. This is useful to
         * make sure that specific requests get added to the next bulk request for flushing.
         */
        public void continueFlush() {
            flushLatch.trigger();
        }

        /**
         * Set the list of mock failures to use for the next bulk of item responses. A {@code null}
         * means that the response is successful, failed otherwise.
         *
         * <p>The list is used with corresponding order to the requests in the bulk, i.e. the first
         * request uses the response at index 0, the second requests uses the response at index 1,
         * etc.
         */
        public void setMockItemFailuresListForNextBulkItemResponses(
                List<? extends Exception> mockItemFailuresList) {
            this.mockItemFailuresList = mockItemFailuresList;
        }

        /**
         * Let the next bulk request fail completely with the provided throwable. If this is set,
         * the failures list provided with setMockItemFailuresListForNextBulkItemResponses is not
         * respected.
         */
        public void setFailNextBulkRequestCompletely(Throwable failure) {
            this.nextBulkFailure = failure;
        }

        public BulkProcessor getMockBulkProcessor() {
            return mockBulkProcessor;
        }

        /**
         * Override the bulk processor build process to provide a mock implementation, but reuse the
         * listener implementation in our mock to test that the listener logic works correctly with
         * request flushing logic.
         */
        @Override
        protected BulkProcessor buildBulkProcessor(final BulkProcessor.Listener listener) {
            this.mockBulkProcessor = mock(BulkProcessor.class);

            when(mockBulkProcessor.add(any(IndexRequest.class)))
                    .thenAnswer(
                            new Answer<Object>() {
                                @Override
                                public Object answer(InvocationOnMock invocationOnMock)
                                        throws Throwable {
                                    // intercept the request and add it to our mock bulk request
                                    nextBulkRequest.add(
                                            (IndexRequest) invocationOnMock.getArgument(0));

                                    return null;
                                }
                            });

            doAnswer(
                            new Answer() {
                                @Override
                                public Object answer(InvocationOnMock invocationOnMock)
                                        throws Throwable {
                                    // keep flushing until no request is left pending; requests
                                    // re-added by the failure handler extend the loop
                                    while (nextBulkRequest.numberOfActions() > 0) {
                                        // wait until we are allowed to continue with the flushing
                                        flushLatch.await();

                                        // create a copy of the accumulated mock requests, so that
                                        // re-added requests from the failure handler are included
                                        // in the next bulk
                                        BulkRequest currentBulkRequest = nextBulkRequest;
                                        nextBulkRequest = new BulkRequest();

                                        // 123L is an arbitrary execution id for the callbacks
                                        listener.beforeBulk(123L, currentBulkRequest);

                                        if (nextBulkFailure == null) {
                                            BulkItemResponse[] mockResponses =
                                                    new BulkItemResponse
                                                            [currentBulkRequest.requests().size()];
                                            for (int i = 0;
                                                    i < currentBulkRequest.requests().size();
                                                    i++) {
                                                Exception mockItemFailure =
                                                        mockItemFailuresList.get(i);

                                                if (mockItemFailure == null) {
                                                    // the mock response for the item is success
                                                    mockResponses[i] =
                                                            new BulkItemResponse(
                                                                    i,
                                                                    DocWriteRequest.OpType.INDEX,
                                                                    mock(DocWriteResponse.class));
                                                } else {
                                                    // the mock response for the item is failure
                                                    mockResponses[i] =
                                                            new BulkItemResponse(
                                                                    i,
                                                                    DocWriteRequest.OpType.INDEX,
                                                                    new BulkItemResponse.Failure(
                                                                            "index",
                                                                            "type",
                                                                            "id",
                                                                            mockItemFailure));
                                                }
                                            }

                                            listener.afterBulk(
                                                    123L,
                                                    currentBulkRequest,
                                                    new BulkResponse(mockResponses, 1000L));
                                        } else {
                                            // simulate a completely failed bulk request
                                            listener.afterBulk(
                                                    123L, currentBulkRequest, nextBulkFailure);
                                        }
                                    }

                                    return null;
                                }
                            })
                    .when(mockBulkProcessor)
                    .flush();

            return mockBulkProcessor;
        }
    }

    /** Minimal bridge implementation; everything network-related is mocked or a no-op. */
    private static class DummyElasticsearchApiCallBridge
            implements ElasticsearchApiCallBridge<Client> {

        private static final long serialVersionUID = -4272760730959041699L;

        @Override
        public Client createClient() {
            // a mock client suffices; these tests never issue real network calls
            return mock(Client.class);
        }

        @Override
        public BulkProcessor.Builder createBulkProcessorBuilder(
                Client client, BulkProcessor.Listener listener) {
            return null;
        }

        @Override
        public Tuple2<String, String[]> search(Client client, SearchRequest searchRequest)
                throws IOException {
            return null;
        }

        @Override
        public void close(Client client) throws IOException {}

        @Nullable
        @Override
        public Throwable extractFailureCauseFromBulkItemResponse(
                BulkItemResponse bulkItemResponse) {
            if (bulkItemResponse.isFailed()) {
                return new Exception(bulkItemResponse.getFailure().getMessage());
            } else {
                return null;
            }
        }

        @Override
        public void configureBulkProcessorFlushInterval(
                BulkProcessor.Builder builder, long flushIntervalMillis) {
            // no need for this in the test cases here
        }

        @Override
        public void configureBulkProcessorBackoff(
                BulkProcessor.Builder builder,
                @Nullable ElasticsearchSinkBase.BulkFlushBackoffPolicy flushBackoffPolicy) {
            // no need for this in the test cases here
        }

        @Override
        public void verifyClientConnection(Client client) {
            // no need for this in the test cases here
        }

        @Override
        public RequestIndexer createBulkProcessorIndexer(
                BulkProcessor bulkProcessor,
                boolean flushOnCheckpoint,
                AtomicLong numPendingRequestsRef) {
            return new TestRequestIndexer(bulkProcessor, flushOnCheckpoint, numPendingRequestsRef);
        }
    }

    /**
     * Simple sink function indexing each element under a fixed id.
     *
     * <p>NOTE: {@code String} here is a declared type parameter that shadows {@code
     * java.lang.String}, which is why the map key type below must be fully qualified.
     */
    private static class SimpleSinkFunction<String> implements ElasticsearchSinkFunction<String> {

        private static final long serialVersionUID = -176739293659135148L;

        @Override
        public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
            Map<java.lang.String, Object> json = new HashMap<>();
            json.put("data", element);

            indexer.add(Requests.indexRequest().index("index").type("type").id("id").source(json));
        }
    }

    /**
     * Sink function that records whether its open() and close() lifecycle hooks were invoked.
     *
     * <p>NOTE: as above, {@code String} is a type parameter shadowing {@code java.lang.String}.
     */
    private static class SimpleClosableSinkFunction<String>
            implements ElasticsearchSinkFunction<String> {

        private static final long serialVersionUID = 1872065917794006848L;

        // lifecycle flags asserted by testOpenAndCloseInSinkFunction
        private boolean openCalled;
        private boolean closeCalled;

        @Override
        public void open() {
            openCalled = true;
        }

        @Override
        public void close() {
            closeCalled = true;
        }

        @Override
        public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {}
    }

    /** Failure handler that blindly re-adds every failed request (at-least-once retry). */
    private static class DummyRetryFailureHandler implements ActionRequestFailureHandler {

        private static final long serialVersionUID = 5400023700099200745L;

        @Override
        public void onFailure(
                ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer)
                throws Throwable {
            indexer.add(action);
        }
    }
}
| 5,758 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/TestRequestIndexer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import java.util.concurrent.atomic.AtomicLong;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * {@link RequestIndexer} test implementation backed by a {@link BulkProcessor}. {@link
 * ActionRequest ActionRequests} are buffered by the processor until it sends a bulk request to the
 * Elasticsearch cluster.
 */
class TestRequestIndexer implements RequestIndexer {

    private final BulkProcessor bulkProcessor;
    private final boolean flushOnCheckpoint;
    private final AtomicLong numPendingRequestsRef;

    TestRequestIndexer(
            BulkProcessor bulkProcessor,
            boolean flushOnCheckpoint,
            AtomicLong numPendingRequestsRef) {
        this.bulkProcessor = checkNotNull(bulkProcessor);
        this.flushOnCheckpoint = flushOnCheckpoint;
        this.numPendingRequestsRef = checkNotNull(numPendingRequestsRef);
    }

    /** Counts one request towards the pending total when checkpoint flushing is enabled. */
    private void trackPendingRequest() {
        if (flushOnCheckpoint) {
            numPendingRequestsRef.getAndIncrement();
        }
    }

    @Override
    public void add(DeleteRequest... deleteRequests) {
        for (DeleteRequest request : deleteRequests) {
            trackPendingRequest();
            bulkProcessor.add(request);
        }
    }

    @Override
    public void add(IndexRequest... indexRequests) {
        for (IndexRequest request : indexRequests) {
            trackPendingRequest();
            bulkProcessor.add(request);
        }
    }

    @Override
    public void add(UpdateRequest... updateRequests) {
        for (UpdateRequest request : updateRequests) {
            trackPendingRequest();
            bulkProcessor.add(request);
        }
    }
}
| 5,759 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.runtime.client.JobExecutionException;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.elasticsearch.testutils.SourceSinkDataTestKit;
import org.apache.flink.test.util.AbstractTestBase;
import org.elasticsearch.client.RestHighLevelClient;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
/**
* Environment preparation and suite of tests for version-specific {@link ElasticsearchSinkBase}
* implementations.
*
* @param <C> Elasticsearch client type
* @param <A> The address type to use
*/
public abstract class ElasticsearchSinkTestBase<C extends AutoCloseable, A>
extends AbstractTestBase {
    /** Returns a client connected to the test Elasticsearch cluster, used to verify results. */
    protected abstract RestHighLevelClient getClient();
/** Tests that the Elasticsearch sink works properly with json. */
public void runElasticsearchSinkTest() throws Exception {
runElasticSearchSinkTest(
"elasticsearch-sink-test-json-index", SourceSinkDataTestKit::getJsonSinkFunction);
}
/** Tests that the Elasticsearch sink works properly with smile. */
public void runElasticsearchSinkSmileTest() throws Exception {
runElasticSearchSinkTest(
"elasticsearch-sink-test-smile-index", SourceSinkDataTestKit::getSmileSinkFunction);
}
private void runElasticSearchSinkTest(
String index,
Function<String, ElasticsearchSinkFunction<Tuple2<Integer, String>>> functionFactory)
throws Exception {
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStreamSource<Tuple2<Integer, String>> source =
env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());
source.addSink(createElasticsearchSinkForEmbeddedNode(1, functionFactory.apply(index)));
env.execute("Elasticsearch Sink Test");
// verify the results
RestHighLevelClient client = getClient();
SourceSinkDataTestKit.verifyProducedSinkData(client, index);
client.close();
}
/**
* Tests that the Elasticsearch sink fails eagerly if the provided list of addresses is {@code
* null}.
*/
public void runNullAddressesTest() {
assertThatThrownBy(
() ->
createElasticsearchSink(
1, null, SourceSinkDataTestKit.getJsonSinkFunction("test")))
.isInstanceOfAny(IllegalArgumentException.class, NullPointerException.class);
}
/**
* Tests that the Elasticsearch sink fails eagerly if the provided list of addresses is empty.
*/
public void runEmptyAddressesTest() {
assertThatThrownBy(
() ->
createElasticsearchSink(
1,
Collections.emptyList(),
SourceSinkDataTestKit.getJsonSinkFunction("test")))
.isInstanceOf(IllegalArgumentException.class);
}
/** Tests whether the Elasticsearch sink fails when there is no cluster to connect to. */
public void runInvalidElasticsearchClusterTest() throws Exception {
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStreamSource<Tuple2<Integer, String>> source =
env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());
source.addSink(
createElasticsearchSinkForNode(
1,
SourceSinkDataTestKit.getJsonSinkFunction("test"),
"123.123.123.123")); // incorrect ip address
assertThatThrownBy(() -> env.execute("Elasticsearch Sink Test"))
.isInstanceOf(JobExecutionException.class);
}
/** Creates a version-specific Elasticsearch sink, using arbitrary transport addresses. */
protected abstract ElasticsearchSinkBase<Tuple2<Integer, String>, C> createElasticsearchSink(
int bulkFlushMaxActions,
List<A> addresses,
ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction);
/**
* Creates a version-specific Elasticsearch sink to connect to a local embedded Elasticsearch
* node.
*
* <p>This case is singled out from {@link
* ElasticsearchSinkTestBase#createElasticsearchSink(int, List, ElasticsearchSinkFunction)}
* because the Elasticsearch Java API to do so is incompatible across different versions.
*/
protected abstract ElasticsearchSinkBase<Tuple2<Integer, String>, C>
createElasticsearchSinkForEmbeddedNode(
int bulkFlushMaxActions,
ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction)
throws Exception;
/**
* Creates a version-specific Elasticsearch sink to connect to a specific Elasticsearch node.
*/
protected abstract ElasticsearchSinkBase<Tuple2<Integer, String>, C>
createElasticsearchSinkForNode(
int bulkFlushMaxActions,
ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction,
String ipAddress)
throws Exception;
}
| 5,760 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/TestContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.catalog.ResolvedCatalogTable;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.factories.DynamicTableFactory;
import org.apache.flink.table.factories.FactoryUtil;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/** A utility class for mocking {@link DynamicTableFactory.Context}. */
class TestContext {

    // Connector options accumulated via withOption(); passed verbatim to the catalog table.
    private final Map<String, String> options = new HashMap<>();

    // Default single-column schema; replaceable via withSchema().
    private ResolvedSchema schema = ResolvedSchema.of(Column.physical("a", DataTypes.TIME()));

    /** Entry point of the fluent builder. */
    public static TestContext context() {
        return new TestContext();
    }

    /** Overrides the schema used by the mocked context. */
    public TestContext withSchema(ResolvedSchema schema) {
        this.schema = schema;
        return this;
    }

    /** Adds a single connector option to the mocked context. */
    public TestContext withOption(String key, String value) {
        options.put(key, value);
        return this;
    }

    /** Assembles the mocked {@link DynamicTableFactory.Context} from the collected state. */
    DynamicTableFactory.Context build() {
        final CatalogTable catalogTable =
                CatalogTable.of(
                        Schema.newBuilder().fromResolvedSchema(schema).build(),
                        "mock context",
                        Collections.emptyList(),
                        options);
        return new FactoryUtil.DefaultDynamicTableContext(
                ObjectIdentifier.of("default", "default", "t1"),
                new ResolvedCatalogTable(catalogTable, schema),
                Collections.emptyMap(),
                new Configuration(),
                TestContext.class.getClassLoader(),
                false);
    }
}
| 5,761 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/KeyExtractorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.junit.Test;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.util.function.Function;
import static org.assertj.core.api.Assertions.assertThat;
/** Tests for {@link KeyExtractor}. */
public class KeyExtractorTest {
    // A single-column primary key: the document id is simply the key field's string form.
    @Test
    public void testSimpleKey() {
        TableSchema schema =
                TableSchema.builder()
                        .field("a", DataTypes.BIGINT().notNull())
                        .field("b", DataTypes.STRING())
                        .primaryKey("a")
                        .build();
        Function<RowData, String> keyExtractor = KeyExtractor.createKeyExtractor(schema, "_");
        String key = keyExtractor.apply(GenericRowData.of(12L, StringData.fromString("ABCD")));
        assertThat(key).isEqualTo("12");
    }
    // Without a primary key the extractor must yield null (Elasticsearch auto-generates ids).
    @Test
    public void testNoPrimaryKey() {
        TableSchema schema =
                TableSchema.builder()
                        .field("a", DataTypes.BIGINT().notNull())
                        .field("b", DataTypes.STRING())
                        .build();
        Function<RowData, String> keyExtractor = KeyExtractor.createKeyExtractor(schema, "_");
        String key = keyExtractor.apply(GenericRowData.of(12L, StringData.fromString("ABCD")));
        assertThat(key).isNull();
    }
    // A composite key joins the key fields with the given delimiter ("_") in declaration order.
    @Test
    public void testTwoFieldsKey() {
        TableSchema schema =
                TableSchema.builder()
                        .field("a", DataTypes.BIGINT().notNull())
                        .field("b", DataTypes.STRING())
                        .field("c", DataTypes.TIMESTAMP().notNull())
                        .primaryKey("a", "c")
                        .build();
        Function<RowData, String> keyExtractor = KeyExtractor.createKeyExtractor(schema, "_");
        String key =
                keyExtractor.apply(
                        GenericRowData.of(
                                12L,
                                StringData.fromString("ABCD"),
                                TimestampData.fromLocalDateTime(
                                        LocalDateTime.parse("2012-12-12T12:12:12"))));
        assertThat(key).isEqualTo("12_2012-12-12T12:12:12");
    }
    // Exercises every SQL type supported as a key field and pins its string rendering
    // (numeric types via toString, temporal types via their ISO-8601-like forms).
    @Test
    public void testAllTypesKey() {
        TableSchema schema =
                TableSchema.builder()
                        .field("a", DataTypes.TINYINT().notNull())
                        .field("b", DataTypes.SMALLINT().notNull())
                        .field("c", DataTypes.INT().notNull())
                        .field("d", DataTypes.BIGINT().notNull())
                        .field("e", DataTypes.BOOLEAN().notNull())
                        .field("f", DataTypes.FLOAT().notNull())
                        .field("g", DataTypes.DOUBLE().notNull())
                        .field("h", DataTypes.STRING().notNull())
                        .field("i", DataTypes.TIMESTAMP().notNull())
                        .field("j", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE().notNull())
                        .field("k", DataTypes.TIME().notNull())
                        .field("l", DataTypes.DATE().notNull())
                        .primaryKey("a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l")
                        .build();
        Function<RowData, String> keyExtractor = KeyExtractor.createKeyExtractor(schema, "_");
        String key =
                keyExtractor.apply(
                        GenericRowData.of(
                                (byte) 1,
                                (short) 2,
                                3,
                                (long) 4,
                                true,
                                1.0f,
                                2.0d,
                                StringData.fromString("ABCD"),
                                TimestampData.fromLocalDateTime(
                                        LocalDateTime.parse("2012-12-12T12:12:12")),
                                TimestampData.fromInstant(Instant.parse("2013-01-13T13:13:13Z")),
                                // TIME is the internal int of milliseconds-of-day
                                (int) (LocalTime.parse("14:14:14").toNanoOfDay() / 1_000_000),
                                // DATE is the internal int of days since the epoch
                                (int) LocalDate.parse("2015-05-15").toEpochDay()));
        assertThat(key)
                .isEqualTo(
                        "1_2_3_4_true_1.0_2.0_ABCD_2012-12-12T12:12:12_2013-01-13T13:13:13_14:14:14_2015-05-15");
    }
}
| 5,762 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/IndexGeneratorFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.util.TestLogger;
import org.junit.Before;
import org.junit.Test;
import java.sql.Timestamp;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assume.assumeThat;
/** Tests for {@link IndexGeneratorFactory}. */
public class IndexGeneratorFactoryTest extends TestLogger {

    // Schema shared by all tests; covers the temporal types supported by dynamic index patterns.
    private TableSchema schema;
    // Two sample rows with fixed timestamps so expected index names are deterministic.
    private List<RowData> rows;

    @Before
    public void prepareData() {
        schema =
                new TableSchema.Builder()
                        .field("id", DataTypes.INT())
                        .field("item", DataTypes.STRING())
                        .field("log_ts", DataTypes.BIGINT())
                        .field("log_date", DataTypes.DATE())
                        .field("log_time", DataTypes.TIME())
                        .field("order_timestamp", DataTypes.TIMESTAMP())
                        .field("local_timestamp", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE())
                        .field("status", DataTypes.BOOLEAN())
                        .build();
        rows = new ArrayList<>();
        rows.add(
                GenericRowData.of(
                        1,
                        StringData.fromString("apple"),
                        Timestamp.valueOf("2020-03-18 12:12:14").getTime(),
                        (int) LocalDate.parse("2020-03-18").toEpochDay(),
                        (int) (LocalTime.parse("12:12:14").toNanoOfDay() / 1_000_000L),
                        TimestampData.fromLocalDateTime(LocalDateTime.parse("2020-03-18T12:12:14")),
                        TimestampData.fromInstant(
                                LocalDateTime.of(2020, 3, 18, 3, 12, 14, 1000)
                                        .atZone(ZoneId.of("Asia/Shanghai"))
                                        .toInstant()),
                        true));
        rows.add(
                GenericRowData.of(
                        2,
                        StringData.fromString("peanut"),
                        Timestamp.valueOf("2020-03-19 12:12:14").getTime(),
                        (int) LocalDate.parse("2020-03-19").toEpochDay(),
                        (int) (LocalTime.parse("12:22:21").toNanoOfDay() / 1_000_000L),
                        TimestampData.fromLocalDateTime(LocalDateTime.parse("2020-03-19T12:22:14")),
                        TimestampData.fromInstant(
                                LocalDateTime.of(2020, 3, 19, 20, 22, 14, 1000)
                                        .atZone(ZoneId.of("America/Los_Angeles"))
                                        .toInstant()),
                        false));
    }

    /** A TIMESTAMP column can drive the index name with a custom date-time pattern. */
    @Test
    public void testDynamicIndexFromTimestamp() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "{order_timestamp|yyyy_MM_dd_HH-ss}_index", schema);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("2020_03_18_12-14_index");
        IndexGenerator indexGenerator1 =
                IndexGeneratorFactory.createIndexGenerator(
                        "{order_timestamp|yyyy_MM_dd_HH_mm}_index", schema);
        indexGenerator1.open();
        assertThat(indexGenerator1.generate(rows.get(1))).isEqualTo("2020_03_19_12_22_index");
    }

    /** A DATE column can drive the index name. */
    @Test
    public void testDynamicIndexFromDate() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "my-index-{log_date|yyyy/MM/dd}", schema);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-2020/03/18");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-2020/03/19");
    }

    /** A TIME column can drive the index name. */
    @Test
    public void testDynamicIndexFromTime() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator("my-index-{log_time|HH-mm}", schema);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12-12");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12-22");
    }

    /** An empty pattern ("{field|}") falls back to the type's default format. */
    @Test
    public void testDynamicIndexDefaultFormat() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator("my-index-{log_time|}", schema);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12_12_14");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12_22_21");
    }

    /**
     * Checks all accepted spellings of the system-time placeholders ({@code now()} and {@code
     * current_timestamp}) and verifies that close-but-invalid spellings are rejected with a clear
     * error message.
     */
    @Test
    public void testDynamicIndexFromSystemTime() {
        List<String> supportedUseCases =
                Arrays.asList(
                        "now()",
                        "NOW()",
                        "now( )",
                        "NOW(\t)",
                        "\t NOW( ) \t",
                        "current_timestamp",
                        "CURRENT_TIMESTAMP",
                        "\tcurrent_timestamp\t",
                        " current_timestamp ");
        supportedUseCases.forEach(
                f -> {
                    DateTimeFormatter dateTimeFormatter =
                            DateTimeFormatter.ofPattern("yyyy_MM_dd");
                    IndexGenerator indexGenerator =
                            IndexGeneratorFactory.createIndexGenerator(
                                    String.format("my-index-{%s|yyyy_MM_dd}", f), schema);
                    indexGenerator.open();
                    // The date may change during the running of the unit test.
                    // Generate expected index-name based on the current time
                    // before and after calling the generate method.
                    String expectedIndex1 =
                            "my-index-" + LocalDateTime.now().format(dateTimeFormatter);
                    String actualIndex = indexGenerator.generate(rows.get(1));
                    String expectedIndex2 =
                            "my-index-" + LocalDateTime.now().format(dateTimeFormatter);
                    assertThat(
                                    actualIndex.equals(expectedIndex1)
                                            || actualIndex.equals(expectedIndex2))
                            .isTrue();
                });
        List<String> invalidUseCases =
                Arrays.asList(
                        "now",
                        "now(",
                        "NOW",
                        "NOW)",
                        "current_timestamp()",
                        "CURRENT_TIMESTAMP()",
                        "CURRENT_timestamp");
        invalidUseCases.forEach(
                f -> {
                    String expectedExceptionMsg =
                            String.format(
                                    "Unknown field '%s' in index pattern 'my-index-{%s|yyyy_MM_dd}',"
                                            + " please check the field name.",
                                    f, f);
                    // Bug fix: the previous try/catch silently PASSED when no exception was
                    // thrown at all; assertThatThrownBy fails the test if creation succeeds.
                    assertThatThrownBy(
                                    () -> {
                                        IndexGenerator indexGenerator =
                                                IndexGeneratorFactory.createIndexGenerator(
                                                        String.format(
                                                                "my-index-{%s|yyyy_MM_dd}", f),
                                                        schema);
                                        indexGenerator.open();
                                    })
                            .isInstanceOf(TableException.class)
                            .hasMessage(expectedExceptionMsg);
                });
    }

    /** Default format of TIMESTAMP_LTZ renders in the session zone; this test assumes UTC. */
    @Test
    public void testDynamicIndexDefaultFormatTimestampWithLocalTimeZoneUTC() {
        // Skip (do not fail) when the JVM default zone is not UTC.
        assumeThat(ZoneId.systemDefault(), is(ZoneId.of("UTC")));
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator("my-index-{local_timestamp|}", schema);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-2020_03_17_19_12_14Z");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-2020_03_20_03_22_14Z");
    }

    /** TIMESTAMP_LTZ rendering honours an explicitly supplied session zone. */
    @Test
    public void testDynamicIndexDefaultFormatTimestampWithLocalTimeZoneWithSpecificTimeZone() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "my-index-{local_timestamp|}", schema, ZoneId.of("Europe/Berlin"));
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0)))
                .isEqualTo("my-index-2020_03_17_20_12_14+01");
        assertThat(indexGenerator.generate(rows.get(1)))
                .isEqualTo("my-index-2020_03_20_04_22_14+01");
    }

    /** A non-temporal pattern ("{field}") substitutes the field's string value. */
    @Test
    public void testGeneralDynamicIndex() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator("index_{item}", schema);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("index_apple");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("index_peanut");
    }

    /** A pattern with no placeholder is used verbatim for every row. */
    @Test
    public void testStaticIndex() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator("my-index", schema);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index");
    }

    /** Referencing a field that does not exist in the schema must fail eagerly. */
    @Test
    public void testUnknownField() {
        String expectedExceptionMsg =
                "Unknown field 'unknown_ts' in index pattern 'my-index-{unknown_ts|yyyy-MM-dd}',"
                        + " please check the field name.";
        assertThatThrownBy(
                        () ->
                                IndexGeneratorFactory.createIndexGenerator(
                                        "my-index-{unknown_ts|yyyy-MM-dd}", schema))
                .isInstanceOf(TableException.class)
                .hasMessage(expectedExceptionMsg);
    }

    /** A time pattern on a non-temporal column must fail eagerly. */
    @Test
    public void testUnsupportedTimeType() {
        String expectedExceptionMsg =
                "Unsupported type 'INT' found in Elasticsearch dynamic index field, "
                        + "time-related pattern only support types are: DATE,TIME,TIMESTAMP.";
        assertThatThrownBy(
                        () ->
                                IndexGeneratorFactory.createIndexGenerator(
                                        "my-index-{id|yyyy-MM-dd}", schema))
                .isInstanceOf(TableException.class)
                .hasMessage(expectedExceptionMsg);
    }

    /** Only a single dynamic placeholder is supported per index pattern. */
    @Test
    public void testUnsupportedMultiParametersType() {
        String expectedExceptionMsg =
                "Chaining dynamic index pattern my-index-{local_date}-{local_time} is not supported,"
                        + " only support single dynamic index pattern.";
        assertThatThrownBy(
                        () ->
                                IndexGeneratorFactory.createIndexGenerator(
                                        "my-index-{local_date}-{local_time}", schema))
                .isInstanceOf(TableException.class)
                .hasMessage(expectedExceptionMsg);
    }

    /** A placeholder on a field of an unsupported type (here BOOLEAN) must fail eagerly. */
    @Test
    public void testUnsupportedIndexFieldType() {
        String expectedExceptionMsg =
                "Unsupported type BOOLEAN of index field, Supported types are:"
                        + " [DATE, TIME_WITHOUT_TIME_ZONE, TIMESTAMP_WITHOUT_TIME_ZONE, TIMESTAMP_WITH_TIME_ZONE,"
                        + " TIMESTAMP_WITH_LOCAL_TIME_ZONE, VARCHAR, CHAR, TINYINT, INTEGER, BIGINT]";
        assertThatThrownBy(
                        () -> IndexGeneratorFactory.createIndexGenerator("index_{status}", schema))
                .isInstanceOf(IllegalArgumentException.class)
                .hasMessage(expectedExceptionMsg);
    }
}
| 5,763 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.testutils;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import java.io.IOException;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * This class contains utilities and a pre-defined source function and Elasticsearch Sink function
 * used to simulate and verify data used in tests.
 */
public class SourceSinkDataTestKit {
    // Number of (id, "message #" + id) elements emitted by the source and checked by the verifier.
    private static final int NUM_ELEMENTS = 20;
    // Payload prefix; element i carries the value DATA_PREFIX + i.
    private static final String DATA_PREFIX = "message #";
    // Name of the single field written into each Elasticsearch document.
    private static final String DATA_FIELD_NAME = "data";
    // Elasticsearch mapping type used for every test document.
    private static final String TYPE_NAME = "flink-es-test-type";
    /**
     * A {@link SourceFunction} that generates the elements (id, "message #" + id) with id being 0 -
     * 20.
     */
    public static class TestDataSourceFunction implements SourceFunction<Tuple2<Integer, String>> {
        private static final long serialVersionUID = 1L;
        // volatile so a cancel() from another thread is observed by the emitting loop.
        private volatile boolean running = true;
        @Override
        public void run(SourceFunction.SourceContext<Tuple2<Integer, String>> ctx)
                throws Exception {
            // Emit all elements unless cancelled mid-way.
            for (int i = 0; i < NUM_ELEMENTS && running; i++) {
                ctx.collect(Tuple2.of(i, DATA_PREFIX + i));
            }
        }
        @Override
        public void cancel() {
            running = false;
        }
    }
    /** Returns a sink function that serializes documents for {@code index} as JSON. */
    public static ElasticsearchSinkFunction<Tuple2<Integer, String>> getJsonSinkFunction(
            String index) {
        return new TestElasticsearchSinkFunction(index, XContentFactory::jsonBuilder);
    }
    /** Returns a sink function that serializes documents for {@code index} as SMILE (binary JSON). */
    public static ElasticsearchSinkFunction<Tuple2<Integer, String>> getSmileSinkFunction(
            String index) {
        return new TestElasticsearchSinkFunction(index, XContentFactory::smileBuilder);
    }
    // Sink function that indexes each (id, payload) tuple as a single-field document whose
    // document id is the tuple's integer id.
    private static class TestElasticsearchSinkFunction
            implements ElasticsearchSinkFunction<Tuple2<Integer, String>> {
        private static final long serialVersionUID = 1L;
        private final String index;
        private final XContentBuilderProvider contentBuilderProvider;
        /**
         * Create the sink function, specifying a target Elasticsearch index.
         *
         * @param index Name of the target Elasticsearch index.
         */
        public TestElasticsearchSinkFunction(
                String index, XContentBuilderProvider contentBuilderProvider) {
            this.index = index;
            this.contentBuilderProvider = contentBuilderProvider;
        }
        // Builds an IndexRequest with document id = element.f0 and body {"data": element.f1}.
        public IndexRequest createIndexRequest(Tuple2<Integer, String> element) {
            Map<String, Object> document = new HashMap<>();
            document.put(DATA_FIELD_NAME, element.f1);
            try {
                return new IndexRequest(index, TYPE_NAME, element.f0.toString())
                        .source(contentBuilderProvider.getBuilder().map(document));
            } catch (IOException e) {
                // Serialization failure is a test-setup error; surface it as unchecked.
                throw new RuntimeException(e);
            }
        }
        @Override
        public void process(
                Tuple2<Integer, String> element, RuntimeContext ctx, RequestIndexer indexer) {
            indexer.add(createIndexRequest(element));
        }
    }
    /**
     * Verify the results in an Elasticsearch index. The results must first be produced into the
     * index using a {@link TestElasticsearchSinkFunction};
     *
     * @param client The client to use to connect to Elasticsearch
     * @param index The index to check
     */
    public static void verifyProducedSinkData(RestHighLevelClient client, String index)
            throws IOException {
        // Fetch every expected document by id and compare its payload field.
        for (int i = 0; i < NUM_ELEMENTS; i++) {
            GetResponse response =
                    client.get(
                            new GetRequest(index, TYPE_NAME, Integer.toString(i)),
                            RequestOptions.DEFAULT);
            assertThat(response.getSource().get(DATA_FIELD_NAME)).isEqualTo(DATA_PREFIX + i);
        }
    }
    // Serializable so the enclosing sink function can be shipped to Flink task managers.
    @FunctionalInterface
    private interface XContentBuilderProvider extends Serializable {
        XContentBuilder getBuilder() throws IOException;
    }
}
| 5,764 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/architecture/ProductionCodeArchitectureTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.architecture;
import org.apache.flink.architecture.common.ImportOptions;
import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.core.importer.Location;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;
import java.util.regex.Pattern;
/** Architecture tests for the production (non-test) code of the Elasticsearch connectors. */
@AnalyzeClasses(
        packages = "org.apache.flink.connector",
        importOptions = {
            ImportOption.DoNotIncludeTests.class,
            ImportOption.DoNotIncludeArchives.class,
            ImportOptions.ExcludeScalaImportOption.class,
            ImportOptions.ExcludeShadedImportOption.class
        })
public class ProductionCodeArchitectureTest {
    // Pulls in the shared production-code rules defined centrally in Flink.
    @ArchTest
    public static final ArchTests COMMON_TESTS = ArchTests.in(ProductionCodeArchitectureBase.class);
    /** Import option that restricts analysis to Maven's main output directory (target/classes). */
    public static final class MavenMainClassesOnly implements ImportOption {
        // Matches class-file locations under a Maven "target/classes" directory.
        private static final Pattern MAVEN = Pattern.compile(".*/target/classes/.*");
        @Override
        public boolean includes(Location location) {
            return location.matches(MAVEN);
        }
    }
}
| 5,765 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.architecture;
import org.apache.flink.architecture.common.ImportOptions;
import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;
/** Architecture tests for test code. */
@AnalyzeClasses(
        packages = {
            "org.apache.flink.connector.elasticsearch",
            "org.apache.flink.streaming.connectors.elasticsearch"
        },
        importOptions = {
            ImportOption.OnlyIncludeTests.class,
            ImportOptions.ExcludeScalaImportOption.class,
            ImportOptions.ExcludeShadedImportOption.class
        })
public class TestCodeArchitectureTest {
    // Pulls in the shared test-code rules defined centrally in Flink.
    @ArchTest
    public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class)
| 5,766 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/ElasticsearchUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.slf4j.Logger;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.utility.DockerImageName;
import java.util.Optional;
/** Collection of utility methods for Elasticsearch tests. */
@Internal
public class ElasticsearchUtil {

    /** Non-instantiable utility class. */
    private ElasticsearchUtil() {}

    /**
     * Creates a preconfigured {@link ElasticsearchContainer} with limited memory allocation and
     * aligns the internal Elasticsearch log levels with the ones used by the capturing logger.
     *
     * @param dockerImageVersion describing the Elasticsearch image
     * @param log to derive the log level from
     * @return configured Elasticsearch container
     */
    public static ElasticsearchContainer createElasticsearchContainer(
            String dockerImageVersion, Logger log) {
        return new ElasticsearchContainer(DockerImageName.parse(dockerImageVersion))
                .withEnv("ES_JAVA_OPTS", "-Xms2g -Xmx2g")
                .withEnv("logger.org.elasticsearch", resolveLogLevel(log))
                .withLogConsumer(new Slf4jLogConsumer(log));
    }

    /** Maps the most verbose enabled level of {@code log} to an Elasticsearch log-level name. */
    private static String resolveLogLevel(Logger log) {
        if (log.isTraceEnabled()) {
            return "TRACE";
        }
        if (log.isDebugEnabled()) {
            return "DEBUG";
        }
        if (log.isInfoEnabled()) {
            return "INFO";
        }
        if (log.isWarnEnabled()) {
            return "WARN";
        }
        if (log.isErrorEnabled()) {
            return "ERROR";
        }
        return "OFF";
    }

    /** A mock {@link DynamicTableSink.Context} for Elasticsearch tests. */
    public static class MockContext implements DynamicTableSink.Context {

        @Override
        public boolean isBounded() {
            return false;
        }

        @Override
        public TypeInformation<?> createTypeInformation(DataType consumedDataType) {
            return null;
        }

        @Override
        public TypeInformation<?> createTypeInformation(LogicalType consumedLogicalType) {
            return null;
        }

        @Override
        public DynamicTableSink.DataStructureConverter createDataStructureConverter(
                DataType consumedDataType) {
            return null;
        }

        public Optional<int[][]> getTargetColumns() {
            return Optional.empty();
        }
    }
}
| 5,767 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/test/DockerImageVersions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.test;
/**
 * Utility class for defining the image names and versions of Docker containers used during the Java
 * tests. The names/versions are centralised here in order to make testing version updates easier,
 * as well as to provide a central file to use as a key when caching testing Docker files.
 */
public class DockerImageVersions {

    /** Image coordinates of the tested Elasticsearch 6.x release. */
    public static final String ELASTICSEARCH_6 =
            "docker.elastic.co/elasticsearch/elasticsearch:6.8.20";

    /** Image coordinates of the tested Elasticsearch 7.x release. */
    public static final String ELASTICSEARCH_7 =
            "docker.elastic.co/elasticsearch/elasticsearch:7.10.2";

    /** Constants holder; not meant to be instantiated. */
    private DockerImageVersions() {}
}
| 5,768 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBaseITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.CheckpointListener;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.test.junit5.MiniClusterExtension;
import org.apache.flink.util.TestLoggerExtension;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.junit.jupiter.params.provider.MethodSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.function.BiFunction;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Tests for {@link ElasticsearchSink}.
 *
 * <p>Version-agnostic base class: concrete subclasses supply the Elasticsearch address, the
 * verification client, and the sink builder for a specific Elasticsearch major version.
 */
@ExtendWith(TestLoggerExtension.class)
abstract class ElasticsearchSinkBaseITCase {
    // Small local Flink cluster shared by all tests of the concrete subclass.
    @RegisterExtension
    private static final MiniClusterExtension MINI_CLUSTER_RESOURCE =
            new MiniClusterExtension(
                    new MiniClusterResourceConfiguration.Builder()
                            .setNumberTaskManagers(1)
                            .setNumberSlotsPerTaskManager(3)
                            .build());
    protected static final Logger LOG = LoggerFactory.getLogger(ElasticsearchSinkBaseITCase.class);
    // Credentials used both by the sink under test and by the verification client.
    protected static final String ELASTICSEARCH_PASSWORD = "test-password";
    protected static final String ELASTICSEARCH_USER = "elastic";
    // Set by FailingMapper on its first completed checkpoint; reset in setUp() before every test.
    private static boolean failed;
    // REST client used to verify which documents the sink actually wrote.
    private RestHighLevelClient client;
    private TestClientBase context;
    /** Returns the HTTP address of the Elasticsearch instance started by the subclass. */
    abstract String getElasticsearchHttpHostAddress();
    /** Creates the version-specific verification helper around the given client. */
    abstract TestClientBase createTestClient(RestHighLevelClient client);
    /** Returns the version-specific sink builder under test. */
    abstract ElasticsearchSinkBuilderBase<
                    Tuple2<Integer, String>, ? extends ElasticsearchSinkBuilderBase>
            getSinkBuilder();
    /** Builds a REST client authenticated with the shared test credentials. */
    private RestHighLevelClient createRestHighLevelClient() {
        final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
        credentialsProvider.setCredentials(
                AuthScope.ANY,
                new UsernamePasswordCredentials(ELASTICSEARCH_USER, ELASTICSEARCH_PASSWORD));
        return new RestHighLevelClient(
                RestClient.builder(HttpHost.create(getElasticsearchHttpHostAddress()))
                        .setHttpClientConfigCallback(
                                httpClientBuilder ->
                                        httpClientBuilder.setDefaultCredentialsProvider(
                                                credentialsProvider)));
    }
    @BeforeEach
    void setUp() {
        failed = false;
        client = createRestHighLevelClient();
        context = createTestClient(client);
    }
    @AfterEach
    void tearDown() throws IOException {
        if (client != null) {
            client.close();
        }
    }
    /**
     * Runs the pipeline once per {@link DeliveryGuarantee}. EXACTLY_ONCE is rejected by the builder
     * with an {@link IllegalStateException}; every other guarantee must succeed.
     */
    @ParameterizedTest
    @EnumSource(DeliveryGuarantee.class)
    void testWriteToElasticSearchWithDeliveryGuarantee(DeliveryGuarantee deliveryGuarantee)
            throws Exception {
        final String index = "test-es-with-delivery-" + deliveryGuarantee;
        boolean failure = false;
        try {
            runTest(index, false, TestEmitter::jsonEmitter, deliveryGuarantee, null);
        } catch (IllegalStateException e) {
            failure = true;
            assertThat(deliveryGuarantee).isSameAs(DeliveryGuarantee.EXACTLY_ONCE);
        } finally {
            // The exception must occur if and only if EXACTLY_ONCE was requested.
            assertThat(failure).isEqualTo(deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE);
        }
    }
    /** Writes records through each provided emitter and verifies the resulting documents. */
    @ParameterizedTest
    @MethodSource("elasticsearchEmitters")
    void testWriteJsonToElasticsearch(
            BiFunction<String, String, ElasticsearchEmitter<Tuple2<Integer, String>>>
                    emitterProvider)
            throws Exception {
        // Random suffix keeps parameterized runs from colliding on the same index.
        final String index = "test-elasticsearch-sink-" + UUID.randomUUID();
        runTest(index, false, emitterProvider, null);
    }
    /** Injects a failure after the first checkpoint and verifies all ids survive the restart. */
    @Test
    void testRecovery() throws Exception {
        final String index = "test-recovery-elasticsearch-sink";
        runTest(index, true, TestEmitter::jsonEmitter, new FailingMapper());
        assertThat(failed).isTrue();
    }
    // Convenience overload defaulting to AT_LEAST_ONCE delivery.
    private void runTest(
            String index,
            boolean allowRestarts,
            BiFunction<String, String, ElasticsearchEmitter<Tuple2<Integer, String>>>
                    emitterProvider,
            @Nullable MapFunction<Long, Long> additionalMapper)
            throws Exception {
        runTest(
                index,
                allowRestarts,
                emitterProvider,
                DeliveryGuarantee.AT_LEAST_ONCE,
                additionalMapper);
    }
    /**
     * Runs a 5-element pipeline (values 1..5) through the sink under test and asserts that all five
     * ids end up written to the given index.
     *
     * @param index target Elasticsearch index
     * @param allowRestarts whether the job may restart on failure (needed for the recovery test)
     * @param emitterProvider factory creating the emitter from (index, data field name)
     * @param deliveryGuarantee guarantee passed to the sink builder
     * @param additionalMapper optional extra map stage (e.g. the failure-injecting mapper)
     */
    private void runTest(
            String index,
            boolean allowRestarts,
            BiFunction<String, String, ElasticsearchEmitter<Tuple2<Integer, String>>>
                    emitterProvider,
            DeliveryGuarantee deliveryGuarantee,
            @Nullable MapFunction<Long, Long> additionalMapper)
            throws Exception {
        final ElasticsearchSink<Tuple2<Integer, String>> sink =
                getSinkBuilder()
                        .setHosts(HttpHost.create(getElasticsearchHttpHostAddress()))
                        .setEmitter(emitterProvider.apply(index, context.getDataFieldName()))
                        .setBulkFlushMaxActions(5)
                        .setConnectionUsername(ELASTICSEARCH_USER)
                        .setConnectionPassword(ELASTICSEARCH_PASSWORD)
                        .setDeliveryGuarantee(deliveryGuarantee)
                        .build();
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Frequent checkpoints so flush-on-checkpoint and recovery paths are exercised.
        env.enableCheckpointing(100L);
        if (!allowRestarts) {
            env.setRestartStrategy(RestartStrategies.noRestart());
        }
        DataStream<Long> stream = env.fromSequence(1, 5);
        if (additionalMapper != null) {
            stream = stream.map(additionalMapper);
        }
        stream.map(
                        new MapFunction<Long, Tuple2<Integer, String>>() {
                            @Override
                            public Tuple2<Integer, String> map(Long value) throws Exception {
                                return Tuple2.of(
                                        value.intValue(),
                                        TestClientBase.buildMessage(value.intValue()));
                            }
                        })
                .sinkTo(sink);
        env.execute();
        context.assertThatIdsAreWritten(index, 1, 2, 3, 4, 5);
    }
    // Arguments source for testWriteJsonToElasticsearch: JSON and SMILE emitters.
    private static List<BiFunction<String, String, ElasticsearchEmitter<Tuple2<Integer, String>>>>
            elasticsearchEmitters() {
        return Arrays.asList(TestEmitter::jsonEmitter, TestEmitter::smileEmitter);
    }
    /**
     * Mapper that throws exactly once on the first completed checkpoint after it has emitted at
     * least one record, forcing the job to recover from that checkpoint.
     */
    private static class FailingMapper implements MapFunction<Long, Long>, CheckpointListener {
        private int emittedRecords = 0;
        @Override
        public Long map(Long value) throws Exception {
            // Slow the stream down so a checkpoint can complete mid-flight.
            Thread.sleep(50);
            emittedRecords++;
            return value;
        }
        @Override
        public void notifyCheckpointComplete(long checkpointId) throws Exception {
            // Fail only once, and only after data has actually been emitted.
            if (failed || emittedRecords == 0) {
                return;
            }
            failed = true;
            throw new Exception("Expected failure");
        }
    }
}
| 5,769 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchWriterITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.api.common.operators.MailboxExecutor;
import org.apache.flink.api.connector.sink2.SinkWriter;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.elasticsearch.ElasticsearchUtil;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.apache.flink.metrics.Counter;
import org.apache.flink.metrics.Gauge;
import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.metrics.groups.OperatorIOMetricGroup;
import org.apache.flink.metrics.groups.SinkWriterMetricGroup;
import org.apache.flink.metrics.testutils.MetricListener;
import org.apache.flink.runtime.metrics.MetricNames;
import org.apache.flink.runtime.metrics.groups.UnregisteredMetricGroups;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.apache.flink.test.junit5.MiniClusterExtension;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.TestLoggerExtension;
import org.apache.flink.util.function.ThrowingRunnable;
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.function.Consumer;
import static org.apache.flink.connector.elasticsearch.sink.TestClientBase.DOCUMENT_TYPE;
import static org.apache.flink.connector.elasticsearch.sink.TestClientBase.buildMessage;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Tests for {@link ElasticsearchWriter}.
 *
 * <p>Runs a real Elasticsearch 7 instance via Testcontainers and drives the writer directly (no
 * full Flink job), verifying bulk-flush behavior and the metrics the writer registers.
 */
@Testcontainers
@ExtendWith(TestLoggerExtension.class)
class ElasticsearchWriterITCase {
    private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchWriterITCase.class);
    // Small local Flink cluster shared by all tests of this class.
    @RegisterExtension
    private static final MiniClusterExtension MINI_CLUSTER_RESOURCE =
            new MiniClusterExtension(
                    new MiniClusterResourceConfiguration.Builder()
                            .setNumberTaskManagers(1)
                            .setNumberSlotsPerTaskManager(3)
                            .build());
    // Elasticsearch 7 container shared by all tests of this class.
    @Container
    private static final ElasticsearchContainer ES_CONTAINER =
            ElasticsearchUtil.createElasticsearchContainer(
                    DockerImageVersions.ELASTICSEARCH_7, LOG);
    // REST client used to verify which documents the writer actually stored.
    private RestHighLevelClient client;
    private TestClientBase context;
    // Captures the metrics registered by the writer so tests can assert on them.
    private MetricListener metricListener;
    @BeforeEach
    void setUp() {
        metricListener = new MetricListener();
        client =
                new RestHighLevelClient(
                        RestClient.builder(HttpHost.create(ES_CONTAINER.getHttpHostAddress())));
        context = new TestClient(client);
    }
    @AfterEach
    void tearDown() throws IOException {
        if (client != null) {
            client.close();
        }
    }
    /** Documents become visible only once the configured bulk action count (5) is reached. */
    @Test
    void testWriteOnBulkFlush() throws Exception {
        final String index = "test-bulk-flush-without-checkpoint";
        final int flushAfterNActions = 5;
        final BulkProcessorConfig bulkProcessorConfig =
                new BulkProcessorConfig(flushAfterNActions, -1, -1, FlushBackoffType.NONE, 0, 0);
        try (final ElasticsearchWriter<Tuple2<Integer, String>> writer =
                createWriter(index, false, bulkProcessorConfig)) {
            writer.write(Tuple2.of(1, buildMessage(1)), null);
            writer.write(Tuple2.of(2, buildMessage(2)), null);
            writer.write(Tuple2.of(3, buildMessage(3)), null);
            writer.write(Tuple2.of(4, buildMessage(4)), null);
            // Ignore flush on checkpoint
            writer.flush(false);
            context.assertThatIdsAreNotWritten(index, 1, 2, 3, 4);
            // Trigger flush
            writer.write(Tuple2.of(5, "test-5"), null);
            context.assertThatIdsAreWritten(index, 1, 2, 3, 4, 5);
            writer.write(Tuple2.of(6, "test-6"), null);
            context.assertThatIdsAreNotWritten(index, 6);
            // Force flush
            writer.blockingFlushAllActions();
            context.assertThatIdsAreWritten(index, 1, 2, 3, 4, 5, 6);
        }
    }
    /** With only an interval-based trigger configured, a flush still delivers all documents. */
    @Test
    void testWriteOnBulkIntervalFlush() throws Exception {
        final String index = "test-bulk-flush-with-interval";
        // Configure bulk processor to flush every 1s;
        final BulkProcessorConfig bulkProcessorConfig =
                new BulkProcessorConfig(-1, -1, 1000, FlushBackoffType.NONE, 0, 0);
        try (final ElasticsearchWriter<Tuple2<Integer, String>> writer =
                createWriter(index, false, bulkProcessorConfig)) {
            writer.write(Tuple2.of(1, buildMessage(1)), null);
            writer.write(Tuple2.of(2, buildMessage(2)), null);
            writer.write(Tuple2.of(3, buildMessage(3)), null);
            writer.write(Tuple2.of(4, buildMessage(4)), null);
            writer.blockingFlushAllActions();
        }
        context.assertThatIdsAreWritten(index, 1, 2, 3, 4);
    }
    /** With flush-on-checkpoint enabled, documents are only visible after flush(). */
    @Test
    void testWriteOnCheckpoint() throws Exception {
        final String index = "test-bulk-flush-with-checkpoint";
        final BulkProcessorConfig bulkProcessorConfig =
                new BulkProcessorConfig(-1, -1, -1, FlushBackoffType.NONE, 0, 0);
        // Enable flush on checkpoint
        try (final ElasticsearchWriter<Tuple2<Integer, String>> writer =
                createWriter(index, true, bulkProcessorConfig)) {
            writer.write(Tuple2.of(1, buildMessage(1)), null);
            writer.write(Tuple2.of(2, buildMessage(2)), null);
            writer.write(Tuple2.of(3, buildMessage(3)), null);
            context.assertThatIdsAreNotWritten(index, 1, 2, 3);
            // Trigger flush
            writer.flush(false);
            context.assertThatIdsAreWritten(index, 1, 2, 3);
        }
    }
    /** The numBytesOut counter must grow after each completed bulk flush. */
    @Test
    void testIncrementByteOutMetric() throws Exception {
        final String index = "test-inc-byte-out";
        final OperatorIOMetricGroup operatorIOMetricGroup =
                UnregisteredMetricGroups.createUnregisteredOperatorMetricGroup().getIOMetricGroup();
        final int flushAfterNActions = 2;
        final BulkProcessorConfig bulkProcessorConfig =
                new BulkProcessorConfig(flushAfterNActions, -1, -1, FlushBackoffType.NONE, 0, 0);
        try (final ElasticsearchWriter<Tuple2<Integer, String>> writer =
                createWriter(
                        index,
                        false,
                        bulkProcessorConfig,
                        getSinkWriterMetricGroup(operatorIOMetricGroup))) {
            final Counter numBytesOut = operatorIOMetricGroup.getNumBytesOutCounter();
            assertThat(numBytesOut.getCount()).isZero();
            writer.write(Tuple2.of(1, buildMessage(1)), null);
            writer.write(Tuple2.of(2, buildMessage(2)), null);
            writer.blockingFlushAllActions();
            long first = numBytesOut.getCount();
            assertThat(first).isGreaterThan(0);
            writer.write(Tuple2.of(1, buildMessage(1)), null);
            writer.write(Tuple2.of(2, buildMessage(2)), null);
            writer.blockingFlushAllActions();
            // The second flush must add to the counter beyond the first flush's value.
            assertThat(numBytesOut.getCount()).isGreaterThan(first);
        }
    }
    /** Index, update, and delete requests must each count towards records sent. */
    @Test
    void testIncrementRecordsSendMetric() throws Exception {
        final String index = "test-inc-records-send";
        final int flushAfterNActions = 2;
        final BulkProcessorConfig bulkProcessorConfig =
                new BulkProcessorConfig(flushAfterNActions, -1, -1, FlushBackoffType.NONE, 0, 0);
        try (final ElasticsearchWriter<Tuple2<Integer, String>> writer =
                createWriter(index, false, bulkProcessorConfig)) {
            final Optional<Counter> recordsSend =
                    metricListener.getCounter(MetricNames.NUM_RECORDS_SEND);
            writer.write(Tuple2.of(1, buildMessage(1)), null);
            // Update existing index
            writer.write(Tuple2.of(1, "u" + buildMessage(2)), null);
            // Delete index
            writer.write(Tuple2.of(1, "d" + buildMessage(3)), null);
            writer.blockingFlushAllActions();
            assertThat(recordsSend).isPresent();
            assertThat(recordsSend.get().getCount()).isEqualTo(3L);
        }
    }
    /** The currentSendTime gauge must be registered and report a positive duration. */
    @Test
    void testCurrentSendTime() throws Exception {
        final String index = "test-current-send-time";
        final int flushAfterNActions = 2;
        final BulkProcessorConfig bulkProcessorConfig =
                new BulkProcessorConfig(flushAfterNActions, -1, -1, FlushBackoffType.NONE, 0, 0);
        try (final ElasticsearchWriter<Tuple2<Integer, String>> writer =
                createWriter(index, false, bulkProcessorConfig)) {
            final Optional<Gauge<Long>> currentSendTime =
                    metricListener.getGauge("currentSendTime");
            writer.write(Tuple2.of(1, buildMessage(1)), null);
            writer.write(Tuple2.of(2, buildMessage(2)), null);
            writer.blockingFlushAllActions();
            assertThat(currentSendTime).isPresent();
            assertThat(currentSendTime.get().getValue()).isGreaterThan(0L);
        }
    }
    // Convenience overload building the writer with a freshly created metric group.
    private ElasticsearchWriter<Tuple2<Integer, String>> createWriter(
            String index, boolean flushOnCheckpoint, BulkProcessorConfig bulkProcessorConfig) {
        return createWriter(
                index, flushOnCheckpoint, bulkProcessorConfig, getSinkWriterMetricGroup());
    }
    /**
     * Creates the writer under test against the shared container, using the {@link
     * UpdatingEmitter} (payload prefix selects index/update/delete) and a synchronous mailbox.
     * The NetworkClientConfig passes all-null settings, i.e. no authentication is configured.
     */
    private ElasticsearchWriter<Tuple2<Integer, String>> createWriter(
            String index,
            boolean flushOnCheckpoint,
            BulkProcessorConfig bulkProcessorConfig,
            SinkWriterMetricGroup metricGroup) {
        return new ElasticsearchWriter<>(
                Collections.singletonList(HttpHost.create(ES_CONTAINER.getHttpHostAddress())),
                new UpdatingEmitter(index, context.getDataFieldName()),
                flushOnCheckpoint,
                bulkProcessorConfig,
                new TestBulkProcessorBuilderFactory(),
                new NetworkClientConfig(null, null, null, null, null, null),
                metricGroup,
                new TestMailbox());
    }
    // Builds a metric group backed by a fresh, unregistered operator IO metric group.
    private TestingSinkWriterMetricGroup getSinkWriterMetricGroup() {
        final OperatorIOMetricGroup operatorIOMetricGroup =
                UnregisteredMetricGroups.createUnregisteredOperatorMetricGroup().getIOMetricGroup();
        return getSinkWriterMetricGroup(operatorIOMetricGroup);
    }
    /**
     * Wires a {@link TestingSinkWriterMetricGroup} whose counters are registered on the
     * metricListener's group (so tests can look them up by name) and whose send/byte counters
     * forward to the given operator IO metric group.
     */
    private TestingSinkWriterMetricGroup getSinkWriterMetricGroup(
            OperatorIOMetricGroup operatorIOMetricGroup) {
        MetricGroup parentMetricGroup = metricListener.getMetricGroup();
        Counter numRecordsOutErrors = parentMetricGroup.counter(MetricNames.NUM_RECORDS_OUT_ERRORS);
        Counter numRecordsSendErrors =
                parentMetricGroup.counter(MetricNames.NUM_RECORDS_SEND_ERRORS, numRecordsOutErrors);
        Counter numRecordsWritten =
                parentMetricGroup.counter(
                        MetricNames.NUM_RECORDS_SEND,
                        operatorIOMetricGroup.getNumRecordsOutCounter());
        Counter numBytesWritten =
                parentMetricGroup.counter(
                        MetricNames.NUM_BYTES_SEND, operatorIOMetricGroup.getNumBytesOutCounter());
        Consumer<Gauge<Long>> currentSendTimeGaugeConsumer =
                currentSendTimeGauge ->
                        parentMetricGroup.gauge(
                                MetricNames.CURRENT_SEND_TIME, currentSendTimeGauge);
        return new TestingSinkWriterMetricGroup.Builder()
                .setParentMetricGroup(parentMetricGroup)
                .setIoMetricGroupSupplier(() -> operatorIOMetricGroup)
                .setNumRecordsOutErrorsCounterSupplier(() -> numRecordsOutErrors)
                .setNumRecordsSendErrorsCounterSupplier(() -> numRecordsSendErrors)
                .setNumRecordsSendCounterSupplier(() -> numRecordsWritten)
                .setNumBytesSendCounterSupplier(() -> numBytesWritten)
                .setCurrentSendTimeGaugeConsumer(currentSendTimeGaugeConsumer)
                .build();
    }
    /**
     * Builds a {@link BulkProcessor.Builder} from the given config, mirroring the production
     * translation of flush actions/size/interval and backoff policy.
     */
    private static class TestBulkProcessorBuilderFactory implements BulkProcessorBuilderFactory {
        @Override
        public BulkProcessor.Builder apply(
                RestHighLevelClient client,
                BulkProcessorConfig bulkProcessorConfig,
                BulkProcessor.Listener listener) {
            BulkProcessor.Builder builder =
                    BulkProcessor.builder(
                            new BulkRequestConsumerFactory() { // This cannot be inlined as a lambda
                                // because then deserialization fails
                                @Override
                                public void accept(
                                        BulkRequest bulkRequest,
                                        ActionListener<BulkResponse> bulkResponseActionListener) {
                                    client.bulkAsync(
                                            bulkRequest,
                                            RequestOptions.DEFAULT,
                                            bulkResponseActionListener);
                                }
                            },
                            listener);
            // -1 means "not configured" for each of the following settings.
            if (bulkProcessorConfig.getBulkFlushMaxActions() != -1) {
                builder.setBulkActions(bulkProcessorConfig.getBulkFlushMaxActions());
            }
            if (bulkProcessorConfig.getBulkFlushMaxMb() != -1) {
                builder.setBulkSize(
                        new ByteSizeValue(
                                bulkProcessorConfig.getBulkFlushMaxMb(), ByteSizeUnit.MB));
            }
            if (bulkProcessorConfig.getBulkFlushInterval() != -1) {
                builder.setFlushInterval(new TimeValue(bulkProcessorConfig.getBulkFlushInterval()));
            }
            BackoffPolicy backoffPolicy;
            final TimeValue backoffDelay =
                    new TimeValue(bulkProcessorConfig.getBulkFlushBackOffDelay());
            final int maxRetryCount = bulkProcessorConfig.getBulkFlushBackoffRetries();
            switch (bulkProcessorConfig.getFlushBackoffType()) {
                case CONSTANT:
                    backoffPolicy = BackoffPolicy.constantBackoff(backoffDelay, maxRetryCount);
                    break;
                case EXPONENTIAL:
                    backoffPolicy = BackoffPolicy.exponentialBackoff(backoffDelay, maxRetryCount);
                    break;
                case NONE:
                    backoffPolicy = BackoffPolicy.noBackoff();
                    break;
                default:
                    throw new IllegalArgumentException(
                            "Received unknown backoff policy type "
                                    + bulkProcessorConfig.getFlushBackoffType());
            }
            builder.setBackoffPolicy(backoffPolicy);
            return builder;
        }
    }
    /**
     * Emitter whose action is selected by the first character of the payload: 'd' deletes the
     * document, 'u' updates it, anything else indexes it. The tuple's first field is the id.
     */
    private static class UpdatingEmitter implements ElasticsearchEmitter<Tuple2<Integer, String>> {
        private final String dataFieldName;
        private final String index;
        UpdatingEmitter(String index, String dataFieldName) {
            this.index = index;
            this.dataFieldName = dataFieldName;
        }
        @Override
        public void emit(
                Tuple2<Integer, String> element,
                SinkWriter.Context context,
                RequestIndexer indexer) {
            Map<String, Object> document = new HashMap<>();
            document.put(dataFieldName, element.f1);
            final char action = element.f1.charAt(0);
            final String id = element.f0.toString();
            switch (action) {
                case 'd':
                    {
                        indexer.add(new DeleteRequest(index).id(id));
                        break;
                    }
                case 'u':
                    {
                        indexer.add(new UpdateRequest().index(index).id(id).doc(document));
                        break;
                    }
                default:
                    {
                        indexer.add(
                                new IndexRequest(index)
                                        .id(id)
                                        .type(DOCUMENT_TYPE)
                                        .source(document));
                    }
            }
        }
    }
    /** Elasticsearch-7 flavored verification client (looks documents up by index and id). */
    private static class TestClient extends TestClientBase {
        TestClient(RestHighLevelClient client) {
            super(client);
        }
        @Override
        GetResponse getResponse(String index, int id) throws IOException {
            return client.get(new GetRequest(index, Integer.toString(id)), RequestOptions.DEFAULT);
        }
    }
    /** Mailbox executor that runs every submitted command synchronously in the calling thread. */
    private static class TestMailbox implements MailboxExecutor {
        @Override
        public void execute(
                ThrowingRunnable<? extends Exception> command,
                String descriptionFormat,
                Object... descriptionArgs) {
            try {
                command.run();
            } catch (Exception e) {
                throw new RuntimeException("Unexpected error", e);
            }
        }
        @Override
        public void yield() throws InterruptedException, FlinkRuntimeException {
            // Nothing queued to process; just back off briefly.
            Thread.sleep(100);
        }
        @Override
        public boolean tryYield() throws FlinkRuntimeException {
            return false;
        }
    }
}
| 5,770 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/TestingSinkWriterMetricGroup.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.metrics.Counter;
import org.apache.flink.metrics.Gauge;
import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.metrics.groups.OperatorIOMetricGroup;
import org.apache.flink.metrics.groups.SinkWriterMetricGroup;
import org.apache.flink.runtime.metrics.groups.ProxyMetricGroup;
import java.util.function.Consumer;
import java.util.function.Supplier;
/** Testing implementation for {@link SinkWriterMetricGroup}. */
public class TestingSinkWriterMetricGroup extends ProxyMetricGroup<MetricGroup>
implements SinkWriterMetricGroup {
private final Supplier<Counter> numRecordsOutErrorsCounterSupplier;
private final Supplier<Counter> numRecordsSendErrorsCounterSupplier;
private final Supplier<Counter> numRecordsSendCounterSupplier;
private final Supplier<Counter> numBytesSendCounterSupplier;
private final Consumer<Gauge<Long>> currentSendTimeGaugeConsumer;
private final Supplier<OperatorIOMetricGroup> ioMetricGroupSupplier;
public TestingSinkWriterMetricGroup(
MetricGroup parentMetricGroup,
Supplier<Counter> numRecordsOutErrorsCounterSupplier,
Supplier<Counter> numRecordsSendErrorsCounterSupplier,
Supplier<Counter> numRecordsSendCounterSupplier,
Supplier<Counter> numBytesSendCounterSupplier,
Consumer<Gauge<Long>> currentSendTimeGaugeConsumer,
Supplier<OperatorIOMetricGroup> ioMetricGroupSupplier) {
super(parentMetricGroup);
this.numRecordsOutErrorsCounterSupplier = numRecordsOutErrorsCounterSupplier;
this.numRecordsSendErrorsCounterSupplier = numRecordsSendErrorsCounterSupplier;
this.numRecordsSendCounterSupplier = numRecordsSendCounterSupplier;
this.numBytesSendCounterSupplier = numBytesSendCounterSupplier;
this.currentSendTimeGaugeConsumer = currentSendTimeGaugeConsumer;
this.ioMetricGroupSupplier = ioMetricGroupSupplier;
}
@Override
public Counter getNumRecordsOutErrorsCounter() {
return numRecordsOutErrorsCounterSupplier.get();
}
@Override
public Counter getNumRecordsSendErrorsCounter() {
return numRecordsSendErrorsCounterSupplier.get();
}
@Override
public Counter getNumRecordsSendCounter() {
return numRecordsSendCounterSupplier.get();
}
@Override
public Counter getNumBytesSendCounter() {
return numBytesSendCounterSupplier.get();
}
@Override
public void setCurrentSendTimeGauge(Gauge<Long> gauge) {
currentSendTimeGaugeConsumer.accept(gauge);
}
@Override
public OperatorIOMetricGroup getIOMetricGroup() {
return ioMetricGroupSupplier.get();
}
/** Builder for {@link TestingSinkWriterMetricGroup}. */
public static class Builder {
private MetricGroup parentMetricGroup = null;
private Supplier<Counter> numRecordsOutErrorsCounterSupplier = () -> null;
private Supplier<Counter> numRecordsSendErrorsCounterSupplier = () -> null;
private Supplier<Counter> numRecordsSendCounterSupplier = () -> null;
private Supplier<Counter> numBytesSendCounterSupplier = () -> null;
private Consumer<Gauge<Long>> currentSendTimeGaugeConsumer = counter -> {};
private Supplier<OperatorIOMetricGroup> ioMetricGroupSupplier = () -> null;
public Builder setParentMetricGroup(MetricGroup parentMetricGroup) {
this.parentMetricGroup = parentMetricGroup;
return this;
}
public Builder setNumRecordsOutErrorsCounterSupplier(
Supplier<Counter> numRecordsOutErrorsCounterSupplier) {
this.numRecordsOutErrorsCounterSupplier = numRecordsOutErrorsCounterSupplier;
return this;
}
public Builder setNumRecordsSendErrorsCounterSupplier(
Supplier<Counter> numRecordsSendErrorsCounterSupplier) {
this.numRecordsSendErrorsCounterSupplier = numRecordsSendErrorsCounterSupplier;
return this;
}
public Builder setNumRecordsSendCounterSupplier(
Supplier<Counter> numRecordsSendCounterSupplier) {
this.numRecordsSendCounterSupplier = numRecordsSendCounterSupplier;
return this;
}
public Builder setNumBytesSendCounterSupplier(
Supplier<Counter> numBytesSendCounterSupplier) {
this.numBytesSendCounterSupplier = numBytesSendCounterSupplier;
return this;
}
public Builder setCurrentSendTimeGaugeConsumer(
Consumer<Gauge<Long>> currentSendTimeGaugeConsumer) {
this.currentSendTimeGaugeConsumer = currentSendTimeGaugeConsumer;
return this;
}
public Builder setIoMetricGroupSupplier(
Supplier<OperatorIOMetricGroup> ioMetricGroupSupplier) {
this.ioMetricGroupSupplier = ioMetricGroupSupplier;
return this;
}
public TestingSinkWriterMetricGroup build() {
return new TestingSinkWriterMetricGroup(
parentMetricGroup,
numRecordsOutErrorsCounterSupplier,
numRecordsSendErrorsCounterSupplier,
numRecordsSendCounterSupplier,
numBytesSendCounterSupplier,
currentSendTimeGaugeConsumer,
ioMetricGroupSupplier);
}
}
}
| 5,771 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBuilderBaseTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.util.TestLoggerExtension;
import org.apache.http.HttpHost;
import org.junit.jupiter.api.DynamicTest;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestFactory;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.extension.ExtendWith;
import java.util.stream.Stream;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatCode;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
/** Tests for {@link ElasticsearchSinkBuilderBase}. */
@ExtendWith(TestLoggerExtension.class)
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
abstract class ElasticsearchSinkBuilderBaseTest<B extends ElasticsearchSinkBuilderBase<Object, B>> {
    /** Verifies that representative, correctly-configured builders build without throwing. */
    @TestFactory
    Stream<DynamicTest> testValidBuilders() {
        Stream<B> validBuilders =
                Stream.of(
                        createMinimalBuilder(),
                        createMinimalBuilder()
                                .setDeliveryGuarantee(DeliveryGuarantee.AT_LEAST_ONCE),
                        createMinimalBuilder()
                                .setBulkFlushBackoffStrategy(FlushBackoffType.CONSTANT, 1, 1),
                        createMinimalBuilder()
                                .setConnectionUsername("username")
                                .setConnectionPassword("password"));
        return DynamicTest.stream(
                validBuilders,
                ElasticsearchSinkBuilderBase::toString,
                builder -> assertThatCode(builder::build).doesNotThrowAnyException());
    }
    /** The sink must default to at-least-once delivery when no guarantee is configured. */
    @Test
    void testDefaultDeliveryGuarantee() {
        assertThat(createMinimalBuilder().build().getDeliveryGuarantee())
                .isEqualTo(DeliveryGuarantee.AT_LEAST_ONCE);
    }
    /** Exactly-once delivery is unsupported by this sink and must be rejected immediately. */
    @Test
    void testThrowIfExactlyOnceConfigured() {
        assertThatThrownBy(
                        () ->
                                createMinimalBuilder()
                                        .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE))
                .isInstanceOf(IllegalStateException.class);
    }
    /** Building without hosts fails fast with a NullPointerException. */
    @Test
    void testThrowIfHostsNotSet() {
        assertThatThrownBy(
                        () ->
                                createEmptyBuilder()
                                        .setEmitter((element, indexer, context) -> {})
                                        .build())
                .isInstanceOf(NullPointerException.class);
    }
    /** Building without an emitter fails fast with a NullPointerException. */
    @Test
    void testThrowIfEmitterNotSet() {
        assertThatThrownBy(
                () -> createEmptyBuilder().setHosts(new HttpHost("localhost:3000")).build())
                .isInstanceOf(NullPointerException.class);
    }
    /** Negative values are rejected for all three connection-related timeout settings. */
    @Test
    void testThrowIfSetInvalidTimeouts() {
        assertThatThrownBy(() -> createEmptyBuilder().setConnectionRequestTimeout(-1).build())
                .isInstanceOf(IllegalStateException.class);
        assertThatThrownBy(() -> createEmptyBuilder().setConnectionTimeout(-1).build())
                .isInstanceOf(IllegalStateException.class);
        assertThatThrownBy(() -> createEmptyBuilder().setSocketTimeout(-1).build())
                .isInstanceOf(IllegalStateException.class);
    }
    /** Returns a builder with no options set; used to exercise the failure modes above. */
    abstract B createEmptyBuilder();
    /** Returns a builder with the minimal configuration required to build successfully. */
    abstract B createMinimalBuilder();
}
| 5,772 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/TestClientBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.RestHighLevelClient;
import java.io.IOException;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Base class for lightweight Elasticsearch test clients used to verify the documents written by
 * the sink under test. Version-specific subclasses only implement how a single document is
 * fetched.
 */
abstract class TestClientBase {
    static final String DOCUMENT_TYPE = "test-document-type";
    private static final String DATA_FIELD_NAME = "data";
    // Bound the visibility polling so a lost document fails the test instead of hanging it.
    private static final long POLL_TIMEOUT_MS = 30_000L;
    private static final long POLL_INTERVAL_MS = 10L;
    final RestHighLevelClient client;
    TestClientBase(RestHighLevelClient client) {
        this.client = client;
    }
    /** Fetches the document with the given numeric id from the given index. */
    abstract GetResponse getResponse(String index, int id) throws IOException;
    /**
     * Asserts that none of the given ids exist in {@code index}. A 404 status from Elasticsearch
     * also counts as "not written".
     */
    void assertThatIdsAreNotWritten(String index, int... ids) throws IOException {
        for (final int id : ids) {
            try {
                final GetResponse response = getResponse(index, id);
                assertThat(response.isExists())
                        .as(String.format("Id %s is unexpectedly present.", id))
                        .isFalse();
            } catch (ElasticsearchStatusException e) {
                assertThat(e.status().getStatus()).isEqualTo(404);
            }
        }
    }
    /**
     * Asserts that every given id was written with the payload produced by {@link #buildMessage}.
     * Indexing is asynchronous, so this polls until the document source becomes visible; the wait
     * is bounded by {@link #POLL_TIMEOUT_MS} and only sleeps between retries, not after success.
     */
    void assertThatIdsAreWritten(String index, int... ids)
            throws IOException, InterruptedException {
        for (final int id : ids) {
            final long deadline = System.currentTimeMillis() + POLL_TIMEOUT_MS;
            GetResponse response = getResponse(index, id);
            while (response.isSourceEmpty()) {
                if (System.currentTimeMillis() > deadline) {
                    throw new AssertionError(
                            String.format(
                                    "Document with id %s did not appear in index %s within %s ms.",
                                    id, index, POLL_TIMEOUT_MS));
                }
                Thread.sleep(POLL_INTERVAL_MS);
                response = getResponse(index, id);
            }
            assertThat(response.getSource().get(DATA_FIELD_NAME)).isEqualTo(buildMessage(id));
        }
    }
    /** Name of the single payload field every test document carries. */
    String getDataFieldName() {
        return DATA_FIELD_NAME;
    }
    /** Deterministic payload for the document with the given id. */
    static String buildMessage(int id) {
        return "test-" + id;
    }
}
| 5,773 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/TestEmitter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.api.connector.sink2.SinkWriter;
import org.apache.flink.api.java.tuple.Tuple2;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import java.io.IOException;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import static org.apache.flink.connector.elasticsearch.sink.TestClientBase.DOCUMENT_TYPE;
/**
 * Test {@link ElasticsearchEmitter} that turns {@code Tuple2<Integer, String>} elements into
 * index requests: f0 becomes the document id, f1 the payload stored under a configurable field.
 */
class TestEmitter implements ElasticsearchEmitter<Tuple2<Integer, String>> {
    private final String index;
    private final XContentBuilderProvider contentBuilderProvider;
    private final String dataFieldName;
    /** Creates an emitter that serializes documents as JSON. */
    public static TestEmitter jsonEmitter(String index, String dataFieldName) {
        return new TestEmitter(index, dataFieldName, XContentFactory::jsonBuilder);
    }
    /** Creates an emitter that serializes documents in the SMILE binary format. */
    public static TestEmitter smileEmitter(String index, String dataFieldName) {
        return new TestEmitter(index, dataFieldName, XContentFactory::smileBuilder);
    }
    private TestEmitter(
            String index, String dataFieldName, XContentBuilderProvider contentBuilderProvider) {
        this.index = index;
        this.dataFieldName = dataFieldName;
        this.contentBuilderProvider = contentBuilderProvider;
    }
    @Override
    public void emit(
            Tuple2<Integer, String> element, SinkWriter.Context context, RequestIndexer indexer) {
        indexer.add(createIndexRequest(element));
    }
    /** Builds an index request whose id is the tuple's f0 and whose source wraps f1. */
    private IndexRequest createIndexRequest(Tuple2<Integer, String> element) {
        final String documentId = element.f0.toString();
        final Map<String, Object> document = new HashMap<>();
        document.put(dataFieldName, element.f1);
        try {
            return new IndexRequest(index)
                    .id(documentId)
                    .type(DOCUMENT_TYPE)
                    .source(contentBuilderProvider.getBuilder().map(document));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    /** Serializable supplier of {@link XContentBuilder} instances. */
    @FunctionalInterface
    private interface XContentBuilderProvider extends Serializable {
        XContentBuilder getBuilder() throws IOException;
    }
}
| 5,774 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/TestContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.catalog.ResolvedCatalogTable;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.factories.DynamicTableFactory;
import org.apache.flink.table.factories.FactoryUtil;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/** A utility class for mocking {@link DynamicTableFactory.Context}. */
class TestContext {
private ResolvedSchema schema = ResolvedSchema.of(Column.physical("a", DataTypes.TIME()));
private final Map<String, String> options = new HashMap<>();
public static TestContext context() {
return new TestContext();
}
public TestContext withSchema(ResolvedSchema schema) {
this.schema = schema;
return this;
}
DynamicTableFactory.Context build() {
return new FactoryUtil.DefaultDynamicTableContext(
ObjectIdentifier.of("default", "default", "t1"),
new ResolvedCatalogTable(
CatalogTable.of(
Schema.newBuilder().fromResolvedSchema(schema).build(),
"mock context",
Collections.emptyList(),
options),
schema),
Collections.emptyMap(),
new Configuration(),
TestContext.class.getClassLoader(),
false);
}
public TestContext withOption(String key, String value) {
options.put(key, value);
return this;
}
}
| 5,775 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkFactoryBaseTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.api.common.typeutils.base.VoidSerializer;
import org.apache.flink.connector.elasticsearch.ElasticsearchUtil;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.TestLoggerExtension;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import java.util.Arrays;
import java.util.Collections;
import static org.apache.flink.table.api.DataTypes.ARRAY;
import static org.apache.flink.table.api.DataTypes.BIGINT;
import static org.apache.flink.table.api.DataTypes.BYTES;
import static org.apache.flink.table.api.DataTypes.FIELD;
import static org.apache.flink.table.api.DataTypes.MAP;
import static org.apache.flink.table.api.DataTypes.MULTISET;
import static org.apache.flink.table.api.DataTypes.RAW;
import static org.apache.flink.table.api.DataTypes.ROW;
import static org.apache.flink.table.api.DataTypes.STRING;
import static org.apache.flink.table.factories.FactoryUtil.SINK_PARALLELISM;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
/** Tests for validation in {@link ElasticsearchDynamicSinkFactoryBase}. */
@ExtendWith(TestLoggerExtension.class)
abstract class ElasticsearchDynamicSinkFactoryBaseTest {
    /** Creates the version-specific factory under test. */
    abstract ElasticsearchDynamicSinkFactoryBase createSinkFactory();
    /** Creates a context pre-populated with the minimal valid sink options. */
    abstract TestContext createPrefilledTestContext();
    /** An empty 'index' option must be rejected with a clear message. */
    @Test
    public void validateWrongIndex() {
        ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
        assertThatThrownBy(
                        () ->
                                sinkFactory.createDynamicTableSink(
                                        createPrefilledTestContext()
                                                .withOption(
                                                        ElasticsearchConnectorOptions.INDEX_OPTION
                                                                .key(),
                                                        "")
                                                .build()))
                .isInstanceOf(ValidationException.class)
                .hasMessage("'index' must not be empty");
    }
    /** A host that does not follow 'http://host_name:port' must be rejected. */
    @Test
    public void validateWrongHosts() {
        ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
        assertThatThrownBy(
                        () ->
                                sinkFactory.createDynamicTableSink(
                                        createPrefilledTestContext()
                                                .withOption(
                                                        ElasticsearchConnectorOptions.HOSTS_OPTION
                                                                .key(),
                                                        "wrong-host")
                                                .build()))
                .isInstanceOf(ValidationException.class)
                .hasMessage(
                        "Could not parse host 'wrong-host' in option 'hosts'. It should follow the format 'http://host_name:port'.");
    }
    /** The bulk flush size must be configured in whole megabytes. */
    @Test
    public void validateWrongFlushSize() {
        ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
        assertThatThrownBy(
                        () ->
                                sinkFactory.createDynamicTableSink(
                                        createPrefilledTestContext()
                                                .withOption(
                                                        ElasticsearchConnectorOptions
                                                                .BULK_FLUSH_MAX_SIZE_OPTION
                                                                .key(),
                                                        "1kb")
                                                .build()))
                .isInstanceOf(ValidationException.class)
                .hasMessage(
                        "'sink.bulk-flush.max-size' must be in MB granularity. Got: 1024 bytes");
    }
    /** The backoff retry count must be at least 1. */
    @Test
    public void validateWrongRetries() {
        ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
        assertThatThrownBy(
                        () ->
                                sinkFactory.createDynamicTableSink(
                                        createPrefilledTestContext()
                                                .withOption(
                                                        ElasticsearchConnectorOptions
                                                                .BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION
                                                                .key(),
                                                        "0")
                                                .build()))
                .isInstanceOf(ValidationException.class)
                .hasMessage("'sink.bulk-flush.backoff.max-retries' must be at least 1. Got: 0");
    }
    /** The max bulk action count must be at least 1 (or -1 for unlimited, not tested here). */
    @Test
    public void validateWrongMaxActions() {
        ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
        assertThatThrownBy(
                        () ->
                                sinkFactory.createDynamicTableSink(
                                        createPrefilledTestContext()
                                                .withOption(
                                                        ElasticsearchConnectorOptions
                                                                .BULK_FLUSH_MAX_ACTIONS_OPTION
                                                                .key(),
                                                        "-2")
                                                .build()))
                .isInstanceOf(ValidationException.class)
                .hasMessage("'sink.bulk-flush.max-actions' must be at least 1. Got: -2");
    }
    /** A negative backoff delay must be rejected during option parsing. */
    @Test
    public void validateWrongBackoffDelay() {
        ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
        assertThatThrownBy(
                        () ->
                                sinkFactory.createDynamicTableSink(
                                        createPrefilledTestContext()
                                                .withOption(
                                                        ElasticsearchConnectorOptions
                                                                .BULK_FLUSH_BACKOFF_DELAY_OPTION
                                                                .key(),
                                                        "-1s")
                                                .build()))
                .isInstanceOf(ValidationException.class)
                .hasMessage("Invalid value for option 'sink.bulk-flush.backoff.delay'.");
    }
    /**
     * Primary keys may only use types with a stable string representation; composite/raw/binary
     * column types must be rejected since they cannot form a document id.
     */
    @Test
    public void validatePrimaryKeyOnIllegalColumn() {
        ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
        ResolvedSchema resolvedSchema =
                new ResolvedSchema(
                        Arrays.asList(
                                Column.physical("a", BIGINT().notNull()),
                                Column.physical("b", ARRAY(BIGINT().notNull()).notNull()),
                                Column.physical("c", MAP(BIGINT(), STRING()).notNull()),
                                Column.physical("d", MULTISET(BIGINT().notNull()).notNull()),
                                Column.physical("e", ROW(FIELD("a", BIGINT())).notNull()),
                                Column.physical(
                                        "f", RAW(Void.class, VoidSerializer.INSTANCE).notNull()),
                                Column.physical("g", BYTES().notNull())),
                        Collections.emptyList(),
                        UniqueConstraint.primaryKey(
                                "name", Arrays.asList("a", "b", "c", "d", "e", "f", "g")));
        assertThatThrownBy(
                        () ->
                                sinkFactory.createDynamicTableSink(
                                        createPrefilledTestContext()
                                                .withSchema(resolvedSchema)
                                                .build()))
                .isInstanceOf(ValidationException.class)
                .hasMessage(
                        "The table has a primary key on columns of illegal types: "
                                + "[ARRAY, MAP, MULTISET, ROW, RAW, VARBINARY].");
    }
    /** 'username' and 'password' must be set together; an empty password is invalid. */
    @Test
    public void validateWrongCredential() {
        ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
        assertThatThrownBy(
                        () ->
                                sinkFactory.createDynamicTableSink(
                                        createPrefilledTestContext()
                                                .withOption(
                                                        ElasticsearchConnectorOptions
                                                                .USERNAME_OPTION
                                                                .key(),
                                                        "username")
                                                .withOption(
                                                        ElasticsearchConnectorOptions
                                                                .PASSWORD_OPTION
                                                                .key(),
                                                        "")
                                                .build()))
                .isInstanceOf(ValidationException.class)
                .hasMessage(
                        "'username' and 'password' must be set at the same time. Got: username 'username' and password ''");
    }
    /**
     * A system-time-based dynamic index (e.g. {@code now()}) cannot be used on a changelog
     * stream, because updates could no longer locate the originally written document.
     */
    @Test
    public void validateDynamicIndexOnChangelogStream() {
        ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
        DynamicTableSink sink =
                sinkFactory.createDynamicTableSink(
                        createPrefilledTestContext()
                                .withOption(
                                        ElasticsearchConnectorOptions.INDEX_OPTION.key(),
                                        "dynamic-index-{now()|yyyy-MM-dd}_index")
                                .build());
        ChangelogMode changelogMode =
                ChangelogMode.newBuilder()
                        .addContainedKind(RowKind.DELETE)
                        .addContainedKind(RowKind.INSERT)
                        .build();
        assertThatThrownBy(() -> sink.getChangelogMode(changelogMode))
                .isInstanceOf(ValidationException.class)
                .hasMessage(
                        "Dynamic indexing based on system time only works on append only stream.");
    }
    /** The 'sink.parallelism' option must be propagated to the runtime provider. */
    @Test
    public void testSinkParallelism() {
        ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
        DynamicTableSink sink =
                sinkFactory.createDynamicTableSink(
                        createPrefilledTestContext()
                                .withOption(SINK_PARALLELISM.key(), "2")
                                .build());
        assertThat(sink).isInstanceOf(ElasticsearchDynamicSink.class);
        ElasticsearchDynamicSink esSink = (ElasticsearchDynamicSink) sink;
        SinkV2Provider provider =
                (SinkV2Provider) esSink.getSinkRuntimeProvider(new ElasticsearchUtil.MockContext());
        assertThat(provider.getParallelism()).hasValue(2);
    }
}
| 5,776 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/KeyExtractorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.junit.jupiter.api.Test;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.assertj.core.api.Assertions.assertThat;
/** Tests for {@link KeyExtractor}. */
/** Tests for {@link KeyExtractor}. */
public class KeyExtractorTest {
    /** A single-column key uses that column's value directly, with no delimiter. */
    @Test
    public void testSimpleKey() {
        List<LogicalTypeWithIndex> logicalTypesWithIndex =
                Stream.of(
                                new LogicalTypeWithIndex(
                                        0, DataTypes.BIGINT().notNull().getLogicalType()))
                        .collect(Collectors.toList());
        Function<RowData, String> keyExtractor =
                KeyExtractor.createKeyExtractor(logicalTypesWithIndex, "_");
        String key = keyExtractor.apply(GenericRowData.of(12L, StringData.fromString("ABCD")));
        assertThat(key).isEqualTo("12");
    }
    /** With no primary key columns the extractor returns null (auto-generated ids). */
    @Test
    public void testNoPrimaryKey() {
        List<LogicalTypeWithIndex> logicalTypesWithIndex = Collections.emptyList();
        Function<RowData, String> keyExtractor =
                KeyExtractor.createKeyExtractor(logicalTypesWithIndex, "_");
        String key = keyExtractor.apply(GenericRowData.of(12L, StringData.fromString("ABCD")));
        assertThat(key).isNull();
    }
    /** A composite key joins the configured columns in order with the delimiter. */
    @Test
    public void testTwoFieldsKey() {
        List<LogicalTypeWithIndex> logicalTypesWithIndex =
                Stream.of(
                                new LogicalTypeWithIndex(
                                        0, DataTypes.BIGINT().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        2, DataTypes.TIMESTAMP().notNull().getLogicalType()))
                        .collect(Collectors.toList());
        Function<RowData, String> keyExtractor =
                KeyExtractor.createKeyExtractor(logicalTypesWithIndex, "_");
        String key =
                keyExtractor.apply(
                        GenericRowData.of(
                                12L,
                                StringData.fromString("ABCD"),
                                TimestampData.fromLocalDateTime(
                                        LocalDateTime.parse("2012-12-12T12:12:12"))));
        assertThat(key).isEqualTo("12_2012-12-12T12:12:12");
    }
    /**
     * Exercises every supported key column type and pins the exact string rendering of each
     * (numeric, boolean, string, and all temporal types in their ISO-like forms).
     */
    @Test
    public void testAllTypesKey() {
        List<LogicalTypeWithIndex> logicalTypesWithIndex =
                Stream.of(
                                new LogicalTypeWithIndex(
                                        0, DataTypes.TINYINT().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        1, DataTypes.SMALLINT().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        2, DataTypes.INT().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        3, DataTypes.BIGINT().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        4, DataTypes.BOOLEAN().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        5, DataTypes.FLOAT().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        6, DataTypes.DOUBLE().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        7, DataTypes.STRING().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        8, DataTypes.TIMESTAMP().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        9,
                                        DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE()
                                                .notNull()
                                                .getLogicalType()),
                                new LogicalTypeWithIndex(
                                        10, DataTypes.TIME().notNull().getLogicalType()),
                                new LogicalTypeWithIndex(
                                        11, DataTypes.DATE().notNull().getLogicalType()))
                        .collect(Collectors.toList());
        Function<RowData, String> keyExtractor =
                KeyExtractor.createKeyExtractor(logicalTypesWithIndex, "_");
        String key =
                keyExtractor.apply(
                        GenericRowData.of(
                                (byte) 1,
                                (short) 2,
                                3,
                                (long) 4,
                                true,
                                1.0f,
                                2.0d,
                                StringData.fromString("ABCD"),
                                TimestampData.fromLocalDateTime(
                                        LocalDateTime.parse("2012-12-12T12:12:12")),
                                TimestampData.fromInstant(Instant.parse("2013-01-13T13:13:13Z")),
                                // TIME is stored internally as milliseconds of the day
                                (int) (LocalTime.parse("14:14:14").toNanoOfDay() / 1_000_000),
                                // DATE is stored internally as days since epoch
                                (int) LocalDate.parse("2015-05-15").toEpochDay()));
        assertThat(key)
                .isEqualTo(
                        "1_2_3_4_true_1.0_2.0_ABCD_2012-12-12T12:12:12_2013-01-13T13:13:13_14:14:14_2015-05-15");
    }
}
| 5,777 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkBaseITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.api.common.time.Deadline;
import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.connector.elasticsearch.ElasticsearchUtil;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.TableConfigOptions;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.test.junit5.MiniClusterExtension;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.TestLoggerExtension;
import org.apache.http.HttpHost;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHits;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.extension.RegisterExtension;
import java.io.IOException;
import java.time.Duration;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import static org.apache.flink.table.api.Expressions.row;
import static org.assertj.core.api.Assertions.assertThat;
/** IT tests for {@link ElasticsearchDynamicSink}. */
@ExtendWith(TestLoggerExtension.class)
abstract class ElasticsearchDynamicSinkBaseITCase {
private static final int PARALLELISM = 4;
@RegisterExtension
private static final MiniClusterExtension MINI_CLUSTER_RESOURCE =
new MiniClusterExtension(
new MiniClusterResourceConfiguration.Builder()
.setNumberTaskManagers(1)
.setNumberSlotsPerTaskManager(PARALLELISM)
.build());
    /** Address ('host:port') of the Elasticsearch container used by the concrete test class. */
    abstract String getElasticsearchHttpHostAddress();
    /** Creates the version-specific sink factory under test. */
    abstract ElasticsearchDynamicSinkFactoryBase getDynamicSinkFactory();
    /** Fetches a single document's source map by index and id (version-specific GET API). */
    abstract Map<String, Object> makeGetRequest(RestHighLevelClient client, String index, String id)
            throws IOException;
    /** Runs a match-all search against the given index (version-specific search API). */
    abstract SearchHits makeSearchRequest(RestHighLevelClient client, String index)
            throws IOException;
    /** Extracts the total hit count in a version-independent way. */
    abstract long getTotalSearchHits(SearchHits searchHits);
    /** Creates a {@link TestContext} pre-filled with the minimal options for {@code index}. */
    abstract TestContext getPrefilledTestContext(String index);
    /** Returns the WITH-clause connector options as SQL for {@code index}. */
    abstract String getConnectorSql(String index);
    private RestHighLevelClient getClient() {
        return new RestHighLevelClient(
                RestClient.builder(HttpHost.create(getElasticsearchHttpHostAddress())));
    }
    /**
     * Writes a single row directly through the sink runtime provider and verifies the document
     * that lands in Elasticsearch, pinning the string formatting applied to temporal types.
     */
    @Test
    public void testWritingDocuments() throws Exception {
        ResolvedSchema schema =
                new ResolvedSchema(
                        Arrays.asList(
                                Column.physical("a", DataTypes.BIGINT().notNull()),
                                Column.physical("b", DataTypes.TIME()),
                                Column.physical("c", DataTypes.STRING().notNull()),
                                Column.physical("d", DataTypes.FLOAT()),
                                Column.physical("e", DataTypes.TINYINT().notNull()),
                                Column.physical("f", DataTypes.DATE()),
                                Column.physical("g", DataTypes.TIMESTAMP().notNull())),
                        Collections.emptyList(),
                        UniqueConstraint.primaryKey("name", Arrays.asList("a", "g")));
        // b is TIME in millis-of-day (12345 -> 00:00:12), f is DATE in epoch days (-> 2003-10-20).
        GenericRowData rowData =
                GenericRowData.of(
                        1L,
                        12345,
                        StringData.fromString("ABCDE"),
                        12.12f,
                        (byte) 2,
                        12345,
                        TimestampData.fromLocalDateTime(
                                LocalDateTime.parse("2012-12-12T12:12:12")));
        String index = "writing-documents";
        ElasticsearchDynamicSinkFactoryBase sinkFactory = getDynamicSinkFactory();
        DynamicTableSink.SinkRuntimeProvider runtimeProvider =
                sinkFactory
                        .createDynamicTableSink(
                                getPrefilledTestContext(index).withSchema(schema).build())
                        .getSinkRuntimeProvider(new ElasticsearchUtil.MockContext());
        final SinkV2Provider sinkProvider = (SinkV2Provider) runtimeProvider;
        final Sink<RowData> sink = sinkProvider.createSink();
        StreamExecutionEnvironment environment =
                StreamExecutionEnvironment.getExecutionEnvironment();
        environment.setParallelism(PARALLELISM);
        rowData.setRowKind(RowKind.UPDATE_AFTER);
        environment.<RowData>fromElements(rowData).sinkTo(sink);
        environment.execute();
        RestHighLevelClient client = getClient();
        // Document id is derived from the primary key columns (a, g): "<a>_<g>".
        Map<String, Object> response = makeGetRequest(client, index, "1_2012-12-12T12:12:12");
        Map<Object, Object> expectedMap = new HashMap<>();
        expectedMap.put("a", 1);
        expectedMap.put("b", "00:00:12");
        expectedMap.put("c", "ABCDE");
        expectedMap.put("d", 12.12d);
        expectedMap.put("e", 2);
        expectedMap.put("f", "2003-10-20");
        expectedMap.put("g", "2012-12-12 12:12:12");
        assertThat(response).isEqualTo(expectedMap);
    }
    /**
     * Same round trip as {@code testWritingDocuments}, but going through a DDL-defined table and
     * the Table API insert path (including a computed column, which must not be written).
     */
    @Test
    public void testWritingDocumentsFromTableApi() throws Exception {
        TableEnvironment tableEnvironment =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        String index = "table-api";
        tableEnvironment.executeSql(
                "CREATE TABLE esTable ("
                        + "a BIGINT NOT NULL,\n"
                        + "b TIME,\n"
                        + "c STRING NOT NULL,\n"
                        + "d FLOAT,\n"
                        + "e TINYINT NOT NULL,\n"
                        + "f DATE,\n"
                        + "g TIMESTAMP NOT NULL,\n"
                        + "h as a + 2,\n"
                        + "PRIMARY KEY (a, g) NOT ENFORCED\n"
                        + ")\n"
                        + "WITH (\n"
                        + getConnectorSql(index)
                        + ")");
        tableEnvironment
                .fromValues(
                        row(
                                1L,
                                LocalTime.ofNanoOfDay(12345L * 1_000_000L),
                                "ABCDE",
                                12.12f,
                                (byte) 2,
                                LocalDate.ofEpochDay(12345),
                                LocalDateTime.parse("2012-12-12T12:12:12")))
                .executeInsert("esTable")
                .await();
        RestHighLevelClient client = getClient();
        // Document id is derived from the primary key columns (a, g): "<a>_<g>".
        Map<String, Object> response = makeGetRequest(client, index, "1_2012-12-12T12:12:12");
        Map<Object, Object> expectedMap = new HashMap<>();
        expectedMap.put("a", 1);
        expectedMap.put("b", "00:00:12");
        expectedMap.put("c", "ABCDE");
        expectedMap.put("d", 12.12d);
        expectedMap.put("e", 2);
        expectedMap.put("f", "2003-10-20");
        expectedMap.put("g", "2012-12-12 12:12:12");
        assertThat(response).isEqualTo(expectedMap);
    }
    /**
     * Without a primary key the sink lets Elasticsearch auto-generate document ids, so the test
     * verifies both rows via a bounded search poll instead of id-based GETs.
     */
    @Test
    public void testWritingDocumentsNoPrimaryKey() throws Exception {
        TableEnvironment tableEnvironment =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        String index = "no-primary-key";
        tableEnvironment.executeSql(
                "CREATE TABLE esTable ("
                        + "a BIGINT NOT NULL,\n"
                        + "b TIME,\n"
                        + "c STRING NOT NULL,\n"
                        + "d FLOAT,\n"
                        + "e TINYINT NOT NULL,\n"
                        + "f DATE,\n"
                        + "g TIMESTAMP NOT NULL\n"
                        + ")\n"
                        + "WITH (\n"
                        + getConnectorSql(index)
                        + ")");
        tableEnvironment
                .fromValues(
                        row(
                                1L,
                                LocalTime.ofNanoOfDay(12345L * 1_000_000L),
                                "ABCDE",
                                12.12f,
                                (byte) 2,
                                LocalDate.ofEpochDay(12345),
                                LocalDateTime.parse("2012-12-12T12:12:12")),
                        row(
                                2L,
                                LocalTime.ofNanoOfDay(12345L * 1_000_000L),
                                "FGHIJK",
                                13.13f,
                                (byte) 4,
                                LocalDate.ofEpochDay(12345),
                                LocalDateTime.parse("2013-12-12T13:13:13")))
                .executeInsert("esTable")
                .await();
        RestHighLevelClient client = getClient();
        // search API does not return documents that were not indexed, we might need to query
        // the index a few times
        Deadline deadline = Deadline.fromNow(Duration.ofSeconds(30));
        SearchHits hits;
        do {
            hits = makeSearchRequest(client, index);
            if (getTotalSearchHits(hits) < 2) {
                Thread.sleep(200);
            }
        } while (getTotalSearchHits(hits) < 2 && deadline.hasTimeLeft());
        if (getTotalSearchHits(hits) < 2) {
            throw new AssertionError("Could not retrieve results from Elasticsearch.");
        }
        // Ids are auto-generated, so compare the two documents as an unordered set.
        HashSet<Map<String, Object>> resultSet = new HashSet<>();
        resultSet.add(hits.getAt(0).getSourceAsMap());
        resultSet.add(hits.getAt(1).getSourceAsMap());
        Map<Object, Object> expectedMap1 = new HashMap<>();
        expectedMap1.put("a", 1);
        expectedMap1.put("b", "00:00:12");
        expectedMap1.put("c", "ABCDE");
        expectedMap1.put("d", 12.12d);
        expectedMap1.put("e", 2);
        expectedMap1.put("f", "2003-10-20");
        expectedMap1.put("g", "2012-12-12 12:12:12");
        Map<Object, Object> expectedMap2 = new HashMap<>();
        expectedMap2.put("a", 2);
        expectedMap2.put("b", "00:00:12");
        expectedMap2.put("c", "FGHIJK");
        expectedMap2.put("d", 13.13d);
        expectedMap2.put("e", 4);
        expectedMap2.put("f", "2003-10-20");
        expectedMap2.put("g", "2013-12-12 13:13:13");
        HashSet<Map<Object, Object>> expectedSet = new HashSet<>();
        expectedSet.add(expectedMap1);
        expectedSet.add(expectedMap2);
        assertThat(resultSet).isEqualTo(expectedSet);
    }
@Test
public void testWritingDocumentsWithDynamicIndex() throws Exception {
TableEnvironment tableEnvironment =
TableEnvironment.create(EnvironmentSettings.inStreamingMode());
String index = "dynamic-index-{b|yyyy-MM-dd}";
tableEnvironment.executeSql(
"CREATE TABLE esTable ("
+ "a BIGINT NOT NULL,\n"
+ "b TIMESTAMP NOT NULL,\n"
+ "PRIMARY KEY (a) NOT ENFORCED\n"
+ ")\n"
+ "WITH (\n"
+ getConnectorSql(index)
+ ")");
tableEnvironment
.fromValues(row(1L, LocalDateTime.parse("2012-12-12T12:12:12")))
.executeInsert("esTable")
.await();
RestHighLevelClient client = getClient();
Map<String, Object> response = makeGetRequest(client, "dynamic-index-2012-12-12", "1");
Map<Object, Object> expectedMap = new HashMap<>();
expectedMap.put("a", 1);
expectedMap.put("b", "2012-12-12 12:12:12");
assertThat(response).isEqualTo(expectedMap);
}
    // Verifies dynamic index names derived from system time ("now()"). Because the wall-clock
    // date can roll over while the test runs, the expected index name is captured both before
    // (dynamicIndex1) and after (dynamicIndex2) the insert, and either is accepted.
    @Test
    public void testWritingDocumentsWithDynamicIndexFromSystemTime() throws Exception {
        TableEnvironment tableEnvironment =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd");
        // Pin the session time zone so the generated index name is deterministic
        // with respect to the zone used for the expected names below.
        tableEnvironment.getConfig().set(TableConfigOptions.LOCAL_TIME_ZONE, "Asia/Shanghai");
        String dynamicIndex1 =
                "dynamic-index-"
                        + dateTimeFormatter.format(LocalDateTime.now(ZoneId.of("Asia/Shanghai")))
                        + "_index";
        String index = "dynamic-index-{now()|yyyy-MM-dd}_index";
        tableEnvironment.executeSql(
                "CREATE TABLE esTable ("
                        + "a BIGINT NOT NULL,\n"
                        + "b TIMESTAMP NOT NULL,\n"
                        + "PRIMARY KEY (a) NOT ENFORCED\n"
                        + ")\n"
                        + "WITH (\n"
                        + getConnectorSql(index)
                        + ")");
        String dynamicIndex2 =
                "dynamic-index-"
                        + dateTimeFormatter.format(LocalDateTime.now(ZoneId.of("Asia/Shanghai")))
                        + "_index";
        tableEnvironment
                .fromValues(row(1L, LocalDateTime.parse("2012-12-12T12:12:12")))
                .executeInsert("esTable")
                .await();
        RestHighLevelClient client = getClient();
        Map<String, Object> response;
        try {
            response = makeGetRequest(client, dynamicIndex1, "1");
        } catch (ElasticsearchStatusException e) {
            // If the date rolled over mid-test, the document is in the later index.
            if (e.status() == RestStatus.NOT_FOUND) {
                response = makeGetRequest(client, dynamicIndex2, "1");
            } else {
                throw e;
            }
        }
        Map<Object, Object> expectedMap = new HashMap<>();
        expectedMap.put("a", 1);
        expectedMap.put("b", "2012-12-12 12:12:12");
        assertThat(response).isEqualTo(expectedMap);
    }
}
| 5,778 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/IndexGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.DataType;
import org.junit.jupiter.api.Test;
import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.Arrays;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy;
import static org.junit.jupiter.api.Assumptions.assumingThat;
/** Suite tests for {@link IndexGenerator}. */
public class IndexGeneratorTest {

    // Column names of the shared test schema; each dynamic-index test references one of these.
    private static final List<String> fieldNames =
            Arrays.asList(
                    "id",
                    "item",
                    "log_ts",
                    "log_date",
                    "order_timestamp",
                    "log_time",
                    "local_datetime",
                    "local_date",
                    "local_time",
                    "local_timestamp",
                    "note",
                    "status");

    // Data types matching fieldNames one-to-one; several are bridged to legacy java.sql
    // classes so both conversion paths of the generator are exercised.
    private static final List<DataType> dataTypes =
            Arrays.asList(
                    DataTypes.INT(),
                    DataTypes.STRING(),
                    DataTypes.BIGINT(),
                    DataTypes.DATE().bridgedTo(Date.class),
                    DataTypes.TIMESTAMP().bridgedTo(Timestamp.class),
                    DataTypes.TIME().bridgedTo(Time.class),
                    DataTypes.TIMESTAMP().bridgedTo(LocalDateTime.class),
                    DataTypes.DATE().bridgedTo(LocalDate.class),
                    DataTypes.TIME().bridgedTo(LocalTime.class),
                    DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(),
                    DataTypes.STRING(),
                    DataTypes.BOOLEAN());

    // Two sample rows used by all generator tests.
    private static final List<RowData> rows =
            Arrays.asList(
                    GenericRowData.of(
                            1,
                            StringData.fromString("apple"),
                            Timestamp.valueOf("2020-03-18 12:12:14").getTime(),
                            (int) Date.valueOf("2020-03-18").toLocalDate().toEpochDay(),
                            TimestampData.fromTimestamp(Timestamp.valueOf("2020-03-18 12:12:14")),
                            (int)
                                    (Time.valueOf("12:12:14").toLocalTime().toNanoOfDay()
                                            / 1_000_000L),
                            TimestampData.fromLocalDateTime(
                                    LocalDateTime.of(2020, 3, 18, 12, 12, 14, 1000)),
                            (int) LocalDate.of(2020, 3, 18).toEpochDay(),
                            (int) (LocalTime.of(12, 13, 14, 2000).toNanoOfDay() / 1_000_000L),
                            TimestampData.fromInstant(
                                    LocalDateTime.of(2020, 3, 18, 3, 12, 14, 1000)
                                            .atZone(ZoneId.of("Asia/Shanghai"))
                                            .toInstant()),
                            "test1",
                            true),
                    GenericRowData.of(
                            2,
                            StringData.fromString("peanut"),
                            Timestamp.valueOf("2020-03-19 12:22:14").getTime(),
                            (int) Date.valueOf("2020-03-19").toLocalDate().toEpochDay(),
                            TimestampData.fromTimestamp(Timestamp.valueOf("2020-03-19 12:22:21")),
                            (int)
                                    (Time.valueOf("12:22:21").toLocalTime().toNanoOfDay()
                                            / 1_000_000L),
                            TimestampData.fromLocalDateTime(
                                    LocalDateTime.of(2020, 3, 19, 12, 22, 14, 1000)),
                            (int) LocalDate.of(2020, 3, 19).toEpochDay(),
                            (int) (LocalTime.of(12, 13, 14, 2000).toNanoOfDay() / 1_000_000L),
                            TimestampData.fromInstant(
                                    LocalDateTime.of(2020, 3, 19, 20, 22, 14, 1000)
                                            .atZone(ZoneId.of("America/Los_Angeles"))
                                            .toInstant()),
                            "test2",
                            false));

    // Only meaningful when the JVM zone is UTC; the instants above shift to the previous/next
    // day when rendered in UTC, hence the 03-17/03-20 expectations.
    @Test
    public void testDynamicIndexFromTimestampTzUTC() {
        assumingThat(
                ZoneId.systemDefault().equals(ZoneId.of("UTC")),
                () -> {
                    IndexGenerator indexGenerator =
                            IndexGeneratorFactory.createIndexGenerator(
                                    "{local_timestamp|yyyy_MM_dd_HH-ss}_index",
                                    fieldNames,
                                    dataTypes);
                    indexGenerator.open();
                    assertThat(indexGenerator.generate(rows.get(0)))
                            .isEqualTo("2020_03_17_19-14_index");
                    assertThat(indexGenerator.generate(rows.get(1)))
                            .isEqualTo("2020_03_20_03-14_index");
                });
    }

    @Test
    public void testDynamicIndexFromTimestampTzWithSpecificTimezone() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "{local_timestamp|yyyy_MM_dd_HH-ss}_index",
                        fieldNames,
                        dataTypes,
                        ZoneId.of("Europe/Berlin"));
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("2020_03_17_20-14_index");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("2020_03_20_04-14_index");
    }

    @Test
    public void testDynamicIndexFromTimestamp() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "{order_timestamp|yyyy_MM_dd_HH-ss}_index", fieldNames, dataTypes);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("2020_03_18_12-14_index");
        IndexGenerator indexGenerator1 =
                IndexGeneratorFactory.createIndexGenerator(
                        "{order_timestamp|yyyy_MM_dd_HH_mm}_index", fieldNames, dataTypes);
        indexGenerator1.open();
        assertThat(indexGenerator1.generate(rows.get(1))).isEqualTo("2020_03_19_12_22_index");
    }

    @Test
    public void testDynamicIndexFromLocalDateTime() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "{local_datetime|yyyy_MM_dd_HH-ss}_index", fieldNames, dataTypes);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("2020_03_18_12-14_index");
        IndexGenerator indexGenerator1 =
                IndexGeneratorFactory.createIndexGenerator(
                        "{local_datetime|yyyy_MM_dd_HH_mm}_index", fieldNames, dataTypes);
        indexGenerator1.open();
        assertThat(indexGenerator1.generate(rows.get(1))).isEqualTo("2020_03_19_12_22_index");
    }

    @Test
    public void testDynamicIndexFromDate() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "my-index-{log_date|yyyy/MM/dd}", fieldNames, dataTypes);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-2020/03/18");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-2020/03/19");
    }

    @Test
    public void testDynamicIndexFromLocalDate() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "my-index-{local_date|yyyy/MM/dd}", fieldNames, dataTypes);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-2020/03/18");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-2020/03/19");
    }

    @Test
    public void testDynamicIndexFromTime() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "my-index-{log_time|HH-mm}", fieldNames, dataTypes);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12-12");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12-22");
    }

    @Test
    public void testDynamicIndexFromLocalTime() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "my-index-{local_time|HH-mm}", fieldNames, dataTypes);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12-13");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12-13");
    }

    // An empty pattern after '|' falls back to the generator's default format.
    @Test
    public void testDynamicIndexDefaultFormat() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator(
                        "my-index-{local_time|}", fieldNames, dataTypes);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12_13_14");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12_13_14");
    }

    @Test
    public void testDynamicIndexFromSystemTime() {
        List<String> supportedUseCases =
                Arrays.asList(
                        "now()",
                        "NOW()",
                        "now( )",
                        "NOW(\t)",
                        "\t NOW( ) \t",
                        "current_timestamp",
                        "CURRENT_TIMESTAMP",
                        "\tcurrent_timestamp\t",
                        " current_timestamp ");
        supportedUseCases.forEach(
                f -> {
                    DateTimeFormatter dateTimeFormatter =
                            DateTimeFormatter.ofPattern("yyyy_MM_dd");
                    IndexGenerator indexGenerator =
                            IndexGeneratorFactory.createIndexGenerator(
                                    String.format("my-index-{%s|yyyy_MM_dd}", f),
                                    fieldNames,
                                    dataTypes);
                    indexGenerator.open();
                    // The date may change during the running of the unit test.
                    // Generate expected index-name based on the current time
                    // before and after calling the generate method.
                    String expectedIndex1 =
                            "my-index-" + LocalDateTime.now().format(dateTimeFormatter);
                    String actualIndex = indexGenerator.generate(rows.get(1));
                    String expectedIndex2 =
                            "my-index-" + LocalDateTime.now().format(dateTimeFormatter);
                    assertThat(
                                    actualIndex.equals(expectedIndex1)
                                            || actualIndex.equals(expectedIndex2))
                            .isTrue();
                });
        List<String> invalidUseCases =
                Arrays.asList(
                        "now",
                        "now(",
                        "NOW",
                        "NOW)",
                        "current_timestamp()",
                        "CURRENT_TIMESTAMP()",
                        "CURRENT_timestamp");
        invalidUseCases.forEach(
                f -> {
                    String expectedExceptionMsg =
                            String.format(
                                    "Unknown field '%s' in index pattern 'my-index-{%s|yyyy_MM_dd}',"
                                            + " please check the field name.",
                                    f, f);
                    // FIX: the previous try/catch asserted the message only when an exception
                    // was actually thrown; if creation unexpectedly succeeded, the case passed
                    // silently. assertThatThrownBy fails the test in that situation.
                    assertThatThrownBy(
                                    () ->
                                            IndexGeneratorFactory.createIndexGenerator(
                                                            String.format(
                                                                    "my-index-{%s|yyyy_MM_dd}",
                                                                    f),
                                                            fieldNames,
                                                            dataTypes)
                                                    .open())
                            .isInstanceOf(TableException.class)
                            .hasMessage(expectedExceptionMsg);
                });
    }

    @Test
    public void testGeneralDynamicIndex() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator("index_{item}", fieldNames, dataTypes);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("index_apple");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("index_peanut");
    }

    @Test
    public void testStaticIndex() {
        IndexGenerator indexGenerator =
                IndexGeneratorFactory.createIndexGenerator("my-index", fieldNames, dataTypes);
        indexGenerator.open();
        assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index");
        assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index");
    }

    @Test
    public void testUnknownField() {
        String expectedExceptionMsg =
                "Unknown field 'unknown_ts' in index pattern 'my-index-{unknown_ts|yyyy-MM-dd}',"
                        + " please check the field name.";
        assertThatThrownBy(
                        () ->
                                IndexGeneratorFactory.createIndexGenerator(
                                        "my-index-{unknown_ts|yyyy-MM-dd}", fieldNames, dataTypes))
                .isInstanceOf(TableException.class)
                .hasMessage(expectedExceptionMsg);
    }

    @Test
    public void testUnsupportedTimeType() {
        String expectedExceptionMsg =
                "Unsupported type 'INT' found in Elasticsearch dynamic index field, "
                        + "time-related pattern only support types are: DATE,TIME,TIMESTAMP.";
        assertThatThrownBy(
                        () ->
                                IndexGeneratorFactory.createIndexGenerator(
                                        "my-index-{id|yyyy-MM-dd}", fieldNames, dataTypes))
                .isInstanceOf(TableException.class)
                .hasMessage(expectedExceptionMsg);
    }

    @Test
    public void testUnsupportedMultiParametersType() {
        String expectedExceptionMsg =
                "Chaining dynamic index pattern my-index-{local_date}-{local_time} is not supported,"
                        + " only support single dynamic index pattern.";
        assertThatThrownBy(
                        () ->
                                IndexGeneratorFactory.createIndexGenerator(
                                        "my-index-{local_date}-{local_time}",
                                        fieldNames,
                                        dataTypes))
                .isInstanceOf(TableException.class)
                .hasMessage(expectedExceptionMsg);
    }

    @Test
    public void testUnsupportedIndexFieldType() {
        String expectedExceptionMsg =
                "Unsupported type BOOLEAN of index field, Supported types are:"
                        + " [DATE, TIME_WITHOUT_TIME_ZONE, TIMESTAMP_WITHOUT_TIME_ZONE, TIMESTAMP_WITH_TIME_ZONE,"
                        + " TIMESTAMP_WITH_LOCAL_TIME_ZONE, VARCHAR, CHAR, TINYINT, INTEGER, BIGINT]";
        assertThatThrownBy(
                        () ->
                                IndexGeneratorFactory.createIndexGenerator(
                                        "index_{status}", fieldNames, dataTypes))
                .isInstanceOf(IllegalArgumentException.class)
                .hasMessage(expectedExceptionMsg);
    }
}
| 5,779 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.java.tuple.Tuple2;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.search.SearchRequest;
import javax.annotation.Nullable;
import java.io.IOException;
import java.io.Serializable;
import java.util.concurrent.atomic.AtomicLong;
/**
* An {@link ElasticsearchApiCallBridge} is used to bridge incompatible Elasticsearch Java API calls
* across different versions. This includes calls to create Elasticsearch clients, handle failed
* item responses, etc. Any incompatible Elasticsearch Java APIs should be bridged using this
* interface.
*
* <p>Implementations are allowed to be stateful. For example, for Elasticsearch 1.x, since
* connecting via an embedded node is allowed, the call bridge will hold reference to the created
* embedded node. Each instance of the sink will hold exactly one instance of the call bridge, and
* state cleanup is performed when the sink is closed.
*
* @param <C> The Elasticsearch client, that implements {@link AutoCloseable}.
*/
@Internal
public interface ElasticsearchApiCallBridge<C extends AutoCloseable> extends Serializable {
    /**
     * Creates an Elasticsearch client implementing {@link AutoCloseable}.
     *
     * @return The created client.
     */
    C createClient();
    /**
     * Creates a {@link BulkProcessor.Builder} for creating the bulk processor.
     *
     * @param client the Elasticsearch client.
     * @param listener the bulk processor listener.
     * @return the bulk processor builder.
     */
    BulkProcessor.Builder createBulkProcessorBuilder(C client, BulkProcessor.Listener listener);
    /**
     * Executes a search using the Search API.
     *
     * @param client the Elasticsearch client.
     * @param searchRequest A request to execute search against one or more indices (or all).
     * @return a pair extracted from the search response; the exact contents of the
     *     tuple are version-specific — presumably scroll id plus hit values, TODO confirm
     *     against the concrete implementations.
     * @throws IOException if the request to the cluster fails.
     */
    Tuple2<String, String[]> search(C client, SearchRequest searchRequest) throws IOException;
    /**
     * Closes this client and releases any system resources associated with it.
     *
     * @param client the Elasticsearch client.
     * @throws IOException if closing the client fails.
     */
    void close(C client) throws IOException;
    /**
     * Extracts the cause of failure of a bulk item action.
     *
     * @param bulkItemResponse the bulk item response to extract cause of failure
     * @return the extracted {@link Throwable} from the response ({@code null} is the response is
     *     successful).
     */
    @Nullable
    Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse);
    /**
     * Sets the bulk flush interval, in milliseconds on the provided {@link BulkProcessor.Builder}.
     * The builder will be later on used to instantiate the actual {@link BulkProcessor}.
     *
     * @param builder the {@link BulkProcessor.Builder} to configure.
     * @param flushIntervalMillis the flush interval in milliseconds.
     */
    void configureBulkProcessorFlushInterval(
            BulkProcessor.Builder builder, long flushIntervalMillis);
    /**
     * Set backoff-related configurations on the provided {@link BulkProcessor.Builder}. The builder
     * will be later on used to instantiate the actual {@link BulkProcessor}.
     *
     * @param builder the {@link BulkProcessor.Builder} to configure.
     * @param flushBackoffPolicy user-provided backoff retry settings ({@code null} if the user
     *     disabled backoff retries).
     */
    void configureBulkProcessorBackoff(
            BulkProcessor.Builder builder,
            @Nullable ElasticsearchSinkBase.BulkFlushBackoffPolicy flushBackoffPolicy);
    /**
     * Verify the client connection by making a test request/ping to the Elasticsearch cluster.
     *
     * <p>Called by {@link ElasticsearchSinkBase#open(org.apache.flink.configuration.Configuration)}
     * after creating the client. This makes sure the underlying client is closed if the connection
     * is not successful and preventing thread leak.
     *
     * @param client the Elasticsearch client.
     * @throws IOException if the cluster cannot be reached.
     */
    void verifyClientConnection(C client) throws IOException;
    /**
     * Creates a {@link RequestIndexer} that is able to work with {@link BulkProcessor} binary
     * compatible.
     */
    RequestIndexer createBulkProcessorIndexer(
            BulkProcessor bulkProcessor,
            boolean flushOnCheckpoint,
            AtomicLong numPendingRequestsRef);
    /** Perform any necessary state cleanup. */
    default void cleanup() {
        // nothing to cleanup by default
    }
}
| 5,780 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/RequestIndexer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch;
import org.apache.flink.annotation.PublicEvolving;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
/**
* Users add multiple delete, index or update requests to a {@link RequestIndexer} to prepare them
* for sending to an Elasticsearch cluster.
*
* @deprecated This has been deprecated and will be removed in the future.
*/
@Deprecated
@PublicEvolving
public interface RequestIndexer {
    /**
     * Buffers multiple {@link IndexRequest}s for later transmission to Elasticsearch.
     *
     * @param indexRequests the index requests to buffer.
     */
    void add(IndexRequest... indexRequests);

    /**
     * Buffers multiple {@link DeleteRequest}s for later transmission to Elasticsearch.
     *
     * @param deleteRequests the delete requests to buffer.
     */
    void add(DeleteRequest... deleteRequests);

    /**
     * Buffers multiple {@link UpdateRequest}s for later transmission to Elasticsearch.
     *
     * @param updateRequests the update requests to buffer.
     */
    void add(UpdateRequest... updateRequests);

    /**
     * Buffers generic {@link ActionRequest}s by dispatching each one to the matching typed
     * overload. Only index, delete and update requests are accepted.
     *
     * @param actionRequests the requests to buffer.
     * @throws IllegalArgumentException if a request is of any other kind.
     * @deprecated use the {@link DeleteRequest}, {@link IndexRequest} or {@link UpdateRequest}
     */
    @Deprecated
    default void add(ActionRequest... actionRequests) {
        for (ActionRequest request : actionRequests) {
            if (request instanceof IndexRequest) {
                add((IndexRequest) request);
            } else if (request instanceof DeleteRequest) {
                add((DeleteRequest) request);
            } else if (request instanceof UpdateRequest) {
                add((UpdateRequest) request);
            } else {
                throw new IllegalArgumentException(
                        "RequestIndexer only supports Index, Delete and Update requests");
            }
        }
    }
}
| 5,781 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.util.InstantiationUtil;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.rest.RestStatus;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* Base class for all Flink Elasticsearch Sinks.
*
* <p>This class implements the common behaviour across Elasticsearch versions, such as the use of
* an internal {@link BulkProcessor} to buffer multiple {@link ActionRequest}s before sending the
* requests to the cluster, as well as passing input records to the user provided {@link
* ElasticsearchSinkFunction} for processing.
*
* <p>The version specific API calls for different Elasticsearch versions should be defined by a
* concrete implementation of a {@link ElasticsearchApiCallBridge}, which is provided to the
* constructor of this class. This call bridge is used, for example, to create a Elasticsearch
* {@link Client}, handle failed item responses, etc.
*
* @param <T> Type of the elements handled by this sink
* @param <C> Type of the Elasticsearch client, which implements {@link AutoCloseable}
*/
@Internal
public abstract class ElasticsearchSinkBase<T, C extends AutoCloseable> extends RichSinkFunction<T>
implements CheckpointedFunction {
private static final long serialVersionUID = -1007596293618451942L;
// ------------------------------------------------------------------------
// Internal bulk processor configuration
// ------------------------------------------------------------------------
public static final String CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS = "bulk.flush.max.actions";
public static final String CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB = "bulk.flush.max.size.mb";
public static final String CONFIG_KEY_BULK_FLUSH_INTERVAL_MS = "bulk.flush.interval.ms";
public static final String CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE = "bulk.flush.backoff.enable";
public static final String CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE = "bulk.flush.backoff.type";
public static final String CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES = "bulk.flush.backoff.retries";
public static final String CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY = "bulk.flush.backoff.delay";
    /** Used to control whether the retry delay should increase exponentially or remain constant. */
    @PublicEvolving
    public enum FlushBackoffType {
        // Same delay between every retry attempt.
        CONSTANT,
        // Delay doubles (grows exponentially) with each retry attempt.
        EXPONENTIAL
    }
    /**
     * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to
     * resource constraints (i.e. the client's internal thread pool is full), the backoff policy
     * decides how long the bulk processor will wait before the operation is retried internally.
     *
     * <p>This is a proxy for version specific backoff policies.
     */
    public static class BulkFlushBackoffPolicy implements Serializable {
        private static final long serialVersionUID = -6022851996101826049L;
        // the default values follow the Elasticsearch default settings for BulkProcessor
        private FlushBackoffType backoffType = FlushBackoffType.EXPONENTIAL;
        private int maxRetryCount = 8;
        private long delayMillis = 50;
        /** Returns how the retry delay evolves between attempts. */
        public FlushBackoffType getBackoffType() {
            return backoffType;
        }
        /** Returns the maximum number of retry attempts before giving up. */
        public int getMaxRetryCount() {
            return maxRetryCount;
        }
        /** Returns the (initial) delay between retries, in milliseconds. */
        public long getDelayMillis() {
            return delayMillis;
        }
        /** Sets the backoff type; must not be {@code null}. */
        public void setBackoffType(FlushBackoffType backoffType) {
            this.backoffType = checkNotNull(backoffType);
        }
        /** Sets the maximum retry count; must be non-negative. */
        public void setMaxRetryCount(int maxRetryCount) {
            checkArgument(maxRetryCount >= 0);
            this.maxRetryCount = maxRetryCount;
        }
        /** Sets the retry delay in milliseconds; must be non-negative. */
        public void setDelayMillis(long delayMillis) {
            checkArgument(delayMillis >= 0);
            this.delayMillis = delayMillis;
        }
    }
    // Bulk-processor tuning extracted from the user-provided config (see constructor).
    // Boxed types — presumably null means "use the Elasticsearch default"; TODO confirm
    // against the (not fully visible) constructor logic.
    private final Integer bulkProcessorFlushMaxActions;
    private final Integer bulkProcessorFlushMaxSizeMb;
    private final Long bulkProcessorFlushIntervalMillis;
    private final BulkFlushBackoffPolicy bulkProcessorFlushBackoffPolicy;
// ------------------------------------------------------------------------
// User-facing API and configuration
// ------------------------------------------------------------------------
/**
* The function that is used to construct multiple {@link ActionRequest ActionRequests} from
* each incoming element.
*/
private final ElasticsearchSinkFunction<T> elasticsearchSinkFunction;
/** User-provided handler for failed {@link ActionRequest ActionRequests}. */
private final ActionRequestFailureHandler failureHandler;
/**
* If true, the producer will wait until all outstanding action requests have been sent to
* Elasticsearch.
*/
private boolean flushOnCheckpoint = true;
/**
* Provided to the user via the {@link ElasticsearchSinkFunction} to add {@link ActionRequest
* ActionRequests}.
*/
private transient RequestIndexer requestIndexer;
/**
* Provided to the {@link ActionRequestFailureHandler} to allow users to re-index failed
* requests.
*/
private transient BufferingNoOpRequestIndexer failureRequestIndexer;
// ------------------------------------------------------------------------
// Internals for the Flink Elasticsearch Sink
// ------------------------------------------------------------------------
/** Call bridge for different version-specific. */
private final ElasticsearchApiCallBridge<C> callBridge;
/**
* Number of pending action requests not yet acknowledged by Elasticsearch. This value is
* maintained only if {@link ElasticsearchSinkBase#flushOnCheckpoint} is {@code true}.
*
* <p>This is incremented whenever the user adds (or re-adds through the {@link
* ActionRequestFailureHandler}) requests to the {@link RequestIndexer}. It is decremented for
* each completed request of a bulk request, in {@link BulkProcessor.Listener#afterBulk(long,
* BulkRequest, BulkResponse)} and {@link BulkProcessor.Listener#afterBulk(long, BulkRequest,
* Throwable)}.
*/
private AtomicLong numPendingRequests = new AtomicLong(0);
/** Elasticsearch client created using the call bridge. */
private transient C client;
/** Bulk processor to buffer and send requests to Elasticsearch, created using the client. */
private transient BulkProcessor bulkProcessor;
/**
* This is set from inside the {@link BulkProcessor.Listener} if a {@link Throwable} was thrown
* in callbacks and the user considered it should fail the sink via the {@link
* ActionRequestFailureHandler#onFailure(ActionRequest, Throwable, int, RequestIndexer)} method.
*
* <p>Errors will be checked and rethrown before processing each input element, and when the
* sink is closed.
*/
private final AtomicReference<Throwable> failureThrowable = new AtomicReference<>();
public ElasticsearchSinkBase(
ElasticsearchApiCallBridge<C> callBridge,
Map<String, String> userConfig,
ElasticsearchSinkFunction<T> elasticsearchSinkFunction,
ActionRequestFailureHandler failureHandler) {
this.callBridge = checkNotNull(callBridge);
this.elasticsearchSinkFunction = checkNotNull(elasticsearchSinkFunction);
this.failureHandler = checkNotNull(failureHandler);
// we eagerly check if the user-provided sink function and failure handler is serializable;
// otherwise, if they aren't serializable, users will merely get a non-informative error
// message
// "ElasticsearchSinkBase is not serializable"
checkArgument(
InstantiationUtil.isSerializable(elasticsearchSinkFunction),
"The implementation of the provided ElasticsearchSinkFunction is not serializable. "
+ "The object probably contains or references non-serializable fields.");
checkArgument(
InstantiationUtil.isSerializable(failureHandler),
"The implementation of the provided ActionRequestFailureHandler is not serializable. "
+ "The object probably contains or references non-serializable fields.");
// extract and remove bulk processor related configuration from the user-provided config,
// so that the resulting user config only contains configuration related to the
// Elasticsearch client.
checkNotNull(userConfig);
// copy config so we can remove entries without side-effects
userConfig = new HashMap<>(userConfig);
ParameterTool params = ParameterTool.fromMap(userConfig);
if (params.has(CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS)) {
bulkProcessorFlushMaxActions = params.getInt(CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS);
userConfig.remove(CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS);
} else {
bulkProcessorFlushMaxActions = null;
}
if (params.has(CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB)) {
bulkProcessorFlushMaxSizeMb = params.getInt(CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB);
userConfig.remove(CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB);
} else {
bulkProcessorFlushMaxSizeMb = null;
}
if (params.has(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS)) {
bulkProcessorFlushIntervalMillis = params.getLong(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS);
userConfig.remove(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS);
} else {
bulkProcessorFlushIntervalMillis = null;
}
boolean bulkProcessorFlushBackoffEnable =
params.getBoolean(CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE, true);
userConfig.remove(CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE);
if (bulkProcessorFlushBackoffEnable) {
this.bulkProcessorFlushBackoffPolicy = new BulkFlushBackoffPolicy();
if (params.has(CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE)) {
bulkProcessorFlushBackoffPolicy.setBackoffType(
FlushBackoffType.valueOf(params.get(CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE)));
userConfig.remove(CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE);
}
if (params.has(CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES)) {
bulkProcessorFlushBackoffPolicy.setMaxRetryCount(
params.getInt(CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES));
userConfig.remove(CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES);
}
if (params.has(CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY)) {
bulkProcessorFlushBackoffPolicy.setDelayMillis(
params.getLong(CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY));
userConfig.remove(CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY);
}
} else {
bulkProcessorFlushBackoffPolicy = null;
}
}
/**
* Disable flushing on checkpoint. When disabled, the sink will not wait for all pending action
* requests to be acknowledged by Elasticsearch on checkpoints.
*
* <p>NOTE: If flushing on checkpoint is disabled, the Flink Elasticsearch Sink does NOT provide
* any strong guarantees for at-least-once delivery of action requests.
*/
public void disableFlushOnCheckpoint() {
this.flushOnCheckpoint = false;
}
@Override
public void open(Configuration parameters) throws Exception {
client = callBridge.createClient();
callBridge.verifyClientConnection(client);
bulkProcessor = buildBulkProcessor(new BulkProcessorListener());
requestIndexer =
callBridge.createBulkProcessorIndexer(
bulkProcessor, flushOnCheckpoint, numPendingRequests);
failureRequestIndexer = new BufferingNoOpRequestIndexer();
elasticsearchSinkFunction.open(getRuntimeContext());
}
@Override
public void invoke(T value, Context context) throws Exception {
checkAsyncErrorsAndRequests();
elasticsearchSinkFunction.process(value, getRuntimeContext(), requestIndexer);
}
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
// no initialization needed
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
checkAsyncErrorsAndRequests();
if (flushOnCheckpoint) {
while (numPendingRequests.get() != 0) {
bulkProcessor.flush();
checkAsyncErrorsAndRequests();
}
}
}
@Override
public void close() throws Exception {
elasticsearchSinkFunction.close();
if (bulkProcessor != null) {
bulkProcessor.close();
bulkProcessor = null;
}
if (client != null) {
client.close();
client = null;
}
callBridge.cleanup();
// make sure any errors from callbacks are rethrown
checkErrorAndRethrow();
}
/**
* Build the {@link BulkProcessor}.
*
* <p>Note: this is exposed for testing purposes.
*/
@VisibleForTesting
protected BulkProcessor buildBulkProcessor(BulkProcessor.Listener listener) {
checkNotNull(listener);
BulkProcessor.Builder bulkProcessorBuilder =
callBridge.createBulkProcessorBuilder(client, listener);
// This makes flush() blocking
bulkProcessorBuilder.setConcurrentRequests(0);
if (bulkProcessorFlushMaxActions != null) {
bulkProcessorBuilder.setBulkActions(bulkProcessorFlushMaxActions);
}
if (bulkProcessorFlushMaxSizeMb != null) {
configureBulkSize(bulkProcessorBuilder);
}
if (bulkProcessorFlushIntervalMillis != null) {
configureFlushInterval(bulkProcessorBuilder);
}
// if backoff retrying is disabled, bulkProcessorFlushBackoffPolicy will be null
callBridge.configureBulkProcessorBackoff(
bulkProcessorBuilder, bulkProcessorFlushBackoffPolicy);
return bulkProcessorBuilder.build();
}
private void configureBulkSize(BulkProcessor.Builder bulkProcessorBuilder) {
final ByteSizeUnit sizeUnit;
if (bulkProcessorFlushMaxSizeMb == -1) {
// bulk size can be disabled with -1, however the ByteSizeValue constructor accepts -1
// only with BYTES as the size unit
sizeUnit = ByteSizeUnit.BYTES;
} else {
sizeUnit = ByteSizeUnit.MB;
}
bulkProcessorBuilder.setBulkSize(new ByteSizeValue(bulkProcessorFlushMaxSizeMb, sizeUnit));
}
private void configureFlushInterval(BulkProcessor.Builder bulkProcessorBuilder) {
if (bulkProcessorFlushIntervalMillis == -1) {
bulkProcessorBuilder.setFlushInterval(null);
} else {
callBridge.configureBulkProcessorFlushInterval(
bulkProcessorBuilder, bulkProcessorFlushIntervalMillis);
}
}
private void checkErrorAndRethrow() {
Throwable cause = failureThrowable.get();
if (cause != null) {
throw new RuntimeException("An error occurred in ElasticsearchSink.", cause);
}
}
private void checkAsyncErrorsAndRequests() {
checkErrorAndRethrow();
failureRequestIndexer.processBufferedRequests(requestIndexer);
}
private class BulkProcessorListener implements BulkProcessor.Listener {
@Override
public void beforeBulk(long executionId, BulkRequest request) {}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
if (response.hasFailures()) {
BulkItemResponse itemResponse;
Throwable failure;
RestStatus restStatus;
DocWriteRequest actionRequest;
try {
for (int i = 0; i < response.getItems().length; i++) {
itemResponse = response.getItems()[i];
failure = callBridge.extractFailureCauseFromBulkItemResponse(itemResponse);
if (failure != null) {
restStatus = itemResponse.getFailure().getStatus();
actionRequest = request.requests().get(i);
if (restStatus == null) {
if (actionRequest instanceof ActionRequest) {
failureHandler.onFailure(
(ActionRequest) actionRequest,
failure,
-1,
failureRequestIndexer);
} else {
throw new UnsupportedOperationException(
"The sink currently only supports ActionRequests");
}
} else {
if (actionRequest instanceof ActionRequest) {
failureHandler.onFailure(
(ActionRequest) actionRequest,
failure,
restStatus.getStatus(),
failureRequestIndexer);
} else {
throw new UnsupportedOperationException(
"The sink currently only supports ActionRequests");
}
}
}
}
} catch (Throwable t) {
// fail the sink and skip the rest of the items
// if the failure handler decides to throw an exception
failureThrowable.compareAndSet(null, t);
}
}
if (flushOnCheckpoint) {
numPendingRequests.getAndAdd(-request.numberOfActions());
}
}
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
try {
for (DocWriteRequest writeRequest : request.requests()) {
if (writeRequest instanceof ActionRequest) {
failureHandler.onFailure(
(ActionRequest) writeRequest, failure, -1, failureRequestIndexer);
} else {
throw new UnsupportedOperationException(
"The sink currently only supports ActionRequests");
}
}
} catch (Throwable t) {
// fail the sink and skip the rest of the items
// if the failure handler decides to throw an exception
failureThrowable.compareAndSet(null, t);
}
if (flushOnCheckpoint) {
numPendingRequests.getAndAdd(-request.numberOfActions());
}
}
}
@VisibleForTesting
long getNumPendingRequests() {
if (flushOnCheckpoint) {
return numPendingRequests.get();
} else {
throw new UnsupportedOperationException(
"The number of pending requests is not maintained when flushing on checkpoint is disabled.");
}
}
}
| 5,782 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.common.functions.Function;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.elasticsearch.action.ActionRequest;
import java.io.Serializable;
/**
 * Turns each element of a stream into one or more {@link ActionRequest ActionRequests}.
 *
 * <p>Sinks use implementations of this interface to prepare elements before sending them to
 * Elasticsearch.
 *
 * <p>Example:
 *
 * <pre>{@code
 * private static class TestElasticSearchSinkFunction implements
 *     ElasticsearchSinkFunction<Tuple2<Integer, String>> {
 *
 *     public IndexRequest createIndexRequest(Tuple2<Integer, String> element) {
 *         Map<String, Object> json = new HashMap<>();
 *         json.put("data", element.f1);
 *
 *         return Requests.indexRequest()
 *             .index("my-index")
 *             .type("my-type")
 *             .id(element.f0.toString())
 *             .source(json);
 *     }
 *
 *     public void process(Tuple2<Integer, String> element, RuntimeContext ctx, RequestIndexer indexer) {
 *         indexer.add(createIndexRequest(element));
 *     }
 * }
 *
 * }</pre>
 *
 * @param <T> the type of elements handled by this {@code ElasticsearchSinkFunction}
 * @deprecated This has been deprecated and will be removed in the future.
 */
@Deprecated
@PublicEvolving
public interface ElasticsearchSinkFunction<T> extends Serializable, Function {

    /**
     * One-time initialization hook; invoked before any element is processed when
     * {@link #open(RuntimeContext)} has not been overridden.
     */
    default void open() throws Exception {}

    /**
     * One-time initialization hook with access to the sink's runtime context; the default
     * implementation simply delegates to {@link #open()}.
     */
    default void open(RuntimeContext ctx) throws Exception {
        open();
    }

    /** Tear-down hook; invoked when the sink closes. */
    default void close() throws Exception {}

    /**
     * Processes one incoming element into multiple {@link ActionRequest ActionRequests}, adding
     * the produced requests to the provided {@link RequestIndexer}.
     *
     * @param element incoming element to process
     * @param ctx runtime context containing information about the sink instance
     * @param indexer request indexer that {@code ActionRequest} should be added to
     */
    void process(T element, RuntimeContext ctx, RequestIndexer indexer);
}
| 5,783 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BufferingNoOpRequestIndexer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch;
import org.apache.flink.annotation.Internal;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import javax.annotation.concurrent.NotThreadSafe;
import java.util.Collections;
import java.util.concurrent.ConcurrentLinkedQueue;
/**
 * Implementation of a {@link RequestIndexer} that buffers {@link ActionRequest ActionRequests}
 * before re-sending them to the Elasticsearch cluster upon request.
 */
@Internal
@NotThreadSafe
class BufferingNoOpRequestIndexer implements RequestIndexer {

    /** Requests waiting to be re-indexed; drained by {@link #processBufferedRequests}. */
    private final ConcurrentLinkedQueue<ActionRequest> bufferedRequests;

    BufferingNoOpRequestIndexer() {
        // Field is final (the queue instance never changes) and uses the diamond operator.
        this.bufferedRequests = new ConcurrentLinkedQueue<>();
    }

    @Override
    public void add(DeleteRequest... deleteRequests) {
        Collections.addAll(bufferedRequests, deleteRequests);
    }

    @Override
    public void add(IndexRequest... indexRequests) {
        Collections.addAll(bufferedRequests, indexRequests);
    }

    @Override
    public void add(UpdateRequest... updateRequests) {
        Collections.addAll(bufferedRequests, updateRequests);
    }

    /**
     * Re-adds every buffered request to {@code actualIndexer}, dispatching on the concrete
     * request type, and clears the buffer.
     */
    void processBufferedRequests(RequestIndexer actualIndexer) {
        for (ActionRequest request : bufferedRequests) {
            if (request instanceof IndexRequest) {
                actualIndexer.add((IndexRequest) request);
            } else if (request instanceof DeleteRequest) {
                actualIndexer.add((DeleteRequest) request);
            } else if (request instanceof UpdateRequest) {
                actualIndexer.add((UpdateRequest) request);
            }
        }
        bufferedRequests.clear();
    }
}
| 5,784 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ActionRequestFailureHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch;
import org.apache.flink.annotation.PublicEvolving;
import org.elasticsearch.action.ActionRequest;
import java.io.Serializable;
/**
 * User-supplied strategy deciding what to do with an {@link ActionRequest} that Elasticsearch
 * failed to process: drop it, reprocess a malformed document, or simply request it to be sent
 * again if the failure was only temporary.
 *
 * <p>Example:
 *
 * <pre>{@code
 * private static class ExampleActionRequestFailureHandler implements ActionRequestFailureHandler {
 *
 * 	@Override
 * 	void onFailure(ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer) throws Throwable {
 * 		if (ExceptionUtils.findThrowable(failure, EsRejectedExecutionException.class).isPresent()) {
 * 			// full queue; re-add document for indexing
 * 			indexer.add(action);
 * 		} else if (ExceptionUtils.findThrowable(failure, ElasticsearchParseException.class).isPresent()) {
 * 			// malformed document; simply drop request without failing sink
 * 		} else {
 * 			// for all other failures, fail the sink;
 * 			// here the failure is simply rethrown, but users can also choose to throw custom exceptions
 * 			throw failure;
 * 		}
 * 	}
 * }
 *
 * }</pre>
 *
 * <p>The handler above re-adds requests that failed because node queues were saturated and drops
 * requests with malformed documents, without failing the sink; every other failure fails the
 * sink.
 *
 * <p>Note: For Elasticsearch 1.x, matching on the failure type is not feasible because the exact
 * type cannot be retrieved through the older version Java client APIs (the failures are general
 * {@link Exception}s that only differ in message); match on the provided REST status code
 * instead.
 *
 * @deprecated This has been deprecated and will be removed in the future.
 */
@Deprecated
@PublicEvolving
public interface ActionRequestFailureHandler extends Serializable {

    /**
     * Handle a failed {@link ActionRequest}.
     *
     * @param action the {@link ActionRequest} that failed due to the failure
     * @param failure the cause of failure
     * @param restStatusCode the REST status code of the failure (-1 if none can be retrieved)
     * @param indexer request indexer to re-add the failed action, if intended to do so
     * @throws Throwable if the sink should fail on this failure; implementations should rethrow
     *     the exception or a custom one
     */
    void onFailure(
            ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer)
            throws Throwable;
}
| 5,785 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/NoOpFailureHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.util;
import org.apache.flink.annotation.Internal;
import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.elasticsearch.action.ActionRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An {@link ActionRequestFailureHandler} that fails the sink on every failure: the cause is
 * logged and then rethrown unchanged.
 */
@Internal
public class NoOpFailureHandler implements ActionRequestFailureHandler {

    private static final long serialVersionUID = 737941343410827885L;

    private static final Logger LOG = LoggerFactory.getLogger(NoOpFailureHandler.class);

    @Override
    public void onFailure(
            ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer)
            throws Throwable {
        LOG.error("Failed Elasticsearch item request: {}", failure.getMessage(), failure);
        // No handling strategy at all: rethrowing makes the sink fail.
        throw failure;
    }

    @Override
    public boolean equals(Object o) {
        // All instances are interchangeable, so any NoOpFailureHandler equals any other.
        return o instanceof NoOpFailureHandler;
    }

    @Override
    public int hashCode() {
        // Constant hash, consistent with the instanceof-based equals above.
        return NoOpFailureHandler.class.hashCode();
    }
}
| 5,786 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/RetryRejectedExecutionFailureHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.util;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.apache.flink.util.ExceptionUtils;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An {@link ActionRequestFailureHandler} that re-adds requests that failed due to temporary
 * {@link EsRejectedExecutionException}s (which means that Elasticsearch node queues are currently
 * full), and fails for all other failures.
 *
 * @deprecated This has been deprecated and will be removed in the future.
 */
@Deprecated
@PublicEvolving
public class RetryRejectedExecutionFailureHandler implements ActionRequestFailureHandler {

    private static final long serialVersionUID = -7423562912824511906L;

    private static final Logger LOG =
            LoggerFactory.getLogger(RetryRejectedExecutionFailureHandler.class);

    @Override
    public void onFailure(
            ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer)
            throws Throwable {
        LOG.error("Failed Elasticsearch item request: {}", failure.getMessage(), failure);
        if (!ExceptionUtils.findThrowable(failure, EsRejectedExecutionException.class)
                .isPresent()) {
            // Anything other than a queue-capacity rejection fails the sink.
            throw failure;
        }
        // Node queues were saturated; schedule the request to be indexed again.
        indexer.add(action);
    }
}
| 5,787 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/util/IgnoringFailureHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.util;
import org.apache.flink.annotation.Internal;
import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.elasticsearch.action.ActionRequest;
/**
 * An {@link ActionRequestFailureHandler} that swallows every kind of failure, effectively
 * dropping the affected {@link ActionRequest}.
 */
@Internal
public class IgnoringFailureHandler implements ActionRequestFailureHandler {

    private static final long serialVersionUID = 1662846593501L;

    @Override
    public void onFailure(
            ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer) {
        // Deliberately empty: every failure is ignored and the request is dropped.
    }
}
| 5,788 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/KeyExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.api.TableColumn;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.logical.DistinctType;
import org.apache.flink.table.types.logical.LogicalType;
import java.io.Serializable;
import java.time.Duration;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.Period;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
/** An extractor for an Elasticsearch document id (key) from a {@link RowData}. */
@Internal
class KeyExtractor implements Function<RowData, String>, Serializable {
    // One formatter per primary-key column, in primary-key declaration order.
    private final FieldFormatter[] fieldFormatters;
    // String inserted between formatted key fields, e.g. "_" yields "KEY1_KEY2_KEY3".
    private final String keyDelimiter;

    /** Serializable function rendering one field of a row as its string key representation. */
    private interface FieldFormatter extends Serializable {
        String format(RowData rowData);
    }

    private KeyExtractor(FieldFormatter[] fieldFormatters, String keyDelimiter) {
        this.fieldFormatters = fieldFormatters;
        this.keyDelimiter = keyDelimiter;
    }

    /** Concatenates all formatted key fields of {@code rowData}, separated by the delimiter. */
    @Override
    public String apply(RowData rowData) {
        final StringBuilder builder = new StringBuilder();
        for (int i = 0; i < fieldFormatters.length; i++) {
            if (i > 0) {
                builder.append(keyDelimiter);
            }
            final String value = fieldFormatters[i].format(rowData);
            builder.append(value);
        }
        return builder.toString();
    }

    /** Pairs a {@link TableColumn} with its positional index in the schema. */
    private static class ColumnWithIndex {
        public TableColumn column;
        public int index;

        public ColumnWithIndex(TableColumn column, int index) {
            this.column = column;
            this.index = index;
        }

        public LogicalType getType() {
            return column.getType().getLogicalType();
        }

        public int getIndex() {
            return index;
        }
    }

    /**
     * Builds a key extractor for the schema's primary key; when the schema has no primary key,
     * the returned function always yields {@code null}.
     */
    public static Function<RowData, String> createKeyExtractor(
            TableSchema schema, String keyDelimiter) {
        return schema.getPrimaryKey()
                .map(
                        key -> {
                            // Index columns by name so primary-key column names can be resolved
                            // to their field positions.
                            Map<String, ColumnWithIndex> namesToColumns = new HashMap<>();
                            List<TableColumn> tableColumns = schema.getTableColumns();
                            for (int i = 0; i < schema.getFieldCount(); i++) {
                                TableColumn column = tableColumns.get(i);
                                namesToColumns.put(
                                        column.getName(), new ColumnWithIndex(column, i));
                            }
                            FieldFormatter[] fieldFormatters =
                                    key.getColumns().stream()
                                            .map(namesToColumns::get)
                                            .map(
                                                    column ->
                                                            toFormatter(
                                                                    column.index, column.getType()))
                                            .toArray(FieldFormatter[]::new);
                            return (Function<RowData, String>)
                                    new KeyExtractor(fieldFormatters, keyDelimiter);
                        })
                // The intersection cast keeps the no-primary-key fallback lambda serializable.
                .orElseGet(() -> (Function<RowData, String> & Serializable) (row) -> null);
    }

    /**
     * Chooses a string rendering for the field at {@code index} based on its logical type:
     * temporal/interval types get an explicit java.time rendering, everything else falls back to
     * {@code toString()} of the value read via a {@link RowData.FieldGetter}.
     */
    private static FieldFormatter toFormatter(int index, LogicalType type) {
        switch (type.getTypeRoot()) {
            case DATE:
                return (row) -> LocalDate.ofEpochDay(row.getInt(index)).toString();
            case TIME_WITHOUT_TIME_ZONE:
                return (row) ->
                        LocalTime.ofNanoOfDay((long) row.getInt(index) * 1_000_000L).toString();
            case INTERVAL_YEAR_MONTH:
                // NOTE(review): Period.ofDays for a year-month interval looks odd — verify the
                // intended encoding of INTERVAL_YEAR_MONTH values here.
                return (row) -> Period.ofDays(row.getInt(index)).toString();
            case INTERVAL_DAY_TIME:
                return (row) -> Duration.ofMillis(row.getLong(index)).toString();
            case DISTINCT_TYPE:
                // Unwrap to the source type and format as that type.
                return toFormatter(index, ((DistinctType) type).getSourceType());
            default:
                RowData.FieldGetter fieldGetter = RowData.createFieldGetter(type, index);
                // NOTE(review): getFieldOrNull may return null for nullable fields, which would
                // NPE here; key columns are presumably NOT NULL — confirm constraint upstream.
                return (row) -> fieldGetter.getFieldOrNull(row).toString();
        }
    }
}
| 5,789 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchConnectorOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.configuration.description.Description;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import java.time.Duration;
import java.util.List;
import static org.apache.flink.configuration.description.TextElement.text;
/**
* Options for the Elasticsearch connector.
*
* @deprecated This has been deprecated and will be removed in the future.
*/
@Deprecated
@PublicEvolving
public class ElasticsearchConnectorOptions {
public static final ConfigOption<List<String>> HOSTS_OPTION =
ConfigOptions.key("hosts")
.stringType()
.asList()
.noDefaultValue()
.withDescription("Elasticsearch hosts to connect to.");
public static final ConfigOption<String> INDEX_OPTION =
ConfigOptions.key("index")
.stringType()
.noDefaultValue()
.withDescription("Elasticsearch index for every record.");
public static final ConfigOption<String> DOCUMENT_TYPE_OPTION =
ConfigOptions.key("document-type")
.stringType()
.noDefaultValue()
.withDescription("Elasticsearch document type.");
public static final ConfigOption<String> PASSWORD_OPTION =
ConfigOptions.key("password")
.stringType()
.noDefaultValue()
.withDescription("Password used to connect to Elasticsearch instance.");
public static final ConfigOption<String> USERNAME_OPTION =
ConfigOptions.key("username")
.stringType()
.noDefaultValue()
.withDescription("Username used to connect to Elasticsearch instance.");
public static final ConfigOption<String> KEY_DELIMITER_OPTION =
ConfigOptions.key("document-id.key-delimiter")
.stringType()
.defaultValue("_")
.withDescription(
"Delimiter for composite keys e.g., \"$\" would result in IDs \"KEY1$KEY2$KEY3\".");
public static final ConfigOption<String> FAILURE_HANDLER_OPTION =
ConfigOptions.key("failure-handler")
.stringType()
.defaultValue("fail")
.withDescription(
Description.builder()
.text(
"Failure handling strategy in case a request to Elasticsearch fails")
.list(
text(
"\"fail\" (throws an exception if a request fails and thus causes a job failure)"),
text(
"\"ignore\" (ignores failures and drops the request)"),
text(
"\"retry-rejected\" (re-adds requests that have failed due to queue capacity saturation)"),
text(
"\"class name\" for failure handling with a ActionRequestFailureHandler subclass"))
.build());
public static final ConfigOption<Boolean> FLUSH_ON_CHECKPOINT_OPTION =
ConfigOptions.key("sink.flush-on-checkpoint")
.booleanType()
.defaultValue(true)
.withDescription("Disables flushing on checkpoint");
public static final ConfigOption<Integer> BULK_FLUSH_MAX_ACTIONS_OPTION =
ConfigOptions.key("sink.bulk-flush.max-actions")
.intType()
.defaultValue(1000)
.withDescription("Maximum number of actions to buffer for each bulk request.");
public static final ConfigOption<MemorySize> BULK_FLASH_MAX_SIZE_OPTION =
ConfigOptions.key("sink.bulk-flush.max-size")
.memoryType()
.defaultValue(MemorySize.parse("2mb"))
.withDescription("Maximum size of buffered actions per bulk request");
public static final ConfigOption<Duration> BULK_FLUSH_INTERVAL_OPTION =
ConfigOptions.key("sink.bulk-flush.interval")
.durationType()
.defaultValue(Duration.ofSeconds(1))
.withDescription("Bulk flush interval");
public static final ConfigOption<BackOffType> BULK_FLUSH_BACKOFF_TYPE_OPTION =
ConfigOptions.key("sink.bulk-flush.backoff.strategy")
.enumType(BackOffType.class)
.defaultValue(BackOffType.DISABLED)
.withDescription("Backoff strategy");
public static final ConfigOption<Integer> BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION =
ConfigOptions.key("sink.bulk-flush.backoff.max-retries")
.intType()
.noDefaultValue()
.withDescription("Maximum number of retries.");
public static final ConfigOption<Duration> BULK_FLUSH_BACKOFF_DELAY_OPTION =
ConfigOptions.key("sink.bulk-flush.backoff.delay")
.durationType()
.noDefaultValue()
.withDescription("Delay between each backoff attempt.");
public static final ConfigOption<String> CONNECTION_PATH_PREFIX =
ConfigOptions.key("connection.path-prefix")
.stringType()
.noDefaultValue()
.withDescription("Prefix string to be added to every REST communication.");
public static final ConfigOption<String> FORMAT_OPTION =
ConfigOptions.key("format")
.stringType()
.defaultValue("json")
.withDescription(
"The format must produce a valid JSON document. "
+ "Please refer to the documentation on formats for more details.");
// --------------------------------------------------------------------------------------------
// Enums
// --------------------------------------------------------------------------------------------
/**
* Backoff strategy. Extends {@link ElasticsearchSinkBase.FlushBackoffType} with {@code
* DISABLED} option.
*/
public enum BackOffType {
DISABLED,
CONSTANT,
EXPONENTIAL
}
private ElasticsearchConnectorOptions() {}
}
| 5,790 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import org.apache.flink.streaming.connectors.elasticsearch.util.IgnoringFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.util.RetryRejectedExecutionFailureHandler;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.util.InstantiationUtil;
import java.time.Duration;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_TYPE_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_INTERVAL_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.FAILURE_HANDLER_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.PASSWORD_OPTION;
import static org.apache.flink.streaming.connectors.elasticsearch.table.ElasticsearchConnectorOptions.USERNAME_OPTION;
/** Accessor methods to elasticsearch options. */
@Internal
class ElasticsearchConfiguration {

    protected final ReadableConfig config;
    // Used to resolve user-provided failure handler classes by name.
    private final ClassLoader classLoader;

    ElasticsearchConfiguration(ReadableConfig config, ClassLoader classLoader) {
        this.config = config;
        this.classLoader = classLoader;
    }

    /**
     * Resolves the configured failure handling strategy.
     *
     * <p>Built-in values are {@code fail}, {@code ignore} and {@code retry-rejected}; any other
     * value is interpreted as the fully qualified name of an {@link ActionRequestFailureHandler}
     * implementation that is instantiated via its no-arg constructor.
     *
     * @throws ValidationException if a custom handler class cannot be found
     */
    public ActionRequestFailureHandler getFailureHandler() {
        final ActionRequestFailureHandler failureHandler;
        String value = config.get(FAILURE_HANDLER_OPTION);
        // Upper-case with Locale.ROOT so matching is locale-independent: with e.g. a Turkish
        // default locale, "fail".toUpperCase() yields "FAİL" and would miss the case labels.
        switch (value.toUpperCase(Locale.ROOT)) {
            case "FAIL":
                failureHandler = new NoOpFailureHandler();
                break;
            case "IGNORE":
                failureHandler = new IgnoringFailureHandler();
                break;
            case "RETRY-REJECTED":
                failureHandler = new RetryRejectedExecutionFailureHandler();
                break;
            default:
                try {
                    // Load without initializing; InstantiationUtil performs the instantiation.
                    Class<?> failureHandlerClass = Class.forName(value, false, classLoader);
                    failureHandler =
                            (ActionRequestFailureHandler)
                                    InstantiationUtil.instantiate(failureHandlerClass);
                } catch (ClassNotFoundException e) {
                    throw new ValidationException(
                            "Could not instantiate the failure handler class: " + value, e);
                }
                break;
        }
        return failureHandler;
    }

    public String getDocumentType() {
        return config.get(ElasticsearchConnectorOptions.DOCUMENT_TYPE_OPTION);
    }

    /** Maximum buffered actions per bulk request; {@code 0} is mapped to {@code -1} (disabled). */
    public int getBulkFlushMaxActions() {
        int maxActions = config.get(ElasticsearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION);
        // convert 0 to -1, because Elasticsearch client use -1 to disable this configuration.
        return maxActions == 0 ? -1 : maxActions;
    }

    /** Maximum buffered bytes per bulk request; {@code 0} is mapped to {@code -1} (disabled). */
    public long getBulkFlushMaxByteSize() {
        long maxSize =
                config.get(ElasticsearchConnectorOptions.BULK_FLASH_MAX_SIZE_OPTION).getBytes();
        // convert 0 to -1, because Elasticsearch client use -1 to disable this configuration.
        return maxSize == 0 ? -1 : maxSize;
    }

    /** Bulk flush interval in milliseconds; {@code 0} is mapped to {@code -1} (disabled). */
    public long getBulkFlushInterval() {
        long interval = config.get(BULK_FLUSH_INTERVAL_OPTION).toMillis();
        // convert 0 to -1, because Elasticsearch client use -1 to disable this configuration.
        return interval == 0 ? -1 : interval;
    }

    public Optional<String> getUsername() {
        return config.getOptional(USERNAME_OPTION);
    }

    public Optional<String> getPassword() {
        return config.getOptional(PASSWORD_OPTION);
    }

    public boolean isBulkFlushBackoffEnabled() {
        return config.get(BULK_FLUSH_BACKOFF_TYPE_OPTION)
                != ElasticsearchConnectorOptions.BackOffType.DISABLED;
    }

    /** Maps the connector-level backoff enum onto the sink's; empty when backoff is disabled. */
    public Optional<ElasticsearchSinkBase.FlushBackoffType> getBulkFlushBackoffType() {
        switch (config.get(BULK_FLUSH_BACKOFF_TYPE_OPTION)) {
            case CONSTANT:
                return Optional.of(ElasticsearchSinkBase.FlushBackoffType.CONSTANT);
            case EXPONENTIAL:
                return Optional.of(ElasticsearchSinkBase.FlushBackoffType.EXPONENTIAL);
            default:
                return Optional.empty();
        }
    }

    public Optional<Integer> getBulkFlushBackoffRetries() {
        return config.getOptional(BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION);
    }

    /** Backoff delay in milliseconds, if configured. */
    public Optional<Long> getBulkFlushBackoffDelay() {
        return config.getOptional(BULK_FLUSH_BACKOFF_DELAY_OPTION).map(Duration::toMillis);
    }

    public boolean isDisableFlushOnCheckpoint() {
        return !config.get(ElasticsearchConnectorOptions.FLUSH_ON_CHECKPOINT_OPTION);
    }

    public String getIndex() {
        return config.get(ElasticsearchConnectorOptions.INDEX_OPTION);
    }

    public String getKeyDelimiter() {
        return config.get(ElasticsearchConnectorOptions.KEY_DELIMITER_OPTION);
    }

    public Optional<String> getPathPrefix() {
        return config.getOptional(ElasticsearchConnectorOptions.CONNECTION_PATH_PREFIX);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        ElasticsearchConfiguration that = (ElasticsearchConfiguration) o;
        return Objects.equals(config, that.config) && Objects.equals(classLoader, that.classLoader);
    }

    @Override
    public int hashCode() {
        return Objects.hash(config, classLoader);
    }
}
| 5,791 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/RowElasticsearchSinkFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.serialization.RuntimeContextInitializationContextAdapters;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.data.RowData;
import org.apache.flink.util.Preconditions;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.xcontent.XContentType;
import javax.annotation.Nullable;
import java.util.Objects;
import java.util.function.Function;
/**
 * Sink function for converting upserts into Elasticsearch {@link ActionRequest}s.
 *
 * <p>INSERT and UPDATE_AFTER rows become index/update requests; UPDATE_BEFORE and DELETE rows
 * become delete requests. The Elasticsearch document id is derived from the row via {@code
 * createKey}; a {@code null} key means Elasticsearch auto-generates the id (append-only mode).
 */
@Internal
class RowElasticsearchSinkFunction implements ElasticsearchSinkFunction<RowData> {
    private static final long serialVersionUID = 1L;
    // Produces the target index name for each row.
    private final IndexGenerator indexGenerator;
    // Elasticsearch mapping type; deprecated since ES 7 and may be null.
    private final String docType;
    // Serializes a row into the document body bytes.
    private final SerializationSchema<RowData> serializationSchema;
    // Content type of the serialized document (e.g. JSON).
    private final XContentType contentType;
    // Creates version-specific index/update/delete requests.
    private final RequestFactory requestFactory;
    // Derives the document id from a row; may return null (then ES assigns the id).
    private final Function<RowData, String> createKey;
    public RowElasticsearchSinkFunction(
            IndexGenerator indexGenerator,
            @Nullable String docType, // this is deprecated in es 7+
            SerializationSchema<RowData> serializationSchema,
            XContentType contentType,
            RequestFactory requestFactory,
            Function<RowData, String> createKey) {
        this.indexGenerator = Preconditions.checkNotNull(indexGenerator);
        this.docType = docType;
        this.serializationSchema = Preconditions.checkNotNull(serializationSchema);
        this.contentType = Preconditions.checkNotNull(contentType);
        this.requestFactory = Preconditions.checkNotNull(requestFactory);
        this.createKey = Preconditions.checkNotNull(createKey);
    }
    @Override
    public void open(RuntimeContext ctx) throws Exception {
        // Initialize child components once before any record is processed.
        serializationSchema.open(
                RuntimeContextInitializationContextAdapters.serializationAdapter(ctx));
        indexGenerator.open();
    }
    @Override
    public void process(RowData element, RuntimeContext ctx, RequestIndexer indexer) {
        // Route each record by its changelog kind; anything else is unsupported here.
        switch (element.getRowKind()) {
            case INSERT:
            case UPDATE_AFTER:
                processUpsert(element, indexer);
                break;
            case UPDATE_BEFORE:
            case DELETE:
                processDelete(element, indexer);
                break;
            default:
                throw new TableException("Unsupported message kind: " + element.getRowKind());
        }
    }
    // Emits an update request when a document id exists, otherwise an index request.
    private void processUpsert(RowData row, RequestIndexer indexer) {
        final byte[] document = serializationSchema.serialize(row);
        final String key = createKey.apply(row);
        if (key != null) {
            // Explicit id: upsert so an existing document is updated in place.
            final UpdateRequest updateRequest =
                    requestFactory.createUpdateRequest(
                            indexGenerator.generate(row), docType, key, contentType, document);
            indexer.add(updateRequest);
        } else {
            // No id available: plain index request, Elasticsearch auto-generates the id.
            final IndexRequest indexRequest =
                    requestFactory.createIndexRequest(
                            indexGenerator.generate(row), docType, key, contentType, document);
            indexer.add(indexRequest);
        }
    }
    // Emits a delete request for the document identified by the row's key.
    private void processDelete(RowData row, RequestIndexer indexer) {
        final String key = createKey.apply(row);
        final DeleteRequest deleteRequest =
                requestFactory.createDeleteRequest(indexGenerator.generate(row), docType, key);
        indexer.add(deleteRequest);
    }
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        RowElasticsearchSinkFunction that = (RowElasticsearchSinkFunction) o;
        return Objects.equals(indexGenerator, that.indexGenerator)
                && Objects.equals(docType, that.docType)
                && Objects.equals(serializationSchema, that.serializationSchema)
                && contentType == that.contentType
                && Objects.equals(requestFactory, that.requestFactory)
                && Objects.equals(createKey, that.createKey);
    }
    @Override
    public int hashCode() {
        return Objects.hash(
                indexGenerator,
                docType,
                serializationSchema,
                contentType,
                requestFactory,
                createKey);
    }
}
| 5,792 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/IndexGeneratorBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import java.util.Objects;
/**
 * Base class for {@link IndexGenerator} implementations.
 *
 * <p>Stores the raw index string the generator was created from and bases equality on it, so two
 * generators built from the same index definition compare equal.
 */
@Internal
public abstract class IndexGeneratorBase implements IndexGenerator {

    private static final long serialVersionUID = 1L;

    /** The raw index (or index pattern) this generator was created with. */
    protected final String index;

    public IndexGeneratorBase(String index) {
        this.index = index;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        // Equality is defined purely by the underlying index string.
        return o instanceof IndexGeneratorBase && index.equals(((IndexGeneratorBase) o).index);
    }

    @Override
    public int hashCode() {
        return Objects.hash(index);
    }
}
| 5,793 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/IndexGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.data.RowData;
import org.apache.flink.types.Row;
import java.io.Serializable;
/** This interface is responsible for generating an index name from a given {@link Row} record. */
@Internal
interface IndexGenerator extends Serializable {
    /**
     * Initializes the index generator. This will be called exactly once before {@link
     * #generate(RowData)} is invoked for the first time.
     */
    default void open() {}
    /** Generates the index name according to the given row. */
    String generate(RowData row);
}
| 5,794 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/RequestFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.Serializable;
/** For version-agnostic creating of {@link ActionRequest}s. */
@Internal
interface RequestFactory extends Serializable {
    /**
     * Creates an update request to be added to a {@link RequestIndexer}. Note: the type field has
     * been deprecated since Elasticsearch 7.x and has no effect there.
     *
     * @param index target Elasticsearch index
     * @param docType mapping type (ignored on Elasticsearch 7+)
     * @param key document id
     * @param contentType content type of the serialized document
     * @param document serialized document body
     */
    UpdateRequest createUpdateRequest(
            String index, String docType, String key, XContentType contentType, byte[] document);
    /**
     * Creates an index request to be added to a {@link RequestIndexer}. Note: the type field has
     * been deprecated since Elasticsearch 7.x and has no effect there.
     *
     * @param index target Elasticsearch index
     * @param docType mapping type (ignored on Elasticsearch 7+)
     * @param key document id, may be null (Elasticsearch then auto-generates one)
     * @param contentType content type of the serialized document
     * @param document serialized document body
     */
    IndexRequest createIndexRequest(
            String index, String docType, String key, XContentType contentType, byte[] document);
    /**
     * Creates a delete request to be added to a {@link RequestIndexer}. Note: the type field has
     * been deprecated since Elasticsearch 7.x and has no effect there.
     *
     * @param index target Elasticsearch index
     * @param docType mapping type (ignored on Elasticsearch 7+)
     * @param key id of the document to delete
     */
    DeleteRequest createDeleteRequest(String index, String docType, String key);
}
| 5,795 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchValidationUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.types.logical.DistinctType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeFamily;
import org.apache.flink.table.types.logical.LogicalTypeRoot;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
/** Utility methods for validating Elasticsearch properties. */
@Internal
class ElasticsearchValidationUtils {

    /**
     * Types that cannot participate in a primary key because they lack a compact, stable string
     * representation usable inside an Elasticsearch document id.
     */
    private static final Set<LogicalTypeRoot> ILLEGAL_PRIMARY_KEY_TYPES = new LinkedHashSet<>();

    static {
        ILLEGAL_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.ARRAY);
        ILLEGAL_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.MAP);
        ILLEGAL_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.MULTISET);
        ILLEGAL_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.STRUCTURED_TYPE);
        ILLEGAL_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.ROW);
        ILLEGAL_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.RAW);
        ILLEGAL_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.BINARY);
        ILLEGAL_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.VARBINARY);
    }

    /**
     * Checks that the table does not have primary key defined on illegal types. In Elasticsearch
     * the primary key is used to calculate the Elasticsearch document id, which is a string of up
     * to 512 bytes. It cannot have whitespaces. As of now it is calculated by concatenating the
     * fields. Certain types do not have a good string representation to be used in this scenario.
     * The illegal types are mostly {@link LogicalTypeFamily#COLLECTION} types and {@link
     * LogicalTypeRoot#RAW} type.
     *
     * @throws ValidationException if any primary key column has a type listed in {@link
     *     #ILLEGAL_PRIMARY_KEY_TYPES}
     */
    public static void validatePrimaryKey(TableSchema schema) {
        schema.getPrimaryKey()
                .ifPresent(
                        key -> {
                            List<LogicalTypeRoot> illegalTypes =
                                    key.getColumns().stream()
                                            .map(fieldName -> columnTypeRoot(schema, fieldName))
                                            .filter(ILLEGAL_PRIMARY_KEY_TYPES::contains)
                                            .collect(Collectors.toList());
                            if (!illegalTypes.isEmpty()) {
                                throw new ValidationException(
                                        String.format(
                                                "The table has a primary key on columns of illegal types: %s.\n"
                                                        + " Elasticsearch sink does not support primary keys on columns of types: %s.",
                                                illegalTypes, ILLEGAL_PRIMARY_KEY_TYPES));
                            }
                        });
    }

    /** Returns the column's logical type root, unwrapping DISTINCT types to their source type. */
    private static LogicalTypeRoot columnTypeRoot(TableSchema schema, String fieldName) {
        // The column name comes from the schema's own primary key, so the data type is present.
        LogicalType logicalType = schema.getFieldDataType(fieldName).get().getLogicalType();
        if (logicalType.is(LogicalTypeRoot.DISTINCT_TYPE)) {
            return ((DistinctType) logicalType).getSourceType().getTypeRoot();
        }
        return logicalType.getTypeRoot();
    }

    private ElasticsearchValidationUtils() {}
}
| 5,796 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchRowDataLookupFunction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchApiCallBridge;
import org.apache.flink.table.connector.source.LookupTableSource;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.util.DataFormatConverters;
import org.apache.flink.table.functions.FunctionContext;
import org.apache.flink.table.functions.LookupFunction;
import org.apache.flink.table.types.DataType;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.Preconditions;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** A lookup function implementing {@link LookupTableSource} in Elasticsearch connector. */
@Internal
public class ElasticsearchRowDataLookupFunction<C extends AutoCloseable> extends LookupFunction {
private static final Logger LOG =
LoggerFactory.getLogger(ElasticsearchRowDataLookupFunction.class);
private static final long serialVersionUID = 1L;
private final DeserializationSchema<RowData> deserializationSchema;
private final String index;
private final String type;
private final String[] producedNames;
private final String[] lookupKeys;
private final int maxRetryTimes;
// converters to convert data from internal to external in order to generate keys for the cache.
private final DataFormatConverters.DataFormatConverter[] converters;
private SearchRequest searchRequest;
private SearchSourceBuilder searchSourceBuilder;
private final ElasticsearchApiCallBridge<C> callBridge;
private transient C client;
public ElasticsearchRowDataLookupFunction(
DeserializationSchema<RowData> deserializationSchema,
int maxRetryTimes,
String index,
String type,
String[] producedNames,
DataType[] producedTypes,
String[] lookupKeys,
ElasticsearchApiCallBridge<C> callBridge) {
checkNotNull(deserializationSchema, "No DeserializationSchema supplied.");
checkNotNull(maxRetryTimes, "No maxRetryTimes supplied.");
checkNotNull(producedNames, "No fieldNames supplied.");
checkNotNull(producedTypes, "No fieldTypes supplied.");
checkNotNull(lookupKeys, "No keyNames supplied.");
checkNotNull(callBridge, "No ElasticsearchApiCallBridge supplied.");
this.deserializationSchema = deserializationSchema;
this.maxRetryTimes = maxRetryTimes;
this.index = index;
this.type = type;
this.producedNames = producedNames;
this.lookupKeys = lookupKeys;
this.converters = new DataFormatConverters.DataFormatConverter[lookupKeys.length];
Map<String, Integer> nameToIndex =
IntStream.range(0, producedNames.length)
.boxed()
.collect(Collectors.toMap(i -> producedNames[i], i -> i));
for (int i = 0; i < lookupKeys.length; i++) {
Integer position = nameToIndex.get(lookupKeys[i]);
Preconditions.checkArgument(
position != null, "Lookup keys %s not selected", Arrays.toString(lookupKeys));
converters[i] = DataFormatConverters.getConverterForDataType(producedTypes[position]);
}
this.callBridge = callBridge;
}
@Override
public void open(FunctionContext context) throws Exception {
this.client = callBridge.createClient();
// Set searchRequest in open method in case of amount of calling in eval method when every
// record comes.
this.searchRequest = new SearchRequest(index);
if (type == null) {
searchRequest.types(Strings.EMPTY_ARRAY);
} else {
searchRequest.types(type);
}
searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.fetchSource(producedNames, null);
deserializationSchema.open(null);
}
/**
 * Looks up the rows matching the given key row.
 *
 * <p>All lookup keys are combined into a conjunctive (AND) term query. A failed search is
 * retried up to {@code maxRetryTimes} times with a linearly growing back-off; a successful
 * search returns immediately, even when it produced no hits.
 *
 * @param keyRow key row whose fields are positionally aligned with {@code lookupKeys}
 * @return the matching rows, or an empty collection when nothing matched
 */
@Override
public Collection<RowData> lookup(RowData keyRow) {
    BoolQueryBuilder lookupCondition = new BoolQueryBuilder();
    for (int i = 0; i < lookupKeys.length; i++) {
        lookupCondition.must(
                new TermQueryBuilder(lookupKeys[i], converters[i].toExternal(keyRow, i)));
    }
    searchSourceBuilder.query(lookupCondition);
    searchRequest.source(searchSourceBuilder);
    for (int retry = 0; retry <= maxRetryTimes; retry++) {
        try {
            Tuple2<String, String[]> searchResponse = callBridge.search(client, searchRequest);
            // The search succeeded: return whatever it produced. Previously an empty result
            // fell through and was retried maxRetryTimes more times, but an empty result is
            // a valid answer, not a transient failure.
            ArrayList<RowData> rows = new ArrayList<>(searchResponse.f1.length);
            for (String hit : searchResponse.f1) {
                RowData row = parseSearchResult(hit);
                // Hits that fail deserialization are logged by parseSearchResult and
                // skipped here instead of being surfaced as null elements.
                if (row != null) {
                    rows.add(row);
                }
            }
            return rows;
        } catch (IOException e) {
            LOG.error(String.format("Elasticsearch search error, retry times = %d", retry), e);
            if (retry >= maxRetryTimes) {
                throw new FlinkRuntimeException("Execution of Elasticsearch search failed.", e);
            }
            try {
                // Linear back-off: first retry is immediate, then 1s, 2s, ...
                Thread.sleep(1000L * retry);
            } catch (InterruptedException e1) {
                // Restore the interrupt flag so upstream code can observe the interruption.
                Thread.currentThread().interrupt();
                LOG.warn(
                        "Interrupted while waiting to retry failed elasticsearch search, aborting");
                throw new FlinkRuntimeException(e1);
            }
        }
    }
    return Collections.emptyList();
}
/**
 * Deserializes a single search hit (its raw JSON source) into a {@link RowData}.
 *
 * @param result raw source string of the search hit
 * @return the deserialized row, or {@code null} when deserialization fails
 */
private RowData parseSearchResult(String result) {
    RowData row = null;
    try {
        // Use an explicit charset: Elasticsearch hit sources are UTF-8 JSON, while the
        // no-arg getBytes() silently depends on the platform default charset.
        row =
                deserializationSchema.deserialize(
                        result.getBytes(java.nio.charset.StandardCharsets.UTF_8));
    } catch (IOException e) {
        // Pass the exception itself so the full stack trace is logged, not just the message.
        LOG.error("Deserialize search hit failed: " + e.getMessage(), e);
    }
    return row;
}
}
| 5,797 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/AbstractTimeIndexGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import java.time.format.DateTimeFormatter;
/**
 * Abstract class for time related {@link IndexGenerator}, i.e. generators that derive the target
 * index name from a date/time value rendered with a configurable pattern.
 */
@Internal
abstract class AbstractTimeIndexGenerator extends IndexGeneratorBase {
    // Pattern string is kept so the formatter can be rebuilt after deserialization;
    // the formatter itself is transient (DateTimeFormatter is not serializable).
    private final String dateTimeFormat;
    protected transient DateTimeFormatter dateTimeFormatter;
    /**
     * @param index the index pattern this generator was created for
     * @param dateTimeFormat {@link DateTimeFormatter}-style pattern used to format the time value
     */
    public AbstractTimeIndexGenerator(String index, String dateTimeFormat) {
        super(index);
        this.dateTimeFormat = dateTimeFormat;
    }
    /** Builds the formatter; must be called before {@code generate(...)} is invoked. */
    @Override
    public void open() {
        this.dateTimeFormatter = DateTimeFormatter.ofPattern(dateTimeFormat);
    }
}
| 5,798 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/IndexGeneratorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeRoot;
import javax.annotation.Nonnull;
import java.io.Serializable;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Factory of {@link IndexGenerator}.
 *
 * <p>Flink supports both static index and dynamic index.
 *
 * <p>If you want to have a static index, this option value should be a plain string, e.g.
 * 'myusers', all the records will be consistently written into "myusers" index.
 *
 * <p>If you want to have a dynamic index, you can use '{field_name}' to reference a field value in
 * the record to dynamically generate a target index. You can also use
 * '{field_name|date_format_string}' to convert a field value of TIMESTAMP/DATE/TIME type into the
 * format specified by date_format_string. The date_format_string is compatible with {@link
 * java.text.SimpleDateFormat}. For example, if the option value is 'myusers_{log_ts|yyyy-MM-dd}',
 * then a record with log_ts field value 2020-03-27 12:25:55 will be written into
 * "myusers_2020-03-27" index.
 */
@Internal
final class IndexGeneratorFactory {
    /** Non-instantiable: this class only exposes static factory methods. */
    private IndexGeneratorFactory() {}
    /**
     * Creates an {@link IndexGenerator} for the given index pattern, using the system default
     * time zone for system-time ('now()') based dynamic indices.
     */
    public static IndexGenerator createIndexGenerator(String index, TableSchema schema) {
        return createIndexGenerator(index, schema, ZoneId.systemDefault());
    }
    /**
     * Creates an {@link IndexGenerator} for the given index pattern.
     *
     * @param index either a plain string (static index) or a pattern with a single '{...}'
     *     placeholder (dynamic index)
     * @param schema physical schema used to resolve field references in the placeholder
     * @param localTimeZoneId time zone applied when formatting system time or
     *     TIMESTAMP_WITH_LOCAL_TIME_ZONE fields
     */
    public static IndexGenerator createIndexGenerator(
            String index, TableSchema schema, ZoneId localTimeZoneId) {
        final IndexHelper indexHelper = new IndexHelper();
        if (indexHelper.checkIsDynamicIndex(index)) {
            return createRuntimeIndexGenerator(
                    index,
                    schema.getFieldNames(),
                    schema.getFieldDataTypes(),
                    indexHelper,
                    localTimeZoneId);
        } else {
            return new StaticIndexGenerator(index);
        }
    }
    /** Formats a non-null field value into the index fragment substituted for the placeholder. */
    interface DynamicFormatter extends Serializable {
        String format(@Nonnull Object fieldValue, DateTimeFormatter formatter);
    }
    private static IndexGenerator createRuntimeIndexGenerator(
            String index,
            String[] fieldNames,
            DataType[] fieldTypes,
            IndexHelper indexHelper,
            ZoneId localTimeZoneId) {
        // Split the pattern into prefix + '{...}' + suffix; only a single placeholder can
        // occur (enforced by checkIsDynamicIndex).
        final String dynamicIndexPatternStr = indexHelper.extractDynamicIndexPatternStr(index);
        final String indexPrefix = index.substring(0, index.indexOf(dynamicIndexPatternStr));
        final String indexSuffix =
                index.substring(indexPrefix.length() + dynamicIndexPatternStr.length());
        // Case 1: '{now()|...}' style - index derives from processing time, not a record field.
        if (indexHelper.checkIsDynamicIndexWithSystemTimeFormat(index)) {
            final String dateTimeFormat =
                    indexHelper.extractDateFormat(
                            index, LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
            return new AbstractTimeIndexGenerator(index, dateTimeFormat) {
                @Override
                public String generate(RowData row) {
                    return indexPrefix
                            .concat(LocalDateTime.now(localTimeZoneId).format(dateTimeFormatter))
                            .concat(indexSuffix);
                }
            };
        }
        final boolean isDynamicIndexWithFormat = indexHelper.checkIsDynamicIndexWithFormat(index);
        final int indexFieldPos =
                indexHelper.extractIndexFieldPos(index, fieldNames, isDynamicIndexWithFormat);
        final LogicalType indexFieldType = fieldTypes[indexFieldPos].getLogicalType();
        final LogicalTypeRoot indexFieldLogicalTypeRoot = indexFieldType.getTypeRoot();
        // validate index field type
        indexHelper.validateIndexFieldType(indexFieldLogicalTypeRoot);
        // Case 2: '{field|format}' style - a time-typed field is formatted per record.
        final RowData.FieldGetter fieldGetter =
                RowData.createFieldGetter(indexFieldType, indexFieldPos);
        if (isDynamicIndexWithFormat) {
            final String dateTimeFormat =
                    indexHelper.extractDateFormat(index, indexFieldLogicalTypeRoot);
            DynamicFormatter formatFunction =
                    createFormatFunction(
                            indexFieldType, indexFieldLogicalTypeRoot, localTimeZoneId);
            return new AbstractTimeIndexGenerator(index, dateTimeFormat) {
                @Override
                public String generate(RowData row) {
                    Object fieldOrNull = fieldGetter.getFieldOrNull(row);
                    final String formattedField;
                    // TODO we can possibly optimize it to use the nullability of the field
                    if (fieldOrNull != null) {
                        formattedField = formatFunction.format(fieldOrNull, dateTimeFormatter);
                    } else {
                        formattedField = "null";
                    }
                    return indexPrefix.concat(formattedField).concat(indexSuffix);
                }
            };
        }
        // Case 3: '{field}' style - the field's string representation is substituted as-is.
        return new IndexGeneratorBase(index) {
            @Override
            public String generate(RowData row) {
                Object indexField = fieldGetter.getFieldOrNull(row);
                return indexPrefix
                        .concat(indexField == null ? "null" : indexField.toString())
                        .concat(indexSuffix);
            }
        };
    }
    /** Returns a serializable formatter that renders the internal field value as a string. */
    private static DynamicFormatter createFormatFunction(
            LogicalType indexFieldType,
            LogicalTypeRoot indexFieldLogicalTypeRoot,
            ZoneId localTimeZoneId) {
        switch (indexFieldLogicalTypeRoot) {
            case DATE:
                // DATE arrives as an Integer of epoch days.
                return (value, dateTimeFormatter) -> {
                    Integer indexField = (Integer) value;
                    return LocalDate.ofEpochDay(indexField).format(dateTimeFormatter);
                };
            case TIME_WITHOUT_TIME_ZONE:
                // TIME arrives as an Integer of millis-of-day; scale to nanos for LocalTime.
                return (value, dateTimeFormatter) -> {
                    Integer indexField = (Integer) value;
                    return LocalTime.ofNanoOfDay(indexField * 1_000_000L).format(dateTimeFormatter);
                };
            case TIMESTAMP_WITHOUT_TIME_ZONE:
                return (value, dateTimeFormatter) -> {
                    TimestampData indexField = (TimestampData) value;
                    return indexField.toLocalDateTime().format(dateTimeFormatter);
                };
            case TIMESTAMP_WITH_TIME_ZONE:
                throw new UnsupportedOperationException(
                        "TIMESTAMP_WITH_TIME_ZONE is not supported yet");
            case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
                // Instant is rendered in the session's local time zone.
                return (value, dateTimeFormatter) -> {
                    TimestampData indexField = (TimestampData) value;
                    return indexField.toInstant().atZone(localTimeZoneId).format(dateTimeFormatter);
                };
            default:
                throw new TableException(
                        String.format(
                                "Unsupported type '%s' found in Elasticsearch dynamic index field, "
                                        + "time-related pattern only support types are: DATE,TIME,TIMESTAMP.",
                                indexFieldType));
        }
    }
    /**
     * Helper class for {@link IndexGeneratorFactory}; validates index field types and parses the
     * index format out of the pattern string.
     */
    static class IndexHelper {
        // Matches one '{...}' placeholder. NOTE(review): the trailing '}?' makes the closing
        // brace optional, so an unclosed '{field' also counts as dynamic - confirm intended.
        private static final Pattern dynamicIndexPattern = Pattern.compile("\\{[^\\{\\}]+\\}?");
        // Matches placeholders carrying a '|format' part, e.g. '{ts|yyyy-MM-dd}'.
        private static final Pattern dynamicIndexTimeExtractPattern =
                Pattern.compile(".*\\{.+\\|.*\\}.*");
        // Matches system-time placeholders such as '{now()|...}' or '{CURRENT_TIMESTAMP|...}'.
        private static final Pattern dynamicIndexSystemTimeExtractPattern =
                Pattern.compile(
                        ".*\\{\\s*(now\\(\\s*\\)|NOW\\(\\s*\\)|current_timestamp|CURRENT_TIMESTAMP)\\s*\\|.*\\}.*");
        // Field types allowed inside a dynamic index placeholder.
        private static final List<LogicalTypeRoot> supportedTypes = new ArrayList<>();
        // Fallback date formats per time type, used when '{field|}' gives no explicit format.
        private static final Map<LogicalTypeRoot, String> defaultFormats = new HashMap<>();
        static {
            // time related types
            supportedTypes.add(LogicalTypeRoot.DATE);
            supportedTypes.add(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE);
            supportedTypes.add(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE);
            supportedTypes.add(LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE);
            supportedTypes.add(LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
            // general types
            supportedTypes.add(LogicalTypeRoot.VARCHAR);
            supportedTypes.add(LogicalTypeRoot.CHAR);
            supportedTypes.add(LogicalTypeRoot.TINYINT);
            supportedTypes.add(LogicalTypeRoot.INTEGER);
            supportedTypes.add(LogicalTypeRoot.BIGINT);
        }
        static {
            defaultFormats.put(LogicalTypeRoot.DATE, "yyyy_MM_dd");
            defaultFormats.put(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE, "HH_mm_ss");
            defaultFormats.put(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE, "yyyy_MM_dd_HH_mm_ss");
            defaultFormats.put(LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE, "yyyy_MM_dd_HH_mm_ss");
            defaultFormats.put(
                    LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE, "yyyy_MM_dd_HH_mm_ssX");
        }
        /** Validates the index field type; throws {@link IllegalArgumentException} otherwise. */
        void validateIndexFieldType(LogicalTypeRoot logicalType) {
            if (!supportedTypes.contains(logicalType)) {
                throw new IllegalArgumentException(
                        String.format(
                                "Unsupported type %s of index field, " + "Supported types are: %s",
                                logicalType, supportedTypes));
            }
        }
        /** Gets the default date format for the given time type. */
        String getDefaultFormat(LogicalTypeRoot logicalType) {
            return defaultFormats.get(logicalType);
        }
        /**
         * Checks whether general dynamic index is enabled by the index pattern. Exactly one
         * placeholder enables it; more than one is rejected.
         */
        boolean checkIsDynamicIndex(String index) {
            final Matcher matcher = dynamicIndexPattern.matcher(index);
            int count = 0;
            while (matcher.find()) {
                count++;
            }
            if (count > 1) {
                throw new TableException(
                        String.format(
                                "Chaining dynamic index pattern %s is not supported,"
                                        + " only support single dynamic index pattern.",
                                index));
            }
            return count == 1;
        }
        /** Checks whether time-extract dynamic index ('{field|format}') is enabled. */
        boolean checkIsDynamicIndexWithFormat(String index) {
            return dynamicIndexTimeExtractPattern.matcher(index).matches();
        }
        /** Checks whether the dynamic index derives from system time ('{now()|...}'). */
        boolean checkIsDynamicIndexWithSystemTimeFormat(String index) {
            return dynamicIndexSystemTimeExtractPattern.matcher(index).matches();
        }
        /** Extracts the '{...}' placeholder substring from the index pattern string. */
        String extractDynamicIndexPatternStr(String index) {
            int start = index.indexOf("{");
            int end = index.lastIndexOf("}");
            return index.substring(start, end + 1);
        }
        /** Extracts the position of the referenced index field within {@code fieldNames}. */
        int extractIndexFieldPos(
                String index, String[] fieldNames, boolean isDynamicIndexWithFormat) {
            List<String> fieldList = Arrays.asList(fieldNames);
            String indexFieldName;
            if (isDynamicIndexWithFormat) {
                indexFieldName = index.substring(index.indexOf("{") + 1, index.indexOf("|"));
            } else {
                indexFieldName = index.substring(index.indexOf("{") + 1, index.indexOf("}"));
            }
            if (!fieldList.contains(indexFieldName)) {
                throw new TableException(
                        String.format(
                                "Unknown field '%s' in index pattern '%s', please check the field name.",
                                indexFieldName, index));
            }
            return fieldList.indexOf(indexFieldName);
        }
        /**
         * Extracts the dateTime format from the index pattern, falling back to the type's default
         * when the format part is empty.
         *
         * <p>NOTE(review): assumes '|' and '}' are both present; callers validate this first.
         */
        private String extractDateFormat(String index, LogicalTypeRoot logicalType) {
            String format = index.substring(index.indexOf("|") + 1, index.indexOf("}"));
            if ("".equals(format)) {
                format = getDefaultFormat(logicalType);
            }
            return format;
        }
    }
}
| 5,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.