I want to write a custom DataSource that produces a DataStream from tarantool-java (https://github.com/tarantool/tarantool-java).
Can anybody give me a guide on how to write a user-defined DataSource?
This is my code so far:
package tarantooljava.streaming.flink_connecter;
import org.apache.flink.configuration.Configuration;
import org.tarantool.TarantoolConnection16;
import org.tarantool.TarantoolConnection16Impl;
import splunk.test.TestSchema;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import static java.util.Objects.requireNonNull;
/**
* Created by jaryzhen on 16/4/19.
*/
public class FlinkTarantoolJavaSpace<T> extends FlinkTarantoolJavaSpaceBase<T>{
private ConsumerThread<T> consumerThread;
public FlinkTarantoolJavaSpace(String ip, int port, String user, String pwd) throws IOException {
FlinkTarantoolJavaSpace(ip,port,user,pwd,11);
}
public List<T> FlinkTarantoolJavaSpace(String ip, int port, String user, String pwd, int a) throws IOException {
requireNonNull(ip, "ip");
TarantoolConnection16 con = new TarantoolConnection16Impl(ip, port);
con.auth(user, pwd);
final TestSchema schema = con.schema(new TestSchema());
List select0 = null;
for (int i=0 ; i <100 ; i=i+2) {
select0 = con.select(schema.tester.id, schema.tester.primary, Arrays.asList(i), 0, 30, 0);
//System.out.println("select0:" +i+ select0);
}
// System.out.println(a.size());
// System.out.println(a.get(0));
con.close();
return select0;
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
}
@Override
public void run(SourceContext<T> sourceContext) throws Exception {
consumerThread = new ConsumerThread<>(this, sourceContext);
}
@Override
public void cancel() {
// set ourselves as not running
boolean running = false;
if(true) {
} else {
// the consumer thread is not running, so we have to interrupt our own thread
}
}
@Override
public void close() throws Exception {
cancel();
super.close();
}
// ------------------------------------------------------------------------
// Checkpoint and restore
// ----------------------------------------------------------------
private static class ConsumerThread<T> extends Thread {
private FlinkTarantoolJavaSpace<T> flinkConsumer;
private SourceContext<T> sourceContext;
private boolean running = true;
public ConsumerThread(FlinkTarantoolJavaSpace<T> flinkConsumer, SourceContext<T> sourceContext) {
this.sourceContext = sourceContext;
this.flinkConsumer=flinkConsumer;
}
@Override
public void run() {
}
/**
* Try to shutdown the thread
*/
public void shutdown() {
this.running = false;
}
}
}
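For reference, below is a minimal sketch of what a user-defined source wrapping this Tarantool connection might look like. It is not a definitive implementation: the class name TarantoolSource, the spaceId/indexId constructor parameters, and emitting the raw tuple List<?> are assumptions for illustration; the Tarantool calls are the ones used in the code above, and checkpointing support is left out.
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.tarantool.TarantoolConnection16;
import org.tarantool.TarantoolConnection16Impl;
import java.util.Arrays;
import java.util.List;
/** Minimal sketch of a user-defined Tarantool source; space and index ids are passed in directly. */
public class TarantoolSource extends RichSourceFunction<List<?>> {
    private final String ip;
    private final int port;
    private final String user;
    private final String pwd;
    private final int spaceId;  // assumption: resolved up front instead of via the schema helper
    private final int indexId;
    private transient TarantoolConnection16 connection;
    private volatile boolean running = true;
    public TarantoolSource(String ip, int port, String user, String pwd, int spaceId, int indexId) {
        this.ip = ip;
        this.port = port;
        this.user = user;
        this.pwd = pwd;
        this.spaceId = spaceId;
        this.indexId = indexId;
    }
    @Override
    public void open(Configuration parameters) throws Exception {
        // Open the connection on the worker, not in the constructor: the source object is serialized and shipped.
        connection = new TarantoolConnection16Impl(ip, port);
        connection.auth(user, pwd);
    }
    @Override
    public void run(SourceContext<List<?>> ctx) throws Exception {
        int key = 0;
        while (running && key < 100) {
            List<?> tuples = connection.select(spaceId, indexId, Arrays.asList(key), 0, 30, 0);
            // Emit under the checkpoint lock so emission does not interleave with checkpoints.
            synchronized (ctx.getCheckpointLock()) {
                ctx.collect(tuples);
            }
            key += 2;
        }
    }
    @Override
    public void cancel() {
        running = false;
    }
    @Override
    public void close() throws Exception {
        if (connection != null) {
            connection.close();
        }
        super.close();
    }
}
With something like this you could call env.addSource(new TarantoolSource(...)) and map the raw tuples to a proper type downstream; keeping restartable state (for example the last key read) would additionally require implementing CheckpointedFunction.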
I have the below Avro schema, User.avsc:
{
"type": "record",
"namespace": "com.myorg",
"name": "User",
"fields": [
{
"name": "id",
"type": "long"
},
{
"name": "name",
"type": "string"
}
]
}
The Java class User.java below was generated from the above User.avsc using the avro-maven-plugin.
package com.myorg;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.nio.ByteBuffer;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Parser;
import org.apache.avro.data.RecordBuilder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.message.BinaryMessageDecoder;
import org.apache.avro.message.BinaryMessageEncoder;
import org.apache.avro.message.SchemaStore;
import org.apache.avro.specific.AvroGenerated;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.specific.SpecificRecord;
import org.apache.avro.specific.SpecificRecordBase;
import org.apache.avro.specific.SpecificRecordBuilderBase;
@AvroGenerated
public class User extends SpecificRecordBase implements SpecificRecord {
private static final long serialVersionUID = 8699049231783654635L;
public static final Schema SCHEMA$ = (new Parser()).parse("{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"com.myorg\",\"fields\":[{\"name\":\"id\",\"type\":\"long\"},{\"name\":\"name\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}");
private static SpecificData MODEL$ = new SpecificData();
private static final BinaryMessageEncoder<User> ENCODER;
private static final BinaryMessageDecoder<User> DECODER;
/** @deprecated */
@Deprecated
public long id;
/** @deprecated */
@Deprecated
public String name;
private static final DatumWriter<User> WRITER$;
private static final DatumReader<User> READER$;
public static Schema getClassSchema() {
return SCHEMA$;
}
public static BinaryMessageDecoder<User> getDecoder() {
return DECODER;
}
public static BinaryMessageDecoder<User> createDecoder(SchemaStore resolver) {
return new BinaryMessageDecoder(MODEL$, SCHEMA$, resolver);
}
public ByteBuffer toByteBuffer() throws IOException {
return ENCODER.encode(this);
}
public static User fromByteBuffer(ByteBuffer b) throws IOException {
return (User)DECODER.decode(b);
}
public User() {
}
public User(Long id, String name) {
this.id = id;
this.name = name;
}
public Schema getSchema() {
return SCHEMA$;
}
public Object get(int field$) {
switch(field$) {
case 0:
return this.id;
case 1:
return this.name;
default:
throw new AvroRuntimeException("Bad index");
}
}
public void put(int field$, Object value$) {
switch(field$) {
case 0:
this.id = (Long)value$;
break;
case 1:
this.name = (String)value$;
break;
default:
throw new AvroRuntimeException("Bad index");
}
}
public Long getId() {
return this.id;
}
public void setId(Long value) {
this.id = value;
}
public String getName() {
return this.name;
}
public void setName(String value) {
this.name = value;
}
public void writeExternal(ObjectOutput out) throws IOException {
WRITER$.write(this, SpecificData.getEncoder(out));
}
public void readExternal(ObjectInput in) throws IOException {
READER$.read(this, SpecificData.getDecoder(in));
}
static {
ENCODER = new BinaryMessageEncoder(MODEL$, SCHEMA$);
DECODER = new BinaryMessageDecoder(MODEL$, SCHEMA$);
WRITER$ = MODEL$.createDatumWriter(SCHEMA$);
READER$ = MODEL$.createDatumReader(SCHEMA$);
}
}
I want to write instances of the User SpecificRecord to files using Apache Flink's FileSink.
Below is the program that I wrote:
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.avro.AvroWriters;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import com.myorg.User;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;
import java.util.Arrays;
public class AvroFileSinkApp {
private static final String OUTPUT_PATH = "./il/";
public static void main(String[] args) throws Exception {
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment().enableCheckpointing(5000);
env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
env.setParallelism(4);
OutputFileConfig config = OutputFileConfig
.builder()
.withPartPrefix("il")
.withPartSuffix(".avro")
.build();
DataStream<User> source = env.fromCollection(Arrays.asList(getUser(), getUser(), getUser(), getUser(), getUser(), getUser()));
source.sinkTo(FileSink.forBulkFormat(new Path(OUTPUT_PATH), AvroWriters.forSpecificRecord(User.class)).withBucketCheckInterval(5000).withRollingPolicy(OnCheckpointRollingPolicy.build())
.withOutputFileConfig(config).withBucketAssigner(new DateTimeBucketAssigner<>("yyyy/MM/dd/HH")).build());
env.execute("FileSinkProgram");
Thread.sleep(300000);
}
public static User getUser() {
User u = new User();
u.setId(1L);
u.setName("raj");
return u;
}
}
I wrote this program using this and this as reference. The project is on GitHub here.
When I run the program, the in-progress files are created, but checkpoints never commit the temporary files. I added Thread.sleep(300000); but the in-progress files were still not rolled over to finished .avro files.
I have also waited on the main thread for an hour, with no luck.
Any idea what is stopping the in-progress files from moving to the finished state?
This problem is mainly because the source is a BOUNDED source: the entire Flink job finishes before any checkpoint has been executed, so the pending files are never committed.
You can refer to the following example and generate the records from a long-running source instead of fromCollection:
/** Data-generating source function. */
public static final class Generator
implements SourceFunction<Tuple2<Integer, Integer>>, CheckpointedFunction {
private static final long serialVersionUID = -2819385275681175792L;
private final int numKeys;
private final int idlenessMs;
private final int recordsToEmit;
private volatile int numRecordsEmitted = 0;
private volatile boolean canceled = false;
private ListState<Integer> state = null;
Generator(final int numKeys, final int idlenessMs, final int durationSeconds) {
this.numKeys = numKeys;
this.idlenessMs = idlenessMs;
this.recordsToEmit = ((durationSeconds * 1000) / idlenessMs) * numKeys;
}
@Override
public void run(final SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
while (numRecordsEmitted < recordsToEmit) {
synchronized (ctx.getCheckpointLock()) {
for (int i = 0; i < numKeys; i++) {
ctx.collect(Tuple2.of(i, numRecordsEmitted));
numRecordsEmitted++;
}
}
Thread.sleep(idlenessMs);
}
while (!canceled) {
Thread.sleep(50);
}
}
@Override
public void cancel() {
canceled = true;
}
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
state =
context.getOperatorStateStore()
.getListState(
new ListStateDescriptor<Integer>(
"state", IntSerializer.INSTANCE));
for (Integer i : state.get()) {
numRecordsEmitted += i;
}
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
state.clear();
state.add(numRecordsEmitted);
}
}
}
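Applied to the job in the question, a minimal sketch of the same idea for the User type could look as follows. The class name UserGenerator, the sleep interval, and the generated field values are assumptions for illustration; the rest of the FileSink wiring stays exactly as in the question, with env.fromCollection(...) replaced by env.addSource(new UserGenerator()).
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import com.myorg.User;
/** Keeps emitting User records long enough for checkpoints to run, so pending files get committed. */
public class UserGenerator implements SourceFunction<User> {
    private static final long serialVersionUID = 1L;
    private volatile boolean running = true;
    @Override
    public void run(SourceContext<User> ctx) throws Exception {
        long id = 0;
        while (running) {
            // Emit under the checkpoint lock so records and checkpoint barriers do not interleave.
            synchronized (ctx.getCheckpointLock()) {
                ctx.collect(new User(id++, "raj"));
            }
            Thread.sleep(100); // keep the job alive across several checkpoint intervals
        }
    }
    @Override
    public void cancel() {
        running = false;
    }
}
Once the source runs longer than the 5 second checkpoint interval, the OnCheckpointRollingPolicy rolls the in-progress files and the pending parts are committed when the checkpoint completes.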
My Flink job has multiple data streams, which I merge with the org.apache.flink.streaming.api.datastream.DataStream#union method.
The problem is that the resulting stream is out of order, and I cannot set up a window to sort the data in the stream.
Sorting union of streams to identify user sessions in Apache Flink
I found the answer there, but com.liam.learn.flink.example.union.UnionStreamDemo.SortFunction#onTimer
is never invoked.
Environment info: Flink version 1.7.0
In general, I would like to sort the unioned data stream without watermarks.
You need watermarks so that the sorting function knows when it can safely emit sorted elements. Without watermarks, you could get a record from stream B that has an earlier date than any of the first N records of stream A, right?
But adding watermarks is easy, especially if you know that "event time" is strictly increasing for any one stream. Below is some code I wrote that extends what David Anderson posted in his answer to the other SO issue you referenced above - hopefully this will get you started.
-- Ken
package com.scaleunlimited.flinksnippets;
import java.util.PriorityQueue;
import java.util.Random;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.TimerService;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
import org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor;
import org.apache.flink.util.Collector;
import org.junit.Test;
public class MergeAndSortStreamsTest {
@Test
public void testMergeAndSort() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment(2);
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
DataStream<Event> streamA = env.addSource(new EventSource("A"))
.assignTimestampsAndWatermarks(new EventTSWAssigner());
DataStream<Event> streamB = env.addSource(new EventSource("B"))
.assignTimestampsAndWatermarks(new EventTSWAssigner());
streamA.union(streamB)
.keyBy(r -> r.getKey())
.process(new SortByTimestampFunction())
.print();
env.execute();
}
private static class Event implements Comparable<Event> {
private String _label;
private long _timestamp;
public Event(String label, long timestamp) {
_label = label;
_timestamp = timestamp;
}
public String getLabel() {
return _label;
}
public void setLabel(String label) {
_label = label;
}
public String getKey() {
return "1";
}
public long getTimestamp() {
return _timestamp;
}
public void setTimestamp(long timestamp) {
_timestamp = timestamp;
}
@Override
public String toString() {
return String.format("%s @ %d", _label, _timestamp);
}
@Override
public int compareTo(Event o) {
return Long.compare(_timestamp, o._timestamp);
}
}
@SuppressWarnings("serial")
private static class EventTSWAssigner extends AscendingTimestampExtractor<Event> {
@Override
public long extractAscendingTimestamp(Event element) {
return element.getTimestamp();
}
}
@SuppressWarnings("serial")
private static class SortByTimestampFunction extends KeyedProcessFunction<String, Event, Event> {
private ValueState<PriorityQueue<Event>> queueState = null;
@Override
public void open(Configuration config) {
ValueStateDescriptor<PriorityQueue<Event>> descriptor = new ValueStateDescriptor<>(
// state name
"sorted-events",
// type information of state
TypeInformation.of(new TypeHint<PriorityQueue<Event>>() {
}));
queueState = getRuntimeContext().getState(descriptor);
}
@Override
public void processElement(Event event, Context context, Collector<Event> out) throws Exception {
TimerService timerService = context.timerService();
long currentWatermark = timerService.currentWatermark();
System.out.format("processElement called with watermark %d\n", currentWatermark);
if (context.timestamp() > currentWatermark) {
PriorityQueue<Event> queue = queueState.value();
if (queue == null) {
queue = new PriorityQueue<>(10);
}
queue.add(event);
queueState.update(queue);
timerService.registerEventTimeTimer(event.getTimestamp());
}
}
@Override
public void onTimer(long timestamp, OnTimerContext context, Collector<Event> out) throws Exception {
PriorityQueue<Event> queue = queueState.value();
long watermark = context.timerService().currentWatermark();
System.out.format("onTimer called with watermark %d\n", watermark);
Event head = queue.peek();
while (head != null && head.getTimestamp() <= watermark) {
out.collect(head);
queue.remove(head);
head = queue.peek();
}
}
}
@SuppressWarnings("serial")
private static class EventSource extends RichParallelSourceFunction<Event> {
private String _prefix;
private transient Random _rand;
private transient boolean _running;
private transient int _numEvents;
public EventSource(String prefix) {
_prefix = prefix;
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
_rand = new Random(_prefix.hashCode() + getRuntimeContext().getIndexOfThisSubtask());
}
@Override
public void cancel() {
_running = false;
}
@Override
public void run(SourceContext<Event> context) throws Exception {
_running = true;
_numEvents = 0;
long timestamp = System.currentTimeMillis() + _rand.nextInt(10);
while (_running && (_numEvents < 100)) {
long deltaTime = timestamp - System.currentTimeMillis();
if (deltaTime > 0) {
Thread.sleep(deltaTime);
}
context.collect(new Event(_prefix, timestamp));
_numEvents++;
// Generate a timestamp every 5...15 ms, average is 10.
timestamp += (5 + _rand.nextInt(10));
}
}
}
}
While ingesting JSON data from Kafka and saving it as Parquet files to be loaded into Hive, I ran into the same issue mentioned in Flink BucketingSink with Custom AvroParquetWriter create empty file.
Does anyone know how to resolve it? Thank you. I am using Apache Flink 1.4.0 + HDFS 2.7.3.
You can directly implement the Writer interface. It could look like the following:
import org.apache.flink.util.Preconditions;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.avro.AvroParquetWriter;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import java.io.IOException;
/**
* Parquet writer.
*
* @param <T>
*/
public class ParquetSinkWriter<T extends GenericRecord> implements Writer<T> {
private static final long serialVersionUID = -975302556515811398L;
private final CompressionCodecName compressionCodecName = CompressionCodecName.SNAPPY;
private final int pageSize = 64 * 1024;
private final String schemaRepresentation;
private transient Schema schema;
private transient ParquetWriter<GenericRecord> writer;
private transient Path path;
private int position;
public ParquetSinkWriter(String schemaRepresentation) {
this.schemaRepresentation = Preconditions.checkNotNull(schemaRepresentation);
}
@Override
public void open(FileSystem fs, Path path) throws IOException {
this.position = 0;
this.path = path;
if (writer != null) {
writer.close();
}
writer = createWriter();
}
@Override
public long flush() throws IOException {
Preconditions.checkNotNull(writer);
position += writer.getDataSize();
writer.close();
writer = createWriter();
return position;
}
@Override
public long getPos() throws IOException {
Preconditions.checkNotNull(writer);
return position + writer.getDataSize();
}
@Override
public void close() throws IOException {
if (writer != null) {
writer.close();
writer = null;
}
}
@Override
public void write(T element) throws IOException {
Preconditions.checkNotNull(writer);
writer.write(element);
}
@Override
public Writer<T> duplicate() {
return new ParquetSinkWriter<>(schemaRepresentation);
}
private ParquetWriter<GenericRecord> createWriter() throws IOException {
if (schema == null) {
schema = new Schema.Parser().parse(schemaRepresentation);
}
return AvroParquetWriter.<GenericRecord>builder(path)
.withSchema(schema)
.withDataModel(new GenericData())
.withCompressionCodec(compressionCodecName)
.withPageSize(pageSize)
.build();
}
}
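A short usage sketch of how this Writer might be wired into the BucketingSink from flink-connector-filesystem is shown below. The class and method names, the HDFS path, the bucket format, and the origin of schemaString (the Avro schema JSON) are placeholders, not part of the original answer; the import paths correspond to Flink 1.4's bucketing connector.
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
import org.apache.flink.streaming.connectors.fs.bucketing.DateTimeBucketer;
public class ParquetSinkWiring {
    /** Attaches the ParquetSinkWriter above to a stream of GenericRecords. */
    public static void addParquetSink(DataStream<GenericRecord> stream, String schemaString) {
        BucketingSink<GenericRecord> sink = new BucketingSink<>("hdfs://localhost:9000/tmp/");
        sink.setBucketer(new DateTimeBucketer<GenericRecord>("yyyy-MM-dd--HHmm"));
        // The schema JSON is passed to the writer so it can recreate the AvroParquetWriter on every flush/roll.
        sink.setWriter(new ParquetSinkWriter<GenericRecord>(schemaString));
        stream.addSink(sink);
    }
}
The stream itself would typically come from a map that converts the Kafka JSON into GenericRecords matching that schema.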
I have created a writer for BucketingSink. The sink and writer run without errors, but when the writer writes Avro GenericRecords to Parquet, the files move from in-progress through pending to completed, yet they are empty (0 bytes). Can anyone tell me what is wrong with the code? I have tried placing the initialization of AvroParquetWriter in the open() method, but the result is the same.
When debugging the code, I confirmed that writer.write(element) does execute and that element contains the Avro GenericRecord data.
Streaming Data
BucketingSink<DataEventRecord> sink =
new BucketingSink<DataEventRecord>("hdfs://localhost:9000/tmp/");
sink.setBucketer(new DateTimeBucketer<DataEventRecord>("yyyy-MM-dd--HHmm"));
sink.setWriter(new ParquetSinkWriter<DataEventRecord>());
ParquetSinkWriter
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.streaming.connectors.fs.StreamWriterBase;
import org.apache.flink.streaming.connectors.fs.Writer;
import org.apache.parquet.avro.AvroParquetWriter;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import com.any.DataEventRecord;
public class ParquetSinkWriter<T> extends StreamWriterBase<T> {
private transient ParquetWriter<GenericRecord> writer;
private Path path;
private FileSystem fs;
private final CompressionCodecName compressionCodecName = CompressionCodecName.SNAPPY;
private final int blockSize = 256 * 1024 * 1024;
private final int pageSize = 64 * 1024;
@Override
// workaround
public void open(FileSystem fs, Path path) throws IOException {
super.open(fs, path);
this.path = path;
this.fs = fs;
}
@Override
public void write(T event) throws IOException {
DataEventRecord element = (DataEventRecord) event;
if (writer == null) {
writer = new AvroParquetWriter<GenericRecord>(this.path, element.getSchema(), compressionCodecName, blockSize, pageSize);
}
if (writer != null) {
GenericRecord datum = element.getRecord();
writer.write(datum);
}
}
@Override
public void close() throws IOException {
if (writer != null) {
writer.close();
}
super.close();
}
@Override
public Writer<T> duplicate() {
return new ParquetSinkWriter<T>();
}
}
Directly implementing the Writer interface should look like the ParquetSinkWriter shown in the answer to the previous question (the same code applies here): it takes the Avro schema JSON in its constructor and recreates the AvroParquetWriter on open() and after every flush() so that getPos() reports accurate progress.
I am trying to intercept a message to skip the HTTP request and proceed with my route.
Below is the class you can copy/paste to try it out.
Using camel-test, camel-core, camel-http4 2.10.2 and httpclient-osgi, httpcore-osgi 4.2.2.
Here is the code:
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.builder.AdviceWithRouteBuilder;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.test.junit4.CamelTestSupport;
import org.junit.Test;
/**
* Created with IntelliJ IDEA.
* User: lleclerc
* Date: 12-11-28
* Time: 16:34
* To change this template use File | Settings | File Templates.
*/
public class IsUseAdviceWithJUnit4Test extends CamelTestSupport {
private String providerEndPointURI = "http://stackoverflow.com";
private String timerEndPointURI = "timer://myTimer";
private String mockEndPointURI = "mock:myMock";
private String directEndPointURI = "direct:myDirect";
private boolean messageIntercepted;
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from(timerEndPointURI + "?fixedRate=true&delay=1000&period=1000")
.to(providerEndPointURI + "?throwExceptionOnFailure=false")
.to(mockEndPointURI);
}
};
}
@Test
public void testIsUseAdviceWith() throws Exception {
messageIntercepted = false;
context.getRouteDefinitions().get(0).adviceWith(context, new AdviceWithRouteBuilder() {
@Override
public void configure() throws Exception {
replaceFromWith(directEndPointURI);
mockEndpoints();
interceptSendToEndpoint(providerEndPointURI)
.skipSendToOriginalEndpoint()
.process(new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
messageIntercepted = true;
System.out.println("INTERCEPTED");
}
});
}
});
// we must manually start when we are done with all the advice with
context.start();
getMockEndpoint(mockEndPointURI).expectedMessageCount(1);
template.sendBody(directEndPointURI, "a trigger");
assertMockEndpointsSatisfied();
assertEquals(true, messageIntercepted);
assertNotNull(context.hasEndpoint(directEndPointURI));
assertNotNull(context.hasEndpoint("mock:" + directEndPointURI));
assertNotNull(context.hasEndpoint(mockEndPointURI));
}
@Override
public boolean isUseAdviceWith() {
return true;
}
@Override
public boolean isUseRouteBuilder() {
return true;
}
}
Thank you for your help!
There were bugs inside camel-http4:
http://camel.465427.n5.nabble.com/Found-a-bug-with-camel-http4-td5723733.html
http://camel.465427.n5.nabble.com/Test-Intercept-with-adviceWith-and-http-td5723473.html