Committed by
Gerrit Code Review
Revamped ClusterCommunicationService API
Change-Id: I9326369de3d2413b0882b324979d10483c093de9
Showing
24 changed files
with
461 additions
and
373 deletions
... | @@ -219,7 +219,7 @@ public class IntentPerfCollector { | ... | @@ -219,7 +219,7 @@ public class IntentPerfCollector { |
219 | 219 | ||
220 | private void broadcastSample(long time, NodeId nodeId, double overallRate, double currentRate) { | 220 | private void broadcastSample(long time, NodeId nodeId, double overallRate, double currentRate) { |
221 | String data = String.format("%d|%f|%f", time, overallRate, currentRate); | 221 | String data = String.format("%d|%f|%f", time, overallRate, currentRate); |
222 | - communicationService.broadcast(new ClusterMessage(nodeId, SAMPLE, data.getBytes())); | 222 | + communicationService.broadcast(data, SAMPLE, str -> str.getBytes()); |
223 | } | 223 | } |
224 | 224 | ||
225 | private class InternalSampleCollector implements ClusterMessageHandler { | 225 | private class InternalSampleCollector implements ClusterMessageHandler { | ... | ... |
... | @@ -249,14 +249,14 @@ public class IntentPerfInstaller { | ... | @@ -249,14 +249,14 @@ public class IntentPerfInstaller { |
249 | public void start() { | 249 | public void start() { |
250 | if (stopped) { | 250 | if (stopped) { |
251 | stopped = false; | 251 | stopped = false; |
252 | - communicationService.broadcast(new ClusterMessage(nodeId, CONTROL, START.getBytes())); | 252 | + communicationService.broadcast(START, CONTROL, str -> str.getBytes()); |
253 | startTestRun(); | 253 | startTestRun(); |
254 | } | 254 | } |
255 | } | 255 | } |
256 | 256 | ||
257 | public void stop() { | 257 | public void stop() { |
258 | if (!stopped) { | 258 | if (!stopped) { |
259 | - communicationService.broadcast(new ClusterMessage(nodeId, CONTROL, STOP.getBytes())); | 259 | + communicationService.broadcast(STOP, CONTROL, str -> str.getBytes()); |
260 | stopTestRun(); | 260 | stopTestRun(); |
261 | } | 261 | } |
262 | } | 262 | } | ... | ... |
... | @@ -15,13 +15,16 @@ | ... | @@ -15,13 +15,16 @@ |
15 | */ | 15 | */ |
16 | package org.onosproject.store.cluster.messaging; | 16 | package org.onosproject.store.cluster.messaging; |
17 | 17 | ||
18 | -import com.google.common.util.concurrent.ListenableFuture; | 18 | +import java.util.Set; |
19 | +import java.util.concurrent.CompletableFuture; | ||
20 | +import java.util.concurrent.ExecutorService; | ||
21 | +import java.util.function.Consumer; | ||
22 | +import java.util.function.Function; | ||
23 | + | ||
19 | import org.onosproject.cluster.NodeId; | 24 | import org.onosproject.cluster.NodeId; |
20 | 25 | ||
21 | -import java.io.IOException; | 26 | +import com.google.common.util.concurrent.ListenableFuture; |
22 | -import java.util.concurrent.ExecutorService; | ||
23 | 27 | ||
24 | -// TODO: remove IOExceptions? | ||
25 | /** | 28 | /** |
26 | * Service for assisting communications between controller cluster nodes. | 29 | * Service for assisting communications between controller cluster nodes. |
27 | */ | 30 | */ |
... | @@ -33,6 +36,7 @@ public interface ClusterCommunicationService { | ... | @@ -33,6 +36,7 @@ public interface ClusterCommunicationService { |
33 | * @param message message to send | 36 | * @param message message to send |
34 | * @return true if the message was sent successfully to all nodes; false otherwise. | 37 | * @return true if the message was sent successfully to all nodes; false otherwise. |
35 | */ | 38 | */ |
39 | + @Deprecated | ||
36 | boolean broadcast(ClusterMessage message); | 40 | boolean broadcast(ClusterMessage message); |
37 | 41 | ||
38 | /** | 42 | /** |
... | @@ -41,6 +45,7 @@ public interface ClusterCommunicationService { | ... | @@ -41,6 +45,7 @@ public interface ClusterCommunicationService { |
41 | * @param message message to send | 45 | * @param message message to send |
42 | * @return true if the message was sent successfully to all nodes; false otherwise. | 46 | * @return true if the message was sent successfully to all nodes; false otherwise. |
43 | */ | 47 | */ |
48 | + @Deprecated | ||
44 | boolean broadcastIncludeSelf(ClusterMessage message); | 49 | boolean broadcastIncludeSelf(ClusterMessage message); |
45 | 50 | ||
46 | /** | 51 | /** |
... | @@ -50,6 +55,7 @@ public interface ClusterCommunicationService { | ... | @@ -50,6 +55,7 @@ public interface ClusterCommunicationService { |
50 | * @param toNodeId node identifier | 55 | * @param toNodeId node identifier |
51 | * @return true if the message was sent successfully; false otherwise. | 56 | * @return true if the message was sent successfully; false otherwise. |
52 | */ | 57 | */ |
58 | + @Deprecated | ||
53 | boolean unicast(ClusterMessage message, NodeId toNodeId); | 59 | boolean unicast(ClusterMessage message, NodeId toNodeId); |
54 | 60 | ||
55 | /** | 61 | /** |
... | @@ -59,6 +65,7 @@ public interface ClusterCommunicationService { | ... | @@ -59,6 +65,7 @@ public interface ClusterCommunicationService { |
59 | * @param nodeIds recipient node identifiers | 65 | * @param nodeIds recipient node identifiers |
60 | * @return true if the message was sent successfully to all nodes in the group; false otherwise. | 66 | * @return true if the message was sent successfully to all nodes in the group; false otherwise. |
61 | */ | 67 | */ |
68 | + @Deprecated | ||
62 | boolean multicast(ClusterMessage message, Iterable<NodeId> nodeIds); | 69 | boolean multicast(ClusterMessage message, Iterable<NodeId> nodeIds); |
63 | 70 | ||
64 | /** | 71 | /** |
... | @@ -66,27 +73,121 @@ public interface ClusterCommunicationService { | ... | @@ -66,27 +73,121 @@ public interface ClusterCommunicationService { |
66 | * @param message message to send | 73 | * @param message message to send |
67 | * @param toNodeId recipient node identifier | 74 | * @param toNodeId recipient node identifier |
68 | * @return reply future. | 75 | * @return reply future. |
69 | - * @throws IOException when I/O exception of some sort has occurred | ||
70 | */ | 76 | */ |
71 | - ListenableFuture<byte[]> sendAndReceive(ClusterMessage message, NodeId toNodeId) throws IOException; | 77 | + @Deprecated |
78 | + ListenableFuture<byte[]> sendAndReceive(ClusterMessage message, NodeId toNodeId); | ||
72 | 79 | ||
73 | /** | 80 | /** |
74 | * Adds a new subscriber for the specified message subject. | 81 | * Adds a new subscriber for the specified message subject. |
75 | * | 82 | * |
76 | * @param subject message subject | 83 | * @param subject message subject |
77 | * @param subscriber message subscriber | 84 | * @param subscriber message subscriber |
85 | + * @param executor executor to use for running handler. | ||
78 | */ | 86 | */ |
79 | @Deprecated | 87 | @Deprecated |
80 | - void addSubscriber(MessageSubject subject, ClusterMessageHandler subscriber); | 88 | + void addSubscriber(MessageSubject subject, ClusterMessageHandler subscriber, ExecutorService executor); |
89 | + | ||
90 | + /** | ||
91 | + * Broadcasts a message to all controller nodes. | ||
92 | + * | ||
93 | + * @param message message to send | ||
94 | + * @param subject message subject | ||
95 | + * @param encoder function for encoding message to byte[] | ||
96 | + * @param <M> message type | ||
97 | + */ | ||
98 | + <M> void broadcast(M message, | ||
99 | + MessageSubject subject, | ||
100 | + Function<M, byte[]> encoder); | ||
101 | + | ||
102 | + /** | ||
103 | + * Broadcasts a message to all controller nodes including self. | ||
104 | + * | ||
105 | + * @param message message to send | ||
106 | + * @param subject message subject | ||
107 | + * @param encoder function for encoding message to byte[] | ||
108 | + * @param <M> message type | ||
109 | + */ | ||
110 | + <M> void broadcastIncludeSelf(M message, | ||
111 | + MessageSubject subject, | ||
112 | + Function<M, byte[]> encoder); | ||
113 | + | ||
114 | + /** | ||
115 | + * Sends a message to the specified controller node. | ||
116 | + * | ||
117 | + * @param message message to send | ||
118 | + * @param subject message subject | ||
119 | + * @param encoder function for encoding message to byte[] | ||
120 | + * @param toNodeId destination node identifier | ||
121 | + * @param <M> message type | ||
122 | + * @return true if the message was sent successfully; false otherwise | ||
123 | + */ | ||
124 | + <M> boolean unicast(M message, | ||
125 | + MessageSubject subject, | ||
126 | + Function<M, byte[]> encoder, | ||
127 | + NodeId toNodeId); | ||
128 | + | ||
129 | + /** | ||
130 | + * Multicasts a message to a set of controller nodes. | ||
131 | + * | ||
132 | + * @param message message to send | ||
133 | + * @param subject message subject | ||
134 | + * @param encoder function for encoding message to byte[] | ||
135 | + * @param nodeIds recipient node identifiers | ||
136 | + * @param <M> message type | ||
137 | + */ | ||
138 | + <M> void multicast(M message, | ||
139 | + MessageSubject subject, | ||
140 | + Function<M, byte[]> encoder, | ||
141 | + Set<NodeId> nodeIds); | ||
142 | + | ||
143 | + /** | ||
144 | + * Sends a message and expects a reply. | ||
145 | + * | ||
146 | + * @param message message to send | ||
147 | + * @param subject message subject | ||
148 | + * @param encoder function for encoding request to byte[] | ||
149 | + * @param decoder function for decoding response from byte[] | ||
150 | + * @param toNodeId recipient node identifier | ||
151 | + * @param <M> request type | ||
152 | + * @param <R> reply type | ||
153 | + * @return reply future | ||
154 | + */ | ||
155 | + <M, R> CompletableFuture<R> sendAndReceive(M message, | ||
156 | + MessageSubject subject, | ||
157 | + Function<M, byte[]> encoder, | ||
158 | + Function<byte[], R> decoder, | ||
159 | + NodeId toNodeId); | ||
81 | 160 | ||
82 | /** | 161 | /** |
83 | * Adds a new subscriber for the specified message subject. | 162 | * Adds a new subscriber for the specified message subject. |
84 | * | 163 | * |
85 | * @param subject message subject | 164 | * @param subject message subject |
86 | - * @param subscriber message subscriber | 165 | + * @param decoder decoder for resurrecting incoming message |
87 | - * @param executor executor to use for running handler. | 166 | + * @param handler handler function that process the incoming message and produces a reply |
167 | + * @param encoder encoder for serializing reply | ||
168 | + * @param executor executor to run this handler on | ||
169 | + * @param <M> incoming message type | ||
170 | + * @param <R> reply message type | ||
88 | */ | 171 | */ |
89 | - void addSubscriber(MessageSubject subject, ClusterMessageHandler subscriber, ExecutorService executor); | 172 | + <M, R> void addSubscriber(MessageSubject subject, |
173 | + Function<byte[], M> decoder, | ||
174 | + Function<M, R> handler, | ||
175 | + Function<R, byte[]> encoder, | ||
176 | + ExecutorService executor); | ||
177 | + | ||
178 | + /** | ||
179 | + * Adds a new subscriber for the specified message subject. | ||
180 | + * | ||
181 | + * @param subject message subject | ||
182 | + * @param decoder decoder to resurrecting incoming message | ||
183 | + * @param handler handler for handling message | ||
184 | + * @param executor executor to run this handler on | ||
185 | + * @param <M> incoming message type | ||
186 | + */ | ||
187 | + <M> void addSubscriber(MessageSubject subject, | ||
188 | + Function<byte[], M> decoder, | ||
189 | + Consumer<M> handler, | ||
190 | + ExecutorService executor); | ||
90 | 191 | ||
91 | /** | 192 | /** |
92 | * Removes a subscriber for the specified message subject. | 193 | * Removes a subscriber for the specified message subject. |
... | @@ -94,5 +195,4 @@ public interface ClusterCommunicationService { | ... | @@ -94,5 +195,4 @@ public interface ClusterCommunicationService { |
94 | * @param subject message subject | 195 | * @param subject message subject |
95 | */ | 196 | */ |
96 | void removeSubscriber(MessageSubject subject); | 197 | void removeSubscriber(MessageSubject subject); |
97 | - | ||
98 | } | 198 | } | ... | ... |
... | @@ -17,7 +17,6 @@ package org.onosproject.store.app; | ... | @@ -17,7 +17,6 @@ package org.onosproject.store.app; |
17 | 17 | ||
18 | import com.google.common.base.Charsets; | 18 | import com.google.common.base.Charsets; |
19 | import com.google.common.collect.ImmutableSet; | 19 | import com.google.common.collect.ImmutableSet; |
20 | -import com.google.common.util.concurrent.ListenableFuture; | ||
21 | 20 | ||
22 | import org.apache.felix.scr.annotations.Activate; | 21 | import org.apache.felix.scr.annotations.Activate; |
23 | import org.apache.felix.scr.annotations.Component; | 22 | import org.apache.felix.scr.annotations.Component; |
... | @@ -54,13 +53,14 @@ import org.onosproject.store.service.StorageService; | ... | @@ -54,13 +53,14 @@ import org.onosproject.store.service.StorageService; |
54 | import org.slf4j.Logger; | 53 | import org.slf4j.Logger; |
55 | 54 | ||
56 | import java.io.ByteArrayInputStream; | 55 | import java.io.ByteArrayInputStream; |
57 | -import java.io.IOException; | ||
58 | import java.io.InputStream; | 56 | import java.io.InputStream; |
59 | import java.util.Set; | 57 | import java.util.Set; |
60 | import java.util.concurrent.CountDownLatch; | 58 | import java.util.concurrent.CountDownLatch; |
61 | import java.util.concurrent.ExecutorService; | 59 | import java.util.concurrent.ExecutorService; |
62 | import java.util.concurrent.Executors; | 60 | import java.util.concurrent.Executors; |
63 | import java.util.concurrent.ScheduledExecutorService; | 61 | import java.util.concurrent.ScheduledExecutorService; |
62 | +import java.util.function.Function; | ||
63 | + | ||
64 | import static com.google.common.io.ByteStreams.toByteArray; | 64 | import static com.google.common.io.ByteStreams.toByteArray; |
65 | import static java.util.concurrent.TimeUnit.MILLISECONDS; | 65 | import static java.util.concurrent.TimeUnit.MILLISECONDS; |
66 | import static org.onlab.util.Tools.groupedThreads; | 66 | import static org.onlab.util.Tools.groupedThreads; |
... | @@ -351,22 +351,34 @@ public class GossipApplicationStore extends ApplicationArchive | ... | @@ -351,22 +351,34 @@ public class GossipApplicationStore extends ApplicationArchive |
351 | */ | 351 | */ |
352 | private void fetchBits(Application app) { | 352 | private void fetchBits(Application app) { |
353 | ControllerNode localNode = clusterService.getLocalNode(); | 353 | ControllerNode localNode = clusterService.getLocalNode(); |
354 | - ClusterMessage message = new ClusterMessage(localNode.id(), APP_BITS_REQUEST, | ||
355 | - app.id().name().getBytes(Charsets.UTF_8)); | ||
356 | - //Map<ControllerNode, ListenableFuture<byte[]>> futures = new HashMap<>(); | ||
357 | CountDownLatch latch = new CountDownLatch(1); | 354 | CountDownLatch latch = new CountDownLatch(1); |
358 | 355 | ||
359 | // FIXME: send message with name & version to make sure we don't get served old bits | 356 | // FIXME: send message with name & version to make sure we don't get served old bits |
360 | 357 | ||
361 | log.info("Downloading bits for application {}", app.id().name()); | 358 | log.info("Downloading bits for application {}", app.id().name()); |
362 | for (ControllerNode node : clusterService.getNodes()) { | 359 | for (ControllerNode node : clusterService.getNodes()) { |
363 | - try { | 360 | + if (latch.getCount() == 0) { |
364 | - ListenableFuture<byte[]> future = clusterCommunicator.sendAndReceive(message, node.id()); | 361 | + break; |
365 | - future.addListener(new InternalBitListener(app, node, future, latch), executor); | 362 | + } |
366 | - } catch (IOException e) { | 363 | + if (node.equals(localNode)) { |
367 | - log.debug("Unable to request bits for application {} from node {}", | 364 | + continue; |
365 | + } | ||
366 | + clusterCommunicator.sendAndReceive(app.id().name(), | ||
367 | + APP_BITS_REQUEST, | ||
368 | + s -> s.getBytes(Charsets.UTF_8), | ||
369 | + Function.identity(), | ||
370 | + node.id()) | ||
371 | + .whenCompleteAsync((bits, error) -> { | ||
372 | + if (error == null && latch.getCount() > 0) { | ||
373 | + saveApplication(new ByteArrayInputStream(bits)); | ||
374 | + log.info("Downloaded bits for application {} from node {}", | ||
368 | app.id().name(), node.id()); | 375 | app.id().name(), node.id()); |
376 | + latch.countDown(); | ||
377 | + } else if (error != null) { | ||
378 | + log.warn("Unable to fetch bits for application {} from node {}", | ||
379 | + app.id().name(), node.id(), error); | ||
369 | } | 380 | } |
381 | + }, executor); | ||
370 | } | 382 | } |
371 | 383 | ||
372 | try { | 384 | try { |
... | @@ -392,41 +404,6 @@ public class GossipApplicationStore extends ApplicationArchive | ... | @@ -392,41 +404,6 @@ public class GossipApplicationStore extends ApplicationArchive |
392 | } | 404 | } |
393 | } | 405 | } |
394 | } | 406 | } |
395 | - | ||
396 | - /** | ||
397 | - * Processes completed fetch requests. | ||
398 | - */ | ||
399 | - private class InternalBitListener implements Runnable { | ||
400 | - private final Application app; | ||
401 | - private final ControllerNode node; | ||
402 | - private final ListenableFuture<byte[]> future; | ||
403 | - private final CountDownLatch latch; | ||
404 | - | ||
405 | - public InternalBitListener(Application app, ControllerNode node, | ||
406 | - ListenableFuture<byte[]> future, CountDownLatch latch) { | ||
407 | - this.app = app; | ||
408 | - this.node = node; | ||
409 | - this.future = future; | ||
410 | - this.latch = latch; | ||
411 | - } | ||
412 | - | ||
413 | - @Override | ||
414 | - public void run() { | ||
415 | - if (latch.getCount() > 0 && !future.isCancelled()) { | ||
416 | - try { | ||
417 | - byte[] bits = future.get(1, MILLISECONDS); | ||
418 | - saveApplication(new ByteArrayInputStream(bits)); | ||
419 | - log.info("Downloaded bits for application {} from node {}", | ||
420 | - app.id().name(), node.id()); | ||
421 | - latch.countDown(); | ||
422 | - } catch (Exception e) { | ||
423 | - log.warn("Unable to fetch bits for application {} from node {}", | ||
424 | - app.id().name(), node.id()); | ||
425 | - } | ||
426 | - } | ||
427 | - } | ||
428 | - } | ||
429 | - | ||
430 | /** | 407 | /** |
431 | * Prunes applications which are not in the map, but are on disk. | 408 | * Prunes applications which are not in the map, but are on disk. |
432 | */ | 409 | */ |
... | @@ -449,6 +426,4 @@ public class GossipApplicationStore extends ApplicationArchive | ... | @@ -449,6 +426,4 @@ public class GossipApplicationStore extends ApplicationArchive |
449 | appDesc.origin(), appDesc.permissions(), | 426 | appDesc.origin(), appDesc.permissions(), |
450 | appDesc.featuresRepo(), appDesc.features()); | 427 | appDesc.featuresRepo(), appDesc.features()); |
451 | } | 428 | } |
452 | - | ||
453 | } | 429 | } |
454 | - | ... | ... |
... | @@ -419,10 +419,9 @@ public class HazelcastLeadershipService implements LeadershipService { | ... | @@ -419,10 +419,9 @@ public class HazelcastLeadershipService implements LeadershipService { |
419 | // Dispatch to all instances | 419 | // Dispatch to all instances |
420 | 420 | ||
421 | clusterCommunicator.broadcastIncludeSelf( | 421 | clusterCommunicator.broadcastIncludeSelf( |
422 | - new ClusterMessage( | 422 | + leadershipEvent, |
423 | - clusterService.getLocalNode().id(), | ||
424 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, | 423 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, |
425 | - SERIALIZER.encode(leadershipEvent))); | 424 | + SERIALIZER::encode); |
426 | } else { | 425 | } else { |
427 | // | 426 | // |
428 | // Test if time to expire a stale leader | 427 | // Test if time to expire a stale leader |
... | @@ -491,11 +490,11 @@ public class HazelcastLeadershipService implements LeadershipService { | ... | @@ -491,11 +490,11 @@ public class HazelcastLeadershipService implements LeadershipService { |
491 | leadershipEvent = new LeadershipEvent( | 490 | leadershipEvent = new LeadershipEvent( |
492 | LeadershipEvent.Type.LEADER_ELECTED, | 491 | LeadershipEvent.Type.LEADER_ELECTED, |
493 | new Leadership(topicName, localNodeId, myLastLeaderTerm, 0)); | 492 | new Leadership(topicName, localNodeId, myLastLeaderTerm, 0)); |
493 | + | ||
494 | clusterCommunicator.broadcastIncludeSelf( | 494 | clusterCommunicator.broadcastIncludeSelf( |
495 | - new ClusterMessage( | 495 | + leadershipEvent, |
496 | - clusterService.getLocalNode().id(), | ||
497 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, | 496 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, |
498 | - SERIALIZER.encode(leadershipEvent))); | 497 | + SERIALIZER::encode); |
499 | } | 498 | } |
500 | 499 | ||
501 | // Sleep forever until interrupted | 500 | // Sleep forever until interrupted |
... | @@ -519,11 +518,12 @@ public class HazelcastLeadershipService implements LeadershipService { | ... | @@ -519,11 +518,12 @@ public class HazelcastLeadershipService implements LeadershipService { |
519 | leadershipEvent = new LeadershipEvent( | 518 | leadershipEvent = new LeadershipEvent( |
520 | LeadershipEvent.Type.LEADER_BOOTED, | 519 | LeadershipEvent.Type.LEADER_BOOTED, |
521 | new Leadership(topicName, localNodeId, myLastLeaderTerm, 0)); | 520 | new Leadership(topicName, localNodeId, myLastLeaderTerm, 0)); |
521 | + | ||
522 | clusterCommunicator.broadcastIncludeSelf( | 522 | clusterCommunicator.broadcastIncludeSelf( |
523 | - new ClusterMessage( | 523 | + leadershipEvent, |
524 | - clusterService.getLocalNode().id(), | ||
525 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, | 524 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, |
526 | - SERIALIZER.encode(leadershipEvent))); | 525 | + SERIALIZER::encode); |
526 | + | ||
527 | if (leaderLock.isLockedByCurrentThread()) { | 527 | if (leaderLock.isLockedByCurrentThread()) { |
528 | leaderLock.unlock(); | 528 | leaderLock.unlock(); |
529 | } | 529 | } | ... | ... |
... | @@ -15,7 +15,6 @@ | ... | @@ -15,7 +15,6 @@ |
15 | */ | 15 | */ |
16 | package org.onosproject.store.cluster.messaging.impl; | 16 | package org.onosproject.store.cluster.messaging.impl; |
17 | 17 | ||
18 | -import com.google.common.util.concurrent.ListenableFuture; | ||
19 | import org.apache.felix.scr.annotations.Activate; | 18 | import org.apache.felix.scr.annotations.Activate; |
20 | import org.apache.felix.scr.annotations.Component; | 19 | import org.apache.felix.scr.annotations.Component; |
21 | import org.apache.felix.scr.annotations.Deactivate; | 20 | import org.apache.felix.scr.annotations.Deactivate; |
... | @@ -37,8 +36,17 @@ import org.onosproject.store.cluster.messaging.MessageSubject; | ... | @@ -37,8 +36,17 @@ import org.onosproject.store.cluster.messaging.MessageSubject; |
37 | import org.slf4j.Logger; | 36 | import org.slf4j.Logger; |
38 | import org.slf4j.LoggerFactory; | 37 | import org.slf4j.LoggerFactory; |
39 | 38 | ||
39 | +import com.google.common.base.Objects; | ||
40 | +import com.google.common.util.concurrent.ListenableFuture; | ||
41 | +import com.google.common.util.concurrent.SettableFuture; | ||
42 | + | ||
40 | import java.io.IOException; | 43 | import java.io.IOException; |
44 | +import java.util.Set; | ||
45 | +import java.util.concurrent.CompletableFuture; | ||
41 | import java.util.concurrent.ExecutorService; | 46 | import java.util.concurrent.ExecutorService; |
47 | +import java.util.function.Consumer; | ||
48 | +import java.util.function.Function; | ||
49 | +import java.util.stream.Collectors; | ||
42 | 50 | ||
43 | import static com.google.common.base.Preconditions.checkArgument; | 51 | import static com.google.common.base.Preconditions.checkArgument; |
44 | 52 | ||
... | @@ -122,46 +130,101 @@ public class ClusterCommunicationManager | ... | @@ -122,46 +130,101 @@ public class ClusterCommunicationManager |
122 | return unicastUnchecked(message.subject(), message.getBytes(), toNodeId); | 130 | return unicastUnchecked(message.subject(), message.getBytes(), toNodeId); |
123 | } | 131 | } |
124 | 132 | ||
125 | - private boolean unicast(MessageSubject subject, byte[] payload, NodeId toNodeId) throws IOException { | 133 | + @Override |
126 | - ControllerNode node = clusterService.getNode(toNodeId); | 134 | + public ListenableFuture<byte[]> sendAndReceive(ClusterMessage message, NodeId toNodeId) { |
127 | - checkArgument(node != null, "Unknown nodeId: %s", toNodeId); | 135 | + SettableFuture<byte[]> response = SettableFuture.create(); |
128 | - Endpoint nodeEp = new Endpoint(node.ip(), node.tcpPort()); | 136 | + sendAndReceive(message.subject(), message.getBytes(), toNodeId).whenComplete((r, e) -> { |
129 | - try { | 137 | + if (e == null) { |
130 | - messagingService.sendAsync(nodeEp, subject.value(), payload); | 138 | + response.set(r); |
131 | - return true; | 139 | + } else { |
132 | - } catch (IOException e) { | 140 | + response.setException(e); |
133 | - log.debug("Failed to send cluster message to nodeId: " + toNodeId, e); | ||
134 | - throw e; | ||
135 | } | 141 | } |
142 | + }); | ||
143 | + return response; | ||
136 | } | 144 | } |
137 | 145 | ||
138 | - private boolean unicastUnchecked(MessageSubject subject, byte[] payload, NodeId toNodeId) { | 146 | + @Override |
139 | - try { | 147 | + public <M> void broadcast(M message, |
140 | - return unicast(subject, payload, toNodeId); | 148 | + MessageSubject subject, |
141 | - } catch (IOException e) { | 149 | + Function<M, byte[]> encoder) { |
142 | - return false; | 150 | + multicast(message, |
151 | + subject, | ||
152 | + encoder, | ||
153 | + clusterService.getNodes() | ||
154 | + .stream() | ||
155 | + .filter(node -> !Objects.equal(node, clusterService.getLocalNode())) | ||
156 | + .map(ControllerNode::id) | ||
157 | + .collect(Collectors.toSet())); | ||
158 | + } | ||
159 | + | ||
160 | + @Override | ||
161 | + public <M> void broadcastIncludeSelf(M message, | ||
162 | + MessageSubject subject, | ||
163 | + Function<M, byte[]> encoder) { | ||
164 | + multicast(message, | ||
165 | + subject, | ||
166 | + encoder, | ||
167 | + clusterService.getNodes() | ||
168 | + .stream() | ||
169 | + .map(ControllerNode::id) | ||
170 | + .collect(Collectors.toSet())); | ||
143 | } | 171 | } |
172 | + | ||
173 | + @Override | ||
174 | + public <M> boolean unicast(M message, | ||
175 | + MessageSubject subject, | ||
176 | + Function<M, byte[]> encoder, | ||
177 | + NodeId toNodeId) { | ||
178 | + byte[] payload = new ClusterMessage( | ||
179 | + clusterService.getLocalNode().id(), | ||
180 | + subject, | ||
181 | + encoder.apply(message)).getBytes(); | ||
182 | + return unicastUnchecked(subject, payload, toNodeId); | ||
144 | } | 183 | } |
145 | 184 | ||
146 | @Override | 185 | @Override |
147 | - public ListenableFuture<byte[]> sendAndReceive(ClusterMessage message, NodeId toNodeId) throws IOException { | 186 | + public <M> void multicast(M message, |
187 | + MessageSubject subject, | ||
188 | + Function<M, byte[]> encoder, | ||
189 | + Set<NodeId> nodes) { | ||
190 | + byte[] payload = new ClusterMessage( | ||
191 | + clusterService.getLocalNode().id(), | ||
192 | + subject, | ||
193 | + encoder.apply(message)).getBytes(); | ||
194 | + nodes.forEach(nodeId -> unicastUnchecked(subject, payload, nodeId)); | ||
195 | + } | ||
196 | + | ||
197 | + @Override | ||
198 | + public <M, R> CompletableFuture<R> sendAndReceive(M message, | ||
199 | + MessageSubject subject, | ||
200 | + Function<M, byte[]> encoder, | ||
201 | + Function<byte[], R> decoder, | ||
202 | + NodeId toNodeId) { | ||
203 | + ClusterMessage envelope = new ClusterMessage( | ||
204 | + clusterService.getLocalNode().id(), | ||
205 | + subject, | ||
206 | + encoder.apply(message)); | ||
207 | + return sendAndReceive(subject, envelope.getBytes(), toNodeId).thenApply(decoder); | ||
208 | + } | ||
209 | + | ||
210 | + private boolean unicastUnchecked(MessageSubject subject, byte[] payload, NodeId toNodeId) { | ||
148 | ControllerNode node = clusterService.getNode(toNodeId); | 211 | ControllerNode node = clusterService.getNode(toNodeId); |
149 | checkArgument(node != null, "Unknown nodeId: %s", toNodeId); | 212 | checkArgument(node != null, "Unknown nodeId: %s", toNodeId); |
150 | Endpoint nodeEp = new Endpoint(node.ip(), node.tcpPort()); | 213 | Endpoint nodeEp = new Endpoint(node.ip(), node.tcpPort()); |
151 | try { | 214 | try { |
152 | - return messagingService.sendAndReceive(nodeEp, message.subject().value(), message.getBytes()); | 215 | + messagingService.sendAsync(nodeEp, subject.value(), payload); |
153 | - | 216 | + return true; |
154 | } catch (IOException e) { | 217 | } catch (IOException e) { |
155 | - log.trace("Failed interaction with remote nodeId: " + toNodeId, e); | 218 | + log.debug("Failed to send cluster message to nodeId: " + toNodeId, e); |
156 | - throw e; | 219 | + return false; |
157 | } | 220 | } |
158 | } | 221 | } |
159 | 222 | ||
160 | - @Override | 223 | + private CompletableFuture<byte[]> sendAndReceive(MessageSubject subject, byte[] payload, NodeId toNodeId) { |
161 | - @Deprecated | 224 | + ControllerNode node = clusterService.getNode(toNodeId); |
162 | - public void addSubscriber(MessageSubject subject, | 225 | + checkArgument(node != null, "Unknown nodeId: %s", toNodeId); |
163 | - ClusterMessageHandler subscriber) { | 226 | + Endpoint nodeEp = new Endpoint(node.ip(), node.tcpPort()); |
164 | - messagingService.registerHandler(subject.value(), new InternalClusterMessageHandler(subscriber)); | 227 | + return messagingService.sendAndReceive(nodeEp, subject.value(), payload); |
165 | } | 228 | } |
166 | 229 | ||
167 | @Override | 230 | @Override |
... | @@ -202,6 +265,60 @@ public class ClusterCommunicationManager | ... | @@ -202,6 +265,60 @@ public class ClusterCommunicationManager |
202 | } | 265 | } |
203 | } | 266 | } |
204 | 267 | ||
268 | + @Override | ||
269 | + public <M, R> void addSubscriber(MessageSubject subject, | ||
270 | + Function<byte[], M> decoder, | ||
271 | + Function<M, R> handler, | ||
272 | + Function<R, byte[]> encoder, | ||
273 | + ExecutorService executor) { | ||
274 | + messagingService.registerHandler(subject.value(), | ||
275 | + new InternalMessageResponder<>(decoder, encoder, handler), | ||
276 | + executor); | ||
277 | + } | ||
278 | + | ||
279 | + @Override | ||
280 | + public <M> void addSubscriber(MessageSubject subject, | ||
281 | + Function<byte[], M> decoder, | ||
282 | + Consumer<M> handler, | ||
283 | + ExecutorService executor) { | ||
284 | + messagingService.registerHandler(subject.value(), | ||
285 | + new InternalMessageConsumer<>(decoder, handler), | ||
286 | + executor); | ||
287 | + } | ||
288 | + | ||
289 | + private class InternalMessageResponder<M, R> implements MessageHandler { | ||
290 | + private final Function<byte[], M> decoder; | ||
291 | + private final Function<R, byte[]> encoder; | ||
292 | + private final Function<M, R> handler; | ||
293 | + | ||
294 | + public InternalMessageResponder(Function<byte[], M> decoder, | ||
295 | + Function<R, byte[]> encoder, | ||
296 | + Function<M, R> handler) { | ||
297 | + this.decoder = decoder; | ||
298 | + this.encoder = encoder; | ||
299 | + this.handler = handler; | ||
300 | + } | ||
301 | + @Override | ||
302 | + public void handle(Message message) throws IOException { | ||
303 | + R response = handler.apply(decoder.apply(ClusterMessage.fromBytes(message.payload()).payload())); | ||
304 | + message.respond(encoder.apply(response)); | ||
305 | + } | ||
306 | + } | ||
307 | + | ||
308 | + private class InternalMessageConsumer<M> implements MessageHandler { | ||
309 | + private final Function<byte[], M> decoder; | ||
310 | + private final Consumer<M> consumer; | ||
311 | + | ||
312 | + public InternalMessageConsumer(Function<byte[], M> decoder, Consumer<M> consumer) { | ||
313 | + this.decoder = decoder; | ||
314 | + this.consumer = consumer; | ||
315 | + } | ||
316 | + @Override | ||
317 | + public void handle(Message message) throws IOException { | ||
318 | + consumer.accept(decoder.apply(ClusterMessage.fromBytes(message.payload()).payload())); | ||
319 | + } | ||
320 | + } | ||
321 | + | ||
205 | public static final class InternalClusterMessage extends ClusterMessage { | 322 | public static final class InternalClusterMessage extends ClusterMessage { |
206 | 323 | ||
207 | private final Message rawMessage; | 324 | private final Message rawMessage; | ... | ... |
... | @@ -343,11 +343,9 @@ public class DistributedLeadershipManager implements LeadershipService { | ... | @@ -343,11 +343,9 @@ public class DistributedLeadershipManager implements LeadershipService { |
343 | 343 | ||
344 | private void notifyPeers(LeadershipEvent event) { | 344 | private void notifyPeers(LeadershipEvent event) { |
345 | eventDispatcher.post(event); | 345 | eventDispatcher.post(event); |
346 | - clusterCommunicator.broadcast( | 346 | + clusterCommunicator.broadcast(event, |
347 | - new ClusterMessage( | ||
348 | - clusterService.getLocalNode().id(), | ||
349 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, | 347 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, |
350 | - SERIALIZER.encode(event))); | 348 | + SERIALIZER::encode); |
351 | } | 349 | } |
352 | 350 | ||
353 | private void notifyRemovedLeader(String path, NodeId leader, long epoch, long electedTime) { | 351 | private void notifyRemovedLeader(String path, NodeId leader, long epoch, long electedTime) { |
... | @@ -366,11 +364,9 @@ public class DistributedLeadershipManager implements LeadershipService { | ... | @@ -366,11 +364,9 @@ public class DistributedLeadershipManager implements LeadershipService { |
366 | if (updatedLeader) { | 364 | if (updatedLeader) { |
367 | LeadershipEvent event = new LeadershipEvent(LeadershipEvent.Type.LEADER_BOOTED, oldLeadership); | 365 | LeadershipEvent event = new LeadershipEvent(LeadershipEvent.Type.LEADER_BOOTED, oldLeadership); |
368 | eventDispatcher.post(event); | 366 | eventDispatcher.post(event); |
369 | - clusterCommunicator.broadcast( | 367 | + clusterCommunicator.broadcast(event, |
370 | - new ClusterMessage( | ||
371 | - clusterService.getLocalNode().id(), | ||
372 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, | 368 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, |
373 | - SERIALIZER.encode(event))); | 369 | + SERIALIZER::encode); |
374 | } | 370 | } |
375 | } | 371 | } |
376 | 372 | ||
... | @@ -469,11 +465,9 @@ public class DistributedLeadershipManager implements LeadershipService { | ... | @@ -469,11 +465,9 @@ public class DistributedLeadershipManager implements LeadershipService { |
469 | leaderBoard.forEach((path, leadership) -> { | 465 | leaderBoard.forEach((path, leadership) -> { |
470 | if (leadership.leader().equals(localNodeId)) { | 466 | if (leadership.leader().equals(localNodeId)) { |
471 | LeadershipEvent event = new LeadershipEvent(LeadershipEvent.Type.LEADER_ELECTED, leadership); | 467 | LeadershipEvent event = new LeadershipEvent(LeadershipEvent.Type.LEADER_ELECTED, leadership); |
472 | - clusterCommunicator.broadcast( | 468 | + clusterCommunicator.broadcast(event, |
473 | - new ClusterMessage( | ||
474 | - clusterService.getLocalNode().id(), | ||
475 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, | 469 | LEADERSHIP_EVENT_MESSAGE_SUBJECT, |
476 | - SERIALIZER.encode(event))); | 470 | + SERIALIZER::encode); |
477 | } | 471 | } |
478 | }); | 472 | }); |
479 | } catch (Exception e) { | 473 | } catch (Exception e) { | ... | ... |
... | @@ -304,11 +304,9 @@ public class GossipDeviceStore | ... | @@ -304,11 +304,9 @@ public class GossipDeviceStore |
304 | 304 | ||
305 | DeviceInjectedEvent deviceInjectedEvent = new DeviceInjectedEvent( | 305 | DeviceInjectedEvent deviceInjectedEvent = new DeviceInjectedEvent( |
306 | providerId, deviceId, deviceDescription); | 306 | providerId, deviceId, deviceDescription); |
307 | - ClusterMessage clusterMessage = new ClusterMessage(localNode, DEVICE_INJECTED, | ||
308 | - SERIALIZER.encode(deviceInjectedEvent)); | ||
309 | 307 | ||
310 | // TODO check unicast return value | 308 | // TODO check unicast return value |
311 | - clusterCommunicator.unicast(clusterMessage, deviceNode); | 309 | + clusterCommunicator.unicast(deviceInjectedEvent, DEVICE_INJECTED, SERIALIZER::encode, deviceNode); |
312 | /* error log: | 310 | /* error log: |
313 | log.warn("Failed to process injected device id: {} desc: {} " + | 311 | log.warn("Failed to process injected device id: {} desc: {} " + |
314 | "(cluster messaging failed: {})", | 312 | "(cluster messaging failed: {})", |
... | @@ -555,11 +553,9 @@ public class GossipDeviceStore | ... | @@ -555,11 +553,9 @@ public class GossipDeviceStore |
555 | } | 553 | } |
556 | 554 | ||
557 | PortInjectedEvent portInjectedEvent = new PortInjectedEvent(providerId, deviceId, portDescriptions); | 555 | PortInjectedEvent portInjectedEvent = new PortInjectedEvent(providerId, deviceId, portDescriptions); |
558 | - ClusterMessage clusterMessage = new ClusterMessage( | ||
559 | - localNode, PORT_INJECTED, SERIALIZER.encode(portInjectedEvent)); | ||
560 | 556 | ||
561 | //TODO check unicast return value | 557 | //TODO check unicast return value |
562 | - clusterCommunicator.unicast(clusterMessage, deviceNode); | 558 | + clusterCommunicator.unicast(portInjectedEvent, PORT_INJECTED, SERIALIZER::encode, deviceNode); |
563 | /* error log: | 559 | /* error log: |
564 | log.warn("Failed to process injected ports of device id: {} " + | 560 | log.warn("Failed to process injected ports of device id: {} " + |
565 | "(cluster messaging failed: {})", | 561 | "(cluster messaging failed: {})", |
... | @@ -867,13 +863,8 @@ public class GossipDeviceStore | ... | @@ -867,13 +863,8 @@ public class GossipDeviceStore |
867 | log.debug("{} has control of {}, forwarding remove request", | 863 | log.debug("{} has control of {}, forwarding remove request", |
868 | master, deviceId); | 864 | master, deviceId); |
869 | 865 | ||
870 | - ClusterMessage message = new ClusterMessage( | ||
871 | - myId, | ||
872 | - DEVICE_REMOVE_REQ, | ||
873 | - SERIALIZER.encode(deviceId)); | ||
874 | - | ||
875 | // TODO check unicast return value | 866 | // TODO check unicast return value |
876 | - clusterCommunicator.unicast(message, master); | 867 | + clusterCommunicator.unicast(deviceId, DEVICE_REMOVE_REQ, SERIALIZER::encode, master); |
877 | /* error log: | 868 | /* error log: |
878 | log.error("Failed to forward {} remove request to {}", deviceId, master, e); | 869 | log.error("Failed to forward {} remove request to {}", deviceId, master, e); |
879 | */ | 870 | */ |
... | @@ -1057,19 +1048,11 @@ public class GossipDeviceStore | ... | @@ -1057,19 +1048,11 @@ public class GossipDeviceStore |
1057 | } | 1048 | } |
1058 | 1049 | ||
1059 | private void unicastMessage(NodeId recipient, MessageSubject subject, Object event) throws IOException { | 1050 | private void unicastMessage(NodeId recipient, MessageSubject subject, Object event) throws IOException { |
1060 | - ClusterMessage message = new ClusterMessage( | 1051 | + clusterCommunicator.unicast(event, subject, SERIALIZER::encode, recipient); |
1061 | - clusterService.getLocalNode().id(), | ||
1062 | - subject, | ||
1063 | - SERIALIZER.encode(event)); | ||
1064 | - clusterCommunicator.unicast(message, recipient); | ||
1065 | } | 1052 | } |
1066 | 1053 | ||
1067 | private void broadcastMessage(MessageSubject subject, Object event) { | 1054 | private void broadcastMessage(MessageSubject subject, Object event) { |
1068 | - ClusterMessage message = new ClusterMessage( | 1055 | + clusterCommunicator.broadcast(event, subject, SERIALIZER::encode); |
1069 | - clusterService.getLocalNode().id(), | ||
1070 | - subject, | ||
1071 | - SERIALIZER.encode(event)); | ||
1072 | - clusterCommunicator.broadcast(message); | ||
1073 | } | 1056 | } |
1074 | 1057 | ||
1075 | private void notifyPeers(InternalDeviceEvent event) { | 1058 | private void notifyPeers(InternalDeviceEvent event) { | ... | ... |
... | @@ -510,11 +510,7 @@ public class EventuallyConsistentMapImpl<K, V> | ... | @@ -510,11 +510,7 @@ public class EventuallyConsistentMapImpl<K, V> |
510 | } | 510 | } |
511 | 511 | ||
512 | private boolean unicastMessage(NodeId peer, MessageSubject subject, Object event) { | 512 | private boolean unicastMessage(NodeId peer, MessageSubject subject, Object event) { |
513 | - ClusterMessage message = new ClusterMessage( | 513 | + return clusterCommunicator.unicast(event, subject, serializer::encode, peer); |
514 | - clusterService.getLocalNode().id(), | ||
515 | - subject, | ||
516 | - serializer.encode(event)); | ||
517 | - return clusterCommunicator.unicast(message, peer); | ||
518 | // Note: we had this flipped before... | 514 | // Note: we had this flipped before... |
519 | // communicationExecutor.execute(() -> clusterCommunicator.unicast(message, peer)); | 515 | // communicationExecutor.execute(() -> clusterCommunicator.unicast(message, peer)); |
520 | } | 516 | } | ... | ... |
... | @@ -22,6 +22,7 @@ import com.google.common.collect.ImmutableList; | ... | @@ -22,6 +22,7 @@ import com.google.common.collect.ImmutableList; |
22 | import com.google.common.collect.Iterables; | 22 | import com.google.common.collect.Iterables; |
23 | import com.google.common.collect.Maps; | 23 | import com.google.common.collect.Maps; |
24 | import com.google.common.collect.Sets; | 24 | import com.google.common.collect.Sets; |
25 | +import com.google.common.util.concurrent.Futures; | ||
25 | import com.hazelcast.core.IMap; | 26 | import com.hazelcast.core.IMap; |
26 | 27 | ||
27 | import org.apache.felix.scr.annotations.Activate; | 28 | import org.apache.felix.scr.annotations.Activate; |
... | @@ -35,6 +36,7 @@ import org.apache.felix.scr.annotations.Service; | ... | @@ -35,6 +36,7 @@ import org.apache.felix.scr.annotations.Service; |
35 | import org.onlab.util.BoundedThreadPool; | 36 | import org.onlab.util.BoundedThreadPool; |
36 | import org.onlab.util.KryoNamespace; | 37 | import org.onlab.util.KryoNamespace; |
37 | import org.onlab.util.NewConcurrentHashMap; | 38 | import org.onlab.util.NewConcurrentHashMap; |
39 | +import org.onlab.util.Tools; | ||
38 | import org.onosproject.cfg.ComponentConfigService; | 40 | import org.onosproject.cfg.ComponentConfigService; |
39 | import org.onosproject.cluster.ClusterService; | 41 | import org.onosproject.cluster.ClusterService; |
40 | import org.onosproject.cluster.NodeId; | 42 | import org.onosproject.cluster.NodeId; |
... | @@ -93,7 +95,6 @@ import java.util.concurrent.ExecutorService; | ... | @@ -93,7 +95,6 @@ import java.util.concurrent.ExecutorService; |
93 | import java.util.concurrent.Executors; | 95 | import java.util.concurrent.Executors; |
94 | import java.util.concurrent.Future; | 96 | import java.util.concurrent.Future; |
95 | import java.util.concurrent.TimeUnit; | 97 | import java.util.concurrent.TimeUnit; |
96 | -import java.util.concurrent.TimeoutException; | ||
97 | import java.util.stream.Collectors; | 98 | import java.util.stream.Collectors; |
98 | 99 | ||
99 | import static com.google.common.base.Preconditions.checkNotNull; | 100 | import static com.google.common.base.Preconditions.checkNotNull; |
... | @@ -360,21 +361,15 @@ public class DistributedFlowRuleStore | ... | @@ -360,21 +361,15 @@ public class DistributedFlowRuleStore |
360 | log.trace("Forwarding getFlowEntry to {}, which is the primary (master) for device {}", | 361 | log.trace("Forwarding getFlowEntry to {}, which is the primary (master) for device {}", |
361 | replicaInfo.master().orNull(), rule.deviceId()); | 362 | replicaInfo.master().orNull(), rule.deviceId()); |
362 | 363 | ||
363 | - ClusterMessage message = new ClusterMessage( | 364 | + return Tools.futureGetOrElse(clusterCommunicator.sendAndReceive(rule, |
364 | - clusterService.getLocalNode().id(), | ||
365 | FlowStoreMessageSubjects.GET_FLOW_ENTRY, | 365 | FlowStoreMessageSubjects.GET_FLOW_ENTRY, |
366 | - SERIALIZER.encode(rule)); | 366 | + SERIALIZER::encode, |
367 | - | 367 | + SERIALIZER::decode, |
368 | - try { | 368 | + replicaInfo.master().get()), |
369 | - Future<byte[]> responseFuture = clusterCommunicator.sendAndReceive(message, replicaInfo.master().get()); | 369 | + FLOW_RULE_STORE_TIMEOUT_MILLIS, |
370 | - return SERIALIZER.decode(responseFuture.get(FLOW_RULE_STORE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)); | 370 | + TimeUnit.MILLISECONDS, |
371 | - } catch (IOException | TimeoutException | ExecutionException | InterruptedException e) { | 371 | + null); |
372 | - log.warn("Unable to fetch flow store contents from {}", replicaInfo.master().get()); | ||
373 | } | 372 | } |
374 | - return null; | ||
375 | - } | ||
376 | - | ||
377 | - | ||
378 | 373 | ||
379 | @Override | 374 | @Override |
380 | public Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) { | 375 | public Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) { |
... | @@ -393,21 +388,15 @@ public class DistributedFlowRuleStore | ... | @@ -393,21 +388,15 @@ public class DistributedFlowRuleStore |
393 | log.trace("Forwarding getFlowEntries to {}, which is the primary (master) for device {}", | 388 | log.trace("Forwarding getFlowEntries to {}, which is the primary (master) for device {}", |
394 | replicaInfo.master().orNull(), deviceId); | 389 | replicaInfo.master().orNull(), deviceId); |
395 | 390 | ||
396 | - ClusterMessage message = new ClusterMessage( | 391 | + return Tools.futureGetOrElse(clusterCommunicator.sendAndReceive(deviceId, |
397 | - clusterService.getLocalNode().id(), | 392 | + FlowStoreMessageSubjects.GET_DEVICE_FLOW_ENTRIES, |
398 | - GET_DEVICE_FLOW_ENTRIES, | 393 | + SERIALIZER::encode, |
399 | - SERIALIZER.encode(deviceId)); | 394 | + SERIALIZER::decode, |
400 | - | 395 | + replicaInfo.master().get()), |
401 | - try { | 396 | + FLOW_RULE_STORE_TIMEOUT_MILLIS, |
402 | - Future<byte[]> responseFuture = clusterCommunicator.sendAndReceive(message, replicaInfo.master().get()); | 397 | + TimeUnit.MILLISECONDS, |
403 | - return SERIALIZER.decode(responseFuture.get(FLOW_RULE_STORE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)); | 398 | + Collections.emptyList()); |
404 | - } catch (IOException | TimeoutException | ExecutionException | InterruptedException e) { | ||
405 | - log.warn("Unable to fetch flow store contents from {}", replicaInfo.master().get()); | ||
406 | } | 399 | } |
407 | - return Collections.emptyList(); | ||
408 | - } | ||
409 | - | ||
410 | - | ||
411 | 400 | ||
412 | @Override | 401 | @Override |
413 | public void storeFlowRule(FlowRule rule) { | 402 | public void storeFlowRule(FlowRule rule) { |
... | @@ -453,14 +442,10 @@ public class DistributedFlowRuleStore | ... | @@ -453,14 +442,10 @@ public class DistributedFlowRuleStore |
453 | log.trace("Forwarding storeBatch to {}, which is the primary (master) for device {}", | 442 | log.trace("Forwarding storeBatch to {}, which is the primary (master) for device {}", |
454 | replicaInfo.master().orNull(), deviceId); | 443 | replicaInfo.master().orNull(), deviceId); |
455 | 444 | ||
456 | - ClusterMessage message = new ClusterMessage( | 445 | + if (!clusterCommunicator.unicast(operation, |
457 | - local, | 446 | + APPLY_BATCH_FLOWS, SERIALIZER::encode, |
458 | - APPLY_BATCH_FLOWS, | 447 | + replicaInfo.master().get())) { |
459 | - SERIALIZER.encode(operation)); | 448 | + log.warn("Failed to storeBatch: {} to {}", operation, replicaInfo.master()); |
460 | - | ||
461 | - | ||
462 | - if (!clusterCommunicator.unicast(message, replicaInfo.master().get())) { | ||
463 | - log.warn("Failed to storeBatch: {} to {}", message, replicaInfo.master()); | ||
464 | 449 | ||
465 | Set<FlowRule> allFailures = operation.getOperations().stream() | 450 | Set<FlowRule> allFailures = operation.getOperations().stream() |
466 | .map(op -> op.target()) | 451 | .map(op -> op.target()) |
... | @@ -612,18 +597,15 @@ public class DistributedFlowRuleStore | ... | @@ -612,18 +597,15 @@ public class DistributedFlowRuleStore |
612 | log.trace("Forwarding removeFlowRule to {}, which is the primary (master) for device {}", | 597 | log.trace("Forwarding removeFlowRule to {}, which is the primary (master) for device {}", |
613 | replicaInfo.master().orNull(), deviceId); | 598 | replicaInfo.master().orNull(), deviceId); |
614 | 599 | ||
615 | - ClusterMessage message = new ClusterMessage( | 600 | + return Futures.get(clusterCommunicator.sendAndReceive( |
616 | - clusterService.getLocalNode().id(), | 601 | + rule, |
617 | REMOVE_FLOW_ENTRY, | 602 | REMOVE_FLOW_ENTRY, |
618 | - SERIALIZER.encode(rule)); | 603 | + SERIALIZER::encode, |
619 | - | 604 | + SERIALIZER::decode, |
620 | - try { | 605 | + replicaInfo.master().get()), |
621 | - Future<byte[]> responseFuture = clusterCommunicator.sendAndReceive(message, replicaInfo.master().get()); | 606 | + FLOW_RULE_STORE_TIMEOUT_MILLIS, |
622 | - return SERIALIZER.decode(responseFuture.get(FLOW_RULE_STORE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)); | 607 | + TimeUnit.MILLISECONDS, |
623 | - } catch (IOException | TimeoutException | ExecutionException | InterruptedException e) { | 608 | + RuntimeException.class); |
624 | - // TODO: Retry against latest master or throw a FlowStoreException | ||
625 | - throw new RuntimeException(e); | ||
626 | - } | ||
627 | } | 609 | } |
628 | 610 | ||
629 | private FlowRuleEvent removeFlowRuleInternal(FlowEntry rule) { | 611 | private FlowRuleEvent removeFlowRuleInternal(FlowEntry rule) { |
... | @@ -649,12 +631,8 @@ public class DistributedFlowRuleStore | ... | @@ -649,12 +631,8 @@ public class DistributedFlowRuleStore |
649 | if (nodeId == null) { | 631 | if (nodeId == null) { |
650 | notifyDelegate(event); | 632 | notifyDelegate(event); |
651 | } else { | 633 | } else { |
652 | - ClusterMessage message = new ClusterMessage( | ||
653 | - clusterService.getLocalNode().id(), | ||
654 | - REMOTE_APPLY_COMPLETED, | ||
655 | - SERIALIZER.encode(event)); | ||
656 | // TODO check unicast return value | 634 | // TODO check unicast return value |
657 | - clusterCommunicator.unicast(message, nodeId); | 635 | + clusterCommunicator.unicast(event, REMOTE_APPLY_COMPLETED, SERIALIZER::encode, nodeId); |
658 | //error log: log.warn("Failed to respond to peer for batch operation result"); | 636 | //error log: log.warn("Failed to respond to peer for batch operation result"); |
659 | } | 637 | } |
660 | } | 638 | } | ... | ... |
... | @@ -20,6 +20,7 @@ import com.google.common.cache.CacheBuilder; | ... | @@ -20,6 +20,7 @@ import com.google.common.cache.CacheBuilder; |
20 | import com.google.common.util.concurrent.Futures; | 20 | import com.google.common.util.concurrent.Futures; |
21 | import com.google.common.util.concurrent.ListenableFuture; | 21 | import com.google.common.util.concurrent.ListenableFuture; |
22 | import com.google.common.util.concurrent.SettableFuture; | 22 | import com.google.common.util.concurrent.SettableFuture; |
23 | + | ||
23 | import org.apache.felix.scr.annotations.Activate; | 24 | import org.apache.felix.scr.annotations.Activate; |
24 | import org.apache.felix.scr.annotations.Component; | 25 | import org.apache.felix.scr.annotations.Component; |
25 | import org.apache.felix.scr.annotations.Deactivate; | 26 | import org.apache.felix.scr.annotations.Deactivate; |
... | @@ -45,7 +46,6 @@ import org.onosproject.store.cluster.messaging.ClusterMessageHandler; | ... | @@ -45,7 +46,6 @@ import org.onosproject.store.cluster.messaging.ClusterMessageHandler; |
45 | import org.onosproject.store.flow.ReplicaInfo; | 46 | import org.onosproject.store.flow.ReplicaInfo; |
46 | import org.onosproject.store.flow.ReplicaInfoEventListener; | 47 | import org.onosproject.store.flow.ReplicaInfoEventListener; |
47 | import org.onosproject.store.flow.ReplicaInfoService; | 48 | import org.onosproject.store.flow.ReplicaInfoService; |
48 | -import org.onosproject.store.serializers.DecodeTo; | ||
49 | import org.onosproject.store.serializers.KryoSerializer; | 49 | import org.onosproject.store.serializers.KryoSerializer; |
50 | import org.onosproject.store.serializers.StoreSerializer; | 50 | import org.onosproject.store.serializers.StoreSerializer; |
51 | import org.onosproject.store.serializers.impl.DistributedStoreSerializers; | 51 | import org.onosproject.store.serializers.impl.DistributedStoreSerializers; |
... | @@ -199,18 +199,12 @@ public class DefaultFlowRuleExtRouter | ... | @@ -199,18 +199,12 @@ public class DefaultFlowRuleExtRouter |
199 | log.trace("Forwarding storeBatch to {}, which is the primary (master) for device {}", | 199 | log.trace("Forwarding storeBatch to {}, which is the primary (master) for device {}", |
200 | replicaInfo.master().orNull(), deviceId); | 200 | replicaInfo.master().orNull(), deviceId); |
201 | 201 | ||
202 | - ClusterMessage message = new ClusterMessage(clusterService | 202 | + return clusterCommunicator.sendAndReceive( |
203 | - .getLocalNode().id(), APPLY_EXTEND_FLOWS, SERIALIZER.encode(batchOperation)); | 203 | + batchOperation, |
204 | - | 204 | + APPLY_EXTEND_FLOWS, |
205 | - try { | 205 | + SERIALIZER::encode, |
206 | - ListenableFuture<byte[]> responseFuture = clusterCommunicator | 206 | + SERIALIZER::decode, |
207 | - .sendAndReceive(message, replicaInfo.master().get()); | 207 | + replicaInfo.master().get()); |
208 | - // here should add another decode process | ||
209 | - return Futures.transform(responseFuture, | ||
210 | - new DecodeTo<FlowExtCompletedOperation>(SERIALIZER)); | ||
211 | - } catch (IOException e) { | ||
212 | - return Futures.immediateFailedFuture(e); | ||
213 | - } | ||
214 | } | 208 | } |
215 | 209 | ||
216 | /** | 210 | /** | ... | ... |
... | @@ -382,17 +382,13 @@ public class DistributedGroupStore | ... | @@ -382,17 +382,13 @@ public class DistributedGroupStore |
382 | GroupStoreMessage groupOp = GroupStoreMessage. | 382 | GroupStoreMessage groupOp = GroupStoreMessage. |
383 | createGroupAddRequestMsg(groupDesc.deviceId(), | 383 | createGroupAddRequestMsg(groupDesc.deviceId(), |
384 | groupDesc); | 384 | groupDesc); |
385 | - ClusterMessage message = new ClusterMessage( | 385 | + |
386 | - clusterService.getLocalNode().id(), | 386 | + if (!clusterCommunicator.unicast(groupOp, |
387 | - GroupStoreMessageSubjects. | 387 | + GroupStoreMessageSubjects.REMOTE_GROUP_OP_REQUEST, |
388 | - REMOTE_GROUP_OP_REQUEST, | 388 | + m -> kryoBuilder.build().serialize(m), |
389 | - kryoBuilder.build().serialize(groupOp)); | 389 | + mastershipService.getMasterFor(groupDesc.deviceId()))) { |
390 | - if (!clusterCommunicator.unicast(message, | ||
391 | - mastershipService. | ||
392 | - getMasterFor( | ||
393 | - groupDesc.deviceId()))) { | ||
394 | log.warn("Failed to send request to master: {} to {}", | 390 | log.warn("Failed to send request to master: {} to {}", |
395 | - message, | 391 | + groupOp, |
396 | mastershipService.getMasterFor(groupDesc.deviceId())); | 392 | mastershipService.getMasterFor(groupDesc.deviceId())); |
397 | //TODO: Send Group operation failure event | 393 | //TODO: Send Group operation failure event |
398 | } | 394 | } |
... | @@ -472,16 +468,13 @@ public class DistributedGroupStore | ... | @@ -472,16 +468,13 @@ public class DistributedGroupStore |
472 | type, | 468 | type, |
473 | newBuckets, | 469 | newBuckets, |
474 | newAppCookie); | 470 | newAppCookie); |
475 | - ClusterMessage message = | 471 | + |
476 | - new ClusterMessage(clusterService.getLocalNode().id(), | 472 | + if (!clusterCommunicator.unicast(groupOp, |
477 | - GroupStoreMessageSubjects. | 473 | + GroupStoreMessageSubjects.REMOTE_GROUP_OP_REQUEST, |
478 | - REMOTE_GROUP_OP_REQUEST, | 474 | + m -> kryoBuilder.build().serialize(m), |
479 | - kryoBuilder.build().serialize(groupOp)); | 475 | + mastershipService.getMasterFor(deviceId))) { |
480 | - if (!clusterCommunicator.unicast(message, | ||
481 | - mastershipService. | ||
482 | - getMasterFor(deviceId))) { | ||
483 | log.warn("Failed to send request to master: {} to {}", | 476 | log.warn("Failed to send request to master: {} to {}", |
484 | - message, | 477 | + groupOp, |
485 | mastershipService.getMasterFor(deviceId)); | 478 | mastershipService.getMasterFor(deviceId)); |
486 | //TODO: Send Group operation failure event | 479 | //TODO: Send Group operation failure event |
487 | } | 480 | } |
... | @@ -584,16 +577,13 @@ public class DistributedGroupStore | ... | @@ -584,16 +577,13 @@ public class DistributedGroupStore |
584 | GroupStoreMessage groupOp = GroupStoreMessage. | 577 | GroupStoreMessage groupOp = GroupStoreMessage. |
585 | createGroupDeleteRequestMsg(deviceId, | 578 | createGroupDeleteRequestMsg(deviceId, |
586 | appCookie); | 579 | appCookie); |
587 | - ClusterMessage message = | 580 | + |
588 | - new ClusterMessage(clusterService.getLocalNode().id(), | 581 | + if (!clusterCommunicator.unicast(groupOp, |
589 | - GroupStoreMessageSubjects. | 582 | + GroupStoreMessageSubjects.REMOTE_GROUP_OP_REQUEST, |
590 | - REMOTE_GROUP_OP_REQUEST, | 583 | + m -> kryoBuilder.build().serialize(m), |
591 | - kryoBuilder.build().serialize(groupOp)); | 584 | + mastershipService.getMasterFor(deviceId))) { |
592 | - if (!clusterCommunicator.unicast(message, | ||
593 | - mastershipService. | ||
594 | - getMasterFor(deviceId))) { | ||
595 | log.warn("Failed to send request to master: {} to {}", | 585 | log.warn("Failed to send request to master: {} to {}", |
596 | - message, | 586 | + groupOp, |
597 | mastershipService.getMasterFor(deviceId)); | 587 | mastershipService.getMasterFor(deviceId)); |
598 | //TODO: Send Group operation failure event | 588 | //TODO: Send Group operation failure event |
599 | } | 589 | } | ... | ... |
... | @@ -477,21 +477,13 @@ public class GossipHostStore | ... | @@ -477,21 +477,13 @@ public class GossipHostStore |
477 | } | 477 | } |
478 | 478 | ||
479 | private void broadcastMessage(MessageSubject subject, Object event) { | 479 | private void broadcastMessage(MessageSubject subject, Object event) { |
480 | - ClusterMessage message = new ClusterMessage( | 480 | + clusterCommunicator.broadcast(event, subject, SERIALIZER::encode); |
481 | - clusterService.getLocalNode().id(), | ||
482 | - subject, | ||
483 | - SERIALIZER.encode(event)); | ||
484 | - clusterCommunicator.broadcast(message); | ||
485 | } | 481 | } |
486 | 482 | ||
487 | private void unicastMessage(NodeId peer, | 483 | private void unicastMessage(NodeId peer, |
488 | MessageSubject subject, | 484 | MessageSubject subject, |
489 | Object event) throws IOException { | 485 | Object event) throws IOException { |
490 | - ClusterMessage message = new ClusterMessage( | 486 | + clusterCommunicator.unicast(event, subject, SERIALIZER::encode, peer); |
491 | - clusterService.getLocalNode().id(), | ||
492 | - subject, | ||
493 | - SERIALIZER.encode(event)); | ||
494 | - clusterCommunicator.unicast(message, peer); | ||
495 | } | 487 | } |
496 | 488 | ||
497 | private void notifyDelegateIfNotNull(HostEvent event) { | 489 | private void notifyDelegateIfNotNull(HostEvent event) { | ... | ... |
... | @@ -334,17 +334,12 @@ public class GossipLinkStore | ... | @@ -334,17 +334,12 @@ public class GossipLinkStore |
334 | 334 | ||
335 | 335 | ||
336 | LinkInjectedEvent linkInjectedEvent = new LinkInjectedEvent(providerId, linkDescription); | 336 | LinkInjectedEvent linkInjectedEvent = new LinkInjectedEvent(providerId, linkDescription); |
337 | - ClusterMessage linkInjectedMessage = new ClusterMessage(localNode, | ||
338 | - GossipLinkStoreMessageSubjects.LINK_INJECTED, SERIALIZER.encode(linkInjectedEvent)); | ||
339 | 337 | ||
340 | // TODO check unicast return value | 338 | // TODO check unicast return value |
341 | - clusterCommunicator.unicast(linkInjectedMessage, dstNode); | 339 | + clusterCommunicator.unicast(linkInjectedEvent, |
342 | - /* error log: | 340 | + GossipLinkStoreMessageSubjects.LINK_INJECTED, |
343 | - log.warn("Failed to process link update between src: {} and dst: {} " + | 341 | + SERIALIZER::encode, |
344 | - "(cluster messaging failed: {})", | 342 | + dstNode); |
345 | - linkDescription.src(), linkDescription.dst(), e); | ||
346 | - */ | ||
347 | - | ||
348 | } | 343 | } |
349 | 344 | ||
350 | return linkEvent; | 345 | return linkEvent; |
... | @@ -653,19 +648,11 @@ public class GossipLinkStore | ... | @@ -653,19 +648,11 @@ public class GossipLinkStore |
653 | } | 648 | } |
654 | 649 | ||
655 | private void broadcastMessage(MessageSubject subject, Object event) { | 650 | private void broadcastMessage(MessageSubject subject, Object event) { |
656 | - ClusterMessage message = new ClusterMessage( | 651 | + clusterCommunicator.broadcast(event, subject, SERIALIZER::encode); |
657 | - clusterService.getLocalNode().id(), | ||
658 | - subject, | ||
659 | - SERIALIZER.encode(event)); | ||
660 | - clusterCommunicator.broadcast(message); | ||
661 | } | 652 | } |
662 | 653 | ||
663 | private void unicastMessage(NodeId recipient, MessageSubject subject, Object event) throws IOException { | 654 | private void unicastMessage(NodeId recipient, MessageSubject subject, Object event) throws IOException { |
664 | - ClusterMessage message = new ClusterMessage( | 655 | + clusterCommunicator.unicast(event, subject, SERIALIZER::encode, recipient); |
665 | - clusterService.getLocalNode().id(), | ||
666 | - subject, | ||
667 | - SERIALIZER.encode(event)); | ||
668 | - clusterCommunicator.unicast(message, recipient); | ||
669 | } | 656 | } |
670 | 657 | ||
671 | private void notifyPeers(InternalLinkEvent event) { | 658 | private void notifyPeers(InternalLinkEvent event) { | ... | ... |
... | @@ -181,20 +181,14 @@ public class ConsistentDeviceMastershipStore | ... | @@ -181,20 +181,14 @@ public class ConsistentDeviceMastershipStore |
181 | } else { | 181 | } else { |
182 | return MastershipRole.NONE; | 182 | return MastershipRole.NONE; |
183 | } | 183 | } |
184 | - } else { | 184 | + } |
185 | - try { | ||
186 | MastershipRole role = complete(clusterCommunicator.sendAndReceive( | 185 | MastershipRole role = complete(clusterCommunicator.sendAndReceive( |
187 | - new ClusterMessage( | 186 | + deviceId, |
188 | - localNodeId, | ||
189 | ROLE_QUERY_SUBJECT, | 187 | ROLE_QUERY_SUBJECT, |
190 | - SERIALIZER.encode(deviceId)), | 188 | + SERIALIZER::encode, |
189 | + SERIALIZER::decode, | ||
191 | nodeId)); | 190 | nodeId)); |
192 | return role == null ? MastershipRole.NONE : role; | 191 | return role == null ? MastershipRole.NONE : role; |
193 | - } catch (IOException e) { | ||
194 | - log.warn("Failed to query {} for {}'s role. Defaulting to NONE", nodeId, deviceId, e); | ||
195 | - return MastershipRole.NONE; | ||
196 | - } | ||
197 | - } | ||
198 | } | 192 | } |
199 | 193 | ||
200 | @Override | 194 | @Override |
... | @@ -276,17 +270,12 @@ public class ConsistentDeviceMastershipStore | ... | @@ -276,17 +270,12 @@ public class ConsistentDeviceMastershipStore |
276 | if (!nodeId.equals(localNodeId)) { | 270 | if (!nodeId.equals(localNodeId)) { |
277 | log.debug("Forwarding request to relinquish " | 271 | log.debug("Forwarding request to relinquish " |
278 | + "role for device {} to {}", deviceId, nodeId); | 272 | + "role for device {} to {}", deviceId, nodeId); |
279 | - try { | ||
280 | return complete(clusterCommunicator.sendAndReceive( | 273 | return complete(clusterCommunicator.sendAndReceive( |
281 | - new ClusterMessage( | 274 | + deviceId, |
282 | - localNodeId, | ||
283 | ROLE_RELINQUISH_SUBJECT, | 275 | ROLE_RELINQUISH_SUBJECT, |
284 | - SERIALIZER.encode(deviceId)), | 276 | + SERIALIZER::encode, |
277 | + SERIALIZER::decode, | ||
285 | nodeId)); | 278 | nodeId)); |
286 | - } catch (IOException e) { | ||
287 | - log.warn("Failed to send a request to relinquish role for {} to {}", deviceId, nodeId, e); | ||
288 | - return null; | ||
289 | - } | ||
290 | } | 279 | } |
291 | 280 | ||
292 | // Check if this node is can be managed by this node. | 281 | // Check if this node is can be managed by this node. | ... | ... |
... | @@ -131,9 +131,7 @@ public class DistributedPacketStore | ... | @@ -131,9 +131,7 @@ public class DistributedPacketStore |
131 | } | 131 | } |
132 | 132 | ||
133 | // TODO check unicast return value | 133 | // TODO check unicast return value |
134 | - communicationService.unicast(new ClusterMessage(myId, PACKET_OUT_SUBJECT, | 134 | + communicationService.unicast(packet, PACKET_OUT_SUBJECT, SERIALIZER::encode, master); |
135 | - SERIALIZER.encode(packet)), | ||
136 | - master); | ||
137 | // error log: log.warn("Failed to send packet-out to {}", master); | 135 | // error log: log.warn("Failed to send packet-out to {}", master); |
138 | } | 136 | } |
139 | 137 | ... | ... |
... | @@ -16,6 +16,7 @@ | ... | @@ -16,6 +16,7 @@ |
16 | package org.onosproject.store.statistic.impl; | 16 | package org.onosproject.store.statistic.impl; |
17 | 17 | ||
18 | import com.google.common.collect.Sets; | 18 | import com.google.common.collect.Sets; |
19 | + | ||
19 | import org.apache.felix.scr.annotations.Activate; | 20 | import org.apache.felix.scr.annotations.Activate; |
20 | import org.apache.felix.scr.annotations.Component; | 21 | import org.apache.felix.scr.annotations.Component; |
21 | import org.apache.felix.scr.annotations.Deactivate; | 22 | import org.apache.felix.scr.annotations.Deactivate; |
... | @@ -23,6 +24,7 @@ import org.apache.felix.scr.annotations.Reference; | ... | @@ -23,6 +24,7 @@ import org.apache.felix.scr.annotations.Reference; |
23 | import org.apache.felix.scr.annotations.ReferenceCardinality; | 24 | import org.apache.felix.scr.annotations.ReferenceCardinality; |
24 | import org.apache.felix.scr.annotations.Service; | 25 | import org.apache.felix.scr.annotations.Service; |
25 | import org.onlab.util.KryoNamespace; | 26 | import org.onlab.util.KryoNamespace; |
27 | +import org.onlab.util.Tools; | ||
26 | import org.onosproject.cluster.ClusterService; | 28 | import org.onosproject.cluster.ClusterService; |
27 | import org.onosproject.net.ConnectPoint; | 29 | import org.onosproject.net.ConnectPoint; |
28 | import org.onosproject.net.DeviceId; | 30 | import org.onosproject.net.DeviceId; |
... | @@ -47,12 +49,9 @@ import java.util.HashSet; | ... | @@ -47,12 +49,9 @@ import java.util.HashSet; |
47 | import java.util.Map; | 49 | import java.util.Map; |
48 | import java.util.Set; | 50 | import java.util.Set; |
49 | import java.util.concurrent.ConcurrentHashMap; | 51 | import java.util.concurrent.ConcurrentHashMap; |
50 | -import java.util.concurrent.ExecutionException; | ||
51 | import java.util.concurrent.ExecutorService; | 52 | import java.util.concurrent.ExecutorService; |
52 | import java.util.concurrent.Executors; | 53 | import java.util.concurrent.Executors; |
53 | -import java.util.concurrent.Future; | ||
54 | import java.util.concurrent.TimeUnit; | 54 | import java.util.concurrent.TimeUnit; |
55 | -import java.util.concurrent.TimeoutException; | ||
56 | import java.util.concurrent.atomic.AtomicInteger; | 55 | import java.util.concurrent.atomic.AtomicInteger; |
57 | 56 | ||
58 | import static org.onlab.util.Tools.groupedThreads; | 57 | import static org.onlab.util.Tools.groupedThreads; |
... | @@ -218,20 +217,15 @@ public class DistributedStatisticStore implements StatisticStore { | ... | @@ -218,20 +217,15 @@ public class DistributedStatisticStore implements StatisticStore { |
218 | if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) { | 217 | if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) { |
219 | return getCurrentStatisticInternal(connectPoint); | 218 | return getCurrentStatisticInternal(connectPoint); |
220 | } else { | 219 | } else { |
221 | - ClusterMessage message = new ClusterMessage( | 220 | + return Tools.futureGetOrElse(clusterCommunicator.sendAndReceive( |
222 | - clusterService.getLocalNode().id(), | 221 | + connectPoint, |
223 | GET_CURRENT, | 222 | GET_CURRENT, |
224 | - SERIALIZER.encode(connectPoint)); | 223 | + SERIALIZER::encode, |
225 | - | 224 | + SERIALIZER::decode, |
226 | - try { | 225 | + replicaInfo.master().get()), |
227 | - Future<byte[]> response = | 226 | + STATISTIC_STORE_TIMEOUT_MILLIS, |
228 | - clusterCommunicator.sendAndReceive(message, replicaInfo.master().get()); | 227 | + TimeUnit.MILLISECONDS, |
229 | - return SERIALIZER.decode(response.get(STATISTIC_STORE_TIMEOUT_MILLIS, | 228 | + Collections.emptySet()); |
230 | - TimeUnit.MILLISECONDS)); | ||
231 | - } catch (IOException | TimeoutException | ExecutionException | InterruptedException e) { | ||
232 | - log.warn("Unable to communicate with peer {}", replicaInfo.master().get()); | ||
233 | - return Collections.emptySet(); | ||
234 | - } | ||
235 | } | 229 | } |
236 | 230 | ||
237 | } | 231 | } |
... | @@ -251,24 +245,18 @@ public class DistributedStatisticStore implements StatisticStore { | ... | @@ -251,24 +245,18 @@ public class DistributedStatisticStore implements StatisticStore { |
251 | if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) { | 245 | if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) { |
252 | return getPreviousStatisticInternal(connectPoint); | 246 | return getPreviousStatisticInternal(connectPoint); |
253 | } else { | 247 | } else { |
254 | - ClusterMessage message = new ClusterMessage( | 248 | + return Tools.futureGetOrElse(clusterCommunicator.sendAndReceive( |
255 | - clusterService.getLocalNode().id(), | 249 | + connectPoint, |
256 | GET_PREVIOUS, | 250 | GET_PREVIOUS, |
257 | - SERIALIZER.encode(connectPoint)); | 251 | + SERIALIZER::encode, |
258 | - | 252 | + SERIALIZER::decode, |
259 | - try { | 253 | + replicaInfo.master().get()), |
260 | - Future<byte[]> response = | 254 | + STATISTIC_STORE_TIMEOUT_MILLIS, |
261 | - clusterCommunicator.sendAndReceive(message, replicaInfo.master().get()); | 255 | + TimeUnit.MILLISECONDS, |
262 | - return SERIALIZER.decode(response.get(STATISTIC_STORE_TIMEOUT_MILLIS, | 256 | + Collections.emptySet()); |
263 | - TimeUnit.MILLISECONDS)); | ||
264 | - } catch (IOException | TimeoutException | ExecutionException | InterruptedException e) { | ||
265 | - log.warn("Unable to communicate with peer {}", replicaInfo.master().get()); | ||
266 | - return Collections.emptySet(); | ||
267 | } | 257 | } |
268 | } | 258 | } |
269 | 259 | ||
270 | - } | ||
271 | - | ||
272 | private synchronized Set<FlowEntry> getPreviousStatisticInternal(ConnectPoint connectPoint) { | 260 | private synchronized Set<FlowEntry> getPreviousStatisticInternal(ConnectPoint connectPoint) { |
273 | return previous.get(connectPoint); | 261 | return previous.get(connectPoint); |
274 | } | 262 | } | ... | ... |
This diff is collapsed. Click to expand it.
This diff is collapsed. Click to expand it.
This diff is collapsed. Click to expand it.
core/store/serializers/src/main/java/org/onosproject/store/serializers/DecodeTo.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.serializers; | ||
17 | - | ||
18 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
19 | - | ||
20 | -import com.google.common.base.Function; | ||
21 | - | ||
22 | -/** | ||
23 | - * Function to convert byte[] into {@code T}. | ||
24 | - * | ||
25 | - * @param <T> Type after decoding | ||
26 | - */ | ||
27 | -public final class DecodeTo<T> implements Function<byte[], T> { | ||
28 | - | ||
29 | - private StoreSerializer serializer; | ||
30 | - | ||
31 | - public DecodeTo(StoreSerializer serializer) { | ||
32 | - this.serializer = checkNotNull(serializer); | ||
33 | - } | ||
34 | - | ||
35 | - @Override | ||
36 | - public T apply(byte[] input) { | ||
37 | - return serializer.decode(input); | ||
38 | - } | ||
39 | -} |
... | @@ -15,10 +15,10 @@ | ... | @@ -15,10 +15,10 @@ |
15 | */ | 15 | */ |
16 | package org.onlab.util; | 16 | package org.onlab.util; |
17 | 17 | ||
18 | -import com.google.common.base.Strings; | 18 | +import static java.nio.file.Files.delete; |
19 | -import com.google.common.primitives.UnsignedLongs; | 19 | +import static java.nio.file.Files.walkFileTree; |
20 | -import com.google.common.util.concurrent.ThreadFactoryBuilder; | 20 | +import static org.onlab.util.GroupedThreadFactory.groupedThreadFactory; |
21 | -import org.slf4j.Logger; | 21 | +import static org.slf4j.LoggerFactory.getLogger; |
22 | 22 | ||
23 | import java.io.BufferedReader; | 23 | import java.io.BufferedReader; |
24 | import java.io.File; | 24 | import java.io.File; |
... | @@ -37,12 +37,17 @@ import java.util.ArrayList; | ... | @@ -37,12 +37,17 @@ import java.util.ArrayList; |
37 | import java.util.Collection; | 37 | import java.util.Collection; |
38 | import java.util.Dictionary; | 38 | import java.util.Dictionary; |
39 | import java.util.List; | 39 | import java.util.List; |
40 | +import java.util.concurrent.ExecutionException; | ||
41 | +import java.util.concurrent.Future; | ||
40 | import java.util.concurrent.ThreadFactory; | 42 | import java.util.concurrent.ThreadFactory; |
43 | +import java.util.concurrent.TimeUnit; | ||
44 | +import java.util.concurrent.TimeoutException; | ||
41 | 45 | ||
42 | -import static java.nio.file.Files.delete; | 46 | +import org.slf4j.Logger; |
43 | -import static java.nio.file.Files.walkFileTree; | 47 | + |
44 | -import static org.onlab.util.GroupedThreadFactory.groupedThreadFactory; | 48 | +import com.google.common.base.Strings; |
45 | -import static org.slf4j.LoggerFactory.getLogger; | 49 | +import com.google.common.primitives.UnsignedLongs; |
50 | +import com.google.common.util.concurrent.ThreadFactoryBuilder; | ||
46 | 51 | ||
47 | /** | 52 | /** |
48 | * Miscellaneous utility methods. | 53 | * Miscellaneous utility methods. |
... | @@ -324,6 +329,51 @@ public abstract class Tools { | ... | @@ -324,6 +329,51 @@ public abstract class Tools { |
324 | dst.getAbsolutePath())); | 329 | dst.getAbsolutePath())); |
325 | } | 330 | } |
326 | 331 | ||
332 | + /** | ||
333 | + * Returns the future value when complete or if future | ||
334 | + * completes exceptionally returns the defaultValue. | ||
335 | + * @param future future | ||
336 | + * @param defaultValue default value | ||
337 | + * @param <T> future value type | ||
338 | + * @return future value when complete or if future | ||
339 | + * completes exceptionally returns the defaultValue. | ||
340 | + */ | ||
341 | + public static <T> T futureGetOrElse(Future<T> future, T defaultValue) { | ||
342 | + try { | ||
343 | + return future.get(); | ||
344 | + } catch (InterruptedException e) { | ||
345 | + Thread.currentThread().interrupt(); | ||
346 | + return defaultValue; | ||
347 | + } catch (ExecutionException e) { | ||
348 | + return defaultValue; | ||
349 | + } | ||
350 | + } | ||
351 | + | ||
352 | + /** | ||
353 | + * Returns the future value when complete or if future | ||
354 | + * completes exceptionally returns the defaultValue. | ||
355 | + * @param future future | ||
356 | + * @param timeout time to wait for successful completion | ||
357 | + * @param timeUnit time unit | ||
358 | + * @param defaultValue default value | ||
359 | + * @param <T> future value type | ||
360 | + * @return future value when complete or if future | ||
361 | + * completes exceptionally returns the defaultValue. | ||
362 | + */ | ||
363 | + public static <T> T futureGetOrElse(Future<T> future, | ||
364 | + long timeout, | ||
365 | + TimeUnit timeUnit, | ||
366 | + T defaultValue) { | ||
367 | + try { | ||
368 | + return future.get(timeout, timeUnit); | ||
369 | + } catch (InterruptedException e) { | ||
370 | + Thread.currentThread().interrupt(); | ||
371 | + return defaultValue; | ||
372 | + } catch (ExecutionException | TimeoutException e) { | ||
373 | + return defaultValue; | ||
374 | + } | ||
375 | + } | ||
376 | + | ||
327 | // Auxiliary path visitor for recursive directory structure copying. | 377 | // Auxiliary path visitor for recursive directory structure copying. |
328 | private static class DirectoryCopier extends SimpleFileVisitor<Path> { | 378 | private static class DirectoryCopier extends SimpleFileVisitor<Path> { |
329 | private Path src; | 379 | private Path src; | ... | ... |
... | @@ -16,10 +16,9 @@ | ... | @@ -16,10 +16,9 @@ |
16 | package org.onlab.netty; | 16 | package org.onlab.netty; |
17 | 17 | ||
18 | import java.io.IOException; | 18 | import java.io.IOException; |
19 | +import java.util.concurrent.CompletableFuture; | ||
19 | import java.util.concurrent.ExecutorService; | 20 | import java.util.concurrent.ExecutorService; |
20 | 21 | ||
21 | -import com.google.common.util.concurrent.ListenableFuture; | ||
22 | - | ||
23 | /** | 22 | /** |
24 | * Interface for low level messaging primitives. | 23 | * Interface for low level messaging primitives. |
25 | */ | 24 | */ |
... | @@ -40,9 +39,8 @@ public interface MessagingService { | ... | @@ -40,9 +39,8 @@ public interface MessagingService { |
40 | * @param type type of message. | 39 | * @param type type of message. |
41 | * @param payload message payload. | 40 | * @param payload message payload. |
42 | * @return a response future | 41 | * @return a response future |
43 | - * @throws IOException when I/O exception of some sort has occurred | ||
44 | */ | 42 | */ |
45 | - public ListenableFuture<byte[]> sendAndReceive(Endpoint ep, String type, byte[] payload) throws IOException; | 43 | + public CompletableFuture<byte[]> sendAndReceive(Endpoint ep, String type, byte[] payload); |
46 | 44 | ||
47 | /** | 45 | /** |
48 | * Registers a new message handler for message type. | 46 | * Registers a new message handler for message type. | ... | ... |
... | @@ -39,6 +39,7 @@ import io.netty.channel.socket.nio.NioSocketChannel; | ... | @@ -39,6 +39,7 @@ import io.netty.channel.socket.nio.NioSocketChannel; |
39 | import java.io.IOException; | 39 | import java.io.IOException; |
40 | import java.net.InetAddress; | 40 | import java.net.InetAddress; |
41 | import java.net.UnknownHostException; | 41 | import java.net.UnknownHostException; |
42 | +import java.util.concurrent.CompletableFuture; | ||
42 | import java.util.concurrent.ConcurrentHashMap; | 43 | import java.util.concurrent.ConcurrentHashMap; |
43 | import java.util.concurrent.ConcurrentMap; | 44 | import java.util.concurrent.ConcurrentMap; |
44 | import java.util.concurrent.ExecutorService; | 45 | import java.util.concurrent.ExecutorService; |
... | @@ -56,8 +57,6 @@ import com.google.common.cache.Cache; | ... | @@ -56,8 +57,6 @@ import com.google.common.cache.Cache; |
56 | import com.google.common.cache.CacheBuilder; | 57 | import com.google.common.cache.CacheBuilder; |
57 | import com.google.common.cache.RemovalListener; | 58 | import com.google.common.cache.RemovalListener; |
58 | import com.google.common.cache.RemovalNotification; | 59 | import com.google.common.cache.RemovalNotification; |
59 | -import com.google.common.util.concurrent.ListenableFuture; | ||
60 | -import com.google.common.util.concurrent.SettableFuture; | ||
61 | 60 | ||
62 | /** | 61 | /** |
63 | * A Netty based implementation of MessagingService. | 62 | * A Netty based implementation of MessagingService. |
... | @@ -69,14 +68,14 @@ public class NettyMessagingService implements MessagingService { | ... | @@ -69,14 +68,14 @@ public class NettyMessagingService implements MessagingService { |
69 | private final Endpoint localEp; | 68 | private final Endpoint localEp; |
70 | private final ConcurrentMap<String, MessageHandler> handlers = new ConcurrentHashMap<>(); | 69 | private final ConcurrentMap<String, MessageHandler> handlers = new ConcurrentHashMap<>(); |
71 | private final AtomicLong messageIdGenerator = new AtomicLong(0); | 70 | private final AtomicLong messageIdGenerator = new AtomicLong(0); |
72 | - private final Cache<Long, SettableFuture<byte[]>> responseFutures = CacheBuilder.newBuilder() | 71 | + private final Cache<Long, CompletableFuture<byte[]>> responseFutures = CacheBuilder.newBuilder() |
73 | .maximumSize(100000) | 72 | .maximumSize(100000) |
74 | .expireAfterWrite(10, TimeUnit.SECONDS) | 73 | .expireAfterWrite(10, TimeUnit.SECONDS) |
75 | - .removalListener(new RemovalListener<Long, SettableFuture<byte[]>>() { | 74 | + .removalListener(new RemovalListener<Long, CompletableFuture<byte[]>>() { |
76 | @Override | 75 | @Override |
77 | - public void onRemoval(RemovalNotification<Long, SettableFuture<byte[]>> entry) { | 76 | + public void onRemoval(RemovalNotification<Long, CompletableFuture<byte[]>> entry) { |
78 | if (entry.wasEvicted()) { | 77 | if (entry.wasEvicted()) { |
79 | - entry.getValue().setException(new TimeoutException("Timedout waiting for reply")); | 78 | + entry.getValue().completeExceptionally(new TimeoutException("Timedout waiting for reply")); |
80 | } | 79 | } |
81 | } | 80 | } |
82 | }) | 81 | }) |
... | @@ -178,11 +177,10 @@ public class NettyMessagingService implements MessagingService { | ... | @@ -178,11 +177,10 @@ public class NettyMessagingService implements MessagingService { |
178 | } | 177 | } |
179 | 178 | ||
180 | @Override | 179 | @Override |
181 | - public ListenableFuture<byte[]> sendAndReceive(Endpoint ep, String type, byte[] payload) | 180 | + public CompletableFuture<byte[]> sendAndReceive(Endpoint ep, String type, byte[] payload) { |
182 | - throws IOException { | 181 | + CompletableFuture<byte[]> response = new CompletableFuture<>(); |
183 | - SettableFuture<byte[]> futureResponse = SettableFuture.create(); | ||
184 | Long messageId = messageIdGenerator.incrementAndGet(); | 182 | Long messageId = messageIdGenerator.incrementAndGet(); |
185 | - responseFutures.put(messageId, futureResponse); | 183 | + responseFutures.put(messageId, response); |
186 | InternalMessage message = new InternalMessage.Builder(this) | 184 | InternalMessage message = new InternalMessage.Builder(this) |
187 | .withId(messageId) | 185 | .withId(messageId) |
188 | .withSender(localEp) | 186 | .withSender(localEp) |
... | @@ -193,9 +191,9 @@ public class NettyMessagingService implements MessagingService { | ... | @@ -193,9 +191,9 @@ public class NettyMessagingService implements MessagingService { |
193 | sendAsync(ep, message); | 191 | sendAsync(ep, message); |
194 | } catch (Exception e) { | 192 | } catch (Exception e) { |
195 | responseFutures.invalidate(messageId); | 193 | responseFutures.invalidate(messageId); |
196 | - throw e; | 194 | + response.completeExceptionally(e); |
197 | } | 195 | } |
198 | - return futureResponse; | 196 | + return response; |
199 | } | 197 | } |
200 | 198 | ||
201 | @Override | 199 | @Override |
... | @@ -333,10 +331,10 @@ public class NettyMessagingService implements MessagingService { | ... | @@ -333,10 +331,10 @@ public class NettyMessagingService implements MessagingService { |
333 | String type = message.type(); | 331 | String type = message.type(); |
334 | if (InternalMessage.REPLY_MESSAGE_TYPE.equals(type)) { | 332 | if (InternalMessage.REPLY_MESSAGE_TYPE.equals(type)) { |
335 | try { | 333 | try { |
336 | - SettableFuture<byte[]> futureResponse = | 334 | + CompletableFuture<byte[]> futureResponse = |
337 | NettyMessagingService.this.responseFutures.getIfPresent(message.id()); | 335 | NettyMessagingService.this.responseFutures.getIfPresent(message.id()); |
338 | if (futureResponse != null) { | 336 | if (futureResponse != null) { |
339 | - futureResponse.set(message.payload()); | 337 | + futureResponse.complete(message.payload()); |
340 | } else { | 338 | } else { |
341 | log.warn("Received a reply for message id:[{}]. " | 339 | log.warn("Received a reply for message id:[{}]. " |
342 | + " from {}. But was unable to locate the" | 340 | + " from {}. But was unable to locate the" | ... | ... |
-
Please register or login to post a comment