Brian O'Connor
Committed by Ray Milkey

Updates to the ECM (EventuallyConsistentMap) interface

Change-Id: Ie0cae42ac2b361cf3b94e5047c157cb0945f4209

Adding origin to IntentData and using it to pick the GossipIntentStore peer

Change-Id: I50e9621a69a35ec02b8c8dd79cc926591e5a73e9
......@@ -84,14 +84,13 @@ public class IntentRemoveCommand extends AbstractShellCommand {
Key key = Key.of(new BigInteger(id, 16).longValue(), appId);
Intent intent = intentService.getIntent(key);
if (intent != null) {
// set up latch and listener to track uninstall progress
CountDownLatch latch = new CountDownLatch(1);
IntentListener listener = (IntentEvent event) -> {
if (Objects.equals(event.subject().key(), key) &&
- (event.type() == IntentEvent.Type.WITHDRAWN
-         || event.type() == IntentEvent.Type.WITHDRAWN)) {
+ (event.type() == IntentEvent.Type.WITHDRAWN ||
+         event.type() == IntentEvent.Type.FAILED)) {
latch.countDown();
}
};
......
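Review note: the latch above counts down once the listener sees either terminal event (the old code compared against WITHDRAWN twice, so FAILED was never detected). A minimal sketch of how the command typically drives the latch; the timeout value and cleanup ordering here are illustrative assumptions, not part of this change:

// Sketch: block until the intent reaches WITHDRAWN or FAILED, then clean up.
intentService.addListener(listener);
intentService.withdraw(intent);
try {
    if (!latch.await(5, TimeUnit.SECONDS)) {        // assumed timeout
        print("Timed out waiting for intent %s removal", key);
    }
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();             // preserve interrupt status
} finally {
    intentService.removeListener(listener);
}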
......@@ -17,6 +17,7 @@ package org.onosproject.net.intent;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
import org.onosproject.cluster.NodeId;
import org.onosproject.store.Timestamp;
import java.util.List;
......@@ -32,6 +33,7 @@ public class IntentData { //FIXME need to make this "immutable"
private IntentState state;
private Timestamp version;
private NodeId origin;
private List<Intent> installables;
......@@ -61,6 +63,19 @@ public class IntentData { //FIXME need to make this "immutable"
return version;
}
/**
* Sets the origin, which is the node that created the instance.
*
* @param origin node identifier of the originating instance
*/
public void setOrigin(NodeId origin) {
this.origin = origin;
}
public NodeId origin() {
return origin;
}
public void setState(IntentState newState) {
this.state = newState;
}
......
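Review note: taken together with the GossipIntentStore change below, the intended lifecycle of the new field is that the node accepting the submission stamps itself as origin, and the store later reads it back when choosing a gossip peer. A minimal sketch; the constructor arguments shown are an assumption about IntentData's API:

// On the submitting node: record where this IntentData came from.
IntentData data = new IntentData(intent, IntentState.INSTALL_REQ, version); // assumed ctor
data.setOrigin(clusterService.getLocalNode().id());

// Later, in the store: read it back to decide which peer should hear about updates.
NodeId origin = data.origin();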
......@@ -16,11 +16,9 @@
package org.onosproject.store.cluster.messaging;
import com.google.common.util.concurrent.ListenableFuture;
import org.onosproject.cluster.NodeId;
import java.io.IOException;
import java.util.Set;
import java.util.concurrent.ExecutorService;
// TODO: remove IOExceptions?
......@@ -51,9 +49,8 @@ public interface ClusterCommunicationService {
* @param message message to send
* @param toNodeId node identifier
* @return true if the message was sent successfully; false otherwise.
- * @throws IOException when I/O exception of some sort has occurred
*/
- boolean unicast(ClusterMessage message, NodeId toNodeId) throws IOException;
+ boolean unicast(ClusterMessage message, NodeId toNodeId);
/**
* Multicast a message to a set of controller nodes.
......@@ -62,7 +59,7 @@ public interface ClusterCommunicationService {
* @param nodeIds recipient node identifiers
* @return true if the message was sent successfully to all nodes in the group; false otherwise.
*/
- boolean multicast(ClusterMessage message, Set<NodeId> nodeIds);
+ boolean multicast(ClusterMessage message, Iterable<NodeId> nodeIds);
/**
* Sends a message synchronously.
......
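Review note: the practical effect of dropping the throws clause is that call sites shrink from a try/catch to a boolean check. A sketch of the new calling convention (the log text is illustrative):

if (!clusterCommunicator.unicast(message, toNodeId)) {
    log.warn("Unicast to {} failed for subject {}", toNodeId, message.subject());
}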
......@@ -16,7 +16,6 @@
package org.onosproject.store.cluster.messaging.impl;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
......@@ -39,7 +38,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import static com.google.common.base.Preconditions.checkArgument;
......@@ -107,7 +105,7 @@ public class ClusterCommunicationManager
}
@Override
- public boolean multicast(ClusterMessage message, Set<NodeId> nodes) {
+ public boolean multicast(ClusterMessage message, Iterable<NodeId> nodes) {
boolean ok = true;
final ControllerNode localNode = clusterService.getLocalNode();
byte[] payload = message.getBytes();
......@@ -120,8 +118,8 @@ public class ClusterCommunicationManager
}
@Override
- public boolean unicast(ClusterMessage message, NodeId toNodeId) throws IOException {
-     return unicast(message.subject(), message.getBytes(), toNodeId);
+ public boolean unicast(ClusterMessage message, NodeId toNodeId) {
+     return unicastUnchecked(message.subject(), message.getBytes(), toNodeId);
}
private boolean unicast(MessageSubject subject, byte[] payload, NodeId toNodeId) throws IOException {
......@@ -137,7 +135,6 @@ public class ClusterCommunicationManager
}
}
private boolean unicastUnchecked(MessageSubject subject, byte[] payload, NodeId toNodeId) {
try {
return unicast(subject, payload, toNodeId);
......
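Review note: the truncated unicastUnchecked helper above is what absorbs the checked exception so the public method can stay clean. Its full shape is presumably along these lines; the catch body is an assumption:

private boolean unicastUnchecked(MessageSubject subject, byte[] payload, NodeId toNodeId) {
    try {
        return unicast(subject, payload, toNodeId);
    } catch (IOException e) {
        // Assumed handling: record the failure and report it via the return value.
        log.debug("Failed to send message {} to {}", subject, toNodeId, e);
        return false;
    }
}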
......@@ -21,7 +21,6 @@ import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.commons.lang3.RandomUtils;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
......@@ -305,14 +304,13 @@ public class GossipDeviceStore
ClusterMessage clusterMessage = new ClusterMessage(localNode, DEVICE_INJECTED,
SERIALIZER.encode(deviceInjectedEvent));
- try {
-     clusterCommunicator.unicast(clusterMessage, deviceNode);
- } catch (IOException e) {
-     log.warn("Failed to process injected device id: {} desc: {} " +
-             "(cluster messaging failed: {})",
-             deviceId, deviceDescription, e);
- }
+ // TODO check unicast return value
+ clusterCommunicator.unicast(clusterMessage, deviceNode);
+ /* error log:
+ log.warn("Failed to process injected device id: {} desc: {} " +
+         "(cluster messaging failed: {})",
+         deviceId, deviceDescription, e);
+ */
}
return deviceEvent;
......@@ -556,13 +554,14 @@ public class GossipDeviceStore
PortInjectedEvent portInjectedEvent = new PortInjectedEvent(providerId, deviceId, portDescriptions);
ClusterMessage clusterMessage = new ClusterMessage(
localNode, PORT_INJECTED, SERIALIZER.encode(portInjectedEvent));
- try {
-     clusterCommunicator.unicast(clusterMessage, deviceNode);
- } catch (IOException e) {
-     log.warn("Failed to process injected ports of device id: {} " +
-             "(cluster messaging failed: {})",
-             deviceId, e);
- }
+ //TODO check unicast return value
+ clusterCommunicator.unicast(clusterMessage, deviceNode);
+ /* error log:
+ log.warn("Failed to process injected ports of device id: {} " +
+         "(cluster messaging failed: {})",
+         deviceId, e);
+ */
}
return deviceEvents == null ? Collections.emptyList() : deviceEvents;
......@@ -842,13 +841,13 @@ public class GossipDeviceStore
DEVICE_REMOVE_REQ,
SERIALIZER.encode(deviceId));
- try {
-     clusterCommunicator.unicast(message, master);
- } catch (IOException e) {
-     log.error("Failed to forward {} remove request to {}", deviceId, master, e);
- }
+ // TODO check unicast return value
+ clusterCommunicator.unicast(message, master);
+ /* error log:
+ log.error("Failed to forward {} remove request to {}", deviceId, master, e);
+ */
- // event will be triggered after master processes it.
+ // event will be triggered after master processes it.
return null;
}
......
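Review note: all three GossipDeviceStore call sites above trade a catch block for a TODO, so a failed send is now silent. If the TODO were addressed, the commented-out diagnostics could be revived through the boolean result; a hypothetical follow-up for the first site, not part of this change:

if (!clusterCommunicator.unicast(clusterMessage, deviceNode)) {
    log.warn("Failed to process injected device id: {} desc: {} (unicast failed)",
             deviceId, deviceDescription);
}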
......@@ -29,6 +29,7 @@ import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.onlab.util.BoundedThreadPool;
import org.onlab.util.KryoNamespace;
import org.onlab.util.NewConcurrentHashMap;
import org.onosproject.cluster.ClusterService;
......@@ -138,7 +139,8 @@ public class DistributedFlowRuleStore
private ExecutorService messageHandlingExecutor;
private final ExecutorService backupExecutors =
- Executors.newSingleThreadExecutor(groupedThreads("onos/flow", "async-backups"));
+ BoundedThreadPool.newSingleThreadExecutor(groupedThreads("onos/flow", "async-backups"));
+ //Executors.newSingleThreadExecutor(groupedThreads("onos/flow", "async-backups"));
private boolean syncBackup = false;
......@@ -385,12 +387,8 @@ public class DistributedFlowRuleStore
SERIALIZER.encode(operation));
- try {
-     clusterCommunicator.unicast(message, replicaInfo.master().get());
- } catch (IOException e) {
-     log.warn("Failed to storeBatch: {}", e.getMessage());
+ if (!clusterCommunicator.unicast(message, replicaInfo.master().get())) {
+     log.warn("Failed to storeBatch: {} to {}", message, replicaInfo.master());
Set<FlowRule> allFailures = operation.getOperations().stream()
.map(op -> op.target())
......@@ -401,7 +399,6 @@ public class DistributedFlowRuleStore
new CompletedBatchOperation(false, allFailures, deviceId)));
return;
}
- }
private void storeBatchInternal(FlowRuleBatchOperation operation) {
......@@ -576,15 +573,13 @@ public class DistributedFlowRuleStore
if (nodeId == null) {
notifyDelegate(event);
} else {
- try {
-     ClusterMessage message = new ClusterMessage(
-             clusterService.getLocalNode().id(),
-             REMOTE_APPLY_COMPLETED,
-             SERIALIZER.encode(event));
-     clusterCommunicator.unicast(message, nodeId);
- } catch (IOException e) {
-     log.warn("Failed to respond to peer for batch operation result");
- }
+ ClusterMessage message = new ClusterMessage(
+         clusterService.getLocalNode().id(),
+         REMOTE_APPLY_COMPLETED,
+         SERIALIZER.encode(event));
+ // TODO check unicast return value
+ clusterCommunicator.unicast(message, nodeId);
+ //error log: log.warn("Failed to respond to peer for batch operation result");
}
}
......
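Review note: the executor swap matters because Executors.newSingleThreadExecutor sits on an unbounded LinkedBlockingQueue, so a stalled backup path can accumulate tasks without limit, whereas BoundedThreadPool caps the queue. A rough java.util.concurrent equivalent of what is presumably being bought here; the queue capacity and saturation policy are assumptions about BoundedThreadPool's defaults:

ExecutorService boundedSingleThread = new ThreadPoolExecutor(
        1, 1,                                   // exactly one worker thread
        0L, TimeUnit.MILLISECONDS,
        new LinkedBlockingQueue<>(10_000),      // bounded, unlike newSingleThreadExecutor
        groupedThreads("onos/flow", "async-backups"),
        new ThreadPoolExecutor.CallerRunsPolicy()); // back-pressure when saturated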
......@@ -15,6 +15,7 @@
*/
package org.onosproject.store.intent.impl;
import com.google.common.collect.ImmutableList;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
......@@ -23,6 +24,8 @@ import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.onlab.util.KryoNamespace;
import org.onosproject.cluster.ClusterService;
import org.onosproject.cluster.ControllerNode;
import org.onosproject.cluster.NodeId;
import org.onosproject.net.intent.Intent;
import org.onosproject.net.intent.IntentData;
import org.onosproject.net.intent.IntentEvent;
......@@ -41,7 +44,10 @@ import org.onosproject.store.impl.WallClockTimestamp;
import org.onosproject.store.serializers.KryoNamespaces;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import static org.onosproject.net.intent.IntentState.*;
......@@ -51,6 +57,8 @@ import static org.slf4j.LoggerFactory.getLogger;
* Manages inventory of Intents in a distributed data store that uses optimistic
* replication and gossip based techniques.
*/
//FIXME we should listen for leadership changes. if the local instance has just
// ... become a leader, scan the pending map and process those
@Component(immediate = false, enabled = true)
@Service
public class GossipIntentStore
......@@ -86,15 +94,17 @@ public class GossipIntentStore
clusterService,
clusterCommunicator,
intentSerializer,
- new IntentDataLogicalClockManager<>());
+ new IntentDataLogicalClockManager<>(),
+ (key, intentData) -> getPeerNodes(key, intentData));
pendingMap = new EventuallyConsistentMapImpl<>("intent-pending",
clusterService,
clusterCommunicator,
intentSerializer, // TODO
- new IntentDataClockManager<>());
+ new IntentDataClockManager<>(),
+ (key, intentData) -> getPeerNodes(key, intentData));
- currentMap.addListener(new InternalIntentStatesListener());
+ currentMap.addListener(new InternalCurrentListener());
pendingMap.addListener(new InternalPendingListener());
log.info("Started");
......@@ -226,7 +236,6 @@ public class GossipIntentStore
@Override
public void write(IntentData newData) {
IntentData currentData = currentMap.get(newData.key());
if (isUpdateAcceptable(currentData, newData)) {
// Only the master is modifying the current state. Therefore assume
// this always succeeds
......@@ -239,6 +248,34 @@ public class GossipIntentStore
}
}
private Iterable<NodeId> getPeerNodes(Key key, IntentData data) {
NodeId master = partitionService.getLeader(key);
NodeId origin = (data != null) ? data.origin() : null;
NodeId me = clusterService.getLocalNode().id();
boolean isMaster = Objects.equals(master, me);
boolean isOrigin = Objects.equals(origin, me);
if (isMaster && isOrigin) {
return ImmutableList.of(getRandomNode());
} else if (isMaster) {
return ImmutableList.of(origin);
} else if (isOrigin) {
return ImmutableList.of(master);
} else {
// FIXME: why are we here? log error?
return ImmutableList.of(master);
}
}
private NodeId getRandomNode() {
List<NodeId> nodes = clusterService.getNodes().stream()
.map(ControllerNode::id)
.collect(Collectors.toCollection(ArrayList::new));
Collections.shuffle(nodes);
// FIXME check if self
// FIXME verify nodes.size() > 0
return nodes.get(0);
}
@Override
public void batchWrite(Iterable<IntentData> updates) {
updates.forEach(this::write);
......@@ -263,6 +300,7 @@ public class GossipIntentStore
if (data.version() == null) {
data.setVersion(new WallClockTimestamp());
}
data.setOrigin(clusterService.getLocalNode().id());
pendingMap.put(data.key(), copyData(data));
}
......@@ -292,7 +330,7 @@ public class GossipIntentStore
}
}
- private final class InternalIntentStatesListener implements
+ private final class InternalCurrentListener implements
EventuallyConsistentMapListener<Key, IntentData> {
@Override
public void event(
......
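Review note: getPeerNodes encodes a simple pairing rule between the partition master (the node working on the intent) and the origin (the node that submitted it), enumerated here for reference:

// local == master && local == origin -> gossip with a random node (no natural peer)
// local == master && local != origin -> gossip with the origin
// local != master && local == origin -> gossip with the master
// neither                            -> fall back to the master (see FIXME above)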
......@@ -29,13 +29,13 @@ import org.onosproject.cluster.Leadership;
import org.onosproject.cluster.LeadershipEvent;
import org.onosproject.cluster.LeadershipEventListener;
import org.onosproject.cluster.LeadershipService;
import org.onosproject.cluster.NodeId;
import org.onosproject.net.intent.Key;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
- import com.google.common.base.Objects;
import java.util.List;
+ import java.util.Objects;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
......@@ -109,8 +109,13 @@ public class PartitionManager implements PartitionService {
@Override
public boolean isMine(Key intentKey) {
- return Objects.equal(leadershipService.getLeader(getPartitionPath(getPartitionForKey(intentKey))),
-         clusterService.getLocalNode().id());
+ return Objects.equals(leadershipService.getLeader(getPartitionPath(getPartitionForKey(intentKey))),
+         clusterService.getLocalNode().id());
}
@Override
public NodeId getLeader(Key intentKey) {
return leadershipService.getLeader(getPartitionPath(getPartitionForKey(intentKey)));
}
private void doRelinquish() {
......@@ -171,7 +176,7 @@ public class PartitionManager implements PartitionService {
public void event(LeadershipEvent event) {
Leadership leadership = event.subject();
- if (Objects.equal(leadership.leader(), clusterService.getLocalNode().id()) &&
+ if (Objects.equals(leadership.leader(), clusterService.getLocalNode().id()) &&
leadership.topic().startsWith(ELECTION_PREFIX)) {
// See if we need to let some partitions go
......
......@@ -15,6 +15,7 @@
*/
package org.onosproject.store.intent.impl;
import org.onosproject.cluster.NodeId;
import org.onosproject.net.intent.Key;
/**
......@@ -31,5 +32,13 @@ public interface PartitionService {
*/
boolean isMine(Key intentKey);
/**
* Returns the leader for a particular key.
*
* @param intentKey intent key to query
* @return the leader node
*/
NodeId getLeader(Key intentKey);
// TODO add API for rebalancing partitions
}
......
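Review note: a sketch of how the two PartitionService queries relate at a call site (GossipIntentStore.getPeerNodes above is the first consumer of the new getLeader):

NodeId master = partitionService.getLeader(key);  // node working on this key
boolean mine = partitionService.isMine(key);      // equivalent to comparing master
                                                  // against the local node id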
......@@ -337,13 +337,13 @@ public class GossipLinkStore
ClusterMessage linkInjectedMessage = new ClusterMessage(localNode,
GossipLinkStoreMessageSubjects.LINK_INJECTED, SERIALIZER.encode(linkInjectedEvent));
- try {
-     clusterCommunicator.unicast(linkInjectedMessage, dstNode);
- } catch (IOException e) {
-     log.warn("Failed to process link update between src: {} and dst: {} " +
-             "(cluster messaging failed: {})",
-             linkDescription.src(), linkDescription.dst(), e);
- }
+ // TODO check unicast return value
+ clusterCommunicator.unicast(linkInjectedMessage, dstNode);
+ /* error log:
+ log.warn("Failed to process link update between src: {} and dst: {} " +
+         "(cluster messaging failed: {})",
+         linkDescription.src(), linkDescription.dst(), e);
+ */
}
......
......@@ -15,19 +15,13 @@
*/
package org.onosproject.store.packet.impl;
import static org.onlab.util.Tools.groupedThreads;
import static org.slf4j.LoggerFactory.getLogger;
import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.onlab.util.KryoNamespace;
import org.onosproject.cluster.ClusterService;
import org.onosproject.cluster.NodeId;
import org.onosproject.mastership.MastershipService;
......@@ -43,9 +37,14 @@ import org.onosproject.store.cluster.messaging.ClusterMessageHandler;
import org.onosproject.store.cluster.messaging.MessageSubject;
import org.onosproject.store.serializers.KryoNamespaces;
import org.onosproject.store.serializers.KryoSerializer;
import org.onlab.util.KryoNamespace;
import org.slf4j.Logger;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import static org.onlab.util.Tools.groupedThreads;
import static org.slf4j.LoggerFactory.getLogger;
/**
* Distributed packet store implementation allowing packets to be sent to
* remote instances.
......@@ -118,12 +117,10 @@ public class DistributedPacketStore
return;
}
- try {
-     communicationService.unicast(new ClusterMessage(
-             myId, PACKET_OUT_SUBJECT, SERIALIZER.encode(packet)), master);
- } catch (IOException e) {
-     log.warn("Failed to send packet-out to {}", master);
- }
+ // TODO check unicast return value
+ communicationService.unicast(new ClusterMessage(
+         myId, PACKET_OUT_SUBJECT, SERIALIZER.encode(packet)), master);
+ // error log: log.warn("Failed to send packet-out to {}", master);
}
/**
......
......@@ -16,9 +16,9 @@
package org.onosproject.store.ecmap;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
......@@ -53,10 +53,7 @@ import java.util.concurrent.atomic.AtomicLong;
import static com.google.common.base.Preconditions.checkArgument;
import static junit.framework.TestCase.assertFalse;
import static org.easymock.EasyMock.*;
- import static org.junit.Assert.assertEquals;
- import static org.junit.Assert.assertNull;
- import static org.junit.Assert.assertTrue;
- import static org.junit.Assert.fail;
+ import static org.junit.Assert.*;
/**
* Unit tests for EventuallyConsistentMapImpl.
......@@ -119,8 +116,8 @@ public class EventuallyConsistentMapImplTest {
@Before
public void setUp() throws Exception {
clusterService = createMock(ClusterService.class);
- expect(clusterService.getLocalNode()).andReturn(self)
-         .anyTimes();
+ expect(clusterService.getLocalNode()).andReturn(self).anyTimes();
+ expect(clusterService.getNodes()).andReturn(ImmutableSet.of(self)).anyTimes();
replay(clusterService);
clusterCommunicator = createMock(ClusterCommunicationService.class);
......@@ -163,7 +160,7 @@ public class EventuallyConsistentMapImplTest {
@Test
public void testSize() throws Exception {
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
assertEquals(0, ecMap.size());
ecMap.put(KEY1, VALUE1);
......@@ -184,7 +181,7 @@ public class EventuallyConsistentMapImplTest {
@Test
public void testIsEmpty() throws Exception {
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
assertTrue(ecMap.isEmpty());
ecMap.put(KEY1, VALUE1);
......@@ -195,7 +192,7 @@ public class EventuallyConsistentMapImplTest {
@Test
public void testContainsKey() throws Exception {
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
assertFalse(ecMap.containsKey(KEY1));
ecMap.put(KEY1, VALUE1);
......@@ -207,7 +204,7 @@ public class EventuallyConsistentMapImplTest {
@Test
public void testContainsValue() throws Exception {
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
assertFalse(ecMap.containsValue(VALUE1));
ecMap.put(KEY1, VALUE1);
......@@ -222,7 +219,7 @@ public class EventuallyConsistentMapImplTest {
@Test
public void testGet() throws Exception {
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
CountDownLatch latch;
......@@ -278,7 +275,7 @@ public class EventuallyConsistentMapImplTest {
ecMap.addListener(listener);
// Set up expected internal message to be broadcast to peers on first put
- expectSpecificMessage(generatePutMessage(KEY1, VALUE1, clockService
+ expectSpecificMulticastMessage(generatePutMessage(KEY1, VALUE1, clockService
.peekAtNextTimestamp()), clusterCommunicator);
// Put first value
......@@ -289,7 +286,7 @@ public class EventuallyConsistentMapImplTest {
verify(clusterCommunicator);
// Set up expected internal message to be broadcast to peers on second put
- expectSpecificMessage(generatePutMessage(
+ expectSpecificMulticastMessage(generatePutMessage(
KEY1, VALUE2, clockService.peekAtNextTimestamp()), clusterCommunicator);
// Update same key to a new value
......@@ -332,14 +329,14 @@ public class EventuallyConsistentMapImplTest {
ecMap.addListener(listener);
// Put in an initial value
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
ecMap.put(KEY1, VALUE1);
assertEquals(VALUE1, ecMap.get(KEY1));
// Remove the value and check the correct internal cluster messages
// are sent
- expectSpecificMessage(generateRemoveMessage(KEY1, clockService.peekAtNextTimestamp()),
-         clusterCommunicator);
+ expectSpecificMulticastMessage(generateRemoveMessage(KEY1, clockService.peekAtNextTimestamp()),
+         clusterCommunicator);
ecMap.remove(KEY1);
assertNull(ecMap.get(KEY1));
......@@ -349,8 +346,8 @@ public class EventuallyConsistentMapImplTest {
// Remove the same value again. Even though the value is no longer in
// the map, we expect that the tombstone is updated and another remove
// event is sent to the cluster and external listeners.
- expectSpecificMessage(generateRemoveMessage(KEY1, clockService.peekAtNextTimestamp()),
-         clusterCommunicator);
+ expectSpecificMulticastMessage(generateRemoveMessage(KEY1, clockService.peekAtNextTimestamp()),
+         clusterCommunicator);
ecMap.remove(KEY1);
assertNull(ecMap.get(KEY1));
......@@ -359,7 +356,7 @@ public class EventuallyConsistentMapImplTest {
// Put in a new value for us to try and remove
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
ecMap.put(KEY2, VALUE2);
......@@ -400,8 +397,8 @@ public class EventuallyConsistentMapImplTest {
ecMap.addListener(listener);
// Expect a multi-update inter-instance message
- expectSpecificMessage(generatePutMessage(KEY1, VALUE1, KEY2, VALUE2),
-         clusterCommunicator);
+ expectSpecificBroadcastMessage(generatePutMessage(KEY1, VALUE1, KEY2, VALUE2),
+         clusterCommunicator);
Map<String, String> putAllValues = new HashMap<>();
putAllValues.put(KEY1, VALUE1);
......@@ -434,12 +431,12 @@ public class EventuallyConsistentMapImplTest {
verify(clusterCommunicator);
// Put some items in the map
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
ecMap.put(KEY1, VALUE1);
ecMap.put(KEY2, VALUE2);
ecMap.addListener(listener);
- expectSpecificMessage(generateRemoveMessage(KEY1, KEY2), clusterCommunicator);
+ expectSpecificBroadcastMessage(generateRemoveMessage(KEY1, KEY2), clusterCommunicator);
ecMap.clear();
......@@ -449,7 +446,7 @@ public class EventuallyConsistentMapImplTest {
@Test
public void testKeySet() throws Exception {
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
assertTrue(ecMap.keySet().isEmpty());
......@@ -482,7 +479,7 @@ public class EventuallyConsistentMapImplTest {
@Test
public void testValues() throws Exception {
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
assertTrue(ecMap.values().isEmpty());
......@@ -520,7 +517,7 @@ public class EventuallyConsistentMapImplTest {
@Test
public void testEntrySet() throws Exception {
- expectAnyMessage(clusterCommunicator);
+ expectPeerMessage(clusterCommunicator);
assertTrue(ecMap.entrySet().isEmpty());
......@@ -658,21 +655,52 @@ public class EventuallyConsistentMapImplTest {
* @param m message we expect to be sent
* @param clusterCommunicator a mock ClusterCommunicationService to set up
*/
- private static void expectSpecificMessage(ClusterMessage m,
-         ClusterCommunicationService clusterCommunicator) {
+ private static void expectSpecificBroadcastMessage(ClusterMessage m,
+         ClusterCommunicationService clusterCommunicator) {
reset(clusterCommunicator);
expect(clusterCommunicator.broadcast(m)).andReturn(true);
replay(clusterCommunicator);
}
/**
- * Sets up a mock ClusterCommunicationService to expect any cluster message
+ * Sets up a mock ClusterCommunicationService to expect a specific cluster
+ * message to be multicast to the cluster.
*
* @param m message we expect to be sent
* @param clusterCommunicator a mock ClusterCommunicationService to set up
*/
private static void expectSpecificMulticastMessage(ClusterMessage m,
ClusterCommunicationService clusterCommunicator) {
reset(clusterCommunicator);
expect(clusterCommunicator.multicast(eq(m), anyObject(Set.class))).andReturn(true);
replay(clusterCommunicator);
}
/**
* Sets up a mock ClusterCommunicationService to expect a multicast cluster message
* that is sent to it. This is useful for unit tests where we aren't
* interested in testing the messaging component.
*
* @param clusterCommunicator a mock ClusterCommunicationService to set up
*/
private void expectPeerMessage(ClusterCommunicationService clusterCommunicator) {
reset(clusterCommunicator);
expect(clusterCommunicator.multicast(anyObject(ClusterMessage.class),
anyObject(Iterable.class)))
.andReturn(true)
.anyTimes();
replay(clusterCommunicator);
}
/**
* Sets up a mock ClusterCommunicationService to expect a broadcast cluster message
* that is sent to it. This is useful for unit tests where we aren't
* interested in testing the messaging component.
*
* @param clusterCommunicator a mock ClusterCommunicationService to set up
*/
- private void expectAnyMessage(ClusterCommunicationService clusterCommunicator) {
+ private void expectBroadcastMessage(ClusterCommunicationService clusterCommunicator) {
reset(clusterCommunicator);
expect(clusterCommunicator.broadcast(anyObject(ClusterMessage.class)))
.andReturn(true)
......@@ -700,13 +728,12 @@ public class EventuallyConsistentMapImplTest {
}
@Override
- public boolean unicast(ClusterMessage message, NodeId toNodeId)
-         throws IOException {
+ public boolean unicast(ClusterMessage message, NodeId toNodeId) {
return false;
}
@Override
- public boolean multicast(ClusterMessage message, Set<NodeId> nodeIds) {
+ public boolean multicast(ClusterMessage message, Iterable<NodeId> nodeIds) {
return false;
}
......
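Review note: the split into specific and relaxed helpers gives the tests a consistent shape: relaxed expectations (expectPeerMessage, expectBroadcastMessage) for tests that only exercise map semantics, and specific expectations plus verify for tests that assert on the wire traffic. A sketch of the latter, using fixtures from this file:

expectSpecificMulticastMessage(generatePutMessage(KEY1, VALUE1,
        clockService.peekAtNextTimestamp()), clusterCommunicator);
ecMap.put(KEY1, VALUE1);
verify(clusterCommunicator);  // fails if no matching multicast was sent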
......@@ -31,6 +31,8 @@ import static org.junit.Assert.*;
*/
public class BlockingBooleanTest {
private static final int TIMEOUT = 100; //ms
@Test
public void basics() {
BlockingBoolean b = new BlockingBoolean(false);
......@@ -60,7 +62,7 @@ public class BlockingBooleanTest {
}
b.set(value);
try {
- assertTrue(latch.await(10, TimeUnit.MILLISECONDS));
+ assertTrue(latch.await(TIMEOUT, TimeUnit.MILLISECONDS));
} catch (InterruptedException e) {
fail();
}
......@@ -92,7 +94,7 @@ public class BlockingBooleanTest {
}
});
try {
- assertTrue(latch.await(10, TimeUnit.MILLISECONDS));
+ assertTrue(latch.await(TIMEOUT, TimeUnit.MILLISECONDS));
} catch (InterruptedException e) {
fail();
}
......@@ -124,14 +126,14 @@ public class BlockingBooleanTest {
});
}
try {
- assertTrue(sameLatch.await(10, TimeUnit.MILLISECONDS));
+ assertTrue(sameLatch.await(TIMEOUT, TimeUnit.MILLISECONDS));
assertEquals(waitLatch.getCount(), numThreads / 2);
} catch (InterruptedException e) {
fail();
}
b.set(true);
try {
- assertTrue(waitLatch.await(10, TimeUnit.MILLISECONDS));
+ assertTrue(waitLatch.await(TIMEOUT, TimeUnit.MILLISECONDS));
} catch (InterruptedException e) {
fail();
}
......@@ -156,7 +158,7 @@ public class BlockingBooleanTest {
}
});
try {
- assertTrue(latch.await(10, TimeUnit.MILLISECONDS));
+ assertTrue(latch.await(TIMEOUT, TimeUnit.MILLISECONDS));
} catch (InterruptedException e) {
fail();
}
......