Merge branch 'master' of ssh://gerrit.onlab.us:29418/onos-next
Conflicts:
	core/net/src/main/java/org/onlab/onos/net/proxyarp/impl/package-info.java

Change-Id: I7bf076fae02c619ff0d57ffcbff4a4189716c474

Showing 50 changed files with 2228 additions and 211 deletions
@@ -28,10 +28,6 @@
             <version>${project.version}</version>
         </dependency>
         <dependency>
-            <groupId>org.livetribe.slp</groupId>
-            <artifactId>livetribe-slp</artifactId>
-        </dependency>
-        <dependency>
             <groupId>org.apache.karaf.shell</groupId>
             <artifactId>org.apache.karaf.shell.console</artifactId>
         </dependency>
@@ -233,7 +233,7 @@ public class IOLoopTestClient {
         }
 
         @Override
-        protected void connect(SelectionKey key) {
+        protected void connect(SelectionKey key) throws IOException {
             super.connect(key);
             TestMessageStream b = (TestMessageStream) key.attachment();
             Worker w = ((CustomIOLoop) b.loop()).worker;
+livetribe.slp.da.expired.services.purge.period=60
+livetribe.slp.sa.client.connect.address=127.0.0.1
+livetribe.slp.sa.client.factory=org.livetribe.slp.sa.StandardServiceAgentClient$Factory
+livetribe.slp.sa.factory=org.livetribe.slp.sa.StandardServiceAgent$Factory
+livetribe.slp.sa.service.renewal.enabled=true
+livetribe.slp.sa.unicast.prefer.tcp=false
+livetribe.slp.tcp.connector.factory=org.livetribe.slp.spi.net.SocketTCPConnector$Factory
+livetribe.slp.tcp.connector.server.factory=org.livetribe.slp.spi.net.SocketTCPConnectorServer$Factory
+livetribe.slp.tcp.message.max.length=4096
+livetribe.slp.tcp.read.timeout=300000
+livetribe.slp.ua.client.factory=org.livetribe.slp.ua.StandardUserAgentClient$Factory
+livetribe.slp.ua.factory=org.livetribe.slp.ua.StandardUserAgent$Factory
+livetribe.slp.ua.unicast.prefer.tcp=false
+livetribe.slp.udp.connector.factory=org.livetribe.slp.spi.net.SocketUDPConnector$Factory
+livetribe.slp.udp.connector.server.factory=org.livetribe.slp.spi.net.SocketUDPConnectorServer$Factory
+net.slp.DAAddresses=
+net.slp.DAAttributes=
+net.slp.DAHeartBeat=10800
+net.slp.MTU=1400
+net.slp.SAAttributes=
+net.slp.broadcastAddress=255.255.255.255
+net.slp.datagramTimeouts=150,250,400
+net.slp.interfaces=0.0.0.0
+net.slp.isBroadcastOnly=false
+net.slp.locale=en
+net.slp.multicastAddress=239.255.255.253
+net.slp.multicastMaximumWait=15000
+net.slp.multicastTTL=255
+net.slp.multicastTimeouts=150,250,400,600,1000
+net.slp.notificationPort=1847
+net.slp.port=427
+net.slp.useScopes=default
+
+org.onlab.cluster.name = TV-ONOS
+package org.onlab.onos.cli;
+
+import org.apache.karaf.shell.commands.Argument;
+import org.apache.karaf.shell.commands.Command;
+import org.onlab.onos.cluster.ClusterAdminService;
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.packet.IpPrefix;
+
+/**
+ * Adds a new controller cluster node.
+ */
+@Command(scope = "onos", name = "add-node",
+         description = "Adds a new controller cluster node")
+public class NodeAddCommand extends AbstractShellCommand {
+
+    @Argument(index = 0, name = "nodeId", description = "Node ID",
+              required = true, multiValued = false)
+    String nodeId = null;
+
+    @Argument(index = 1, name = "ip", description = "Node IP address",
+              required = true, multiValued = false)
+    String ip = null;
+
+    @Argument(index = 2, name = "tcpPort", description = "Node TCP listen port",
+              required = false, multiValued = false)
+    int tcpPort = 9876;
+
+    @Override
+    protected void execute() {
+        ClusterAdminService service = get(ClusterAdminService.class);
+        service.addNode(new NodeId(nodeId), IpPrefix.valueOf(ip), tcpPort);
+    }
+
+}
+package org.onlab.onos.cli;
+
+import org.apache.karaf.shell.commands.Argument;
+import org.apache.karaf.shell.commands.Command;
+import org.onlab.onos.cluster.ClusterAdminService;
+import org.onlab.onos.cluster.NodeId;
+
+/**
+ * Removes a controller cluster node.
+ */
+@Command(scope = "onos", name = "remove-node",
+         description = "Removes a controller cluster node")
+public class NodeRemoveCommand extends AbstractShellCommand {
+
+    @Argument(index = 0, name = "nodeId", description = "Node ID",
+              required = true, multiValued = false)
+    String nodeId = null;
+
+    @Override
+    protected void execute() {
+        ClusterAdminService service = get(ClusterAdminService.class);
+        service.removeNode(new NodeId(nodeId));
+    }
+
+}
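For illustration, the two commands above would be invoked from the Karaf shell roughly as follows; the node id, IP address and port values are hypothetical, and the tcpPort argument may be omitted to take the 9876 default:

    onos> onos:add-node node-2 172.16.0.12 9876
    onos> onos:remove-node node-2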
@@ -17,7 +17,7 @@ import static com.google.common.collect.Lists.newArrayList;
 public class NodesListCommand extends AbstractShellCommand {
 
     private static final String FMT =
-            "id=%s, ip=%s, state=%s %s";
+            "id=%s, address=%s:%s, state=%s %s";
 
     @Override
     protected void execute() {
@@ -26,7 +26,7 @@ public class NodesListCommand extends AbstractShellCommand {
         Collections.sort(nodes, Comparators.NODE_COMPARATOR);
         ControllerNode self = service.getLocalNode();
         for (ControllerNode node : nodes) {
-            print(FMT, node.id(), node.ip(),
+            print(FMT, node.id(), node.ip(), node.tcpPort(),
                   service.getState(node.id()),
                   node.equals(self) ? "*" : "");
         }
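With the revised format string, each entry of the node listing now shows the IP address and TCP port together; an illustrative output line (values made up, the trailing '*' marking the local node) would be:

    id=node-1, address=172.16.0.11:9876, state=ACTIVE *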
@@ -5,6 +5,12 @@
             <action class="org.onlab.onos.cli.NodesListCommand"/>
         </command>
         <command>
+            <action class="org.onlab.onos.cli.NodeAddCommand"/>
+        </command>
+        <command>
+            <action class="org.onlab.onos.cli.NodeRemoveCommand"/>
+        </command>
+        <command>
             <action class="org.onlab.onos.cli.MastersListCommand"/>
             <completers>
                 <ref component-id="clusterIdCompleter"/>
 package org.onlab.onos.cluster;
 
+import org.onlab.packet.IpPrefix;
+
 /**
  * Service for administering the cluster node membership.
  */
 public interface ClusterAdminService {
 
     /**
+     * Adds a new controller node to the cluster.
+     *
+     * @param nodeId controller node identifier
+     * @param ip node IP listen address
+     * @param tcpPort tcp listen port
+     * @return newly added node
+     */
+    ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort);
+
+    /**
      * Removes the specified node from the cluster node list.
      *
      * @param nodeId controller node identifier
 package org.onlab.onos.cluster;
 
 import org.onlab.onos.store.Store;
+import org.onlab.packet.IpPrefix;
 
 import java.util.Set;
 
@@ -40,6 +41,16 @@ public interface ClusterStore extends Store<ClusterEvent, ClusterStoreDelegate>
     ControllerNode.State getState(NodeId nodeId);
 
     /**
+     * Adds a new controller node to the cluster.
+     *
+     * @param nodeId controller node identifier
+     * @param ip node IP listen address
+     * @param tcpPort tcp listen port
+     * @return newly added node
+     */
+    ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort);
+
+    /**
      * Removes the specified node from the inventory of cluster nodes.
      *
      * @param nodeId controller instance identifier
@@ -35,4 +35,12 @@ public interface ControllerNode {
      */
     IpPrefix ip();
 
+
+    /**
+     * Returns the TCP port on which the node listens for connections.
+     *
+     * @return TCP port
+     */
+    int tcpPort();
+
 }
@@ -11,13 +11,17 @@ import static com.google.common.base.MoreObjects.toStringHelper;
  */
 public class DefaultControllerNode implements ControllerNode {
 
+    private static final int DEFAULT_PORT = 9876;
+
     private final NodeId id;
     private final IpPrefix ip;
+    private final int tcpPort;
 
     // For serialization
     private DefaultControllerNode() {
         this.id = null;
         this.ip = null;
+        this.tcpPort = 0;
     }
 
     /**
@@ -27,8 +31,19 @@ public class DefaultControllerNode implements ControllerNode {
      * @param ip instance IP address
      */
     public DefaultControllerNode(NodeId id, IpPrefix ip) {
+        this(id, ip, DEFAULT_PORT);
+    }
+
+    /**
+     * Creates a new instance with the specified id, IP address and TCP port.
+     *
+     * @param id instance identifier
+     * @param ip instance IP address
+     */
+    public DefaultControllerNode(NodeId id, IpPrefix ip, int tcpPort) {
         this.id = id;
         this.ip = ip;
+        this.tcpPort = tcpPort;
     }
 
     @Override
@@ -42,6 +57,11 @@ public class DefaultControllerNode implements ControllerNode {
     }
 
     @Override
+    public int tcpPort() {
+        return tcpPort;
+    }
+
+    @Override
     public int hashCode() {
         return Objects.hash(id);
     }
@@ -60,7 +80,8 @@ public class DefaultControllerNode implements ControllerNode {
 
     @Override
     public String toString() {
-        return toStringHelper(this).add("id", id).add("ip", ip).toString();
+        return toStringHelper(this).add("id", id)
+                .add("ip", ip).add("tcpPort", tcpPort).toString();
     }
 
 }
@@ -16,10 +16,12 @@ import org.onlab.onos.cluster.ControllerNode;
 import org.onlab.onos.cluster.NodeId;
 import org.onlab.onos.event.AbstractListenerRegistry;
 import org.onlab.onos.event.EventDeliveryService;
+import org.onlab.packet.IpPrefix;
 import org.slf4j.Logger;
 
 import java.util.Set;
 
+import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkNotNull;
 import static org.slf4j.LoggerFactory.getLogger;
 
@@ -81,6 +83,14 @@ public class ClusterManager implements ClusterService, ClusterAdminService {
     }
 
     @Override
+    public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
+        checkNotNull(nodeId, INSTANCE_ID_NULL);
+        checkNotNull(ip, "IP address cannot be null");
+        checkArgument(tcpPort > 5000, "TCP port must be > 5000");
+        return store.addNode(nodeId, ip, tcpPort);
+    }
+
+    @Override
     public void removeNode(NodeId nodeId) {
         checkNotNull(nodeId, INSTANCE_ID_NULL);
         store.removeNode(nodeId);
@@ -33,8 +33,11 @@ import org.onlab.onos.net.device.PortDescription;
 import org.onlab.onos.net.provider.AbstractProvider;
 import org.onlab.onos.net.provider.ProviderId;
 import org.onlab.onos.store.common.StoreManager;
+import org.onlab.onos.store.common.StoreService;
 import org.onlab.onos.store.common.TestStoreManager;
 import org.onlab.onos.store.device.impl.DistributedDeviceStore;
+import org.onlab.onos.store.serializers.KryoSerializationManager;
+import org.onlab.onos.store.serializers.KryoSerializationService;
 import org.onlab.packet.IpPrefix;
 
 import java.util.ArrayList;
@@ -92,6 +95,7 @@ public class DistributedDeviceManagerTest {
     private DistributedDeviceStore dstore;
     private TestMastershipManager masterManager;
     private EventDeliveryService eventService;
+    private KryoSerializationManager serializationMgr;
 
     @Before
     public void setUp() {
@@ -107,7 +111,10 @@ public class DistributedDeviceManagerTest {
         storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
         storeManager.activate();
 
-        dstore = new TestDistributedDeviceStore();
+        serializationMgr = new KryoSerializationManager();
+        serializationMgr.activate();
+
+        dstore = new TestDistributedDeviceStore(storeManager, serializationMgr);
         dstore.activate();
 
         mgr.store = dstore;
@@ -133,6 +140,7 @@ public class DistributedDeviceManagerTest {
         mgr.deactivate();
 
         dstore.deactivate();
+        serializationMgr.deactivate();
         storeManager.deactivate();
     }
 
@@ -163,7 +171,7 @@ public class DistributedDeviceManagerTest {
     public void deviceDisconnected() {
         connectDevice(DID1, SW1);
         connectDevice(DID2, SW1);
-        validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED);
+        validateEvents(DEVICE_ADDED, DEVICE_ADDED);
         assertTrue("device should be available", service.isAvailable(DID1));
 
         // Disconnect
@@ -182,10 +190,10 @@ public class DistributedDeviceManagerTest {
     @Test
     public void deviceUpdated() {
         connectDevice(DID1, SW1);
-        validateEvents(DEVICE_ADDED, DEVICE_ADDED);
+        validateEvents(DEVICE_ADDED);
 
         connectDevice(DID1, SW2);
-        validateEvents(DEVICE_UPDATED, DEVICE_UPDATED);
+        validateEvents(DEVICE_UPDATED);
     }
 
     @Test
@@ -202,7 +210,7 @@ public class DistributedDeviceManagerTest {
         pds.add(new DefaultPortDescription(P2, true));
         pds.add(new DefaultPortDescription(P3, true));
         providerService.updatePorts(DID1, pds);
-        validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED, PORT_ADDED);
+        validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED, PORT_ADDED);
         pds.clear();
 
         pds.add(new DefaultPortDescription(P1, false));
@@ -218,7 +226,7 @@ public class DistributedDeviceManagerTest {
         pds.add(new DefaultPortDescription(P1, true));
         pds.add(new DefaultPortDescription(P2, true));
         providerService.updatePorts(DID1, pds);
-        validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
+        validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
 
         providerService.portStatusChanged(DID1, new DefaultPortDescription(P1, false));
         validateEvents(PORT_UPDATED);
@@ -233,7 +241,7 @@ public class DistributedDeviceManagerTest {
         pds.add(new DefaultPortDescription(P1, true));
         pds.add(new DefaultPortDescription(P2, true));
         providerService.updatePorts(DID1, pds);
-        validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
+        validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
         assertEquals("wrong port count", 2, service.getPorts(DID1).size());
 
         Port port = service.getPort(DID1, P1);
@@ -247,7 +255,7 @@ public class DistributedDeviceManagerTest {
         connectDevice(DID2, SW2);
         assertEquals("incorrect device count", 2, service.getDeviceCount());
         admin.removeDevice(DID1);
-        validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_REMOVED, DEVICE_REMOVED);
+        validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_REMOVED);
         assertNull("device should not be found", service.getDevice(DID1));
         assertNotNull("device should be found", service.getDevice(DID2));
         assertEquals("incorrect device count", 1, service.getDeviceCount());
@@ -298,8 +306,10 @@ public class DistributedDeviceManagerTest {
 
     private class TestDistributedDeviceStore extends DistributedDeviceStore {
 
-        public TestDistributedDeviceStore() {
-            this.storeService = storeManager;
+        public TestDistributedDeviceStore(StoreService storeService,
+                                          KryoSerializationService kryoSerializationService) {
+            this.storeService = storeService;
+            this.kryoSerializationService = kryoSerializationService;
         }
     }
 
@@ -26,6 +26,23 @@
             <artifactId>onos-core-serializers</artifactId>
             <version>${project.version}</version>
         </dependency>
+
+
+        <dependency>
+            <groupId>org.onlab.onos</groupId>
+            <artifactId>onlab-nio</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-databind</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-annotations</artifactId>
+        </dependency>
+
         <dependency>
             <groupId>org.apache.felix</groupId>
             <artifactId>org.apache.felix.scr.annotations</artifactId>
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/ClusterDefinitionStore.java
0 → 100644
+package org.onlab.onos.store.cluster.impl;
+
+import com.fasterxml.jackson.core.JsonEncoding;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.onlab.onos.cluster.DefaultControllerNode;
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.packet.IpPrefix;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+/**
+ * Allows for reading and writing cluster definition as a JSON file.
+ */
+public class ClusterDefinitionStore {
+
+    private final File file;
+
+    /**
+     * Creates a reader/writer of the cluster definition file.
+     *
+     * @param filePath location of the definition file
+     */
+    public ClusterDefinitionStore(String filePath) {
+        file = new File(filePath);
+    }
+
+    /**
+     * Returns set of the controller nodes, including self.
+     *
+     * @return set of controller nodes
+     */
+    public Set<DefaultControllerNode> read() throws IOException {
+        Set<DefaultControllerNode> nodes = new HashSet<>();
+        ObjectMapper mapper = new ObjectMapper();
+        ObjectNode clusterNodeDef = (ObjectNode) mapper.readTree(file);
+        Iterator<JsonNode> it = ((ArrayNode) clusterNodeDef.get("nodes")).elements();
+        while (it.hasNext()) {
+            ObjectNode nodeDef = (ObjectNode) it.next();
+            nodes.add(new DefaultControllerNode(new NodeId(nodeDef.get("id").asText()),
+                                                IpPrefix.valueOf(nodeDef.get("ip").asText()),
+                                                nodeDef.get("tcpPort").asInt(9876)));
+        }
+        return nodes;
+    }
+
+    /**
+     * Writes the given set of the controller nodes.
+     *
+     * @param nodes set of controller nodes
+     */
+    public void write(Set<DefaultControllerNode> nodes) throws IOException {
+        ObjectMapper mapper = new ObjectMapper();
+        ObjectNode clusterNodeDef = mapper.createObjectNode();
+        ArrayNode nodeDefs = mapper.createArrayNode();
+        clusterNodeDef.set("nodes", nodeDefs);
+        for (DefaultControllerNode node : nodes) {
+            ObjectNode nodeDef = mapper.createObjectNode();
+            nodeDef.put("id", node.id().toString())
+                   .put("ip", node.ip().toString())
+                   .put("tcpPort", node.tcpPort());
+            nodeDefs.add(nodeDef);
+        }
+        mapper.writeTree(new JsonFactory().createGenerator(file, JsonEncoding.UTF8),
+                         clusterNodeDef);
+    }
+
+}
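Given the field names used in read() and write() above, a cluster definition file would take roughly the following shape; the two node entries are purely illustrative:

    {
        "nodes": [
            {"id": "172.16.0.11", "ip": "172.16.0.11", "tcpPort": 9876},
            {"id": "172.16.0.12", "ip": "172.16.0.12", "tcpPort": 9876}
        ]
    }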
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
0 → 100644
+package org.onlab.onos.store.cluster.impl;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.nio.AcceptorLoop;
+import org.onlab.nio.IOLoop;
+import org.onlab.nio.MessageStream;
+import org.onlab.onos.cluster.ClusterEvent;
+import org.onlab.onos.cluster.ClusterStore;
+import org.onlab.onos.cluster.ClusterStoreDelegate;
+import org.onlab.onos.cluster.ControllerNode;
+import org.onlab.onos.cluster.DefaultControllerNode;
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.onos.store.AbstractStore;
+import org.onlab.packet.IpPrefix;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.SocketAddress;
+import java.nio.channels.ByteChannel;
+import java.nio.channels.SelectionKey;
+import java.nio.channels.ServerSocketChannel;
+import java.nio.channels.SocketChannel;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import static java.net.InetAddress.getByAddress;
+import static org.onlab.onos.cluster.ControllerNode.State;
+import static org.onlab.packet.IpPrefix.valueOf;
+import static org.onlab.util.Tools.namedThreads;
+
+/**
+ * Distributed implementation of the cluster nodes store.
+ */
+@Component(immediate = true)
+@Service
+public class DistributedClusterStore
+        extends AbstractStore<ClusterEvent, ClusterStoreDelegate>
+        implements ClusterStore {
+
+    private static final int HELLO_MSG = 1;
+    private static final int ECHO_MSG = 2;
+
+    private final Logger log = LoggerFactory.getLogger(getClass());
+
+    private static final long CONNECTION_CUSTODIAN_DELAY = 1000L;
+    private static final long CONNECTION_CUSTODIAN_FREQUENCY = 5000;
+
+    private static final long START_TIMEOUT = 1000;
+    private static final long SELECT_TIMEOUT = 50;
+    private static final int WORKERS = 3;
+    private static final int COMM_BUFFER_SIZE = 32 * 1024;
+    private static final int COMM_IDLE_TIME = 500;
+
+    private static final boolean SO_NO_DELAY = false;
+    private static final int SO_SEND_BUFFER_SIZE = COMM_BUFFER_SIZE;
+    private static final int SO_RCV_BUFFER_SIZE = COMM_BUFFER_SIZE;
+
+    private DefaultControllerNode self;
+    private final Map<NodeId, DefaultControllerNode> nodes = new ConcurrentHashMap<>();
+    private final Map<NodeId, State> states = new ConcurrentHashMap<>();
+
+    // Means to track message streams to other nodes.
+    private final Map<NodeId, TLVMessageStream> streams = new ConcurrentHashMap<>();
+    private final Map<SocketChannel, DefaultControllerNode> nodesByChannel = new ConcurrentHashMap<>();
+
+    // Executor pools for listening and managing connections to other nodes.
+    private final ExecutorService listenExecutor =
+            Executors.newSingleThreadExecutor(namedThreads("onos-comm-listen"));
+    private final ExecutorService commExecutors =
+            Executors.newFixedThreadPool(WORKERS, namedThreads("onos-comm-cluster"));
+    private final ExecutorService heartbeatExecutor =
+            Executors.newSingleThreadExecutor(namedThreads("onos-comm-heartbeat"));
+
+    private final Timer timer = new Timer("onos-comm-initiator");
+    private final TimerTask connectionCustodian = new ConnectionCustodian();
+
+    private ListenLoop listenLoop;
+    private List<CommLoop> commLoops = new ArrayList<>(WORKERS);
+
+    @Activate
+    public void activate() {
+        loadClusterDefinition();
+        startCommunications();
+        startListening();
+        startInitiating();
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        listenLoop.shutdown();
+        for (CommLoop loop : commLoops) {
+            loop.shutdown();
+        }
+        log.info("Stopped");
+    }
+
+    // Loads the cluster definition file
+    private void loadClusterDefinition() {
+//        ClusterDefinitionStore cds = new ClusterDefinitionStore("../config/cluster.json");
+//        try {
+//            Set<DefaultControllerNode> storedNodes = cds.read();
+//            for (DefaultControllerNode node : storedNodes) {
+//                nodes.put(node.id(), node);
+//            }
+//        } catch (IOException e) {
+//            log.error("Unable to read cluster definitions", e);
+//        }
+
+        // Establishes the controller's own identity.
+        IpPrefix ip = valueOf(System.getProperty("onos.ip", "127.0.1.1"));
+        self = nodes.get(new NodeId(ip.toString()));
+
+        // As a fall-back, let's make sure we at least know who we are.
+        if (self == null) {
+            self = new DefaultControllerNode(new NodeId(ip.toString()), ip);
+            nodes.put(self.id(), self);
+            states.put(self.id(), State.ACTIVE);
+        }
+    }
+
+    // Kicks off the IO loops.
+    private void startCommunications() {
+        for (int i = 0; i < WORKERS; i++) {
+            try {
+                CommLoop loop = new CommLoop();
+                commLoops.add(loop);
+                commExecutors.execute(loop);
+            } catch (IOException e) {
+                log.warn("Unable to start comm IO loop", e);
+            }
+        }
+
+        // Wait for the IO loops to start
+        for (CommLoop loop : commLoops) {
+            if (!loop.awaitStart(START_TIMEOUT)) {
+                log.warn("Comm loop did not start on-time; moving on...");
+            }
+        }
+    }
+
+    // Starts listening for connections from peer cluster members.
+    private void startListening() {
+        try {
+            listenLoop = new ListenLoop(self.ip(), self.tcpPort());
+            listenExecutor.execute(listenLoop);
+            if (!listenLoop.awaitStart(START_TIMEOUT)) {
+                log.warn("Listen loop did not start on-time; moving on...");
+            }
+        } catch (IOException e) {
+            log.error("Unable to listen for cluster connections", e);
+        }
+    }
+
+    /**
+     * Initiates open connection request and registers the pending socket
+     * channel with the given IO loop.
+     *
+     * @param loop loop with which the channel should be registered
+     * @throws java.io.IOException if the socket could not be open or connected
+     */
+    private void openConnection(DefaultControllerNode node, CommLoop loop) throws IOException {
+        SocketAddress sa = new InetSocketAddress(getByAddress(node.ip().toOctets()), node.tcpPort());
+        SocketChannel ch = SocketChannel.open();
+        nodesByChannel.put(ch, node);
+        ch.configureBlocking(false);
+        ch.connect(sa);
+        loop.connectStream(ch);
+    }
+
+
+    // Attempts to connect to any nodes that do not have an associated connection.
+    private void startInitiating() {
+        timer.schedule(connectionCustodian, CONNECTION_CUSTODIAN_DELAY, CONNECTION_CUSTODIAN_FREQUENCY);
+    }
+
+    @Override
+    public ControllerNode getLocalNode() {
+        return self;
+    }
+
+    @Override
+    public Set<ControllerNode> getNodes() {
+        ImmutableSet.Builder<ControllerNode> builder = ImmutableSet.builder();
+        return builder.addAll(nodes.values()).build();
+    }
+
+    @Override
+    public ControllerNode getNode(NodeId nodeId) {
+        return nodes.get(nodeId);
+    }
+
+    @Override
+    public State getState(NodeId nodeId) {
+        State state = states.get(nodeId);
+        return state == null ? State.INACTIVE : state;
+    }
+
+    @Override
+    public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
+        DefaultControllerNode node = new DefaultControllerNode(nodeId, ip, tcpPort);
+        nodes.put(nodeId, node);
+        return node;
+    }
+
+    @Override
+    public void removeNode(NodeId nodeId) {
+        nodes.remove(nodeId);
+        TLVMessageStream stream = streams.remove(nodeId);
+        if (stream != null) {
+            stream.close();
+        }
+    }
+
+    // Listens and accepts inbound connections from other cluster nodes.
+    private class ListenLoop extends AcceptorLoop {
+        ListenLoop(IpPrefix ip, int tcpPort) throws IOException {
+            super(SELECT_TIMEOUT, new InetSocketAddress(getByAddress(ip.toOctets()), tcpPort));
+        }
+
+        @Override
+        protected void acceptConnection(ServerSocketChannel channel) throws IOException {
+            SocketChannel sc = channel.accept();
+            sc.configureBlocking(false);
+
+            Socket so = sc.socket();
+            so.setTcpNoDelay(SO_NO_DELAY);
+            so.setReceiveBufferSize(SO_RCV_BUFFER_SIZE);
+            so.setSendBufferSize(SO_SEND_BUFFER_SIZE);
+
+            findLeastUtilizedLoop().acceptStream(sc);
+        }
+    }
+
+    private class CommLoop extends IOLoop<TLVMessage, TLVMessageStream> {
+        CommLoop() throws IOException {
+            super(SELECT_TIMEOUT);
+        }
+
+        @Override
+        protected TLVMessageStream createStream(ByteChannel byteChannel) {
+            return new TLVMessageStream(this, byteChannel, COMM_BUFFER_SIZE, COMM_IDLE_TIME);
+        }
+
+        @Override
+        protected void processMessages(List<TLVMessage> messages, MessageStream<TLVMessage> stream) {
+            TLVMessageStream tlvStream = (TLVMessageStream) stream;
+            for (TLVMessage message : messages) {
+                // TODO: add type-based dispatching here... this is just a hack to get going
+                if (message.type() == HELLO_MSG) {
+                    processHello(message, tlvStream);
+                } else if (message.type() == ECHO_MSG) {
+                    processEcho(message, tlvStream);
+                } else {
+                    log.info("Deal with other messages");
+                }
+            }
+        }
+
+        @Override
+        public TLVMessageStream acceptStream(SocketChannel channel) {
+            TLVMessageStream stream = super.acceptStream(channel);
+            try {
+                InetSocketAddress sa = (InetSocketAddress) channel.getRemoteAddress();
+                log.info("Accepted connection from node {}", valueOf(sa.getAddress().getAddress()));
+                stream.write(createHello(self));
+
+            } catch (IOException e) {
+                log.warn("Unable to accept connection from an unknown end-point", e);
+            }
+            return stream;
+        }
+
+        @Override
+        public TLVMessageStream connectStream(SocketChannel channel) {
+            TLVMessageStream stream = super.connectStream(channel);
+            DefaultControllerNode node = nodesByChannel.get(channel);
+            if (node != null) {
+                log.debug("Opened connection to node {}", node.id());
+                nodesByChannel.remove(channel);
+            }
+            return stream;
+        }
+
+        @Override
+        protected void connect(SelectionKey key) throws IOException {
+            try {
+                super.connect(key);
+                TLVMessageStream stream = (TLVMessageStream) key.attachment();
+                send(stream, createHello(self));
+            } catch (IOException e) {
+                if (!Objects.equals(e.getMessage(), "Connection refused")) {
+                    throw e;
+                }
+            }
+        }
+
+        @Override
+        protected void removeStream(MessageStream<TLVMessage> stream) {
+            DefaultControllerNode node = ((TLVMessageStream) stream).node();
+            if (node != null) {
+                log.info("Closed connection to node {}", node.id());
+                states.put(node.id(), State.INACTIVE);
+                streams.remove(node.id());
+            }
+            super.removeStream(stream);
+        }
+    }
+
+    // Processes a HELLO message from a peer controller node.
+    private void processHello(TLVMessage message, TLVMessageStream stream) {
+        // FIXME: pure hack for now
+        String data = new String(message.data());
+        String[] fields = data.split(":");
+        DefaultControllerNode node = new DefaultControllerNode(new NodeId(fields[0]),
+                                                               valueOf(fields[1]),
+                                                               Integer.parseInt(fields[2]));
+        stream.setNode(node);
+        nodes.put(node.id(), node);
+        streams.put(node.id(), stream);
+        states.put(node.id(), State.ACTIVE);
+    }
+
+    // Processes an ECHO message from a peer controller node.
+    private void processEcho(TLVMessage message, TLVMessageStream tlvStream) {
+        // TODO: implement heart-beat refresh
+        log.info("Dealing with echoes...");
+    }
+
+    // Sends message to the specified stream.
+    private void send(TLVMessageStream stream, TLVMessage message) {
+        try {
+            stream.write(message);
+        } catch (IOException e) {
+            log.warn("Unable to send message to {}", stream.node().id());
+        }
+    }
+
+    // Creates a hello message to be sent to a peer controller node.
+    private TLVMessage createHello(DefaultControllerNode self) {
+        return new TLVMessage(HELLO_MSG, (self.id() + ":" + self.ip() + ":" + self.tcpPort()).getBytes());
+    }
+
+    // Sweeps through all controller nodes and attempts to open connection to
+    // those that presently do not have one.
+    private class ConnectionCustodian extends TimerTask {
+        @Override
+        public void run() {
+            for (DefaultControllerNode node : nodes.values()) {
+                if (node != self && !streams.containsKey(node.id())) {
+                    try {
+                        openConnection(node, findLeastUtilizedLoop());
+                    } catch (IOException e) {
+                        log.debug("Unable to connect", e);
+                    }
+                }
+            }
+        }
+    }
+
+    // Finds the least utilized IO loop.
+    private CommLoop findLeastUtilizedLoop() {
+        CommLoop leastUtilized = null;
+        int minCount = Integer.MAX_VALUE;
+        for (CommLoop loop : commLoops) {
+            int count = loop.streamCount();
+            if (count == 0) {
+                return loop;
+            }
+
+            if (count < minCount) {
+                leastUtilized = loop;
+                minCount = count;
+            }
+        }
+        return leastUtilized;
+    }
+}
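A minimal sketch of how the store's node bookkeeping behaves, independent of any socket traffic; the node id and address are hypothetical, activate() is deliberately not called, and the assertions only restate what addNode, getState and removeNode above already do (assumes the org.onlab.onos.cluster.* and org.onlab.packet.IpPrefix imports shown earlier):

    // Hypothetical usage sketch, not part of this change.
    DistributedClusterStore store = new DistributedClusterStore();
    ControllerNode added = store.addNode(new NodeId("node-2"), IpPrefix.valueOf("172.16.0.12"), 9876);
    // The node is tracked immediately, but stays INACTIVE until the
    // ConnectionCustodian opens a connection and a HELLO exchange succeeds.
    assert store.getNode(added.id()) == added;
    assert store.getState(added.id()) == ControllerNode.State.INACTIVE;
    store.removeNode(added.id());
    assert store.getNode(added.id()) == null;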
+package org.onlab.onos.store.cluster.impl;
+
+import org.onlab.nio.AbstractMessage;
+
+import java.util.Objects;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+/**
+ * Base message for cluster-wide communications using TLVs.
+ */
+public class TLVMessage extends AbstractMessage {
+
+    private final int type;
+    private final byte[] data;
+
+    /**
+     * Creates an immutable TLV message.
+     *
+     * @param type message type
+     * @param data message data bytes
+     */
+    public TLVMessage(int type, byte[] data) {
+        this.length = data.length + TLVMessageStream.METADATA_LENGTH;
+        this.type = type;
+        this.data = data;
+    }
+
+    /**
+     * Returns the message type indicator.
+     *
+     * @return message type
+     */
+    public int type() {
+        return type;
+    }
+
+    /**
+     * Returns the data bytes.
+     *
+     * @return message data
+     */
+    public byte[] data() {
+        return data;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(type, data);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        final TLVMessage other = (TLVMessage) obj;
+        return Objects.equals(this.type, other.type) &&
+                Objects.equals(this.data, other.data);
+    }
+
+    @Override
+    public String toString() {
+        return toStringHelper(this).add("type", type).add("length", length).toString();
+    }
+
+}
+package org.onlab.onos.store.cluster.impl;
+
+import org.onlab.nio.IOLoop;
+import org.onlab.nio.MessageStream;
+import org.onlab.onos.cluster.DefaultControllerNode;
+
+import java.nio.ByteBuffer;
+import java.nio.channels.ByteChannel;
+
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * Stream for transferring TLV messages between cluster members.
+ */
+public class TLVMessageStream extends MessageStream<TLVMessage> {
+
+    public static final int METADATA_LENGTH = 16; // 8 + 4 + 4
+
+    private static final int LENGTH_OFFSET = 12;
+    private static final long MARKER = 0xfeedcafecafefeedL;
+
+    private DefaultControllerNode node;
+
+    /**
+     * Creates a message stream associated with the specified IO loop and
+     * backed by the given byte channel.
+     *
+     * @param loop          IO loop
+     * @param byteChannel   backing byte channel
+     * @param bufferSize    size of the backing byte buffers
+     * @param maxIdleMillis maximum number of millis the stream can be idle
+     */
+    protected TLVMessageStream(IOLoop<TLVMessage, ?> loop, ByteChannel byteChannel,
+                               int bufferSize, int maxIdleMillis) {
+        super(loop, byteChannel, bufferSize, maxIdleMillis);
+    }
+
+    /**
+     * Returns the node with which this stream is associated.
+     *
+     * @return controller node
+     */
+    DefaultControllerNode node() {
+        return node;
+    }
+
+    /**
+     * Sets the node with which this stream is affiliated.
+     *
+     * @param node controller node
+     */
+    void setNode(DefaultControllerNode node) {
+        checkState(this.node == null, "Stream is already bound to a node");
+        this.node = node;
+    }
+
+    @Override
+    protected TLVMessage read(ByteBuffer buffer) {
+        // Do we have enough bytes to read the header? If not, bail.
+        if (buffer.remaining() < METADATA_LENGTH) {
+            return null;
+        }
+
+        // Peek at the length and if we have enough to read the entire message
+        // go ahead, otherwise bail.
+        int length = buffer.getInt(buffer.position() + LENGTH_OFFSET);
+        if (buffer.remaining() < length) {
+            return null;
+        }
+
+        // At this point, we have enough data to read a complete message.
+        long marker = buffer.getLong();
+        checkState(marker == MARKER, "Incorrect message marker");
+
+        int type = buffer.getInt();
+        length = buffer.getInt();
+
+        // TODO: add deserialization hook here
+        byte[] data = new byte[length - METADATA_LENGTH];
+        buffer.get(data);
+
+        return new TLVMessage(type, data);
+    }
+
+    @Override
+    protected void write(TLVMessage message, ByteBuffer buffer) {
+        buffer.putLong(MARKER);
+        buffer.putInt(message.type());
+        buffer.putInt(message.length());
+
+        // TODO: add serialization hook here
+        buffer.put(message.data());
+    }
+
+}
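Putting read() and write() above together with the constants in DistributedClusterStore, each frame on the wire is laid out as follows; the length field counts the 16-byte header plus the payload:

    offset  0: 8-byte marker  0xfeedcafecafefeed
    offset  8: 4-byte type    (HELLO_MSG = 1, ECHO_MSG = 2)
    offset 12: 4-byte length  = METADATA_LENGTH (16) + data.length
    offset 16: data bytes     (length - 16 bytes of payload)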
... | @@ -86,46 +86,48 @@ public class OnosDistributedDeviceStore | ... | @@ -86,46 +86,48 @@ public class OnosDistributedDeviceStore |
86 | 86 | ||
87 | @Override | 87 | @Override |
88 | public Iterable<Device> getDevices() { | 88 | public Iterable<Device> getDevices() { |
89 | - // TODO builder v.s. copyOf. Guava semms to be using copyOf? | ||
90 | - // FIXME: synchronize. | ||
91 | Builder<Device> builder = ImmutableSet.builder(); | 89 | Builder<Device> builder = ImmutableSet.builder(); |
92 | - for (VersionedValue<? extends Device> device : devices.values()) { | 90 | + synchronized (this) { |
91 | + for (VersionedValue<Device> device : devices.values()) { | ||
93 | builder.add(device.entity()); | 92 | builder.add(device.entity()); |
94 | } | 93 | } |
95 | return builder.build(); | 94 | return builder.build(); |
96 | } | 95 | } |
96 | + } | ||
97 | 97 | ||
98 | @Override | 98 | @Override |
99 | public Device getDevice(DeviceId deviceId) { | 99 | public Device getDevice(DeviceId deviceId) { |
100 | - return devices.get(deviceId).entity(); | 100 | + VersionedValue<Device> device = devices.get(deviceId); |
101 | + checkArgument(device != null, DEVICE_NOT_FOUND, deviceId); | ||
102 | + return device.entity(); | ||
101 | } | 103 | } |
102 | 104 | ||
103 | @Override | 105 | @Override |
104 | public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId, | 106 | public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId, |
105 | DeviceDescription deviceDescription) { | 107 | DeviceDescription deviceDescription) { |
106 | - Timestamp now = clockService.getTimestamp(deviceId); | 108 | + Timestamp newTimestamp = clockService.getTimestamp(deviceId); |
107 | VersionedValue<Device> device = devices.get(deviceId); | 109 | VersionedValue<Device> device = devices.get(deviceId); |
108 | 110 | ||
109 | if (device == null) { | 111 | if (device == null) { |
110 | - return createDevice(providerId, deviceId, deviceDescription, now); | 112 | + return createDevice(providerId, deviceId, deviceDescription, newTimestamp); |
111 | } | 113 | } |
112 | 114 | ||
113 | - checkState(now.compareTo(device.timestamp()) > 0, | 115 | + checkState(newTimestamp.compareTo(device.timestamp()) > 0, |
114 | "Existing device has a timestamp in the future!"); | 116 | "Existing device has a timestamp in the future!"); |
115 | 117 | ||
116 | - return updateDevice(providerId, device.entity(), deviceDescription, now); | 118 | + return updateDevice(providerId, device.entity(), deviceDescription, newTimestamp); |
117 | } | 119 | } |
118 | 120 | ||
119 | // Creates the device and returns the appropriate event if necessary. | 121 | // Creates the device and returns the appropriate event if necessary. |
120 | private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId, | 122 | private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId, |
121 | DeviceDescription desc, Timestamp timestamp) { | 123 | DeviceDescription desc, Timestamp timestamp) { |
122 | - DefaultDevice device = new DefaultDevice(providerId, deviceId, desc.type(), | 124 | + Device device = new DefaultDevice(providerId, deviceId, desc.type(), |
123 | desc.manufacturer(), | 125 | desc.manufacturer(), |
124 | desc.hwVersion(), desc.swVersion(), | 126 | desc.hwVersion(), desc.swVersion(), |
125 | desc.serialNumber()); | 127 | desc.serialNumber()); |
126 | 128 | ||
127 | - devices.put(deviceId, new VersionedValue<Device>(device, true, timestamp)); | 129 | + devices.put(deviceId, new VersionedValue<>(device, true, timestamp)); |
128 | - // FIXME: broadcast a message telling peers of a device event. | 130 | + // TODO,FIXME: broadcast a message telling peers of a device event. |
129 | return new DeviceEvent(DEVICE_ADDED, device, null); | 131 | return new DeviceEvent(DEVICE_ADDED, device, null); |
130 | } | 132 | } |
131 | 133 | ||
... | @@ -148,7 +150,7 @@ public class OnosDistributedDeviceStore | ... | @@ -148,7 +150,7 @@ public class OnosDistributedDeviceStore |
148 | } | 150 | } |
149 | 151 | ||
150 | // Otherwise merely attempt to change availability | 152 | // Otherwise merely attempt to change availability |
151 | - DefaultDevice updated = new DefaultDevice(providerId, device.id(), | 153 | + Device updated = new DefaultDevice(providerId, device.id(), |
152 | desc.type(), | 154 | desc.type(), |
153 | desc.manufacturer(), | 155 | desc.manufacturer(), |
154 | desc.hwVersion(), | 156 | desc.hwVersion(), |
... | @@ -196,18 +198,18 @@ public class OnosDistributedDeviceStore | ... | @@ -196,18 +198,18 @@ public class OnosDistributedDeviceStore |
196 | VersionedValue<Device> device = devices.get(deviceId); | 198 | VersionedValue<Device> device = devices.get(deviceId); |
197 | checkArgument(device != null, DEVICE_NOT_FOUND, deviceId); | 199 | checkArgument(device != null, DEVICE_NOT_FOUND, deviceId); |
198 | Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId); | 200 | Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId); |
199 | - Timestamp timestamp = clockService.getTimestamp(deviceId); | 201 | + Timestamp newTimestamp = clockService.getTimestamp(deviceId); |
200 | 202 | ||
201 | // Add new ports | 203 | // Add new ports |
202 | Set<PortNumber> processed = new HashSet<>(); | 204 | Set<PortNumber> processed = new HashSet<>(); |
203 | for (PortDescription portDescription : portDescriptions) { | 205 | for (PortDescription portDescription : portDescriptions) { |
204 | VersionedValue<Port> port = ports.get(portDescription.portNumber()); | 206 | VersionedValue<Port> port = ports.get(portDescription.portNumber()); |
205 | if (port == null) { | 207 | if (port == null) { |
206 | - events.add(createPort(device, portDescription, ports, timestamp)); | 208 | + events.add(createPort(device, portDescription, ports, newTimestamp)); |
207 | } | 209 | } |
208 | - checkState(timestamp.compareTo(port.timestamp()) > 0, | 210 | + checkState(newTimestamp.compareTo(port.timestamp()) > 0, |
209 | "Existing port state has a timestamp in the future!"); | 211 | "Existing port state has a timestamp in the future!"); |
210 | - events.add(updatePort(device, port, portDescription, ports, timestamp)); | 212 | + events.add(updatePort(device.entity(), port.entity(), portDescription, ports, newTimestamp)); |
211 | processed.add(portDescription.portNumber()); | 213 | processed.add(portDescription.portNumber()); |
212 | } | 214 | } |
213 | 215 | ||
... | @@ -233,19 +235,19 @@ public class OnosDistributedDeviceStore | ... | @@ -233,19 +235,19 @@ public class OnosDistributedDeviceStore |
233 | // Checks if the specified port requires an update and, if so, replaces the | 235 | // Checks if the specified port requires an update and, if so, replaces the |
234 | // existing entry in the map and returns the corresponding event. | 236 | // existing entry in the map and returns the corresponding event. |
235 | //@GuardedBy("this") | 237 | //@GuardedBy("this") |
236 | - private DeviceEvent updatePort(VersionedValue<Device> device, VersionedValue<Port> port, | 238 | + private DeviceEvent updatePort(Device device, Port port, |
237 | PortDescription portDescription, | 239 | PortDescription portDescription, |
238 | Map<PortNumber, VersionedValue<Port>> ports, | 240 | Map<PortNumber, VersionedValue<Port>> ports, |
239 | Timestamp timestamp) { | 241 | Timestamp timestamp) { |
240 | - if (port.entity().isEnabled() != portDescription.isEnabled()) { | 242 | + if (port.isEnabled() != portDescription.isEnabled()) { |
241 | VersionedValue<Port> updatedPort = new VersionedValue<Port>( | 243 | VersionedValue<Port> updatedPort = new VersionedValue<Port>( |
242 | - new DefaultPort(device.entity(), portDescription.portNumber(), | 244 | + new DefaultPort(device, portDescription.portNumber(), |
243 | portDescription.isEnabled()), | 245 | portDescription.isEnabled()), |
244 | portDescription.isEnabled(), | 246 | portDescription.isEnabled(), |
245 | timestamp); | 247 | timestamp); |
246 | - ports.put(port.entity().number(), updatedPort); | 248 | + ports.put(port.number(), updatedPort); |
247 | - updatePortMap(device.entity().id(), ports); | 249 | + updatePortMap(device.id(), ports); |
248 | - return new DeviceEvent(PORT_UPDATED, device.entity(), updatedPort.entity()); | 250 | + return new DeviceEvent(PORT_UPDATED, device, updatedPort.entity()); |
249 | } | 251 | } |
250 | return null; | 252 | return null; |
251 | } | 253 | } |
... | @@ -300,7 +302,7 @@ public class OnosDistributedDeviceStore | ... | @@ -300,7 +302,7 @@ public class OnosDistributedDeviceStore |
300 | Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId); | 302 | Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId); |
301 | VersionedValue<Port> port = ports.get(portDescription.portNumber()); | 303 | VersionedValue<Port> port = ports.get(portDescription.portNumber()); |
302 | Timestamp timestamp = clockService.getTimestamp(deviceId); | 304 | Timestamp timestamp = clockService.getTimestamp(deviceId); |
303 | - return updatePort(device, port, portDescription, ports, timestamp); | 305 | + return updatePort(device.entity(), port.entity(), portDescription, ports, timestamp); |
304 | } | 306 | } |
305 | 307 | ||
306 | @Override | 308 | @Override | ... | ... |
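Every mutation in the device store above is guarded by a per-device logical timestamp from the ClockService: a description is applied only if its timestamp is newer than the stored VersionedValue, and the checkState guards assert otherwise. Below is a minimal sketch of that newest-wins rule, using only the VersionedValue and Timestamp types already shown in this change; the helper class itself is illustrative and not part of the commit, and it silently keeps the existing value where the store asserts.

import org.onlab.onos.store.Timestamp;
import org.onlab.onos.store.device.impl.VersionedValue;

// Illustrative helper, not part of the commit: applies a candidate value only
// when its timestamp is strictly newer than the stored one, mirroring the
// ordering guards used by the store above.
final class NewerWinsExample {

    private NewerWinsExample() {
    }

    static <T> VersionedValue<T> applyIfNewer(VersionedValue<T> current,
                                              T candidate,
                                              Timestamp candidateTime) {
        if (current == null) {
            // First observation of this entity: accept it as-is.
            return new VersionedValue<>(candidate, true, candidateTime);
        }
        if (candidateTime.compareTo(current.timestamp()) <= 0) {
            // Stale or duplicate event: keep the existing state.
            return current;
        }
        return new VersionedValue<>(candidate, true, candidateTime);
    }
}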
core/store/dist/src/main/java/org/onlab/onos/store/link/impl/OnosDistributedLinkStore.java
0 → 100644
1 | +package org.onlab.onos.store.link.impl; | ||
2 | + | ||
3 | +import static org.onlab.onos.net.Link.Type.DIRECT; | ||
4 | +import static org.onlab.onos.net.Link.Type.INDIRECT; | ||
5 | +import static org.onlab.onos.net.link.LinkEvent.Type.LINK_ADDED; | ||
6 | +import static org.onlab.onos.net.link.LinkEvent.Type.LINK_REMOVED; | ||
7 | +import static org.onlab.onos.net.link.LinkEvent.Type.LINK_UPDATED; | ||
8 | +import static org.slf4j.LoggerFactory.getLogger; | ||
9 | + | ||
10 | +import java.util.HashSet; | ||
11 | +import java.util.Set; | ||
12 | +import java.util.concurrent.ConcurrentHashMap; | ||
13 | +import java.util.concurrent.ConcurrentMap; | ||
14 | + | ||
15 | +import org.apache.felix.scr.annotations.Activate; | ||
16 | +import org.apache.felix.scr.annotations.Component; | ||
17 | +import org.apache.felix.scr.annotations.Deactivate; | ||
18 | +import org.apache.felix.scr.annotations.Reference; | ||
19 | +import org.apache.felix.scr.annotations.ReferenceCardinality; | ||
20 | +import org.apache.felix.scr.annotations.Service; | ||
21 | +import org.onlab.onos.net.ConnectPoint; | ||
22 | +import org.onlab.onos.net.DefaultLink; | ||
23 | +import org.onlab.onos.net.DeviceId; | ||
24 | +import org.onlab.onos.net.Link; | ||
25 | +import org.onlab.onos.net.LinkKey; | ||
26 | +import org.onlab.onos.net.link.LinkDescription; | ||
27 | +import org.onlab.onos.net.link.LinkEvent; | ||
28 | +import org.onlab.onos.net.link.LinkStore; | ||
29 | +import org.onlab.onos.net.link.LinkStoreDelegate; | ||
30 | +import org.onlab.onos.net.provider.ProviderId; | ||
31 | +import org.onlab.onos.store.AbstractStore; | ||
32 | +import org.onlab.onos.store.ClockService; | ||
33 | +import org.onlab.onos.store.Timestamp; | ||
34 | +import org.onlab.onos.store.device.impl.VersionedValue; | ||
35 | +import org.slf4j.Logger; | ||
36 | + | ||
37 | +import com.google.common.collect.HashMultimap; | ||
38 | +import com.google.common.collect.ImmutableSet; | ||
39 | +import com.google.common.collect.Multimap; | ||
40 | +import com.google.common.collect.ImmutableSet.Builder; | ||
41 | + | ||
42 | +import static com.google.common.base.Preconditions.checkArgument; | ||
43 | +import static com.google.common.base.Preconditions.checkState; | ||
44 | + | ||
45 | +/** | ||
46 | + * Manages inventory of infrastructure links using a protocol that takes into consideration | ||
47 | + * the order in which events occur. | ||
48 | + */ | ||
49 | +// FIXME: This does not yet implement the full protocol. | ||
50 | +// The full protocol requires the sender of the LLDP message to include the | ||
51 | +// version information of the src device/port, and the receiver to | ||
52 | +// take that into account when figuring out whether a more recent src | ||
53 | +// device/port down event renders the link discovery obsolete. | ||
54 | +@Component(immediate = true) | ||
55 | +@Service | ||
56 | +public class OnosDistributedLinkStore | ||
57 | + extends AbstractStore<LinkEvent, LinkStoreDelegate> | ||
58 | + implements LinkStore { | ||
59 | + | ||
60 | + private final Logger log = getLogger(getClass()); | ||
61 | + | ||
62 | + // Link inventory | ||
63 | + private ConcurrentMap<LinkKey, VersionedValue<Link>> links; | ||
64 | + | ||
65 | + public static final String LINK_NOT_FOUND = "Link between %s and %s not found"; | ||
66 | + | ||
67 | + // TODO synchronize? | ||
68 | + // Egress and ingress link sets | ||
69 | + private final Multimap<DeviceId, VersionedValue<Link>> srcLinks = HashMultimap.create(); | ||
70 | + private final Multimap<DeviceId, VersionedValue<Link>> dstLinks = HashMultimap.create(); | ||
71 | + | ||
72 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
73 | + protected ClockService clockService; | ||
74 | + | ||
75 | + @Activate | ||
76 | + public void activate() { | ||
77 | + | ||
78 | + links = new ConcurrentHashMap<>(); | ||
79 | + | ||
80 | + log.info("Started"); | ||
81 | + } | ||
82 | + | ||
83 | + @Deactivate | ||
84 | + public void deactivate() { | ||
85 | + log.info("Stopped"); | ||
86 | + } | ||
87 | + | ||
88 | + @Override | ||
89 | + public int getLinkCount() { | ||
90 | + return links.size(); | ||
91 | + } | ||
92 | + | ||
93 | + @Override | ||
94 | + public Iterable<Link> getLinks() { | ||
95 | + Builder<Link> builder = ImmutableSet.builder(); | ||
96 | + synchronized (this) { | ||
97 | + for (VersionedValue<Link> link : links.values()) { | ||
98 | + builder.add(link.entity()); | ||
99 | + } | ||
100 | + return builder.build(); | ||
101 | + } | ||
102 | + } | ||
103 | + | ||
104 | + @Override | ||
105 | + public Set<Link> getDeviceEgressLinks(DeviceId deviceId) { | ||
106 | + Set<VersionedValue<Link>> egressLinks = ImmutableSet.copyOf(srcLinks.get(deviceId)); | ||
107 | + Set<Link> rawEgressLinks = new HashSet<>(); | ||
108 | + for (VersionedValue<Link> link : egressLinks) { | ||
109 | + rawEgressLinks.add(link.entity()); | ||
110 | + } | ||
111 | + return rawEgressLinks; | ||
112 | + } | ||
113 | + | ||
114 | + @Override | ||
115 | + public Set<Link> getDeviceIngressLinks(DeviceId deviceId) { | ||
116 | + Set<VersionedValue<Link>> ingressLinks = ImmutableSet.copyOf(dstLinks.get(deviceId)); | ||
117 | + Set<Link> rawIngressLinks = new HashSet<>(); | ||
118 | + for (VersionedValue<Link> link : ingressLinks) { | ||
119 | + rawIngressLinks.add(link.entity()); | ||
120 | + } | ||
121 | + return rawIngressLinks; | ||
122 | + } | ||
123 | + | ||
124 | + @Override | ||
125 | + public Link getLink(ConnectPoint src, ConnectPoint dst) { | ||
126 | + VersionedValue<Link> link = links.get(new LinkKey(src, dst)); | ||
127 | + checkArgument(link != null, LINK_NOT_FOUND, src, dst); | ||
128 | + return link.entity(); | ||
129 | + } | ||
130 | + | ||
131 | + @Override | ||
132 | + public Set<Link> getEgressLinks(ConnectPoint src) { | ||
133 | + Set<Link> egressLinks = new HashSet<>(); | ||
134 | + for (VersionedValue<Link> link : srcLinks.get(src.deviceId())) { | ||
135 | + if (link.entity().src().equals(src)) { | ||
136 | + egressLinks.add(link.entity()); | ||
137 | + } | ||
138 | + } | ||
139 | + return egressLinks; | ||
140 | + } | ||
141 | + | ||
142 | + @Override | ||
143 | + public Set<Link> getIngressLinks(ConnectPoint dst) { | ||
144 | + Set<Link> ingressLinks = new HashSet<>(); | ||
145 | + for (VersionedValue<Link> link : dstLinks.get(dst.deviceId())) { | ||
146 | + if (link.entity().dst().equals(dst)) { | ||
147 | + ingressLinks.add(link.entity()); | ||
148 | + } | ||
149 | + } | ||
150 | + return ingressLinks; | ||
151 | + } | ||
152 | + | ||
153 | + @Override | ||
154 | + public LinkEvent createOrUpdateLink(ProviderId providerId, | ||
155 | + LinkDescription linkDescription) { | ||
156 | + | ||
157 | + final DeviceId destinationDeviceId = linkDescription.dst().deviceId(); | ||
158 | + final Timestamp newTimestamp = clockService.getTimestamp(destinationDeviceId); | ||
159 | + | ||
160 | + LinkKey key = new LinkKey(linkDescription.src(), linkDescription.dst()); | ||
161 | + VersionedValue<Link> link = links.get(key); | ||
162 | + if (link == null) { | ||
163 | + return createLink(providerId, key, linkDescription, newTimestamp); | ||
164 | + } | ||
165 | + | ||
166 | + checkState(newTimestamp.compareTo(link.timestamp()) > 0, | ||
167 | + "Existing Link has a timestamp in the future!"); | ||
168 | + | ||
169 | + return updateLink(providerId, link, key, linkDescription, newTimestamp); | ||
170 | + } | ||
171 | + | ||
172 | + // Creates and stores the link and returns the appropriate event. | ||
173 | + private LinkEvent createLink(ProviderId providerId, LinkKey key, | ||
174 | + LinkDescription linkDescription, Timestamp timestamp) { | ||
175 | + VersionedValue<Link> link = new VersionedValue<Link>(new DefaultLink(providerId, key.src(), key.dst(), | ||
176 | + linkDescription.type()), true, timestamp); | ||
177 | + synchronized (this) { | ||
178 | + links.put(key, link); | ||
179 | + addNewLink(link, timestamp); | ||
180 | + } | ||
181 | + // FIXME: notify peers. | ||
182 | + return new LinkEvent(LINK_ADDED, link.entity()); | ||
183 | + } | ||
184 | + | ||
185 | + // Adds the link to the egress and ingress link sets. | ||
186 | + private void addNewLink(VersionedValue<Link> link, Timestamp timestamp) { | ||
187 | + Link rawLink = link.entity(); | ||
188 | + synchronized (this) { | ||
189 | + srcLinks.put(rawLink.src().deviceId(), link); | ||
190 | + dstLinks.put(rawLink.dst().deviceId(), link); | ||
191 | + } | ||
192 | + } | ||
193 | + | ||
194 | + // Updates the specified link, if necessary, and returns the appropriate event. | ||
195 | + private LinkEvent updateLink(ProviderId providerId, VersionedValue<Link> existingLink, | ||
196 | + LinkKey key, LinkDescription linkDescription, Timestamp timestamp) { | ||
197 | + // FIXME confirm Link update condition is OK | ||
198 | + if (existingLink.entity().type() == INDIRECT && linkDescription.type() == DIRECT) { | ||
199 | + synchronized (this) { | ||
200 | + | ||
201 | + VersionedValue<Link> updatedLink = new VersionedValue<Link>( | ||
202 | + new DefaultLink(providerId, existingLink.entity().src(), existingLink.entity().dst(), | ||
203 | + linkDescription.type()), true, timestamp); | ||
204 | + links.replace(key, existingLink, updatedLink); | ||
205 | + | ||
206 | + replaceLink(existingLink, updatedLink); | ||
207 | + // FIXME: notify peers. | ||
208 | + return new LinkEvent(LINK_UPDATED, updatedLink.entity()); | ||
209 | + } | ||
210 | + } | ||
211 | + return null; | ||
212 | + } | ||
213 | + | ||
214 | + // Replaces the link in the egress and ingress link sets. | ||
215 | + private void replaceLink(VersionedValue<Link> current, VersionedValue<Link> updated) { | ||
216 | + synchronized (this) { | ||
217 | + srcLinks.remove(current.entity().src().deviceId(), current); | ||
218 | + dstLinks.remove(current.entity().dst().deviceId(), current); | ||
219 | + | ||
220 | + srcLinks.put(current.entity().src().deviceId(), updated); | ||
221 | + dstLinks.put(current.entity().dst().deviceId(), updated); | ||
222 | + } | ||
223 | + } | ||
224 | + | ||
225 | + @Override | ||
226 | + public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) { | ||
227 | + synchronized (this) { | ||
228 | + LinkKey key = new LinkKey(src, dst); | ||
229 | + VersionedValue<Link> link = links.remove(key); | ||
230 | + if (link != null) { | ||
231 | + removeLink(link); | ||
232 | + // FIXME: notify peers. | ||
233 | + return new LinkEvent(LINK_REMOVED, link.entity()); | ||
234 | + } | ||
235 | + return null; | ||
236 | + } | ||
237 | + } | ||
238 | + | ||
239 | + // Removes the link from the egress and ingress link sets. | ||
240 | + private void removeLink(VersionedValue<Link> link) { | ||
241 | + synchronized (this) { | ||
242 | + srcLinks.remove(link.entity().src().deviceId(), link); | ||
243 | + dstLinks.remove(link.entity().dst().deviceId(), link); | ||
244 | + } | ||
245 | + } | ||
246 | +} |
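Both createOrUpdateLink and updateLink above return null when a description does not change stored state (for example, a repeated or still-INDIRECT description for an existing link), so callers are expected to filter before publishing anything. The sketch below is a hypothetical caller, not part of the commit, using only the LinkStore, LinkEvent and ProviderId types from this file.

import org.onlab.onos.net.link.LinkDescription;
import org.onlab.onos.net.link.LinkEvent;
import org.onlab.onos.net.link.LinkStore;
import org.onlab.onos.net.provider.ProviderId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical caller: feed a description into the store and only act
// when the store reports an actual change.
final class LinkSubmitExample {

    private static final Logger LOG = LoggerFactory.getLogger(LinkSubmitExample.class);

    private LinkSubmitExample() {
    }

    static void submit(ProviderId providerId, LinkDescription description, LinkStore store) {
        LinkEvent event = store.createOrUpdateLink(providerId, description);
        if (event == null) {
            // Duplicate or non-upgrading description; nothing to publish.
            return;
        }
        // LINK_ADDED for a brand-new link, LINK_UPDATED when an INDIRECT
        // link was promoted to DIRECT by this description.
        LOG.info("Link state changed: {}", event);
    }
}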
... | @@ -49,6 +49,7 @@ public class DistributedClusterStore | ... | @@ -49,6 +49,7 @@ public class DistributedClusterStore |
49 | private final MembershipListener listener = new InternalMembershipListener(); | 49 | private final MembershipListener listener = new InternalMembershipListener(); |
50 | private final Map<NodeId, State> states = new ConcurrentHashMap<>(); | 50 | private final Map<NodeId, State> states = new ConcurrentHashMap<>(); |
51 | 51 | ||
52 | + @Override | ||
52 | @Activate | 53 | @Activate |
53 | public void activate() { | 54 | public void activate() { |
54 | super.activate(); | 55 | super.activate(); |
... | @@ -56,9 +57,9 @@ public class DistributedClusterStore | ... | @@ -56,9 +57,9 @@ public class DistributedClusterStore |
56 | 57 | ||
57 | rawNodes = theInstance.getMap("nodes"); | 58 | rawNodes = theInstance.getMap("nodes"); |
58 | OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader | 59 | OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader |
59 | - = new OptionalCacheLoader<>(storeService, rawNodes); | 60 | + = new OptionalCacheLoader<>(kryoSerializationService, rawNodes); |
60 | nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); | 61 | nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); |
61 | - rawNodes.addEntryListener(new RemoteEventHandler<>(nodes), true); | 62 | + rawNodes.addEntryListener(new RemoteCacheEventHandler<>(nodes), true); |
62 | 63 | ||
63 | loadClusterNodes(); | 64 | loadClusterNodes(); |
64 | 65 | ||
... | @@ -68,7 +69,7 @@ public class DistributedClusterStore | ... | @@ -68,7 +69,7 @@ public class DistributedClusterStore |
68 | // Loads the initial set of cluster nodes | 69 | // Loads the initial set of cluster nodes |
69 | private void loadClusterNodes() { | 70 | private void loadClusterNodes() { |
70 | for (Member member : theInstance.getCluster().getMembers()) { | 71 | for (Member member : theInstance.getCluster().getMembers()) { |
71 | - addMember(member); | 72 | + addNode(node(member)); |
72 | } | 73 | } |
73 | } | 74 | } |
74 | 75 | ||
... | @@ -104,6 +105,11 @@ public class DistributedClusterStore | ... | @@ -104,6 +105,11 @@ public class DistributedClusterStore |
104 | } | 105 | } |
105 | 106 | ||
106 | @Override | 107 | @Override |
108 | + public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) { | ||
109 | + return addNode(new DefaultControllerNode(nodeId, ip, tcpPort)); | ||
110 | + } | ||
111 | + | ||
112 | + @Override | ||
107 | public void removeNode(NodeId nodeId) { | 113 | public void removeNode(NodeId nodeId) { |
108 | synchronized (this) { | 114 | synchronized (this) { |
109 | rawNodes.remove(serialize(nodeId)); | 115 | rawNodes.remove(serialize(nodeId)); |
... | @@ -112,8 +118,7 @@ public class DistributedClusterStore | ... | @@ -112,8 +118,7 @@ public class DistributedClusterStore |
112 | } | 118 | } |
113 | 119 | ||
114 | // Adds a new node based on the specified member | 120 | // Adds a new node based on the specified member |
115 | - private synchronized ControllerNode addMember(Member member) { | 121 | + private synchronized ControllerNode addNode(DefaultControllerNode node) { |
116 | - DefaultControllerNode node = node(member); | ||
117 | rawNodes.put(serialize(node.id()), serialize(node)); | 122 | rawNodes.put(serialize(node.id()), serialize(node)); |
118 | nodes.put(node.id(), Optional.of(node)); | 123 | nodes.put(node.id(), Optional.of(node)); |
119 | states.put(node.id(), State.ACTIVE); | 124 | states.put(node.id(), State.ACTIVE); |
... | @@ -136,7 +141,7 @@ public class DistributedClusterStore | ... | @@ -136,7 +141,7 @@ public class DistributedClusterStore |
136 | @Override | 141 | @Override |
137 | public void memberAdded(MembershipEvent membershipEvent) { | 142 | public void memberAdded(MembershipEvent membershipEvent) { |
138 | log.info("Member {} added", membershipEvent.getMember()); | 143 | log.info("Member {} added", membershipEvent.getMember()); |
139 | - ControllerNode node = addMember(membershipEvent.getMember()); | 144 | + ControllerNode node = addNode(node(membershipEvent.getMember())); |
140 | notifyDelegate(new ClusterEvent(INSTANCE_ACTIVATED, node)); | 145 | notifyDelegate(new ClusterEvent(INSTANCE_ACTIVATED, node)); |
141 | } | 146 | } |
142 | 147 | ... | ... |
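Besides the member-driven path, the cluster store now exposes addNode(NodeId, IpPrefix, int) so a node can also be registered explicitly. A hypothetical call site follows, assuming a clusterStore reference and a logger are in scope; the NodeId constructor and the IpPrefix.valueOf factory are assumptions made for this example, and only the addNode signature comes from the diff.

// Hypothetical administrative registration of a peer node; only
// addNode(NodeId, IpPrefix, int) is taken from the change above.
ControllerNode peer = clusterStore.addNode(new NodeId("node-2"),
                                           IpPrefix.valueOf("192.168.56.102"),
                                           9876);
log.info("Registered cluster node {}", peer.id());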
... | @@ -52,7 +52,7 @@ implements MastershipStore { | ... | @@ -52,7 +52,7 @@ implements MastershipStore { |
52 | 52 | ||
53 | rawMasters = theInstance.getMap("masters"); | 53 | rawMasters = theInstance.getMap("masters"); |
54 | OptionalCacheLoader<DeviceId, NodeId> nodeLoader | 54 | OptionalCacheLoader<DeviceId, NodeId> nodeLoader |
55 | - = new OptionalCacheLoader<>(storeService, rawMasters); | 55 | + = new OptionalCacheLoader<>(kryoSerializationService, rawMasters); |
56 | masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); | 56 | masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); |
57 | rawMasters.addEntryListener(new RemoteMasterShipEventHandler(masters), true); | 57 | rawMasters.addEntryListener(new RemoteMasterShipEventHandler(masters), true); |
58 | 58 | ||
... | @@ -123,7 +123,7 @@ implements MastershipStore { | ... | @@ -123,7 +123,7 @@ implements MastershipStore { |
123 | return null; | 123 | return null; |
124 | } | 124 | } |
125 | 125 | ||
126 | - private class RemoteMasterShipEventHandler extends RemoteEventHandler<DeviceId, NodeId> { | 126 | + private class RemoteMasterShipEventHandler extends RemoteCacheEventHandler<DeviceId, NodeId> { |
127 | public RemoteMasterShipEventHandler(LoadingCache<DeviceId, Optional<NodeId>> cache) { | 127 | public RemoteMasterShipEventHandler(LoadingCache<DeviceId, Optional<NodeId>> cache) { |
128 | super(cache); | 128 | super(cache); |
129 | } | 129 | } | ... | ... |
... | @@ -6,6 +6,7 @@ import com.hazelcast.core.EntryAdapter; | ... | @@ -6,6 +6,7 @@ import com.hazelcast.core.EntryAdapter; |
6 | import com.hazelcast.core.EntryEvent; | 6 | import com.hazelcast.core.EntryEvent; |
7 | import com.hazelcast.core.HazelcastInstance; | 7 | import com.hazelcast.core.HazelcastInstance; |
8 | import com.hazelcast.core.MapEvent; | 8 | import com.hazelcast.core.MapEvent; |
9 | +import com.hazelcast.core.Member; | ||
9 | 10 | ||
10 | import org.apache.felix.scr.annotations.Activate; | 11 | import org.apache.felix.scr.annotations.Activate; |
11 | import org.apache.felix.scr.annotations.Component; | 12 | import org.apache.felix.scr.annotations.Component; |
... | @@ -14,6 +15,7 @@ import org.apache.felix.scr.annotations.ReferenceCardinality; | ... | @@ -14,6 +15,7 @@ import org.apache.felix.scr.annotations.ReferenceCardinality; |
14 | import org.onlab.onos.event.Event; | 15 | import org.onlab.onos.event.Event; |
15 | import org.onlab.onos.store.AbstractStore; | 16 | import org.onlab.onos.store.AbstractStore; |
16 | import org.onlab.onos.store.StoreDelegate; | 17 | import org.onlab.onos.store.StoreDelegate; |
18 | +import org.onlab.onos.store.serializers.KryoSerializationService; | ||
17 | import org.slf4j.Logger; | 19 | import org.slf4j.Logger; |
18 | 20 | ||
19 | import static com.google.common.base.Preconditions.checkNotNull; | 21 | import static com.google.common.base.Preconditions.checkNotNull; |
... | @@ -31,6 +33,9 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -31,6 +33,9 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
31 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 33 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
32 | protected StoreService storeService; | 34 | protected StoreService storeService; |
33 | 35 | ||
36 | + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
37 | + protected KryoSerializationService kryoSerializationService; | ||
38 | + | ||
34 | protected HazelcastInstance theInstance; | 39 | protected HazelcastInstance theInstance; |
35 | 40 | ||
36 | @Activate | 41 | @Activate |
... | @@ -45,7 +50,7 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -45,7 +50,7 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
45 | * @return serialized object | 50 | * @return serialized object |
46 | */ | 51 | */ |
47 | protected byte[] serialize(Object obj) { | 52 | protected byte[] serialize(Object obj) { |
48 | - return storeService.serialize(obj); | 53 | + return kryoSerializationService.serialize(obj); |
49 | } | 54 | } |
50 | 55 | ||
51 | /** | 56 | /** |
... | @@ -56,7 +61,7 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -56,7 +61,7 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
56 | * @return deserialized object | 61 | * @return deserialized object |
57 | */ | 62 | */ |
58 | protected <T> T deserialize(byte[] bytes) { | 63 | protected <T> T deserialize(byte[] bytes) { |
59 | - return storeService.deserialize(bytes); | 64 | + return kryoSerializationService.deserialize(bytes); |
60 | } | 65 | } |
61 | 66 | ||
62 | 67 | ||
... | @@ -66,8 +71,9 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -66,8 +71,9 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
66 | * @param <K> IMap key type after deserialization | 71 | * @param <K> IMap key type after deserialization |
67 | * @param <V> IMap value type after deserialization | 72 | * @param <V> IMap value type after deserialization |
68 | */ | 73 | */ |
69 | - public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> { | 74 | + public class RemoteCacheEventHandler<K, V> extends EntryAdapter<byte[], byte[]> { |
70 | 75 | ||
76 | + private final Member localMember; | ||
71 | private LoadingCache<K, Optional<V>> cache; | 77 | private LoadingCache<K, Optional<V>> cache; |
72 | 78 | ||
73 | /** | 79 | /** |
... | @@ -75,17 +81,26 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -75,17 +81,26 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
75 | * | 81 | * |
76 | * @param cache cache to update | 82 | * @param cache cache to update |
77 | */ | 83 | */ |
78 | - public RemoteEventHandler(LoadingCache<K, Optional<V>> cache) { | 84 | + public RemoteCacheEventHandler(LoadingCache<K, Optional<V>> cache) { |
85 | + this.localMember = theInstance.getCluster().getLocalMember(); | ||
79 | this.cache = checkNotNull(cache); | 86 | this.cache = checkNotNull(cache); |
80 | } | 87 | } |
81 | 88 | ||
82 | @Override | 89 | @Override |
83 | public void mapCleared(MapEvent event) { | 90 | public void mapCleared(MapEvent event) { |
91 | + if (localMember.equals(event.getMember())) { | ||
92 | + // ignore locally triggered event | ||
93 | + return; | ||
94 | + } | ||
84 | cache.invalidateAll(); | 95 | cache.invalidateAll(); |
85 | } | 96 | } |
86 | 97 | ||
87 | @Override | 98 | @Override |
88 | public void entryAdded(EntryEvent<byte[], byte[]> event) { | 99 | public void entryAdded(EntryEvent<byte[], byte[]> event) { |
100 | + if (localMember.equals(event.getMember())) { | ||
101 | + // ignore locally triggered event | ||
102 | + return; | ||
103 | + } | ||
89 | K key = deserialize(event.getKey()); | 104 | K key = deserialize(event.getKey()); |
90 | V newVal = deserialize(event.getValue()); | 105 | V newVal = deserialize(event.getValue()); |
91 | Optional<V> newValue = Optional.of(newVal); | 106 | Optional<V> newValue = Optional.of(newVal); |
... | @@ -95,6 +110,10 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -95,6 +110,10 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
95 | 110 | ||
96 | @Override | 111 | @Override |
97 | public void entryUpdated(EntryEvent<byte[], byte[]> event) { | 112 | public void entryUpdated(EntryEvent<byte[], byte[]> event) { |
113 | + if (localMember.equals(event.getMember())) { | ||
114 | + // ignore locally triggered event | ||
115 | + return; | ||
116 | + } | ||
98 | K key = deserialize(event.getKey()); | 117 | K key = deserialize(event.getKey()); |
99 | V oldVal = deserialize(event.getOldValue()); | 118 | V oldVal = deserialize(event.getOldValue()); |
100 | Optional<V> oldValue = Optional.fromNullable(oldVal); | 119 | Optional<V> oldValue = Optional.fromNullable(oldVal); |
... | @@ -106,6 +125,10 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -106,6 +125,10 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
106 | 125 | ||
107 | @Override | 126 | @Override |
108 | public void entryRemoved(EntryEvent<byte[], byte[]> event) { | 127 | public void entryRemoved(EntryEvent<byte[], byte[]> event) { |
128 | + if (localMember.equals(event.getMember())) { | ||
129 | + // ignore locally triggered event | ||
130 | + return; | ||
131 | + } | ||
109 | K key = deserialize(event.getKey()); | 132 | K key = deserialize(event.getKey()); |
110 | V val = deserialize(event.getOldValue()); | 133 | V val = deserialize(event.getOldValue()); |
111 | cache.invalidate(key); | 134 | cache.invalidate(key); |
... | @@ -141,4 +164,80 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel | ... | @@ -141,4 +164,80 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel |
141 | } | 164 | } |
142 | } | 165 | } |
143 | 166 | ||
167 | + /** | ||
168 | + * Distributed object remote entry event listener. | ||
169 | + * | ||
170 | + * @param <K> Entry key type after deserialization | ||
171 | + * @param <V> Entry value type after deserialization | ||
172 | + */ | ||
173 | + public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> { | ||
174 | + | ||
175 | + private final Member localMember; | ||
176 | + | ||
177 | + public RemoteEventHandler() { | ||
178 | + this.localMember = theInstance.getCluster().getLocalMember(); | ||
179 | + } | ||
180 | + @Override | ||
181 | + public void entryAdded(EntryEvent<byte[], byte[]> event) { | ||
182 | + if (localMember.equals(event.getMember())) { | ||
183 | + // ignore locally triggered event | ||
184 | + return; | ||
185 | + } | ||
186 | + K key = deserialize(event.getKey()); | ||
187 | + V newVal = deserialize(event.getValue()); | ||
188 | + onAdd(key, newVal); | ||
189 | + } | ||
190 | + | ||
191 | + @Override | ||
192 | + public void entryRemoved(EntryEvent<byte[], byte[]> event) { | ||
193 | + if (localMember.equals(event.getMember())) { | ||
194 | + // ignore locally triggered event | ||
195 | + return; | ||
196 | + } | ||
197 | + K key = deserialize(event.getKey()); | ||
198 | + V val = deserialize(event.getValue()); | ||
199 | + onRemove(key, val); | ||
200 | + } | ||
201 | + | ||
202 | + @Override | ||
203 | + public void entryUpdated(EntryEvent<byte[], byte[]> event) { | ||
204 | + if (localMember.equals(event.getMember())) { | ||
205 | + // ignore locally triggered event | ||
206 | + return; | ||
207 | + } | ||
208 | + K key = deserialize(event.getKey()); | ||
209 | + V oldVal = deserialize(event.getOldValue()); | ||
210 | + V newVal = deserialize(event.getValue()); | ||
211 | + onUpdate(key, oldVal, newVal); | ||
212 | + } | ||
213 | + | ||
214 | + /** | ||
215 | + * Remote entry addition hook. | ||
216 | + * | ||
217 | + * @param key new key | ||
218 | + * @param newVal new value | ||
219 | + */ | ||
220 | + protected void onAdd(K key, V newVal) { | ||
221 | + } | ||
222 | + | ||
223 | + /** | ||
224 | + * Remote entry update hook. | ||
225 | + * | ||
226 | + * @param key new key | ||
227 | + * @param oldValue old value | ||
228 | + * @param newVal new value | ||
229 | + */ | ||
230 | + protected void onUpdate(K key, V oldValue, V newVal) { | ||
231 | + } | ||
232 | + | ||
233 | + /** | ||
234 | + * Remote entry remove hook. | ||
235 | + * | ||
236 | + * @param key removed key | ||
237 | + * @param val removed value | ||
238 | + */ | ||
239 | + protected void onRemove(K key, V val) { | ||
240 | + } | ||
241 | + } | ||
242 | + | ||
144 | } | 243 | } | ... | ... |
... | @@ -2,6 +2,8 @@ package org.onlab.onos.store.common; | ... | @@ -2,6 +2,8 @@ package org.onlab.onos.store.common; |
2 | 2 | ||
3 | import static com.google.common.base.Preconditions.checkNotNull; | 3 | import static com.google.common.base.Preconditions.checkNotNull; |
4 | 4 | ||
5 | +import org.onlab.onos.store.serializers.KryoSerializationService; | ||
6 | + | ||
5 | import com.google.common.base.Optional; | 7 | import com.google.common.base.Optional; |
6 | import com.google.common.cache.CacheLoader; | 8 | import com.google.common.cache.CacheLoader; |
7 | import com.hazelcast.core.IMap; | 9 | import com.hazelcast.core.IMap; |
... | @@ -16,28 +18,28 @@ import com.hazelcast.core.IMap; | ... | @@ -16,28 +18,28 @@ import com.hazelcast.core.IMap; |
16 | public final class OptionalCacheLoader<K, V> extends | 18 | public final class OptionalCacheLoader<K, V> extends |
17 | CacheLoader<K, Optional<V>> { | 19 | CacheLoader<K, Optional<V>> { |
18 | 20 | ||
19 | - private final StoreService storeService; | 21 | + private final KryoSerializationService kryoSerializationService; |
20 | private IMap<byte[], byte[]> rawMap; | 22 | private IMap<byte[], byte[]> rawMap; |
21 | 23 | ||
22 | /** | 24 | /** |
23 | * Constructor. | 25 | * Constructor. |
24 | * | 26 | * |
25 | - * @param storeService to use for serialization | 27 | + * @param kryoSerializationService to use for serialization |
26 | * @param rawMap underlying IMap | 28 | * @param rawMap underlying IMap |
27 | */ | 29 | */ |
28 | - public OptionalCacheLoader(StoreService storeService, IMap<byte[], byte[]> rawMap) { | 30 | + public OptionalCacheLoader(KryoSerializationService kryoSerializationService, IMap<byte[], byte[]> rawMap) { |
29 | - this.storeService = checkNotNull(storeService); | 31 | + this.kryoSerializationService = checkNotNull(kryoSerializationService); |
30 | this.rawMap = checkNotNull(rawMap); | 32 | this.rawMap = checkNotNull(rawMap); |
31 | } | 33 | } |
32 | 34 | ||
33 | @Override | 35 | @Override |
34 | public Optional<V> load(K key) throws Exception { | 36 | public Optional<V> load(K key) throws Exception { |
35 | - byte[] keyBytes = storeService.serialize(key); | 37 | + byte[] keyBytes = kryoSerializationService.serialize(key); |
36 | byte[] valBytes = rawMap.get(keyBytes); | 38 | byte[] valBytes = rawMap.get(keyBytes); |
37 | if (valBytes == null) { | 39 | if (valBytes == null) { |
38 | return Optional.absent(); | 40 | return Optional.absent(); |
39 | } | 41 | } |
40 | - V dev = storeService.deserialize(valBytes); | 42 | + V dev = kryoSerializationService.deserialize(valBytes); |
41 | return Optional.of(dev); | 43 | return Optional.of(dev); |
42 | } | 44 | } |
43 | } | 45 | } | ... | ... |
... | @@ -5,46 +5,14 @@ import com.hazelcast.config.FileSystemXmlConfig; | ... | @@ -5,46 +5,14 @@ import com.hazelcast.config.FileSystemXmlConfig; |
5 | import com.hazelcast.core.Hazelcast; | 5 | import com.hazelcast.core.Hazelcast; |
6 | import com.hazelcast.core.HazelcastInstance; | 6 | import com.hazelcast.core.HazelcastInstance; |
7 | 7 | ||
8 | -import de.javakaffee.kryoserializers.URISerializer; | ||
9 | - | ||
10 | import org.apache.felix.scr.annotations.Activate; | 8 | import org.apache.felix.scr.annotations.Activate; |
11 | import org.apache.felix.scr.annotations.Component; | 9 | import org.apache.felix.scr.annotations.Component; |
12 | import org.apache.felix.scr.annotations.Deactivate; | 10 | import org.apache.felix.scr.annotations.Deactivate; |
13 | import org.apache.felix.scr.annotations.Service; | 11 | import org.apache.felix.scr.annotations.Service; |
14 | -import org.onlab.onos.cluster.ControllerNode; | ||
15 | -import org.onlab.onos.cluster.DefaultControllerNode; | ||
16 | -import org.onlab.onos.cluster.NodeId; | ||
17 | -import org.onlab.onos.net.ConnectPoint; | ||
18 | -import org.onlab.onos.net.DefaultDevice; | ||
19 | -import org.onlab.onos.net.DefaultLink; | ||
20 | -import org.onlab.onos.net.DefaultPort; | ||
21 | -import org.onlab.onos.net.Device; | ||
22 | -import org.onlab.onos.net.DeviceId; | ||
23 | -import org.onlab.onos.net.Element; | ||
24 | -import org.onlab.onos.net.Link; | ||
25 | -import org.onlab.onos.net.LinkKey; | ||
26 | -import org.onlab.onos.net.MastershipRole; | ||
27 | -import org.onlab.onos.net.Port; | ||
28 | -import org.onlab.onos.net.PortNumber; | ||
29 | -import org.onlab.onos.net.provider.ProviderId; | ||
30 | -import org.onlab.onos.store.serializers.ConnectPointSerializer; | ||
31 | -import org.onlab.onos.store.serializers.DefaultLinkSerializer; | ||
32 | -import org.onlab.onos.store.serializers.DefaultPortSerializer; | ||
33 | -import org.onlab.onos.store.serializers.DeviceIdSerializer; | ||
34 | -import org.onlab.onos.store.serializers.IpPrefixSerializer; | ||
35 | -import org.onlab.onos.store.serializers.LinkKeySerializer; | ||
36 | -import org.onlab.onos.store.serializers.NodeIdSerializer; | ||
37 | -import org.onlab.onos.store.serializers.PortNumberSerializer; | ||
38 | -import org.onlab.onos.store.serializers.ProviderIdSerializer; | ||
39 | -import org.onlab.packet.IpPrefix; | ||
40 | -import org.onlab.util.KryoPool; | ||
41 | import org.slf4j.Logger; | 12 | import org.slf4j.Logger; |
42 | import org.slf4j.LoggerFactory; | 13 | import org.slf4j.LoggerFactory; |
43 | 14 | ||
44 | import java.io.FileNotFoundException; | 15 | import java.io.FileNotFoundException; |
45 | -import java.net.URI; | ||
46 | -import java.util.ArrayList; | ||
47 | -import java.util.HashMap; | ||
48 | 16 | ||
49 | /** | 17 | /** |
50 | * Auxiliary bootstrap of distributed store. | 18 | * Auxiliary bootstrap of distributed store. |
... | @@ -58,55 +26,18 @@ public class StoreManager implements StoreService { | ... | @@ -58,55 +26,18 @@ public class StoreManager implements StoreService { |
58 | private final Logger log = LoggerFactory.getLogger(getClass()); | 26 | private final Logger log = LoggerFactory.getLogger(getClass()); |
59 | 27 | ||
60 | protected HazelcastInstance instance; | 28 | protected HazelcastInstance instance; |
61 | - private KryoPool serializerPool; | ||
62 | - | ||
63 | 29 | ||
64 | @Activate | 30 | @Activate |
65 | public void activate() { | 31 | public void activate() { |
66 | try { | 32 | try { |
67 | Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE); | 33 | Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE); |
68 | instance = Hazelcast.newHazelcastInstance(config); | 34 | instance = Hazelcast.newHazelcastInstance(config); |
69 | - setupKryoPool(); | ||
70 | log.info("Started"); | 35 | log.info("Started"); |
71 | } catch (FileNotFoundException e) { | 36 | } catch (FileNotFoundException e) { |
72 | log.error("Unable to configure Hazelcast", e); | 37 | log.error("Unable to configure Hazelcast", e); |
73 | } | 38 | } |
74 | } | 39 | } |
75 | 40 | ||
76 | - /** | ||
77 | - * Sets up the common serialzers pool. | ||
78 | - */ | ||
79 | - protected void setupKryoPool() { | ||
80 | - // FIXME Slice out types used in common to separate pool/namespace. | ||
81 | - serializerPool = KryoPool.newBuilder() | ||
82 | - .register(ArrayList.class, | ||
83 | - HashMap.class, | ||
84 | - | ||
85 | - ControllerNode.State.class, | ||
86 | - Device.Type.class, | ||
87 | - | ||
88 | - DefaultControllerNode.class, | ||
89 | - DefaultDevice.class, | ||
90 | - MastershipRole.class, | ||
91 | - Port.class, | ||
92 | - Element.class, | ||
93 | - | ||
94 | - Link.Type.class | ||
95 | - ) | ||
96 | - .register(IpPrefix.class, new IpPrefixSerializer()) | ||
97 | - .register(URI.class, new URISerializer()) | ||
98 | - .register(NodeId.class, new NodeIdSerializer()) | ||
99 | - .register(ProviderId.class, new ProviderIdSerializer()) | ||
100 | - .register(DeviceId.class, new DeviceIdSerializer()) | ||
101 | - .register(PortNumber.class, new PortNumberSerializer()) | ||
102 | - .register(DefaultPort.class, new DefaultPortSerializer()) | ||
103 | - .register(LinkKey.class, new LinkKeySerializer()) | ||
104 | - .register(ConnectPoint.class, new ConnectPointSerializer()) | ||
105 | - .register(DefaultLink.class, new DefaultLinkSerializer()) | ||
106 | - .build() | ||
107 | - .populate(10); | ||
108 | - } | ||
109 | - | ||
110 | @Deactivate | 41 | @Deactivate |
111 | public void deactivate() { | 42 | public void deactivate() { |
112 | instance.shutdown(); | 43 | instance.shutdown(); |
... | @@ -118,18 +49,4 @@ public class StoreManager implements StoreService { | ... | @@ -118,18 +49,4 @@ public class StoreManager implements StoreService { |
118 | return instance; | 49 | return instance; |
119 | } | 50 | } |
120 | 51 | ||
121 | - | ||
122 | - @Override | ||
123 | - public byte[] serialize(final Object obj) { | ||
124 | - return serializerPool.serialize(obj); | ||
125 | - } | ||
126 | - | ||
127 | - @Override | ||
128 | - public <T> T deserialize(final byte[] bytes) { | ||
129 | - if (bytes == null) { | ||
130 | - return null; | ||
131 | - } | ||
132 | - return serializerPool.deserialize(bytes); | ||
133 | - } | ||
134 | - | ||
135 | } | 52 | } | ... | ... |
... | @@ -15,22 +15,4 @@ public interface StoreService { | ... | @@ -15,22 +15,4 @@ public interface StoreService { |
15 | */ | 15 | */ |
16 | HazelcastInstance getHazelcastInstance(); | 16 | HazelcastInstance getHazelcastInstance(); |
17 | 17 | ||
18 | - /** | ||
19 | - * Serializes the specified object into bytes using one of the | ||
20 | - * pre-registered serializers. | ||
21 | - * | ||
22 | - * @param obj object to be serialized | ||
23 | - * @return serialized bytes | ||
24 | - */ | ||
25 | - public byte[] serialize(final Object obj); | ||
26 | - | ||
27 | - /** | ||
28 | - * Deserializes the specified bytes into an object using one of the | ||
29 | - * pre-registered serializers. | ||
30 | - * | ||
31 | - * @param bytes bytes to be deserialized | ||
32 | - * @return deserialized object | ||
33 | - */ | ||
34 | - public <T> T deserialize(final byte[] bytes); | ||
35 | - | ||
36 | } | 18 | } | ... | ... |
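With serialize/deserialize removed from StoreService, stores now obtain them from the injected KryoSerializationService, as AbstractHazelcastStore does above. The condensed consumer sketch below copies that field wiring; the helper method names are made up for the example.

// Sketch of a store-side consumer of the new serialization service.
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected KryoSerializationService kryoSerializationService;

private byte[] toBytes(DeviceId deviceId) {
    return kryoSerializationService.serialize(deviceId);
}

private DeviceId fromBytes(byte[] bytes) {
    // The old StoreManager returned null for null input; whether the new
    // service keeps that contract is not shown in this diff.
    return kryoSerializationService.deserialize(bytes);
}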
... | @@ -46,9 +46,8 @@ public class TestStoreManager extends StoreManager { | ... | @@ -46,9 +46,8 @@ public class TestStoreManager extends StoreManager { |
46 | this.instance = instance; | 46 | this.instance = instance; |
47 | } | 47 | } |
48 | 48 | ||
49 | - // Hazelcast setup removed from original code. | ||
50 | @Override | 49 | @Override |
51 | public void activate() { | 50 | public void activate() { |
52 | - setupKryoPool(); | 51 | + // Hazelcast setup removed from original code. |
53 | } | 52 | } |
54 | } | 53 | } | ... | ... |
... | @@ -72,6 +72,10 @@ public class DistributedDeviceStore | ... | @@ -72,6 +72,10 @@ public class DistributedDeviceStore |
72 | private IMap<byte[], byte[]> rawDevicePorts; | 72 | private IMap<byte[], byte[]> rawDevicePorts; |
73 | private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts; | 73 | private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts; |
74 | 74 | ||
75 | + private String devicesListener; | ||
76 | + | ||
77 | + private String portsListener; | ||
78 | + | ||
75 | @Override | 79 | @Override |
76 | @Activate | 80 | @Activate |
77 | public void activate() { | 81 | public void activate() { |
... | @@ -83,20 +87,20 @@ public class DistributedDeviceStore | ... | @@ -83,20 +87,20 @@ public class DistributedDeviceStore |
83 | // TODO decide on Map name scheme to avoid collision | 87 | // TODO decide on Map name scheme to avoid collision |
84 | rawDevices = theInstance.getMap("devices"); | 88 | rawDevices = theInstance.getMap("devices"); |
85 | final OptionalCacheLoader<DeviceId, DefaultDevice> deviceLoader | 89 | final OptionalCacheLoader<DeviceId, DefaultDevice> deviceLoader |
86 | - = new OptionalCacheLoader<>(storeService, rawDevices); | 90 | + = new OptionalCacheLoader<>(kryoSerializationService, rawDevices); |
87 | devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader)); | 91 | devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader)); |
88 | // refresh/populate cache based on notification from other instance | 92 | // refresh/populate cache based on notification from other instance |
89 | - rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue); | 93 | + devicesListener = rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue); |
90 | 94 | ||
91 | // TODO cache availableDevices | 95 | // TODO cache availableDevices |
92 | availableDevices = theInstance.getSet("availableDevices"); | 96 | availableDevices = theInstance.getSet("availableDevices"); |
93 | 97 | ||
94 | rawDevicePorts = theInstance.getMap("devicePorts"); | 98 | rawDevicePorts = theInstance.getMap("devicePorts"); |
95 | final OptionalCacheLoader<DeviceId, Map<PortNumber, Port>> devicePortLoader | 99 | final OptionalCacheLoader<DeviceId, Map<PortNumber, Port>> devicePortLoader |
96 | - = new OptionalCacheLoader<>(storeService, rawDevicePorts); | 100 | + = new OptionalCacheLoader<>(kryoSerializationService, rawDevicePorts); |
97 | devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader)); | 101 | devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader)); |
98 | // refresh/populate cache based on notification from other instance | 102 | // refresh/populate cache based on notification from other instance |
99 | - rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue); | 103 | + portsListener = rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue); |
100 | 104 | ||
101 | loadDeviceCache(); | 105 | loadDeviceCache(); |
102 | loadDevicePortsCache(); | 106 | loadDevicePortsCache(); |
... | @@ -106,6 +110,8 @@ public class DistributedDeviceStore | ... | @@ -106,6 +110,8 @@ public class DistributedDeviceStore |
106 | 110 | ||
107 | @Deactivate | 111 | @Deactivate |
108 | public void deactivate() { | 112 | public void deactivate() { |
113 | + rawDevicePorts.removeEntryListener(portsListener); | ||
114 | + rawDevices.removeEntryListener(devicesListener); | ||
109 | log.info("Stopped"); | 115 | log.info("Stopped"); |
110 | } | 116 | } |
111 | 117 | ||
... | @@ -354,7 +360,7 @@ public class DistributedDeviceStore | ... | @@ -354,7 +360,7 @@ public class DistributedDeviceStore |
354 | } | 360 | } |
355 | } | 361 | } |
356 | 362 | ||
357 | - private class RemoteDeviceEventHandler extends RemoteEventHandler<DeviceId, DefaultDevice> { | 363 | + private class RemoteDeviceEventHandler extends RemoteCacheEventHandler<DeviceId, DefaultDevice> { |
358 | public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) { | 364 | public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) { |
359 | super(cache); | 365 | super(cache); |
360 | } | 366 | } |
... | @@ -375,7 +381,7 @@ public class DistributedDeviceStore | ... | @@ -375,7 +381,7 @@ public class DistributedDeviceStore |
375 | } | 381 | } |
376 | } | 382 | } |
377 | 383 | ||
378 | - private class RemotePortEventHandler extends RemoteEventHandler<DeviceId, Map<PortNumber, Port>> { | 384 | + private class RemotePortEventHandler extends RemoteCacheEventHandler<DeviceId, Map<PortNumber, Port>> { |
379 | public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) { | 385 | public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) { |
380 | super(cache); | 386 | super(cache); |
381 | } | 387 | } | ... | ... |
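Both Hazelcast-backed stores now keep the registration id returned by IMap.addEntryListener and use it to deregister in deactivate(), instead of leaking listeners across component restarts (the link store below applies the same fix). The condensed lifecycle sketch below is drawn from the device store above; theInstance, devices and RemoteDeviceEventHandler are the fields and types already shown in this change.

// Condensed lifecycle sketch of the listener registration pattern above.
private IMap<byte[], byte[]> rawDevices;
private String devicesListener;

@Activate
public void activate() {
    rawDevices = theInstance.getMap("devices");
    // Keep the registration id so the listener can be removed later.
    devicesListener = rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), true);
}

@Deactivate
public void deactivate() {
    // Deregister to avoid duplicate callbacks if the component is recycled.
    rawDevices.removeEntryListener(devicesListener);
}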
... | @@ -58,6 +58,8 @@ public class DistributedLinkStore | ... | @@ -58,6 +58,8 @@ public class DistributedLinkStore |
58 | private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); | 58 | private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); |
59 | private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); | 59 | private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); |
60 | 60 | ||
61 | + private String linksListener; | ||
62 | + | ||
61 | @Override | 63 | @Override |
62 | @Activate | 64 | @Activate |
63 | public void activate() { | 65 | public void activate() { |
... | @@ -68,10 +70,10 @@ public class DistributedLinkStore | ... | @@ -68,10 +70,10 @@ public class DistributedLinkStore |
68 | // TODO decide on Map name scheme to avoid collision | 70 | // TODO decide on Map name scheme to avoid collision |
69 | rawLinks = theInstance.getMap("links"); | 71 | rawLinks = theInstance.getMap("links"); |
70 | final OptionalCacheLoader<LinkKey, DefaultLink> linkLoader | 72 | final OptionalCacheLoader<LinkKey, DefaultLink> linkLoader |
71 | - = new OptionalCacheLoader<>(storeService, rawLinks); | 73 | + = new OptionalCacheLoader<>(kryoSerializationService, rawLinks); |
72 | links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader)); | 74 | links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader)); |
73 | // refresh/populate cache based on notification from other instance | 75 | // refresh/populate cache based on notification from other instance |
74 | - rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue); | 76 | + linksListener = rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue); |
75 | 77 | ||
76 | loadLinkCache(); | 78 | loadLinkCache(); |
77 | 79 | ||
... | @@ -80,7 +82,7 @@ public class DistributedLinkStore | ... | @@ -80,7 +82,7 @@ public class DistributedLinkStore |
80 | 82 | ||
81 | @Deactivate | 83 | @Deactivate |
82 | public void deactivate() { | 84 | public void deactivate() { |
83 | - super.activate(); | 85 | + rawLinks.removeEntryListener(linksListener); |
84 | log.info("Stopped"); | 86 | log.info("Stopped"); |
85 | } | 87 | } |
86 | 88 | ||
... | @@ -233,7 +235,7 @@ public class DistributedLinkStore | ... | @@ -233,7 +235,7 @@ public class DistributedLinkStore |
233 | } | 235 | } |
234 | } | 236 | } |
235 | 237 | ||
236 | - private class RemoteLinkEventHandler extends RemoteEventHandler<LinkKey, DefaultLink> { | 238 | + private class RemoteLinkEventHandler extends RemoteCacheEventHandler<LinkKey, DefaultLink> { |
237 | public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) { | 239 | public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) { |
238 | super(cache); | 240 | super(cache); |
239 | } | 241 | } | ... | ... |
... | @@ -20,6 +20,7 @@ import org.junit.After; | ... | @@ -20,6 +20,7 @@ import org.junit.After; |
20 | import org.junit.AfterClass; | 20 | import org.junit.AfterClass; |
21 | import org.junit.Before; | 21 | import org.junit.Before; |
22 | import org.junit.BeforeClass; | 22 | import org.junit.BeforeClass; |
23 | +import org.junit.Ignore; | ||
23 | import org.junit.Test; | 24 | import org.junit.Test; |
24 | import org.onlab.onos.net.Device; | 25 | import org.onlab.onos.net.Device; |
25 | import org.onlab.onos.net.DeviceId; | 26 | import org.onlab.onos.net.DeviceId; |
... | @@ -35,12 +36,17 @@ import org.onlab.onos.net.provider.ProviderId; | ... | @@ -35,12 +36,17 @@ import org.onlab.onos.net.provider.ProviderId; |
35 | import org.onlab.onos.store.common.StoreManager; | 36 | import org.onlab.onos.store.common.StoreManager; |
36 | import org.onlab.onos.store.common.StoreService; | 37 | import org.onlab.onos.store.common.StoreService; |
37 | import org.onlab.onos.store.common.TestStoreManager; | 38 | import org.onlab.onos.store.common.TestStoreManager; |
39 | +import org.onlab.onos.store.serializers.KryoSerializationManager; | ||
40 | +import org.onlab.onos.store.serializers.KryoSerializationService; | ||
38 | 41 | ||
39 | import com.google.common.collect.Iterables; | 42 | import com.google.common.collect.Iterables; |
40 | import com.google.common.collect.Sets; | 43 | import com.google.common.collect.Sets; |
41 | import com.hazelcast.config.Config; | 44 | import com.hazelcast.config.Config; |
42 | import com.hazelcast.core.Hazelcast; | 45 | import com.hazelcast.core.Hazelcast; |
43 | 46 | ||
47 | +/** | ||
48 | + * Test of the Hazelcast-based distributed DeviceStore implementation. | ||
49 | + */ | ||
44 | public class DistributedDeviceStoreTest { | 50 | public class DistributedDeviceStoreTest { |
45 | 51 | ||
46 | private static final ProviderId PID = new ProviderId("of", "foo"); | 52 | private static final ProviderId PID = new ProviderId("of", "foo"); |
... | @@ -57,6 +63,7 @@ public class DistributedDeviceStoreTest { | ... | @@ -57,6 +63,7 @@ public class DistributedDeviceStoreTest { |
57 | private static final PortNumber P3 = PortNumber.portNumber(3); | 63 | private static final PortNumber P3 = PortNumber.portNumber(3); |
58 | 64 | ||
59 | private DistributedDeviceStore deviceStore; | 65 | private DistributedDeviceStore deviceStore; |
66 | + private KryoSerializationManager serializationMgr; | ||
60 | 67 | ||
61 | private StoreManager storeManager; | 68 | private StoreManager storeManager; |
62 | 69 | ||
... | @@ -78,7 +85,10 @@ public class DistributedDeviceStoreTest { | ... | @@ -78,7 +85,10 @@ public class DistributedDeviceStoreTest { |
78 | storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); | 85 | storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); |
79 | storeManager.activate(); | 86 | storeManager.activate(); |
80 | 87 | ||
81 | - deviceStore = new TestDistributedDeviceStore(storeManager); | 88 | + serializationMgr = new KryoSerializationManager(); |
89 | + serializationMgr.activate(); | ||
90 | + | ||
91 | + deviceStore = new TestDistributedDeviceStore(storeManager, serializationMgr); | ||
82 | deviceStore.activate(); | 92 | deviceStore.activate(); |
83 | } | 93 | } |
84 | 94 | ||
... | @@ -86,6 +96,8 @@ public class DistributedDeviceStoreTest { | ... | @@ -86,6 +96,8 @@ public class DistributedDeviceStoreTest { |
86 | public void tearDown() throws Exception { | 96 | public void tearDown() throws Exception { |
87 | deviceStore.deactivate(); | 97 | deviceStore.deactivate(); |
88 | 98 | ||
99 | + serializationMgr.deactivate(); | ||
100 | + | ||
89 | storeManager.deactivate(); | 101 | storeManager.deactivate(); |
90 | } | 102 | } |
91 | 103 | ||
... | @@ -326,6 +338,7 @@ public class DistributedDeviceStoreTest { | ... | @@ -326,6 +338,7 @@ public class DistributedDeviceStoreTest { |
326 | } | 338 | } |
327 | 339 | ||
328 | // TODO add test for Port events when we have them | 340 | // TODO add test for Port events when we have them |
341 | + @Ignore("Ignore until Delegate spec. is clear.") | ||
329 | @Test | 342 | @Test |
330 | public final void testEvents() throws InterruptedException { | 343 | public final void testEvents() throws InterruptedException { |
331 | final CountDownLatch addLatch = new CountDownLatch(1); | 344 | final CountDownLatch addLatch = new CountDownLatch(1); |
... | @@ -379,8 +392,10 @@ public class DistributedDeviceStoreTest { | ... | @@ -379,8 +392,10 @@ public class DistributedDeviceStoreTest { |
379 | } | 392 | } |
380 | 393 | ||
381 | private class TestDistributedDeviceStore extends DistributedDeviceStore { | 394 | private class TestDistributedDeviceStore extends DistributedDeviceStore { |
382 | - public TestDistributedDeviceStore(StoreService storeService) { | 395 | + public TestDistributedDeviceStore(StoreService storeService, |
396 | + KryoSerializationService kryoSerializationService) { | ||
383 | this.storeService = storeService; | 397 | this.storeService = storeService; |
398 | + this.kryoSerializationService = kryoSerializationService; | ||
384 | } | 399 | } |
385 | } | 400 | } |
386 | } | 401 | } | ... | ... |
... | @@ -15,6 +15,7 @@ import org.junit.After; | ... | @@ -15,6 +15,7 @@ import org.junit.After; |
15 | import org.junit.AfterClass; | 15 | import org.junit.AfterClass; |
16 | import org.junit.Before; | 16 | import org.junit.Before; |
17 | import org.junit.BeforeClass; | 17 | import org.junit.BeforeClass; |
18 | +import org.junit.Ignore; | ||
18 | import org.junit.Test; | 19 | import org.junit.Test; |
19 | import org.onlab.onos.net.ConnectPoint; | 20 | import org.onlab.onos.net.ConnectPoint; |
20 | import org.onlab.onos.net.DeviceId; | 21 | import org.onlab.onos.net.DeviceId; |
... | @@ -29,27 +30,28 @@ import org.onlab.onos.net.provider.ProviderId; | ... | @@ -29,27 +30,28 @@ import org.onlab.onos.net.provider.ProviderId; |
29 | import org.onlab.onos.store.common.StoreManager; | 30 | import org.onlab.onos.store.common.StoreManager; |
30 | import org.onlab.onos.store.common.StoreService; | 31 | import org.onlab.onos.store.common.StoreService; |
31 | import org.onlab.onos.store.common.TestStoreManager; | 32 | import org.onlab.onos.store.common.TestStoreManager; |
33 | +import org.onlab.onos.store.serializers.KryoSerializationManager; | ||
34 | +import org.onlab.onos.store.serializers.KryoSerializationService; | ||
32 | 35 | ||
33 | import com.google.common.collect.Iterables; | 36 | import com.google.common.collect.Iterables; |
34 | import com.hazelcast.config.Config; | 37 | import com.hazelcast.config.Config; |
35 | import com.hazelcast.core.Hazelcast; | 38 | import com.hazelcast.core.Hazelcast; |
36 | 39 | ||
40 | +/** | ||
41 | + * Test of the Hazelcast-based distributed LinkStore implementation. | ||
42 | + */ | ||
37 | public class DistributedLinkStoreTest { | 43 | public class DistributedLinkStoreTest { |
38 | 44 | ||
39 | private static final ProviderId PID = new ProviderId("of", "foo"); | 45 | private static final ProviderId PID = new ProviderId("of", "foo"); |
40 | private static final DeviceId DID1 = deviceId("of:foo"); | 46 | private static final DeviceId DID1 = deviceId("of:foo"); |
41 | private static final DeviceId DID2 = deviceId("of:bar"); | 47 | private static final DeviceId DID2 = deviceId("of:bar"); |
42 | -// private static final String MFR = "whitebox"; | ||
43 | -// private static final String HW = "1.1.x"; | ||
44 | -// private static final String SW1 = "3.8.1"; | ||
45 | -// private static final String SW2 = "3.9.5"; | ||
46 | -// private static final String SN = "43311-12345"; | ||
47 | 48 | ||
48 | private static final PortNumber P1 = PortNumber.portNumber(1); | 49 | private static final PortNumber P1 = PortNumber.portNumber(1); |
49 | private static final PortNumber P2 = PortNumber.portNumber(2); | 50 | private static final PortNumber P2 = PortNumber.portNumber(2); |
50 | private static final PortNumber P3 = PortNumber.portNumber(3); | 51 | private static final PortNumber P3 = PortNumber.portNumber(3); |
51 | 52 | ||
52 | private StoreManager storeManager; | 53 | private StoreManager storeManager; |
54 | + private KryoSerializationManager serializationMgr; | ||
53 | 55 | ||
54 | private DistributedLinkStore linkStore; | 56 | private DistributedLinkStore linkStore; |
55 | 57 | ||
... | @@ -69,13 +71,17 @@ public class DistributedLinkStoreTest { | ... | @@ -69,13 +71,17 @@ public class DistributedLinkStoreTest { |
69 | storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); | 71 | storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config)); |
70 | storeManager.activate(); | 72 | storeManager.activate(); |
71 | 73 | ||
72 | - linkStore = new TestDistributedLinkStore(storeManager); | 74 | + serializationMgr = new KryoSerializationManager(); |
75 | + serializationMgr.activate(); | ||
76 | + | ||
77 | + linkStore = new TestDistributedLinkStore(storeManager, serializationMgr); | ||
73 | linkStore.activate(); | 78 | linkStore.activate(); |
74 | } | 79 | } |
75 | 80 | ||
76 | @After | 81 | @After |
77 | public void tearDown() throws Exception { | 82 | public void tearDown() throws Exception { |
78 | linkStore.deactivate(); | 83 | linkStore.deactivate(); |
84 | + serializationMgr.deactivate(); | ||
79 | storeManager.deactivate(); | 85 | storeManager.deactivate(); |
80 | } | 86 | } |
81 | 87 | ||
... | @@ -302,6 +308,7 @@ public class DistributedLinkStoreTest { | ... | @@ -302,6 +308,7 @@ public class DistributedLinkStoreTest { |
302 | assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1)); | 308 | assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1)); |
303 | } | 309 | } |
304 | 310 | ||
311 | + @Ignore("Ignore until Delegate spec. is clear.") | ||
305 | @Test | 312 | @Test |
306 | public final void testEvents() throws InterruptedException { | 313 | public final void testEvents() throws InterruptedException { |
307 | 314 | ||
... | @@ -354,8 +361,10 @@ public class DistributedLinkStoreTest { | ... | @@ -354,8 +361,10 @@ public class DistributedLinkStoreTest { |
354 | 361 | ||
355 | 362 | ||
356 | class TestDistributedLinkStore extends DistributedLinkStore { | 363 | class TestDistributedLinkStore extends DistributedLinkStore { |
357 | - TestDistributedLinkStore(StoreService storeService) { | 364 | + TestDistributedLinkStore(StoreService storeService, |
365 | + KryoSerializationService kryoSerializationService) { | ||
358 | this.storeService = storeService; | 366 | this.storeService = storeService; |
367 | + this.kryoSerializationService = kryoSerializationService; | ||
359 | } | 368 | } |
360 | } | 369 | } |
361 | } | 370 | } | ... | ... |
core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoSerializationManager.java
0 → 100644
1 | +package org.onlab.onos.store.serializers; | ||
2 | + | ||
3 | +import java.net.URI; | ||
4 | +import java.util.ArrayList; | ||
5 | +import java.util.HashMap; | ||
6 | + | ||
7 | +import org.apache.felix.scr.annotations.Activate; | ||
8 | +import org.apache.felix.scr.annotations.Component; | ||
9 | +import org.apache.felix.scr.annotations.Deactivate; | ||
10 | +import org.apache.felix.scr.annotations.Service; | ||
11 | +import org.onlab.onos.cluster.ControllerNode; | ||
12 | +import org.onlab.onos.cluster.DefaultControllerNode; | ||
13 | +import org.onlab.onos.cluster.NodeId; | ||
14 | +import org.onlab.onos.net.ConnectPoint; | ||
15 | +import org.onlab.onos.net.DefaultDevice; | ||
16 | +import org.onlab.onos.net.DefaultLink; | ||
17 | +import org.onlab.onos.net.DefaultPort; | ||
18 | +import org.onlab.onos.net.Device; | ||
19 | +import org.onlab.onos.net.DeviceId; | ||
20 | +import org.onlab.onos.net.Element; | ||
21 | +import org.onlab.onos.net.Link; | ||
22 | +import org.onlab.onos.net.LinkKey; | ||
23 | +import org.onlab.onos.net.MastershipRole; | ||
24 | +import org.onlab.onos.net.Port; | ||
25 | +import org.onlab.onos.net.PortNumber; | ||
26 | +import org.onlab.onos.net.provider.ProviderId; | ||
27 | +import org.onlab.packet.IpPrefix; | ||
28 | +import org.onlab.util.KryoPool; | ||
29 | +import org.slf4j.Logger; | ||
30 | +import org.slf4j.LoggerFactory; | ||
31 | + | ||
32 | +import de.javakaffee.kryoserializers.URISerializer; | ||
33 | + | ||
34 | +/** | ||
35 | + * Serialization service using Kryo. | ||
36 | + */ | ||
37 | +@Component(immediate = true) | ||
38 | +@Service | ||
39 | +public class KryoSerializationManager implements KryoSerializationService { | ||
40 | + | ||
41 | + private final Logger log = LoggerFactory.getLogger(getClass()); | ||
42 | + private KryoPool serializerPool; | ||
43 | + | ||
44 | + | ||
45 | + @Activate | ||
46 | + public void activate() { | ||
47 | + setupKryoPool(); | ||
48 | + log.info("Started"); | ||
49 | + } | ||
50 | + | ||
51 | + @Deactivate | ||
52 | + public void deactivate() { | ||
53 | + log.info("Stopped"); | ||
54 | + } | ||
55 | + | ||
56 | + /** | ||
57 | + * Sets up the common serializers pool. | ||
58 | + */ | ||
59 | + protected void setupKryoPool() { | ||
60 | + // FIXME Slice out types used in common to separate pool/namespace. | ||
61 | + serializerPool = KryoPool.newBuilder() | ||
62 | + .register(ArrayList.class, | ||
63 | + HashMap.class, | ||
64 | + | ||
65 | + ControllerNode.State.class, | ||
66 | + Device.Type.class, | ||
67 | + | ||
68 | + DefaultControllerNode.class, | ||
69 | + DefaultDevice.class, | ||
70 | + MastershipRole.class, | ||
71 | + Port.class, | ||
72 | + Element.class, | ||
73 | + | ||
74 | + Link.Type.class | ||
75 | + ) | ||
76 | + .register(IpPrefix.class, new IpPrefixSerializer()) | ||
77 | + .register(URI.class, new URISerializer()) | ||
78 | + .register(NodeId.class, new NodeIdSerializer()) | ||
79 | + .register(ProviderId.class, new ProviderIdSerializer()) | ||
80 | + .register(DeviceId.class, new DeviceIdSerializer()) | ||
81 | + .register(PortNumber.class, new PortNumberSerializer()) | ||
82 | + .register(DefaultPort.class, new DefaultPortSerializer()) | ||
83 | + .register(LinkKey.class, new LinkKeySerializer()) | ||
84 | + .register(ConnectPoint.class, new ConnectPointSerializer()) | ||
85 | + .register(DefaultLink.class, new DefaultLinkSerializer()) | ||
86 | + .build() | ||
87 | + .populate(1); | ||
88 | + } | ||
89 | + | ||
90 | + @Override | ||
91 | + public byte[] serialize(final Object obj) { | ||
92 | + return serializerPool.serialize(obj); | ||
93 | + } | ||
94 | + | ||
95 | + @Override | ||
96 | + public <T> T deserialize(final byte[] bytes) { | ||
97 | + if (bytes == null) { | ||
98 | + return null; | ||
99 | + } | ||
100 | + return serializerPool.deserialize(bytes); | ||
101 | + } | ||
102 | + | ||
103 | +} |
core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoSerializationService.java
0 → 100644
1 | +package org.onlab.onos.store.serializers; | ||
2 | + | ||
3 | +// TODO: To be replaced with SerializationService from IOLoop activity | ||
4 | +/** | ||
5 | + * Service to serialize Objects into byte array. | ||
6 | + */ | ||
7 | +public interface KryoSerializationService { | ||
8 | + | ||
9 | + /** | ||
10 | + * Serializes the specified object into bytes using one of the | ||
11 | + * pre-registered serializers. | ||
12 | + * | ||
13 | + * @param obj object to be serialized | ||
14 | + * @return serialized bytes | ||
15 | + */ | ||
16 | + public byte[] serialize(final Object obj); | ||
17 | + | ||
18 | + /** | ||
19 | + * Deserializes the specified bytes into an object using one of the | ||
20 | + * pre-registered serializers. | ||
21 | + * | ||
22 | + * @param bytes bytes to be deserialized | ||
23 | + * @return deserialized object | ||
24 | + */ | ||
25 | + public <T> T deserialize(final byte[] bytes); | ||
26 | + | ||
27 | +} |
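
For reference, a minimal sketch of how a store component can use the KryoSerializationService added above. The ExampleStore class, its constructor, and the method names are illustrative only (not part of this change); the serialize/deserialize calls and the DeviceId registration are taken from the files in this change set.

import org.onlab.onos.net.DeviceId;
import org.onlab.onos.store.serializers.KryoSerializationService;

/**
 * Illustrative consumer of KryoSerializationService (not part of this change).
 */
public class ExampleStore {

    // In the real stores this field is injected; the tests above set it directly.
    protected KryoSerializationService kryoSerializationService;

    public ExampleStore(KryoSerializationService kryoSerializationService) {
        this.kryoSerializationService = kryoSerializationService;
    }

    public byte[] toBytes(DeviceId deviceId) {
        // DeviceId is one of the types pre-registered in KryoSerializationManager.
        return kryoSerializationService.serialize(deviceId);
    }

    public DeviceId fromBytes(byte[] bytes) {
        // The target type is inferred from the assignment context.
        return kryoSerializationService.deserialize(bytes);
    }
}
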
... | @@ -20,7 +20,7 @@ import java.util.Set; | ... | @@ -20,7 +20,7 @@ import java.util.Set; |
20 | import static org.slf4j.LoggerFactory.getLogger; | 20 | import static org.slf4j.LoggerFactory.getLogger; |
21 | 21 | ||
22 | /** | 22 | /** |
23 | - * Manages inventory of infrastructure DEVICES using trivial in-memory | 23 | + * Manages inventory of infrastructure devices using trivial in-memory |
24 | * structures implementation. | 24 | * structures implementation. |
25 | */ | 25 | */ |
26 | @Component(immediate = true) | 26 | @Component(immediate = true) |
... | @@ -68,6 +68,11 @@ public class SimpleClusterStore | ... | @@ -68,6 +68,11 @@ public class SimpleClusterStore |
68 | } | 68 | } |
69 | 69 | ||
70 | @Override | 70 | @Override |
71 | + public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) { | ||
72 | + return null; | ||
73 | + } | ||
74 | + | ||
75 | + @Override | ||
71 | public void removeNode(NodeId nodeId) { | 76 | public void removeNode(NodeId nodeId) { |
72 | } | 77 | } |
73 | 78 | ... | ... |
... | @@ -101,9 +101,6 @@ public class SimpleDeviceStore | ... | @@ -101,9 +101,6 @@ public class SimpleDeviceStore |
101 | synchronized (this) { | 101 | synchronized (this) { |
102 | devices.put(deviceId, device); | 102 | devices.put(deviceId, device); |
103 | availableDevices.add(deviceId); | 103 | availableDevices.add(deviceId); |
104 | - | ||
105 | - // For now claim the device as a master automatically. | ||
106 | - // roles.put(deviceId, MastershipRole.MASTER); | ||
107 | } | 104 | } |
108 | return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, device, null); | 105 | return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, device, null); |
109 | } | 106 | } |
... | @@ -189,7 +186,7 @@ public class SimpleDeviceStore | ... | @@ -189,7 +186,7 @@ public class SimpleDeviceStore |
189 | new DefaultPort(device, portDescription.portNumber(), | 186 | new DefaultPort(device, portDescription.portNumber(), |
190 | portDescription.isEnabled()); | 187 | portDescription.isEnabled()); |
191 | ports.put(port.number(), updatedPort); | 188 | ports.put(port.number(), updatedPort); |
192 | - return new DeviceEvent(PORT_UPDATED, device, port); | 189 | + return new DeviceEvent(PORT_UPDATED, device, updatedPort); |
193 | } | 190 | } |
194 | return null; | 191 | return null; |
195 | } | 192 | } | ... | ... |
... | @@ -51,8 +51,6 @@ public class SimpleLinkStore | ... | @@ -51,8 +51,6 @@ public class SimpleLinkStore |
51 | private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); | 51 | private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); |
52 | private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); | 52 | private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); |
53 | 53 | ||
54 | - private static final Set<Link> EMPTY = ImmutableSet.of(); | ||
55 | - | ||
56 | @Activate | 54 | @Activate |
57 | public void activate() { | 55 | public void activate() { |
58 | log.info("Started"); | 56 | log.info("Started"); | ... | ... |
core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/SimpleDeviceStoreTest.java
0 → 100644
1 | +/** | ||
2 | + * | ||
3 | + */ | ||
4 | +package org.onlab.onos.net.trivial.impl; | ||
5 | + | ||
6 | +import static org.junit.Assert.*; | ||
7 | +import static org.onlab.onos.net.Device.Type.SWITCH; | ||
8 | +import static org.onlab.onos.net.DeviceId.deviceId; | ||
9 | +import static org.onlab.onos.net.device.DeviceEvent.Type.*; | ||
10 | + | ||
11 | +import java.util.Arrays; | ||
12 | +import java.util.HashMap; | ||
13 | +import java.util.List; | ||
14 | +import java.util.Map; | ||
15 | +import java.util.Set; | ||
16 | +import java.util.concurrent.CountDownLatch; | ||
17 | +import java.util.concurrent.TimeUnit; | ||
18 | + | ||
19 | +import org.junit.After; | ||
20 | +import org.junit.AfterClass; | ||
21 | +import org.junit.Before; | ||
22 | +import org.junit.BeforeClass; | ||
23 | +import org.junit.Ignore; | ||
24 | +import org.junit.Test; | ||
25 | +import org.onlab.onos.net.Device; | ||
26 | +import org.onlab.onos.net.DeviceId; | ||
27 | +import org.onlab.onos.net.Port; | ||
28 | +import org.onlab.onos.net.PortNumber; | ||
29 | +import org.onlab.onos.net.device.DefaultDeviceDescription; | ||
30 | +import org.onlab.onos.net.device.DefaultPortDescription; | ||
31 | +import org.onlab.onos.net.device.DeviceDescription; | ||
32 | +import org.onlab.onos.net.device.DeviceEvent; | ||
33 | +import org.onlab.onos.net.device.DeviceStore; | ||
34 | +import org.onlab.onos.net.device.DeviceStoreDelegate; | ||
35 | +import org.onlab.onos.net.device.PortDescription; | ||
36 | +import org.onlab.onos.net.provider.ProviderId; | ||
37 | + | ||
38 | +import com.google.common.collect.Iterables; | ||
39 | +import com.google.common.collect.Sets; | ||
40 | + | ||
41 | +/** | ||
42 | + * Test of the simple DeviceStore implementation. | ||
43 | + */ | ||
44 | +public class SimpleDeviceStoreTest { | ||
45 | + | ||
46 | + private static final ProviderId PID = new ProviderId("of", "foo"); | ||
47 | + private static final DeviceId DID1 = deviceId("of:foo"); | ||
48 | + private static final DeviceId DID2 = deviceId("of:bar"); | ||
49 | + private static final String MFR = "whitebox"; | ||
50 | + private static final String HW = "1.1.x"; | ||
51 | + private static final String SW1 = "3.8.1"; | ||
52 | + private static final String SW2 = "3.9.5"; | ||
53 | + private static final String SN = "43311-12345"; | ||
54 | + | ||
55 | + private static final PortNumber P1 = PortNumber.portNumber(1); | ||
56 | + private static final PortNumber P2 = PortNumber.portNumber(2); | ||
57 | + private static final PortNumber P3 = PortNumber.portNumber(3); | ||
58 | + | ||
59 | + private SimpleDeviceStore simpleDeviceStore; | ||
60 | + private DeviceStore deviceStore; | ||
61 | + | ||
62 | + | ||
63 | + | ||
64 | + @BeforeClass | ||
65 | + public static void setUpBeforeClass() throws Exception { | ||
66 | + } | ||
67 | + | ||
68 | + @AfterClass | ||
69 | + public static void tearDownAfterClass() throws Exception { | ||
70 | + } | ||
71 | + | ||
72 | + | ||
73 | + @Before | ||
74 | + public void setUp() throws Exception { | ||
75 | + simpleDeviceStore = new SimpleDeviceStore(); | ||
76 | + simpleDeviceStore.activate(); | ||
77 | + deviceStore = simpleDeviceStore; | ||
78 | + } | ||
79 | + | ||
80 | + @After | ||
81 | + public void tearDown() throws Exception { | ||
82 | + simpleDeviceStore.deactivate(); | ||
83 | + } | ||
84 | + | ||
85 | + private void putDevice(DeviceId deviceId, String swVersion) { | ||
86 | + DeviceDescription description = | ||
87 | + new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR, | ||
88 | + HW, swVersion, SN); | ||
89 | + deviceStore.createOrUpdateDevice(PID, deviceId, description); | ||
90 | + } | ||
91 | + | ||
92 | + private static void assertDevice(DeviceId id, String swVersion, Device device) { | ||
93 | + assertNotNull(device); | ||
94 | + assertEquals(id, device.id()); | ||
95 | + assertEquals(MFR, device.manufacturer()); | ||
96 | + assertEquals(HW, device.hwVersion()); | ||
97 | + assertEquals(swVersion, device.swVersion()); | ||
98 | + assertEquals(SN, device.serialNumber()); | ||
99 | + } | ||
100 | + | ||
101 | + @Test | ||
102 | + public final void testGetDeviceCount() { | ||
103 | + assertEquals("initialy empty", 0, deviceStore.getDeviceCount()); | ||
104 | + | ||
105 | + putDevice(DID1, SW1); | ||
106 | + putDevice(DID2, SW2); | ||
107 | + putDevice(DID1, SW1); | ||
108 | + | ||
109 | + assertEquals("expect 2 uniq devices", 2, deviceStore.getDeviceCount()); | ||
110 | + } | ||
111 | + | ||
112 | + @Test | ||
113 | + public final void testGetDevices() { | ||
114 | + assertEquals("initialy empty", 0, Iterables.size(deviceStore.getDevices())); | ||
115 | + | ||
116 | + putDevice(DID1, SW1); | ||
117 | + putDevice(DID2, SW2); | ||
118 | + putDevice(DID1, SW1); | ||
119 | + | ||
120 | + assertEquals("expect 2 uniq devices", | ||
121 | + 2, Iterables.size(deviceStore.getDevices())); | ||
122 | + | ||
123 | + Map<DeviceId, Device> devices = new HashMap<>(); | ||
124 | + for (Device device : deviceStore.getDevices()) { | ||
125 | + devices.put(device.id(), device); | ||
126 | + } | ||
127 | + | ||
128 | + assertDevice(DID1, SW1, devices.get(DID1)); | ||
129 | + assertDevice(DID2, SW2, devices.get(DID2)); | ||
130 | + | ||
131 | + // add case for new node? | ||
132 | + } | ||
133 | + | ||
134 | + @Test | ||
135 | + public final void testGetDevice() { | ||
136 | + | ||
137 | + putDevice(DID1, SW1); | ||
138 | + | ||
139 | + assertDevice(DID1, SW1, deviceStore.getDevice(DID1)); | ||
140 | + assertNull("DID2 shouldn't be there", deviceStore.getDevice(DID2)); | ||
141 | + } | ||
142 | + | ||
143 | + @Test | ||
144 | + public final void testCreateOrUpdateDevice() { | ||
145 | + DeviceDescription description = | ||
146 | + new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR, | ||
147 | + HW, SW1, SN); | ||
148 | + DeviceEvent event = deviceStore.createOrUpdateDevice(PID, DID1, description); | ||
149 | + assertEquals(DEVICE_ADDED, event.type()); | ||
150 | + assertDevice(DID1, SW1, event.subject()); | ||
151 | + | ||
152 | + DeviceDescription description2 = | ||
153 | + new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR, | ||
154 | + HW, SW2, SN); | ||
155 | + DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2); | ||
156 | + assertEquals(DEVICE_UPDATED, event2.type()); | ||
157 | + assertDevice(DID1, SW2, event2.subject()); | ||
158 | + | ||
159 | + assertNull("No change expected", deviceStore.createOrUpdateDevice(PID, DID1, description2)); | ||
160 | + } | ||
161 | + | ||
162 | + @Test | ||
163 | + public final void testMarkOffline() { | ||
164 | + | ||
165 | + putDevice(DID1, SW1); | ||
166 | + assertTrue(deviceStore.isAvailable(DID1)); | ||
167 | + | ||
168 | + DeviceEvent event = deviceStore.markOffline(DID1); | ||
169 | + assertEquals(DEVICE_AVAILABILITY_CHANGED, event.type()); | ||
170 | + assertDevice(DID1, SW1, event.subject()); | ||
171 | + assertFalse(deviceStore.isAvailable(DID1)); | ||
172 | + | ||
173 | + DeviceEvent event2 = deviceStore.markOffline(DID1); | ||
174 | + assertNull("No change, no event", event2); | ||
175 | + } | ||
176 | + | ||
177 | + @Test | ||
178 | + public final void testUpdatePorts() { | ||
179 | + putDevice(DID1, SW1); | ||
180 | + List<PortDescription> pds = Arrays.<PortDescription>asList( | ||
181 | + new DefaultPortDescription(P1, true), | ||
182 | + new DefaultPortDescription(P2, true) | ||
183 | + ); | ||
184 | + | ||
185 | + List<DeviceEvent> events = deviceStore.updatePorts(DID1, pds); | ||
186 | + | ||
187 | + Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2); | ||
188 | + for (DeviceEvent event : events) { | ||
189 | + assertEquals(PORT_ADDED, event.type()); | ||
190 | + assertDevice(DID1, SW1, event.subject()); | ||
191 | + assertTrue("PortNumber is one of expected", | ||
192 | + expectedPorts.remove(event.port().number())); | ||
193 | + assertTrue("Port is enabled", event.port().isEnabled()); | ||
194 | + } | ||
195 | + assertTrue("Event for all expectedport appeared", expectedPorts.isEmpty()); | ||
196 | + | ||
197 | + | ||
198 | + List<PortDescription> pds2 = Arrays.<PortDescription>asList( | ||
199 | + new DefaultPortDescription(P1, false), | ||
200 | + new DefaultPortDescription(P2, true), | ||
201 | + new DefaultPortDescription(P3, true) | ||
202 | + ); | ||
203 | + | ||
204 | + events = deviceStore.updatePorts(DID1, pds2); | ||
205 | + assertFalse("event should be triggered", events.isEmpty()); | ||
206 | + for (DeviceEvent event : events) { | ||
207 | + PortNumber num = event.port().number(); | ||
208 | + if (P1.equals(num)) { | ||
209 | + assertEquals(PORT_UPDATED, event.type()); | ||
210 | + assertDevice(DID1, SW1, event.subject()); | ||
211 | + assertFalse("Port is disabled", event.port().isEnabled()); | ||
212 | + } else if (P2.equals(num)) { | ||
213 | + fail("P2 event not expected."); | ||
214 | + } else if (P3.equals(num)) { | ||
215 | + assertEquals(PORT_ADDED, event.type()); | ||
216 | + assertDevice(DID1, SW1, event.subject()); | ||
217 | + assertTrue("Port is enabled", event.port().isEnabled()); | ||
218 | + } else { | ||
219 | + fail("Unknown port number encountered: " + num); | ||
220 | + } | ||
221 | + } | ||
222 | + | ||
223 | + List<PortDescription> pds3 = Arrays.<PortDescription>asList( | ||
224 | + new DefaultPortDescription(P1, false), | ||
225 | + new DefaultPortDescription(P2, true) | ||
226 | + ); | ||
227 | + events = deviceStore.updatePorts(DID1, pds3); | ||
228 | + assertFalse("event should be triggered", events.isEmpty()); | ||
229 | + for (DeviceEvent event : events) { | ||
230 | + PortNumber num = event.port().number(); | ||
231 | + if (P1.equals(num)) { | ||
232 | + fail("P1 event not expected."); | ||
233 | + } else if (P2.equals(num)) { | ||
234 | + fail("P2 event not expected."); | ||
235 | + } else if (P3.equals(num)) { | ||
236 | + assertEquals(PORT_REMOVED, event.type()); | ||
237 | + assertDevice(DID1, SW1, event.subject()); | ||
238 | + assertTrue("Port was enabled", event.port().isEnabled()); | ||
239 | + } else { | ||
240 | + fail("Unknown port number encountered: " + num); | ||
241 | + } | ||
242 | + } | ||
243 | + | ||
244 | + } | ||
245 | + | ||
246 | + @Test | ||
247 | + public final void testUpdatePortStatus() { | ||
248 | + putDevice(DID1, SW1); | ||
249 | + List<PortDescription> pds = Arrays.<PortDescription>asList( | ||
250 | + new DefaultPortDescription(P1, true) | ||
251 | + ); | ||
252 | + deviceStore.updatePorts(DID1, pds); | ||
253 | + | ||
254 | + DeviceEvent event = deviceStore.updatePortStatus(DID1, | ||
255 | + new DefaultPortDescription(P1, false)); | ||
256 | + assertEquals(PORT_UPDATED, event.type()); | ||
257 | + assertDevice(DID1, SW1, event.subject()); | ||
258 | + assertEquals(P1, event.port().number()); | ||
259 | + assertFalse("Port is disabled", event.port().isEnabled()); | ||
260 | + } | ||
261 | + | ||
262 | + @Test | ||
263 | + public final void testGetPorts() { | ||
264 | + putDevice(DID1, SW1); | ||
265 | + putDevice(DID2, SW1); | ||
266 | + List<PortDescription> pds = Arrays.<PortDescription>asList( | ||
267 | + new DefaultPortDescription(P1, true), | ||
268 | + new DefaultPortDescription(P2, true) | ||
269 | + ); | ||
270 | + deviceStore.updatePorts(DID1, pds); | ||
271 | + | ||
272 | + Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2); | ||
273 | + List<Port> ports = deviceStore.getPorts(DID1); | ||
274 | + for (Port port : ports) { | ||
275 | + assertTrue("Port is enabled", port.isEnabled()); | ||
276 | + assertTrue("PortNumber is one of expected", | ||
277 | + expectedPorts.remove(port.number())); | ||
278 | + } | ||
279 | + assertTrue("Event for all expectedport appeared", expectedPorts.isEmpty()); | ||
280 | + | ||
281 | + | ||
282 | + assertTrue("DID2 has no ports", deviceStore.getPorts(DID2).isEmpty()); | ||
283 | + } | ||
284 | + | ||
285 | + @Test | ||
286 | + public final void testGetPort() { | ||
287 | + putDevice(DID1, SW1); | ||
288 | + putDevice(DID2, SW1); | ||
289 | + List<PortDescription> pds = Arrays.<PortDescription>asList( | ||
290 | + new DefaultPortDescription(P1, true), | ||
291 | + new DefaultPortDescription(P2, false) | ||
292 | + ); | ||
293 | + deviceStore.updatePorts(DID1, pds); | ||
294 | + | ||
295 | + Port port1 = deviceStore.getPort(DID1, P1); | ||
296 | + assertEquals(P1, port1.number()); | ||
297 | + assertTrue("Port is enabled", port1.isEnabled()); | ||
298 | + | ||
299 | + Port port2 = deviceStore.getPort(DID1, P2); | ||
300 | + assertEquals(P2, port2.number()); | ||
301 | + assertFalse("Port is disabled", port2.isEnabled()); | ||
302 | + | ||
303 | + Port port3 = deviceStore.getPort(DID1, P3); | ||
304 | + assertNull("P3 not expected", port3); | ||
305 | + } | ||
306 | + | ||
307 | + @Test | ||
308 | + public final void testRemoveDevice() { | ||
309 | + putDevice(DID1, SW1); | ||
310 | + putDevice(DID2, SW1); | ||
311 | + | ||
312 | + assertEquals(2, deviceStore.getDeviceCount()); | ||
313 | + | ||
314 | + DeviceEvent event = deviceStore.removeDevice(DID1); | ||
315 | + assertEquals(DEVICE_REMOVED, event.type()); | ||
316 | + assertDevice(DID1, SW1, event.subject()); | ||
317 | + | ||
318 | + assertEquals(1, deviceStore.getDeviceCount()); | ||
319 | + } | ||
320 | + | ||
321 | + // If Delegates should be called only on remote events, | ||
322 | + // then Simple* should never call them, thus not test required. | ||
323 | + // TODO add test for Port events when we have them | ||
324 | + @Ignore("Ignore until Delegate spec. is clear.") | ||
325 | + @Test | ||
326 | + public final void testEvents() throws InterruptedException { | ||
327 | + final CountDownLatch addLatch = new CountDownLatch(1); | ||
328 | + DeviceStoreDelegate checkAdd = new DeviceStoreDelegate() { | ||
329 | + @Override | ||
330 | + public void notify(DeviceEvent event) { | ||
331 | + assertEquals(DEVICE_ADDED, event.type()); | ||
332 | + assertDevice(DID1, SW1, event.subject()); | ||
333 | + addLatch.countDown(); | ||
334 | + } | ||
335 | + }; | ||
336 | + final CountDownLatch updateLatch = new CountDownLatch(1); | ||
337 | + DeviceStoreDelegate checkUpdate = new DeviceStoreDelegate() { | ||
338 | + @Override | ||
339 | + public void notify(DeviceEvent event) { | ||
340 | + assertEquals(DEVICE_UPDATED, event.type()); | ||
341 | + assertDevice(DID1, SW2, event.subject()); | ||
342 | + updateLatch.countDown(); | ||
343 | + } | ||
344 | + }; | ||
345 | + final CountDownLatch removeLatch = new CountDownLatch(1); | ||
346 | + DeviceStoreDelegate checkRemove = new DeviceStoreDelegate() { | ||
347 | + @Override | ||
348 | + public void notify(DeviceEvent event) { | ||
349 | + assertEquals(DEVICE_REMOVED, event.type()); | ||
350 | + assertDevice(DID1, SW2, event.subject()); | ||
351 | + removeLatch.countDown(); | ||
352 | + } | ||
353 | + }; | ||
354 | + | ||
355 | + DeviceDescription description = | ||
356 | + new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR, | ||
357 | + HW, SW1, SN); | ||
358 | + deviceStore.setDelegate(checkAdd); | ||
359 | + deviceStore.createOrUpdateDevice(PID, DID1, description); | ||
360 | + assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS)); | ||
361 | + | ||
362 | + | ||
363 | + DeviceDescription description2 = | ||
364 | + new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR, | ||
365 | + HW, SW2, SN); | ||
366 | + deviceStore.unsetDelegate(checkAdd); | ||
367 | + deviceStore.setDelegate(checkUpdate); | ||
368 | + deviceStore.createOrUpdateDevice(PID, DID1, description2); | ||
369 | + assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS)); | ||
370 | + | ||
371 | + deviceStore.unsetDelegate(checkUpdate); | ||
372 | + deviceStore.setDelegate(checkRemove); | ||
373 | + deviceStore.removeDevice(DID1); | ||
374 | + assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS)); | ||
375 | + } | ||
376 | +} |
core/store/trivial/src/test/java/org/onlab/onos/net/trivial/impl/SimpleLinkStoreTest.java
0 → 100644
1 | +package org.onlab.onos.net.trivial.impl; | ||
2 | + | ||
3 | +import static org.junit.Assert.*; | ||
4 | +import static org.onlab.onos.net.DeviceId.deviceId; | ||
5 | +import static org.onlab.onos.net.Link.Type.*; | ||
6 | +import static org.onlab.onos.net.link.LinkEvent.Type.*; | ||
7 | + | ||
8 | +import java.util.HashMap; | ||
9 | +import java.util.Map; | ||
10 | +import java.util.Set; | ||
11 | +import java.util.concurrent.CountDownLatch; | ||
12 | +import java.util.concurrent.TimeUnit; | ||
13 | + | ||
14 | +import org.junit.After; | ||
15 | +import org.junit.AfterClass; | ||
16 | +import org.junit.Before; | ||
17 | +import org.junit.BeforeClass; | ||
18 | +import org.junit.Ignore; | ||
19 | +import org.junit.Test; | ||
20 | +import org.onlab.onos.net.ConnectPoint; | ||
21 | +import org.onlab.onos.net.DeviceId; | ||
22 | +import org.onlab.onos.net.Link; | ||
23 | +import org.onlab.onos.net.LinkKey; | ||
24 | +import org.onlab.onos.net.PortNumber; | ||
25 | +import org.onlab.onos.net.Link.Type; | ||
26 | +import org.onlab.onos.net.link.DefaultLinkDescription; | ||
27 | +import org.onlab.onos.net.link.LinkEvent; | ||
28 | +import org.onlab.onos.net.link.LinkStore; | ||
29 | +import org.onlab.onos.net.link.LinkStoreDelegate; | ||
30 | +import org.onlab.onos.net.provider.ProviderId; | ||
31 | + | ||
32 | +import com.google.common.collect.Iterables; | ||
33 | + | ||
34 | +/** | ||
35 | + * Test of the simple LinkStore implementation. | ||
36 | + */ | ||
37 | +public class SimpleLinkStoreTest { | ||
38 | + | ||
39 | + private static final ProviderId PID = new ProviderId("of", "foo"); | ||
40 | + private static final DeviceId DID1 = deviceId("of:foo"); | ||
41 | + private static final DeviceId DID2 = deviceId("of:bar"); | ||
42 | + | ||
43 | + private static final PortNumber P1 = PortNumber.portNumber(1); | ||
44 | + private static final PortNumber P2 = PortNumber.portNumber(2); | ||
45 | + private static final PortNumber P3 = PortNumber.portNumber(3); | ||
46 | + | ||
47 | + | ||
48 | + private SimpleLinkStore simpleLinkStore; | ||
49 | + private LinkStore linkStore; | ||
50 | + | ||
51 | + @BeforeClass | ||
52 | + public static void setUpBeforeClass() throws Exception { | ||
53 | + } | ||
54 | + | ||
55 | + @AfterClass | ||
56 | + public static void tearDownAfterClass() throws Exception { | ||
57 | + } | ||
58 | + | ||
59 | + @Before | ||
60 | + public void setUp() throws Exception { | ||
61 | + simpleLinkStore = new SimpleLinkStore(); | ||
62 | + simpleLinkStore.activate(); | ||
63 | + linkStore = simpleLinkStore; | ||
64 | + } | ||
65 | + | ||
66 | + @After | ||
67 | + public void tearDown() throws Exception { | ||
68 | + simpleLinkStore.deactivate(); | ||
69 | + } | ||
70 | + | ||
71 | + private void putLink(DeviceId srcId, PortNumber srcNum, | ||
72 | + DeviceId dstId, PortNumber dstNum, Type type) { | ||
73 | + ConnectPoint src = new ConnectPoint(srcId, srcNum); | ||
74 | + ConnectPoint dst = new ConnectPoint(dstId, dstNum); | ||
75 | + linkStore.createOrUpdateLink(PID, new DefaultLinkDescription(src, dst, type)); | ||
76 | + } | ||
77 | + | ||
78 | + private void putLink(LinkKey key, Type type) { | ||
79 | + putLink(key.src().deviceId(), key.src().port(), | ||
80 | + key.dst().deviceId(), key.dst().port(), | ||
81 | + type); | ||
82 | + } | ||
83 | + | ||
84 | + private static void assertLink(DeviceId srcId, PortNumber srcNum, | ||
85 | + DeviceId dstId, PortNumber dstNum, Type type, | ||
86 | + Link link) { | ||
87 | + assertEquals(srcId, link.src().deviceId()); | ||
88 | + assertEquals(srcNum, link.src().port()); | ||
89 | + assertEquals(dstId, link.dst().deviceId()); | ||
90 | + assertEquals(dstNum, link.dst().port()); | ||
91 | + assertEquals(type, link.type()); | ||
92 | + } | ||
93 | + | ||
94 | + private static void assertLink(LinkKey key, Type type, Link link) { | ||
95 | + assertLink(key.src().deviceId(), key.src().port(), | ||
96 | + key.dst().deviceId(), key.dst().port(), | ||
97 | + type, link); | ||
98 | + } | ||
99 | + | ||
100 | + @Test | ||
101 | + public final void testGetLinkCount() { | ||
102 | + assertEquals("initialy empty", 0, linkStore.getLinkCount()); | ||
103 | + | ||
104 | + putLink(DID1, P1, DID2, P2, DIRECT); | ||
105 | + putLink(DID2, P2, DID1, P1, DIRECT); | ||
106 | + putLink(DID1, P1, DID2, P2, DIRECT); | ||
107 | + | ||
108 | + assertEquals("expecting 2 unique link", 2, linkStore.getLinkCount()); | ||
109 | + } | ||
110 | + | ||
111 | + @Test | ||
112 | + public final void testGetLinks() { | ||
113 | + assertEquals("initialy empty", 0, | ||
114 | + Iterables.size(linkStore.getLinks())); | ||
115 | + | ||
116 | + LinkKey linkId1 = new LinkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2)); | ||
117 | + LinkKey linkId2 = new LinkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1)); | ||
118 | + | ||
119 | + putLink(linkId1, DIRECT); | ||
120 | + putLink(linkId2, DIRECT); | ||
121 | + putLink(linkId1, DIRECT); | ||
122 | + | ||
123 | + assertEquals("expecting 2 unique link", 2, | ||
124 | + Iterables.size(linkStore.getLinks())); | ||
125 | + | ||
126 | + Map<LinkKey, Link> links = new HashMap<>(); | ||
127 | + for (Link link : linkStore.getLinks()) { | ||
128 | + links.put(new LinkKey(link.src(), link.dst()), link); | ||
129 | + } | ||
130 | + | ||
131 | + assertLink(linkId1, DIRECT, links.get(linkId1)); | ||
132 | + assertLink(linkId2, DIRECT, links.get(linkId2)); | ||
133 | + } | ||
134 | + | ||
135 | + @Test | ||
136 | + public final void testGetDeviceEgressLinks() { | ||
137 | + LinkKey linkId1 = new LinkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2)); | ||
138 | + LinkKey linkId2 = new LinkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1)); | ||
139 | + LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3)); | ||
140 | + | ||
141 | + putLink(linkId1, DIRECT); | ||
142 | + putLink(linkId2, DIRECT); | ||
143 | + putLink(linkId3, DIRECT); | ||
144 | + | ||
145 | + // DID1,P1 => DID2,P2 | ||
146 | + // DID2,P2 => DID1,P1 | ||
147 | + // DID1,P2 => DID2,P3 | ||
148 | + | ||
149 | + Set<Link> links1 = linkStore.getDeviceEgressLinks(DID1); | ||
150 | + assertEquals(2, links1.size()); | ||
151 | + // check | ||
152 | + | ||
153 | + Set<Link> links2 = linkStore.getDeviceEgressLinks(DID2); | ||
154 | + assertEquals(1, links2.size()); | ||
155 | + assertLink(linkId2, DIRECT, links2.iterator().next()); | ||
156 | + } | ||
157 | + | ||
158 | + @Test | ||
159 | + public final void testGetDeviceIngressLinks() { | ||
160 | + LinkKey linkId1 = new LinkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2)); | ||
161 | + LinkKey linkId2 = new LinkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1)); | ||
162 | + LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3)); | ||
163 | + | ||
164 | + putLink(linkId1, DIRECT); | ||
165 | + putLink(linkId2, DIRECT); | ||
166 | + putLink(linkId3, DIRECT); | ||
167 | + | ||
168 | + // DID1,P1 => DID2,P2 | ||
169 | + // DID2,P2 => DID1,P1 | ||
170 | + // DID1,P2 => DID2,P3 | ||
171 | + | ||
172 | + Set<Link> links1 = linkStore.getDeviceIngressLinks(DID2); | ||
173 | + assertEquals(2, links1.size()); | ||
174 | + // check | ||
175 | + | ||
176 | + Set<Link> links2 = linkStore.getDeviceIngressLinks(DID1); | ||
177 | + assertEquals(1, links2.size()); | ||
178 | + assertLink(linkId2, DIRECT, links2.iterator().next()); | ||
179 | + } | ||
180 | + | ||
181 | + @Test | ||
182 | + public final void testGetLink() { | ||
183 | + ConnectPoint src = new ConnectPoint(DID1, P1); | ||
184 | + ConnectPoint dst = new ConnectPoint(DID2, P2); | ||
185 | + LinkKey linkId1 = new LinkKey(src, dst); | ||
186 | + | ||
187 | + putLink(linkId1, DIRECT); | ||
188 | + | ||
189 | + Link link = linkStore.getLink(src, dst); | ||
190 | + assertLink(linkId1, DIRECT, link); | ||
191 | + | ||
192 | + assertNull("There shouldn't be reverese link", | ||
193 | + linkStore.getLink(dst, src)); | ||
194 | + } | ||
195 | + | ||
196 | + @Test | ||
197 | + public final void testGetEgressLinks() { | ||
198 | + final ConnectPoint d1P1 = new ConnectPoint(DID1, P1); | ||
199 | + final ConnectPoint d2P2 = new ConnectPoint(DID2, P2); | ||
200 | + LinkKey linkId1 = new LinkKey(d1P1, d2P2); | ||
201 | + LinkKey linkId2 = new LinkKey(d2P2, d1P1); | ||
202 | + LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3)); | ||
203 | + | ||
204 | + putLink(linkId1, DIRECT); | ||
205 | + putLink(linkId2, DIRECT); | ||
206 | + putLink(linkId3, DIRECT); | ||
207 | + | ||
208 | + // DID1,P1 => DID2,P2 | ||
209 | + // DID2,P2 => DID1,P1 | ||
210 | + // DID1,P2 => DID2,P3 | ||
211 | + | ||
212 | + Set<Link> links1 = linkStore.getEgressLinks(d1P1); | ||
213 | + assertEquals(1, links1.size()); | ||
214 | + assertLink(linkId1, DIRECT, links1.iterator().next()); | ||
215 | + | ||
216 | + Set<Link> links2 = linkStore.getEgressLinks(d2P2); | ||
217 | + assertEquals(1, links2.size()); | ||
218 | + assertLink(linkId2, DIRECT, links2.iterator().next()); | ||
219 | + } | ||
220 | + | ||
221 | + @Test | ||
222 | + public final void testGetIngressLinks() { | ||
223 | + final ConnectPoint d1P1 = new ConnectPoint(DID1, P1); | ||
224 | + final ConnectPoint d2P2 = new ConnectPoint(DID2, P2); | ||
225 | + LinkKey linkId1 = new LinkKey(d1P1, d2P2); | ||
226 | + LinkKey linkId2 = new LinkKey(d2P2, d1P1); | ||
227 | + LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3)); | ||
228 | + | ||
229 | + putLink(linkId1, DIRECT); | ||
230 | + putLink(linkId2, DIRECT); | ||
231 | + putLink(linkId3, DIRECT); | ||
232 | + | ||
233 | + // DID1,P1 => DID2,P2 | ||
234 | + // DID2,P2 => DID1,P1 | ||
235 | + // DID1,P2 => DID2,P3 | ||
236 | + | ||
237 | + Set<Link> links1 = linkStore.getIngressLinks(d2P2); | ||
238 | + assertEquals(1, links1.size()); | ||
239 | + assertLink(linkId1, DIRECT, links1.iterator().next()); | ||
240 | + | ||
241 | + Set<Link> links2 = linkStore.getIngressLinks(d1P1); | ||
242 | + assertEquals(1, links2.size()); | ||
243 | + assertLink(linkId2, DIRECT, links2.iterator().next()); | ||
244 | + } | ||
245 | + | ||
246 | + @Test | ||
247 | + public final void testCreateOrUpdateLink() { | ||
248 | + ConnectPoint src = new ConnectPoint(DID1, P1); | ||
249 | + ConnectPoint dst = new ConnectPoint(DID2, P2); | ||
250 | + | ||
251 | + // add link | ||
252 | + LinkEvent event = linkStore.createOrUpdateLink(PID, | ||
253 | + new DefaultLinkDescription(src, dst, INDIRECT)); | ||
254 | + | ||
255 | + assertLink(DID1, P1, DID2, P2, INDIRECT, event.subject()); | ||
256 | + assertEquals(LINK_ADDED, event.type()); | ||
257 | + | ||
258 | + // update link type | ||
259 | + LinkEvent event2 = linkStore.createOrUpdateLink(PID, | ||
260 | + new DefaultLinkDescription(src, dst, DIRECT)); | ||
261 | + | ||
262 | + assertLink(DID1, P1, DID2, P2, DIRECT, event2.subject()); | ||
263 | + assertEquals(LINK_UPDATED, event2.type()); | ||
264 | + | ||
265 | + // no change | ||
266 | + LinkEvent event3 = linkStore.createOrUpdateLink(PID, | ||
267 | + new DefaultLinkDescription(src, dst, DIRECT)); | ||
268 | + | ||
269 | + assertNull("No change event expected", event3); | ||
270 | + } | ||
271 | + | ||
272 | + @Test | ||
273 | + public final void testRemoveLink() { | ||
274 | + final ConnectPoint d1P1 = new ConnectPoint(DID1, P1); | ||
275 | + final ConnectPoint d2P2 = new ConnectPoint(DID2, P2); | ||
276 | + LinkKey linkId1 = new LinkKey(d1P1, d2P2); | ||
277 | + LinkKey linkId2 = new LinkKey(d2P2, d1P1); | ||
278 | + | ||
279 | + putLink(linkId1, DIRECT); | ||
280 | + putLink(linkId2, DIRECT); | ||
281 | + | ||
282 | + // DID1,P1 => DID2,P2 | ||
283 | + // DID2,P2 => DID1,P1 | ||
284 | + // DID1,P2 => DID2,P3 | ||
285 | + | ||
286 | + LinkEvent event = linkStore.removeLink(d1P1, d2P2); | ||
287 | + assertEquals(LINK_REMOVED, event.type()); | ||
288 | + LinkEvent event2 = linkStore.removeLink(d1P1, d2P2); | ||
289 | + assertNull(event2); | ||
290 | + | ||
291 | + assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1)); | ||
292 | + } | ||
293 | + | ||
294 | + // If Delegates should be called only on remote events, | ||
295 | + // then Simple* should never call them, thus not test required. | ||
296 | + @Ignore("Ignore until Delegate spec. is clear.") | ||
297 | + @Test | ||
298 | + public final void testEvents() throws InterruptedException { | ||
299 | + | ||
300 | + final ConnectPoint d1P1 = new ConnectPoint(DID1, P1); | ||
301 | + final ConnectPoint d2P2 = new ConnectPoint(DID2, P2); | ||
302 | + final LinkKey linkId1 = new LinkKey(d1P1, d2P2); | ||
303 | + | ||
304 | + final CountDownLatch addLatch = new CountDownLatch(1); | ||
305 | + LinkStoreDelegate checkAdd = new LinkStoreDelegate() { | ||
306 | + @Override | ||
307 | + public void notify(LinkEvent event) { | ||
308 | + assertEquals(LINK_ADDED, event.type()); | ||
309 | + assertLink(linkId1, INDIRECT, event.subject()); | ||
310 | + addLatch.countDown(); | ||
311 | + } | ||
312 | + }; | ||
313 | + final CountDownLatch updateLatch = new CountDownLatch(1); | ||
314 | + LinkStoreDelegate checkUpdate = new LinkStoreDelegate() { | ||
315 | + @Override | ||
316 | + public void notify(LinkEvent event) { | ||
317 | + assertEquals(LINK_UPDATED, event.type()); | ||
318 | + assertLink(linkId1, DIRECT, event.subject()); | ||
319 | + updateLatch.countDown(); | ||
320 | + } | ||
321 | + }; | ||
322 | + final CountDownLatch removeLatch = new CountDownLatch(1); | ||
323 | + LinkStoreDelegate checkRemove = new LinkStoreDelegate() { | ||
324 | + @Override | ||
325 | + public void notify(LinkEvent event) { | ||
326 | + assertEquals(LINK_REMOVED, event.type()); | ||
327 | + assertLink(linkId1, DIRECT, event.subject()); | ||
328 | + removeLatch.countDown(); | ||
329 | + } | ||
330 | + }; | ||
331 | + | ||
332 | + linkStore.setDelegate(checkAdd); | ||
333 | + putLink(linkId1, INDIRECT); | ||
334 | + assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS)); | ||
335 | + | ||
336 | + linkStore.unsetDelegate(checkAdd); | ||
337 | + linkStore.setDelegate(checkUpdate); | ||
338 | + putLink(linkId1, DIRECT); | ||
339 | + assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS)); | ||
340 | + | ||
341 | + linkStore.unsetDelegate(checkUpdate); | ||
342 | + linkStore.setDelegate(checkRemove); | ||
343 | + linkStore.removeLink(d1P1, d2P2); | ||
344 | + assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS)); | ||
345 | + } | ||
346 | +} |
... | @@ -48,20 +48,17 @@ | ... | @@ -48,20 +48,17 @@ |
48 | description="ONOS core components"> | 48 | description="ONOS core components"> |
49 | <feature>onos-api</feature> | 49 | <feature>onos-api</feature> |
50 | <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle> | 50 | <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle> |
51 | - <bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle> | 51 | + <bundle>mvn:org.onlab.onos/onos-core-dist/1.0.0-SNAPSHOT</bundle> |
52 | - <bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle> | ||
53 | - <bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle> | ||
54 | - <bundle>mvn:org.onlab.onos/onos-core-hz-net/1.0.0-SNAPSHOT</bundle> | ||
55 | </feature> | 52 | </feature> |
56 | 53 | ||
57 | - <feature name="onos-core-dist" version="1.0.0" | 54 | + <feature name="onos-core-hazelcast" version="1.0.0" |
58 | - description="ONOS core components"> | 55 | + description="ONOS core components built on hazelcast"> |
59 | <feature>onos-api</feature> | 56 | <feature>onos-api</feature> |
60 | <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle> | 57 | <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle> |
61 | <bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle> | 58 | <bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle> |
62 | <bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle> | 59 | <bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle> |
63 | <bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle> | 60 | <bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle> |
64 | - <bundle>mvn:org.onlab.onos/onos-core-dist/1.0.0-SNAPSHOT</bundle> | 61 | + <bundle>mvn:org.onlab.onos/onos-core-hz-net/1.0.0-SNAPSHOT</bundle> |
65 | </feature> | 62 | </feature> |
66 | 63 | ||
67 | <feature name="onos-core-trivial" version="1.0.0" | 64 | <feature name="onos-core-trivial" version="1.0.0" | ... | ... |
... | @@ -9,5 +9,5 @@ | ... | @@ -9,5 +9,5 @@ |
9 | nodes=$(env | sort | egrep "OC[0-9]+" | cut -d= -f2) | 9 | nodes=$(env | sort | egrep "OC[0-9]+" | cut -d= -f2) |
10 | 10 | ||
11 | onos-package | 11 | onos-package |
12 | -for node in $nodes; do printf "%s: " $node; onos-install -f $node; done | 12 | +for node in $nodes; do (printf "%s: %s\n" "$node" "`onos-install -f $node`")& done |
13 | for node in $nodes; do onos-wait-for-start $node; done | 13 | for node in $nodes; do onos-wait-for-start $node; done | ... | ... |
... | @@ -15,7 +15,7 @@ env JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 | ... | @@ -15,7 +15,7 @@ env JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 |
15 | 15 | ||
16 | pre-stop script | 16 | pre-stop script |
17 | /opt/onos/bin/onos halt 2>/opt/onos/var/stderr.log | 17 | /opt/onos/bin/onos halt 2>/opt/onos/var/stderr.log |
18 | - sleep 3 | 18 | + sleep 2 |
19 | end script | 19 | end script |
20 | 20 | ||
21 | script | 21 | script | ... | ... |
... | @@ -8,7 +8,21 @@ | ... | @@ -8,7 +8,21 @@ |
8 | 8 | ||
9 | remote=$ONOS_USER@${1:-$OCI} | 9 | remote=$ONOS_USER@${1:-$OCI} |
10 | 10 | ||
11 | +# Generate a cluster.json from the ON* environment variables | ||
12 | +CDEF_FILE=/tmp/cluster.json | ||
13 | +echo "{ \"nodes\":[" > $CDEF_FILE | ||
14 | +for node in $(env | sort | egrep "OC[2-9]+" | cut -d= -f2); do | ||
15 | + echo " { \"id\": \"$node\", \"ip\": \"$node\", \"tcpPort\": 9876 }," >> $CDEF_FILE | ||
16 | +done | ||
17 | +echo " { \"id\": \"$OC1\", \"ip\": \"$OC1\", \"tcpPort\": 9876 }" >> $CDEF_FILE | ||
18 | +echo "]}" >> $CDEF_FILE | ||
19 | + | ||
11 | ssh $remote " | 20 | ssh $remote " |
12 | sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \ | 21 | sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \ |
13 | $ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml | 22 | $ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml |
23 | + | ||
24 | + echo \"onos.ip = \$(ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \ | ||
25 | + >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties | ||
14 | " | 26 | " |
27 | + | ||
28 | +scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/ | ||
... | \ No newline at end of file | ... | \ No newline at end of file | ... | ... |
... | @@ -24,6 +24,7 @@ ssh $remote " | ... | @@ -24,6 +24,7 @@ ssh $remote " |
24 | # Make a link to the log file directory and make a home for auxiliaries | 24 | # Make a link to the log file directory and make a home for auxiliaries |
25 | ln -s $ONOS_INSTALL_DIR/$KARAF_DIST/data/log /opt/onos/log | 25 | ln -s $ONOS_INSTALL_DIR/$KARAF_DIST/data/log /opt/onos/log |
26 | mkdir $ONOS_INSTALL_DIR/var | 26 | mkdir $ONOS_INSTALL_DIR/var |
27 | + mkdir $ONOS_INSTALL_DIR/config | ||
27 | 28 | ||
28 | # Install the upstart configuration file and setup options for debugging | 29 | # Install the upstart configuration file and setup options for debugging |
29 | sudo cp $ONOS_INSTALL_DIR/debian/onos.conf /etc/init/onos.conf | 30 | sudo cp $ONOS_INSTALL_DIR/debian/onos.conf /etc/init/onos.conf | ... | ... |
1 | # Default virtual box ONOS instances 1,2 & ONOS mininet box | 1 | # Default virtual box ONOS instances 1,2 & ONOS mininet box |
2 | . $ONOS_ROOT/tools/test/cells/.reset | 2 | . $ONOS_ROOT/tools/test/cells/.reset |
3 | 3 | ||
4 | +export ONOS_NIC=192.168.56.* | ||
5 | + | ||
4 | export OC1="192.168.56.101" | 6 | export OC1="192.168.56.101" |
5 | export OC2="192.168.56.102" | 7 | export OC2="192.168.56.102" |
6 | 8 | ... | ... |
1 | package org.onlab.util; | 1 | package org.onlab.util; |
2 | 2 | ||
3 | +import java.nio.ByteBuffer; | ||
3 | import java.util.ArrayList; | 4 | import java.util.ArrayList; |
4 | import java.util.List; | 5 | import java.util.List; |
5 | import java.util.concurrent.ConcurrentLinkedQueue; | 6 | import java.util.concurrent.ConcurrentLinkedQueue; |
... | @@ -8,6 +9,8 @@ import org.apache.commons.lang3.tuple.Pair; | ... | @@ -8,6 +9,8 @@ import org.apache.commons.lang3.tuple.Pair; |
8 | 9 | ||
9 | import com.esotericsoftware.kryo.Kryo; | 10 | import com.esotericsoftware.kryo.Kryo; |
10 | import com.esotericsoftware.kryo.Serializer; | 11 | import com.esotericsoftware.kryo.Serializer; |
12 | +import com.esotericsoftware.kryo.io.ByteBufferInput; | ||
13 | +import com.esotericsoftware.kryo.io.ByteBufferOutput; | ||
11 | import com.esotericsoftware.kryo.io.Input; | 14 | import com.esotericsoftware.kryo.io.Input; |
12 | import com.esotericsoftware.kryo.io.Output; | 15 | import com.esotericsoftware.kryo.io.Output; |
13 | import com.google.common.collect.ImmutableList; | 16 | import com.google.common.collect.ImmutableList; |
... | @@ -174,6 +177,22 @@ public final class KryoPool { | ... | @@ -174,6 +177,22 @@ public final class KryoPool { |
174 | } | 177 | } |
175 | 178 | ||
176 | /** | 179 | /** |
180 | + * Serializes given object to byte buffer using Kryo instance in pool. | ||
181 | + * | ||
182 | + * @param obj Object to serialize | ||
183 | + * @param buffer to write to | ||
184 | + */ | ||
185 | + public void serialize(final Object obj, final ByteBuffer buffer) { | ||
186 | + ByteBufferOutput out = new ByteBufferOutput(buffer); | ||
187 | + Kryo kryo = getKryo(); | ||
188 | + try { | ||
189 | + kryo.writeClassAndObject(out, obj); | ||
190 | + } finally { | ||
191 | + putKryo(kryo); | ||
192 | + } | ||
193 | + } | ||
194 | + | ||
195 | + /** | ||
177 | * Deserializes given byte array to Object using Kryo instance in pool. | 196 | * Deserializes given byte array to Object using Kryo instance in pool. |
178 | * | 197 | * |
179 | * @param bytes serialized bytes | 198 | * @param bytes serialized bytes |
... | @@ -192,6 +211,24 @@ public final class KryoPool { | ... | @@ -192,6 +211,24 @@ public final class KryoPool { |
192 | } | 211 | } |
193 | } | 212 | } |
194 | 213 | ||
214 | + /** | ||
215 | + * Deserializes given byte buffer to Object using Kryo instance in pool. | ||
216 | + * | ||
217 | + * @param buffer input with serialized bytes | ||
218 | + * @param <T> deserialized Object type | ||
219 | + * @return deserialized Object | ||
220 | + */ | ||
221 | + public <T> T deserialize(final ByteBuffer buffer) { | ||
222 | + ByteBufferInput in = new ByteBufferInput(buffer); | ||
223 | + Kryo kryo = getKryo(); | ||
224 | + try { | ||
225 | + @SuppressWarnings("unchecked") | ||
226 | + T obj = (T) kryo.readClassAndObject(in); | ||
227 | + return obj; | ||
228 | + } finally { | ||
229 | + putKryo(kryo); | ||
230 | + } | ||
231 | + } | ||
195 | 232 | ||
196 | /** | 233 | /** |
197 | * Creates a Kryo instance with {@link #registeredTypes} pre-registered. | 234 | * Creates a Kryo instance with {@link #registeredTypes} pre-registered. | ... | ... |
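For reference, a minimal sketch exercising the new ByteBuffer overloads added to KryoPool in the hunk above. The class name, the pool configuration, and the variable names are illustrative, not part of this change; it assumes String round-trips with Kryo's default registrations, so only ArrayList is registered explicitly.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

import org.onlab.util.KryoPool;

public class KryoPoolBufferExample {
    public static void main(String[] args) {
        // Build a small pool; only ArrayList needs explicit registration here.
        KryoPool pool = KryoPool.newBuilder()
                .register(ArrayList.class)
                .build()
                .populate(1);

        List<String> original = new ArrayList<>();
        original.add("of:foo");

        // Serialize into a caller-supplied buffer, then flip it for reading.
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        pool.serialize(original, buffer);
        buffer.flip();

        List<String> copy = pool.deserialize(buffer);
        System.out.println(copy);
    }
}
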
... | @@ -54,6 +54,15 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> | ... | @@ -54,6 +54,15 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> |
54 | } | 54 | } |
55 | 55 | ||
56 | /** | 56 | /** |
57 | + * Returns the number of message streams in custody of the loop. | ||
58 | + * | ||
59 | + * @return number of message streams | ||
60 | + */ | ||
61 | + public int streamCount() { | ||
62 | + return streams.size(); | ||
63 | + } | ||
64 | + | ||
65 | + /** | ||
57 | * Creates a new message stream backed by the specified socket channel. | 66 | * Creates a new message stream backed by the specified socket channel. |
58 | * | 67 | * |
59 | * @param byteChannel backing byte channel | 68 | * @param byteChannel backing byte channel |
... | @@ -84,14 +93,9 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> | ... | @@ -84,14 +93,9 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> |
84 | * | 93 | * |
85 | * @param key selection key holding the pending connect operation. | 94 | * @param key selection key holding the pending connect operation. |
86 | */ | 95 | */ |
87 | - protected void connect(SelectionKey key) { | 96 | + protected void connect(SelectionKey key) throws IOException { |
88 | - try { | ||
89 | SocketChannel ch = (SocketChannel) key.channel(); | 97 | SocketChannel ch = (SocketChannel) key.channel(); |
90 | ch.finishConnect(); | 98 | ch.finishConnect(); |
91 | - } catch (IOException | IllegalStateException e) { | ||
92 | - log.warn("Unable to complete connection", e); | ||
93 | - } | ||
94 | - | ||
95 | if (key.isValid()) { | 99 | if (key.isValid()) { |
96 | key.interestOps(SelectionKey.OP_READ); | 100 | key.interestOps(SelectionKey.OP_READ); |
97 | } | 101 | } |
... | @@ -115,7 +119,11 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> | ... | @@ -115,7 +119,11 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> |
115 | 119 | ||
116 | // If there is a pending connect operation, complete it. | 120 | // If there is a pending connect operation, complete it. |
117 | if (key.isConnectable()) { | 121 | if (key.isConnectable()) { |
122 | + try { | ||
118 | connect(key); | 123 | connect(key); |
124 | + } catch (IOException | IllegalStateException e) { | ||
125 | + log.warn("Unable to complete connection", e); | ||
126 | + } | ||
119 | } | 127 | } |
120 | 128 | ||
121 | // If there is a read operation, slurp as much data as possible. | 129 | // If there is a read operation, slurp as much data as possible. |
... | @@ -182,9 +190,10 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> | ... | @@ -182,9 +190,10 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> |
182 | * with a pending accept operation. | 190 | * with a pending accept operation. |
183 | * | 191 | * |
184 | * @param channel backing socket channel | 192 | * @param channel backing socket channel |
193 | + * @return newly accepted message stream | ||
185 | */ | 194 | */ |
186 | - public void acceptStream(SocketChannel channel) { | 195 | + public S acceptStream(SocketChannel channel) { |
187 | - createAndAdmit(channel, SelectionKey.OP_READ); | 196 | + return createAndAdmit(channel, SelectionKey.OP_READ); |
188 | } | 197 | } |
189 | 198 | ||
190 | 199 | ||
... | @@ -193,9 +202,10 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> | ... | @@ -193,9 +202,10 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> |
193 | * with a pending connect operation. | 202 | * with a pending connect operation. |
194 | * | 203 | * |
195 | * @param channel backing socket channel | 204 | * @param channel backing socket channel |
205 | + * @return newly connected message stream | ||
196 | */ | 206 | */ |
197 | - public void connectStream(SocketChannel channel) { | 207 | + public S connectStream(SocketChannel channel) { |
198 | - createAndAdmit(channel, SelectionKey.OP_CONNECT); | 208 | + return createAndAdmit(channel, SelectionKey.OP_CONNECT); |
199 | } | 209 | } |
200 | 210 | ||
201 | /** | 211 | /** |
... | @@ -205,12 +215,14 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> | ... | @@ -205,12 +215,14 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> |
205 | * @param channel socket channel | 215 | * @param channel socket channel |
206 | * @param op pending operations mask to be applied to the selection | 216 | * @param op pending operations mask to be applied to the selection |
207 | * key as a set of initial interestedOps | 217 | * key as a set of initial interestedOps |
218 | + * @return newly created message stream | ||
208 | */ | 219 | */ |
209 | - private synchronized void createAndAdmit(SocketChannel channel, int op) { | 220 | + private synchronized S createAndAdmit(SocketChannel channel, int op) { |
210 | S stream = createStream(channel); | 221 | S stream = createStream(channel); |
211 | streams.add(stream); | 222 | streams.add(stream); |
212 | newStreamRequests.add(new NewStreamRequest(stream, channel, op)); | 223 | newStreamRequests.add(new NewStreamRequest(stream, channel, op)); |
213 | selector.wakeup(); | 224 | selector.wakeup(); |
225 | + return stream; | ||
214 | } | 226 | } |
215 | 227 | ||
216 | /** | 228 | /** | ... | ... |
... | @@ -10,6 +10,7 @@ import java.nio.channels.ByteChannel; | ... | @@ -10,6 +10,7 @@ import java.nio.channels.ByteChannel; |
10 | import java.nio.channels.SelectionKey; | 10 | import java.nio.channels.SelectionKey; |
11 | import java.util.ArrayList; | 11 | import java.util.ArrayList; |
12 | import java.util.List; | 12 | import java.util.List; |
13 | +import java.util.Objects; | ||
13 | 14 | ||
14 | import static com.google.common.base.Preconditions.checkArgument; | 15 | import static com.google.common.base.Preconditions.checkArgument; |
15 | import static com.google.common.base.Preconditions.checkNotNull; | 16 | import static com.google.common.base.Preconditions.checkNotNull; |
... | @@ -170,7 +171,7 @@ public abstract class MessageStream<M extends Message> { | ... | @@ -170,7 +171,7 @@ public abstract class MessageStream<M extends Message> { |
170 | } | 171 | } |
171 | 172 | ||
172 | /** | 173 | /** |
173 | - * Reads, withouth blocking, a list of messages from the stream. | 174 | + * Reads, without blocking, a list of messages from the stream. |
174 | * The list will be empty if there were not messages pending. | 175 | * The list will be empty if there were not messages pending. |
175 | * | 176 | * |
176 | * @return list of messages or null if backing channel has been closed | 177 | * @return list of messages or null if backing channel has been closed |
... | @@ -262,7 +263,7 @@ public abstract class MessageStream<M extends Message> { | ... | @@ -262,7 +263,7 @@ public abstract class MessageStream<M extends Message> { |
262 | try { | 263 | try { |
263 | channel.write(outbound); | 264 | channel.write(outbound); |
264 | } catch (IOException e) { | 265 | } catch (IOException e) { |
265 | - if (!closed && !e.getMessage().equals("Broken pipe")) { | 266 | + if (!closed && !Objects.equals(e.getMessage(), "Broken pipe")) { |
266 | log.warn("Unable to write data", e); | 267 | log.warn("Unable to write data", e); |
267 | ioError = e; | 268 | ioError = e; |
268 | } | 269 | } | ... | ... |
... | @@ -230,7 +230,7 @@ public class IOLoopTestClient { | ... | @@ -230,7 +230,7 @@ public class IOLoopTestClient { |
230 | } | 230 | } |
231 | 231 | ||
232 | @Override | 232 | @Override |
233 | - protected void connect(SelectionKey key) { | 233 | + protected void connect(SelectionKey key) throws IOException { |
234 | super.connect(key); | 234 | super.connect(key); |
235 | TestMessageStream b = (TestMessageStream) key.attachment(); | 235 | TestMessageStream b = (TestMessageStream) key.attachment(); |
236 | Worker w = ((CustomIOLoop) b.loop()).worker; | 236 | Worker w = ((CustomIOLoop) b.loop()).worker; | ... | ... |