Ayaka Koshibe

Merge branch 'master' of ssh://gerrit.onlab.us:29418/onos-next

Conflicts:
	core/net/src/main/java/org/onlab/onos/cluster/impl/MastershipManager.java
	core/net/src/main/java/org/onlab/onos/net/device/impl/DeviceManager.java
	core/store/hz/cluster/src/main/java/org/onlab/onos/store/cluster/impl/DistributedMastershipStore.java

Change-Id: I6a8b756fc20968e18ea3fd145e155d6282cea945
Showing 117 changed files with 5422 additions and 237 deletions
...@@ -28,10 +28,6 @@ ...@@ -28,10 +28,6 @@
28 <version>${project.version}</version> 28 <version>${project.version}</version>
29 </dependency> 29 </dependency>
30 <dependency> 30 <dependency>
31 - <groupId>org.livetribe.slp</groupId>
32 - <artifactId>livetribe-slp</artifactId>
33 - </dependency>
34 - <dependency>
35 <groupId>org.apache.karaf.shell</groupId> 31 <groupId>org.apache.karaf.shell</groupId>
36 <artifactId>org.apache.karaf.shell.console</artifactId> 32 <artifactId>org.apache.karaf.shell.console</artifactId>
37 </dependency> 33 </dependency>
......
...@@ -233,7 +233,7 @@ public class IOLoopTestClient { ...@@ -233,7 +233,7 @@ public class IOLoopTestClient {
233 } 233 }
234 234
235 @Override 235 @Override
236 - protected void connect(SelectionKey key) { 236 + protected void connect(SelectionKey key) throws IOException {
237 super.connect(key); 237 super.connect(key);
238 TestMessageStream b = (TestMessageStream) key.attachment(); 238 TestMessageStream b = (TestMessageStream) key.attachment();
239 Worker w = ((CustomIOLoop) b.loop()).worker; 239 Worker w = ((CustomIOLoop) b.loop()).worker;
......
1 /** 1 /**
2 * Sample application for use in various experiments. 2 * Sample application for use in various experiments.
3 */ 3 */
4 -package org.onlab.onos.foo;
...\ No newline at end of file ...\ No newline at end of file
4 +package org.onlab.onos.foo;
......
1 +livetribe.slp.da.expired.services.purge.period=60
2 +livetribe.slp.sa.client.connect.address=127.0.0.1
3 +livetribe.slp.sa.client.factory=org.livetribe.slp.sa.StandardServiceAgentClient$Factory
4 +livetribe.slp.sa.factory=org.livetribe.slp.sa.StandardServiceAgent$Factory
5 +livetribe.slp.sa.service.renewal.enabled=true
6 +livetribe.slp.sa.unicast.prefer.tcp=false
7 +livetribe.slp.tcp.connector.factory=org.livetribe.slp.spi.net.SocketTCPConnector$Factory
8 +livetribe.slp.tcp.connector.server.factory=org.livetribe.slp.spi.net.SocketTCPConnectorServer$Factory
9 +livetribe.slp.tcp.message.max.length=4096
10 +livetribe.slp.tcp.read.timeout=300000
11 +livetribe.slp.ua.client.factory=org.livetribe.slp.ua.StandardUserAgentClient$Factory
12 +livetribe.slp.ua.factory=org.livetribe.slp.ua.StandardUserAgent$Factory
13 +livetribe.slp.ua.unicast.prefer.tcp=false
14 +livetribe.slp.udp.connector.factory=org.livetribe.slp.spi.net.SocketUDPConnector$Factory
15 +livetribe.slp.udp.connector.server.factory=org.livetribe.slp.spi.net.SocketUDPConnectorServer$Factory
16 +net.slp.DAAddresses=
17 +net.slp.DAAttributes=
18 +net.slp.DAHeartBeat=10800
19 +net.slp.MTU=1400
20 +net.slp.SAAttributes=
21 +net.slp.broadcastAddress=255.255.255.255
22 +net.slp.datagramTimeouts=150,250,400
23 +net.slp.interfaces=0.0.0.0
24 +net.slp.isBroadcastOnly=false
25 +net.slp.locale=en
26 +net.slp.multicastAddress=239.255.255.253
27 +net.slp.multicastMaximumWait=15000
28 +net.slp.multicastTTL=255
29 +net.slp.multicastTimeouts=150,250,400,600,1000
30 +net.slp.notificationPort=1847
31 +net.slp.port=427
32 +net.slp.useScopes=default
33 +
34 +org.onlab.cluster.name = TV-ONOS
1 +package org.onlab.onos.cli;
2 +
3 +import org.apache.karaf.shell.commands.Argument;
4 +import org.apache.karaf.shell.commands.Command;
5 +import org.onlab.onos.cluster.ClusterAdminService;
6 +import org.onlab.onos.cluster.NodeId;
7 +import org.onlab.packet.IpPrefix;
8 +
9 +/**
10 + * Adds a new controller cluster node.
11 + */
12 +@Command(scope = "onos", name = "add-node",
13 + description = "Adds a new controller cluster node")
14 +public class NodeAddCommand extends AbstractShellCommand {
15 +
16 + @Argument(index = 0, name = "nodeId", description = "Node ID",
17 + required = true, multiValued = false)
18 + String nodeId = null;
19 +
20 + @Argument(index = 1, name = "ip", description = "Node IP address",
21 + required = true, multiValued = false)
22 + String ip = null;
23 +
24 + @Argument(index = 2, name = "tcpPort", description = "Node TCP listen port",
25 + required = false, multiValued = false)
26 + int tcpPort = 9876;
27 +
28 + @Override
29 + protected void execute() {
30 + ClusterAdminService service = get(ClusterAdminService.class);
31 + service.addNode(new NodeId(nodeId), IpPrefix.valueOf(ip), tcpPort);
32 + }
33 +
34 +}
1 +package org.onlab.onos.cli;
2 +
3 +import org.apache.karaf.shell.commands.Argument;
4 +import org.apache.karaf.shell.commands.Command;
5 +import org.onlab.onos.cluster.ClusterAdminService;
6 +import org.onlab.onos.cluster.NodeId;
7 +
8 +/**
9 + * Removes a controller cluster node.
10 + */
11 +@Command(scope = "onos", name = "remove-node",
12 + description = "Removes a new controller cluster node")
13 +public class NodeRemoveCommand extends AbstractShellCommand {
14 +
15 + @Argument(index = 0, name = "nodeId", description = "Node ID",
16 + required = true, multiValued = false)
17 + String nodeId = null;
18 +
19 + @Override
20 + protected void execute() {
21 + ClusterAdminService service = get(ClusterAdminService.class);
22 + service.removeNode(new NodeId(nodeId));
23 + }
24 +
25 +}
...@@ -17,7 +17,7 @@ import static com.google.common.collect.Lists.newArrayList; ...@@ -17,7 +17,7 @@ import static com.google.common.collect.Lists.newArrayList;
17 public class NodesListCommand extends AbstractShellCommand { 17 public class NodesListCommand extends AbstractShellCommand {
18 18
19 private static final String FMT = 19 private static final String FMT =
20 - "id=%s, ip=%s, state=%s %s"; 20 + "id=%s, address=%s:%s, state=%s %s";
21 21
22 @Override 22 @Override
23 protected void execute() { 23 protected void execute() {
...@@ -26,7 +26,7 @@ public class NodesListCommand extends AbstractShellCommand { ...@@ -26,7 +26,7 @@ public class NodesListCommand extends AbstractShellCommand {
26 Collections.sort(nodes, Comparators.NODE_COMPARATOR); 26 Collections.sort(nodes, Comparators.NODE_COMPARATOR);
27 ControllerNode self = service.getLocalNode(); 27 ControllerNode self = service.getLocalNode();
28 for (ControllerNode node : nodes) { 28 for (ControllerNode node : nodes) {
29 - print(FMT, node.id(), node.ip(), 29 + print(FMT, node.id(), node.ip(), node.tcpPort(),
30 service.getState(node.id()), 30 service.getState(node.id()),
31 node.equals(self) ? "*" : ""); 31 node.equals(self) ? "*" : "");
32 } 32 }
......
...@@ -103,4 +103,4 @@ public class FlowsListCommand extends AbstractShellCommand { ...@@ -103,4 +103,4 @@ public class FlowsListCommand extends AbstractShellCommand {
103 103
104 } 104 }
105 105
106 -}
...\ No newline at end of file ...\ No newline at end of file
106 +}
......
...@@ -5,6 +5,12 @@ ...@@ -5,6 +5,12 @@
5 <action class="org.onlab.onos.cli.NodesListCommand"/> 5 <action class="org.onlab.onos.cli.NodesListCommand"/>
6 </command> 6 </command>
7 <command> 7 <command>
8 + <action class="org.onlab.onos.cli.NodeAddCommand"/>
9 + </command>
10 + <command>
11 + <action class="org.onlab.onos.cli.NodeRemoveCommand"/>
12 + </command>
13 + <command>
8 <action class="org.onlab.onos.cli.MastersListCommand"/> 14 <action class="org.onlab.onos.cli.MastersListCommand"/>
9 <completers> 15 <completers>
10 <ref component-id="clusterIdCompleter"/> 16 <ref component-id="clusterIdCompleter"/>
......
1 package org.onlab.onos.cluster; 1 package org.onlab.onos.cluster;
2 2
3 +import org.onlab.packet.IpPrefix;
4 +
3 /** 5 /**
4 * Service for administering the cluster node membership. 6 * Service for administering the cluster node membership.
5 */ 7 */
6 public interface ClusterAdminService { 8 public interface ClusterAdminService {
7 9
8 /** 10 /**
11 + * Adds a new controller node to the cluster.
12 + *
13 + * @param nodeId controller node identifier
14 + * @param ip node IP listen address
15 + * @param tcpPort tcp listen port
16 + * @return newly added node
17 + */
18 + ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort);
19 +
20 + /**
9 * Removes the specified node from the cluster node list. 21 * Removes the specified node from the cluster node list.
10 * 22 *
11 * @param nodeId controller node identifier 23 * @param nodeId controller node identifier
......
1 package org.onlab.onos.cluster; 1 package org.onlab.onos.cluster;
2 2
3 import org.onlab.onos.store.Store; 3 import org.onlab.onos.store.Store;
4 +import org.onlab.packet.IpPrefix;
4 5
5 import java.util.Set; 6 import java.util.Set;
6 7
...@@ -40,6 +41,16 @@ public interface ClusterStore extends Store<ClusterEvent, ClusterStoreDelegate> ...@@ -40,6 +41,16 @@ public interface ClusterStore extends Store<ClusterEvent, ClusterStoreDelegate>
40 ControllerNode.State getState(NodeId nodeId); 41 ControllerNode.State getState(NodeId nodeId);
41 42
42 /** 43 /**
44 + * Adds a new controller node to the cluster.
45 + *
46 + * @param nodeId controller node identifier
47 + * @param ip node IP listen address
48 + * @param tcpPort tcp listen port
49 + * @return newly added node
50 + */
51 + ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort);
52 +
53 + /**
43 * Removes the specified node from the inventory of cluster nodes. 54 * Removes the specified node from the inventory of cluster nodes.
44 * 55 *
45 * @param nodeId controller instance identifier 56 * @param nodeId controller instance identifier
......
...@@ -35,4 +35,12 @@ public interface ControllerNode { ...@@ -35,4 +35,12 @@ public interface ControllerNode {
35 */ 35 */
36 IpPrefix ip(); 36 IpPrefix ip();
37 37
38 +
39 + /**
40 + * Returns the TCP port on which the node listens for connections.
41 + *
42 + * @return TCP port
43 + */
44 + int tcpPort();
45 +
38 } 46 }
......
...@@ -11,13 +11,17 @@ import static com.google.common.base.MoreObjects.toStringHelper; ...@@ -11,13 +11,17 @@ import static com.google.common.base.MoreObjects.toStringHelper;
11 */ 11 */
12 public class DefaultControllerNode implements ControllerNode { 12 public class DefaultControllerNode implements ControllerNode {
13 13
14 + private static final int DEFAULT_PORT = 9876;
15 +
14 private final NodeId id; 16 private final NodeId id;
15 private final IpPrefix ip; 17 private final IpPrefix ip;
18 + private final int tcpPort;
16 19
17 // For serialization 20 // For serialization
18 private DefaultControllerNode() { 21 private DefaultControllerNode() {
19 this.id = null; 22 this.id = null;
20 this.ip = null; 23 this.ip = null;
24 + this.tcpPort = 0;
21 } 25 }
22 26
23 /** 27 /**
...@@ -27,8 +31,19 @@ public class DefaultControllerNode implements ControllerNode { ...@@ -27,8 +31,19 @@ public class DefaultControllerNode implements ControllerNode {
27 * @param ip instance IP address 31 * @param ip instance IP address
28 */ 32 */
29 public DefaultControllerNode(NodeId id, IpPrefix ip) { 33 public DefaultControllerNode(NodeId id, IpPrefix ip) {
34 + this(id, ip, DEFAULT_PORT);
35 + }
36 +
37 + /**
38 + * Creates a new instance with the specified id and IP address and TCP port.
39 + *
40 + * @param id instance identifier
41 + * @param ip instance IP address
42 + */
43 + public DefaultControllerNode(NodeId id, IpPrefix ip, int tcpPort) {
30 this.id = id; 44 this.id = id;
31 this.ip = ip; 45 this.ip = ip;
46 + this.tcpPort = tcpPort;
32 } 47 }
33 48
34 @Override 49 @Override
...@@ -42,6 +57,11 @@ public class DefaultControllerNode implements ControllerNode { ...@@ -42,6 +57,11 @@ public class DefaultControllerNode implements ControllerNode {
42 } 57 }
43 58
44 @Override 59 @Override
60 + public int tcpPort() {
61 + return tcpPort;
62 + }
63 +
64 + @Override
45 public int hashCode() { 65 public int hashCode() {
46 return Objects.hash(id); 66 return Objects.hash(id);
47 } 67 }
...@@ -60,7 +80,8 @@ public class DefaultControllerNode implements ControllerNode { ...@@ -60,7 +80,8 @@ public class DefaultControllerNode implements ControllerNode {
60 80
61 @Override 81 @Override
62 public String toString() { 82 public String toString() {
63 - return toStringHelper(this).add("id", id).add("ip", ip).toString(); 83 + return toStringHelper(this).add("id", id)
84 + .add("ip", ip).add("tcpPort", tcpPort).toString();
64 } 85 }
65 86
66 } 87 }
......
1 +package org.onlab.onos.net.proxyarp;
2 +
3 +import org.onlab.packet.Ethernet;
4 +import org.onlab.packet.IpPrefix;
5 +
6 +/**
7 + * Service for processing arp requests on behalf of applications.
8 + */
9 +public interface ProxyArpService {
10 +
11 + /**
12 + * Returns whether this particular ip address is known to the system.
13 + *
14 + * @param addr
15 + * a ip address
16 + * @return true if know, false otherwise
17 + */
18 + boolean known(IpPrefix addr);
19 +
20 + /**
21 + * Sends a reply for a given request. If the host is not known then the arp
22 + * will be flooded at all edge ports.
23 + *
24 + * @param request
25 + * an arp request
26 + */
27 + void reply(Ethernet request);
28 +
29 +}
1 +/**
2 + * Base abstractions related to the proxy arp service.
3 + */
4 +package org.onlab.onos.net.proxyarp;
1 +package org.onlab.onos.store;
2 +
3 +import org.onlab.onos.cluster.MastershipTerm;
4 +import org.onlab.onos.net.DeviceId;
5 +
6 +// TODO: Consider renaming to DeviceClockService?
7 +/**
8 + * Interface for a logical clock service that vends per device timestamps.
9 + */
10 +public interface ClockService {
11 +
12 + /**
13 + * Returns a new timestamp for the specified deviceId.
14 + * @param deviceId device identifier.
15 + * @return timestamp.
16 + */
17 + public Timestamp getTimestamp(DeviceId deviceId);
18 +
19 + // TODO: Should this be here or separate as Admin service, etc.?
20 + /**
21 + * Updates the mastership term for the specified deviceId.
22 + * @param deviceId device identifier.
23 + * @param term mastership term.
24 + */
25 + public void setMastershipTerm(DeviceId deviceId, MastershipTerm term);
26 +}
1 /** 1 /**
2 * Abstractions for creating and interacting with distributed stores. 2 * Abstractions for creating and interacting with distributed stores.
3 */ 3 */
4 -package org.onlab.onos.store;
...\ No newline at end of file ...\ No newline at end of file
4 +package org.onlab.onos.store;
......
...@@ -40,13 +40,14 @@ ...@@ -40,13 +40,14 @@
40 Currently required for DistributedDeviceManagerTest. --> 40 Currently required for DistributedDeviceManagerTest. -->
41 <dependency> 41 <dependency>
42 <groupId>org.onlab.onos</groupId> 42 <groupId>org.onlab.onos</groupId>
43 - <artifactId>onos-core-store</artifactId> 43 + <artifactId>onos-core-hz-net</artifactId>
44 <version>${project.version}</version> 44 <version>${project.version}</version>
45 <scope>test</scope> 45 <scope>test</scope>
46 </dependency> 46 </dependency>
47 <dependency> 47 <dependency>
48 <groupId>org.onlab.onos</groupId> 48 <groupId>org.onlab.onos</groupId>
49 - <artifactId>onos-core-store</artifactId> 49 + <!-- FIXME: should be somewhere else -->
50 + <artifactId>onos-core-hz-common</artifactId>
50 <version>${project.version}</version> 51 <version>${project.version}</version>
51 <classifier>tests</classifier> 52 <classifier>tests</classifier>
52 <scope>test</scope> 53 <scope>test</scope>
......
...@@ -16,10 +16,12 @@ import org.onlab.onos.cluster.ControllerNode; ...@@ -16,10 +16,12 @@ import org.onlab.onos.cluster.ControllerNode;
16 import org.onlab.onos.cluster.NodeId; 16 import org.onlab.onos.cluster.NodeId;
17 import org.onlab.onos.event.AbstractListenerRegistry; 17 import org.onlab.onos.event.AbstractListenerRegistry;
18 import org.onlab.onos.event.EventDeliveryService; 18 import org.onlab.onos.event.EventDeliveryService;
19 +import org.onlab.packet.IpPrefix;
19 import org.slf4j.Logger; 20 import org.slf4j.Logger;
20 21
21 import java.util.Set; 22 import java.util.Set;
22 23
24 +import static com.google.common.base.Preconditions.checkArgument;
23 import static com.google.common.base.Preconditions.checkNotNull; 25 import static com.google.common.base.Preconditions.checkNotNull;
24 import static org.slf4j.LoggerFactory.getLogger; 26 import static org.slf4j.LoggerFactory.getLogger;
25 27
...@@ -81,6 +83,14 @@ public class ClusterManager implements ClusterService, ClusterAdminService { ...@@ -81,6 +83,14 @@ public class ClusterManager implements ClusterService, ClusterAdminService {
81 } 83 }
82 84
83 @Override 85 @Override
86 + public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
87 + checkNotNull(nodeId, INSTANCE_ID_NULL);
88 + checkNotNull(ip, "IP address cannot be null");
89 + checkArgument(tcpPort > 5000, "TCP port must be > 5000");
90 + return store.addNode(nodeId, ip, tcpPort);
91 + }
92 +
93 + @Override
84 public void removeNode(NodeId nodeId) { 94 public void removeNode(NodeId nodeId) {
85 checkNotNull(nodeId, INSTANCE_ID_NULL); 95 checkNotNull(nodeId, INSTANCE_ID_NULL);
86 store.removeNode(nodeId); 96 store.removeNode(nodeId);
......
1 package org.onlab.onos.cluster.impl; 1 package org.onlab.onos.cluster.impl;
2 2
3 +import static com.google.common.base.Preconditions.checkNotNull;
4 +import static org.slf4j.LoggerFactory.getLogger;
5 +
6 +import java.util.Set;
7 +
3 import org.apache.felix.scr.annotations.Activate; 8 import org.apache.felix.scr.annotations.Activate;
4 import org.apache.felix.scr.annotations.Component; 9 import org.apache.felix.scr.annotations.Component;
5 import org.apache.felix.scr.annotations.Deactivate; 10 import org.apache.felix.scr.annotations.Deactivate;
...@@ -14,6 +19,7 @@ import org.onlab.onos.cluster.MastershipEvent; ...@@ -14,6 +19,7 @@ import org.onlab.onos.cluster.MastershipEvent;
14 import org.onlab.onos.cluster.MastershipListener; 19 import org.onlab.onos.cluster.MastershipListener;
15 import org.onlab.onos.cluster.MastershipService; 20 import org.onlab.onos.cluster.MastershipService;
16 import org.onlab.onos.cluster.MastershipStore; 21 import org.onlab.onos.cluster.MastershipStore;
22 +import org.onlab.onos.cluster.MastershipStoreDelegate;
17 import org.onlab.onos.cluster.MastershipTerm; 23 import org.onlab.onos.cluster.MastershipTerm;
18 import org.onlab.onos.cluster.MastershipTermService; 24 import org.onlab.onos.cluster.MastershipTermService;
19 import org.onlab.onos.cluster.NodeId; 25 import org.onlab.onos.cluster.NodeId;
...@@ -23,15 +29,10 @@ import org.onlab.onos.net.DeviceId; ...@@ -23,15 +29,10 @@ import org.onlab.onos.net.DeviceId;
23 import org.onlab.onos.net.MastershipRole; 29 import org.onlab.onos.net.MastershipRole;
24 import org.slf4j.Logger; 30 import org.slf4j.Logger;
25 31
26 -import java.util.Set;
27 -
28 -import static com.google.common.base.Preconditions.checkNotNull;
29 -import static org.slf4j.LoggerFactory.getLogger;
30 -
31 @Component(immediate = true) 32 @Component(immediate = true)
32 @Service 33 @Service
33 public class MastershipManager 34 public class MastershipManager
34 - implements MastershipService, MastershipAdminService { 35 +implements MastershipService, MastershipAdminService {
35 36
36 private static final String NODE_ID_NULL = "Node ID cannot be null"; 37 private static final String NODE_ID_NULL = "Node ID cannot be null";
37 private static final String DEVICE_ID_NULL = "Device ID cannot be null"; 38 private static final String DEVICE_ID_NULL = "Device ID cannot be null";
...@@ -40,7 +41,9 @@ public class MastershipManager ...@@ -40,7 +41,9 @@ public class MastershipManager
40 private final Logger log = getLogger(getClass()); 41 private final Logger log = getLogger(getClass());
41 42
42 protected final AbstractListenerRegistry<MastershipEvent, MastershipListener> 43 protected final AbstractListenerRegistry<MastershipEvent, MastershipListener>
43 - listenerRegistry = new AbstractListenerRegistry<>(); 44 + listenerRegistry = new AbstractListenerRegistry<>();
45 +
46 + private final MastershipStoreDelegate delegate = new InternalDelegate();
44 47
45 @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) 48 @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
46 protected MastershipStore store; 49 protected MastershipStore store;
...@@ -57,6 +60,7 @@ public class MastershipManager ...@@ -57,6 +60,7 @@ public class MastershipManager
57 public void activate() { 60 public void activate() {
58 eventDispatcher.addSink(MastershipEvent.class, listenerRegistry); 61 eventDispatcher.addSink(MastershipEvent.class, listenerRegistry);
59 clusterService.addListener(clusterListener); 62 clusterService.addListener(clusterListener);
63 + store.setDelegate(delegate);
60 log.info("Started"); 64 log.info("Started");
61 } 65 }
62 66
...@@ -64,6 +68,7 @@ public class MastershipManager ...@@ -64,6 +68,7 @@ public class MastershipManager
64 public void deactivate() { 68 public void deactivate() {
65 eventDispatcher.removeSink(MastershipEvent.class); 69 eventDispatcher.removeSink(MastershipEvent.class);
66 clusterService.removeListener(clusterListener); 70 clusterService.removeListener(clusterListener);
71 + store.unsetDelegate(delegate);
67 log.info("Stopped"); 72 log.info("Stopped");
68 } 73 }
69 74
...@@ -188,4 +193,15 @@ public class MastershipManager ...@@ -188,4 +193,15 @@ public class MastershipManager
188 } 193 }
189 194
190 } 195 }
196 +
197 + public class InternalDelegate implements MastershipStoreDelegate {
198 +
199 + @Override
200 + public void notify(MastershipEvent event) {
201 + log.info("dispatching mastership event {}", event);
202 + eventDispatcher.post(event);
203 + }
204 +
205 + }
206 +
191 } 207 }
......
1 /** 1 /**
2 * Subsystem for tracking controller cluster nodes. 2 * Subsystem for tracking controller cluster nodes.
3 */ 3 */
4 -package org.onlab.onos.cluster.impl;
...\ No newline at end of file ...\ No newline at end of file
4 +package org.onlab.onos.cluster.impl;
......
1 package org.onlab.onos.net.device.impl; 1 package org.onlab.onos.net.device.impl;
2 2
3 +import static com.google.common.base.Preconditions.checkNotNull;
4 +import static org.onlab.onos.net.device.DeviceEvent.Type.DEVICE_MASTERSHIP_CHANGED;
5 +import static org.slf4j.LoggerFactory.getLogger;
6 +
7 +import java.util.List;
8 +
3 import org.apache.felix.scr.annotations.Activate; 9 import org.apache.felix.scr.annotations.Activate;
4 import org.apache.felix.scr.annotations.Component; 10 import org.apache.felix.scr.annotations.Component;
5 import org.apache.felix.scr.annotations.Deactivate; 11 import org.apache.felix.scr.annotations.Deactivate;
...@@ -11,6 +17,7 @@ import org.onlab.onos.cluster.MastershipEvent; ...@@ -11,6 +17,7 @@ import org.onlab.onos.cluster.MastershipEvent;
11 import org.onlab.onos.cluster.MastershipListener; 17 import org.onlab.onos.cluster.MastershipListener;
12 import org.onlab.onos.cluster.MastershipService; 18 import org.onlab.onos.cluster.MastershipService;
13 import org.onlab.onos.cluster.MastershipTermService; 19 import org.onlab.onos.cluster.MastershipTermService;
20 +import org.onlab.onos.cluster.MastershipTerm;
14 import org.onlab.onos.event.AbstractListenerRegistry; 21 import org.onlab.onos.event.AbstractListenerRegistry;
15 import org.onlab.onos.event.EventDeliveryService; 22 import org.onlab.onos.event.EventDeliveryService;
16 import org.onlab.onos.net.Device; 23 import org.onlab.onos.net.Device;
...@@ -31,22 +38,17 @@ import org.onlab.onos.net.device.DeviceStoreDelegate; ...@@ -31,22 +38,17 @@ import org.onlab.onos.net.device.DeviceStoreDelegate;
31 import org.onlab.onos.net.device.PortDescription; 38 import org.onlab.onos.net.device.PortDescription;
32 import org.onlab.onos.net.provider.AbstractProviderRegistry; 39 import org.onlab.onos.net.provider.AbstractProviderRegistry;
33 import org.onlab.onos.net.provider.AbstractProviderService; 40 import org.onlab.onos.net.provider.AbstractProviderService;
41 +import org.onlab.onos.store.ClockService;
34 import org.slf4j.Logger; 42 import org.slf4j.Logger;
35 43
36 -import java.util.List;
37 -
38 -import static com.google.common.base.Preconditions.checkNotNull;
39 -import static org.onlab.onos.net.device.DeviceEvent.Type.DEVICE_MASTERSHIP_CHANGED;
40 -import static org.slf4j.LoggerFactory.getLogger;
41 -
42 /** 44 /**
43 * Provides implementation of the device SB &amp; NB APIs. 45 * Provides implementation of the device SB &amp; NB APIs.
44 */ 46 */
45 @Component(immediate = true) 47 @Component(immediate = true)
46 @Service 48 @Service
47 public class DeviceManager 49 public class DeviceManager
48 - extends AbstractProviderRegistry<DeviceProvider, DeviceProviderService> 50 + extends AbstractProviderRegistry<DeviceProvider, DeviceProviderService>
49 - implements DeviceService, DeviceAdminService, DeviceProviderRegistry { 51 + implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
50 52
51 private static final String DEVICE_ID_NULL = "Device ID cannot be null"; 53 private static final String DEVICE_ID_NULL = "Device ID cannot be null";
52 private static final String PORT_NUMBER_NULL = "Port number cannot be null"; 54 private static final String PORT_NUMBER_NULL = "Port number cannot be null";
...@@ -56,10 +58,10 @@ public class DeviceManager ...@@ -56,10 +58,10 @@ public class DeviceManager
56 58
57 private final Logger log = getLogger(getClass()); 59 private final Logger log = getLogger(getClass());
58 60
59 - protected final AbstractListenerRegistry<DeviceEvent, DeviceListener> 61 + protected final AbstractListenerRegistry<DeviceEvent, DeviceListener> listenerRegistry =
60 - listenerRegistry = new AbstractListenerRegistry<>(); 62 + new AbstractListenerRegistry<>();
61 63
62 - private DeviceStoreDelegate delegate = new InternalStoreDelegate(); 64 + private final DeviceStoreDelegate delegate = new InternalStoreDelegate();
63 65
64 private final MastershipListener mastershipListener = new InternalMastershipListener(); 66 private final MastershipListener mastershipListener = new InternalMastershipListener();
65 67
...@@ -77,6 +79,9 @@ public class DeviceManager ...@@ -77,6 +79,9 @@ public class DeviceManager
77 79
78 protected MastershipTermService termService; 80 protected MastershipTermService termService;
79 81
82 + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
83 + protected ClockService clockService;
84 +
80 @Activate 85 @Activate
81 public void activate() { 86 public void activate() {
82 store.setDelegate(delegate); 87 store.setDelegate(delegate);
...@@ -168,33 +173,36 @@ public class DeviceManager ...@@ -168,33 +173,36 @@ public class DeviceManager
168 } 173 }
169 174
170 @Override 175 @Override
171 - protected DeviceProviderService createProviderService(DeviceProvider provider) { 176 + protected DeviceProviderService createProviderService(
177 + DeviceProvider provider) {
172 return new InternalDeviceProviderService(provider); 178 return new InternalDeviceProviderService(provider);
173 } 179 }
174 180
175 // Personalized device provider service issued to the supplied provider. 181 // Personalized device provider service issued to the supplied provider.
176 private class InternalDeviceProviderService 182 private class InternalDeviceProviderService
177 - extends AbstractProviderService<DeviceProvider> 183 + extends AbstractProviderService<DeviceProvider>
178 - implements DeviceProviderService { 184 + implements DeviceProviderService {
179 185
180 InternalDeviceProviderService(DeviceProvider provider) { 186 InternalDeviceProviderService(DeviceProvider provider) {
181 super(provider); 187 super(provider);
182 } 188 }
183 189
184 @Override 190 @Override
185 - public void deviceConnected(DeviceId deviceId, DeviceDescription deviceDescription) { 191 + public void deviceConnected(DeviceId deviceId,
192 + DeviceDescription deviceDescription) {
186 checkNotNull(deviceId, DEVICE_ID_NULL); 193 checkNotNull(deviceId, DEVICE_ID_NULL);
187 checkNotNull(deviceDescription, DEVICE_DESCRIPTION_NULL); 194 checkNotNull(deviceDescription, DEVICE_DESCRIPTION_NULL);
188 checkValidity(); 195 checkValidity();
189 DeviceEvent event = store.createOrUpdateDevice(provider().id(), 196 DeviceEvent event = store.createOrUpdateDevice(provider().id(),
190 - deviceId, deviceDescription); 197 + deviceId, deviceDescription);
191 198
192 - // If there was a change of any kind, trigger role selection process. 199 + // If there was a change of any kind, trigger role selection
200 + // process.
193 if (event != null) { 201 if (event != null) {
194 log.info("Device {} connected", deviceId); 202 log.info("Device {} connected", deviceId);
195 mastershipService.requestRoleFor(deviceId); 203 mastershipService.requestRoleFor(deviceId);
196 provider().roleChanged(event.subject(), 204 provider().roleChanged(event.subject(),
197 - mastershipService.getLocalRole(deviceId)); 205 + mastershipService.getLocalRole(deviceId));
198 post(event); 206 post(event);
199 } 207 }
200 } 208 }
...@@ -214,25 +222,30 @@ public class DeviceManager ...@@ -214,25 +222,30 @@ public class DeviceManager
214 } 222 }
215 223
216 @Override 224 @Override
217 - public void updatePorts(DeviceId deviceId, List<PortDescription> portDescriptions) { 225 + public void updatePorts(DeviceId deviceId,
226 + List<PortDescription> portDescriptions) {
218 checkNotNull(deviceId, DEVICE_ID_NULL); 227 checkNotNull(deviceId, DEVICE_ID_NULL);
219 - checkNotNull(portDescriptions, "Port descriptions list cannot be null"); 228 + checkNotNull(portDescriptions,
229 + "Port descriptions list cannot be null");
220 checkValidity(); 230 checkValidity();
221 - List<DeviceEvent> events = store.updatePorts(deviceId, portDescriptions); 231 + List<DeviceEvent> events = store.updatePorts(deviceId,
232 + portDescriptions);
222 for (DeviceEvent event : events) { 233 for (DeviceEvent event : events) {
223 post(event); 234 post(event);
224 } 235 }
225 } 236 }
226 237
227 @Override 238 @Override
228 - public void portStatusChanged(DeviceId deviceId, PortDescription portDescription) { 239 + public void portStatusChanged(DeviceId deviceId,
240 + PortDescription portDescription) {
229 checkNotNull(deviceId, DEVICE_ID_NULL); 241 checkNotNull(deviceId, DEVICE_ID_NULL);
230 checkNotNull(portDescription, PORT_DESCRIPTION_NULL); 242 checkNotNull(portDescription, PORT_DESCRIPTION_NULL);
231 checkValidity(); 243 checkValidity();
232 - DeviceEvent event = store.updatePortStatus(deviceId, portDescription); 244 + DeviceEvent event = store.updatePortStatus(deviceId,
245 + portDescription);
233 if (event != null) { 246 if (event != null) {
234 - log.info("Device {} port {} status changed", deviceId, 247 + log.info("Device {} port {} status changed", deviceId, event
235 - event.port().number()); 248 + .port().number());
236 post(event); 249 post(event);
237 } 250 }
238 } 251 }
...@@ -240,8 +253,8 @@ public class DeviceManager ...@@ -240,8 +253,8 @@ public class DeviceManager
240 @Override 253 @Override
241 public void unableToAssertRole(DeviceId deviceId, MastershipRole role) { 254 public void unableToAssertRole(DeviceId deviceId, MastershipRole role) {
242 // FIXME: implement response to this notification 255 // FIXME: implement response to this notification
243 - log.warn("Failed to assert role [{}] onto Device {}", 256 + log.warn("Failed to assert role [{}] onto Device {}", role,
244 - role, deviceId); 257 + deviceId);
245 } 258 }
246 } 259 }
247 260
...@@ -253,18 +266,24 @@ public class DeviceManager ...@@ -253,18 +266,24 @@ public class DeviceManager
253 } 266 }
254 267
255 // Intercepts mastership events 268 // Intercepts mastership events
256 - private class InternalMastershipListener implements MastershipListener { 269 + private class InternalMastershipListener
270 + implements MastershipListener {
257 @Override 271 @Override
258 public void event(MastershipEvent event) { 272 public void event(MastershipEvent event) {
259 - // FIXME: for now we're taking action only on becoming master
260 if (event.master().equals(clusterService.getLocalNode().id())) { 273 if (event.master().equals(clusterService.getLocalNode().id())) {
274 + MastershipTerm term = mastershipService.requestTermService()
275 + .getMastershipTerm(event.subject());
276 + clockService.setMastershipTerm(event.subject(), term);
261 applyRole(event.subject(), MastershipRole.MASTER); 277 applyRole(event.subject(), MastershipRole.MASTER);
278 + } else {
279 + applyRole(event.subject(), MastershipRole.STANDBY);
262 } 280 }
263 } 281 }
264 } 282 }
265 283
266 // Store delegate to re-post events emitted from the store. 284 // Store delegate to re-post events emitted from the store.
267 - private class InternalStoreDelegate implements DeviceStoreDelegate { 285 + private class InternalStoreDelegate
286 + implements DeviceStoreDelegate {
268 @Override 287 @Override
269 public void notify(DeviceEvent event) { 288 public void notify(DeviceEvent event) {
270 post(event); 289 post(event);
......
1 +package org.onlab.onos.proxyarp.impl;
2 +
3 +import static com.google.common.base.Preconditions.checkArgument;
4 +import static com.google.common.base.Preconditions.checkNotNull;
5 +
6 +import java.nio.ByteBuffer;
7 +import java.util.Set;
8 +
9 +import org.apache.felix.scr.annotations.Reference;
10 +import org.apache.felix.scr.annotations.ReferenceCardinality;
11 +import org.onlab.onos.net.Host;
12 +import org.onlab.onos.net.flow.DefaultTrafficTreatment;
13 +import org.onlab.onos.net.flow.TrafficTreatment;
14 +import org.onlab.onos.net.host.HostService;
15 +import org.onlab.onos.net.packet.DefaultOutboundPacket;
16 +import org.onlab.onos.net.packet.PacketService;
17 +import org.onlab.onos.net.proxyarp.ProxyArpService;
18 +import org.onlab.onos.net.topology.TopologyService;
19 +import org.onlab.packet.ARP;
20 +import org.onlab.packet.Ethernet;
21 +import org.onlab.packet.IpPrefix;
22 +import org.onlab.packet.VlanId;
23 +
24 +public class ProxyArpManager implements ProxyArpService {
25 +
26 + private static final String MAC_ADDR_NULL = "Mac address cannot be null.";
27 + private static final String REQUEST_NULL = "Arp request cannot be null.";
28 + private static final String REQUEST_NOT_ARP = "Ethernet frame does not contain ARP request.";
29 + private static final String NOT_ARP_REQUEST = "ARP is not a request.";
30 +
31 + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
32 + protected HostService hostService;
33 +
34 + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
35 + protected PacketService packetService;
36 +
37 + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
38 + protected TopologyService topologyService;
39 +
40 + @Override
41 + public boolean known(IpPrefix addr) {
42 + checkNotNull(MAC_ADDR_NULL, addr);
43 + Set<Host> hosts = hostService.getHostsByIp(addr);
44 + return !hosts.isEmpty();
45 + }
46 +
47 + @Override
48 + public void reply(Ethernet request) {
49 + checkNotNull(REQUEST_NULL, request);
50 + checkArgument(request.getEtherType() == Ethernet.TYPE_ARP,
51 + REQUEST_NOT_ARP);
52 + ARP arp = (ARP) request.getPayload();
53 + checkArgument(arp.getOpCode() == ARP.OP_REQUEST, NOT_ARP_REQUEST);
54 +
55 + VlanId vlan = VlanId.vlanId(request.getVlanID());
56 + Set<Host> hosts = hostService.getHostsByIp(IpPrefix.valueOf(arp
57 + .getTargetProtocolAddress()));
58 +
59 + Host h = null;
60 + for (Host host : hosts) {
61 + if (host.vlan().equals(vlan)) {
62 + h = host;
63 + break;
64 + }
65 + }
66 +
67 + if (h == null) {
68 + flood(request);
69 + return;
70 + }
71 +
72 + Ethernet arpReply = buildArpReply(h, request);
73 + // TODO: check send status with host service.
74 + TrafficTreatment.Builder builder = new DefaultTrafficTreatment.Builder();
75 + builder.setOutput(h.location().port());
76 + packetService.emit(new DefaultOutboundPacket(h.location().deviceId(),
77 + builder.build(), ByteBuffer.wrap(arpReply.serialize())));
78 + }
79 +
80 + private void flood(Ethernet request) {
81 + // TODO: flood on all edge ports.
82 + }
83 +
84 + private Ethernet buildArpReply(Host h, Ethernet request) {
85 + Ethernet eth = new Ethernet();
86 + eth.setDestinationMACAddress(request.getSourceMACAddress());
87 + eth.setSourceMACAddress(h.mac().getAddress());
88 + eth.setEtherType(Ethernet.TYPE_ARP);
89 + ARP arp = new ARP();
90 + arp.setOpCode(ARP.OP_REPLY);
91 + arp.setSenderHardwareAddress(h.mac().getAddress());
92 + arp.setTargetHardwareAddress(request.getSourceMACAddress());
93 +
94 + arp.setTargetProtocolAddress(((ARP) request.getPayload())
95 + .getSenderProtocolAddress());
96 + arp.setSenderProtocolAddress(h.ipAddresses().iterator().next().toInt());
97 + eth.setPayload(arp);
98 + return eth;
99 + }
100 +}
/**
 * Core subsystem for responding to ARP requests on behalf of known hosts.
 */
package org.onlab.onos.proxyarp.impl;
...@@ -32,9 +32,9 @@ import org.onlab.onos.net.device.DeviceService; ...@@ -32,9 +32,9 @@ import org.onlab.onos.net.device.DeviceService;
32 import org.onlab.onos.net.device.PortDescription; 32 import org.onlab.onos.net.device.PortDescription;
33 import org.onlab.onos.net.provider.AbstractProvider; 33 import org.onlab.onos.net.provider.AbstractProvider;
34 import org.onlab.onos.net.provider.ProviderId; 34 import org.onlab.onos.net.provider.ProviderId;
35 +import org.onlab.onos.store.common.StoreManager;
36 +import org.onlab.onos.store.common.TestStoreManager;
35 import org.onlab.onos.store.device.impl.DistributedDeviceStore; 37 import org.onlab.onos.store.device.impl.DistributedDeviceStore;
36 -import org.onlab.onos.store.impl.StoreManager;
37 -import org.onlab.onos.store.impl.TestStoreManager;
38 import org.onlab.packet.IpPrefix; 38 import org.onlab.packet.IpPrefix;
39 39
40 import java.util.ArrayList; 40 import java.util.ArrayList;
...@@ -163,7 +163,7 @@ public class DistributedDeviceManagerTest { ...@@ -163,7 +163,7 @@ public class DistributedDeviceManagerTest {
163 public void deviceDisconnected() { 163 public void deviceDisconnected() {
164 connectDevice(DID1, SW1); 164 connectDevice(DID1, SW1);
165 connectDevice(DID2, SW1); 165 connectDevice(DID2, SW1);
166 - validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED); 166 + validateEvents(DEVICE_ADDED, DEVICE_ADDED);
167 assertTrue("device should be available", service.isAvailable(DID1)); 167 assertTrue("device should be available", service.isAvailable(DID1));
168 168
169 // Disconnect 169 // Disconnect
...@@ -182,10 +182,10 @@ public class DistributedDeviceManagerTest { ...@@ -182,10 +182,10 @@ public class DistributedDeviceManagerTest {
182 @Test 182 @Test
183 public void deviceUpdated() { 183 public void deviceUpdated() {
184 connectDevice(DID1, SW1); 184 connectDevice(DID1, SW1);
185 - validateEvents(DEVICE_ADDED, DEVICE_ADDED); 185 + validateEvents(DEVICE_ADDED);
186 186
187 connectDevice(DID1, SW2); 187 connectDevice(DID1, SW2);
188 - validateEvents(DEVICE_UPDATED, DEVICE_UPDATED); 188 + validateEvents(DEVICE_UPDATED);
189 } 189 }
190 190
191 @Test 191 @Test
...@@ -202,7 +202,7 @@ public class DistributedDeviceManagerTest { ...@@ -202,7 +202,7 @@ public class DistributedDeviceManagerTest {
202 pds.add(new DefaultPortDescription(P2, true)); 202 pds.add(new DefaultPortDescription(P2, true));
203 pds.add(new DefaultPortDescription(P3, true)); 203 pds.add(new DefaultPortDescription(P3, true));
204 providerService.updatePorts(DID1, pds); 204 providerService.updatePorts(DID1, pds);
205 - validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED, PORT_ADDED); 205 + validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED, PORT_ADDED);
206 pds.clear(); 206 pds.clear();
207 207
208 pds.add(new DefaultPortDescription(P1, false)); 208 pds.add(new DefaultPortDescription(P1, false));
...@@ -218,7 +218,7 @@ public class DistributedDeviceManagerTest { ...@@ -218,7 +218,7 @@ public class DistributedDeviceManagerTest {
218 pds.add(new DefaultPortDescription(P1, true)); 218 pds.add(new DefaultPortDescription(P1, true));
219 pds.add(new DefaultPortDescription(P2, true)); 219 pds.add(new DefaultPortDescription(P2, true));
220 providerService.updatePorts(DID1, pds); 220 providerService.updatePorts(DID1, pds);
221 - validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED); 221 + validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
222 222
223 providerService.portStatusChanged(DID1, new DefaultPortDescription(P1, false)); 223 providerService.portStatusChanged(DID1, new DefaultPortDescription(P1, false));
224 validateEvents(PORT_UPDATED); 224 validateEvents(PORT_UPDATED);
...@@ -233,7 +233,7 @@ public class DistributedDeviceManagerTest { ...@@ -233,7 +233,7 @@ public class DistributedDeviceManagerTest {
233 pds.add(new DefaultPortDescription(P1, true)); 233 pds.add(new DefaultPortDescription(P1, true));
234 pds.add(new DefaultPortDescription(P2, true)); 234 pds.add(new DefaultPortDescription(P2, true));
235 providerService.updatePorts(DID1, pds); 235 providerService.updatePorts(DID1, pds);
236 - validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED); 236 + validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
237 assertEquals("wrong port count", 2, service.getPorts(DID1).size()); 237 assertEquals("wrong port count", 2, service.getPorts(DID1).size());
238 238
239 Port port = service.getPort(DID1, P1); 239 Port port = service.getPort(DID1, P1);
...@@ -247,7 +247,7 @@ public class DistributedDeviceManagerTest { ...@@ -247,7 +247,7 @@ public class DistributedDeviceManagerTest {
247 connectDevice(DID2, SW2); 247 connectDevice(DID2, SW2);
248 assertEquals("incorrect device count", 2, service.getDeviceCount()); 248 assertEquals("incorrect device count", 2, service.getDeviceCount());
249 admin.removeDevice(DID1); 249 admin.removeDevice(DID1);
250 - validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_REMOVED, DEVICE_REMOVED); 250 + validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_REMOVED);
251 assertNull("device should not be found", service.getDevice(DID1)); 251 assertNull("device should not be found", service.getDevice(DID1));
252 assertNotNull("device should be found", service.getDevice(DID2)); 252 assertNotNull("device should be found", service.getDevice(DID2));
253 assertEquals("incorrect device count", 1, service.getDeviceCount()); 253 assertEquals("incorrect device count", 1, service.getDeviceCount());
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
20 <module>api</module> 20 <module>api</module>
21 <module>net</module> 21 <module>net</module>
22 <module>store</module> 22 <module>store</module>
23 - <module>trivial</module>
24 </modules> 23 </modules>
25 24
26 <dependencies> 25 <dependencies>
......
<?xml version="1.0" encoding="UTF-8"?>
<!-- Maven build descriptor for the ONOS gossip-based distributed store bundle. -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <!-- Inherits dependency/plugin management from the core store parent. -->
    <parent>
        <groupId>org.onlab.onos</groupId>
        <artifactId>onos-core-store</artifactId>
        <version>1.0.0-SNAPSHOT</version>
        <relativePath>../pom.xml</relativePath>
    </parent>

    <artifactId>onos-core-dist</artifactId>
    <packaging>bundle</packaging>

    <description>ONOS Gossip based distributed store subsystems</description>

    <dependencies>
        <dependency>
            <groupId>org.onlab.onos</groupId>
            <artifactId>onos-api</artifactId>
        </dependency>
        <dependency>
            <groupId>org.onlab.onos</groupId>
            <artifactId>onos-core-serializers</artifactId>
            <version>${project.version}</version>
        </dependency>

        <!-- NIO message-stream framework used for inter-node communication. -->
        <dependency>
            <groupId>org.onlab.onos</groupId>
            <artifactId>onlab-nio</artifactId>
            <version>${project.version}</version>
        </dependency>

        <!-- Jackson, used for the JSON cluster definition file. -->
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-annotations</artifactId>
        </dependency>

        <dependency>
            <groupId>org.apache.felix</groupId>
            <artifactId>org.apache.felix.scr.annotations</artifactId>
        </dependency>
        <dependency>
            <groupId>de.javakaffee</groupId>
            <artifactId>kryo-serializers</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <!-- Generates OSGi declarative-service descriptors from SCR annotations. -->
            <plugin>
                <groupId>org.apache.felix</groupId>
                <artifactId>maven-scr-plugin</artifactId>
            </plugin>
        </plugins>
    </build>

</project>
1 +package org.onlab.onos.store.cluster.impl;
2 +
3 +import com.fasterxml.jackson.core.JsonEncoding;
4 +import com.fasterxml.jackson.core.JsonFactory;
5 +import com.fasterxml.jackson.databind.JsonNode;
6 +import com.fasterxml.jackson.databind.ObjectMapper;
7 +import com.fasterxml.jackson.databind.node.ArrayNode;
8 +import com.fasterxml.jackson.databind.node.ObjectNode;
9 +import org.onlab.onos.cluster.DefaultControllerNode;
10 +import org.onlab.onos.cluster.NodeId;
11 +import org.onlab.packet.IpPrefix;
12 +
13 +import java.io.File;
14 +import java.io.IOException;
15 +import java.util.HashSet;
16 +import java.util.Iterator;
17 +import java.util.Set;
18 +
19 +/**
20 + * Allows for reading and writing cluster definition as a JSON file.
21 + */
22 +public class ClusterDefinitionStore {
23 +
24 + private final File file;
25 +
26 + /**
27 + * Creates a reader/writer of the cluster definition file.
28 + *
29 + * @param filePath location of the definition file
30 + */
31 + public ClusterDefinitionStore(String filePath) {
32 + file = new File(filePath);
33 + }
34 +
35 + /**
36 + * Returns set of the controller nodes, including self.
37 + *
38 + * @return set of controller nodes
39 + */
40 + public Set<DefaultControllerNode> read() throws IOException {
41 + Set<DefaultControllerNode> nodes = new HashSet<>();
42 + ObjectMapper mapper = new ObjectMapper();
43 + ObjectNode clusterNodeDef = (ObjectNode) mapper.readTree(file);
44 + Iterator<JsonNode> it = ((ArrayNode) clusterNodeDef.get("nodes")).elements();
45 + while (it.hasNext()) {
46 + ObjectNode nodeDef = (ObjectNode) it.next();
47 + nodes.add(new DefaultControllerNode(new NodeId(nodeDef.get("id").asText()),
48 + IpPrefix.valueOf(nodeDef.get("ip").asText()),
49 + nodeDef.get("tcpPort").asInt(9876)));
50 + }
51 + return nodes;
52 + }
53 +
54 + /**
55 + * Writes the given set of the controller nodes.
56 + *
57 + * @param nodes set of controller nodes
58 + */
59 + public void write(Set<DefaultControllerNode> nodes) throws IOException {
60 + ObjectMapper mapper = new ObjectMapper();
61 + ObjectNode clusterNodeDef = mapper.createObjectNode();
62 + ArrayNode nodeDefs = mapper.createArrayNode();
63 + clusterNodeDef.set("nodes", nodeDefs);
64 + for (DefaultControllerNode node : nodes) {
65 + ObjectNode nodeDef = mapper.createObjectNode();
66 + nodeDef.put("id", node.id().toString())
67 + .put("ip", node.ip().toString())
68 + .put("tcpPort", node.tcpPort());
69 + nodeDefs.add(nodeDef);
70 + }
71 + mapper.writeTree(new JsonFactory().createGenerator(file, JsonEncoding.UTF8),
72 + clusterNodeDef);
73 + }
74 +
75 +}
1 +package org.onlab.onos.store.cluster.impl;
2 +
3 +import com.google.common.collect.ImmutableSet;
4 +import org.apache.felix.scr.annotations.Activate;
5 +import org.apache.felix.scr.annotations.Component;
6 +import org.apache.felix.scr.annotations.Deactivate;
7 +import org.apache.felix.scr.annotations.Service;
8 +import org.onlab.nio.AcceptorLoop;
9 +import org.onlab.nio.IOLoop;
10 +import org.onlab.nio.MessageStream;
11 +import org.onlab.onos.cluster.ClusterEvent;
12 +import org.onlab.onos.cluster.ClusterStore;
13 +import org.onlab.onos.cluster.ClusterStoreDelegate;
14 +import org.onlab.onos.cluster.ControllerNode;
15 +import org.onlab.onos.cluster.DefaultControllerNode;
16 +import org.onlab.onos.cluster.NodeId;
17 +import org.onlab.onos.store.AbstractStore;
18 +import org.onlab.packet.IpPrefix;
19 +import org.slf4j.Logger;
20 +import org.slf4j.LoggerFactory;
21 +
22 +import java.io.IOException;
23 +import java.net.InetSocketAddress;
24 +import java.net.Socket;
25 +import java.net.SocketAddress;
26 +import java.nio.channels.ByteChannel;
27 +import java.nio.channels.SelectionKey;
28 +import java.nio.channels.ServerSocketChannel;
29 +import java.nio.channels.SocketChannel;
30 +import java.util.ArrayList;
31 +import java.util.List;
32 +import java.util.Map;
33 +import java.util.Objects;
34 +import java.util.Set;
35 +import java.util.Timer;
36 +import java.util.TimerTask;
37 +import java.util.concurrent.ConcurrentHashMap;
38 +import java.util.concurrent.ExecutorService;
39 +import java.util.concurrent.Executors;
40 +
41 +import static java.net.InetAddress.getByAddress;
42 +import static org.onlab.onos.cluster.ControllerNode.State;
43 +import static org.onlab.packet.IpPrefix.valueOf;
44 +import static org.onlab.util.Tools.namedThreads;
45 +
46 +/**
47 + * Distributed implementation of the cluster nodes store.
48 + */
49 +@Component(immediate = true)
50 +@Service
51 +public class DistributedClusterStore
52 + extends AbstractStore<ClusterEvent, ClusterStoreDelegate>
53 + implements ClusterStore {
54 +
55 + private static final int HELLO_MSG = 1;
56 + private static final int ECHO_MSG = 2;
57 +
58 + private final Logger log = LoggerFactory.getLogger(getClass());
59 +
60 + private static final long CONNECTION_CUSTODIAN_DELAY = 1000L;
61 + private static final long CONNECTION_CUSTODIAN_FREQUENCY = 5000;
62 +
63 + private static final long START_TIMEOUT = 1000;
64 + private static final long SELECT_TIMEOUT = 50;
65 + private static final int WORKERS = 3;
66 + private static final int COMM_BUFFER_SIZE = 32 * 1024;
67 + private static final int COMM_IDLE_TIME = 500;
68 +
69 + private static final boolean SO_NO_DELAY = false;
70 + private static final int SO_SEND_BUFFER_SIZE = COMM_BUFFER_SIZE;
71 + private static final int SO_RCV_BUFFER_SIZE = COMM_BUFFER_SIZE;
72 +
73 + private DefaultControllerNode self;
74 + private final Map<NodeId, DefaultControllerNode> nodes = new ConcurrentHashMap<>();
75 + private final Map<NodeId, State> states = new ConcurrentHashMap<>();
76 +
77 + // Means to track message streams to other nodes.
78 + private final Map<NodeId, TLVMessageStream> streams = new ConcurrentHashMap<>();
79 + private final Map<SocketChannel, DefaultControllerNode> nodesByChannel = new ConcurrentHashMap<>();
80 +
81 + // Executor pools for listening and managing connections to other nodes.
82 + private final ExecutorService listenExecutor =
83 + Executors.newSingleThreadExecutor(namedThreads("onos-comm-listen"));
84 + private final ExecutorService commExecutors =
85 + Executors.newFixedThreadPool(WORKERS, namedThreads("onos-comm-cluster"));
86 + private final ExecutorService heartbeatExecutor =
87 + Executors.newSingleThreadExecutor(namedThreads("onos-comm-heartbeat"));
88 +
89 + private final Timer timer = new Timer("onos-comm-initiator");
90 + private final TimerTask connectionCustodian = new ConnectionCustodian();
91 +
92 + private ListenLoop listenLoop;
93 + private List<CommLoop> commLoops = new ArrayList<>(WORKERS);
94 +
95 + @Activate
96 + public void activate() {
97 + loadClusterDefinition();
98 + startCommunications();
99 + startListening();
100 + startInitiating();
101 + log.info("Started");
102 + }
103 +
104 + @Deactivate
105 + public void deactivate() {
106 + listenLoop.shutdown();
107 + for (CommLoop loop : commLoops) {
108 + loop.shutdown();
109 + }
110 + log.info("Stopped");
111 + }
112 +
113 + // Loads the cluster definition file
114 + private void loadClusterDefinition() {
115 +// ClusterDefinitionStore cds = new ClusterDefinitionStore("../config/cluster.json");
116 +// try {
117 +// Set<DefaultControllerNode> storedNodes = cds.read();
118 +// for (DefaultControllerNode node : storedNodes) {
119 +// nodes.put(node.id(), node);
120 +// }
121 +// } catch (IOException e) {
122 +// log.error("Unable to read cluster definitions", e);
123 +// }
124 +
125 + // Establishes the controller's own identity.
126 + IpPrefix ip = valueOf(System.getProperty("onos.ip", "127.0.1.1"));
127 + self = nodes.get(new NodeId(ip.toString()));
128 +
129 + // As a fall-back, let's make sure we at least know who we are.
130 + if (self == null) {
131 + self = new DefaultControllerNode(new NodeId(ip.toString()), ip);
132 + nodes.put(self.id(), self);
133 + states.put(self.id(), State.ACTIVE);
134 + }
135 + }
136 +
137 + // Kicks off the IO loops.
138 + private void startCommunications() {
139 + for (int i = 0; i < WORKERS; i++) {
140 + try {
141 + CommLoop loop = new CommLoop();
142 + commLoops.add(loop);
143 + commExecutors.execute(loop);
144 + } catch (IOException e) {
145 + log.warn("Unable to start comm IO loop", e);
146 + }
147 + }
148 +
149 + // Wait for the IO loops to start
150 + for (CommLoop loop : commLoops) {
151 + if (!loop.awaitStart(START_TIMEOUT)) {
152 + log.warn("Comm loop did not start on-time; moving on...");
153 + }
154 + }
155 + }
156 +
157 + // Starts listening for connections from peer cluster members.
158 + private void startListening() {
159 + try {
160 + listenLoop = new ListenLoop(self.ip(), self.tcpPort());
161 + listenExecutor.execute(listenLoop);
162 + if (!listenLoop.awaitStart(START_TIMEOUT)) {
163 + log.warn("Listen loop did not start on-time; moving on...");
164 + }
165 + } catch (IOException e) {
166 + log.error("Unable to listen for cluster connections", e);
167 + }
168 + }
169 +
170 + /**
171 + * Initiates open connection request and registers the pending socket
172 + * channel with the given IO loop.
173 + *
174 + * @param loop loop with which the channel should be registered
175 + * @throws java.io.IOException if the socket could not be open or connected
176 + */
177 + private void openConnection(DefaultControllerNode node, CommLoop loop) throws IOException {
178 + SocketAddress sa = new InetSocketAddress(getByAddress(node.ip().toOctets()), node.tcpPort());
179 + SocketChannel ch = SocketChannel.open();
180 + nodesByChannel.put(ch, node);
181 + ch.configureBlocking(false);
182 + ch.connect(sa);
183 + loop.connectStream(ch);
184 + }
185 +
186 +
187 + // Attempts to connect to any nodes that do not have an associated connection.
188 + private void startInitiating() {
189 + timer.schedule(connectionCustodian, CONNECTION_CUSTODIAN_DELAY, CONNECTION_CUSTODIAN_FREQUENCY);
190 + }
191 +
192 + @Override
193 + public ControllerNode getLocalNode() {
194 + return self;
195 + }
196 +
197 + @Override
198 + public Set<ControllerNode> getNodes() {
199 + ImmutableSet.Builder<ControllerNode> builder = ImmutableSet.builder();
200 + return builder.addAll(nodes.values()).build();
201 + }
202 +
203 + @Override
204 + public ControllerNode getNode(NodeId nodeId) {
205 + return nodes.get(nodeId);
206 + }
207 +
208 + @Override
209 + public State getState(NodeId nodeId) {
210 + State state = states.get(nodeId);
211 + return state == null ? State.INACTIVE : state;
212 + }
213 +
214 + @Override
215 + public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
216 + DefaultControllerNode node = new DefaultControllerNode(nodeId, ip, tcpPort);
217 + nodes.put(nodeId, node);
218 + return node;
219 + }
220 +
221 + @Override
222 + public void removeNode(NodeId nodeId) {
223 + nodes.remove(nodeId);
224 + TLVMessageStream stream = streams.remove(nodeId);
225 + if (stream != null) {
226 + stream.close();
227 + }
228 + }
229 +
230 + // Listens and accepts inbound connections from other cluster nodes.
231 + private class ListenLoop extends AcceptorLoop {
232 + ListenLoop(IpPrefix ip, int tcpPort) throws IOException {
233 + super(SELECT_TIMEOUT, new InetSocketAddress(getByAddress(ip.toOctets()), tcpPort));
234 + }
235 +
236 + @Override
237 + protected void acceptConnection(ServerSocketChannel channel) throws IOException {
238 + SocketChannel sc = channel.accept();
239 + sc.configureBlocking(false);
240 +
241 + Socket so = sc.socket();
242 + so.setTcpNoDelay(SO_NO_DELAY);
243 + so.setReceiveBufferSize(SO_RCV_BUFFER_SIZE);
244 + so.setSendBufferSize(SO_SEND_BUFFER_SIZE);
245 +
246 + findLeastUtilizedLoop().acceptStream(sc);
247 + }
248 + }
249 +
250 + private class CommLoop extends IOLoop<TLVMessage, TLVMessageStream> {
251 + CommLoop() throws IOException {
252 + super(SELECT_TIMEOUT);
253 + }
254 +
255 + @Override
256 + protected TLVMessageStream createStream(ByteChannel byteChannel) {
257 + return new TLVMessageStream(this, byteChannel, COMM_BUFFER_SIZE, COMM_IDLE_TIME);
258 + }
259 +
260 + @Override
261 + protected void processMessages(List<TLVMessage> messages, MessageStream<TLVMessage> stream) {
262 + TLVMessageStream tlvStream = (TLVMessageStream) stream;
263 + for (TLVMessage message : messages) {
264 + // TODO: add type-based dispatching here... this is just a hack to get going
265 + if (message.type() == HELLO_MSG) {
266 + processHello(message, tlvStream);
267 + } else if (message.type() == ECHO_MSG) {
268 + processEcho(message, tlvStream);
269 + } else {
270 + log.info("Deal with other messages");
271 + }
272 + }
273 + }
274 +
275 + @Override
276 + public TLVMessageStream acceptStream(SocketChannel channel) {
277 + TLVMessageStream stream = super.acceptStream(channel);
278 + try {
279 + InetSocketAddress sa = (InetSocketAddress) channel.getRemoteAddress();
280 + log.info("Accepted connection from node {}", valueOf(sa.getAddress().getAddress()));
281 + stream.write(createHello(self));
282 +
283 + } catch (IOException e) {
284 + log.warn("Unable to accept connection from an unknown end-point", e);
285 + }
286 + return stream;
287 + }
288 +
289 + @Override
290 + public TLVMessageStream connectStream(SocketChannel channel) {
291 + TLVMessageStream stream = super.connectStream(channel);
292 + DefaultControllerNode node = nodesByChannel.get(channel);
293 + if (node != null) {
294 + log.debug("Opened connection to node {}", node.id());
295 + nodesByChannel.remove(channel);
296 + }
297 + return stream;
298 + }
299 +
300 + @Override
301 + protected void connect(SelectionKey key) throws IOException {
302 + try {
303 + super.connect(key);
304 + TLVMessageStream stream = (TLVMessageStream) key.attachment();
305 + send(stream, createHello(self));
306 + } catch (IOException e) {
307 + if (!Objects.equals(e.getMessage(), "Connection refused")) {
308 + throw e;
309 + }
310 + }
311 + }
312 +
313 + @Override
314 + protected void removeStream(MessageStream<TLVMessage> stream) {
315 + DefaultControllerNode node = ((TLVMessageStream) stream).node();
316 + if (node != null) {
317 + log.info("Closed connection to node {}", node.id());
318 + states.put(node.id(), State.INACTIVE);
319 + streams.remove(node.id());
320 + }
321 + super.removeStream(stream);
322 + }
323 + }
324 +
325 + // Processes a HELLO message from a peer controller node.
326 + private void processHello(TLVMessage message, TLVMessageStream stream) {
327 + // FIXME: pure hack for now
328 + String data = new String(message.data());
329 + String[] fields = data.split(":");
330 + DefaultControllerNode node = new DefaultControllerNode(new NodeId(fields[0]),
331 + valueOf(fields[1]),
332 + Integer.parseInt(fields[2]));
333 + stream.setNode(node);
334 + nodes.put(node.id(), node);
335 + streams.put(node.id(), stream);
336 + states.put(node.id(), State.ACTIVE);
337 + }
338 +
339 + // Processes an ECHO message from a peer controller node.
340 + private void processEcho(TLVMessage message, TLVMessageStream tlvStream) {
341 + // TODO: implement heart-beat refresh
342 + log.info("Dealing with echoes...");
343 + }
344 +
345 + // Sends message to the specified stream.
346 + private void send(TLVMessageStream stream, TLVMessage message) {
347 + try {
348 + stream.write(message);
349 + } catch (IOException e) {
350 + log.warn("Unable to send message to {}", stream.node().id());
351 + }
352 + }
353 +
354 + // Creates a hello message to be sent to a peer controller node.
355 + private TLVMessage createHello(DefaultControllerNode self) {
356 + return new TLVMessage(HELLO_MSG, (self.id() + ":" + self.ip() + ":" + self.tcpPort()).getBytes());
357 + }
358 +
359 + // Sweeps through all controller nodes and attempts to open connection to
360 + // those that presently do not have one.
361 + private class ConnectionCustodian extends TimerTask {
362 + @Override
363 + public void run() {
364 + for (DefaultControllerNode node : nodes.values()) {
365 + if (node != self && !streams.containsKey(node.id())) {
366 + try {
367 + openConnection(node, findLeastUtilizedLoop());
368 + } catch (IOException e) {
369 + log.debug("Unable to connect", e);
370 + }
371 + }
372 + }
373 + }
374 + }
375 +
376 + // Finds the least utilities IO loop.
377 + private CommLoop findLeastUtilizedLoop() {
378 + CommLoop leastUtilized = null;
379 + int minCount = Integer.MAX_VALUE;
380 + for (CommLoop loop : commLoops) {
381 + int count = loop.streamCount();
382 + if (count == 0) {
383 + return loop;
384 + }
385 +
386 + if (count < minCount) {
387 + leastUtilized = loop;
388 + minCount = count;
389 + }
390 + }
391 + return leastUtilized;
392 + }
393 +}
1 +package org.onlab.onos.store.cluster.impl;
2 +
3 +import org.onlab.nio.AbstractMessage;
4 +
5 +import java.util.Objects;
6 +
7 +import static com.google.common.base.MoreObjects.toStringHelper;
8 +
9 +/**
10 + * Base message for cluster-wide communications using TLVs.
11 + */
12 +public class TLVMessage extends AbstractMessage {
13 +
14 + private final int type;
15 + private final byte[] data;
16 +
17 + /**
18 + * Creates an immutable TLV message.
19 + *
20 + * @param type message type
21 + * @param data message data bytes
22 + */
23 + public TLVMessage(int type, byte[] data) {
24 + this.length = data.length + TLVMessageStream.METADATA_LENGTH;
25 + this.type = type;
26 + this.data = data;
27 + }
28 +
29 + /**
30 + * Returns the message type indicator.
31 + *
32 + * @return message type
33 + */
34 + public int type() {
35 + return type;
36 + }
37 +
38 + /**
39 + * Returns the data bytes.
40 + *
41 + * @return message data
42 + */
43 + public byte[] data() {
44 + return data;
45 + }
46 +
47 + @Override
48 + public int hashCode() {
49 + return Objects.hash(type, data);
50 + }
51 +
52 + @Override
53 + public boolean equals(Object obj) {
54 + if (this == obj) {
55 + return true;
56 + }
57 + if (obj == null || getClass() != obj.getClass()) {
58 + return false;
59 + }
60 + final TLVMessage other = (TLVMessage) obj;
61 + return Objects.equals(this.type, other.type) &&
62 + Objects.equals(this.data, other.data);
63 + }
64 +
65 + @Override
66 + public String toString() {
67 + return toStringHelper(this).add("type", type).add("length", length).toString();
68 + }
69 +
70 +}
1 +package org.onlab.onos.store.cluster.impl;
2 +
3 +import org.onlab.nio.IOLoop;
4 +import org.onlab.nio.MessageStream;
5 +import org.onlab.onos.cluster.DefaultControllerNode;
6 +
7 +import java.nio.ByteBuffer;
8 +import java.nio.channels.ByteChannel;
9 +
10 +import static com.google.common.base.Preconditions.checkState;
11 +
12 +/**
13 + * Stream for transferring TLV messages between cluster members.
14 + */
public class TLVMessageStream extends MessageStream<TLVMessage> {

    // Fixed frame header: 8-byte marker + 4-byte type + 4-byte length.
    public static final int METADATA_LENGTH = 16; // 8 + 4 + 4

    // Byte offset of the length field within the header (after marker + type).
    private static final int LENGTH_OFFSET = 12;
    private static final long MARKER = 0xfeedcafecafefeedL;

    // Peer node bound to this stream once its HELLO is processed; null until then.
    private DefaultControllerNode node;

    /**
     * Creates a message stream associated with the specified IO loop and
     * backed by the given byte channel.
     *
     * @param loop IO loop
     * @param byteChannel backing byte channel
     * @param bufferSize size of the backing byte buffers
     * @param maxIdleMillis maximum number of millis the stream can be idle
     */
    protected TLVMessageStream(IOLoop<TLVMessage, ?> loop, ByteChannel byteChannel,
                               int bufferSize, int maxIdleMillis) {
        super(loop, byteChannel, bufferSize, maxIdleMillis);
    }

    /**
     * Returns the node with which this stream is associated.
     *
     * @return controller node
     */
    DefaultControllerNode node() {
        return node;
    }

    /**
     * Sets the node with which this stream is affiliated.
     *
     * @param node controller node
     */
    void setNode(DefaultControllerNode node) {
        checkState(this.node == null, "Stream is already bound to a node");
        this.node = node;
    }

    /**
     * Decodes one TLV message from the buffer, or returns null if a complete
     * frame is not yet available (the buffer position is left untouched in
     * that case so the caller can retry after more bytes arrive).
     * NOTE(review): a frame larger than the stream's buffer size can never
     * satisfy the remaining() check — confirm frames are bounded by bufferSize.
     */
    @Override
    protected TLVMessage read(ByteBuffer buffer) {
        // Do we have enough bytes to read the header? If not, bail.
        if (buffer.remaining() < METADATA_LENGTH) {
            return null;
        }

        // Peek at the length and if we have enough to read the entire message
        // go ahead, otherwise bail.
        int length = buffer.getInt(buffer.position() + LENGTH_OFFSET);
        if (buffer.remaining() < length) {
            return null;
        }

        // At this point, we have enough data to read a complete message.
        long marker = buffer.getLong();
        checkState(marker == MARKER, "Incorrect message marker");

        int type = buffer.getInt();
        length = buffer.getInt();

        // TODO: add deserialization hook here
        // Payload size is the frame length minus the fixed header.
        byte[] data = new byte[length - METADATA_LENGTH];
        buffer.get(data);

        return new TLVMessage(type, data);
    }

    /**
     * Encodes the message as marker, type, total length, then payload —
     * the same layout read() expects.
     */
    @Override
    protected void write(TLVMessage message, ByteBuffer buffer) {
        buffer.putLong(MARKER);
        buffer.putInt(message.type());
        buffer.putInt(message.length());

        // TODO: add serialization hook here
        buffer.put(message.data());
    }

}
package org.onlab.onos.store.device.impl;

import static org.slf4j.LoggerFactory.getLogger;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.cluster.MastershipTerm;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.store.ClockService;
import org.onlab.onos.store.Timestamp;
import org.onlab.onos.store.impl.OnosTimestamp;
import org.slf4j.Logger;

/**
 * Clock service that issues per-device, per-term logical timestamps.
 * A timestamp is the pair (mastership term number, sequence number);
 * the sequence restarts from zero whenever a new term is announced for
 * the device, so timestamps from a later term always compare greater.
 */
@Component(immediate = true)
@Service
public class OnosClockService implements ClockService {

    private final Logger log = getLogger(getClass());

    // Per-device sequence counters; each counter is reset when the device
    // enters a new mastership term (see setMastershipTerm).
    private final ConcurrentMap<DeviceId, AtomicInteger> tickers = new ConcurrentHashMap<>();
    // Current mastership term known for each device.
    private final ConcurrentMap<DeviceId, MastershipTerm> deviceMastershipTerms = new ConcurrentHashMap<>();

    @Activate
    public void activate() {
        log.info("Started");
    }

    @Deactivate
    public void deactivate() {
        log.info("Stopped");
    }

    /**
     * Returns a new timestamp for the given device.
     *
     * @param deviceId device identifier
     * @return timestamp composed of the current term and next sequence number
     * @throws IllegalStateException if no mastership term is known for the device
     */
    @Override
    public Timestamp getTimestamp(DeviceId deviceId) {
        MastershipTerm term = deviceMastershipTerms.get(deviceId);
        if (term == null) {
            throw new IllegalStateException("Requesting timestamp for a deviceId without mastership");
        }
        AtomicInteger ticker = tickers.get(deviceId);
        if (ticker == null) {
            // Lazily create the counter; putIfAbsent keeps this race-safe.
            AtomicInteger fresh = new AtomicInteger(0);
            AtomicInteger prev = tickers.putIfAbsent(deviceId, fresh);
            ticker = (prev == null) ? fresh : prev;
        }
        return new OnosTimestamp(term.termNumber(), ticker.incrementAndGet());
    }

    /**
     * Records a new mastership term for the device and resets its sequence
     * counter so timestamps restart at (term, 1).
     * NOTE(review): the two puts are not atomic; a concurrent getTimestamp
     * may briefly pair the new term with an old sequence number, which is
     * still monotonically safe since the term dominates comparison.
     *
     * @param deviceId device identifier
     * @param term new mastership term
     */
    @Override
    public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
        deviceMastershipTerms.put(deviceId, term);
        tickers.put(deviceId, new AtomicInteger(0));
    }
}
package org.onlab.onos.store.device.impl;

import static com.google.common.base.Predicates.notNull;
import static com.google.common.base.Preconditions.checkState;

import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSet.Builder;

import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.net.DefaultDevice;
import org.onlab.onos.net.DefaultPort;
import org.onlab.onos.net.Device;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Port;
import org.onlab.onos.net.PortNumber;
import org.onlab.onos.net.device.DeviceDescription;
import org.onlab.onos.net.device.DeviceEvent;
import org.onlab.onos.net.device.DeviceStore;
import org.onlab.onos.net.device.DeviceStoreDelegate;
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.store.AbstractStore;
import org.onlab.onos.store.ClockService;
import org.onlab.onos.store.Timestamp;
import org.slf4j.Logger;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import static com.google.common.base.Preconditions.checkArgument;
import static org.onlab.onos.net.device.DeviceEvent.Type.*;
import static org.slf4j.LoggerFactory.getLogger;

/**
 * Manages inventory of infrastructure devices using a protocol that takes into consideration
 * the order in which device events occur. Each entry is wrapped in a
 * {@link VersionedValue} carrying a logical timestamp from the clock service;
 * stale updates (older timestamps) are rejected.
 */
@Component(immediate = true)
@Service
public class OnosDistributedDeviceStore
        extends AbstractStore<DeviceEvent, DeviceStoreDelegate>
        implements DeviceStore {

    private final Logger log = getLogger(getClass());

    public static final String DEVICE_NOT_FOUND = "Device with ID %s not found";

    // Versioned device inventory and per-device port maps.
    private ConcurrentHashMap<DeviceId, VersionedValue<Device>> devices;
    private ConcurrentHashMap<DeviceId, Map<PortNumber, VersionedValue<Port>>> devicePorts;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected ClockService clockService;

    @Activate
    public void activate() {

        devices = new ConcurrentHashMap<>();
        devicePorts = new ConcurrentHashMap<>();

        log.info("Started");
    }

    @Deactivate
    public void deactivate() {
        log.info("Stopped");
    }

    @Override
    public int getDeviceCount() {
        return devices.size();
    }

    @Override
    public Iterable<Device> getDevices() {
        Builder<Device> builder = ImmutableSet.builder();
        synchronized (this) {
            for (VersionedValue<Device> device : devices.values()) {
                builder.add(device.entity());
            }
            return builder.build();
        }
    }

    @Override
    public Device getDevice(DeviceId deviceId) {
        VersionedValue<Device> device = devices.get(deviceId);
        checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
        return device.entity();
    }

    @Override
    public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId,
                                            DeviceDescription deviceDescription) {
        Timestamp newTimestamp = clockService.getTimestamp(deviceId);
        VersionedValue<Device> device = devices.get(deviceId);

        if (device == null) {
            return createDevice(providerId, deviceId, deviceDescription, newTimestamp);
        }

        checkState(newTimestamp.compareTo(device.timestamp()) > 0,
                "Existing device has a timestamp in the future!");

        return updateDevice(providerId, device.entity(), deviceDescription, newTimestamp);
    }

    // Creates the device and returns the appropriate event if necessary.
    private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId,
                                     DeviceDescription desc, Timestamp timestamp) {
        Device device = new DefaultDevice(providerId, deviceId, desc.type(),
                                          desc.manufacturer(),
                                          desc.hwVersion(), desc.swVersion(),
                                          desc.serialNumber());

        devices.put(deviceId, new VersionedValue<>(device, true, timestamp));
        // TODO,FIXME: broadcast a message telling peers of a device event.
        return new DeviceEvent(DEVICE_ADDED, device, null);
    }

    // Updates the device and returns the appropriate event if necessary.
    private DeviceEvent updateDevice(ProviderId providerId, Device device,
                                     DeviceDescription desc, Timestamp timestamp) {
        // We allow only certain attributes to trigger update
        if (!Objects.equals(device.hwVersion(), desc.hwVersion()) ||
                !Objects.equals(device.swVersion(), desc.swVersion())) {

            Device updated = new DefaultDevice(providerId, device.id(),
                                               desc.type(),
                                               desc.manufacturer(),
                                               desc.hwVersion(),
                                               desc.swVersion(),
                                               desc.serialNumber());
            devices.put(device.id(), new VersionedValue<Device>(updated, true, timestamp));
            // FIXME: broadcast a message telling peers of a device event.
            return new DeviceEvent(DeviceEvent.Type.DEVICE_UPDATED, updated, null);
        }

        // Otherwise merely attempt to change availability
        Device updated = new DefaultDevice(providerId, device.id(),
                                           desc.type(),
                                           desc.manufacturer(),
                                           desc.hwVersion(),
                                           desc.swVersion(),
                                           desc.serialNumber());

        VersionedValue<Device> oldDevice = devices.put(device.id(),
                new VersionedValue<Device>(updated, true, timestamp));
        if (!oldDevice.isUp()) {
            // FIX: the event subject is the freshly stored device, not the stale one.
            return new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, updated, null);
        } else {
            return null;
        }
    }

    @Override
    public DeviceEvent markOffline(DeviceId deviceId) {
        VersionedValue<Device> device = devices.get(deviceId);
        boolean willRemove = device != null && device.isUp();
        if (!willRemove) {
            return null;
        }
        Timestamp timestamp = clockService.getTimestamp(deviceId);
        if (replaceIfLatest(device.entity(), false, timestamp)) {
            return new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device.entity(), null);
        }
        return null;
    }

    // Replace existing value if its timestamp is older.
    private synchronized boolean replaceIfLatest(Device device, boolean isUp, Timestamp timestamp) {
        VersionedValue<Device> existingValue = devices.get(device.id());
        if (timestamp.compareTo(existingValue.timestamp()) > 0) {
            devices.put(device.id(), new VersionedValue<Device>(device, isUp, timestamp));
            return true;
        }
        return false;
    }

    @Override
    public List<DeviceEvent> updatePorts(DeviceId deviceId,
                                         List<PortDescription> portDescriptions) {
        List<DeviceEvent> events = new ArrayList<>();
        synchronized (this) {
            VersionedValue<Device> device = devices.get(deviceId);
            checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
            Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId);
            Timestamp newTimestamp = clockService.getTimestamp(deviceId);

            // Add new ports and update existing ones.
            Set<PortNumber> processed = new HashSet<>();
            for (PortDescription portDescription : portDescriptions) {
                VersionedValue<Port> port = ports.get(portDescription.portNumber());
                if (port == null) {
                    // FIX: previously fell through after creation and
                    // dereferenced the null port below, guaranteeing an NPE
                    // for every newly discovered port.
                    events.add(createPort(device, portDescription, ports, newTimestamp));
                } else {
                    checkState(newTimestamp.compareTo(port.timestamp()) > 0,
                            "Existing port state has a timestamp in the future!");
                    events.add(updatePort(device.entity(), port.entity(),
                                          portDescription, ports, newTimestamp));
                }
                processed.add(portDescription.portNumber());
            }

            updatePortMap(deviceId, ports);

            events.addAll(pruneOldPorts(device.entity(), ports, processed));
        }
        return FluentIterable.from(events).filter(notNull()).toList();
    }

    // Creates a new port based on the port description adds it to the map and
    // Returns corresponding event.
    //@GuardedBy("this")
    private DeviceEvent createPort(VersionedValue<Device> device, PortDescription portDescription,
                                   Map<PortNumber, VersionedValue<Port>> ports, Timestamp timestamp) {
        Port port = new DefaultPort(device.entity(), portDescription.portNumber(),
                                    portDescription.isEnabled());
        ports.put(port.number(), new VersionedValue<Port>(port, true, timestamp));
        updatePortMap(device.entity().id(), ports);
        return new DeviceEvent(PORT_ADDED, device.entity(), port);
    }

    // Checks if the specified port requires update and if so, it replaces the
    // existing entry in the map and returns corresponding event.
    //@GuardedBy("this")
    private DeviceEvent updatePort(Device device, Port port,
                                   PortDescription portDescription,
                                   Map<PortNumber, VersionedValue<Port>> ports,
                                   Timestamp timestamp) {
        if (port.isEnabled() != portDescription.isEnabled()) {
            VersionedValue<Port> updatedPort = new VersionedValue<Port>(
                    new DefaultPort(device, portDescription.portNumber(),
                                    portDescription.isEnabled()),
                    portDescription.isEnabled(),
                    timestamp);
            ports.put(port.number(), updatedPort);
            updatePortMap(device.id(), ports);
            return new DeviceEvent(PORT_UPDATED, device, updatedPort.entity());
        }
        return null;
    }

    // Prunes the specified list of ports based on which ports are in the
    // processed list and returns list of corresponding events.
    //@GuardedBy("this")
    private List<DeviceEvent> pruneOldPorts(Device device,
                                            Map<PortNumber, VersionedValue<Port>> ports,
                                            Set<PortNumber> processed) {
        List<DeviceEvent> events = new ArrayList<>();
        Iterator<PortNumber> iterator = ports.keySet().iterator();
        while (iterator.hasNext()) {
            PortNumber portNumber = iterator.next();
            if (!processed.contains(portNumber)) {
                events.add(new DeviceEvent(PORT_REMOVED, device,
                                           ports.get(portNumber).entity()));
                iterator.remove();
            }
        }
        if (!events.isEmpty()) {
            updatePortMap(device.id(), ports);
        }
        return events;
    }

    // Gets the map of ports for the specified device; if one does not already
    // exist, it creates and registers a new one.
    // WARN: returned value is a copy, changes made to the Map
    // needs to be written back using updatePortMap
    //@GuardedBy("this")
    private Map<PortNumber, VersionedValue<Port>> getPortMap(DeviceId deviceId) {
        Map<PortNumber, VersionedValue<Port>> ports = devicePorts.get(deviceId);
        if (ports == null) {
            ports = new HashMap<>();
            // this probably is waste of time in most cases.
            updatePortMap(deviceId, ports);
        }
        return ports;
    }

    //@GuardedBy("this")
    private void updatePortMap(DeviceId deviceId, Map<PortNumber, VersionedValue<Port>> ports) {
        devicePorts.put(deviceId, ports);
    }

    @Override
    public DeviceEvent updatePortStatus(DeviceId deviceId,
                                        PortDescription portDescription) {
        VersionedValue<Device> device = devices.get(deviceId);
        checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
        Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId);
        VersionedValue<Port> port = ports.get(portDescription.portNumber());
        // FIX: fail with a descriptive message rather than an NPE when the
        // port has not been seen before.
        checkArgument(port != null, "Port %s of device %s not found",
                      portDescription.portNumber(), deviceId);
        Timestamp timestamp = clockService.getTimestamp(deviceId);
        return updatePort(device.entity(), port.entity(), portDescription, ports, timestamp);
    }

    @Override
    public List<Port> getPorts(DeviceId deviceId) {
        Map<PortNumber, VersionedValue<Port>> versionedPorts = devicePorts.get(deviceId);
        if (versionedPorts == null) {
            return Collections.emptyList();
        }
        List<Port> ports = new ArrayList<>();
        for (VersionedValue<Port> port : versionedPorts.values()) {
            ports.add(port.entity());
        }
        return ports;
    }

    @Override
    public Port getPort(DeviceId deviceId, PortNumber portNumber) {
        Map<PortNumber, VersionedValue<Port>> ports = devicePorts.get(deviceId);
        if (ports == null) {
            return null;
        }
        // FIX: previously NPE'd when the device was known but the port was not.
        VersionedValue<Port> port = ports.get(portNumber);
        return port == null ? null : port.entity();
    }

    @Override
    public boolean isAvailable(DeviceId deviceId) {
        // FIX: previously NPE'd for unknown devices; treat them as unavailable.
        VersionedValue<Device> device = devices.get(deviceId);
        return device != null && device.isUp();
    }

    @Override
    public DeviceEvent removeDevice(DeviceId deviceId) {
        VersionedValue<Device> previousDevice = devices.remove(deviceId);
        return previousDevice == null ? null :
                new DeviceEvent(DEVICE_REMOVED, previousDevice.entity(), null);
    }
}
1 +package org.onlab.onos.store.device.impl;
2 +
3 +import org.onlab.onos.store.Timestamp;
4 +
5 +/**
6 + * Wrapper class for a entity that is versioned
7 + * and can either be up or down.
8 + *
9 + * @param <T> type of the value.
10 + */
11 +public class VersionedValue<T> {
12 + private final T entity;
13 + private final Timestamp timestamp;
14 + private final boolean isUp;
15 +
16 + public VersionedValue(T entity, boolean isUp, Timestamp timestamp) {
17 + this.entity = entity;
18 + this.isUp = isUp;
19 + this.timestamp = timestamp;
20 + }
21 +
22 + /**
23 + * Returns the value.
24 + * @return value.
25 + */
26 + public T entity() {
27 + return entity;
28 + }
29 +
30 + /**
31 + * Tells whether the entity is up or down.
32 + * @return true if up, false otherwise.
33 + */
34 + public boolean isUp() {
35 + return isUp;
36 + }
37 +
38 + /**
39 + * Returns the timestamp (version) associated with this entity.
40 + * @return timestamp.
41 + */
42 + public Timestamp timestamp() {
43 + return timestamp;
44 + }
45 +}
1 +/**
2 + * Implementation of device store using distributed structures.
3 + */
4 +package org.onlab.onos.store.device.impl;
1 +package org.onlab.onos.store.flow.impl;
2 +
3 +import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_ADDED;
4 +import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
5 +import static org.slf4j.LoggerFactory.getLogger;
6 +
7 +import java.util.Collection;
8 +import java.util.Collections;
9 +
10 +import org.apache.felix.scr.annotations.Activate;
11 +import org.apache.felix.scr.annotations.Component;
12 +import org.apache.felix.scr.annotations.Deactivate;
13 +import org.apache.felix.scr.annotations.Service;
14 +import org.onlab.onos.ApplicationId;
15 +import org.onlab.onos.net.DeviceId;
16 +import org.onlab.onos.net.flow.DefaultFlowRule;
17 +import org.onlab.onos.net.flow.FlowRule;
18 +import org.onlab.onos.net.flow.FlowRule.FlowRuleState;
19 +import org.onlab.onos.net.flow.FlowRuleEvent;
20 +import org.onlab.onos.net.flow.FlowRuleEvent.Type;
21 +import org.onlab.onos.net.flow.FlowRuleStore;
22 +import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
23 +import org.onlab.onos.store.AbstractStore;
24 +import org.slf4j.Logger;
25 +
26 +import com.google.common.collect.ArrayListMultimap;
27 +import com.google.common.collect.ImmutableSet;
28 +import com.google.common.collect.Multimap;
29 +
30 +/**
31 + * Manages inventory of flow rules using trivial in-memory implementation.
32 + */
33 +//FIXME: I LIE I AM NOT DISTRIBUTED
34 +@Component(immediate = true)
35 +@Service
36 +public class DistributedFlowRuleStore
37 +extends AbstractStore<FlowRuleEvent, FlowRuleStoreDelegate>
38 +implements FlowRuleStore {
39 +
40 + private final Logger log = getLogger(getClass());
41 +
42 + // store entries as a pile of rules, no info about device tables
43 + private final Multimap<DeviceId, FlowRule> flowEntries =
44 + ArrayListMultimap.<DeviceId, FlowRule>create();
45 +
46 + private final Multimap<ApplicationId, FlowRule> flowEntriesById =
47 + ArrayListMultimap.<ApplicationId, FlowRule>create();
48 +
49 + @Activate
50 + public void activate() {
51 + log.info("Started");
52 + }
53 +
54 + @Deactivate
55 + public void deactivate() {
56 + log.info("Stopped");
57 + }
58 +
59 +
60 + @Override
61 + public synchronized FlowRule getFlowRule(FlowRule rule) {
62 + for (FlowRule f : flowEntries.get(rule.deviceId())) {
63 + if (f.equals(rule)) {
64 + return f;
65 + }
66 + }
67 + return null;
68 + }
69 +
70 + @Override
71 + public synchronized Iterable<FlowRule> getFlowEntries(DeviceId deviceId) {
72 + Collection<FlowRule> rules = flowEntries.get(deviceId);
73 + if (rules == null) {
74 + return Collections.emptyList();
75 + }
76 + return ImmutableSet.copyOf(rules);
77 + }
78 +
79 + @Override
80 + public synchronized Iterable<FlowRule> getFlowEntriesByAppId(ApplicationId appId) {
81 + Collection<FlowRule> rules = flowEntriesById.get(appId);
82 + if (rules == null) {
83 + return Collections.emptyList();
84 + }
85 + return ImmutableSet.copyOf(rules);
86 + }
87 +
88 + @Override
89 + public synchronized void storeFlowRule(FlowRule rule) {
90 + FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_ADD);
91 + DeviceId did = f.deviceId();
92 + if (!flowEntries.containsEntry(did, f)) {
93 + flowEntries.put(did, f);
94 + flowEntriesById.put(rule.appId(), f);
95 + }
96 + }
97 +
98 + @Override
99 + public synchronized void deleteFlowRule(FlowRule rule) {
100 + FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_REMOVE);
101 + DeviceId did = f.deviceId();
102 +
103 + /*
104 + * find the rule and mark it for deletion.
105 + * Ultimately a flow removed will come remove it.
106 + */
107 +
108 + if (flowEntries.containsEntry(did, f)) {
109 + //synchronized (flowEntries) {
110 + flowEntries.remove(did, f);
111 + flowEntries.put(did, f);
112 + flowEntriesById.remove(rule.appId(), rule);
113 + //}
114 + }
115 + }
116 +
117 + @Override
118 + public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowRule rule) {
119 + DeviceId did = rule.deviceId();
120 +
121 + // check if this new rule is an update to an existing entry
122 + if (flowEntries.containsEntry(did, rule)) {
123 + //synchronized (flowEntries) {
124 + // Multimaps support duplicates so we have to remove our rule
125 + // and replace it with the current version.
126 + flowEntries.remove(did, rule);
127 + flowEntries.put(did, rule);
128 + //}
129 + return new FlowRuleEvent(Type.RULE_UPDATED, rule);
130 + }
131 +
132 + flowEntries.put(did, rule);
133 + return new FlowRuleEvent(RULE_ADDED, rule);
134 + }
135 +
136 + @Override
137 + public synchronized FlowRuleEvent removeFlowRule(FlowRule rule) {
138 + //synchronized (this) {
139 + if (flowEntries.remove(rule.deviceId(), rule)) {
140 + return new FlowRuleEvent(RULE_REMOVED, rule);
141 + } else {
142 + return null;
143 + }
144 + //}
145 + }
146 +
147 +
148 +
149 +
150 +
151 +
152 +
153 +}
package org.onlab.onos.store.host.impl;

import static org.onlab.onos.net.host.HostEvent.Type.HOST_ADDED;
import static org.onlab.onos.net.host.HostEvent.Type.HOST_MOVED;
import static org.onlab.onos.net.host.HostEvent.Type.HOST_REMOVED;
import static org.onlab.onos.net.host.HostEvent.Type.HOST_UPDATED;
import static org.slf4j.LoggerFactory.getLogger;

import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.DefaultHost;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Host;
import org.onlab.onos.net.HostId;
import org.onlab.onos.net.host.HostDescription;
import org.onlab.onos.net.host.HostEvent;
import org.onlab.onos.net.host.HostStore;
import org.onlab.onos.net.host.HostStoreDelegate;
import org.onlab.onos.net.host.PortAddresses;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.store.AbstractStore;
import org.onlab.packet.IpPrefix;
import org.onlab.packet.MacAddress;
import org.onlab.packet.VlanId;
import org.slf4j.Logger;

import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;

/**
 * Manages inventory of end-station hosts using trivial in-memory
 * implementation.
 * Thread-safety: the hosts map is concurrent; the (non-thread-safe)
 * locations multimap is guarded by "this"; portAddresses is guarded by
 * its own monitor.
 */
//FIXME: I LIE I AM NOT DISTRIBUTED
@Component(immediate = true)
@Service
public class DistributedHostStore
extends AbstractStore<HostEvent, HostStoreDelegate>
implements HostStore {

    private final Logger log = getLogger(getClass());

    // Host inventory
    private final Map<HostId, Host> hosts = new ConcurrentHashMap<>();

    // Hosts tracked by their location; guarded by "this"
    private final Multimap<ConnectPoint, Host> locations = HashMultimap.create();

    // Address bindings per connection point; guarded by its own monitor
    private final Map<ConnectPoint, PortAddresses> portAddresses =
            new ConcurrentHashMap<>();

    @Activate
    public void activate() {
        log.info("Started");
    }

    @Deactivate
    public void deactivate() {
        log.info("Stopped");
    }

    @Override
    public HostEvent createOrUpdateHost(ProviderId providerId, HostId hostId,
                                        HostDescription hostDescription) {
        Host host = hosts.get(hostId);
        if (host == null) {
            return createHost(providerId, hostId, hostDescription);
        }
        return updateHost(providerId, host, hostDescription);
    }

    // creates a new host and sends HOST_ADDED
    private HostEvent createHost(ProviderId providerId, HostId hostId,
                                 HostDescription descr) {
        DefaultHost newhost = new DefaultHost(providerId, hostId,
                                              descr.hwAddress(),
                                              descr.vlan(),
                                              descr.location(),
                                              descr.ipAddresses());
        synchronized (this) {
            hosts.put(hostId, newhost);
            locations.put(descr.location(), newhost);
        }
        return new HostEvent(HOST_ADDED, newhost);
    }

    // checks for type of update to host, sends appropriate event
    private HostEvent updateHost(ProviderId providerId, Host host,
                                 HostDescription descr) {
        DefaultHost updated;
        HostEvent event;
        if (!host.location().equals(descr.location())) {
            // location change trumps everything else
            updated = new DefaultHost(providerId, host.id(),
                                      host.mac(),
                                      host.vlan(),
                                      descr.location(),
                                      host.ipAddresses());
            event = new HostEvent(HOST_MOVED, updated);

        } else if (!(host.ipAddresses().equals(descr.ipAddresses()))) {
            updated = new DefaultHost(providerId, host.id(),
                                      host.mac(),
                                      host.vlan(),
                                      descr.location(),
                                      descr.ipAddresses());
            event = new HostEvent(HOST_UPDATED, updated);
        } else {
            // nothing of interest changed
            return null;
        }
        synchronized (this) {
            hosts.put(host.id(), updated);
            locations.remove(host.location(), host);
            locations.put(updated.location(), updated);
        }
        return event;
    }

    @Override
    public HostEvent removeHost(HostId hostId) {
        synchronized (this) {
            Host host = hosts.remove(hostId);
            if (host != null) {
                locations.remove((host.location()), host);
                return new HostEvent(HOST_REMOVED, host);
            }
            return null;
        }
    }

    @Override
    public int getHostCount() {
        return hosts.size();
    }

    @Override
    public Iterable<Host> getHosts() {
        return Collections.unmodifiableSet(new HashSet<>(hosts.values()));
    }

    @Override
    public Host getHost(HostId hostId) {
        return hosts.get(hostId);
    }

    @Override
    public Set<Host> getHosts(VlanId vlanId) {
        Set<Host> vlanset = new HashSet<>();
        for (Host h : hosts.values()) {
            if (h.vlan().equals(vlanId)) {
                vlanset.add(h);
            }
        }
        return vlanset;
    }

    @Override
    public Set<Host> getHosts(MacAddress mac) {
        Set<Host> macset = new HashSet<>();
        for (Host h : hosts.values()) {
            if (h.mac().equals(mac)) {
                macset.add(h);
            }
        }
        return macset;
    }

    @Override
    public Set<Host> getHosts(IpPrefix ip) {
        Set<Host> ipset = new HashSet<>();
        for (Host h : hosts.values()) {
            if (h.ipAddresses().contains(ip)) {
                ipset.add(h);
            }
        }
        return ipset;
    }

    @Override
    public Set<Host> getConnectedHosts(ConnectPoint connectPoint) {
        // FIX: locations is a plain HashMultimap mutated under "this";
        // reading it without the same lock raced with writers.
        synchronized (this) {
            return ImmutableSet.copyOf(locations.get(connectPoint));
        }
    }

    @Override
    public Set<Host> getConnectedHosts(DeviceId deviceId) {
        Set<Host> hostset = new HashSet<>();
        // FIX: iteration over locations must hold the lock writers use.
        synchronized (this) {
            for (ConnectPoint p : locations.keySet()) {
                if (p.deviceId().equals(deviceId)) {
                    hostset.addAll(locations.get(p));
                }
            }
        }
        return hostset;
    }

    @Override
    public void updateAddressBindings(PortAddresses addresses) {
        synchronized (portAddresses) {
            PortAddresses existing = portAddresses.get(addresses.connectPoint());
            if (existing == null) {
                portAddresses.put(addresses.connectPoint(), addresses);
            } else {
                // merge IPs; an explicit new mac overrides the existing one
                Set<IpPrefix> union = Sets.union(existing.ips(), addresses.ips())
                        .immutableCopy();

                MacAddress newMac = (addresses.mac() == null) ? existing.mac()
                        : addresses.mac();

                PortAddresses newAddresses =
                        new PortAddresses(addresses.connectPoint(), union, newMac);

                portAddresses.put(newAddresses.connectPoint(), newAddresses);
            }
        }
    }

    @Override
    public void removeAddressBindings(PortAddresses addresses) {
        synchronized (portAddresses) {
            PortAddresses existing = portAddresses.get(addresses.connectPoint());
            if (existing != null) {
                Set<IpPrefix> difference =
                        Sets.difference(existing.ips(), addresses.ips()).immutableCopy();

                // If they removed the existing mac, set the new mac to null.
                // Otherwise, keep the existing mac.
                MacAddress newMac = existing.mac();
                if (addresses.mac() != null && addresses.mac().equals(existing.mac())) {
                    newMac = null;
                }

                PortAddresses newAddresses =
                        new PortAddresses(addresses.connectPoint(), difference, newMac);

                portAddresses.put(newAddresses.connectPoint(), newAddresses);
            }
        }
    }

    @Override
    public void clearAddressBindings(ConnectPoint connectPoint) {
        synchronized (portAddresses) {
            portAddresses.remove(connectPoint);
        }
    }

    @Override
    public Set<PortAddresses> getAddressBindings() {
        synchronized (portAddresses) {
            return new HashSet<>(portAddresses.values());
        }
    }

    @Override
    public PortAddresses getAddressBindingsForPort(ConnectPoint connectPoint) {
        PortAddresses addresses;

        synchronized (portAddresses) {
            addresses = portAddresses.get(connectPoint);
        }

        if (addresses == null) {
            // empty binding for unknown connection points
            addresses = new PortAddresses(connectPoint, null, null);
        }

        return addresses;
    }

}
1 package org.onlab.onos.store.impl; 1 package org.onlab.onos.store.impl;
2 2
3 -import static com.google.common.base.Preconditions.checkNotNull;
4 import static com.google.common.base.Preconditions.checkArgument; 3 import static com.google.common.base.Preconditions.checkArgument;
5 4
6 import java.util.Objects; 5 import java.util.Objects;
7 6
8 -import org.onlab.onos.net.ElementId;
9 import org.onlab.onos.store.Timestamp; 7 import org.onlab.onos.store.Timestamp;
10 8
11 import com.google.common.base.MoreObjects; 9 import com.google.common.base.MoreObjects;
...@@ -14,22 +12,20 @@ import com.google.common.collect.ComparisonChain; ...@@ -14,22 +12,20 @@ import com.google.common.collect.ComparisonChain;
14 // If it is store specific, implement serializable interfaces? 12 // If it is store specific, implement serializable interfaces?
15 /** 13 /**
16 * Default implementation of Timestamp. 14 * Default implementation of Timestamp.
15 + * TODO: Better documentation.
17 */ 16 */
18 public final class OnosTimestamp implements Timestamp { 17 public final class OnosTimestamp implements Timestamp {
19 18
20 - private final ElementId id;
21 private final int termNumber; 19 private final int termNumber;
22 private final int sequenceNumber; 20 private final int sequenceNumber;
23 21
24 /** 22 /**
25 * Default version tuple. 23 * Default version tuple.
26 * 24 *
27 - * @param id identifier of the element
28 * @param termNumber the mastership termNumber 25 * @param termNumber the mastership termNumber
29 * @param sequenceNumber the sequenceNumber number within the termNumber 26 * @param sequenceNumber the sequenceNumber number within the termNumber
30 */ 27 */
31 - public OnosTimestamp(ElementId id, int termNumber, int sequenceNumber) { 28 + public OnosTimestamp(int termNumber, int sequenceNumber) {
32 - this.id = checkNotNull(id);
33 this.termNumber = termNumber; 29 this.termNumber = termNumber;
34 this.sequenceNumber = sequenceNumber; 30 this.sequenceNumber = sequenceNumber;
35 } 31 }
...@@ -38,9 +34,6 @@ public final class OnosTimestamp implements Timestamp { ...@@ -38,9 +34,6 @@ public final class OnosTimestamp implements Timestamp {
38 public int compareTo(Timestamp o) { 34 public int compareTo(Timestamp o) {
39 checkArgument(o instanceof OnosTimestamp, "Must be OnosTimestamp", o); 35 checkArgument(o instanceof OnosTimestamp, "Must be OnosTimestamp", o);
40 OnosTimestamp that = (OnosTimestamp) o; 36 OnosTimestamp that = (OnosTimestamp) o;
41 - checkArgument(this.id.equals(that.id),
42 - "Cannot compare version for different element this:%s, that:%s",
43 - this, that);
44 37
45 return ComparisonChain.start() 38 return ComparisonChain.start()
46 .compare(this.termNumber, that.termNumber) 39 .compare(this.termNumber, that.termNumber)
...@@ -50,7 +43,7 @@ public final class OnosTimestamp implements Timestamp { ...@@ -50,7 +43,7 @@ public final class OnosTimestamp implements Timestamp {
50 43
51 @Override 44 @Override
52 public int hashCode() { 45 public int hashCode() {
53 - return Objects.hash(id, termNumber, sequenceNumber); 46 + return Objects.hash(termNumber, sequenceNumber);
54 } 47 }
55 48
56 @Override 49 @Override
...@@ -62,30 +55,19 @@ public final class OnosTimestamp implements Timestamp { ...@@ -62,30 +55,19 @@ public final class OnosTimestamp implements Timestamp {
62 return false; 55 return false;
63 } 56 }
64 OnosTimestamp that = (OnosTimestamp) obj; 57 OnosTimestamp that = (OnosTimestamp) obj;
65 - return Objects.equals(this.id, that.id) && 58 + return Objects.equals(this.termNumber, that.termNumber) &&
66 - Objects.equals(this.termNumber, that.termNumber) &&
67 Objects.equals(this.sequenceNumber, that.sequenceNumber); 59 Objects.equals(this.sequenceNumber, that.sequenceNumber);
68 } 60 }
69 61
70 @Override 62 @Override
71 public String toString() { 63 public String toString() {
72 return MoreObjects.toStringHelper(getClass()) 64 return MoreObjects.toStringHelper(getClass())
73 - .add("id", id)
74 .add("termNumber", termNumber) 65 .add("termNumber", termNumber)
75 .add("sequenceNumber", sequenceNumber) 66 .add("sequenceNumber", sequenceNumber)
76 .toString(); 67 .toString();
77 } 68 }
78 69
79 /** 70 /**
80 - * Returns the element.
81 - *
82 - * @return element identifier
83 - */
84 - public ElementId id() {
85 - return id;
86 - }
87 -
88 - /**
89 * Returns the termNumber. 71 * Returns the termNumber.
90 * 72 *
91 * @return termNumber 73 * @return termNumber
......
1 +package org.onlab.onos.store.link.impl;
2 +
3 +import static org.onlab.onos.net.Link.Type.DIRECT;
4 +import static org.onlab.onos.net.Link.Type.INDIRECT;
5 +import static org.onlab.onos.net.link.LinkEvent.Type.LINK_ADDED;
6 +import static org.onlab.onos.net.link.LinkEvent.Type.LINK_REMOVED;
7 +import static org.onlab.onos.net.link.LinkEvent.Type.LINK_UPDATED;
8 +import static org.slf4j.LoggerFactory.getLogger;
9 +
10 +import java.util.HashSet;
11 +import java.util.Set;
12 +import java.util.concurrent.ConcurrentHashMap;
13 +import java.util.concurrent.ConcurrentMap;
14 +
15 +import org.apache.felix.scr.annotations.Activate;
16 +import org.apache.felix.scr.annotations.Component;
17 +import org.apache.felix.scr.annotations.Deactivate;
18 +import org.apache.felix.scr.annotations.Reference;
19 +import org.apache.felix.scr.annotations.ReferenceCardinality;
20 +import org.apache.felix.scr.annotations.Service;
21 +import org.onlab.onos.net.ConnectPoint;
22 +import org.onlab.onos.net.DefaultLink;
23 +import org.onlab.onos.net.DeviceId;
24 +import org.onlab.onos.net.Link;
25 +import org.onlab.onos.net.LinkKey;
26 +import org.onlab.onos.net.link.LinkDescription;
27 +import org.onlab.onos.net.link.LinkEvent;
28 +import org.onlab.onos.net.link.LinkStore;
29 +import org.onlab.onos.net.link.LinkStoreDelegate;
30 +import org.onlab.onos.net.provider.ProviderId;
31 +import org.onlab.onos.store.AbstractStore;
32 +import org.onlab.onos.store.ClockService;
33 +import org.onlab.onos.store.Timestamp;
34 +import org.onlab.onos.store.device.impl.VersionedValue;
35 +import org.slf4j.Logger;
36 +
37 +import com.google.common.collect.HashMultimap;
38 +import com.google.common.collect.ImmutableSet;
39 +import com.google.common.collect.Multimap;
40 +import com.google.common.collect.ImmutableSet.Builder;
41 +
42 +import static com.google.common.base.Preconditions.checkArgument;
43 +import static com.google.common.base.Preconditions.checkState;
44 +
45 +/**
46 + * Manages inventory of infrastructure links using a protocol that takes into consideration
47 + * the order in which events occur.
48 + */
49 +// FIXME: This does not yet implement the full protocol.
50 +// The full protocol requires the sender of LLDP message to include the
51 +// version information of src device/port and the receiver to
52 +// take that into account when figuring out if a more recent src
53 +// device/port down event renders the link discovery obsolete.
54 +@Component(immediate = true)
55 +@Service
56 +public class OnosDistributedLinkStore
57 + extends AbstractStore<LinkEvent, LinkStoreDelegate>
58 + implements LinkStore {
59 +
60 + private final Logger log = getLogger(getClass());
61 +
62 + // Link inventory
63 + private ConcurrentMap<LinkKey, VersionedValue<Link>> links;
64 +
65 + public static final String LINK_NOT_FOUND = "Link between %s and %s not found";
66 +
67 + // TODO synchronize?
68 + // Egress and ingress link sets
69 + private final Multimap<DeviceId, VersionedValue<Link>> srcLinks = HashMultimap.create();
70 + private final Multimap<DeviceId, VersionedValue<Link>> dstLinks = HashMultimap.create();
71 +
72 + @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
73 + protected ClockService clockService;
74 +
75 + @Activate
76 + public void activate() {
77 +
78 + links = new ConcurrentHashMap<>();
79 +
80 + log.info("Started");
81 + }
82 +
83 + @Deactivate
84 + public void deactivate() {
85 + log.info("Stopped");
86 + }
87 +
88 + @Override
89 + public int getLinkCount() {
90 + return links.size();
91 + }
92 +
93 + @Override
94 + public Iterable<Link> getLinks() {
95 + Builder<Link> builder = ImmutableSet.builder();
96 + synchronized (this) {
97 + for (VersionedValue<Link> link : links.values()) {
98 + builder.add(link.entity());
99 + }
100 + return builder.build();
101 + }
102 + }
103 +
104 + @Override
105 + public Set<Link> getDeviceEgressLinks(DeviceId deviceId) {
106 + Set<VersionedValue<Link>> egressLinks = ImmutableSet.copyOf(srcLinks.get(deviceId));
107 + Set<Link> rawEgressLinks = new HashSet<>();
108 + for (VersionedValue<Link> link : egressLinks) {
109 + rawEgressLinks.add(link.entity());
110 + }
111 + return rawEgressLinks;
112 + }
113 +
114 + @Override
115 + public Set<Link> getDeviceIngressLinks(DeviceId deviceId) {
116 + Set<VersionedValue<Link>> ingressLinks = ImmutableSet.copyOf(dstLinks.get(deviceId));
117 + Set<Link> rawIngressLinks = new HashSet<>();
118 + for (VersionedValue<Link> link : ingressLinks) {
119 + rawIngressLinks.add(link.entity());
120 + }
121 + return rawIngressLinks;
122 + }
123 +
124 + @Override
125 + public Link getLink(ConnectPoint src, ConnectPoint dst) {
126 + VersionedValue<Link> link = links.get(new LinkKey(src, dst));
127 + checkArgument(link != null, "LINK_NOT_FOUND", src, dst);
128 + return link.entity();
129 + }
130 +
131 + @Override
132 + public Set<Link> getEgressLinks(ConnectPoint src) {
133 + Set<Link> egressLinks = new HashSet<>();
134 + for (VersionedValue<Link> link : srcLinks.get(src.deviceId())) {
135 + if (link.entity().src().equals(src)) {
136 + egressLinks.add(link.entity());
137 + }
138 + }
139 + return egressLinks;
140 + }
141 +
142 + @Override
143 + public Set<Link> getIngressLinks(ConnectPoint dst) {
144 + Set<Link> ingressLinks = new HashSet<>();
145 + for (VersionedValue<Link> link : dstLinks.get(dst.deviceId())) {
146 + if (link.entity().dst().equals(dst)) {
147 + ingressLinks.add(link.entity());
148 + }
149 + }
150 + return ingressLinks;
151 + }
152 +
153 + @Override
154 + public LinkEvent createOrUpdateLink(ProviderId providerId,
155 + LinkDescription linkDescription) {
156 +
157 + final DeviceId destinationDeviceId = linkDescription.dst().deviceId();
158 + final Timestamp newTimestamp = clockService.getTimestamp(destinationDeviceId);
159 +
160 + LinkKey key = new LinkKey(linkDescription.src(), linkDescription.dst());
161 + VersionedValue<Link> link = links.get(key);
162 + if (link == null) {
163 + return createLink(providerId, key, linkDescription, newTimestamp);
164 + }
165 +
166 + checkState(newTimestamp.compareTo(link.timestamp()) > 0,
167 + "Existing Link has a timestamp in the future!");
168 +
169 + return updateLink(providerId, link, key, linkDescription, newTimestamp);
170 + }
171 +
172 + // Creates and stores the link and returns the appropriate event.
173 + private LinkEvent createLink(ProviderId providerId, LinkKey key,
174 + LinkDescription linkDescription, Timestamp timestamp) {
175 + VersionedValue<Link> link = new VersionedValue<Link>(new DefaultLink(providerId, key.src(), key.dst(),
176 + linkDescription.type()), true, timestamp);
177 + synchronized (this) {
178 + links.put(key, link);
179 + addNewLink(link, timestamp);
180 + }
181 + // FIXME: notify peers.
182 + return new LinkEvent(LINK_ADDED, link.entity());
183 + }
184 +
185 + // update Egress and ingress link sets
186 + private void addNewLink(VersionedValue<Link> link, Timestamp timestamp) {
187 + Link rawLink = link.entity();
188 + synchronized (this) {
189 + srcLinks.put(rawLink.src().deviceId(), link);
190 + dstLinks.put(rawLink.dst().deviceId(), link);
191 + }
192 + }
193 +
194 + // Updates, if necessary the specified link and returns the appropriate event.
195 + private LinkEvent updateLink(ProviderId providerId, VersionedValue<Link> existingLink,
196 + LinkKey key, LinkDescription linkDescription, Timestamp timestamp) {
197 + // FIXME confirm Link update condition is OK
198 + if (existingLink.entity().type() == INDIRECT && linkDescription.type() == DIRECT) {
199 + synchronized (this) {
200 +
201 + VersionedValue<Link> updatedLink = new VersionedValue<Link>(
202 + new DefaultLink(providerId, existingLink.entity().src(), existingLink.entity().dst(),
203 + linkDescription.type()), true, timestamp);
204 + links.replace(key, existingLink, updatedLink);
205 +
206 + replaceLink(existingLink, updatedLink);
207 + // FIXME: notify peers.
208 + return new LinkEvent(LINK_UPDATED, updatedLink.entity());
209 + }
210 + }
211 + return null;
212 + }
213 +
214 + // update Egress and ingress link sets
215 + private void replaceLink(VersionedValue<Link> current, VersionedValue<Link> updated) {
216 + synchronized (this) {
217 + srcLinks.remove(current.entity().src().deviceId(), current);
218 + dstLinks.remove(current.entity().dst().deviceId(), current);
219 +
220 + srcLinks.put(current.entity().src().deviceId(), updated);
221 + dstLinks.put(current.entity().dst().deviceId(), updated);
222 + }
223 + }
224 +
225 + @Override
226 + public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) {
227 + synchronized (this) {
228 + LinkKey key = new LinkKey(src, dst);
229 + VersionedValue<Link> link = links.remove(key);
230 + if (link != null) {
231 + removeLink(link);
232 + // notify peers
233 + return new LinkEvent(LINK_REMOVED, link.entity());
234 + }
235 + return null;
236 + }
237 + }
238 +
239 + // update Egress and ingress link sets
240 + private void removeLink(VersionedValue<Link> link) {
241 + synchronized (this) {
242 + srcLinks.remove(link.entity().src().deviceId(), link);
243 + dstLinks.remove(link.entity().dst().deviceId(), link);
244 + }
245 + }
246 +}
1 package org.onlab.onos.store.serializers; 1 package org.onlab.onos.store.serializers;
2 2
3 -import org.onlab.onos.net.ElementId;
4 import org.onlab.onos.store.impl.OnosTimestamp; 3 import org.onlab.onos.store.impl.OnosTimestamp;
5 4
6 import com.esotericsoftware.kryo.Kryo; 5 import com.esotericsoftware.kryo.Kryo;
...@@ -20,18 +19,17 @@ public class OnosTimestampSerializer extends Serializer<OnosTimestamp> { ...@@ -20,18 +19,17 @@ public class OnosTimestampSerializer extends Serializer<OnosTimestamp> {
20 // non-null, immutable 19 // non-null, immutable
21 super(false, true); 20 super(false, true);
22 } 21 }
22 +
23 @Override 23 @Override
24 public void write(Kryo kryo, Output output, OnosTimestamp object) { 24 public void write(Kryo kryo, Output output, OnosTimestamp object) {
25 - kryo.writeClassAndObject(output, object.id());
26 output.writeInt(object.termNumber()); 25 output.writeInt(object.termNumber());
27 output.writeInt(object.sequenceNumber()); 26 output.writeInt(object.sequenceNumber());
28 } 27 }
29 28
30 @Override 29 @Override
31 public OnosTimestamp read(Kryo kryo, Input input, Class<OnosTimestamp> type) { 30 public OnosTimestamp read(Kryo kryo, Input input, Class<OnosTimestamp> type) {
32 - ElementId id = (ElementId) kryo.readClassAndObject(input);
33 final int term = input.readInt(); 31 final int term = input.readInt();
34 final int sequence = input.readInt(); 32 final int sequence = input.readInt();
35 - return new OnosTimestamp(id, term, sequence); 33 + return new OnosTimestamp(term, sequence);
36 } 34 }
37 } 35 }
......
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import com.google.common.collect.ImmutableMap;
4 +import com.google.common.collect.ImmutableSet;
5 +import com.google.common.collect.ImmutableSetMultimap;
6 +import org.onlab.graph.DijkstraGraphSearch;
7 +import org.onlab.graph.GraphPathSearch;
8 +import org.onlab.graph.TarjanGraphSearch;
9 +import org.onlab.onos.net.AbstractModel;
10 +import org.onlab.onos.net.ConnectPoint;
11 +import org.onlab.onos.net.DefaultPath;
12 +import org.onlab.onos.net.DeviceId;
13 +import org.onlab.onos.net.Link;
14 +import org.onlab.onos.net.Path;
15 +import org.onlab.onos.net.provider.ProviderId;
16 +import org.onlab.onos.net.topology.ClusterId;
17 +import org.onlab.onos.net.topology.DefaultTopologyCluster;
18 +import org.onlab.onos.net.topology.DefaultTopologyVertex;
19 +import org.onlab.onos.net.topology.GraphDescription;
20 +import org.onlab.onos.net.topology.LinkWeight;
21 +import org.onlab.onos.net.topology.Topology;
22 +import org.onlab.onos.net.topology.TopologyCluster;
23 +import org.onlab.onos.net.topology.TopologyEdge;
24 +import org.onlab.onos.net.topology.TopologyGraph;
25 +import org.onlab.onos.net.topology.TopologyVertex;
26 +
27 +import java.util.ArrayList;
28 +import java.util.List;
29 +import java.util.Map;
30 +import java.util.Set;
31 +
32 +import static com.google.common.base.MoreObjects.toStringHelper;
33 +import static com.google.common.collect.ImmutableSetMultimap.Builder;
34 +import static org.onlab.graph.GraphPathSearch.Result;
35 +import static org.onlab.graph.TarjanGraphSearch.SCCResult;
36 +import static org.onlab.onos.net.Link.Type.INDIRECT;
37 +
38 +/**
39 + * Default implementation of the topology descriptor. This carries the
40 + * backing topology data.
41 + */
42 +public class DefaultTopology extends AbstractModel implements Topology {
43 +
44 + private static final DijkstraGraphSearch<TopologyVertex, TopologyEdge> DIJKSTRA =
45 + new DijkstraGraphSearch<>();
46 + private static final TarjanGraphSearch<TopologyVertex, TopologyEdge> TARJAN =
47 + new TarjanGraphSearch<>();
48 +
49 + private static final ProviderId PID = new ProviderId("core", "org.onlab.onos.net");
50 +
51 + private final long time;
52 + private final TopologyGraph graph;
53 +
54 + private final SCCResult<TopologyVertex, TopologyEdge> clusterResults;
55 + private final ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> results;
56 + private final ImmutableSetMultimap<PathKey, Path> paths;
57 +
58 + private final ImmutableMap<ClusterId, TopologyCluster> clusters;
59 + private final ImmutableSet<ConnectPoint> infrastructurePoints;
60 + private final ImmutableSetMultimap<ClusterId, ConnectPoint> broadcastSets;
61 +
62 + private ImmutableMap<DeviceId, TopologyCluster> clustersByDevice;
63 + private ImmutableSetMultimap<TopologyCluster, DeviceId> devicesByCluster;
64 + private ImmutableSetMultimap<TopologyCluster, Link> linksByCluster;
65 +
66 +
67 + /**
68 + * Creates a topology descriptor attributed to the specified provider.
69 + *
70 + * @param providerId identity of the provider
71 + * @param description data describing the new topology
72 + */
73 + DefaultTopology(ProviderId providerId, GraphDescription description) {
74 + super(providerId);
75 + this.time = description.timestamp();
76 +
77 + // Build the graph
78 + this.graph = new DefaultTopologyGraph(description.vertexes(),
79 + description.edges());
80 +
81 + this.results = searchForShortestPaths();
82 + this.paths = buildPaths();
83 +
84 + this.clusterResults = searchForClusters();
85 + this.clusters = buildTopologyClusters();
86 +
87 + buildIndexes();
88 +
89 + this.broadcastSets = buildBroadcastSets();
90 + this.infrastructurePoints = findInfrastructurePoints();
91 + }
92 +
93 + @Override
94 + public long time() {
95 + return time;
96 + }
97 +
98 + @Override
99 + public int clusterCount() {
100 + return clusters.size();
101 + }
102 +
103 + @Override
104 + public int deviceCount() {
105 + return graph.getVertexes().size();
106 + }
107 +
108 + @Override
109 + public int linkCount() {
110 + return graph.getEdges().size();
111 + }
112 +
113 + @Override
114 + public int pathCount() {
115 + return paths.size();
116 + }
117 +
118 + /**
119 + * Returns the backing topology graph.
120 + *
121 + * @return topology graph
122 + */
123 + TopologyGraph getGraph() {
124 + return graph;
125 + }
126 +
127 + /**
128 + * Returns the set of topology clusters.
129 + *
130 + * @return set of clusters
131 + */
132 + Set<TopologyCluster> getClusters() {
133 + return ImmutableSet.copyOf(clusters.values());
134 + }
135 +
136 + /**
137 + * Returns the specified topology cluster.
138 + *
139 + * @param clusterId cluster identifier
140 + * @return topology cluster
141 + */
142 + TopologyCluster getCluster(ClusterId clusterId) {
143 + return clusters.get(clusterId);
144 + }
145 +
146 + /**
147 + * Returns the topology cluster that contains the given device.
148 + *
149 + * @param deviceId device identifier
150 + * @return topology cluster
151 + */
152 + TopologyCluster getCluster(DeviceId deviceId) {
153 + return clustersByDevice.get(deviceId);
154 + }
155 +
156 + /**
157 + * Returns the set of cluster devices.
158 + *
159 + * @param cluster topology cluster
160 + * @return cluster devices
161 + */
162 + Set<DeviceId> getClusterDevices(TopologyCluster cluster) {
163 + return devicesByCluster.get(cluster);
164 + }
165 +
166 + /**
167 + * Returns the set of cluster links.
168 + *
169 + * @param cluster topology cluster
170 + * @return cluster links
171 + */
172 + Set<Link> getClusterLinks(TopologyCluster cluster) {
173 + return linksByCluster.get(cluster);
174 + }
175 +
176 + /**
177 + * Indicates whether the given point is an infrastructure link end-point.
178 + *
179 + * @param connectPoint connection point
180 + * @return true if infrastructure
181 + */
182 + boolean isInfrastructure(ConnectPoint connectPoint) {
183 + return infrastructurePoints.contains(connectPoint);
184 + }
185 +
186 + /**
187 + * Indicates whether the given point is part of a broadcast set.
188 + *
189 + * @param connectPoint connection point
190 + * @return true if in broadcast set
191 + */
192 + boolean isBroadcastPoint(ConnectPoint connectPoint) {
193 + // Any non-infrastructure, i.e. edge points are assumed to be OK.
194 + if (!isInfrastructure(connectPoint)) {
195 + return true;
196 + }
197 +
198 + // Find the cluster to which the device belongs.
199 + TopologyCluster cluster = clustersByDevice.get(connectPoint.deviceId());
200 + if (cluster == null) {
201 + throw new IllegalArgumentException("No cluster found for device " + connectPoint.deviceId());
202 + }
203 +
204 + // If the broadcast set is null or empty, or if the point explicitly
205 + // belongs to it, return true;
206 + Set<ConnectPoint> points = broadcastSets.get(cluster.id());
207 + return points == null || points.isEmpty() || points.contains(connectPoint);
208 + }
209 +
210 + /**
211 + * Returns the size of the cluster broadcast set.
212 + *
213 + * @param clusterId cluster identifier
214 + * @return size of the cluster broadcast set
215 + */
216 + int broadcastSetSize(ClusterId clusterId) {
217 + return broadcastSets.get(clusterId).size();
218 + }
219 +
220 + /**
221 + * Returns the set of pre-computed shortest paths between source and
222 + * destination devices.
223 + *
224 + * @param src source device
225 + * @param dst destination device
226 + * @return set of shortest paths
227 + */
228 + Set<Path> getPaths(DeviceId src, DeviceId dst) {
229 + return paths.get(new PathKey(src, dst));
230 + }
231 +
232 + /**
233 + * Computes on-demand the set of shortest paths between source and
234 + * destination devices.
235 + *
236 + * @param src source device
237 + * @param dst destination device
238 + * @return set of shortest paths
239 + */
240 + Set<Path> getPaths(DeviceId src, DeviceId dst, LinkWeight weight) {
241 + GraphPathSearch.Result<TopologyVertex, TopologyEdge> result =
242 + DIJKSTRA.search(graph, new DefaultTopologyVertex(src),
243 + new DefaultTopologyVertex(dst), weight);
244 + ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
245 + for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
246 + builder.add(networkPath(path));
247 + }
248 + return builder.build();
249 + }
250 +
251 +
252 + // Searches the graph for all shortest paths and returns the search results.
253 + private ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> searchForShortestPaths() {
254 + ImmutableMap.Builder<DeviceId, Result<TopologyVertex, TopologyEdge>> builder = ImmutableMap.builder();
255 +
256 + // Search graph paths for each source to all destinations.
257 + LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size());
258 + for (TopologyVertex src : graph.getVertexes()) {
259 + builder.put(src.deviceId(), DIJKSTRA.search(graph, src, null, weight));
260 + }
261 + return builder.build();
262 + }
263 +
264 + // Builds network paths from the graph path search results
265 + private ImmutableSetMultimap<PathKey, Path> buildPaths() {
266 + Builder<PathKey, Path> builder = ImmutableSetMultimap.builder();
267 + for (DeviceId deviceId : results.keySet()) {
268 + Result<TopologyVertex, TopologyEdge> result = results.get(deviceId);
269 + for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
270 + builder.put(new PathKey(path.src().deviceId(), path.dst().deviceId()),
271 + networkPath(path));
272 + }
273 + }
274 + return builder.build();
275 + }
276 +
277 + // Converts graph path to a network path with the same cost.
278 + private Path networkPath(org.onlab.graph.Path<TopologyVertex, TopologyEdge> path) {
279 + List<Link> links = new ArrayList<>();
280 + for (TopologyEdge edge : path.edges()) {
281 + links.add(edge.link());
282 + }
283 + return new DefaultPath(PID, links, path.cost());
284 + }
285 +
286 +
287 + // Searches for SCC clusters in the network topology graph using Tarjan
288 + // algorithm.
289 + private SCCResult<TopologyVertex, TopologyEdge> searchForClusters() {
290 + return TARJAN.search(graph, new NoIndirectLinksWeight());
291 + }
292 +
293 + // Builds the topology clusters and returns the id-cluster bindings.
294 + private ImmutableMap<ClusterId, TopologyCluster> buildTopologyClusters() {
295 + ImmutableMap.Builder<ClusterId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
296 + SCCResult<TopologyVertex, TopologyEdge> result =
297 + TARJAN.search(graph, new NoIndirectLinksWeight());
298 +
299 + // Extract both vertexes and edges from the results; the lists form
300 + // pairs along the same index.
301 + List<Set<TopologyVertex>> clusterVertexes = result.clusterVertexes();
302 + List<Set<TopologyEdge>> clusterEdges = result.clusterEdges();
303 +
304 + // Scan over the lists and create a cluster from the results.
305 + for (int i = 0, n = result.clusterCount(); i < n; i++) {
306 + Set<TopologyVertex> vertexSet = clusterVertexes.get(i);
307 + Set<TopologyEdge> edgeSet = clusterEdges.get(i);
308 +
309 + ClusterId cid = ClusterId.clusterId(i);
310 + DefaultTopologyCluster cluster =
311 + new DefaultTopologyCluster(cid, vertexSet.size(), edgeSet.size(),
312 + findRoot(vertexSet).deviceId());
313 + clusterBuilder.put(cid, cluster);
314 + }
315 + return clusterBuilder.build();
316 + }
317 +
318 + // Finds the vertex whose device id is the lexicographical minimum in the
319 + // specified set.
320 + private TopologyVertex findRoot(Set<TopologyVertex> vertexSet) {
321 + TopologyVertex minVertex = null;
322 + for (TopologyVertex vertex : vertexSet) {
323 + if (minVertex == null ||
324 + minVertex.deviceId().toString()
325 + .compareTo(minVertex.deviceId().toString()) < 0) {
326 + minVertex = vertex;
327 + }
328 + }
329 + return minVertex;
330 + }
331 +
332 + // Processes a map of broadcast sets for each cluster.
333 + private ImmutableSetMultimap<ClusterId, ConnectPoint> buildBroadcastSets() {
334 + Builder<ClusterId, ConnectPoint> builder = ImmutableSetMultimap.builder();
335 + for (TopologyCluster cluster : clusters.values()) {
336 + addClusterBroadcastSet(cluster, builder);
337 + }
338 + return builder.build();
339 + }
340 +
341 + // Finds all broadcast points for the cluster. These are those connection
342 + // points which lie along the shortest paths between the cluster root and
343 + // all other devices within the cluster.
344 + private void addClusterBroadcastSet(TopologyCluster cluster,
345 + Builder<ClusterId, ConnectPoint> builder) {
346 + // Use the graph root search results to build the broadcast set.
347 + Result<TopologyVertex, TopologyEdge> result = results.get(cluster.root());
348 + for (Map.Entry<TopologyVertex, Set<TopologyEdge>> entry : result.parents().entrySet()) {
349 + TopologyVertex vertex = entry.getKey();
350 +
351 + // Ignore any parents that lead outside the cluster.
352 + if (clustersByDevice.get(vertex.deviceId()) != cluster) {
353 + continue;
354 + }
355 +
356 + // Ignore any back-link sets that are empty.
357 + Set<TopologyEdge> parents = entry.getValue();
358 + if (parents.isEmpty()) {
359 + continue;
360 + }
361 +
362 + // Use the first back-link source and destinations to add to the
363 + // broadcast set.
364 + Link link = parents.iterator().next().link();
365 + builder.put(cluster.id(), link.src());
366 + builder.put(cluster.id(), link.dst());
367 + }
368 + }
369 +
370 + // Collects and returns an set of all infrastructure link end-points.
371 + private ImmutableSet<ConnectPoint> findInfrastructurePoints() {
372 + ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder();
373 + for (TopologyEdge edge : graph.getEdges()) {
374 + builder.add(edge.link().src());
375 + builder.add(edge.link().dst());
376 + }
377 + return builder.build();
378 + }
379 +
380 + // Builds cluster-devices, cluster-links and device-cluster indexes.
381 + private void buildIndexes() {
382 + // Prepare the index builders
383 + ImmutableMap.Builder<DeviceId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
384 + ImmutableSetMultimap.Builder<TopologyCluster, DeviceId> devicesBuilder = ImmutableSetMultimap.builder();
385 + ImmutableSetMultimap.Builder<TopologyCluster, Link> linksBuilder = ImmutableSetMultimap.builder();
386 +
387 + // Now scan through all the clusters
388 + for (TopologyCluster cluster : clusters.values()) {
389 + int i = cluster.id().index();
390 +
391 + // Scan through all the cluster vertexes.
392 + for (TopologyVertex vertex : clusterResults.clusterVertexes().get(i)) {
393 + devicesBuilder.put(cluster, vertex.deviceId());
394 + clusterBuilder.put(vertex.deviceId(), cluster);
395 + }
396 +
397 + // Scan through all the cluster edges.
398 + for (TopologyEdge edge : clusterResults.clusterEdges().get(i)) {
399 + linksBuilder.put(cluster, edge.link());
400 + }
401 + }
402 +
403 + // Finalize all indexes.
404 + clustersByDevice = clusterBuilder.build();
405 + devicesByCluster = devicesBuilder.build();
406 + linksByCluster = linksBuilder.build();
407 + }
408 +
409 + // Link weight for measuring link cost as hop count with indirect links
410 + // being as expensive as traversing the entire graph to assume the worst.
411 + private static class HopCountLinkWeight implements LinkWeight {
412 + private final int indirectLinkCost;
413 +
414 + HopCountLinkWeight(int indirectLinkCost) {
415 + this.indirectLinkCost = indirectLinkCost;
416 + }
417 +
418 + @Override
419 + public double weight(TopologyEdge edge) {
420 + // To force preference to use direct paths first, make indirect
421 + // links as expensive as the linear vertex traversal.
422 + return edge.link().type() == INDIRECT ? indirectLinkCost : 1;
423 + }
424 + }
425 +
426 + // Link weight for preventing traversal over indirect links.
427 + private static class NoIndirectLinksWeight implements LinkWeight {
428 + @Override
429 + public double weight(TopologyEdge edge) {
430 + return edge.link().type() == INDIRECT ? -1 : 1;
431 + }
432 + }
433 +
434 + @Override
435 + public String toString() {
436 + return toStringHelper(this)
437 + .add("time", time)
438 + .add("clusters", clusterCount())
439 + .add("devices", deviceCount())
440 + .add("links", linkCount())
441 + .add("pathCount", pathCount())
442 + .toString();
443 + }
444 +}
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import org.onlab.graph.AdjacencyListsGraph;
4 +import org.onlab.onos.net.topology.TopologyEdge;
5 +import org.onlab.onos.net.topology.TopologyGraph;
6 +import org.onlab.onos.net.topology.TopologyVertex;
7 +
8 +import java.util.Set;
9 +
/**
 * Default implementation of an immutable topology graph based on a generic
 * implementation of adjacency lists graph. All structure is supplied at
 * construction time; this subclass adds no state of its own.
 */
public class DefaultTopologyGraph
        extends AdjacencyListsGraph<TopologyVertex, TopologyEdge>
        implements TopologyGraph {

    /**
     * Creates a topology graph comprising of the specified vertexes and edges.
     *
     * @param vertexes set of graph vertexes
     * @param edges    set of graph edges
     */
    public DefaultTopologyGraph(Set<TopologyVertex> vertexes, Set<TopologyEdge> edges) {
        super(vertexes, edges);
    }

}
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import static org.slf4j.LoggerFactory.getLogger;
4 +
5 +import java.util.List;
6 +import java.util.Set;
7 +
8 +import org.apache.felix.scr.annotations.Activate;
9 +import org.apache.felix.scr.annotations.Component;
10 +import org.apache.felix.scr.annotations.Deactivate;
11 +import org.apache.felix.scr.annotations.Service;
12 +import org.onlab.onos.event.Event;
13 +import org.onlab.onos.net.ConnectPoint;
14 +import org.onlab.onos.net.DeviceId;
15 +import org.onlab.onos.net.Link;
16 +import org.onlab.onos.net.Path;
17 +import org.onlab.onos.net.provider.ProviderId;
18 +import org.onlab.onos.net.topology.ClusterId;
19 +import org.onlab.onos.net.topology.GraphDescription;
20 +import org.onlab.onos.net.topology.LinkWeight;
21 +import org.onlab.onos.net.topology.Topology;
22 +import org.onlab.onos.net.topology.TopologyCluster;
23 +import org.onlab.onos.net.topology.TopologyEvent;
24 +import org.onlab.onos.net.topology.TopologyGraph;
25 +import org.onlab.onos.net.topology.TopologyStore;
26 +import org.onlab.onos.net.topology.TopologyStoreDelegate;
27 +import org.onlab.onos.store.AbstractStore;
28 +import org.slf4j.Logger;
29 +
30 +/**
31 + * Manages inventory of topology snapshots using trivial in-memory
32 + * structures implementation.
33 + */
34 +//FIXME: I LIE I AM NOT DISTRIBUTED
35 +@Component(immediate = true)
36 +@Service
37 +public class DistributedTopologyStore
38 +extends AbstractStore<TopologyEvent, TopologyStoreDelegate>
39 +implements TopologyStore {
40 +
41 + private final Logger log = getLogger(getClass());
42 +
43 + private volatile DefaultTopology current;
44 +
45 + @Activate
46 + public void activate() {
47 + log.info("Started");
48 + }
49 +
50 + @Deactivate
51 + public void deactivate() {
52 + log.info("Stopped");
53 + }
54 + @Override
55 + public Topology currentTopology() {
56 + return current;
57 + }
58 +
59 + @Override
60 + public boolean isLatest(Topology topology) {
61 + // Topology is current only if it is the same as our current topology
62 + return topology == current;
63 + }
64 +
65 + @Override
66 + public TopologyGraph getGraph(Topology topology) {
67 + return defaultTopology(topology).getGraph();
68 + }
69 +
70 + @Override
71 + public Set<TopologyCluster> getClusters(Topology topology) {
72 + return defaultTopology(topology).getClusters();
73 + }
74 +
75 + @Override
76 + public TopologyCluster getCluster(Topology topology, ClusterId clusterId) {
77 + return defaultTopology(topology).getCluster(clusterId);
78 + }
79 +
80 + @Override
81 + public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) {
82 + return defaultTopology(topology).getClusterDevices(cluster);
83 + }
84 +
85 + @Override
86 + public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) {
87 + return defaultTopology(topology).getClusterLinks(cluster);
88 + }
89 +
90 + @Override
91 + public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) {
92 + return defaultTopology(topology).getPaths(src, dst);
93 + }
94 +
95 + @Override
96 + public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst,
97 + LinkWeight weight) {
98 + return defaultTopology(topology).getPaths(src, dst, weight);
99 + }
100 +
101 + @Override
102 + public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) {
103 + return defaultTopology(topology).isInfrastructure(connectPoint);
104 + }
105 +
106 + @Override
107 + public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
108 + return defaultTopology(topology).isBroadcastPoint(connectPoint);
109 + }
110 +
111 + @Override
112 + public TopologyEvent updateTopology(ProviderId providerId,
113 + GraphDescription graphDescription,
114 + List<Event> reasons) {
115 + // First off, make sure that what we're given is indeed newer than
116 + // what we already have.
117 + if (current != null && graphDescription.timestamp() < current.time()) {
118 + return null;
119 + }
120 +
121 + // Have the default topology construct self from the description data.
122 + DefaultTopology newTopology =
123 + new DefaultTopology(providerId, graphDescription);
124 +
125 + // Promote the new topology to current and return a ready-to-send event.
126 + synchronized (this) {
127 + current = newTopology;
128 + return new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED, current);
129 + }
130 + }
131 +
132 + // Validates the specified topology and returns it as a default
133 + private DefaultTopology defaultTopology(Topology topology) {
134 + if (topology instanceof DefaultTopology) {
135 + return (DefaultTopology) topology;
136 + }
137 + throw new IllegalArgumentException("Topology class " + topology.getClass() +
138 + " not supported");
139 + }
140 +
141 +}
package org.onlab.onos.store.topology.impl;

import org.onlab.onos.net.DeviceId;

import java.util.Objects;

/**
 * Key for filing pre-computed paths between source and destination devices.
 */
class PathKey {
    private final DeviceId src;
    private final DeviceId dst;

    /**
     * Creates a path key from the given source/dest pair.
     *
     * @param src source device
     * @param dst destination device
     */
    PathKey(DeviceId src, DeviceId dst) {
        this.src = src;
        this.dst = dst;
    }

    @Override
    public int hashCode() {
        return Objects.hash(src, dst);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (!(obj instanceof PathKey)) {
            return false;
        }
        PathKey that = (PathKey) obj;
        return Objects.equals(src, that.src)
                && Objects.equals(dst, that.dst);
    }
}
1 +<?xml version="1.0" encoding="UTF-8"?>
2 +<project xmlns="http://maven.apache.org/POM/4.0.0"
3 + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
5 + <modelVersion>4.0.0</modelVersion>
6 +
7 + <parent>
8 + <groupId>org.onlab.onos</groupId>
9 + <artifactId>onos-core-hz</artifactId>
10 + <version>1.0.0-SNAPSHOT</version>
11 + <relativePath>../pom.xml</relativePath>
12 + </parent>
13 +
14 + <artifactId>onos-core-hz-cluster</artifactId>
15 + <packaging>bundle</packaging>
16 +
17 + <description>ONOS Hazelcast based distributed store subsystems</description>
18 +
19 + <dependencies>
20 + <dependency>
21 + <groupId>org.onlab.onos</groupId>
22 + <artifactId>onos-api</artifactId>
23 + </dependency>
24 + <dependency>
25 + <groupId>org.onlab.onos</groupId>
26 + <artifactId>onos-core-serializers</artifactId>
27 + <version>${project.version}</version>
28 + </dependency>
29 + <dependency>
30 + <groupId>org.onlab.onos</groupId>
31 + <artifactId>onos-core-hz-common</artifactId>
32 + <version>${project.version}</version>
33 + </dependency>
34 + <dependency>
35 + <groupId>org.onlab.onos</groupId>
36 + <artifactId>onos-core-hz-common</artifactId>
37 + <classifier>tests</classifier>
38 + <scope>test</scope>
39 + <version>${project.version}</version>
40 + </dependency>
41 + <dependency>
42 + <groupId>org.apache.felix</groupId>
43 + <artifactId>org.apache.felix.scr.annotations</artifactId>
44 + </dependency>
45 + <dependency>
46 + <groupId>com.hazelcast</groupId>
47 + <artifactId>hazelcast</artifactId>
48 + </dependency>
49 + <dependency>
50 + <groupId>de.javakaffee</groupId>
51 + <artifactId>kryo-serializers</artifactId>
52 + </dependency>
53 + </dependencies>
54 +
55 + <build>
56 + <plugins>
57 + <plugin>
58 + <groupId>org.apache.felix</groupId>
59 + <artifactId>maven-scr-plugin</artifactId>
60 + </plugin>
61 + </plugins>
62 + </build>
63 +
64 +</project>
...@@ -8,6 +8,7 @@ import com.hazelcast.core.Member; ...@@ -8,6 +8,7 @@ import com.hazelcast.core.Member;
8 import com.hazelcast.core.MemberAttributeEvent; 8 import com.hazelcast.core.MemberAttributeEvent;
9 import com.hazelcast.core.MembershipEvent; 9 import com.hazelcast.core.MembershipEvent;
10 import com.hazelcast.core.MembershipListener; 10 import com.hazelcast.core.MembershipListener;
11 +
11 import org.apache.felix.scr.annotations.Activate; 12 import org.apache.felix.scr.annotations.Activate;
12 import org.apache.felix.scr.annotations.Component; 13 import org.apache.felix.scr.annotations.Component;
13 import org.apache.felix.scr.annotations.Deactivate; 14 import org.apache.felix.scr.annotations.Deactivate;
...@@ -18,9 +19,9 @@ import org.onlab.onos.cluster.ClusterStoreDelegate; ...@@ -18,9 +19,9 @@ import org.onlab.onos.cluster.ClusterStoreDelegate;
18 import org.onlab.onos.cluster.ControllerNode; 19 import org.onlab.onos.cluster.ControllerNode;
19 import org.onlab.onos.cluster.DefaultControllerNode; 20 import org.onlab.onos.cluster.DefaultControllerNode;
20 import org.onlab.onos.cluster.NodeId; 21 import org.onlab.onos.cluster.NodeId;
21 -import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache; 22 +import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
22 -import org.onlab.onos.store.impl.AbstractDistributedStore; 23 +import org.onlab.onos.store.common.AbstractHazelcastStore;
23 -import org.onlab.onos.store.impl.OptionalCacheLoader; 24 +import org.onlab.onos.store.common.OptionalCacheLoader;
24 import org.onlab.packet.IpPrefix; 25 import org.onlab.packet.IpPrefix;
25 26
26 import java.util.Map; 27 import java.util.Map;
...@@ -38,7 +39,7 @@ import static org.onlab.onos.cluster.ControllerNode.State; ...@@ -38,7 +39,7 @@ import static org.onlab.onos.cluster.ControllerNode.State;
38 @Component(immediate = true) 39 @Component(immediate = true)
39 @Service 40 @Service
40 public class DistributedClusterStore 41 public class DistributedClusterStore
41 - extends AbstractDistributedStore<ClusterEvent, ClusterStoreDelegate> 42 + extends AbstractHazelcastStore<ClusterEvent, ClusterStoreDelegate>
42 implements ClusterStore { 43 implements ClusterStore {
43 44
44 private IMap<byte[], byte[]> rawNodes; 45 private IMap<byte[], byte[]> rawNodes;
...@@ -57,7 +58,7 @@ public class DistributedClusterStore ...@@ -57,7 +58,7 @@ public class DistributedClusterStore
57 OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader 58 OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader
58 = new OptionalCacheLoader<>(storeService, rawNodes); 59 = new OptionalCacheLoader<>(storeService, rawNodes);
59 nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); 60 nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
60 - rawNodes.addEntryListener(new RemoteEventHandler<>(nodes), true); 61 + rawNodes.addEntryListener(new RemoteCacheEventHandler<>(nodes), true);
61 62
62 loadClusterNodes(); 63 loadClusterNodes();
63 64
...@@ -67,7 +68,7 @@ public class DistributedClusterStore ...@@ -67,7 +68,7 @@ public class DistributedClusterStore
67 // Loads the initial set of cluster nodes 68 // Loads the initial set of cluster nodes
68 private void loadClusterNodes() { 69 private void loadClusterNodes() {
69 for (Member member : theInstance.getCluster().getMembers()) { 70 for (Member member : theInstance.getCluster().getMembers()) {
70 - addMember(member); 71 + addNode(node(member));
71 } 72 }
72 } 73 }
73 74
...@@ -103,6 +104,11 @@ public class DistributedClusterStore ...@@ -103,6 +104,11 @@ public class DistributedClusterStore
103 } 104 }
104 105
105 @Override 106 @Override
107 + public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
108 + return addNode(new DefaultControllerNode(nodeId, ip, tcpPort));
109 + }
110 +
111 + @Override
106 public void removeNode(NodeId nodeId) { 112 public void removeNode(NodeId nodeId) {
107 synchronized (this) { 113 synchronized (this) {
108 rawNodes.remove(serialize(nodeId)); 114 rawNodes.remove(serialize(nodeId));
...@@ -111,8 +117,7 @@ public class DistributedClusterStore ...@@ -111,8 +117,7 @@ public class DistributedClusterStore
111 } 117 }
112 118
113 // Adds a new node based on the specified member 119 // Adds a new node based on the specified member
114 - private synchronized ControllerNode addMember(Member member) { 120 + private synchronized ControllerNode addNode(DefaultControllerNode node) {
115 - DefaultControllerNode node = node(member);
116 rawNodes.put(serialize(node.id()), serialize(node)); 121 rawNodes.put(serialize(node.id()), serialize(node));
117 nodes.put(node.id(), Optional.of(node)); 122 nodes.put(node.id(), Optional.of(node));
118 states.put(node.id(), State.ACTIVE); 123 states.put(node.id(), State.ACTIVE);
...@@ -135,7 +140,7 @@ public class DistributedClusterStore ...@@ -135,7 +140,7 @@ public class DistributedClusterStore
135 @Override 140 @Override
136 public void memberAdded(MembershipEvent membershipEvent) { 141 public void memberAdded(MembershipEvent membershipEvent) {
137 log.info("Member {} added", membershipEvent.getMember()); 142 log.info("Member {} added", membershipEvent.getMember());
138 - ControllerNode node = addMember(membershipEvent.getMember()); 143 + ControllerNode node = addNode(node(membershipEvent.getMember()));
139 notifyDelegate(new ClusterEvent(INSTANCE_ACTIVATED, node)); 144 notifyDelegate(new ClusterEvent(INSTANCE_ACTIVATED, node));
140 } 145 }
141 146
......
1 package org.onlab.onos.store.cluster.impl; 1 package org.onlab.onos.store.cluster.impl;
2 2
3 -import com.google.common.base.Optional; 3 +import static com.google.common.cache.CacheBuilder.newBuilder;
4 -import com.google.common.cache.LoadingCache; 4 +import static org.onlab.onos.cluster.MastershipEvent.Type.MASTER_CHANGED;
5 -import com.google.common.collect.ImmutableSet; 5 +
6 -import com.hazelcast.core.IMap; 6 +import java.util.Map;
7 +import java.util.Objects;
8 +import java.util.Set;
7 9
8 import org.apache.felix.scr.annotations.Activate; 10 import org.apache.felix.scr.annotations.Activate;
9 import org.apache.felix.scr.annotations.Component; 11 import org.apache.felix.scr.annotations.Component;
...@@ -19,15 +21,14 @@ import org.onlab.onos.cluster.MastershipTerm; ...@@ -19,15 +21,14 @@ import org.onlab.onos.cluster.MastershipTerm;
19 import org.onlab.onos.cluster.NodeId; 21 import org.onlab.onos.cluster.NodeId;
20 import org.onlab.onos.net.DeviceId; 22 import org.onlab.onos.net.DeviceId;
21 import org.onlab.onos.net.MastershipRole; 23 import org.onlab.onos.net.MastershipRole;
22 -import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache; 24 +import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
23 -import org.onlab.onos.store.impl.AbstractDistributedStore; 25 +import org.onlab.onos.store.common.AbstractHazelcastStore;
24 -import org.onlab.onos.store.impl.OptionalCacheLoader; 26 +import org.onlab.onos.store.common.OptionalCacheLoader;
25 27
26 -import java.util.Map; 28 +import com.google.common.base.Optional;
27 -import java.util.Objects; 29 +import com.google.common.cache.LoadingCache;
28 -import java.util.Set; 30 +import com.google.common.collect.ImmutableSet;
29 - 31 +import com.hazelcast.core.IMap;
30 -import static com.google.common.cache.CacheBuilder.newBuilder;
31 32
32 /** 33 /**
33 * Distributed implementation of the cluster nodes store. 34 * Distributed implementation of the cluster nodes store.
...@@ -35,8 +36,8 @@ import static com.google.common.cache.CacheBuilder.newBuilder; ...@@ -35,8 +36,8 @@ import static com.google.common.cache.CacheBuilder.newBuilder;
35 @Component(immediate = true) 36 @Component(immediate = true)
36 @Service 37 @Service
37 public class DistributedMastershipStore 38 public class DistributedMastershipStore
38 - extends AbstractDistributedStore<MastershipEvent, MastershipStoreDelegate> 39 +extends AbstractHazelcastStore<MastershipEvent, MastershipStoreDelegate>
39 - implements MastershipStore { 40 +implements MastershipStore {
40 41
41 private IMap<byte[], byte[]> rawMasters; 42 private IMap<byte[], byte[]> rawMasters;
42 private LoadingCache<DeviceId, Optional<NodeId>> masters; 43 private LoadingCache<DeviceId, Optional<NodeId>> masters;
...@@ -51,9 +52,9 @@ public class DistributedMastershipStore ...@@ -51,9 +52,9 @@ public class DistributedMastershipStore
51 52
52 rawMasters = theInstance.getMap("masters"); 53 rawMasters = theInstance.getMap("masters");
53 OptionalCacheLoader<DeviceId, NodeId> nodeLoader 54 OptionalCacheLoader<DeviceId, NodeId> nodeLoader
54 - = new OptionalCacheLoader<>(storeService, rawMasters); 55 + = new OptionalCacheLoader<>(storeService, rawMasters);
55 masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); 56 masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
56 - rawMasters.addEntryListener(new RemoteEventHandler<>(masters), true); 57 + rawMasters.addEntryListener(new RemoteMasterShipEventHandler(masters), true);
57 58
58 loadMasters(); 59 loadMasters();
59 60
...@@ -128,4 +129,25 @@ public class DistributedMastershipStore ...@@ -128,4 +129,25 @@ public class DistributedMastershipStore
128 return null; 129 return null;
129 } 130 }
130 131
    /**
     * Entry listener translating remote master-map cache updates into
     * mastership events delivered to the store delegate.
     */
    private class RemoteMasterShipEventHandler extends RemoteCacheEventHandler<DeviceId, NodeId> {
        public RemoteMasterShipEventHandler(LoadingCache<DeviceId, Optional<NodeId>> cache) {
            super(cache);
        }

        @Override
        protected void onAdd(DeviceId deviceId, NodeId nodeId) {
            // A newly filed master mapping is announced as a master change.
            notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
        }

        @Override
        protected void onRemove(DeviceId deviceId, NodeId nodeId) {
            // NOTE(review): removal is also reported as MASTER_CHANGED carrying
            // the removed node id — confirm this is the intended semantics
            // rather than a distinct "master lost" notification.
            notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
        }

        @Override
        protected void onUpdate(DeviceId deviceId, NodeId oldNodeId, NodeId nodeId) {
            // Only the new master is reported; the previous one is dropped.
            notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
        }
    }
152 +
131 } 153 }
......
1 +<?xml version="1.0" encoding="UTF-8"?>
2 +<project xmlns="http://maven.apache.org/POM/4.0.0"
3 + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
5 + <modelVersion>4.0.0</modelVersion>
6 +
7 + <parent>
8 + <groupId>org.onlab.onos</groupId>
9 + <artifactId>onos-core-hz</artifactId>
10 + <version>1.0.0-SNAPSHOT</version>
11 + <relativePath>../pom.xml</relativePath>
12 + </parent>
13 +
14 + <artifactId>onos-core-hz-common</artifactId>
15 + <packaging>bundle</packaging>
16 +
17 + <description>ONOS Hazelcast based distributed store subsystems</description>
18 +
19 + <dependencies>
20 + <dependency>
21 + <groupId>org.onlab.onos</groupId>
22 + <artifactId>onos-api</artifactId>
23 + </dependency>
24 + <dependency>
25 + <groupId>org.onlab.onos</groupId>
26 + <artifactId>onos-core-serializers</artifactId>
27 + <version>${project.version}</version>
28 + </dependency>
29 + <dependency>
30 + <groupId>org.apache.felix</groupId>
31 + <artifactId>org.apache.felix.scr.annotations</artifactId>
32 + </dependency>
33 + <dependency>
34 + <groupId>com.hazelcast</groupId>
35 + <artifactId>hazelcast</artifactId>
36 + </dependency>
37 + <dependency>
38 + <groupId>de.javakaffee</groupId>
39 + <artifactId>kryo-serializers</artifactId>
40 + </dependency>
41 + </dependencies>
42 +
43 + <build>
44 + <plugins>
45 + <plugin>
46 + <groupId>org.apache.felix</groupId>
47 + <artifactId>maven-scr-plugin</artifactId>
48 + </plugin>
49 + </plugins>
50 + </build>
51 +
52 +</project>
1 -package org.onlab.onos.store.impl; 1 +package org.onlab.onos.store.common;
2 2
3 import java.util.concurrent.Callable; 3 import java.util.concurrent.Callable;
4 import java.util.concurrent.ExecutionException; 4 import java.util.concurrent.ExecutionException;
......
1 -package org.onlab.onos.store.impl; 1 +package org.onlab.onos.store.common;
2 2
3 import com.google.common.base.Optional; 3 import com.google.common.base.Optional;
4 import com.google.common.cache.LoadingCache; 4 import com.google.common.cache.LoadingCache;
...@@ -6,6 +6,8 @@ import com.hazelcast.core.EntryAdapter; ...@@ -6,6 +6,8 @@ import com.hazelcast.core.EntryAdapter;
6 import com.hazelcast.core.EntryEvent; 6 import com.hazelcast.core.EntryEvent;
7 import com.hazelcast.core.HazelcastInstance; 7 import com.hazelcast.core.HazelcastInstance;
8 import com.hazelcast.core.MapEvent; 8 import com.hazelcast.core.MapEvent;
9 +import com.hazelcast.core.Member;
10 +
9 import org.apache.felix.scr.annotations.Activate; 11 import org.apache.felix.scr.annotations.Activate;
10 import org.apache.felix.scr.annotations.Component; 12 import org.apache.felix.scr.annotations.Component;
11 import org.apache.felix.scr.annotations.Reference; 13 import org.apache.felix.scr.annotations.Reference;
...@@ -13,7 +15,6 @@ import org.apache.felix.scr.annotations.ReferenceCardinality; ...@@ -13,7 +15,6 @@ import org.apache.felix.scr.annotations.ReferenceCardinality;
13 import org.onlab.onos.event.Event; 15 import org.onlab.onos.event.Event;
14 import org.onlab.onos.store.AbstractStore; 16 import org.onlab.onos.store.AbstractStore;
15 import org.onlab.onos.store.StoreDelegate; 17 import org.onlab.onos.store.StoreDelegate;
16 -import org.onlab.onos.store.common.StoreService;
17 import org.slf4j.Logger; 18 import org.slf4j.Logger;
18 19
19 import static com.google.common.base.Preconditions.checkNotNull; 20 import static com.google.common.base.Preconditions.checkNotNull;
...@@ -23,7 +24,7 @@ import static org.slf4j.LoggerFactory.getLogger; ...@@ -23,7 +24,7 @@ import static org.slf4j.LoggerFactory.getLogger;
23 * Abstraction of a distributed store based on Hazelcast. 24 * Abstraction of a distributed store based on Hazelcast.
24 */ 25 */
25 @Component(componentAbstract = true) 26 @Component(componentAbstract = true)
26 -public abstract class AbstractDistributedStore<E extends Event, D extends StoreDelegate<E>> 27 +public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDelegate<E>>
27 extends AbstractStore<E, D> { 28 extends AbstractStore<E, D> {
28 29
29 protected final Logger log = getLogger(getClass()); 30 protected final Logger log = getLogger(getClass());
...@@ -66,8 +67,9 @@ public abstract class AbstractDistributedStore<E extends Event, D extends StoreD ...@@ -66,8 +67,9 @@ public abstract class AbstractDistributedStore<E extends Event, D extends StoreD
66 * @param <K> IMap key type after deserialization 67 * @param <K> IMap key type after deserialization
67 * @param <V> IMap value type after deserialization 68 * @param <V> IMap value type after deserialization
68 */ 69 */
69 - public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> { 70 + public class RemoteCacheEventHandler<K, V> extends EntryAdapter<byte[], byte[]> {
70 71
72 + private final Member localMember;
71 private LoadingCache<K, Optional<V>> cache; 73 private LoadingCache<K, Optional<V>> cache;
72 74
73 /** 75 /**
...@@ -75,17 +77,26 @@ public abstract class AbstractDistributedStore<E extends Event, D extends StoreD ...@@ -75,17 +77,26 @@ public abstract class AbstractDistributedStore<E extends Event, D extends StoreD
75 * 77 *
76 * @param cache cache to update 78 * @param cache cache to update
77 */ 79 */
78 - public RemoteEventHandler(LoadingCache<K, Optional<V>> cache) { 80 + public RemoteCacheEventHandler(LoadingCache<K, Optional<V>> cache) {
81 + this.localMember = theInstance.getCluster().getLocalMember();
79 this.cache = checkNotNull(cache); 82 this.cache = checkNotNull(cache);
80 } 83 }
81 84
82 @Override 85 @Override
83 public void mapCleared(MapEvent event) { 86 public void mapCleared(MapEvent event) {
87 + if (localMember.equals(event.getMember())) {
88 + // ignore locally triggered event
89 + return;
90 + }
84 cache.invalidateAll(); 91 cache.invalidateAll();
85 } 92 }
86 93
87 @Override 94 @Override
88 public void entryAdded(EntryEvent<byte[], byte[]> event) { 95 public void entryAdded(EntryEvent<byte[], byte[]> event) {
96 + if (localMember.equals(event.getMember())) {
97 + // ignore locally triggered event
98 + return;
99 + }
89 K key = deserialize(event.getKey()); 100 K key = deserialize(event.getKey());
90 V newVal = deserialize(event.getValue()); 101 V newVal = deserialize(event.getValue());
91 Optional<V> newValue = Optional.of(newVal); 102 Optional<V> newValue = Optional.of(newVal);
...@@ -95,6 +106,10 @@ public abstract class AbstractDistributedStore<E extends Event, D extends StoreD ...@@ -95,6 +106,10 @@ public abstract class AbstractDistributedStore<E extends Event, D extends StoreD
95 106
96 @Override 107 @Override
97 public void entryUpdated(EntryEvent<byte[], byte[]> event) { 108 public void entryUpdated(EntryEvent<byte[], byte[]> event) {
109 + if (localMember.equals(event.getMember())) {
110 + // ignore locally triggered event
111 + return;
112 + }
98 K key = deserialize(event.getKey()); 113 K key = deserialize(event.getKey());
99 V oldVal = deserialize(event.getOldValue()); 114 V oldVal = deserialize(event.getOldValue());
100 Optional<V> oldValue = Optional.fromNullable(oldVal); 115 Optional<V> oldValue = Optional.fromNullable(oldVal);
...@@ -106,6 +121,10 @@ public abstract class AbstractDistributedStore<E extends Event, D extends StoreD ...@@ -106,6 +121,10 @@ public abstract class AbstractDistributedStore<E extends Event, D extends StoreD
106 121
107 @Override 122 @Override
108 public void entryRemoved(EntryEvent<byte[], byte[]> event) { 123 public void entryRemoved(EntryEvent<byte[], byte[]> event) {
124 + if (localMember.equals(event.getMember())) {
125 + // ignore locally triggered event
126 + return;
127 + }
109 K key = deserialize(event.getKey()); 128 K key = deserialize(event.getKey());
110 V val = deserialize(event.getOldValue()); 129 V val = deserialize(event.getOldValue());
111 cache.invalidate(key); 130 cache.invalidate(key);
...@@ -141,4 +160,80 @@ public abstract class AbstractDistributedStore<E extends Event, D extends StoreD ...@@ -141,4 +160,80 @@ public abstract class AbstractDistributedStore<E extends Event, D extends StoreD
141 } 160 }
142 } 161 }
143 162
    /**
     * Distributed object remote event entry listener.
     * <p>
     * Relays Hazelcast map entry events to the protected {@code onAdd},
     * {@code onUpdate} and {@code onRemove} hooks after deserializing keys
     * and values, ignoring events that originated on the local member.
     *
     * @param <K> Entry key type after deserialization
     * @param <V> Entry value type after deserialization
     */
    public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> {

        // Local cluster member, used to filter out self-originated events.
        private final Member localMember;

        public RemoteEventHandler() {
            this.localMember = theInstance.getCluster().getLocalMember();
        }

        @Override
        public void entryAdded(EntryEvent<byte[], byte[]> event) {
            if (localMember.equals(event.getMember())) {
                // ignore locally triggered event
                return;
            }
            K key = deserialize(event.getKey());
            V newVal = deserialize(event.getValue());
            onAdd(key, newVal);
        }

        @Override
        public void entryRemoved(EntryEvent<byte[], byte[]> event) {
            if (localMember.equals(event.getMember())) {
                // ignore locally triggered event
                return;
            }
            K key = deserialize(event.getKey());
            // NOTE(review): this reads event.getValue() while the sibling
            // RemoteCacheEventHandler reads event.getOldValue() on removal —
            // confirm which accessor carries the removed value here.
            V val = deserialize(event.getValue());
            onRemove(key, val);
        }

        @Override
        public void entryUpdated(EntryEvent<byte[], byte[]> event) {
            if (localMember.equals(event.getMember())) {
                // ignore locally triggered event
                return;
            }
            K key = deserialize(event.getKey());
            V oldVal = deserialize(event.getOldValue());
            V newVal = deserialize(event.getValue());
            onUpdate(key, oldVal, newVal);
        }

        /**
         * Remote entry addition hook; default implementation is a no-op.
         *
         * @param key added key
         * @param newVal added value
         */
        protected void onAdd(K key, V newVal) {
        }

        /**
         * Remote entry update hook; default implementation is a no-op.
         *
         * @param key updated key
         * @param oldValue previous value
         * @param newVal new value
         */
        protected void onUpdate(K key, V oldValue, V newVal) {
        }

        /**
         * Remote entry remove hook; default implementation is a no-op.
         *
         * @param key removed key
         * @param val value associated with the removed key
         */
        protected void onRemove(K key, V val) {
        }
    }
238 +
144 } 239 }
......
1 -package org.onlab.onos.store.impl; 1 +package org.onlab.onos.store.common;
2 2
3 import static com.google.common.base.Preconditions.checkNotNull; 3 import static com.google.common.base.Preconditions.checkNotNull;
4 4
5 -import org.onlab.onos.store.common.StoreService;
6 -
7 import com.google.common.base.Optional; 5 import com.google.common.base.Optional;
8 import com.google.common.cache.CacheLoader; 6 import com.google.common.cache.CacheLoader;
9 import com.hazelcast.core.IMap; 7 import com.hazelcast.core.IMap;
......
1 -package org.onlab.onos.store.impl; 1 +package org.onlab.onos.store.common;
2 2
3 import com.hazelcast.config.Config; 3 import com.hazelcast.config.Config;
4 import com.hazelcast.config.FileSystemXmlConfig; 4 import com.hazelcast.config.FileSystemXmlConfig;
...@@ -27,7 +27,6 @@ import org.onlab.onos.net.MastershipRole; ...@@ -27,7 +27,6 @@ import org.onlab.onos.net.MastershipRole;
27 import org.onlab.onos.net.Port; 27 import org.onlab.onos.net.Port;
28 import org.onlab.onos.net.PortNumber; 28 import org.onlab.onos.net.PortNumber;
29 import org.onlab.onos.net.provider.ProviderId; 29 import org.onlab.onos.net.provider.ProviderId;
30 -import org.onlab.onos.store.common.StoreService;
31 import org.onlab.onos.store.serializers.ConnectPointSerializer; 30 import org.onlab.onos.store.serializers.ConnectPointSerializer;
32 import org.onlab.onos.store.serializers.DefaultLinkSerializer; 31 import org.onlab.onos.store.serializers.DefaultLinkSerializer;
33 import org.onlab.onos.store.serializers.DefaultPortSerializer; 32 import org.onlab.onos.store.serializers.DefaultPortSerializer;
...@@ -35,7 +34,6 @@ import org.onlab.onos.store.serializers.DeviceIdSerializer; ...@@ -35,7 +34,6 @@ import org.onlab.onos.store.serializers.DeviceIdSerializer;
35 import org.onlab.onos.store.serializers.IpPrefixSerializer; 34 import org.onlab.onos.store.serializers.IpPrefixSerializer;
36 import org.onlab.onos.store.serializers.LinkKeySerializer; 35 import org.onlab.onos.store.serializers.LinkKeySerializer;
37 import org.onlab.onos.store.serializers.NodeIdSerializer; 36 import org.onlab.onos.store.serializers.NodeIdSerializer;
38 -import org.onlab.onos.store.serializers.OnosTimestampSerializer;
39 import org.onlab.onos.store.serializers.PortNumberSerializer; 37 import org.onlab.onos.store.serializers.PortNumberSerializer;
40 import org.onlab.onos.store.serializers.ProviderIdSerializer; 38 import org.onlab.onos.store.serializers.ProviderIdSerializer;
41 import org.onlab.packet.IpPrefix; 39 import org.onlab.packet.IpPrefix;
...@@ -102,7 +100,6 @@ public class StoreManager implements StoreService { ...@@ -102,7 +100,6 @@ public class StoreManager implements StoreService {
102 .register(DeviceId.class, new DeviceIdSerializer()) 100 .register(DeviceId.class, new DeviceIdSerializer())
103 .register(PortNumber.class, new PortNumberSerializer()) 101 .register(PortNumber.class, new PortNumberSerializer())
104 .register(DefaultPort.class, new DefaultPortSerializer()) 102 .register(DefaultPort.class, new DefaultPortSerializer())
105 - .register(OnosTimestamp.class, new OnosTimestampSerializer())
106 .register(LinkKey.class, new LinkKeySerializer()) 103 .register(LinkKey.class, new LinkKeySerializer())
107 .register(ConnectPoint.class, new ConnectPointSerializer()) 104 .register(ConnectPoint.class, new ConnectPointSerializer())
108 .register(DefaultLink.class, new DefaultLinkSerializer()) 105 .register(DefaultLink.class, new DefaultLinkSerializer())
......
1 -package org.onlab.onos.store.impl; 1 +package org.onlab.onos.store.common;
2 2
3 import java.io.FileNotFoundException; 3 import java.io.FileNotFoundException;
4 import java.util.UUID; 4 import java.util.UUID;
......
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>org.onlab.onos</groupId>
        <artifactId>onos-core-hz</artifactId>
        <version>1.0.0-SNAPSHOT</version>
        <relativePath>../pom.xml</relativePath>
    </parent>

    <artifactId>onos-core-hz-net</artifactId>
    <packaging>bundle</packaging>

    <description>ONOS Hazelcast based distributed store subsystems</description>

    <dependencies>
        <dependency>
            <groupId>org.onlab.onos</groupId>
            <artifactId>onos-api</artifactId>
        </dependency>
        <dependency>
            <groupId>org.onlab.onos</groupId>
            <artifactId>onos-core-serializers</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>org.onlab.onos</groupId>
            <artifactId>onos-core-hz-common</artifactId>
            <version>${project.version}</version>
        </dependency>
        <!-- test-jar of the common Hazelcast module: provides shared store
             test fixtures (e.g. StoreManager test harness) -->
        <dependency>
            <groupId>org.onlab.onos</groupId>
            <artifactId>onos-core-hz-common</artifactId>
            <classifier>tests</classifier>
            <scope>test</scope>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.felix</groupId>
            <artifactId>org.apache.felix.scr.annotations</artifactId>
        </dependency>
        <dependency>
            <groupId>com.hazelcast</groupId>
            <artifactId>hazelcast</artifactId>
        </dependency>
        <dependency>
            <groupId>de.javakaffee</groupId>
            <artifactId>kryo-serializers</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <!-- Generates OSGi SCR component descriptors from the
                 @Component/@Service annotations used by the stores -->
            <plugin>
                <groupId>org.apache.felix</groupId>
                <artifactId>maven-scr-plugin</artifactId>
            </plugin>
        </plugins>
    </build>

</project>
1 package org.onlab.onos.store.device.impl; 1 package org.onlab.onos.store.device.impl;
2 2
3 import static com.google.common.base.Predicates.notNull; 3 import static com.google.common.base.Predicates.notNull;
4 +
4 import com.google.common.base.Optional; 5 import com.google.common.base.Optional;
5 import com.google.common.cache.LoadingCache; 6 import com.google.common.cache.LoadingCache;
6 import com.google.common.collect.FluentIterable; 7 import com.google.common.collect.FluentIterable;
...@@ -26,9 +27,9 @@ import org.onlab.onos.net.device.DeviceStore; ...@@ -26,9 +27,9 @@ import org.onlab.onos.net.device.DeviceStore;
26 import org.onlab.onos.net.device.DeviceStoreDelegate; 27 import org.onlab.onos.net.device.DeviceStoreDelegate;
27 import org.onlab.onos.net.device.PortDescription; 28 import org.onlab.onos.net.device.PortDescription;
28 import org.onlab.onos.net.provider.ProviderId; 29 import org.onlab.onos.net.provider.ProviderId;
29 -import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache; 30 +import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
30 -import org.onlab.onos.store.impl.AbstractDistributedStore; 31 +import org.onlab.onos.store.common.AbstractHazelcastStore;
31 -import org.onlab.onos.store.impl.OptionalCacheLoader; 32 +import org.onlab.onos.store.common.OptionalCacheLoader;
32 import org.slf4j.Logger; 33 import org.slf4j.Logger;
33 34
34 import java.util.ArrayList; 35 import java.util.ArrayList;
...@@ -52,7 +53,7 @@ import static org.slf4j.LoggerFactory.getLogger; ...@@ -52,7 +53,7 @@ import static org.slf4j.LoggerFactory.getLogger;
52 @Component(immediate = true) 53 @Component(immediate = true)
53 @Service 54 @Service
54 public class DistributedDeviceStore 55 public class DistributedDeviceStore
55 - extends AbstractDistributedStore<DeviceEvent, DeviceStoreDelegate> 56 + extends AbstractHazelcastStore<DeviceEvent, DeviceStoreDelegate>
56 implements DeviceStore { 57 implements DeviceStore {
57 58
58 private final Logger log = getLogger(getClass()); 59 private final Logger log = getLogger(getClass());
...@@ -71,6 +72,10 @@ public class DistributedDeviceStore ...@@ -71,6 +72,10 @@ public class DistributedDeviceStore
71 private IMap<byte[], byte[]> rawDevicePorts; 72 private IMap<byte[], byte[]> rawDevicePorts;
72 private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts; 73 private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts;
73 74
75 + private String devicesListener;
76 +
77 + private String portsListener;
78 +
74 @Override 79 @Override
75 @Activate 80 @Activate
76 public void activate() { 81 public void activate() {
...@@ -85,7 +90,7 @@ public class DistributedDeviceStore ...@@ -85,7 +90,7 @@ public class DistributedDeviceStore
85 = new OptionalCacheLoader<>(storeService, rawDevices); 90 = new OptionalCacheLoader<>(storeService, rawDevices);
86 devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader)); 91 devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader));
87 // refresh/populate cache based on notification from other instance 92 // refresh/populate cache based on notification from other instance
88 - rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue); 93 + devicesListener = rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue);
89 94
90 // TODO cache availableDevices 95 // TODO cache availableDevices
91 availableDevices = theInstance.getSet("availableDevices"); 96 availableDevices = theInstance.getSet("availableDevices");
...@@ -95,7 +100,7 @@ public class DistributedDeviceStore ...@@ -95,7 +100,7 @@ public class DistributedDeviceStore
95 = new OptionalCacheLoader<>(storeService, rawDevicePorts); 100 = new OptionalCacheLoader<>(storeService, rawDevicePorts);
96 devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader)); 101 devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader));
97 // refresh/populate cache based on notification from other instance 102 // refresh/populate cache based on notification from other instance
98 - rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue); 103 + portsListener = rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue);
99 104
100 loadDeviceCache(); 105 loadDeviceCache();
101 loadDevicePortsCache(); 106 loadDevicePortsCache();
...@@ -105,6 +110,8 @@ public class DistributedDeviceStore ...@@ -105,6 +110,8 @@ public class DistributedDeviceStore
105 110
106 @Deactivate 111 @Deactivate
107 public void deactivate() { 112 public void deactivate() {
113 + rawDevicePorts.removeEntryListener(portsListener);
114 + rawDevices.removeEntryListener(devicesListener);
108 log.info("Stopped"); 115 log.info("Stopped");
109 } 116 }
110 117
...@@ -353,7 +360,7 @@ public class DistributedDeviceStore ...@@ -353,7 +360,7 @@ public class DistributedDeviceStore
353 } 360 }
354 } 361 }
355 362
356 - private class RemoteDeviceEventHandler extends RemoteEventHandler<DeviceId, DefaultDevice> { 363 + private class RemoteDeviceEventHandler extends RemoteCacheEventHandler<DeviceId, DefaultDevice> {
357 public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) { 364 public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) {
358 super(cache); 365 super(cache);
359 } 366 }
...@@ -374,7 +381,7 @@ public class DistributedDeviceStore ...@@ -374,7 +381,7 @@ public class DistributedDeviceStore
374 } 381 }
375 } 382 }
376 383
377 - private class RemotePortEventHandler extends RemoteEventHandler<DeviceId, Map<PortNumber, Port>> { 384 + private class RemotePortEventHandler extends RemoteCacheEventHandler<DeviceId, Map<PortNumber, Port>> {
378 public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) { 385 public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) {
379 super(cache); 386 super(cache);
380 } 387 }
......
1 +package org.onlab.onos.store.device.impl;
2 +
3 +import org.apache.felix.scr.annotations.Component;
4 +import org.apache.felix.scr.annotations.Service;
5 +import org.onlab.onos.cluster.MastershipTerm;
6 +import org.onlab.onos.net.DeviceId;
7 +import org.onlab.onos.store.ClockService;
8 +import org.onlab.onos.store.Timestamp;
9 +
10 +// FIXME: Code clone in onos-core-trivial, onos-core-hz-net
11 +/**
12 + * Dummy implementation of {@link ClockService}.
13 + */
14 +@Component(immediate = true)
15 +@Service
16 +public class NoOpClockService implements ClockService {
17 +
18 + @Override
19 + public Timestamp getTimestamp(DeviceId deviceId) {
20 + return new Timestamp() {
21 +
22 + @Override
23 + public int compareTo(Timestamp o) {
24 + throw new IllegalStateException("Never expected to be used.");
25 + }
26 + };
27 + }
28 +
29 + @Override
30 + public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
31 + }
32 +}
1 +package org.onlab.onos.store.flow.impl;
2 +
3 +import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_ADDED;
4 +import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
5 +import static org.slf4j.LoggerFactory.getLogger;
6 +
7 +import java.util.Collection;
8 +import java.util.Collections;
9 +
10 +import org.apache.felix.scr.annotations.Activate;
11 +import org.apache.felix.scr.annotations.Component;
12 +import org.apache.felix.scr.annotations.Deactivate;
13 +import org.apache.felix.scr.annotations.Service;
14 +import org.onlab.onos.ApplicationId;
15 +import org.onlab.onos.net.DeviceId;
16 +import org.onlab.onos.net.flow.DefaultFlowRule;
17 +import org.onlab.onos.net.flow.FlowRule;
18 +import org.onlab.onos.net.flow.FlowRule.FlowRuleState;
19 +import org.onlab.onos.net.flow.FlowRuleEvent;
20 +import org.onlab.onos.net.flow.FlowRuleEvent.Type;
21 +import org.onlab.onos.net.flow.FlowRuleStore;
22 +import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
23 +import org.onlab.onos.store.AbstractStore;
24 +import org.slf4j.Logger;
25 +
26 +import com.google.common.collect.ArrayListMultimap;
27 +import com.google.common.collect.ImmutableSet;
28 +import com.google.common.collect.Multimap;
29 +
30 +/**
31 + * Manages inventory of flow rules using trivial in-memory implementation.
32 + */
33 +//FIXME: I LIE I AM NOT DISTRIBUTED
34 +@Component(immediate = true)
35 +@Service
36 +public class DistributedFlowRuleStore
37 +extends AbstractStore<FlowRuleEvent, FlowRuleStoreDelegate>
38 +implements FlowRuleStore {
39 +
40 + private final Logger log = getLogger(getClass());
41 +
42 + // store entries as a pile of rules, no info about device tables
43 + private final Multimap<DeviceId, FlowRule> flowEntries =
44 + ArrayListMultimap.<DeviceId, FlowRule>create();
45 +
46 + private final Multimap<ApplicationId, FlowRule> flowEntriesById =
47 + ArrayListMultimap.<ApplicationId, FlowRule>create();
48 +
49 + @Activate
50 + public void activate() {
51 + log.info("Started");
52 + }
53 +
54 + @Deactivate
55 + public void deactivate() {
56 + log.info("Stopped");
57 + }
58 +
59 +
60 + @Override
61 + public synchronized FlowRule getFlowRule(FlowRule rule) {
62 + for (FlowRule f : flowEntries.get(rule.deviceId())) {
63 + if (f.equals(rule)) {
64 + return f;
65 + }
66 + }
67 + return null;
68 + }
69 +
70 + @Override
71 + public synchronized Iterable<FlowRule> getFlowEntries(DeviceId deviceId) {
72 + Collection<FlowRule> rules = flowEntries.get(deviceId);
73 + if (rules == null) {
74 + return Collections.emptyList();
75 + }
76 + return ImmutableSet.copyOf(rules);
77 + }
78 +
79 + @Override
80 + public synchronized Iterable<FlowRule> getFlowEntriesByAppId(ApplicationId appId) {
81 + Collection<FlowRule> rules = flowEntriesById.get(appId);
82 + if (rules == null) {
83 + return Collections.emptyList();
84 + }
85 + return ImmutableSet.copyOf(rules);
86 + }
87 +
88 + @Override
89 + public synchronized void storeFlowRule(FlowRule rule) {
90 + FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_ADD);
91 + DeviceId did = f.deviceId();
92 + if (!flowEntries.containsEntry(did, f)) {
93 + flowEntries.put(did, f);
94 + flowEntriesById.put(rule.appId(), f);
95 + }
96 + }
97 +
98 + @Override
99 + public synchronized void deleteFlowRule(FlowRule rule) {
100 + FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_REMOVE);
101 + DeviceId did = f.deviceId();
102 +
103 + /*
104 + * find the rule and mark it for deletion.
105 + * Ultimately a flow removed will come remove it.
106 + */
107 +
108 + if (flowEntries.containsEntry(did, f)) {
109 + //synchronized (flowEntries) {
110 + flowEntries.remove(did, f);
111 + flowEntries.put(did, f);
112 + flowEntriesById.remove(rule.appId(), rule);
113 + //}
114 + }
115 + }
116 +
117 + @Override
118 + public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowRule rule) {
119 + DeviceId did = rule.deviceId();
120 +
121 + // check if this new rule is an update to an existing entry
122 + if (flowEntries.containsEntry(did, rule)) {
123 + //synchronized (flowEntries) {
124 + // Multimaps support duplicates so we have to remove our rule
125 + // and replace it with the current version.
126 + flowEntries.remove(did, rule);
127 + flowEntries.put(did, rule);
128 + //}
129 + return new FlowRuleEvent(Type.RULE_UPDATED, rule);
130 + }
131 +
132 + flowEntries.put(did, rule);
133 + return new FlowRuleEvent(RULE_ADDED, rule);
134 + }
135 +
136 + @Override
137 + public synchronized FlowRuleEvent removeFlowRule(FlowRule rule) {
138 + //synchronized (this) {
139 + if (flowEntries.remove(rule.deviceId(), rule)) {
140 + return new FlowRuleEvent(RULE_REMOVED, rule);
141 + } else {
142 + return null;
143 + }
144 + //}
145 + }
146 +
147 +
148 +
149 +
150 +
151 +
152 +
153 +}
package org.onlab.onos.store.host.impl;

import static org.onlab.onos.net.host.HostEvent.Type.HOST_ADDED;
import static org.onlab.onos.net.host.HostEvent.Type.HOST_MOVED;
import static org.onlab.onos.net.host.HostEvent.Type.HOST_REMOVED;
import static org.onlab.onos.net.host.HostEvent.Type.HOST_UPDATED;
import static org.slf4j.LoggerFactory.getLogger;

import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.DefaultHost;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Host;
import org.onlab.onos.net.HostId;
import org.onlab.onos.net.host.HostDescription;
import org.onlab.onos.net.host.HostEvent;
import org.onlab.onos.net.host.HostStore;
import org.onlab.onos.net.host.HostStoreDelegate;
import org.onlab.onos.net.host.PortAddresses;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.store.AbstractStore;
import org.onlab.packet.IpPrefix;
import org.onlab.packet.MacAddress;
import org.onlab.packet.VlanId;
import org.slf4j.Logger;

import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;

/**
 * Manages inventory of end-station hosts using trivial in-memory
 * implementation.
 * <p>
 * Thread-safety: {@code hosts} is a ConcurrentHashMap and may be read
 * without locking; {@code locations} is a plain HashMultimap, so every
 * access to it (read or write) must hold the {@code this} monitor;
 * {@code portAddresses} is guarded by its own monitor.
 */
//FIXME: I LIE I AM NOT DISTRIBUTED
@Component(immediate = true)
@Service
public class DistributedHostStore
        extends AbstractStore<HostEvent, HostStoreDelegate>
        implements HostStore {

    private final Logger log = getLogger(getClass());

    // Host inventory
    private final Map<HostId, Host> hosts = new ConcurrentHashMap<>();

    // Hosts tracked by their location; guarded by "this"
    private final Multimap<ConnectPoint, Host> locations = HashMultimap.create();

    private final Map<ConnectPoint, PortAddresses> portAddresses =
            new ConcurrentHashMap<>();

    @Activate
    public void activate() {
        log.info("Started");
    }

    @Deactivate
    public void deactivate() {
        log.info("Stopped");
    }

    /**
     * Creates the host if unknown, otherwise applies the description as an
     * update; returns the resulting event, or null if nothing changed.
     */
    @Override
    public HostEvent createOrUpdateHost(ProviderId providerId, HostId hostId,
                                        HostDescription hostDescription) {
        Host host = hosts.get(hostId);
        if (host == null) {
            return createHost(providerId, hostId, hostDescription);
        }
        return updateHost(providerId, host, hostDescription);
    }

    // creates a new host and sends HOST_ADDED
    private HostEvent createHost(ProviderId providerId, HostId hostId,
                                 HostDescription descr) {
        DefaultHost newhost = new DefaultHost(providerId, hostId,
                                              descr.hwAddress(),
                                              descr.vlan(),
                                              descr.location(),
                                              descr.ipAddresses());
        synchronized (this) {
            hosts.put(hostId, newhost);
            locations.put(descr.location(), newhost);
        }
        return new HostEvent(HOST_ADDED, newhost);
    }

    // checks for type of update to host, sends appropriate event;
    // returns null when the description carries no visible change
    private HostEvent updateHost(ProviderId providerId, Host host,
                                 HostDescription descr) {
        DefaultHost updated;
        HostEvent event;
        if (!host.location().equals(descr.location())) {
            // location change dominates: emit HOST_MOVED, keep known IPs
            updated = new DefaultHost(providerId, host.id(),
                                      host.mac(),
                                      host.vlan(),
                                      descr.location(),
                                      host.ipAddresses());
            event = new HostEvent(HOST_MOVED, updated);

        } else if (!(host.ipAddresses().equals(descr.ipAddresses()))) {
            updated = new DefaultHost(providerId, host.id(),
                                      host.mac(),
                                      host.vlan(),
                                      descr.location(),
                                      descr.ipAddresses());
            event = new HostEvent(HOST_UPDATED, updated);
        } else {
            return null;
        }
        synchronized (this) {
            hosts.put(host.id(), updated);
            locations.remove(host.location(), host);
            locations.put(updated.location(), updated);
        }
        return event;
    }

    /**
     * Removes the host and its location index entry; returns the
     * HOST_REMOVED event, or null if the host was unknown.
     */
    @Override
    public HostEvent removeHost(HostId hostId) {
        synchronized (this) {
            Host host = hosts.remove(hostId);
            if (host != null) {
                locations.remove((host.location()), host);
                return new HostEvent(HOST_REMOVED, host);
            }
            return null;
        }
    }

    @Override
    public int getHostCount() {
        return hosts.size();
    }

    @Override
    public Iterable<Host> getHosts() {
        return Collections.unmodifiableSet(new HashSet<>(hosts.values()));
    }

    @Override
    public Host getHost(HostId hostId) {
        return hosts.get(hostId);
    }

    @Override
    public Set<Host> getHosts(VlanId vlanId) {
        Set<Host> vlanset = new HashSet<>();
        for (Host h : hosts.values()) {
            if (h.vlan().equals(vlanId)) {
                vlanset.add(h);
            }
        }
        return vlanset;
    }

    @Override
    public Set<Host> getHosts(MacAddress mac) {
        Set<Host> macset = new HashSet<>();
        for (Host h : hosts.values()) {
            if (h.mac().equals(mac)) {
                macset.add(h);
            }
        }
        return macset;
    }

    @Override
    public Set<Host> getHosts(IpPrefix ip) {
        Set<Host> ipset = new HashSet<>();
        for (Host h : hosts.values()) {
            if (h.ipAddresses().contains(ip)) {
                ipset.add(h);
            }
        }
        return ipset;
    }

    /**
     * Returns a snapshot of hosts attached at the given connection point.
     * Synchronized: "locations" is a non-thread-safe HashMultimap mutated
     * under the "this" monitor, so reads must take it too.
     */
    @Override
    public Set<Host> getConnectedHosts(ConnectPoint connectPoint) {
        synchronized (this) {
            return ImmutableSet.copyOf(locations.get(connectPoint));
        }
    }

    /**
     * Returns a snapshot of hosts attached anywhere on the given device.
     * Synchronized for the same reason as
     * {@link #getConnectedHosts(ConnectPoint)}.
     */
    @Override
    public Set<Host> getConnectedHosts(DeviceId deviceId) {
        Set<Host> hostset = new HashSet<>();
        synchronized (this) {
            for (ConnectPoint p : locations.keySet()) {
                if (p.deviceId().equals(deviceId)) {
                    hostset.addAll(locations.get(p));
                }
            }
        }
        return hostset;
    }

    /**
     * Merges the given addresses into any existing binding for the port;
     * a non-null incoming MAC replaces the stored one.
     */
    @Override
    public void updateAddressBindings(PortAddresses addresses) {
        synchronized (portAddresses) {
            PortAddresses existing = portAddresses.get(addresses.connectPoint());
            if (existing == null) {
                portAddresses.put(addresses.connectPoint(), addresses);
            } else {
                Set<IpPrefix> union = Sets.union(existing.ips(), addresses.ips())
                        .immutableCopy();

                MacAddress newMac = (addresses.mac() == null) ? existing.mac()
                        : addresses.mac();

                PortAddresses newAddresses =
                        new PortAddresses(addresses.connectPoint(), union, newMac);

                portAddresses.put(newAddresses.connectPoint(), newAddresses);
            }
        }
    }

    /**
     * Subtracts the given addresses from the existing binding for the port.
     */
    @Override
    public void removeAddressBindings(PortAddresses addresses) {
        synchronized (portAddresses) {
            PortAddresses existing = portAddresses.get(addresses.connectPoint());
            if (existing != null) {
                Set<IpPrefix> difference =
                        Sets.difference(existing.ips(), addresses.ips()).immutableCopy();

                // If they removed the existing mac, set the new mac to null.
                // Otherwise, keep the existing mac.
                MacAddress newMac = existing.mac();
                if (addresses.mac() != null && addresses.mac().equals(existing.mac())) {
                    newMac = null;
                }

                PortAddresses newAddresses =
                        new PortAddresses(addresses.connectPoint(), difference, newMac);

                portAddresses.put(newAddresses.connectPoint(), newAddresses);
            }
        }
    }

    @Override
    public void clearAddressBindings(ConnectPoint connectPoint) {
        synchronized (portAddresses) {
            portAddresses.remove(connectPoint);
        }
    }

    @Override
    public Set<PortAddresses> getAddressBindings() {
        synchronized (portAddresses) {
            return new HashSet<>(portAddresses.values());
        }
    }

    /**
     * Returns the binding for the port, or an empty PortAddresses if none
     * is recorded (never null).
     */
    @Override
    public PortAddresses getAddressBindingsForPort(ConnectPoint connectPoint) {
        PortAddresses addresses;

        synchronized (portAddresses) {
            addresses = portAddresses.get(connectPoint);
        }

        if (addresses == null) {
            addresses = new PortAddresses(connectPoint, null, null);
        }

        return addresses;
    }

}
...@@ -10,6 +10,7 @@ import static org.slf4j.LoggerFactory.getLogger; ...@@ -10,6 +10,7 @@ import static org.slf4j.LoggerFactory.getLogger;
10 10
11 import java.util.HashSet; 11 import java.util.HashSet;
12 import java.util.Set; 12 import java.util.Set;
13 +
13 import org.apache.felix.scr.annotations.Activate; 14 import org.apache.felix.scr.annotations.Activate;
14 import org.apache.felix.scr.annotations.Component; 15 import org.apache.felix.scr.annotations.Component;
15 import org.apache.felix.scr.annotations.Deactivate; 16 import org.apache.felix.scr.annotations.Deactivate;
...@@ -24,9 +25,9 @@ import org.onlab.onos.net.link.LinkEvent; ...@@ -24,9 +25,9 @@ import org.onlab.onos.net.link.LinkEvent;
24 import org.onlab.onos.net.link.LinkStore; 25 import org.onlab.onos.net.link.LinkStore;
25 import org.onlab.onos.net.link.LinkStoreDelegate; 26 import org.onlab.onos.net.link.LinkStoreDelegate;
26 import org.onlab.onos.net.provider.ProviderId; 27 import org.onlab.onos.net.provider.ProviderId;
27 -import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache; 28 +import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
28 -import org.onlab.onos.store.impl.AbstractDistributedStore; 29 +import org.onlab.onos.store.common.AbstractHazelcastStore;
29 -import org.onlab.onos.store.impl.OptionalCacheLoader; 30 +import org.onlab.onos.store.common.OptionalCacheLoader;
30 import org.slf4j.Logger; 31 import org.slf4j.Logger;
31 32
32 import com.google.common.base.Optional; 33 import com.google.common.base.Optional;
...@@ -43,7 +44,7 @@ import com.hazelcast.core.IMap; ...@@ -43,7 +44,7 @@ import com.hazelcast.core.IMap;
43 @Component(immediate = true) 44 @Component(immediate = true)
44 @Service 45 @Service
45 public class DistributedLinkStore 46 public class DistributedLinkStore
46 - extends AbstractDistributedStore<LinkEvent, LinkStoreDelegate> 47 + extends AbstractHazelcastStore<LinkEvent, LinkStoreDelegate>
47 implements LinkStore { 48 implements LinkStore {
48 49
49 private final Logger log = getLogger(getClass()); 50 private final Logger log = getLogger(getClass());
...@@ -57,6 +58,8 @@ public class DistributedLinkStore ...@@ -57,6 +58,8 @@ public class DistributedLinkStore
57 private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); 58 private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create();
58 private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); 59 private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create();
59 60
61 + private String linksListener;
62 +
60 @Override 63 @Override
61 @Activate 64 @Activate
62 public void activate() { 65 public void activate() {
...@@ -70,7 +73,7 @@ public class DistributedLinkStore ...@@ -70,7 +73,7 @@ public class DistributedLinkStore
70 = new OptionalCacheLoader<>(storeService, rawLinks); 73 = new OptionalCacheLoader<>(storeService, rawLinks);
71 links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader)); 74 links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader));
72 // refresh/populate cache based on notification from other instance 75 // refresh/populate cache based on notification from other instance
73 - rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue); 76 + linksListener = rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue);
74 77
75 loadLinkCache(); 78 loadLinkCache();
76 79
...@@ -79,7 +82,7 @@ public class DistributedLinkStore ...@@ -79,7 +82,7 @@ public class DistributedLinkStore
79 82
80 @Deactivate 83 @Deactivate
81 public void deactivate() { 84 public void deactivate() {
82 - super.activate(); 85 + rawLinks.removeEntryListener(linksListener);
83 log.info("Stopped"); 86 log.info("Stopped");
84 } 87 }
85 88
...@@ -232,7 +235,7 @@ public class DistributedLinkStore ...@@ -232,7 +235,7 @@ public class DistributedLinkStore
232 } 235 }
233 } 236 }
234 237
235 - private class RemoteLinkEventHandler extends RemoteEventHandler<LinkKey, DefaultLink> { 238 + private class RemoteLinkEventHandler extends RemoteCacheEventHandler<LinkKey, DefaultLink> {
236 public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) { 239 public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) {
237 super(cache); 240 super(cache);
238 } 241 }
......
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import com.google.common.collect.ImmutableMap;
4 +import com.google.common.collect.ImmutableSet;
5 +import com.google.common.collect.ImmutableSetMultimap;
6 +import org.onlab.graph.DijkstraGraphSearch;
7 +import org.onlab.graph.GraphPathSearch;
8 +import org.onlab.graph.TarjanGraphSearch;
9 +import org.onlab.onos.net.AbstractModel;
10 +import org.onlab.onos.net.ConnectPoint;
11 +import org.onlab.onos.net.DefaultPath;
12 +import org.onlab.onos.net.DeviceId;
13 +import org.onlab.onos.net.Link;
14 +import org.onlab.onos.net.Path;
15 +import org.onlab.onos.net.provider.ProviderId;
16 +import org.onlab.onos.net.topology.ClusterId;
17 +import org.onlab.onos.net.topology.DefaultTopologyCluster;
18 +import org.onlab.onos.net.topology.DefaultTopologyVertex;
19 +import org.onlab.onos.net.topology.GraphDescription;
20 +import org.onlab.onos.net.topology.LinkWeight;
21 +import org.onlab.onos.net.topology.Topology;
22 +import org.onlab.onos.net.topology.TopologyCluster;
23 +import org.onlab.onos.net.topology.TopologyEdge;
24 +import org.onlab.onos.net.topology.TopologyGraph;
25 +import org.onlab.onos.net.topology.TopologyVertex;
26 +
27 +import java.util.ArrayList;
28 +import java.util.List;
29 +import java.util.Map;
30 +import java.util.Set;
31 +
32 +import static com.google.common.base.MoreObjects.toStringHelper;
33 +import static com.google.common.collect.ImmutableSetMultimap.Builder;
34 +import static org.onlab.graph.GraphPathSearch.Result;
35 +import static org.onlab.graph.TarjanGraphSearch.SCCResult;
36 +import static org.onlab.onos.net.Link.Type.INDIRECT;
37 +
38 +/**
39 + * Default implementation of the topology descriptor. This carries the
40 + * backing topology data.
41 + */
42 +public class DefaultTopology extends AbstractModel implements Topology {
43 +
44 + private static final DijkstraGraphSearch<TopologyVertex, TopologyEdge> DIJKSTRA =
45 + new DijkstraGraphSearch<>();
46 + private static final TarjanGraphSearch<TopologyVertex, TopologyEdge> TARJAN =
47 + new TarjanGraphSearch<>();
48 +
49 + private static final ProviderId PID = new ProviderId("core", "org.onlab.onos.net");
50 +
51 + private final long time;
52 + private final TopologyGraph graph;
53 +
54 + private final SCCResult<TopologyVertex, TopologyEdge> clusterResults;
55 + private final ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> results;
56 + private final ImmutableSetMultimap<PathKey, Path> paths;
57 +
58 + private final ImmutableMap<ClusterId, TopologyCluster> clusters;
59 + private final ImmutableSet<ConnectPoint> infrastructurePoints;
60 + private final ImmutableSetMultimap<ClusterId, ConnectPoint> broadcastSets;
61 +
62 + private ImmutableMap<DeviceId, TopologyCluster> clustersByDevice;
63 + private ImmutableSetMultimap<TopologyCluster, DeviceId> devicesByCluster;
64 + private ImmutableSetMultimap<TopologyCluster, Link> linksByCluster;
65 +
66 +
67 + /**
68 + * Creates a topology descriptor attributed to the specified provider.
69 + *
70 + * @param providerId identity of the provider
71 + * @param description data describing the new topology
72 + */
73 + DefaultTopology(ProviderId providerId, GraphDescription description) {
74 + super(providerId);
75 + this.time = description.timestamp();
76 +
77 + // Build the graph
78 + this.graph = new DefaultTopologyGraph(description.vertexes(),
79 + description.edges());
80 +
81 + this.results = searchForShortestPaths();
82 + this.paths = buildPaths();
83 +
84 + this.clusterResults = searchForClusters();
85 + this.clusters = buildTopologyClusters();
86 +
87 + buildIndexes();
88 +
89 + this.broadcastSets = buildBroadcastSets();
90 + this.infrastructurePoints = findInfrastructurePoints();
91 + }
92 +
93 + @Override
94 + public long time() {
95 + return time;
96 + }
97 +
98 + @Override
99 + public int clusterCount() {
100 + return clusters.size();
101 + }
102 +
103 + @Override
104 + public int deviceCount() {
105 + return graph.getVertexes().size();
106 + }
107 +
108 + @Override
109 + public int linkCount() {
110 + return graph.getEdges().size();
111 + }
112 +
113 + @Override
114 + public int pathCount() {
115 + return paths.size();
116 + }
117 +
118 + /**
119 + * Returns the backing topology graph.
120 + *
121 + * @return topology graph
122 + */
123 + TopologyGraph getGraph() {
124 + return graph;
125 + }
126 +
127 + /**
128 + * Returns the set of topology clusters.
129 + *
130 + * @return set of clusters
131 + */
132 + Set<TopologyCluster> getClusters() {
133 + return ImmutableSet.copyOf(clusters.values());
134 + }
135 +
136 + /**
137 + * Returns the specified topology cluster.
138 + *
139 + * @param clusterId cluster identifier
140 + * @return topology cluster
141 + */
142 + TopologyCluster getCluster(ClusterId clusterId) {
143 + return clusters.get(clusterId);
144 + }
145 +
146 + /**
147 + * Returns the topology cluster that contains the given device.
148 + *
149 + * @param deviceId device identifier
150 + * @return topology cluster
151 + */
152 + TopologyCluster getCluster(DeviceId deviceId) {
153 + return clustersByDevice.get(deviceId);
154 + }
155 +
156 + /**
157 + * Returns the set of cluster devices.
158 + *
159 + * @param cluster topology cluster
160 + * @return cluster devices
161 + */
162 + Set<DeviceId> getClusterDevices(TopologyCluster cluster) {
163 + return devicesByCluster.get(cluster);
164 + }
165 +
166 + /**
167 + * Returns the set of cluster links.
168 + *
169 + * @param cluster topology cluster
170 + * @return cluster links
171 + */
172 + Set<Link> getClusterLinks(TopologyCluster cluster) {
173 + return linksByCluster.get(cluster);
174 + }
175 +
176 + /**
177 + * Indicates whether the given point is an infrastructure link end-point.
178 + *
179 + * @param connectPoint connection point
180 + * @return true if infrastructure
181 + */
182 + boolean isInfrastructure(ConnectPoint connectPoint) {
183 + return infrastructurePoints.contains(connectPoint);
184 + }
185 +
186 + /**
187 + * Indicates whether the given point is part of a broadcast set.
188 + *
189 + * @param connectPoint connection point
190 + * @return true if in broadcast set
191 + */
192 + boolean isBroadcastPoint(ConnectPoint connectPoint) {
193 + // Any non-infrastructure, i.e. edge points are assumed to be OK.
194 + if (!isInfrastructure(connectPoint)) {
195 + return true;
196 + }
197 +
198 + // Find the cluster to which the device belongs.
199 + TopologyCluster cluster = clustersByDevice.get(connectPoint.deviceId());
200 + if (cluster == null) {
201 + throw new IllegalArgumentException("No cluster found for device " + connectPoint.deviceId());
202 + }
203 +
204 + // If the broadcast set is null or empty, or if the point explicitly
205 + // belongs to it, return true;
206 + Set<ConnectPoint> points = broadcastSets.get(cluster.id());
207 + return points == null || points.isEmpty() || points.contains(connectPoint);
208 + }
209 +
210 + /**
211 + * Returns the size of the cluster broadcast set.
212 + *
213 + * @param clusterId cluster identifier
214 + * @return size of the cluster broadcast set
215 + */
216 + int broadcastSetSize(ClusterId clusterId) {
217 + return broadcastSets.get(clusterId).size();
218 + }
219 +
220 + /**
221 + * Returns the set of pre-computed shortest paths between source and
222 + * destination devices.
223 + *
224 + * @param src source device
225 + * @param dst destination device
226 + * @return set of shortest paths
227 + */
228 + Set<Path> getPaths(DeviceId src, DeviceId dst) {
229 + return paths.get(new PathKey(src, dst));
230 + }
231 +
    /**
     * Computes on-demand the set of shortest paths between source and
     * destination devices, using the supplied link weight function.
     *
     * @param src    source device
     * @param dst    destination device
     * @param weight link weight function used to price each traversed edge
     * @return set of shortest paths
     */
    Set<Path> getPaths(DeviceId src, DeviceId dst, LinkWeight weight) {
        // Run a fresh Dijkstra search rather than consulting the
        // pre-computed hop-count paths, since the caller supplies a weight.
        GraphPathSearch.Result<TopologyVertex, TopologyEdge> result =
                DIJKSTRA.search(graph, new DefaultTopologyVertex(src),
                        new DefaultTopologyVertex(dst), weight);
        ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
        for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
            builder.add(networkPath(path));
        }
        return builder.build();
    }
250 +
251 +
252 + // Searches the graph for all shortest paths and returns the search results.
253 + private ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> searchForShortestPaths() {
254 + ImmutableMap.Builder<DeviceId, Result<TopologyVertex, TopologyEdge>> builder = ImmutableMap.builder();
255 +
256 + // Search graph paths for each source to all destinations.
257 + LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size());
258 + for (TopologyVertex src : graph.getVertexes()) {
259 + builder.put(src.deviceId(), DIJKSTRA.search(graph, src, null, weight));
260 + }
261 + return builder.build();
262 + }
263 +
264 + // Builds network paths from the graph path search results
265 + private ImmutableSetMultimap<PathKey, Path> buildPaths() {
266 + Builder<PathKey, Path> builder = ImmutableSetMultimap.builder();
267 + for (DeviceId deviceId : results.keySet()) {
268 + Result<TopologyVertex, TopologyEdge> result = results.get(deviceId);
269 + for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
270 + builder.put(new PathKey(path.src().deviceId(), path.dst().deviceId()),
271 + networkPath(path));
272 + }
273 + }
274 + return builder.build();
275 + }
276 +
277 + // Converts graph path to a network path with the same cost.
278 + private Path networkPath(org.onlab.graph.Path<TopologyVertex, TopologyEdge> path) {
279 + List<Link> links = new ArrayList<>();
280 + for (TopologyEdge edge : path.edges()) {
281 + links.add(edge.link());
282 + }
283 + return new DefaultPath(PID, links, path.cost());
284 + }
285 +
286 +
    // Searches for strongly-connected-component clusters in the network
    // topology graph using the Tarjan algorithm; indirect links are excluded
    // from the traversal by NoIndirectLinksWeight.
    // NOTE(review): buildTopologyClusters() currently repeats this search
    // inline instead of calling this helper — consider reusing it.
    private SCCResult<TopologyVertex, TopologyEdge> searchForClusters() {
        return TARJAN.search(graph, new NoIndirectLinksWeight());
    }
292 +
293 + // Builds the topology clusters and returns the id-cluster bindings.
294 + private ImmutableMap<ClusterId, TopologyCluster> buildTopologyClusters() {
295 + ImmutableMap.Builder<ClusterId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
296 + SCCResult<TopologyVertex, TopologyEdge> result =
297 + TARJAN.search(graph, new NoIndirectLinksWeight());
298 +
299 + // Extract both vertexes and edges from the results; the lists form
300 + // pairs along the same index.
301 + List<Set<TopologyVertex>> clusterVertexes = result.clusterVertexes();
302 + List<Set<TopologyEdge>> clusterEdges = result.clusterEdges();
303 +
304 + // Scan over the lists and create a cluster from the results.
305 + for (int i = 0, n = result.clusterCount(); i < n; i++) {
306 + Set<TopologyVertex> vertexSet = clusterVertexes.get(i);
307 + Set<TopologyEdge> edgeSet = clusterEdges.get(i);
308 +
309 + ClusterId cid = ClusterId.clusterId(i);
310 + DefaultTopologyCluster cluster =
311 + new DefaultTopologyCluster(cid, vertexSet.size(), edgeSet.size(),
312 + findRoot(vertexSet).deviceId());
313 + clusterBuilder.put(cid, cluster);
314 + }
315 + return clusterBuilder.build();
316 + }
317 +
318 + // Finds the vertex whose device id is the lexicographical minimum in the
319 + // specified set.
320 + private TopologyVertex findRoot(Set<TopologyVertex> vertexSet) {
321 + TopologyVertex minVertex = null;
322 + for (TopologyVertex vertex : vertexSet) {
323 + if (minVertex == null ||
324 + minVertex.deviceId().toString()
325 + .compareTo(minVertex.deviceId().toString()) < 0) {
326 + minVertex = vertex;
327 + }
328 + }
329 + return minVertex;
330 + }
331 +
332 + // Processes a map of broadcast sets for each cluster.
333 + private ImmutableSetMultimap<ClusterId, ConnectPoint> buildBroadcastSets() {
334 + Builder<ClusterId, ConnectPoint> builder = ImmutableSetMultimap.builder();
335 + for (TopologyCluster cluster : clusters.values()) {
336 + addClusterBroadcastSet(cluster, builder);
337 + }
338 + return builder.build();
339 + }
340 +
    // Finds all broadcast points for the cluster. These are those connection
    // points which lie along the shortest paths between the cluster root and
    // all other devices within the cluster, i.e. the end-points of the
    // shortest-path tree rooted at the cluster root.
    private void addClusterBroadcastSet(TopologyCluster cluster,
                                        Builder<ClusterId, ConnectPoint> builder) {
        // Use the graph root search results to build the broadcast set.
        Result<TopologyVertex, TopologyEdge> result = results.get(cluster.root());
        for (Map.Entry<TopologyVertex, Set<TopologyEdge>> entry : result.parents().entrySet()) {
            TopologyVertex vertex = entry.getKey();

            // Ignore any parents that lead outside the cluster.
            if (clustersByDevice.get(vertex.deviceId()) != cluster) {
                continue;
            }

            // Ignore any back-link sets that are empty.
            Set<TopologyEdge> parents = entry.getValue();
            if (parents.isEmpty()) {
                continue;
            }

            // Use the first back-link source and destinations to add to the
            // broadcast set. Only one parent edge per vertex is needed to
            // keep the set tree-shaped.
            Link link = parents.iterator().next().link();
            builder.put(cluster.id(), link.src());
            builder.put(cluster.id(), link.dst());
        }
    }
369 +
370 + // Collects and returns an set of all infrastructure link end-points.
371 + private ImmutableSet<ConnectPoint> findInfrastructurePoints() {
372 + ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder();
373 + for (TopologyEdge edge : graph.getEdges()) {
374 + builder.add(edge.link().src());
375 + builder.add(edge.link().dst());
376 + }
377 + return builder.build();
378 + }
379 +
380 + // Builds cluster-devices, cluster-links and device-cluster indexes.
381 + private void buildIndexes() {
382 + // Prepare the index builders
383 + ImmutableMap.Builder<DeviceId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
384 + ImmutableSetMultimap.Builder<TopologyCluster, DeviceId> devicesBuilder = ImmutableSetMultimap.builder();
385 + ImmutableSetMultimap.Builder<TopologyCluster, Link> linksBuilder = ImmutableSetMultimap.builder();
386 +
387 + // Now scan through all the clusters
388 + for (TopologyCluster cluster : clusters.values()) {
389 + int i = cluster.id().index();
390 +
391 + // Scan through all the cluster vertexes.
392 + for (TopologyVertex vertex : clusterResults.clusterVertexes().get(i)) {
393 + devicesBuilder.put(cluster, vertex.deviceId());
394 + clusterBuilder.put(vertex.deviceId(), cluster);
395 + }
396 +
397 + // Scan through all the cluster edges.
398 + for (TopologyEdge edge : clusterResults.clusterEdges().get(i)) {
399 + linksBuilder.put(cluster, edge.link());
400 + }
401 + }
402 +
403 + // Finalize all indexes.
404 + clustersByDevice = clusterBuilder.build();
405 + devicesByCluster = devicesBuilder.build();
406 + linksByCluster = linksBuilder.build();
407 + }
408 +
409 + // Link weight for measuring link cost as hop count with indirect links
410 + // being as expensive as traversing the entire graph to assume the worst.
411 + private static class HopCountLinkWeight implements LinkWeight {
412 + private final int indirectLinkCost;
413 +
414 + HopCountLinkWeight(int indirectLinkCost) {
415 + this.indirectLinkCost = indirectLinkCost;
416 + }
417 +
418 + @Override
419 + public double weight(TopologyEdge edge) {
420 + // To force preference to use direct paths first, make indirect
421 + // links as expensive as the linear vertex traversal.
422 + return edge.link().type() == INDIRECT ? indirectLinkCost : 1;
423 + }
424 + }
425 +
426 + // Link weight for preventing traversal over indirect links.
427 + private static class NoIndirectLinksWeight implements LinkWeight {
428 + @Override
429 + public double weight(TopologyEdge edge) {
430 + return edge.link().type() == INDIRECT ? -1 : 1;
431 + }
432 + }
433 +
    @Override
    public String toString() {
        // One-line summary of this topology snapshot: creation time plus
        // cluster, device, link and path counts; intended for logging.
        return toStringHelper(this)
                .add("time", time)
                .add("clusters", clusterCount())
                .add("devices", deviceCount())
                .add("links", linkCount())
                .add("pathCount", pathCount())
                .toString();
    }
444 +}
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import org.onlab.graph.AdjacencyListsGraph;
4 +import org.onlab.onos.net.topology.TopologyEdge;
5 +import org.onlab.onos.net.topology.TopologyGraph;
6 +import org.onlab.onos.net.topology.TopologyVertex;
7 +
8 +import java.util.Set;
9 +
/**
 * Default implementation of an immutable topology graph, based on the
 * generic adjacency-lists graph implementation. Adds no behavior of its
 * own; it merely binds the generic graph to topology vertex/edge types.
 */
public class DefaultTopologyGraph
    extends AdjacencyListsGraph<TopologyVertex, TopologyEdge>
    implements TopologyGraph {

    /**
     * Creates a topology graph comprising of the specified vertexes and edges.
     *
     * @param vertexes set of graph vertexes
     * @param edges    set of graph edges
     */
    public DefaultTopologyGraph(Set<TopologyVertex> vertexes, Set<TopologyEdge> edges) {
        super(vertexes, edges);
    }

}
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import static org.slf4j.LoggerFactory.getLogger;
4 +
5 +import java.util.List;
6 +import java.util.Set;
7 +
8 +import org.apache.felix.scr.annotations.Activate;
9 +import org.apache.felix.scr.annotations.Component;
10 +import org.apache.felix.scr.annotations.Deactivate;
11 +import org.apache.felix.scr.annotations.Service;
12 +import org.onlab.onos.event.Event;
13 +import org.onlab.onos.net.ConnectPoint;
14 +import org.onlab.onos.net.DeviceId;
15 +import org.onlab.onos.net.Link;
16 +import org.onlab.onos.net.Path;
17 +import org.onlab.onos.net.provider.ProviderId;
18 +import org.onlab.onos.net.topology.ClusterId;
19 +import org.onlab.onos.net.topology.GraphDescription;
20 +import org.onlab.onos.net.topology.LinkWeight;
21 +import org.onlab.onos.net.topology.Topology;
22 +import org.onlab.onos.net.topology.TopologyCluster;
23 +import org.onlab.onos.net.topology.TopologyEvent;
24 +import org.onlab.onos.net.topology.TopologyGraph;
25 +import org.onlab.onos.net.topology.TopologyStore;
26 +import org.onlab.onos.net.topology.TopologyStoreDelegate;
27 +import org.onlab.onos.store.AbstractStore;
28 +import org.slf4j.Logger;
29 +
30 +/**
31 + * Manages inventory of topology snapshots using trivial in-memory
32 + * structures implementation.
33 + */
34 +//FIXME: I LIE I AM NOT DISTRIBUTED
35 +@Component(immediate = true)
36 +@Service
37 +public class DistributedTopologyStore
38 +extends AbstractStore<TopologyEvent, TopologyStoreDelegate>
39 +implements TopologyStore {
40 +
41 + private final Logger log = getLogger(getClass());
42 +
43 + private volatile DefaultTopology current;
44 +
45 + @Activate
46 + public void activate() {
47 + log.info("Started");
48 + }
49 +
50 + @Deactivate
51 + public void deactivate() {
52 + log.info("Stopped");
53 + }
54 + @Override
55 + public Topology currentTopology() {
56 + return current;
57 + }
58 +
59 + @Override
60 + public boolean isLatest(Topology topology) {
61 + // Topology is current only if it is the same as our current topology
62 + return topology == current;
63 + }
64 +
65 + @Override
66 + public TopologyGraph getGraph(Topology topology) {
67 + return defaultTopology(topology).getGraph();
68 + }
69 +
70 + @Override
71 + public Set<TopologyCluster> getClusters(Topology topology) {
72 + return defaultTopology(topology).getClusters();
73 + }
74 +
75 + @Override
76 + public TopologyCluster getCluster(Topology topology, ClusterId clusterId) {
77 + return defaultTopology(topology).getCluster(clusterId);
78 + }
79 +
80 + @Override
81 + public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) {
82 + return defaultTopology(topology).getClusterDevices(cluster);
83 + }
84 +
85 + @Override
86 + public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) {
87 + return defaultTopology(topology).getClusterLinks(cluster);
88 + }
89 +
90 + @Override
91 + public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) {
92 + return defaultTopology(topology).getPaths(src, dst);
93 + }
94 +
95 + @Override
96 + public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst,
97 + LinkWeight weight) {
98 + return defaultTopology(topology).getPaths(src, dst, weight);
99 + }
100 +
101 + @Override
102 + public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) {
103 + return defaultTopology(topology).isInfrastructure(connectPoint);
104 + }
105 +
106 + @Override
107 + public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
108 + return defaultTopology(topology).isBroadcastPoint(connectPoint);
109 + }
110 +
111 + @Override
112 + public TopologyEvent updateTopology(ProviderId providerId,
113 + GraphDescription graphDescription,
114 + List<Event> reasons) {
115 + // First off, make sure that what we're given is indeed newer than
116 + // what we already have.
117 + if (current != null && graphDescription.timestamp() < current.time()) {
118 + return null;
119 + }
120 +
121 + // Have the default topology construct self from the description data.
122 + DefaultTopology newTopology =
123 + new DefaultTopology(providerId, graphDescription);
124 +
125 + // Promote the new topology to current and return a ready-to-send event.
126 + synchronized (this) {
127 + current = newTopology;
128 + return new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED, current);
129 + }
130 + }
131 +
132 + // Validates the specified topology and returns it as a default
133 + private DefaultTopology defaultTopology(Topology topology) {
134 + if (topology instanceof DefaultTopology) {
135 + return (DefaultTopology) topology;
136 + }
137 + throw new IllegalArgumentException("Topology class " + topology.getClass() +
138 + " not supported");
139 + }
140 +
141 +}
1 +package org.onlab.onos.store.topology.impl;
2 +
3 +import org.onlab.onos.net.DeviceId;
4 +
5 +import java.util.Objects;
6 +
7 +/**
8 + * Key for filing pre-computed paths between source and destination devices.
9 + */
10 +class PathKey {
11 + private final DeviceId src;
12 + private final DeviceId dst;
13 +
14 + /**
15 + * Creates a path key from the given source/dest pair.
16 + * @param src source device
17 + * @param dst destination device
18 + */
19 + PathKey(DeviceId src, DeviceId dst) {
20 + this.src = src;
21 + this.dst = dst;
22 + }
23 +
24 + @Override
25 + public int hashCode() {
26 + return Objects.hash(src, dst);
27 + }
28 +
29 + @Override
30 + public boolean equals(Object obj) {
31 + if (this == obj) {
32 + return true;
33 + }
34 + if (obj instanceof PathKey) {
35 + final PathKey other = (PathKey) obj;
36 + return Objects.equals(this.src, other.src) && Objects.equals(this.dst, other.dst);
37 + }
38 + return false;
39 + }
40 +}
...@@ -20,6 +20,7 @@ import org.junit.After; ...@@ -20,6 +20,7 @@ import org.junit.After;
20 import org.junit.AfterClass; 20 import org.junit.AfterClass;
21 import org.junit.Before; 21 import org.junit.Before;
22 import org.junit.BeforeClass; 22 import org.junit.BeforeClass;
23 +import org.junit.Ignore;
23 import org.junit.Test; 24 import org.junit.Test;
24 import org.onlab.onos.net.Device; 25 import org.onlab.onos.net.Device;
25 import org.onlab.onos.net.DeviceId; 26 import org.onlab.onos.net.DeviceId;
...@@ -32,15 +33,18 @@ import org.onlab.onos.net.device.DeviceEvent; ...@@ -32,15 +33,18 @@ import org.onlab.onos.net.device.DeviceEvent;
32 import org.onlab.onos.net.device.DeviceStoreDelegate; 33 import org.onlab.onos.net.device.DeviceStoreDelegate;
33 import org.onlab.onos.net.device.PortDescription; 34 import org.onlab.onos.net.device.PortDescription;
34 import org.onlab.onos.net.provider.ProviderId; 35 import org.onlab.onos.net.provider.ProviderId;
36 +import org.onlab.onos.store.common.StoreManager;
35 import org.onlab.onos.store.common.StoreService; 37 import org.onlab.onos.store.common.StoreService;
36 -import org.onlab.onos.store.impl.StoreManager; 38 +import org.onlab.onos.store.common.TestStoreManager;
37 -import org.onlab.onos.store.impl.TestStoreManager;
38 39
39 import com.google.common.collect.Iterables; 40 import com.google.common.collect.Iterables;
40 import com.google.common.collect.Sets; 41 import com.google.common.collect.Sets;
41 import com.hazelcast.config.Config; 42 import com.hazelcast.config.Config;
42 import com.hazelcast.core.Hazelcast; 43 import com.hazelcast.core.Hazelcast;
43 44
45 +/**
46 + * Test of the Hazelcast based distributed DeviceStore implementation.
47 + */
44 public class DistributedDeviceStoreTest { 48 public class DistributedDeviceStoreTest {
45 49
46 private static final ProviderId PID = new ProviderId("of", "foo"); 50 private static final ProviderId PID = new ProviderId("of", "foo");
...@@ -326,6 +330,7 @@ public class DistributedDeviceStoreTest { ...@@ -326,6 +330,7 @@ public class DistributedDeviceStoreTest {
326 } 330 }
327 331
328 // TODO add test for Port events when we have them 332 // TODO add test for Port events when we have them
333 + @Ignore("Ignore until Delegate spec. is clear.")
329 @Test 334 @Test
330 public final void testEvents() throws InterruptedException { 335 public final void testEvents() throws InterruptedException {
331 final CountDownLatch addLatch = new CountDownLatch(1); 336 final CountDownLatch addLatch = new CountDownLatch(1);
......
...@@ -15,6 +15,7 @@ import org.junit.After; ...@@ -15,6 +15,7 @@ import org.junit.After;
15 import org.junit.AfterClass; 15 import org.junit.AfterClass;
16 import org.junit.Before; 16 import org.junit.Before;
17 import org.junit.BeforeClass; 17 import org.junit.BeforeClass;
18 +import org.junit.Ignore;
18 import org.junit.Test; 19 import org.junit.Test;
19 import org.onlab.onos.net.ConnectPoint; 20 import org.onlab.onos.net.ConnectPoint;
20 import org.onlab.onos.net.DeviceId; 21 import org.onlab.onos.net.DeviceId;
...@@ -26,24 +27,22 @@ import org.onlab.onos.net.link.DefaultLinkDescription; ...@@ -26,24 +27,22 @@ import org.onlab.onos.net.link.DefaultLinkDescription;
26 import org.onlab.onos.net.link.LinkEvent; 27 import org.onlab.onos.net.link.LinkEvent;
27 import org.onlab.onos.net.link.LinkStoreDelegate; 28 import org.onlab.onos.net.link.LinkStoreDelegate;
28 import org.onlab.onos.net.provider.ProviderId; 29 import org.onlab.onos.net.provider.ProviderId;
30 +import org.onlab.onos.store.common.StoreManager;
29 import org.onlab.onos.store.common.StoreService; 31 import org.onlab.onos.store.common.StoreService;
30 -import org.onlab.onos.store.impl.StoreManager; 32 +import org.onlab.onos.store.common.TestStoreManager;
31 -import org.onlab.onos.store.impl.TestStoreManager;
32 33
33 import com.google.common.collect.Iterables; 34 import com.google.common.collect.Iterables;
34 import com.hazelcast.config.Config; 35 import com.hazelcast.config.Config;
35 import com.hazelcast.core.Hazelcast; 36 import com.hazelcast.core.Hazelcast;
36 37
38 +/**
39 + * Test of the Hazelcast based distributed LinkStore implementation.
40 + */
37 public class DistributedLinkStoreTest { 41 public class DistributedLinkStoreTest {
38 42
39 private static final ProviderId PID = new ProviderId("of", "foo"); 43 private static final ProviderId PID = new ProviderId("of", "foo");
40 private static final DeviceId DID1 = deviceId("of:foo"); 44 private static final DeviceId DID1 = deviceId("of:foo");
41 private static final DeviceId DID2 = deviceId("of:bar"); 45 private static final DeviceId DID2 = deviceId("of:bar");
42 -// private static final String MFR = "whitebox";
43 -// private static final String HW = "1.1.x";
44 -// private static final String SW1 = "3.8.1";
45 -// private static final String SW2 = "3.9.5";
46 -// private static final String SN = "43311-12345";
47 46
48 private static final PortNumber P1 = PortNumber.portNumber(1); 47 private static final PortNumber P1 = PortNumber.portNumber(1);
49 private static final PortNumber P2 = PortNumber.portNumber(2); 48 private static final PortNumber P2 = PortNumber.portNumber(2);
...@@ -302,6 +301,7 @@ public class DistributedLinkStoreTest { ...@@ -302,6 +301,7 @@ public class DistributedLinkStoreTest {
302 assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1)); 301 assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1));
303 } 302 }
304 303
304 + @Ignore("Ignore until Delegate spec. is clear.")
305 @Test 305 @Test
306 public final void testEvents() throws InterruptedException { 306 public final void testEvents() throws InterruptedException {
307 307
......
1 +<?xml version="1.0" encoding="UTF-8"?>
2 +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
3 + <modelVersion>4.0.0</modelVersion>
4 +
5 + <parent>
6 + <groupId>org.onlab.onos</groupId>
7 + <artifactId>onos-core-store</artifactId>
8 + <version>1.0.0-SNAPSHOT</version>
9 + <relativePath>../pom.xml</relativePath>
10 + </parent>
11 +
12 + <artifactId>onos-core-hz</artifactId>
13 + <packaging>pom</packaging>
14 +
15 + <description>ONOS Core Hazelcast Store subsystem</description>
16 +
17 + <modules>
18 + <module>common</module>
19 + <module>cluster</module>
20 + <module>net</module>
21 + </modules>
22 +
23 + <dependencies>
24 + <dependency>
25 + <groupId>com.google.guava</groupId>
26 + <artifactId>guava</artifactId>
27 + </dependency>
28 + <dependency>
29 + <groupId>org.onlab.onos</groupId>
30 + <artifactId>onlab-misc</artifactId>
31 + </dependency>
32 + <dependency>
33 + <groupId>org.onlab.onos</groupId>
34 + <artifactId>onlab-junit</artifactId>
35 + </dependency>
36 + <dependency>
37 + <groupId>com.hazelcast</groupId>
38 + <artifactId>hazelcast</artifactId>
39 + </dependency>
40 + </dependencies>
41 +
42 + <build>
43 + <plugins>
44 + <plugin>
45 + <groupId>org.apache.felix</groupId>
46 + <artifactId>maven-bundle-plugin</artifactId>
47 + </plugin>
48 + </plugins>
49 + </build>
50 +
51 +</project>
1 <?xml version="1.0" encoding="UTF-8"?> 1 <?xml version="1.0" encoding="UTF-8"?>
2 -<project xmlns="http://maven.apache.org/POM/4.0.0" 2 +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
3 - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 - xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
5 <modelVersion>4.0.0</modelVersion> 3 <modelVersion>4.0.0</modelVersion>
6 4
7 <parent> 5 <parent>
...@@ -12,34 +10,41 @@ ...@@ -12,34 +10,41 @@
12 </parent> 10 </parent>
13 11
14 <artifactId>onos-core-store</artifactId> 12 <artifactId>onos-core-store</artifactId>
15 - <packaging>bundle</packaging> 13 + <packaging>pom</packaging>
16 14
17 - <description>ONOS distributed store subsystems</description> 15 + <description>ONOS Core Store subsystem</description>
16 +
17 + <modules>
18 + <module>trivial</module>
19 + <module>dist</module>
20 + <module>hz</module>
21 + <module>serializers</module>
22 + </modules>
18 23
19 <dependencies> 24 <dependencies>
20 <dependency> 25 <dependency>
26 + <groupId>com.google.guava</groupId>
27 + <artifactId>guava</artifactId>
28 + </dependency>
29 + <dependency>
21 <groupId>org.onlab.onos</groupId> 30 <groupId>org.onlab.onos</groupId>
22 - <artifactId>onos-api</artifactId> 31 + <artifactId>onlab-misc</artifactId>
23 </dependency> 32 </dependency>
24 <dependency> 33 <dependency>
25 - <groupId>org.apache.felix</groupId> 34 + <groupId>org.onlab.onos</groupId>
26 - <artifactId>org.apache.felix.scr.annotations</artifactId> 35 + <artifactId>onlab-junit</artifactId>
27 </dependency> 36 </dependency>
28 <dependency> 37 <dependency>
29 <groupId>com.hazelcast</groupId> 38 <groupId>com.hazelcast</groupId>
30 <artifactId>hazelcast</artifactId> 39 <artifactId>hazelcast</artifactId>
31 </dependency> 40 </dependency>
32 - <dependency>
33 - <groupId>de.javakaffee</groupId>
34 - <artifactId>kryo-serializers</artifactId>
35 - </dependency>
36 </dependencies> 41 </dependencies>
37 42
38 <build> 43 <build>
39 <plugins> 44 <plugins>
40 <plugin> 45 <plugin>
41 <groupId>org.apache.felix</groupId> 46 <groupId>org.apache.felix</groupId>
42 - <artifactId>maven-scr-plugin</artifactId> 47 + <artifactId>maven-bundle-plugin</artifactId>
43 </plugin> 48 </plugin>
44 </plugins> 49 </plugins>
45 </build> 50 </build>
......
1 +<?xml version="1.0" encoding="UTF-8"?>
2 +<project xmlns="http://maven.apache.org/POM/4.0.0"
3 + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
5 + <modelVersion>4.0.0</modelVersion>
6 +
7 + <parent>
8 + <groupId>org.onlab.onos</groupId>
9 + <artifactId>onos-core-store</artifactId>
10 + <version>1.0.0-SNAPSHOT</version>
11 + <relativePath>../pom.xml</relativePath>
12 + </parent>
13 +
14 + <artifactId>onos-core-serializers</artifactId>
15 + <packaging>bundle</packaging>
16 +
17 + <description>Serializers for ONOS classes</description>
18 +
19 + <dependencies>
20 + <dependency>
21 + <groupId>org.onlab.onos</groupId>
22 + <artifactId>onos-api</artifactId>
23 + </dependency>
24 + <dependency>
25 + <groupId>org.apache.felix</groupId>
26 + <artifactId>org.apache.felix.scr.annotations</artifactId>
27 + </dependency>
28 + <dependency>
29 + <groupId>de.javakaffee</groupId>
30 + <artifactId>kryo-serializers</artifactId>
31 + </dependency>
32 + </dependencies>
33 +
34 + <build>
35 + <plugins>
36 + <plugin>
37 + <groupId>org.apache.felix</groupId>
38 + <artifactId>maven-scr-plugin</artifactId>
39 + </plugin>
40 + </plugins>
41 + </build>
42 +
43 +</project>
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
6 6
7 <parent> 7 <parent>
8 <groupId>org.onlab.onos</groupId> 8 <groupId>org.onlab.onos</groupId>
9 - <artifactId>onos-core</artifactId> 9 + <artifactId>onos-core-store</artifactId>
10 <version>1.0.0-SNAPSHOT</version> 10 <version>1.0.0-SNAPSHOT</version>
11 <relativePath>../pom.xml</relativePath> 11 <relativePath>../pom.xml</relativePath>
12 </parent> 12 </parent>
......
1 +package org.onlab.onos.net.trivial.impl;
2 +
3 +import org.apache.felix.scr.annotations.Component;
4 +import org.apache.felix.scr.annotations.Service;
5 +import org.onlab.onos.cluster.MastershipTerm;
6 +import org.onlab.onos.net.DeviceId;
7 +import org.onlab.onos.store.ClockService;
8 +import org.onlab.onos.store.Timestamp;
9 +
//FIXME: Code clone in onos-core-trivial, onos-core-hz-net
/**
 * Dummy implementation of {@link ClockService} whose timestamps must never
 * actually be compared; used where a clock is required but not exercised.
 */
@Component(immediate = true)
@Service
public class NoOpClockService implements ClockService {

    @Override
    public Timestamp getTimestamp(DeviceId deviceId) {
        // Placeholder timestamp: compareTo deliberately fails so any
        // accidental ordering of these timestamps is surfaced loudly.
        return new Timestamp() {

            @Override
            public int compareTo(Timestamp o) {
                throw new IllegalStateException("Never expected to be used.");
            }
        };
    }

    @Override
    public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
        // No-op: mastership terms are intentionally ignored.
    }
}
...@@ -20,7 +20,7 @@ import java.util.Set; ...@@ -20,7 +20,7 @@ import java.util.Set;
20 import static org.slf4j.LoggerFactory.getLogger; 20 import static org.slf4j.LoggerFactory.getLogger;
21 21
22 /** 22 /**
23 - * Manages inventory of infrastructure DEVICES using trivial in-memory 23 + * Manages inventory of infrastructure devices using trivial in-memory
24 * structures implementation. 24 * structures implementation.
25 */ 25 */
26 @Component(immediate = true) 26 @Component(immediate = true)
...@@ -68,6 +68,11 @@ public class SimpleClusterStore ...@@ -68,6 +68,11 @@ public class SimpleClusterStore
68 } 68 }
69 69
70 @Override 70 @Override
71 + public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
72 + return null;
73 + }
74 +
75 + @Override
71 public void removeNode(NodeId nodeId) { 76 public void removeNode(NodeId nodeId) {
72 } 77 }
73 78
......
...@@ -101,9 +101,6 @@ public class SimpleDeviceStore ...@@ -101,9 +101,6 @@ public class SimpleDeviceStore
101 synchronized (this) { 101 synchronized (this) {
102 devices.put(deviceId, device); 102 devices.put(deviceId, device);
103 availableDevices.add(deviceId); 103 availableDevices.add(deviceId);
104 -
105 - // For now claim the device as a master automatically.
106 - // roles.put(deviceId, MastershipRole.MASTER);
107 } 104 }
108 return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, device, null); 105 return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, device, null);
109 } 106 }
...@@ -189,7 +186,7 @@ public class SimpleDeviceStore ...@@ -189,7 +186,7 @@ public class SimpleDeviceStore
189 new DefaultPort(device, portDescription.portNumber(), 186 new DefaultPort(device, portDescription.portNumber(),
190 portDescription.isEnabled()); 187 portDescription.isEnabled());
191 ports.put(port.number(), updatedPort); 188 ports.put(port.number(), updatedPort);
192 - return new DeviceEvent(PORT_UPDATED, device, port); 189 + return new DeviceEvent(PORT_UPDATED, device, updatedPort);
193 } 190 }
194 return null; 191 return null;
195 } 192 }
......
...@@ -51,8 +51,6 @@ public class SimpleLinkStore ...@@ -51,8 +51,6 @@ public class SimpleLinkStore
51 private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create(); 51 private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create();
52 private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create(); 52 private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create();
53 53
54 - private static final Set<Link> EMPTY = ImmutableSet.of();
55 -
56 @Activate 54 @Activate
57 public void activate() { 55 public void activate() {
58 log.info("Started"); 56 log.info("Started");
......
1 +/**
2 + *
3 + */
4 +package org.onlab.onos.net.trivial.impl;
5 +
6 +import static org.junit.Assert.*;
7 +import static org.onlab.onos.net.Device.Type.SWITCH;
8 +import static org.onlab.onos.net.DeviceId.deviceId;
9 +import static org.onlab.onos.net.device.DeviceEvent.Type.*;
10 +
11 +import java.util.Arrays;
12 +import java.util.HashMap;
13 +import java.util.List;
14 +import java.util.Map;
15 +import java.util.Set;
16 +import java.util.concurrent.CountDownLatch;
17 +import java.util.concurrent.TimeUnit;
18 +
19 +import org.junit.After;
20 +import org.junit.AfterClass;
21 +import org.junit.Before;
22 +import org.junit.BeforeClass;
23 +import org.junit.Ignore;
24 +import org.junit.Test;
25 +import org.onlab.onos.net.Device;
26 +import org.onlab.onos.net.DeviceId;
27 +import org.onlab.onos.net.Port;
28 +import org.onlab.onos.net.PortNumber;
29 +import org.onlab.onos.net.device.DefaultDeviceDescription;
30 +import org.onlab.onos.net.device.DefaultPortDescription;
31 +import org.onlab.onos.net.device.DeviceDescription;
32 +import org.onlab.onos.net.device.DeviceEvent;
33 +import org.onlab.onos.net.device.DeviceStore;
34 +import org.onlab.onos.net.device.DeviceStoreDelegate;
35 +import org.onlab.onos.net.device.PortDescription;
36 +import org.onlab.onos.net.provider.ProviderId;
37 +
38 +import com.google.common.collect.Iterables;
39 +import com.google.common.collect.Sets;
40 +
41 +/**
42 + * Test of the simple DeviceStore implementation.
43 + */
44 +public class SimpleDeviceStoreTest {
45 +
46 + private static final ProviderId PID = new ProviderId("of", "foo");
47 + private static final DeviceId DID1 = deviceId("of:foo");
48 + private static final DeviceId DID2 = deviceId("of:bar");
49 + private static final String MFR = "whitebox";
50 + private static final String HW = "1.1.x";
51 + private static final String SW1 = "3.8.1";
52 + private static final String SW2 = "3.9.5";
53 + private static final String SN = "43311-12345";
54 +
55 + private static final PortNumber P1 = PortNumber.portNumber(1);
56 + private static final PortNumber P2 = PortNumber.portNumber(2);
57 + private static final PortNumber P3 = PortNumber.portNumber(3);
58 +
59 + private SimpleDeviceStore simpleDeviceStore;
60 + private DeviceStore deviceStore;
61 +
62 +
63 +
64 + @BeforeClass
65 + public static void setUpBeforeClass() throws Exception {
66 + }
67 +
68 + @AfterClass
69 + public static void tearDownAfterClass() throws Exception {
70 + }
71 +
72 +
73 + @Before
74 + public void setUp() throws Exception {
75 + simpleDeviceStore = new SimpleDeviceStore();
76 + simpleDeviceStore.activate();
77 + deviceStore = simpleDeviceStore;
78 + }
79 +
80 + @After
81 + public void tearDown() throws Exception {
82 + simpleDeviceStore.deactivate();
83 + }
84 +
85 + private void putDevice(DeviceId deviceId, String swVersion) {
86 + DeviceDescription description =
87 + new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR,
88 + HW, swVersion, SN);
89 + deviceStore.createOrUpdateDevice(PID, deviceId, description);
90 + }
91 +
92 + private static void assertDevice(DeviceId id, String swVersion, Device device) {
93 + assertNotNull(device);
94 + assertEquals(id, device.id());
95 + assertEquals(MFR, device.manufacturer());
96 + assertEquals(HW, device.hwVersion());
97 + assertEquals(swVersion, device.swVersion());
98 + assertEquals(SN, device.serialNumber());
99 + }
100 +
101 + @Test
102 + public final void testGetDeviceCount() {
103 + assertEquals("initialy empty", 0, deviceStore.getDeviceCount());
104 +
105 + putDevice(DID1, SW1);
106 + putDevice(DID2, SW2);
107 + putDevice(DID1, SW1);
108 +
109 + assertEquals("expect 2 uniq devices", 2, deviceStore.getDeviceCount());
110 + }
111 +
112 + @Test
113 + public final void testGetDevices() {
114 + assertEquals("initialy empty", 0, Iterables.size(deviceStore.getDevices()));
115 +
116 + putDevice(DID1, SW1);
117 + putDevice(DID2, SW2);
118 + putDevice(DID1, SW1);
119 +
120 + assertEquals("expect 2 uniq devices",
121 + 2, Iterables.size(deviceStore.getDevices()));
122 +
123 + Map<DeviceId, Device> devices = new HashMap<>();
124 + for (Device device : deviceStore.getDevices()) {
125 + devices.put(device.id(), device);
126 + }
127 +
128 + assertDevice(DID1, SW1, devices.get(DID1));
129 + assertDevice(DID2, SW2, devices.get(DID2));
130 +
131 + // add case for new node?
132 + }
133 +
134 + @Test
135 + public final void testGetDevice() {
136 +
137 + putDevice(DID1, SW1);
138 +
139 + assertDevice(DID1, SW1, deviceStore.getDevice(DID1));
140 + assertNull("DID2 shouldn't be there", deviceStore.getDevice(DID2));
141 + }
142 +
143 + @Test
144 + public final void testCreateOrUpdateDevice() {
145 + DeviceDescription description =
146 + new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
147 + HW, SW1, SN);
148 + DeviceEvent event = deviceStore.createOrUpdateDevice(PID, DID1, description);
149 + assertEquals(DEVICE_ADDED, event.type());
150 + assertDevice(DID1, SW1, event.subject());
151 +
152 + DeviceDescription description2 =
153 + new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
154 + HW, SW2, SN);
155 + DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2);
156 + assertEquals(DEVICE_UPDATED, event2.type());
157 + assertDevice(DID1, SW2, event2.subject());
158 +
159 + assertNull("No change expected", deviceStore.createOrUpdateDevice(PID, DID1, description2));
160 + }
161 +
162 + @Test
163 + public final void testMarkOffline() {
164 +
165 + putDevice(DID1, SW1);
166 + assertTrue(deviceStore.isAvailable(DID1));
167 +
168 + DeviceEvent event = deviceStore.markOffline(DID1);
169 + assertEquals(DEVICE_AVAILABILITY_CHANGED, event.type());
170 + assertDevice(DID1, SW1, event.subject());
171 + assertFalse(deviceStore.isAvailable(DID1));
172 +
173 + DeviceEvent event2 = deviceStore.markOffline(DID1);
174 + assertNull("No change, no event", event2);
175 +}
176 +
177 + @Test
178 + public final void testUpdatePorts() {
179 + putDevice(DID1, SW1);
180 + List<PortDescription> pds = Arrays.<PortDescription>asList(
181 + new DefaultPortDescription(P1, true),
182 + new DefaultPortDescription(P2, true)
183 + );
184 +
185 + List<DeviceEvent> events = deviceStore.updatePorts(DID1, pds);
186 +
187 + Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2);
188 + for (DeviceEvent event : events) {
189 + assertEquals(PORT_ADDED, event.type());
190 + assertDevice(DID1, SW1, event.subject());
191 + assertTrue("PortNumber is one of expected",
192 + expectedPorts.remove(event.port().number()));
193 + assertTrue("Port is enabled", event.port().isEnabled());
194 + }
195 + assertTrue("Event for all expectedport appeared", expectedPorts.isEmpty());
196 +
197 +
198 + List<PortDescription> pds2 = Arrays.<PortDescription>asList(
199 + new DefaultPortDescription(P1, false),
200 + new DefaultPortDescription(P2, true),
201 + new DefaultPortDescription(P3, true)
202 + );
203 +
204 + events = deviceStore.updatePorts(DID1, pds2);
205 + assertFalse("event should be triggered", events.isEmpty());
206 + for (DeviceEvent event : events) {
207 + PortNumber num = event.port().number();
208 + if (P1.equals(num)) {
209 + assertEquals(PORT_UPDATED, event.type());
210 + assertDevice(DID1, SW1, event.subject());
211 + assertFalse("Port is disabled", event.port().isEnabled());
212 + } else if (P2.equals(num)) {
213 + fail("P2 event not expected.");
214 + } else if (P3.equals(num)) {
215 + assertEquals(PORT_ADDED, event.type());
216 + assertDevice(DID1, SW1, event.subject());
217 + assertTrue("Port is enabled", event.port().isEnabled());
218 + } else {
219 + fail("Unknown port number encountered: " + num);
220 + }
221 + }
222 +
223 + List<PortDescription> pds3 = Arrays.<PortDescription>asList(
224 + new DefaultPortDescription(P1, false),
225 + new DefaultPortDescription(P2, true)
226 + );
227 + events = deviceStore.updatePorts(DID1, pds3);
228 + assertFalse("event should be triggered", events.isEmpty());
229 + for (DeviceEvent event : events) {
230 + PortNumber num = event.port().number();
231 + if (P1.equals(num)) {
232 + fail("P1 event not expected.");
233 + } else if (P2.equals(num)) {
234 + fail("P2 event not expected.");
235 + } else if (P3.equals(num)) {
236 + assertEquals(PORT_REMOVED, event.type());
237 + assertDevice(DID1, SW1, event.subject());
238 + assertTrue("Port was enabled", event.port().isEnabled());
239 + } else {
240 + fail("Unknown port number encountered: " + num);
241 + }
242 + }
243 +
244 + }
245 +
246 + @Test
247 + public final void testUpdatePortStatus() {
248 + putDevice(DID1, SW1);
249 + List<PortDescription> pds = Arrays.<PortDescription>asList(
250 + new DefaultPortDescription(P1, true)
251 + );
252 + deviceStore.updatePorts(DID1, pds);
253 +
254 + DeviceEvent event = deviceStore.updatePortStatus(DID1,
255 + new DefaultPortDescription(P1, false));
256 + assertEquals(PORT_UPDATED, event.type());
257 + assertDevice(DID1, SW1, event.subject());
258 + assertEquals(P1, event.port().number());
259 + assertFalse("Port is disabled", event.port().isEnabled());
260 + }
261 +
262 + @Test
263 + public final void testGetPorts() {
264 + putDevice(DID1, SW1);
265 + putDevice(DID2, SW1);
266 + List<PortDescription> pds = Arrays.<PortDescription>asList(
267 + new DefaultPortDescription(P1, true),
268 + new DefaultPortDescription(P2, true)
269 + );
270 + deviceStore.updatePorts(DID1, pds);
271 +
272 + Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2);
273 + List<Port> ports = deviceStore.getPorts(DID1);
274 + for (Port port : ports) {
275 + assertTrue("Port is enabled", port.isEnabled());
276 + assertTrue("PortNumber is one of expected",
277 + expectedPorts.remove(port.number()));
278 + }
279 + assertTrue("Event for all expectedport appeared", expectedPorts.isEmpty());
280 +
281 +
282 + assertTrue("DID2 has no ports", deviceStore.getPorts(DID2).isEmpty());
283 + }
284 +
285 + @Test
286 + public final void testGetPort() {
287 + putDevice(DID1, SW1);
288 + putDevice(DID2, SW1);
289 + List<PortDescription> pds = Arrays.<PortDescription>asList(
290 + new DefaultPortDescription(P1, true),
291 + new DefaultPortDescription(P2, false)
292 + );
293 + deviceStore.updatePorts(DID1, pds);
294 +
295 + Port port1 = deviceStore.getPort(DID1, P1);
296 + assertEquals(P1, port1.number());
297 + assertTrue("Port is enabled", port1.isEnabled());
298 +
299 + Port port2 = deviceStore.getPort(DID1, P2);
300 + assertEquals(P2, port2.number());
301 + assertFalse("Port is disabled", port2.isEnabled());
302 +
303 + Port port3 = deviceStore.getPort(DID1, P3);
304 + assertNull("P3 not expected", port3);
305 + }
306 +
307 + @Test
308 + public final void testRemoveDevice() {
309 + putDevice(DID1, SW1);
310 + putDevice(DID2, SW1);
311 +
312 + assertEquals(2, deviceStore.getDeviceCount());
313 +
314 + DeviceEvent event = deviceStore.removeDevice(DID1);
315 + assertEquals(DEVICE_REMOVED, event.type());
316 + assertDevice(DID1, SW1, event.subject());
317 +
318 + assertEquals(1, deviceStore.getDeviceCount());
319 + }
320 +
321 + // If Delegates should be called only on remote events,
322 + // then Simple* should never call them, thus not test required.
323 + // TODO add test for Port events when we have them
324 + @Ignore("Ignore until Delegate spec. is clear.")
325 + @Test
326 + public final void testEvents() throws InterruptedException {
327 + final CountDownLatch addLatch = new CountDownLatch(1);
328 + DeviceStoreDelegate checkAdd = new DeviceStoreDelegate() {
329 + @Override
330 + public void notify(DeviceEvent event) {
331 + assertEquals(DEVICE_ADDED, event.type());
332 + assertDevice(DID1, SW1, event.subject());
333 + addLatch.countDown();
334 + }
335 + };
336 + final CountDownLatch updateLatch = new CountDownLatch(1);
337 + DeviceStoreDelegate checkUpdate = new DeviceStoreDelegate() {
338 + @Override
339 + public void notify(DeviceEvent event) {
340 + assertEquals(DEVICE_UPDATED, event.type());
341 + assertDevice(DID1, SW2, event.subject());
342 + updateLatch.countDown();
343 + }
344 + };
345 + final CountDownLatch removeLatch = new CountDownLatch(1);
346 + DeviceStoreDelegate checkRemove = new DeviceStoreDelegate() {
347 + @Override
348 + public void notify(DeviceEvent event) {
349 + assertEquals(DEVICE_REMOVED, event.type());
350 + assertDevice(DID1, SW2, event.subject());
351 + removeLatch.countDown();
352 + }
353 + };
354 +
355 + DeviceDescription description =
356 + new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
357 + HW, SW1, SN);
358 + deviceStore.setDelegate(checkAdd);
359 + deviceStore.createOrUpdateDevice(PID, DID1, description);
360 + assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS));
361 +
362 +
363 + DeviceDescription description2 =
364 + new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
365 + HW, SW2, SN);
366 + deviceStore.unsetDelegate(checkAdd);
367 + deviceStore.setDelegate(checkUpdate);
368 + deviceStore.createOrUpdateDevice(PID, DID1, description2);
369 + assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS));
370 +
371 + deviceStore.unsetDelegate(checkUpdate);
372 + deviceStore.setDelegate(checkRemove);
373 + deviceStore.removeDevice(DID1);
374 + assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS));
375 + }
376 +}
1 +package org.onlab.onos.net.trivial.impl;
2 +
3 +import static org.junit.Assert.*;
4 +import static org.onlab.onos.net.DeviceId.deviceId;
5 +import static org.onlab.onos.net.Link.Type.*;
6 +import static org.onlab.onos.net.link.LinkEvent.Type.*;
7 +
8 +import java.util.HashMap;
9 +import java.util.Map;
10 +import java.util.Set;
11 +import java.util.concurrent.CountDownLatch;
12 +import java.util.concurrent.TimeUnit;
13 +
14 +import org.junit.After;
15 +import org.junit.AfterClass;
16 +import org.junit.Before;
17 +import org.junit.BeforeClass;
18 +import org.junit.Ignore;
19 +import org.junit.Test;
20 +import org.onlab.onos.net.ConnectPoint;
21 +import org.onlab.onos.net.DeviceId;
22 +import org.onlab.onos.net.Link;
23 +import org.onlab.onos.net.LinkKey;
24 +import org.onlab.onos.net.PortNumber;
25 +import org.onlab.onos.net.Link.Type;
26 +import org.onlab.onos.net.link.DefaultLinkDescription;
27 +import org.onlab.onos.net.link.LinkEvent;
28 +import org.onlab.onos.net.link.LinkStore;
29 +import org.onlab.onos.net.link.LinkStoreDelegate;
30 +import org.onlab.onos.net.provider.ProviderId;
31 +
32 +import com.google.common.collect.Iterables;
33 +
34 +/**
35 + * Test of the simple LinkStore implementation.
36 + */
37 +public class SimpleLinkStoreTest {
38 +
39 + private static final ProviderId PID = new ProviderId("of", "foo");
40 + private static final DeviceId DID1 = deviceId("of:foo");
41 + private static final DeviceId DID2 = deviceId("of:bar");
42 +
43 + private static final PortNumber P1 = PortNumber.portNumber(1);
44 + private static final PortNumber P2 = PortNumber.portNumber(2);
45 + private static final PortNumber P3 = PortNumber.portNumber(3);
46 +
47 +
48 + private SimpleLinkStore simpleLinkStore;
49 + private LinkStore linkStore;
50 +
51 + @BeforeClass
52 + public static void setUpBeforeClass() throws Exception {
53 + }
54 +
55 + @AfterClass
56 + public static void tearDownAfterClass() throws Exception {
57 + }
58 +
59 + @Before
60 + public void setUp() throws Exception {
61 + simpleLinkStore = new SimpleLinkStore();
62 + simpleLinkStore.activate();
63 + linkStore = simpleLinkStore;
64 + }
65 +
66 + @After
67 + public void tearDown() throws Exception {
68 + simpleLinkStore.deactivate();
69 + }
70 +
71 + private void putLink(DeviceId srcId, PortNumber srcNum,
72 + DeviceId dstId, PortNumber dstNum, Type type) {
73 + ConnectPoint src = new ConnectPoint(srcId, srcNum);
74 + ConnectPoint dst = new ConnectPoint(dstId, dstNum);
75 + linkStore.createOrUpdateLink(PID, new DefaultLinkDescription(src, dst, type));
76 + }
77 +
78 + private void putLink(LinkKey key, Type type) {
79 + putLink(key.src().deviceId(), key.src().port(),
80 + key.dst().deviceId(), key.dst().port(),
81 + type);
82 + }
83 +
84 + private static void assertLink(DeviceId srcId, PortNumber srcNum,
85 + DeviceId dstId, PortNumber dstNum, Type type,
86 + Link link) {
87 + assertEquals(srcId, link.src().deviceId());
88 + assertEquals(srcNum, link.src().port());
89 + assertEquals(dstId, link.dst().deviceId());
90 + assertEquals(dstNum, link.dst().port());
91 + assertEquals(type, link.type());
92 + }
93 +
94 + private static void assertLink(LinkKey key, Type type, Link link) {
95 + assertLink(key.src().deviceId(), key.src().port(),
96 + key.dst().deviceId(), key.dst().port(),
97 + type, link);
98 + }
99 +
100 + @Test
101 + public final void testGetLinkCount() {
102 + assertEquals("initialy empty", 0, linkStore.getLinkCount());
103 +
104 + putLink(DID1, P1, DID2, P2, DIRECT);
105 + putLink(DID2, P2, DID1, P1, DIRECT);
106 + putLink(DID1, P1, DID2, P2, DIRECT);
107 +
108 + assertEquals("expecting 2 unique link", 2, linkStore.getLinkCount());
109 + }
110 +
111 + @Test
112 + public final void testGetLinks() {
113 + assertEquals("initialy empty", 0,
114 + Iterables.size(linkStore.getLinks()));
115 +
116 + LinkKey linkId1 = new LinkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
117 + LinkKey linkId2 = new LinkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
118 +
119 + putLink(linkId1, DIRECT);
120 + putLink(linkId2, DIRECT);
121 + putLink(linkId1, DIRECT);
122 +
123 + assertEquals("expecting 2 unique link", 2,
124 + Iterables.size(linkStore.getLinks()));
125 +
126 + Map<LinkKey, Link> links = new HashMap<>();
127 + for (Link link : linkStore.getLinks()) {
128 + links.put(new LinkKey(link.src(), link.dst()), link);
129 + }
130 +
131 + assertLink(linkId1, DIRECT, links.get(linkId1));
132 + assertLink(linkId2, DIRECT, links.get(linkId2));
133 + }
134 +
135 + @Test
136 + public final void testGetDeviceEgressLinks() {
137 + LinkKey linkId1 = new LinkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
138 + LinkKey linkId2 = new LinkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
139 + LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
140 +
141 + putLink(linkId1, DIRECT);
142 + putLink(linkId2, DIRECT);
143 + putLink(linkId3, DIRECT);
144 +
145 + // DID1,P1 => DID2,P2
146 + // DID2,P2 => DID1,P1
147 + // DID1,P2 => DID2,P3
148 +
149 + Set<Link> links1 = linkStore.getDeviceEgressLinks(DID1);
150 + assertEquals(2, links1.size());
151 + // check
152 +
153 + Set<Link> links2 = linkStore.getDeviceEgressLinks(DID2);
154 + assertEquals(1, links2.size());
155 + assertLink(linkId2, DIRECT, links2.iterator().next());
156 + }
157 +
158 + @Test
159 + public final void testGetDeviceIngressLinks() {
160 + LinkKey linkId1 = new LinkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
161 + LinkKey linkId2 = new LinkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
162 + LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
163 +
164 + putLink(linkId1, DIRECT);
165 + putLink(linkId2, DIRECT);
166 + putLink(linkId3, DIRECT);
167 +
168 + // DID1,P1 => DID2,P2
169 + // DID2,P2 => DID1,P1
170 + // DID1,P2 => DID2,P3
171 +
172 + Set<Link> links1 = linkStore.getDeviceIngressLinks(DID2);
173 + assertEquals(2, links1.size());
174 + // check
175 +
176 + Set<Link> links2 = linkStore.getDeviceIngressLinks(DID1);
177 + assertEquals(1, links2.size());
178 + assertLink(linkId2, DIRECT, links2.iterator().next());
179 + }
180 +
181 + @Test
182 + public final void testGetLink() {
183 + ConnectPoint src = new ConnectPoint(DID1, P1);
184 + ConnectPoint dst = new ConnectPoint(DID2, P2);
185 + LinkKey linkId1 = new LinkKey(src, dst);
186 +
187 + putLink(linkId1, DIRECT);
188 +
189 + Link link = linkStore.getLink(src, dst);
190 + assertLink(linkId1, DIRECT, link);
191 +
192 + assertNull("There shouldn't be reverese link",
193 + linkStore.getLink(dst, src));
194 + }
195 +
196 + @Test
197 + public final void testGetEgressLinks() {
198 + final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
199 + final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
200 + LinkKey linkId1 = new LinkKey(d1P1, d2P2);
201 + LinkKey linkId2 = new LinkKey(d2P2, d1P1);
202 + LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
203 +
204 + putLink(linkId1, DIRECT);
205 + putLink(linkId2, DIRECT);
206 + putLink(linkId3, DIRECT);
207 +
208 + // DID1,P1 => DID2,P2
209 + // DID2,P2 => DID1,P1
210 + // DID1,P2 => DID2,P3
211 +
212 + Set<Link> links1 = linkStore.getEgressLinks(d1P1);
213 + assertEquals(1, links1.size());
214 + assertLink(linkId1, DIRECT, links1.iterator().next());
215 +
216 + Set<Link> links2 = linkStore.getEgressLinks(d2P2);
217 + assertEquals(1, links2.size());
218 + assertLink(linkId2, DIRECT, links2.iterator().next());
219 + }
220 +
221 + @Test
222 + public final void testGetIngressLinks() {
223 + final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
224 + final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
225 + LinkKey linkId1 = new LinkKey(d1P1, d2P2);
226 + LinkKey linkId2 = new LinkKey(d2P2, d1P1);
227 + LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
228 +
229 + putLink(linkId1, DIRECT);
230 + putLink(linkId2, DIRECT);
231 + putLink(linkId3, DIRECT);
232 +
233 + // DID1,P1 => DID2,P2
234 + // DID2,P2 => DID1,P1
235 + // DID1,P2 => DID2,P3
236 +
237 + Set<Link> links1 = linkStore.getIngressLinks(d2P2);
238 + assertEquals(1, links1.size());
239 + assertLink(linkId1, DIRECT, links1.iterator().next());
240 +
241 + Set<Link> links2 = linkStore.getIngressLinks(d1P1);
242 + assertEquals(1, links2.size());
243 + assertLink(linkId2, DIRECT, links2.iterator().next());
244 + }
245 +
246 + @Test
247 + public final void testCreateOrUpdateLink() {
248 + ConnectPoint src = new ConnectPoint(DID1, P1);
249 + ConnectPoint dst = new ConnectPoint(DID2, P2);
250 +
251 + // add link
252 + LinkEvent event = linkStore.createOrUpdateLink(PID,
253 + new DefaultLinkDescription(src, dst, INDIRECT));
254 +
255 + assertLink(DID1, P1, DID2, P2, INDIRECT, event.subject());
256 + assertEquals(LINK_ADDED, event.type());
257 +
258 + // update link type
259 + LinkEvent event2 = linkStore.createOrUpdateLink(PID,
260 + new DefaultLinkDescription(src, dst, DIRECT));
261 +
262 + assertLink(DID1, P1, DID2, P2, DIRECT, event2.subject());
263 + assertEquals(LINK_UPDATED, event2.type());
264 +
265 + // no change
266 + LinkEvent event3 = linkStore.createOrUpdateLink(PID,
267 + new DefaultLinkDescription(src, dst, DIRECT));
268 +
269 + assertNull("No change event expected", event3);
270 + }
271 +
272 + @Test
273 + public final void testRemoveLink() {
274 + final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
275 + final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
276 + LinkKey linkId1 = new LinkKey(d1P1, d2P2);
277 + LinkKey linkId2 = new LinkKey(d2P2, d1P1);
278 +
279 + putLink(linkId1, DIRECT);
280 + putLink(linkId2, DIRECT);
281 +
282 + // DID1,P1 => DID2,P2
283 + // DID2,P2 => DID1,P1
284 + // DID1,P2 => DID2,P3
285 +
286 + LinkEvent event = linkStore.removeLink(d1P1, d2P2);
287 + assertEquals(LINK_REMOVED, event.type());
288 + LinkEvent event2 = linkStore.removeLink(d1P1, d2P2);
289 + assertNull(event2);
290 +
291 + assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1));
292 + }
293 +
294 + // If Delegates should be called only on remote events,
295 + // then Simple* should never call them, thus not test required.
296 + @Ignore("Ignore until Delegate spec. is clear.")
297 + @Test
298 + public final void testEvents() throws InterruptedException {
299 +
300 + final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
301 + final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
302 + final LinkKey linkId1 = new LinkKey(d1P1, d2P2);
303 +
304 + final CountDownLatch addLatch = new CountDownLatch(1);
305 + LinkStoreDelegate checkAdd = new LinkStoreDelegate() {
306 + @Override
307 + public void notify(LinkEvent event) {
308 + assertEquals(LINK_ADDED, event.type());
309 + assertLink(linkId1, INDIRECT, event.subject());
310 + addLatch.countDown();
311 + }
312 + };
313 + final CountDownLatch updateLatch = new CountDownLatch(1);
314 + LinkStoreDelegate checkUpdate = new LinkStoreDelegate() {
315 + @Override
316 + public void notify(LinkEvent event) {
317 + assertEquals(LINK_UPDATED, event.type());
318 + assertLink(linkId1, DIRECT, event.subject());
319 + updateLatch.countDown();
320 + }
321 + };
322 + final CountDownLatch removeLatch = new CountDownLatch(1);
323 + LinkStoreDelegate checkRemove = new LinkStoreDelegate() {
324 + @Override
325 + public void notify(LinkEvent event) {
326 + assertEquals(LINK_REMOVED, event.type());
327 + assertLink(linkId1, DIRECT, event.subject());
328 + removeLatch.countDown();
329 + }
330 + };
331 +
332 + linkStore.setDelegate(checkAdd);
333 + putLink(linkId1, INDIRECT);
334 + assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS));
335 +
336 + linkStore.unsetDelegate(checkAdd);
337 + linkStore.setDelegate(checkUpdate);
338 + putLink(linkId1, DIRECT);
339 + assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS));
340 +
341 + linkStore.unsetDelegate(checkUpdate);
342 + linkStore.setDelegate(checkRemove);
343 + linkStore.removeLink(d1P1, d2P2);
344 + assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS));
345 + }
346 +}
...@@ -48,7 +48,17 @@ ...@@ -48,7 +48,17 @@
48 description="ONOS core components"> 48 description="ONOS core components">
49 <feature>onos-api</feature> 49 <feature>onos-api</feature>
50 <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle> 50 <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
51 - <bundle>mvn:org.onlab.onos/onos-core-store/1.0.0-SNAPSHOT</bundle> 51 + <bundle>mvn:org.onlab.onos/onos-core-dist/1.0.0-SNAPSHOT</bundle>
52 + </feature>
53 +
54 + <feature name="onos-core-hazelcast" version="1.0.0"
55 + description="ONOS core components built on hazelcast">
56 + <feature>onos-api</feature>
57 + <bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
58 + <bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle>
59 + <bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle>
60 + <bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle>
61 + <bundle>mvn:org.onlab.onos/onos-core-hz-net/1.0.0-SNAPSHOT</bundle>
52 </feature> 62 </feature>
53 63
54 <feature name="onos-core-trivial" version="1.0.0" 64 <feature name="onos-core-trivial" version="1.0.0"
......
...@@ -93,12 +93,16 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver { ...@@ -93,12 +93,16 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
93 93
94 @Override 94 @Override
95 public final void sendMsg(OFMessage m) { 95 public final void sendMsg(OFMessage m) {
96 - this.write(m); 96 + if (role == RoleState.MASTER) {
97 + this.write(m);
98 + }
97 } 99 }
98 100
99 @Override 101 @Override
100 public final void sendMsg(List<OFMessage> msgs) { 102 public final void sendMsg(List<OFMessage> msgs) {
101 - this.write(msgs); 103 + if (role == RoleState.MASTER) {
104 + this.write(msgs);
105 + }
102 } 106 }
103 107
104 @Override 108 @Override
...@@ -164,7 +168,9 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver { ...@@ -164,7 +168,9 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
164 */ 168 */
165 @Override 169 @Override
166 public final void handleMessage(OFMessage m) { 170 public final void handleMessage(OFMessage m) {
167 - this.agent.processMessage(dpid, m); 171 + if (this.role == RoleState.MASTER) {
172 + this.agent.processMessage(dpid, m);
173 + }
168 } 174 }
169 175
170 @Override 176 @Override
...@@ -226,19 +232,34 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver { ...@@ -226,19 +232,34 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
226 @Override 232 @Override
227 public abstract void processDriverHandshakeMessage(OFMessage m); 233 public abstract void processDriverHandshakeMessage(OFMessage m);
228 234
235 +
236 + // Role Handling
237 +
229 @Override 238 @Override
230 public void setRole(RoleState role) { 239 public void setRole(RoleState role) {
231 try { 240 try {
232 - log.info("Sending role {} to switch {}", role, getStringId());
233 if (this.roleMan.sendRoleRequest(role, RoleRecvStatus.MATCHED_SET_ROLE)) { 241 if (this.roleMan.sendRoleRequest(role, RoleRecvStatus.MATCHED_SET_ROLE)) {
234 - this.role = role; 242 + log.info("Sending role {} to switch {}", role, getStringId());
243 + if (role == RoleState.SLAVE || role == RoleState.EQUAL) {
244 + this.role = role;
245 + }
235 } 246 }
236 } catch (IOException e) { 247 } catch (IOException e) {
237 log.error("Unable to write to switch {}.", this.dpid); 248 log.error("Unable to write to switch {}.", this.dpid);
238 } 249 }
239 } 250 }
240 251
241 - // Role Handling 252 + @Override
253 + public void reassertRole() {
254 + if (this.getRole() == RoleState.MASTER) {
255 + log.warn("Received permission error from switch {} while " +
256 + "being master. Reasserting master role.",
257 + this.getStringId());
258 + this.setRole(RoleState.MASTER);
259 + }
260 + }
261 +
262 +
242 263
243 @Override 264 @Override
244 public void handleRole(OFMessage m) throws SwitchStateException { 265 public void handleRole(OFMessage m) throws SwitchStateException {
...@@ -246,11 +267,15 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver { ...@@ -246,11 +267,15 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
246 RoleRecvStatus rrs = roleMan.deliverRoleReply(rri); 267 RoleRecvStatus rrs = roleMan.deliverRoleReply(rri);
247 if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) { 268 if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) {
248 if (rri.getRole() == RoleState.MASTER) { 269 if (rri.getRole() == RoleState.MASTER) {
270 + this.role = rri.getRole();
249 this.transitionToMasterSwitch(); 271 this.transitionToMasterSwitch();
250 } else if (rri.getRole() == RoleState.EQUAL || 272 } else if (rri.getRole() == RoleState.EQUAL ||
251 - rri.getRole() == RoleState.MASTER) { 273 + rri.getRole() == RoleState.SLAVE) {
252 this.transitionToEqualSwitch(); 274 this.transitionToEqualSwitch();
253 } 275 }
276 + } else {
277 + return;
278 + //TODO: tell people that we failed.
254 } 279 }
255 } 280 }
256 281
...@@ -267,11 +292,15 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver { ...@@ -267,11 +292,15 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
267 new RoleReplyInfo(r, null, m.getXid())); 292 new RoleReplyInfo(r, null, m.getXid()));
268 if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) { 293 if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) {
269 if (r == RoleState.MASTER) { 294 if (r == RoleState.MASTER) {
295 + this.role = r;
270 this.transitionToMasterSwitch(); 296 this.transitionToMasterSwitch();
271 } else if (r == RoleState.EQUAL || 297 } else if (r == RoleState.EQUAL ||
272 r == RoleState.SLAVE) { 298 r == RoleState.SLAVE) {
273 this.transitionToEqualSwitch(); 299 this.transitionToEqualSwitch();
274 } 300 }
301 + } else {
302 + return;
303 + //TODO: tell people that we failed.
275 } 304 }
276 } 305 }
277 306
...@@ -285,12 +314,7 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver { ...@@ -285,12 +314,7 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
285 return true; 314 return true;
286 } 315 }
287 316
288 - @Override 317 +
289 - public void reassertRole() {
290 - if (this.getRole() == RoleState.MASTER) {
291 - this.setRole(RoleState.MASTER);
292 - }
293 - }
294 318
295 @Override 319 @Override
296 public final void setAgent(OpenFlowAgent ag) { 320 public final void setAgent(OpenFlowAgent ag) {
......
...@@ -521,9 +521,7 @@ class OFChannelHandler extends IdleStateAwareChannelHandler { ...@@ -521,9 +521,7 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
521 // if two controllers are master (even if its only for 521 // if two controllers are master (even if its only for
522 // a brief period). We might need to see if these errors 522 // a brief period). We might need to see if these errors
523 // persist before we reassert 523 // persist before we reassert
524 - log.warn("Received permission error from switch {} while" + 524 +
525 - "being master. Reasserting master role.",
526 - h.getSwitchInfoString());
527 h.sw.reassertRole(); 525 h.sw.reassertRole();
528 } else if (m.getErrType() == OFErrorType.FLOW_MOD_FAILED && 526 } else if (m.getErrType() == OFErrorType.FLOW_MOD_FAILED &&
529 ((OFFlowModFailedErrorMsg) m).getCode() == 527 ((OFFlowModFailedErrorMsg) m).getCode() ==
......
...@@ -142,9 +142,9 @@ class RoleManager implements RoleHandler { ...@@ -142,9 +142,9 @@ class RoleManager implements RoleHandler {
142 } 142 }
143 // OF1.0 switch with support for NX_ROLE_REQUEST vendor extn. 143 // OF1.0 switch with support for NX_ROLE_REQUEST vendor extn.
144 // make Role.EQUAL become Role.SLAVE 144 // make Role.EQUAL become Role.SLAVE
145 + pendingRole = role;
145 role = (role == RoleState.EQUAL) ? RoleState.SLAVE : role; 146 role = (role == RoleState.EQUAL) ? RoleState.SLAVE : role;
146 pendingXid = sendNxRoleRequest(role); 147 pendingXid = sendNxRoleRequest(role);
147 - pendingRole = role;
148 requestPending = true; 148 requestPending = true;
149 } else { 149 } else {
150 // OF1.3 switch, use OFPT_ROLE_REQUEST message 150 // OF1.3 switch, use OFPT_ROLE_REQUEST message
......
1 -<project> 1 +<?xml version="1.0" encoding="UTF-8"?>
2 +<project xmlns="http://maven.apache.org/POM/4.0.0"
3 + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4_0_0.xsd">
2 <modelVersion>4.0.0</modelVersion> 5 <modelVersion>4.0.0</modelVersion>
3 <groupId>org.onlab.tools</groupId> 6 <groupId>org.onlab.tools</groupId>
4 <artifactId>onos-build-conf</artifactId> 7 <artifactId>onos-build-conf</artifactId>
......
...@@ -15,7 +15,7 @@ env JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 ...@@ -15,7 +15,7 @@ env JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
15 15
16 pre-stop script 16 pre-stop script
17 /opt/onos/bin/onos halt 2>/opt/onos/var/stderr.log 17 /opt/onos/bin/onos halt 2>/opt/onos/var/stderr.log
18 - sleep 3 18 + sleep 2
19 end script 19 end script
20 20
21 script 21 script
......
...@@ -8,7 +8,21 @@ ...@@ -8,7 +8,21 @@
8 8
9 remote=$ONOS_USER@${1:-$OCI} 9 remote=$ONOS_USER@${1:-$OCI}
10 10
11 +# Generate a cluster.json from the ON* environment variables
12 +CDEF_FILE=/tmp/cluster.json
13 +echo "{ \"nodes\":[" > $CDEF_FILE
14 +for node in $(env | sort | egrep "OC[2-9]+" | cut -d= -f2); do
15 + echo " { \"id\": \"$node\", \"ip\": \"$node\", \"tcpPort\": 9876 }," >> $CDEF_FILE
16 +done
17 +echo " { \"id\": \"$OC1\", \"ip\": \"$OC1\", \"tcpPort\": 9876 }" >> $CDEF_FILE
18 +echo "]}" >> $CDEF_FILE
19 +
11 ssh $remote " 20 ssh $remote "
12 sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \ 21 sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \
13 $ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml 22 $ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml
14 -"
...\ No newline at end of file ...\ No newline at end of file
23 +
24 + echo \"onos.ip = \$(ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \
25 + >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties
26 +"
27 +
28 +scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/
...\ No newline at end of file ...\ No newline at end of file
......
...@@ -24,6 +24,7 @@ ssh $remote " ...@@ -24,6 +24,7 @@ ssh $remote "
24 # Make a link to the log file directory and make a home for auxiliaries 24 # Make a link to the log file directory and make a home for auxiliaries
25 ln -s $ONOS_INSTALL_DIR/$KARAF_DIST/data/log /opt/onos/log 25 ln -s $ONOS_INSTALL_DIR/$KARAF_DIST/data/log /opt/onos/log
26 mkdir $ONOS_INSTALL_DIR/var 26 mkdir $ONOS_INSTALL_DIR/var
27 + mkdir $ONOS_INSTALL_DIR/config
27 28
28 # Install the upstart configuration file and setup options for debugging 29 # Install the upstart configuration file and setup options for debugging
29 sudo cp $ONOS_INSTALL_DIR/debian/onos.conf /etc/init/onos.conf 30 sudo cp $ONOS_INSTALL_DIR/debian/onos.conf /etc/init/onos.conf
......
1 +#!/bin/bash
2 +#-------------------------------------------------------------------------------
3 +# Verifies connectivity to each node in ONOS cell.
4 +#-------------------------------------------------------------------------------
5 +
6 +[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
7 +. $ONOS_ROOT/tools/build/envDefaults
8 +
9 +SSHCMD="ssh -o PasswordAuthentication=no"
10 +SCPCMD="scp -q -o PasswordAuthentication=no"
11 +
12 +echo "Copying topology files to mininet vm."
13 +$SSHCMD -n $ONOS_USER@$OCN mkdir -p topos
14 +$SCPCMD $ONOS_ROOT/tools/test/topos/* $ONOS_USER@$OCN:topos/
15 +
16 +echo "Starting Network."
17 +$SSHCMD -t $ONOS_USER@$OCN sudo python topos/sol.py $(env | sort | egrep "OC[0-9]+" | cut -d= -f2)
1 # Default virtual box ONOS instances 1,2 & ONOS mininet box 1 # Default virtual box ONOS instances 1,2 & ONOS mininet box
2 . $ONOS_ROOT/tools/test/cells/.reset 2 . $ONOS_ROOT/tools/test/cells/.reset
3 3
4 +export ONOS_NIC=192.168.56.*
5 +
4 export OC1="192.168.56.101" 6 export OC1="192.168.56.101"
5 export OC2="192.168.56.102" 7 export OC2="192.168.56.102"
6 8
......
1 #!/usr/bin/python 1 #!/usr/bin/python
2 import sys, solar 2 import sys, solar
3 -topo = solar.Solar(cip=sys.argv[1]) 3 +topo = solar.Solar(cips=sys.argv[1:])
4 topo.run() 4 topo.run()
......
...@@ -17,22 +17,22 @@ class CustomCLI(CLI): ...@@ -17,22 +17,22 @@ class CustomCLI(CLI):
17 class Solar(object): 17 class Solar(object):
18 """ Create a tiered topology from semi-scratch in Mininet """ 18 """ Create a tiered topology from semi-scratch in Mininet """
19 19
20 - def __init__(self, cname='onos', cip='192.168.56.1', islands=3, edges=2, hosts=2, 20 + def __init__(self, cname='onos', cips=['192.168.56.1'], islands=3, edges=2, hosts=2):
21 - proto=None):
22 """Create tower topology for mininet""" 21 """Create tower topology for mininet"""
23 22
24 # We are creating the controller with local-loopback on purpose to avoid 23 # We are creating the controller with local-loopback on purpose to avoid
25 # having the switches connect immediately. Instead, we'll set controller 24 # having the switches connect immediately. Instead, we'll set controller
26 # explicitly for each switch after configuring it as we want. 25 # explicitly for each switch after configuring it as we want.
27 - self.flare = RemoteController(cname, cip, 6633) 26 + self.ctrls = [ RemoteController(cname, cip, 6633) for cip in cips ]
28 - self.net = Mininet(controller=self.flare, switch = OVSKernelSwitch, 27 + self.net = Mininet(controller=RemoteController, switch = OVSKernelSwitch,
29 build=False) 28 build=False)
30 29
31 - self.cip = cip 30 + self.cips = cips
32 self.spines = [] 31 self.spines = []
33 self.leaves = [] 32 self.leaves = []
34 self.hosts = [] 33 self.hosts = []
35 - self.proto = proto 34 + for ctrl in self.ctrls:
35 + self.net.addController(ctrl)
36 36
37 # Create the two core switches and links between them 37 # Create the two core switches and links between them
38 c1 = self.net.addSwitch('c1',dpid='1111000000000000') 38 c1 = self.net.addSwitch('c1',dpid='1111000000000000')
...@@ -83,29 +83,11 @@ class Solar(object): ...@@ -83,29 +83,11 @@ class Solar(object):
83 83
84 def run(self): 84 def run(self):
85 """ Runs the created network topology and launches mininet cli""" 85 """ Runs the created network topology and launches mininet cli"""
86 - self.run_silent() 86 + self.net.build()
87 + self.net.start()
87 CustomCLI(self.net) 88 CustomCLI(self.net)
88 self.net.stop() 89 self.net.stop()
89 90
90 - def run_silent(self):
91 - """ Runs silently - for unit testing """
92 - self.net.build()
93 -
94 - # Start the switches, configure them with desired protocols and only
95 - # then set the controller
96 - for sw in self.spines:
97 - sw.start([self.flare])
98 - if self.proto:
99 - sw.cmd('ovs-vsctl set bridge %(sw)s protocols=%(proto)s' % \
100 - { 'sw': sw.name, 'proto': self.proto})
101 - sw.cmdPrint('ovs-vsctl set-controller %(sw)s tcp:%(ctl)s:6633' % \
102 - {'sw': sw.name, 'ctl': self.cip})
103 -
104 - for sw in self.leaves:
105 - sw.start([self.flare])
106 - sw.cmdPrint('ovs-vsctl set-controller %(sw)s tcp:%(ctl)s:6633' % \
107 - {'sw': sw.name, 'ctl': self.cip})
108 -
109 def pingAll(self): 91 def pingAll(self):
110 """ PingAll to create flows - for unit testing """ 92 """ PingAll to create flows - for unit testing """
111 self.net.pingAll() 93 self.net.pingAll()
......
1 package org.onlab.util; 1 package org.onlab.util;
2 2
3 +import java.nio.ByteBuffer;
3 import java.util.ArrayList; 4 import java.util.ArrayList;
4 import java.util.List; 5 import java.util.List;
5 import java.util.concurrent.ConcurrentLinkedQueue; 6 import java.util.concurrent.ConcurrentLinkedQueue;
...@@ -8,6 +9,8 @@ import org.apache.commons.lang3.tuple.Pair; ...@@ -8,6 +9,8 @@ import org.apache.commons.lang3.tuple.Pair;
8 9
9 import com.esotericsoftware.kryo.Kryo; 10 import com.esotericsoftware.kryo.Kryo;
10 import com.esotericsoftware.kryo.Serializer; 11 import com.esotericsoftware.kryo.Serializer;
12 +import com.esotericsoftware.kryo.io.ByteBufferInput;
13 +import com.esotericsoftware.kryo.io.ByteBufferOutput;
11 import com.esotericsoftware.kryo.io.Input; 14 import com.esotericsoftware.kryo.io.Input;
12 import com.esotericsoftware.kryo.io.Output; 15 import com.esotericsoftware.kryo.io.Output;
13 import com.google.common.collect.ImmutableList; 16 import com.google.common.collect.ImmutableList;
...@@ -174,6 +177,22 @@ public final class KryoPool { ...@@ -174,6 +177,22 @@ public final class KryoPool {
174 } 177 }
175 178
176 /** 179 /**
180 + * Serializes given object to byte buffer using Kryo instance in pool.
181 + *
182 + * @param obj Object to serialize
183 + * @param buffer to write to
184 + */
185 + public void serialize(final Object obj, final ByteBuffer buffer) {
186 + ByteBufferOutput out = new ByteBufferOutput(buffer);
187 + Kryo kryo = getKryo();
188 + try {
189 + kryo.writeClassAndObject(out, obj);
190 + } finally {
191 + putKryo(kryo);
192 + }
193 + }
194 +
195 + /**
177 * Deserializes given byte array to Object using Kryo instance in pool. 196 * Deserializes given byte array to Object using Kryo instance in pool.
178 * 197 *
179 * @param bytes serialized bytes 198 * @param bytes serialized bytes
...@@ -192,6 +211,24 @@ public final class KryoPool { ...@@ -192,6 +211,24 @@ public final class KryoPool {
192 } 211 }
193 } 212 }
194 213
214 + /**
215 + * Deserializes given byte buffer to Object using Kryo instance in pool.
216 + *
217 + * @param buffer input with serialized bytes
218 + * @param <T> deserialized Object type
219 + * @return deserialized Object
220 + */
221 + public <T> T deserialize(final ByteBuffer buffer) {
222 + ByteBufferInput in = new ByteBufferInput(buffer);
223 + Kryo kryo = getKryo();
224 + try {
225 + @SuppressWarnings("unchecked")
226 + T obj = (T) kryo.readClassAndObject(in);
227 + return obj;
228 + } finally {
229 + putKryo(kryo);
230 + }
231 + }
195 232
196 /** 233 /**
197 * Creates a Kryo instance with {@link #registeredTypes} pre-registered. 234 * Creates a Kryo instance with {@link #registeredTypes} pre-registered.
......
...@@ -54,6 +54,15 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> ...@@ -54,6 +54,15 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
54 } 54 }
55 55
56 /** 56 /**
57 + * Returns the number of message stream in custody of the loop.
58 + *
59 + * @return number of message streams
60 + */
61 + public int streamCount() {
62 + return streams.size();
63 + }
64 +
65 + /**
57 * Creates a new message stream backed by the specified socket channel. 66 * Creates a new message stream backed by the specified socket channel.
58 * 67 *
59 * @param byteChannel backing byte channel 68 * @param byteChannel backing byte channel
...@@ -84,14 +93,9 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> ...@@ -84,14 +93,9 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
84 * 93 *
85 * @param key selection key holding the pending connect operation. 94 * @param key selection key holding the pending connect operation.
86 */ 95 */
87 - protected void connect(SelectionKey key) { 96 + protected void connect(SelectionKey key) throws IOException {
88 - try { 97 + SocketChannel ch = (SocketChannel) key.channel();
89 - SocketChannel ch = (SocketChannel) key.channel(); 98 + ch.finishConnect();
90 - ch.finishConnect();
91 - } catch (IOException | IllegalStateException e) {
92 - log.warn("Unable to complete connection", e);
93 - }
94 -
95 if (key.isValid()) { 99 if (key.isValid()) {
96 key.interestOps(SelectionKey.OP_READ); 100 key.interestOps(SelectionKey.OP_READ);
97 } 101 }
...@@ -115,7 +119,11 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> ...@@ -115,7 +119,11 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
115 119
116 // If there is a pending connect operation, complete it. 120 // If there is a pending connect operation, complete it.
117 if (key.isConnectable()) { 121 if (key.isConnectable()) {
118 - connect(key); 122 + try {
123 + connect(key);
124 + } catch (IOException | IllegalStateException e) {
125 + log.warn("Unable to complete connection", e);
126 + }
119 } 127 }
120 128
121 // If there is a read operation, slurp as much data as possible. 129 // If there is a read operation, slurp as much data as possible.
...@@ -182,9 +190,10 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> ...@@ -182,9 +190,10 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
182 * with a pending accept operation. 190 * with a pending accept operation.
183 * 191 *
184 * @param channel backing socket channel 192 * @param channel backing socket channel
193 + * @return newly accepted message stream
185 */ 194 */
186 - public void acceptStream(SocketChannel channel) { 195 + public S acceptStream(SocketChannel channel) {
187 - createAndAdmit(channel, SelectionKey.OP_READ); 196 + return createAndAdmit(channel, SelectionKey.OP_READ);
188 } 197 }
189 198
190 199
...@@ -193,9 +202,10 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> ...@@ -193,9 +202,10 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
193 * with a pending connect operation. 202 * with a pending connect operation.
194 * 203 *
195 * @param channel backing socket channel 204 * @param channel backing socket channel
205 + * @return newly connected message stream
196 */ 206 */
197 - public void connectStream(SocketChannel channel) { 207 + public S connectStream(SocketChannel channel) {
198 - createAndAdmit(channel, SelectionKey.OP_CONNECT); 208 + return createAndAdmit(channel, SelectionKey.OP_CONNECT);
199 } 209 }
200 210
201 /** 211 /**
...@@ -205,12 +215,14 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>> ...@@ -205,12 +215,14 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
205 * @param channel socket channel 215 * @param channel socket channel
206 * @param op pending operations mask to be applied to the selection 216 * @param op pending operations mask to be applied to the selection
207 * key as a set of initial interestedOps 217 * key as a set of initial interestedOps
218 + * @return newly created message stream
208 */ 219 */
209 - private synchronized void createAndAdmit(SocketChannel channel, int op) { 220 + private synchronized S createAndAdmit(SocketChannel channel, int op) {
210 S stream = createStream(channel); 221 S stream = createStream(channel);
211 streams.add(stream); 222 streams.add(stream);
212 newStreamRequests.add(new NewStreamRequest(stream, channel, op)); 223 newStreamRequests.add(new NewStreamRequest(stream, channel, op));
213 selector.wakeup(); 224 selector.wakeup();
225 + return stream;
214 } 226 }
215 227
216 /** 228 /**
......
...@@ -10,6 +10,7 @@ import java.nio.channels.ByteChannel; ...@@ -10,6 +10,7 @@ import java.nio.channels.ByteChannel;
10 import java.nio.channels.SelectionKey; 10 import java.nio.channels.SelectionKey;
11 import java.util.ArrayList; 11 import java.util.ArrayList;
12 import java.util.List; 12 import java.util.List;
13 +import java.util.Objects;
13 14
14 import static com.google.common.base.Preconditions.checkArgument; 15 import static com.google.common.base.Preconditions.checkArgument;
15 import static com.google.common.base.Preconditions.checkNotNull; 16 import static com.google.common.base.Preconditions.checkNotNull;
...@@ -170,7 +171,7 @@ public abstract class MessageStream<M extends Message> { ...@@ -170,7 +171,7 @@ public abstract class MessageStream<M extends Message> {
170 } 171 }
171 172
172 /** 173 /**
173 - * Reads, withouth blocking, a list of messages from the stream. 174 + * Reads, without blocking, a list of messages from the stream.
174 * The list will be empty if there were not messages pending. 175 * The list will be empty if there were not messages pending.
175 * 176 *
176 * @return list of messages or null if backing channel has been closed 177 * @return list of messages or null if backing channel has been closed
...@@ -262,7 +263,7 @@ public abstract class MessageStream<M extends Message> { ...@@ -262,7 +263,7 @@ public abstract class MessageStream<M extends Message> {
262 try { 263 try {
263 channel.write(outbound); 264 channel.write(outbound);
264 } catch (IOException e) { 265 } catch (IOException e) {
265 - if (!closed && !e.getMessage().equals("Broken pipe")) { 266 + if (!closed && !Objects.equals(e.getMessage(), "Broken pipe")) {
266 log.warn("Unable to write data", e); 267 log.warn("Unable to write data", e);
267 ioError = e; 268 ioError = e;
268 } 269 }
......
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
2 * Mechanism to transfer messages over network using IO loop and 2 * Mechanism to transfer messages over network using IO loop and
3 * message stream, backed by NIO byte buffers. 3 * message stream, backed by NIO byte buffers.
4 */ 4 */
5 -package org.onlab.nio;
...\ No newline at end of file ...\ No newline at end of file
5 +package org.onlab.nio;
......
...@@ -230,7 +230,7 @@ public class IOLoopTestClient { ...@@ -230,7 +230,7 @@ public class IOLoopTestClient {
230 } 230 }
231 231
232 @Override 232 @Override
233 - protected void connect(SelectionKey key) { 233 + protected void connect(SelectionKey key) throws IOException {
234 super.connect(key); 234 super.connect(key);
235 TestMessageStream b = (TestMessageStream) key.attachment(); 235 TestMessageStream b = (TestMessageStream) key.attachment();
236 Worker w = ((CustomIOLoop) b.loop()).worker; 236 Worker w = ((CustomIOLoop) b.loop()).worker;
......