tom

Merge remote-tracking branch 'origin/master'

......@@ -5,7 +5,7 @@ import java.util.Objects;
public final class MastershipTerm {
private final NodeId master;
private int termNumber;
private final int termNumber;
private MastershipTerm(NodeId master, int term) {
this.master = master;
......
package org.onlab.onos.cluster.impl;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.slf4j.LoggerFactory.getLogger;
import java.util.Set;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
......@@ -12,6 +17,7 @@ import org.onlab.onos.cluster.MastershipEvent;
import org.onlab.onos.cluster.MastershipListener;
import org.onlab.onos.cluster.MastershipService;
import org.onlab.onos.cluster.MastershipStore;
import org.onlab.onos.cluster.MastershipStoreDelegate;
import org.onlab.onos.cluster.MastershipTerm;
import org.onlab.onos.cluster.MastershipTermService;
import org.onlab.onos.cluster.NodeId;
......@@ -21,15 +27,10 @@ import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.MastershipRole;
import org.slf4j.Logger;
import java.util.Set;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.slf4j.LoggerFactory.getLogger;
@Component(immediate = true)
@Service
public class MastershipManager
implements MastershipService, MastershipAdminService {
implements MastershipService, MastershipAdminService {
private static final String NODE_ID_NULL = "Node ID cannot be null";
private static final String DEVICE_ID_NULL = "Device ID cannot be null";
......@@ -40,6 +41,8 @@ public class MastershipManager
protected final AbstractListenerRegistry<MastershipEvent, MastershipListener>
listenerRegistry = new AbstractListenerRegistry<>();
private final MastershipStoreDelegate delegate = new InternalDelegate();
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected MastershipStore store;
......@@ -52,12 +55,14 @@ public class MastershipManager
@Activate
public void activate() {
eventDispatcher.addSink(MastershipEvent.class, listenerRegistry);
store.setDelegate(delegate);
log.info("Started");
}
@Deactivate
public void deactivate() {
eventDispatcher.removeSink(MastershipEvent.class);
store.unsetDelegate(delegate);
log.info("Stopped");
}
......@@ -141,4 +146,14 @@ public class MastershipManager
}
public class InternalDelegate implements MastershipStoreDelegate {
@Override
public void notify(MastershipEvent event) {
log.info("dispatching mastership event {}", event);
eventDispatcher.post(event);
}
}
}
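The store/delegate wiring above (store.setDelegate(delegate) in activate(), an InternalDelegate that re-posts store events to the event dispatcher) is a simple callback pattern. Below is a minimal, self-contained sketch of that pattern; the names (DelegateSketch, SimpleStore) are illustrative only, not ONOS APIs.

// Sketch of the store-delegate pattern: the store accepts a delegate and
// notifies it on changes; the manager's delegate forwards notifications on.
import java.util.function.Consumer;

public class DelegateSketch {

    /** Store side: holds a delegate and notifies it on changes. */
    static class SimpleStore<E> {
        private Consumer<E> delegate;

        void setDelegate(Consumer<E> delegate) { this.delegate = delegate; }
        void unsetDelegate(Consumer<E> delegate) {
            if (this.delegate == delegate) { this.delegate = null; }
        }
        void commit(E event) {
            if (delegate != null) { delegate.accept(event); } // notify manager
        }
    }

    public static void main(String[] args) {
        SimpleStore<String> store = new SimpleStore<>();
        // Manager side: the delegate simply re-posts the event to listeners.
        Consumer<String> dispatcher = e -> System.out.println("dispatching " + e);
        store.setDelegate(dispatcher);          // as in activate()
        store.commit("MASTER_CHANGED device=of:1");
        store.unsetDelegate(dispatcher);        // as in deactivate()
    }
}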
......
package org.onlab.onos.net.device.impl;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.onlab.onos.net.device.DeviceEvent.Type.DEVICE_MASTERSHIP_CHANGED;
import static org.slf4j.LoggerFactory.getLogger;
import java.util.List;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
......@@ -32,20 +38,14 @@ import org.onlab.onos.net.provider.AbstractProviderRegistry;
import org.onlab.onos.net.provider.AbstractProviderService;
import org.slf4j.Logger;
import java.util.List;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.onlab.onos.net.device.DeviceEvent.Type.DEVICE_MASTERSHIP_CHANGED;
import static org.slf4j.LoggerFactory.getLogger;
/**
* Provides implementation of the device SB &amp; NB APIs.
*/
@Component(immediate = true)
@Service
public class DeviceManager
extends AbstractProviderRegistry<DeviceProvider, DeviceProviderService>
implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
extends AbstractProviderRegistry<DeviceProvider, DeviceProviderService>
implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
private static final String DEVICE_ID_NULL = "Device ID cannot be null";
private static final String PORT_NUMBER_NULL = "Port number cannot be null";
......@@ -58,7 +58,7 @@ public class DeviceManager
protected final AbstractListenerRegistry<DeviceEvent, DeviceListener>
listenerRegistry = new AbstractListenerRegistry<>();
private DeviceStoreDelegate delegate = new InternalStoreDelegate();
private final DeviceStoreDelegate delegate = new InternalStoreDelegate();
private final MastershipListener mastershipListener = new InternalMastershipListener();
......@@ -249,9 +249,10 @@ public class DeviceManager
private class InternalMastershipListener implements MastershipListener {
@Override
public void event(MastershipEvent event) {
// FIXME: for now we're taking action only on becoming master
if (event.master().equals(clusterService.getLocalNode().id())) {
applyRole(event.subject(), MastershipRole.MASTER);
} else {
applyRole(event.subject(), MastershipRole.STANDBY);
}
}
}
......
package org.onlab.onos.store.cluster.impl;
import com.google.common.base.Optional;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableSet;
import com.hazelcast.core.IMap;
import static com.google.common.cache.CacheBuilder.newBuilder;
import static org.onlab.onos.cluster.MastershipEvent.Type.MASTER_CHANGED;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
......@@ -23,11 +25,10 @@ import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache;
import org.onlab.onos.store.impl.AbstractDistributedStore;
import org.onlab.onos.store.impl.OptionalCacheLoader;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import static com.google.common.cache.CacheBuilder.newBuilder;
import com.google.common.base.Optional;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableSet;
import com.hazelcast.core.IMap;
/**
* Distributed implementation of the cluster nodes store.
......@@ -35,8 +36,8 @@ import static com.google.common.cache.CacheBuilder.newBuilder;
@Component(immediate = true)
@Service
public class DistributedMastershipStore
extends AbstractDistributedStore<MastershipEvent, MastershipStoreDelegate>
implements MastershipStore {
extends AbstractDistributedStore<MastershipEvent, MastershipStoreDelegate>
implements MastershipStore {
private IMap<byte[], byte[]> rawMasters;
private LoadingCache<DeviceId, Optional<NodeId>> masters;
......@@ -53,7 +54,7 @@ public class DistributedMastershipStore
OptionalCacheLoader<DeviceId, NodeId> nodeLoader
= new OptionalCacheLoader<>(storeService, rawMasters);
masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
rawMasters.addEntryListener(new RemoteEventHandler<>(masters), true);
rawMasters.addEntryListener(new RemoteMasterShipEventHandler(masters), true);
loadMasters();
......@@ -122,4 +123,25 @@ public class DistributedMastershipStore
return null;
}
private class RemoteMasterShipEventHandler extends RemoteEventHandler<DeviceId, NodeId> {
public RemoteMasterShipEventHandler(LoadingCache<DeviceId, Optional<NodeId>> cache) {
super(cache);
}
@Override
protected void onAdd(DeviceId deviceId, NodeId nodeId) {
notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
}
@Override
protected void onRemove(DeviceId deviceId, NodeId nodeId) {
notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
}
@Override
protected void onUpdate(DeviceId deviceId, NodeId oldNodeId, NodeId nodeId) {
notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
}
}
}
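RemoteMasterShipEventHandler above specializes a generic remote-event handler by overriding its add/update/remove hooks to emit MASTER_CHANGED events. A rough sketch of that template-hook idea in plain Java follows; the names are illustrative only (no Hazelcast or ONOS types).

// Sketch of the template-hook pattern: a base handler routes raw map events
// to onAdd/onUpdate/onRemove, and a subclass turns each hook into a domain event.
public class RemoteHandlerSketch {

    static class BaseHandler<K, V> {
        protected void onAdd(K key, V value) { }
        protected void onUpdate(K key, V oldValue, V value) { }
        protected void onRemove(K key, V value) { }

        // Entry points that a remote map listener would call.
        void entryAdded(K key, V value) { onAdd(key, value); }
        void entryUpdated(K key, V oldValue, V value) { onUpdate(key, oldValue, value); }
        void entryRemoved(K key, V value) { onRemove(key, value); }
    }

    static class MastershipHandler extends BaseHandler<String, String> {
        @Override protected void onAdd(String deviceId, String nodeId) {
            System.out.println("MASTER_CHANGED " + deviceId + " -> " + nodeId);
        }
        @Override protected void onUpdate(String deviceId, String oldNode, String nodeId) {
            System.out.println("MASTER_CHANGED " + deviceId + " -> " + nodeId);
        }
        @Override protected void onRemove(String deviceId, String nodeId) {
            System.out.println("MASTER_CHANGED " + deviceId + " -> (none)");
        }
    }

    public static void main(String[] args) {
        MastershipHandler h = new MastershipHandler();
        h.entryAdded("of:0000000000000001", "node-1");
        h.entryUpdated("of:0000000000000001", "node-1", "node-2");
    }
}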
......
package org.onlab.onos.store.flow.impl;
import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_ADDED;
import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
import static org.slf4j.LoggerFactory.getLogger;
import java.util.Collection;
import java.util.Collections;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.ApplicationId;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.flow.DefaultFlowRule;
import org.onlab.onos.net.flow.FlowRule;
import org.onlab.onos.net.flow.FlowRule.FlowRuleState;
import org.onlab.onos.net.flow.FlowRuleEvent;
import org.onlab.onos.net.flow.FlowRuleEvent.Type;
import org.onlab.onos.net.flow.FlowRuleStore;
import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
import org.onlab.onos.store.AbstractStore;
import org.slf4j.Logger;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
/**
* Manages inventory of flow rules using a trivial in-memory implementation.
*/
//FIXME: I LIE I AM NOT DISTRIBUTED
@Component(immediate = true)
@Service
public class DistributedFlowRuleStore
extends AbstractStore<FlowRuleEvent, FlowRuleStoreDelegate>
implements FlowRuleStore {
private final Logger log = getLogger(getClass());
// store entries as a pile of rules, no info about device tables
private final Multimap<DeviceId, FlowRule> flowEntries =
ArrayListMultimap.<DeviceId, FlowRule>create();
private final Multimap<ApplicationId, FlowRule> flowEntriesById =
ArrayListMultimap.<ApplicationId, FlowRule>create();
@Activate
public void activate() {
log.info("Started");
}
@Deactivate
public void deactivate() {
log.info("Stopped");
}
@Override
public synchronized FlowRule getFlowRule(FlowRule rule) {
for (FlowRule f : flowEntries.get(rule.deviceId())) {
if (f.equals(rule)) {
return f;
}
}
return null;
}
@Override
public synchronized Iterable<FlowRule> getFlowEntries(DeviceId deviceId) {
Collection<FlowRule> rules = flowEntries.get(deviceId);
if (rules == null) {
return Collections.emptyList();
}
return ImmutableSet.copyOf(rules);
}
@Override
public synchronized Iterable<FlowRule> getFlowEntriesByAppId(ApplicationId appId) {
Collection<FlowRule> rules = flowEntriesById.get(appId);
if (rules == null) {
return Collections.emptyList();
}
return ImmutableSet.copyOf(rules);
}
@Override
public synchronized void storeFlowRule(FlowRule rule) {
FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_ADD);
DeviceId did = f.deviceId();
if (!flowEntries.containsEntry(did, f)) {
flowEntries.put(did, f);
flowEntriesById.put(rule.appId(), f);
}
}
@Override
public synchronized void deleteFlowRule(FlowRule rule) {
FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_REMOVE);
DeviceId did = f.deviceId();
/*
* Find the rule and mark it for deletion.
* Ultimately a flow-removed event will come and remove it.
*/
if (flowEntries.containsEntry(did, f)) {
//synchronized (flowEntries) {
flowEntries.remove(did, f);
flowEntries.put(did, f);
flowEntriesById.remove(rule.appId(), rule);
//}
}
}
@Override
public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowRule rule) {
DeviceId did = rule.deviceId();
// check if this new rule is an update to an existing entry
if (flowEntries.containsEntry(did, rule)) {
//synchronized (flowEntries) {
// Multimaps support duplicates so we have to remove our rule
// and replace it with the current version.
flowEntries.remove(did, rule);
flowEntries.put(did, rule);
//}
return new FlowRuleEvent(Type.RULE_UPDATED, rule);
}
flowEntries.put(did, rule);
return new FlowRuleEvent(RULE_ADDED, rule);
}
@Override
public synchronized FlowRuleEvent removeFlowRule(FlowRule rule) {
//synchronized (this) {
if (flowEntries.remove(rule.deviceId(), rule)) {
return new FlowRuleEvent(RULE_REMOVED, rule);
} else {
return null;
}
//}
}
}
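The remove-then-put step in addOrUpdateFlowRule exists because Guava multimaps keep duplicate entries, so putting an "equal" rule again would not replace the stored instance. A small sketch of that behavior, using a stand-in Rule class whose equality ignores its state field:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import java.util.Objects;

public class MultimapReplaceSketch {

    /** Stand-in for FlowRule: equality ignores the state field. */
    static final class Rule {
        final String id;
        final String state;
        Rule(String id, String state) { this.id = id; this.state = state; }
        @Override public boolean equals(Object o) {
            return o instanceof Rule && ((Rule) o).id.equals(id);
        }
        @Override public int hashCode() { return Objects.hash(id); }
        @Override public String toString() { return id + "/" + state; }
    }

    public static void main(String[] args) {
        Multimap<String, Rule> flows = ArrayListMultimap.create();
        flows.put("of:1", new Rule("r1", "PENDING_ADD"));

        // put() alone would keep both copies, because multimaps allow duplicates;
        // remove the equal existing entry first, then store the updated instance.
        Rule updated = new Rule("r1", "ADDED");
        flows.remove("of:1", updated);
        flows.put("of:1", updated);

        System.out.println(flows.get("of:1")); // [r1/ADDED]
    }
}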
package org.onlab.onos.store.host.impl;
import static org.onlab.onos.net.host.HostEvent.Type.HOST_ADDED;
import static org.onlab.onos.net.host.HostEvent.Type.HOST_MOVED;
import static org.onlab.onos.net.host.HostEvent.Type.HOST_REMOVED;
import static org.onlab.onos.net.host.HostEvent.Type.HOST_UPDATED;
import static org.slf4j.LoggerFactory.getLogger;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.DefaultHost;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Host;
import org.onlab.onos.net.HostId;
import org.onlab.onos.net.host.HostDescription;
import org.onlab.onos.net.host.HostEvent;
import org.onlab.onos.net.host.HostStore;
import org.onlab.onos.net.host.HostStoreDelegate;
import org.onlab.onos.net.host.PortAddresses;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.store.AbstractStore;
import org.onlab.packet.IpPrefix;
import org.onlab.packet.MacAddress;
import org.onlab.packet.VlanId;
import org.slf4j.Logger;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
/**
* Manages inventory of end-station hosts using a trivial in-memory
* implementation.
*/
//FIXME: I LIE I AM NOT DISTRIBUTED
@Component(immediate = true)
@Service
public class DistributedHostStore
extends AbstractStore<HostEvent, HostStoreDelegate>
implements HostStore {
private final Logger log = getLogger(getClass());
// Host inventory
private final Map<HostId, Host> hosts = new ConcurrentHashMap<>();
// Hosts tracked by their location
private final Multimap<ConnectPoint, Host> locations = HashMultimap.create();
private final Map<ConnectPoint, PortAddresses> portAddresses =
new ConcurrentHashMap<>();
@Activate
public void activate() {
log.info("Started");
}
@Deactivate
public void deactivate() {
log.info("Stopped");
}
@Override
public HostEvent createOrUpdateHost(ProviderId providerId, HostId hostId,
HostDescription hostDescription) {
Host host = hosts.get(hostId);
if (host == null) {
return createHost(providerId, hostId, hostDescription);
}
return updateHost(providerId, host, hostDescription);
}
// creates a new host and sends HOST_ADDED
private HostEvent createHost(ProviderId providerId, HostId hostId,
HostDescription descr) {
DefaultHost newhost = new DefaultHost(providerId, hostId,
descr.hwAddress(),
descr.vlan(),
descr.location(),
descr.ipAddresses());
synchronized (this) {
hosts.put(hostId, newhost);
locations.put(descr.location(), newhost);
}
return new HostEvent(HOST_ADDED, newhost);
}
// checks for type of update to host, sends appropriate event
private HostEvent updateHost(ProviderId providerId, Host host,
HostDescription descr) {
DefaultHost updated;
HostEvent event;
if (!host.location().equals(descr.location())) {
updated = new DefaultHost(providerId, host.id(),
host.mac(),
host.vlan(),
descr.location(),
host.ipAddresses());
event = new HostEvent(HOST_MOVED, updated);
} else if (!(host.ipAddresses().equals(descr.ipAddresses()))) {
updated = new DefaultHost(providerId, host.id(),
host.mac(),
host.vlan(),
descr.location(),
descr.ipAddresses());
event = new HostEvent(HOST_UPDATED, updated);
} else {
return null;
}
synchronized (this) {
hosts.put(host.id(), updated);
locations.remove(host.location(), host);
locations.put(updated.location(), updated);
}
return event;
}
@Override
public HostEvent removeHost(HostId hostId) {
synchronized (this) {
Host host = hosts.remove(hostId);
if (host != null) {
locations.remove(host.location(), host);
return new HostEvent(HOST_REMOVED, host);
}
return null;
}
}
@Override
public int getHostCount() {
return hosts.size();
}
@Override
public Iterable<Host> getHosts() {
return Collections.unmodifiableSet(new HashSet<>(hosts.values()));
}
@Override
public Host getHost(HostId hostId) {
return hosts.get(hostId);
}
@Override
public Set<Host> getHosts(VlanId vlanId) {
Set<Host> vlanset = new HashSet<>();
for (Host h : hosts.values()) {
if (h.vlan().equals(vlanId)) {
vlanset.add(h);
}
}
return vlanset;
}
@Override
public Set<Host> getHosts(MacAddress mac) {
Set<Host> macset = new HashSet<>();
for (Host h : hosts.values()) {
if (h.mac().equals(mac)) {
macset.add(h);
}
}
return macset;
}
@Override
public Set<Host> getHosts(IpPrefix ip) {
Set<Host> ipset = new HashSet<>();
for (Host h : hosts.values()) {
if (h.ipAddresses().contains(ip)) {
ipset.add(h);
}
}
return ipset;
}
@Override
public Set<Host> getConnectedHosts(ConnectPoint connectPoint) {
return ImmutableSet.copyOf(locations.get(connectPoint));
}
@Override
public Set<Host> getConnectedHosts(DeviceId deviceId) {
Set<Host> hostset = new HashSet<>();
for (ConnectPoint p : locations.keySet()) {
if (p.deviceId().equals(deviceId)) {
hostset.addAll(locations.get(p));
}
}
return hostset;
}
@Override
public void updateAddressBindings(PortAddresses addresses) {
synchronized (portAddresses) {
PortAddresses existing = portAddresses.get(addresses.connectPoint());
if (existing == null) {
portAddresses.put(addresses.connectPoint(), addresses);
} else {
Set<IpPrefix> union = Sets.union(existing.ips(), addresses.ips())
.immutableCopy();
MacAddress newMac = (addresses.mac() == null) ? existing.mac()
: addresses.mac();
PortAddresses newAddresses =
new PortAddresses(addresses.connectPoint(), union, newMac);
portAddresses.put(newAddresses.connectPoint(), newAddresses);
}
}
}
@Override
public void removeAddressBindings(PortAddresses addresses) {
synchronized (portAddresses) {
PortAddresses existing = portAddresses.get(addresses.connectPoint());
if (existing != null) {
Set<IpPrefix> difference =
Sets.difference(existing.ips(), addresses.ips()).immutableCopy();
// If they removed the existing mac, set the new mac to null.
// Otherwise, keep the existing mac.
MacAddress newMac = existing.mac();
if (addresses.mac() != null && addresses.mac().equals(existing.mac())) {
newMac = null;
}
PortAddresses newAddresses =
new PortAddresses(addresses.connectPoint(), difference, newMac);
portAddresses.put(newAddresses.connectPoint(), newAddresses);
}
}
}
@Override
public void clearAddressBindings(ConnectPoint connectPoint) {
synchronized (portAddresses) {
portAddresses.remove(connectPoint);
}
}
@Override
public Set<PortAddresses> getAddressBindings() {
synchronized (portAddresses) {
return new HashSet<>(portAddresses.values());
}
}
@Override
public PortAddresses getAddressBindingsForPort(ConnectPoint connectPoint) {
PortAddresses addresses;
synchronized (portAddresses) {
addresses = portAddresses.get(connectPoint);
}
if (addresses == null) {
addresses = new PortAddresses(connectPoint, null, null);
}
return addresses;
}
}
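updateAddressBindings and removeAddressBindings merge bindings with Guava set views: union() for additions, difference() for removals, each snapshotted with immutableCopy(). A minimal sketch of that merge, using plain IP strings in place of IpPrefix:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class AddressMergeSketch {
    public static void main(String[] args) {
        Set<String> existing = ImmutableSet.of("10.0.0.1/24", "10.0.0.2/24");
        Set<String> added = ImmutableSet.of("10.0.0.3/24");
        Set<String> removed = ImmutableSet.of("10.0.0.1/24");

        // Register new addresses: union of what was there and what arrived.
        Set<String> afterAdd = Sets.union(existing, added).immutableCopy();
        // Unregister addresses: keep only those not in the removal set.
        Set<String> afterRemove = Sets.difference(afterAdd, removed).immutableCopy();

        System.out.println(afterAdd);    // [10.0.0.1/24, 10.0.0.2/24, 10.0.0.3/24]
        System.out.println(afterRemove); // [10.0.0.2/24, 10.0.0.3/24]
    }
}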
package org.onlab.onos.store.topology.impl;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSetMultimap;
import org.onlab.graph.DijkstraGraphSearch;
import org.onlab.graph.GraphPathSearch;
import org.onlab.graph.TarjanGraphSearch;
import org.onlab.onos.net.AbstractModel;
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.DefaultPath;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Link;
import org.onlab.onos.net.Path;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.net.topology.ClusterId;
import org.onlab.onos.net.topology.DefaultTopologyCluster;
import org.onlab.onos.net.topology.DefaultTopologyVertex;
import org.onlab.onos.net.topology.GraphDescription;
import org.onlab.onos.net.topology.LinkWeight;
import org.onlab.onos.net.topology.Topology;
import org.onlab.onos.net.topology.TopologyCluster;
import org.onlab.onos.net.topology.TopologyEdge;
import org.onlab.onos.net.topology.TopologyGraph;
import org.onlab.onos.net.topology.TopologyVertex;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static com.google.common.base.MoreObjects.toStringHelper;
import static com.google.common.collect.ImmutableSetMultimap.Builder;
import static org.onlab.graph.GraphPathSearch.Result;
import static org.onlab.graph.TarjanGraphSearch.SCCResult;
import static org.onlab.onos.net.Link.Type.INDIRECT;
/**
* Default implementation of the topology descriptor. This carries the
* backing topology data.
*/
public class DefaultTopology extends AbstractModel implements Topology {
private static final DijkstraGraphSearch<TopologyVertex, TopologyEdge> DIJKSTRA =
new DijkstraGraphSearch<>();
private static final TarjanGraphSearch<TopologyVertex, TopologyEdge> TARJAN =
new TarjanGraphSearch<>();
private static final ProviderId PID = new ProviderId("core", "org.onlab.onos.net");
private final long time;
private final TopologyGraph graph;
private final SCCResult<TopologyVertex, TopologyEdge> clusterResults;
private final ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> results;
private final ImmutableSetMultimap<PathKey, Path> paths;
private final ImmutableMap<ClusterId, TopologyCluster> clusters;
private final ImmutableSet<ConnectPoint> infrastructurePoints;
private final ImmutableSetMultimap<ClusterId, ConnectPoint> broadcastSets;
private ImmutableMap<DeviceId, TopologyCluster> clustersByDevice;
private ImmutableSetMultimap<TopologyCluster, DeviceId> devicesByCluster;
private ImmutableSetMultimap<TopologyCluster, Link> linksByCluster;
/**
* Creates a topology descriptor attributed to the specified provider.
*
* @param providerId identity of the provider
* @param description data describing the new topology
*/
DefaultTopology(ProviderId providerId, GraphDescription description) {
super(providerId);
this.time = description.timestamp();
// Build the graph
this.graph = new DefaultTopologyGraph(description.vertexes(),
description.edges());
this.results = searchForShortestPaths();
this.paths = buildPaths();
this.clusterResults = searchForClusters();
this.clusters = buildTopologyClusters();
buildIndexes();
this.broadcastSets = buildBroadcastSets();
this.infrastructurePoints = findInfrastructurePoints();
}
@Override
public long time() {
return time;
}
@Override
public int clusterCount() {
return clusters.size();
}
@Override
public int deviceCount() {
return graph.getVertexes().size();
}
@Override
public int linkCount() {
return graph.getEdges().size();
}
@Override
public int pathCount() {
return paths.size();
}
/**
* Returns the backing topology graph.
*
* @return topology graph
*/
TopologyGraph getGraph() {
return graph;
}
/**
* Returns the set of topology clusters.
*
* @return set of clusters
*/
Set<TopologyCluster> getClusters() {
return ImmutableSet.copyOf(clusters.values());
}
/**
* Returns the specified topology cluster.
*
* @param clusterId cluster identifier
* @return topology cluster
*/
TopologyCluster getCluster(ClusterId clusterId) {
return clusters.get(clusterId);
}
/**
* Returns the topology cluster that contains the given device.
*
* @param deviceId device identifier
* @return topology cluster
*/
TopologyCluster getCluster(DeviceId deviceId) {
return clustersByDevice.get(deviceId);
}
/**
* Returns the set of cluster devices.
*
* @param cluster topology cluster
* @return cluster devices
*/
Set<DeviceId> getClusterDevices(TopologyCluster cluster) {
return devicesByCluster.get(cluster);
}
/**
* Returns the set of cluster links.
*
* @param cluster topology cluster
* @return cluster links
*/
Set<Link> getClusterLinks(TopologyCluster cluster) {
return linksByCluster.get(cluster);
}
/**
* Indicates whether the given point is an infrastructure link end-point.
*
* @param connectPoint connection point
* @return true if infrastructure
*/
boolean isInfrastructure(ConnectPoint connectPoint) {
return infrastructurePoints.contains(connectPoint);
}
/**
* Indicates whether the given point is part of a broadcast set.
*
* @param connectPoint connection point
* @return true if in broadcast set
*/
boolean isBroadcastPoint(ConnectPoint connectPoint) {
// Any non-infrastructure points, i.e. edge points, are assumed to be OK.
if (!isInfrastructure(connectPoint)) {
return true;
}
// Find the cluster to which the device belongs.
TopologyCluster cluster = clustersByDevice.get(connectPoint.deviceId());
if (cluster == null) {
throw new IllegalArgumentException("No cluster found for device " + connectPoint.deviceId());
}
// If the broadcast set is null or empty, or if the point explicitly
// belongs to it, return true.
Set<ConnectPoint> points = broadcastSets.get(cluster.id());
return points == null || points.isEmpty() || points.contains(connectPoint);
}
/**
* Returns the size of the cluster broadcast set.
*
* @param clusterId cluster identifier
* @return size of the cluster broadcast set
*/
int broadcastSetSize(ClusterId clusterId) {
return broadcastSets.get(clusterId).size();
}
/**
* Returns the set of pre-computed shortest paths between source and
* destination devices.
*
* @param src source device
* @param dst destination device
* @return set of shortest paths
*/
Set<Path> getPaths(DeviceId src, DeviceId dst) {
return paths.get(new PathKey(src, dst));
}
/**
* Computes on-demand the set of shortest paths between source and
* destination devices.
*
* @param src source device
* @param dst destination device
* @return set of shortest paths
*/
Set<Path> getPaths(DeviceId src, DeviceId dst, LinkWeight weight) {
GraphPathSearch.Result<TopologyVertex, TopologyEdge> result =
DIJKSTRA.search(graph, new DefaultTopologyVertex(src),
new DefaultTopologyVertex(dst), weight);
ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
builder.add(networkPath(path));
}
return builder.build();
}
// Searches the graph for all shortest paths and returns the search results.
private ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> searchForShortestPaths() {
ImmutableMap.Builder<DeviceId, Result<TopologyVertex, TopologyEdge>> builder = ImmutableMap.builder();
// Search graph paths for each source to all destinations.
LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size());
for (TopologyVertex src : graph.getVertexes()) {
builder.put(src.deviceId(), DIJKSTRA.search(graph, src, null, weight));
}
return builder.build();
}
// Builds network paths from the graph path search results
private ImmutableSetMultimap<PathKey, Path> buildPaths() {
Builder<PathKey, Path> builder = ImmutableSetMultimap.builder();
for (DeviceId deviceId : results.keySet()) {
Result<TopologyVertex, TopologyEdge> result = results.get(deviceId);
for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
builder.put(new PathKey(path.src().deviceId(), path.dst().deviceId()),
networkPath(path));
}
}
return builder.build();
}
// Converts graph path to a network path with the same cost.
private Path networkPath(org.onlab.graph.Path<TopologyVertex, TopologyEdge> path) {
List<Link> links = new ArrayList<>();
for (TopologyEdge edge : path.edges()) {
links.add(edge.link());
}
return new DefaultPath(PID, links, path.cost());
}
// Searches for SCC clusters in the network topology graph using Tarjan
// algorithm.
private SCCResult<TopologyVertex, TopologyEdge> searchForClusters() {
return TARJAN.search(graph, new NoIndirectLinksWeight());
}
// Builds the topology clusters and returns the id-cluster bindings.
private ImmutableMap<ClusterId, TopologyCluster> buildTopologyClusters() {
ImmutableMap.Builder<ClusterId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
SCCResult<TopologyVertex, TopologyEdge> result =
TARJAN.search(graph, new NoIndirectLinksWeight());
// Extract both vertexes and edges from the results; the lists form
// pairs along the same index.
List<Set<TopologyVertex>> clusterVertexes = result.clusterVertexes();
List<Set<TopologyEdge>> clusterEdges = result.clusterEdges();
// Scan over the lists and create a cluster from the results.
for (int i = 0, n = result.clusterCount(); i < n; i++) {
Set<TopologyVertex> vertexSet = clusterVertexes.get(i);
Set<TopologyEdge> edgeSet = clusterEdges.get(i);
ClusterId cid = ClusterId.clusterId(i);
DefaultTopologyCluster cluster =
new DefaultTopologyCluster(cid, vertexSet.size(), edgeSet.size(),
findRoot(vertexSet).deviceId());
clusterBuilder.put(cid, cluster);
}
return clusterBuilder.build();
}
// Finds the vertex whose device id is the lexicographical minimum in the
// specified set.
private TopologyVertex findRoot(Set<TopologyVertex> vertexSet) {
TopologyVertex minVertex = null;
for (TopologyVertex vertex : vertexSet) {
if (minVertex == null ||
vertex.deviceId().toString()
.compareTo(minVertex.deviceId().toString()) < 0) {
minVertex = vertex;
}
}
return minVertex;
}
// Builds a map of broadcast sets, one per cluster.
private ImmutableSetMultimap<ClusterId, ConnectPoint> buildBroadcastSets() {
Builder<ClusterId, ConnectPoint> builder = ImmutableSetMultimap.builder();
for (TopologyCluster cluster : clusters.values()) {
addClusterBroadcastSet(cluster, builder);
}
return builder.build();
}
// Finds all broadcast points for the cluster. These are those connection
// points which lie along the shortest paths between the cluster root and
// all other devices within the cluster.
private void addClusterBroadcastSet(TopologyCluster cluster,
Builder<ClusterId, ConnectPoint> builder) {
// Use the graph root search results to build the broadcast set.
Result<TopologyVertex, TopologyEdge> result = results.get(cluster.root());
for (Map.Entry<TopologyVertex, Set<TopologyEdge>> entry : result.parents().entrySet()) {
TopologyVertex vertex = entry.getKey();
// Ignore any parents that lead outside the cluster.
if (clustersByDevice.get(vertex.deviceId()) != cluster) {
continue;
}
// Ignore any back-link sets that are empty.
Set<TopologyEdge> parents = entry.getValue();
if (parents.isEmpty()) {
continue;
}
// Use the first back-link source and destinations to add to the
// broadcast set.
Link link = parents.iterator().next().link();
builder.put(cluster.id(), link.src());
builder.put(cluster.id(), link.dst());
}
}
// Collects and returns a set of all infrastructure link end-points.
private ImmutableSet<ConnectPoint> findInfrastructurePoints() {
ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder();
for (TopologyEdge edge : graph.getEdges()) {
builder.add(edge.link().src());
builder.add(edge.link().dst());
}
return builder.build();
}
// Builds cluster-devices, cluster-links and device-cluster indexes.
private void buildIndexes() {
// Prepare the index builders
ImmutableMap.Builder<DeviceId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
ImmutableSetMultimap.Builder<TopologyCluster, DeviceId> devicesBuilder = ImmutableSetMultimap.builder();
ImmutableSetMultimap.Builder<TopologyCluster, Link> linksBuilder = ImmutableSetMultimap.builder();
// Now scan through all the clusters
for (TopologyCluster cluster : clusters.values()) {
int i = cluster.id().index();
// Scan through all the cluster vertexes.
for (TopologyVertex vertex : clusterResults.clusterVertexes().get(i)) {
devicesBuilder.put(cluster, vertex.deviceId());
clusterBuilder.put(vertex.deviceId(), cluster);
}
// Scan through all the cluster edges.
for (TopologyEdge edge : clusterResults.clusterEdges().get(i)) {
linksBuilder.put(cluster, edge.link());
}
}
// Finalize all indexes.
clustersByDevice = clusterBuilder.build();
devicesByCluster = devicesBuilder.build();
linksByCluster = linksBuilder.build();
}
// Link weight for measuring link cost as hop count with indirect links
// being as expensive as traversing the entire graph to assume the worst.
private static class HopCountLinkWeight implements LinkWeight {
private final int indirectLinkCost;
HopCountLinkWeight(int indirectLinkCost) {
this.indirectLinkCost = indirectLinkCost;
}
@Override
public double weight(TopologyEdge edge) {
// To force preference to use direct paths first, make indirect
// links as expensive as the linear vertex traversal.
return edge.link().type() == INDIRECT ? indirectLinkCost : 1;
}
}
// Link weight for preventing traversal over indirect links.
private static class NoIndirectLinksWeight implements LinkWeight {
@Override
public double weight(TopologyEdge edge) {
return edge.link().type() == INDIRECT ? -1 : 1;
}
}
@Override
public String toString() {
return toStringHelper(this)
.add("time", time)
.add("clusters", clusterCount())
.add("devices", deviceCount())
.add("links", linkCount())
.add("pathCount", pathCount())
.toString();
}
}
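DefaultTopology pre-computes shortest paths once and answers getPaths(src, dst) from an ImmutableSetMultimap keyed by a (src, dst) pair, which is what PathKey provides. A minimal sketch of that indexing idea, with strings standing in for DeviceId and Path (illustrative only):

import com.google.common.collect.ImmutableSetMultimap;
import java.util.Objects;
import java.util.Set;

public class PathIndexSketch {

    /** Stand-in for PathKey: value equality over the (src, dst) pair. */
    static final class Key {
        final String src, dst;
        Key(String src, String dst) { this.src = src; this.dst = dst; }
        @Override public boolean equals(Object o) {
            return o instanceof Key
                    && ((Key) o).src.equals(src) && ((Key) o).dst.equals(dst);
        }
        @Override public int hashCode() { return Objects.hash(src, dst); }
    }

    public static void main(String[] args) {
        ImmutableSetMultimap.Builder<Key, String> builder = ImmutableSetMultimap.builder();
        // Normally these entries come from a Dijkstra pass over every source vertex.
        builder.put(new Key("of:1", "of:3"), "of:1-of:2-of:3");
        builder.put(new Key("of:1", "of:3"), "of:1-of:4-of:3");
        ImmutableSetMultimap<Key, String> paths = builder.build();

        Set<String> shortest = paths.get(new Key("of:1", "of:3"));
        System.out.println(shortest); // both equal-cost paths
    }
}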
package org.onlab.onos.store.topology.impl;
import org.onlab.graph.AdjacencyListsGraph;
import org.onlab.onos.net.topology.TopologyEdge;
import org.onlab.onos.net.topology.TopologyGraph;
import org.onlab.onos.net.topology.TopologyVertex;
import java.util.Set;
/**
* Default implementation of an immutable topology graph based on a generic
* implementation of adjacency lists graph.
*/
public class DefaultTopologyGraph
extends AdjacencyListsGraph<TopologyVertex, TopologyEdge>
implements TopologyGraph {
/**
* Creates a topology graph comprising of the specified vertexes and edges.
*
* @param vertexes set of graph vertexes
* @param edges set of graph edges
*/
public DefaultTopologyGraph(Set<TopologyVertex> vertexes, Set<TopologyEdge> edges) {
super(vertexes, edges);
}
}
package org.onlab.onos.store.topology.impl;
import static org.slf4j.LoggerFactory.getLogger;
import java.util.List;
import java.util.Set;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.event.Event;
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Link;
import org.onlab.onos.net.Path;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.net.topology.ClusterId;
import org.onlab.onos.net.topology.GraphDescription;
import org.onlab.onos.net.topology.LinkWeight;
import org.onlab.onos.net.topology.Topology;
import org.onlab.onos.net.topology.TopologyCluster;
import org.onlab.onos.net.topology.TopologyEvent;
import org.onlab.onos.net.topology.TopologyGraph;
import org.onlab.onos.net.topology.TopologyStore;
import org.onlab.onos.net.topology.TopologyStoreDelegate;
import org.onlab.onos.store.AbstractStore;
import org.slf4j.Logger;
/**
* Manages inventory of topology snapshots using a trivial in-memory
* implementation.
*/
//FIXME: I LIE I AM NOT DISTRIBUTED
@Component(immediate = true)
@Service
public class DistributedTopologyStore
extends AbstractStore<TopologyEvent, TopologyStoreDelegate>
implements TopologyStore {
private final Logger log = getLogger(getClass());
private volatile DefaultTopology current;
@Activate
public void activate() {
log.info("Started");
}
@Deactivate
public void deactivate() {
log.info("Stopped");
}
@Override
public Topology currentTopology() {
return current;
}
@Override
public boolean isLatest(Topology topology) {
// Topology is current only if it is the same as our current topology
return topology == current;
}
@Override
public TopologyGraph getGraph(Topology topology) {
return defaultTopology(topology).getGraph();
}
@Override
public Set<TopologyCluster> getClusters(Topology topology) {
return defaultTopology(topology).getClusters();
}
@Override
public TopologyCluster getCluster(Topology topology, ClusterId clusterId) {
return defaultTopology(topology).getCluster(clusterId);
}
@Override
public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) {
return defaultTopology(topology).getClusterDevices(cluster);
}
@Override
public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) {
return defaultTopology(topology).getClusterLinks(cluster);
}
@Override
public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) {
return defaultTopology(topology).getPaths(src, dst);
}
@Override
public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst,
LinkWeight weight) {
return defaultTopology(topology).getPaths(src, dst, weight);
}
@Override
public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) {
return defaultTopology(topology).isInfrastructure(connectPoint);
}
@Override
public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
return defaultTopology(topology).isBroadcastPoint(connectPoint);
}
@Override
public TopologyEvent updateTopology(ProviderId providerId,
GraphDescription graphDescription,
List<Event> reasons) {
// First off, make sure that what we're given is indeed newer than
// what we already have.
if (current != null && graphDescription.timestamp() < current.time()) {
return null;
}
// Have the default topology construct itself from the description data.
DefaultTopology newTopology =
new DefaultTopology(providerId, graphDescription);
// Promote the new topology to current and return a ready-to-send event.
synchronized (this) {
current = newTopology;
return new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED, current);
}
}
// Validates the specified topology and returns it as a DefaultTopology.
private DefaultTopology defaultTopology(Topology topology) {
if (topology instanceof DefaultTopology) {
return (DefaultTopology) topology;
}
throw new IllegalArgumentException("Topology class " + topology.getClass() +
" not supported");
}
}
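updateTopology above accepts a candidate snapshot only if it is not older than the current one, then swaps it in under a lock while reads go through the volatile field. A minimal sketch of that pattern, with a stand-in Snapshot type:

public class SnapshotStoreSketch {

    static final class Snapshot {
        final long time;
        Snapshot(long time) { this.time = time; }
    }

    private volatile Snapshot current;   // lock-free reads

    Snapshot current() { return current; }

    /** Returns the accepted snapshot, or null if the candidate was stale. */
    synchronized Snapshot update(Snapshot candidate) {
        if (current != null && candidate.time < current.time) {
            return null;                 // older than what we already have; ignore
        }
        current = candidate;
        return current;
    }

    public static void main(String[] args) {
        SnapshotStoreSketch store = new SnapshotStoreSketch();
        System.out.println(store.update(new Snapshot(100)) != null); // true
        System.out.println(store.update(new Snapshot(50)) != null);  // false, stale
        System.out.println(store.current().time);                    // 100
    }
}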
package org.onlab.onos.store.topology.impl;
import org.onlab.onos.net.DeviceId;
import java.util.Objects;
/**
* Key for filing pre-computed paths between source and destination devices.
*/
class PathKey {
private final DeviceId src;
private final DeviceId dst;
/**
* Creates a path key from the given source/dest pair.
*
* @param src source device
* @param dst destination device
*/
PathKey(DeviceId src, DeviceId dst) {
this.src = src;
this.dst = dst;
}
@Override
public int hashCode() {
return Objects.hash(src, dst);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof PathKey) {
final PathKey other = (PathKey) obj;
return Objects.equals(this.src, other.src) && Objects.equals(this.dst, other.dst);
}
return false;
}
}
......@@ -93,13 +93,17 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
@Override
public final void sendMsg(OFMessage m) {
if (role == RoleState.MASTER) {
this.write(m);
}
}
@Override
public final void sendMsg(List<OFMessage> msgs) {
if (role == RoleState.MASTER) {
this.write(msgs);
}
}
@Override
public abstract void write(OFMessage msg);
......@@ -164,8 +168,10 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
*/
@Override
public final void handleMessage(OFMessage m) {
if (this.role == RoleState.MASTER) {
this.agent.processMessage(dpid, m);
}
}
@Override
public RoleState getRole() {
......@@ -226,19 +232,34 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
@Override
public abstract void processDriverHandshakeMessage(OFMessage m);
// Role Handling
@Override
public void setRole(RoleState role) {
try {
log.info("Sending role {} to switch {}", role, getStringId());
if (this.roleMan.sendRoleRequest(role, RoleRecvStatus.MATCHED_SET_ROLE)) {
log.info("Sending role {} to switch {}", role, getStringId());
if (role == RoleState.SLAVE || role == RoleState.EQUAL) {
this.role = role;
}
}
} catch (IOException e) {
log.error("Unable to write to switch {}.", this.dpid);
}
}
// Role Handling
@Override
public void reassertRole() {
if (this.getRole() == RoleState.MASTER) {
log.warn("Received permission error from switch {} while " +
"being master. Reasserting master role.",
this.getStringId());
this.setRole(RoleState.MASTER);
}
}
@Override
public void handleRole(OFMessage m) throws SwitchStateException {
......@@ -246,11 +267,15 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
RoleRecvStatus rrs = roleMan.deliverRoleReply(rri);
if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) {
if (rri.getRole() == RoleState.MASTER) {
this.role = rri.getRole();
this.transitionToMasterSwitch();
} else if (rri.getRole() == RoleState.EQUAL ||
rri.getRole() == RoleState.MASTER) {
rri.getRole() == RoleState.SLAVE) {
this.transitionToEqualSwitch();
}
} else {
return;
//TODO: tell people that we failed.
}
}
......@@ -267,11 +292,15 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
new RoleReplyInfo(r, null, m.getXid()));
if (rrs == RoleRecvStatus.MATCHED_SET_ROLE) {
if (r == RoleState.MASTER) {
this.role = r;
this.transitionToMasterSwitch();
} else if (r == RoleState.EQUAL ||
r == RoleState.SLAVE) {
this.transitionToEqualSwitch();
}
} else {
return;
//TODO: tell people that we failed.
}
}
......@@ -285,12 +314,7 @@ public abstract class AbstractOpenFlowSwitch implements OpenFlowSwitchDriver {
return true;
}
@Override
public void reassertRole() {
if (this.getRole() == RoleState.MASTER) {
this.setRole(RoleState.MASTER);
}
}
@Override
public final void setAgent(OpenFlowAgent ag) {
......
......@@ -521,9 +521,7 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
// if two controllers are master (even if it's only for
// a brief period). We might need to see if these errors
// persist before we reassert.
log.warn("Received permission error from switch {} while" +
"being master. Reasserting master role.",
h.getSwitchInfoString());
h.sw.reassertRole();
} else if (m.getErrType() == OFErrorType.FLOW_MOD_FAILED &&
((OFFlowModFailedErrorMsg) m).getCode() ==
......
......@@ -142,9 +142,9 @@ class RoleManager implements RoleHandler {
}
// OF1.0 switch with support for NX_ROLE_REQUEST vendor extn.
// make Role.EQUAL become Role.SLAVE
pendingRole = role;
role = (role == RoleState.EQUAL) ? RoleState.SLAVE : role;
pendingXid = sendNxRoleRequest(role);
pendingRole = role;
requestPending = true;
} else {
// OF1.3 switch, use OFPT_ROLE_REQUEST message
......
#!/usr/bin/python
import sys, solar
topo = solar.Solar(cip=sys.argv[1])
topo = solar.Solar(cips=sys.argv[1:])
topo.run()
......
......@@ -17,22 +17,22 @@ class CustomCLI(CLI):
class Solar(object):
""" Create a tiered topology from semi-scratch in Mininet """
def __init__(self, cname='onos', cip='192.168.56.1', islands=3, edges=2, hosts=2,
proto=None):
def __init__(self, cname='onos', cips=['192.168.56.1'], islands=3, edges=2, hosts=2):
"""Create tower topology for mininet"""
# We are creating the controller with local-loopback on purpose to avoid
# having the switches connect immediately. Instead, we'll set controller
# explicitly for each switch after configuring it as we want.
self.flare = RemoteController(cname, cip, 6633)
self.net = Mininet(controller=self.flare, switch = OVSKernelSwitch,
self.ctrls = [ RemoteController(cname, cip, 6633) for cip in cips ]
self.net = Mininet(controller=RemoteController, switch = OVSKernelSwitch,
build=False)
self.cip = cip
self.cips = cips
self.spines = []
self.leaves = []
self.hosts = []
self.proto = proto
for ctrl in self.ctrls:
self.net.addController(ctrl)
# Create the two core switches and links between them
c1 = self.net.addSwitch('c1',dpid='1111000000000000')
......@@ -83,29 +83,11 @@ class Solar(object):
def run(self):
""" Runs the created network topology and launches mininet cli"""
self.run_silent()
self.net.build()
self.net.start()
CustomCLI(self.net)
self.net.stop()
def run_silent(self):
""" Runs silently - for unit testing """
self.net.build()
# Start the switches, configure them with desired protocols and only
# then set the controller
for sw in self.spines:
sw.start([self.flare])
if self.proto:
sw.cmd('ovs-vsctl set bridge %(sw)s protocols=%(proto)s' % \
{ 'sw': sw.name, 'proto': self.proto})
sw.cmdPrint('ovs-vsctl set-controller %(sw)s tcp:%(ctl)s:6633' % \
{'sw': sw.name, 'ctl': self.cip})
for sw in self.leaves:
sw.start([self.flare])
sw.cmdPrint('ovs-vsctl set-controller %(sw)s tcp:%(ctl)s:6633' % \
{'sw': sw.name, 'ctl': self.cip})
def pingAll(self):
""" PingAll to create flows - for unit testing """
self.net.pingAll()
......