alshabib

Merge branch 'master' of ssh://gerrit.onlab.us:29418/onos-next

......@@ -16,7 +16,7 @@
<param-name>com.sun.jersey.config.property.packages</param-name>
<param-value>org.onlab.onos.tvue</param-value>
</init-param>
<load-on-startup>1</load-on-startup>
<load-on-startup>10</load-on-startup>
</servlet>
<servlet-mapping>
......
......@@ -8,6 +8,8 @@ import org.onlab.onos.net.DeviceId;
*/
public class MastershipEvent extends AbstractEvent<MastershipEvent.Type, DeviceId> {
//Do we need to explicitly set slaves/equals? Probably not,
//to keep it simple.
NodeId master;
/**
......@@ -28,7 +30,7 @@ public class MastershipEvent extends AbstractEvent<MastershipEvent.Type, DeviceI
* @param device event device subject
* @param master master ID subject
*/
protected MastershipEvent(Type type, DeviceId device, NodeId master) {
public MastershipEvent(Type type, DeviceId device, NodeId master) {
super(type, device);
this.master = master;
}
......@@ -42,7 +44,7 @@ public class MastershipEvent extends AbstractEvent<MastershipEvent.Type, DeviceI
* @param master master ID subject
* @param time occurrence time
*/
protected MastershipEvent(Type type, DeviceId device, NodeId master, long time) {
public MastershipEvent(Type type, DeviceId device, NodeId master, long time) {
super(type, device, time);
this.master = master;
}
......
......@@ -5,9 +5,12 @@ import static org.slf4j.LoggerFactory.getLogger;
import java.util.Set;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.cluster.ClusterService;
import org.onlab.onos.cluster.MastershipAdminService;
import org.onlab.onos.cluster.MastershipEvent;
import org.onlab.onos.cluster.MastershipListener;
......@@ -26,6 +29,8 @@ import org.slf4j.Logger;
import static com.google.common.base.Preconditions.checkNotNull;
@Component(immediate = true)
@Service
public class MastershipManager
extends AbstractProviderRegistry<MastershipProvider, MastershipProviderService>
implements MastershipService, MastershipAdminService {
......@@ -46,7 +51,7 @@ public class MastershipManager
protected EventDeliveryService eventDispatcher;
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected ClusterManager clusterManager;
protected ClusterService clusterService;
@Activate
public void activate() {
......@@ -65,7 +70,10 @@ public class MastershipManager
checkNotNull(nodeId, NODE_ID_NULL);
checkNotNull(deviceId, DEVICE_ID_NULL);
checkNotNull(role, ROLE_NULL);
store.setRole(nodeId, deviceId, role);
MastershipEvent event = store.setRole(nodeId, deviceId, role);
if (event != null) {
post(event);
}
}
@Override
......@@ -83,7 +91,7 @@ public class MastershipManager
@Override
public MastershipRole requestRoleFor(DeviceId deviceId) {
checkNotNull(deviceId, DEVICE_ID_NULL);
NodeId id = clusterManager.getLocalNode().id();
NodeId id = clusterService.getLocalNode().id();
return store.getRole(id, deviceId);
}
......
......@@ -6,6 +6,7 @@ import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.cluster.MastershipService;
import org.onlab.onos.event.AbstractListenerRegistry;
import org.onlab.onos.event.EventDeliveryService;
import org.onlab.onos.net.Device;
......@@ -29,6 +30,7 @@ import org.slf4j.Logger;
import java.util.List;
import static org.onlab.onos.net.device.DeviceEvent.Type.*;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.slf4j.LoggerFactory.getLogger;
......@@ -58,6 +60,9 @@ public class DeviceManager
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected EventDeliveryService eventDispatcher;
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected MastershipService mastershipService;
@Activate
public void activate() {
eventDispatcher.addSink(DeviceEvent.class, listenerRegistry);
......@@ -171,6 +176,10 @@ public class DeviceManager
// If there was a change of any kind, trigger the role selection process.
if (event != null) {
log.info("Device {} connected", deviceId);
if (event.type().equals(DEVICE_ADDED)) {
MastershipRole role = mastershipService.requestRoleFor(deviceId);
store.setRole(deviceId, role);
}
Device device = event.subject();
provider().roleChanged(device, store.getRole(device.id()));
post(event);
......
package org.onlab.onos.net.host.impl;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.jboss.netty.util.Timeout;
import org.jboss.netty.util.TimerTask;
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.Device;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Host;
import org.onlab.onos.net.Port;
import org.onlab.onos.net.device.DeviceService;
import org.onlab.onos.net.flow.DefaultTrafficTreatment;
import org.onlab.onos.net.flow.TrafficTreatment;
import org.onlab.onos.net.flow.instructions.Instruction;
import org.onlab.onos.net.flow.instructions.Instructions;
import org.onlab.onos.net.host.HostProvider;
import org.onlab.onos.net.host.HostService;
import org.onlab.onos.net.packet.PacketProvider;
import org.onlab.onos.net.host.HostStore;
import org.onlab.onos.net.host.PortAddresses;
import org.onlab.onos.net.packet.DefaultOutboundPacket;
import org.onlab.onos.net.packet.OutboundPacket;
import org.onlab.onos.net.packet.PacketService;
import org.onlab.onos.net.topology.TopologyService;
import org.onlab.packet.ARP;
import org.onlab.packet.Ethernet;
import org.onlab.packet.IpAddress;
import org.onlab.packet.IpPrefix;
import org.onlab.packet.MacAddress;
import org.onlab.util.Timer;
/**
* Monitors hosts on the dataplane to detect changes in host data.
* <p/>
* The HostMonitor watches hosts that have already been detected for changes.
* At an application's request, it can also actively probe for hosts that have
* not yet been detected (specified by IP address).
*/
public class HostMonitor implements TimerTask {
private static final byte[] DEFAULT_MAC_ADDRESS =
MacAddress.valueOf("00:00:00:00:00:01").getAddress();
private static final byte[] ZERO_MAC_ADDRESS =
MacAddress.valueOf("00:00:00:00:00:00").getAddress();
// TODO put on Ethernet
private static final byte[] BROADCAST_MAC =
MacAddress.valueOf("ff:ff:ff:ff:ff:ff").getAddress();
private final HostService hostService;
private final TopologyService topologyService;
private final DeviceService deviceService;
private final HostProvider hostProvider;
private final PacketProvider packetProvider;
private final PacketService packetService;
private final HostStore hostStore;
private final Set<IpPrefix> monitoredAddresses;
private final Set<IpAddress> monitoredAddresses;
private final long probeRate;
......@@ -32,12 +69,14 @@ public class HostMonitor implements TimerTask {
public HostMonitor(HostService hostService, TopologyService topologyService,
DeviceService deviceService,
HostProvider hostProvider, PacketProvider packetProvider) {
HostProvider hostProvider, PacketService packetService,
HostStore hostStore) {
this.hostService = hostService;
this.topologyService = topologyService;
this.deviceService = deviceService;
this.hostProvider = hostProvider;
this.packetProvider = packetProvider;
this.packetService = packetService;
this.hostStore = hostStore;
monitoredAddresses = new HashSet<>();
......@@ -46,11 +85,11 @@ public class HostMonitor implements TimerTask {
timeout = Timer.getTimer().newTimeout(this, 0, TimeUnit.MILLISECONDS);
}
public void addMonitoringFor(IpPrefix ip) {
public void addMonitoringFor(IpAddress ip) {
monitoredAddresses.add(ip);
}
public void stopMonitoring(IpPrefix ip) {
public void stopMonitoring(IpAddress ip) {
monitoredAddresses.remove(ip);
}
......@@ -60,8 +99,8 @@ public class HostMonitor implements TimerTask {
@Override
public void run(Timeout timeout) throws Exception {
for (IpPrefix ip : monitoredAddresses) {
Set<Host> hosts = hostService.getHostsByIp(ip);
for (IpAddress ip : monitoredAddresses) {
Set<Host> hosts = Collections.emptySet(); //TODO hostService.getHostsByIp(ip);
if (hosts.isEmpty()) {
sendArpRequest(ip);
......@@ -80,28 +119,70 @@ public class HostMonitor implements TimerTask {
*
* @param targetIp IP address to ARP for
*/
private void sendArpRequest(IpPrefix targetIp) {
// emit ARP packet out appropriate ports
private void sendArpRequest(IpAddress targetIp) {
// if ip is in one of the configured (external) subnets,
// send the probe out that port;
// else (ip isn't in any configured subnet)
// send it out all non-external edge ports
/*for (Device device : deviceService.getDevices()) {
// Find ports with an IP address in the target's subnet and send ARP
// probes out those ports.
for (Device device : deviceService.getDevices()) {
for (Port port : deviceService.getPorts(device.id())) {
for (IpPrefix ip : port.ipAddresses()) {
if (ip.contains(targetIp)) {
sendProbe(port, targetIp);
continue;
}
ConnectPoint cp = new ConnectPoint(device.id(), port.number());
PortAddresses addresses = hostStore.getAddressBindingsForPort(cp);
if (addresses.ip().contains(targetIp)) {
sendProbe(device.id(), port, addresses, targetIp);
}
}
}*/
}
// TODO case where no address was found.
// Broadcast out internal edge ports?
}
private void sendProbe(Port port, IpPrefix targetIp) {
private void sendProbe(DeviceId deviceId, Port port, PortAddresses portAddresses,
IpAddress targetIp) {
Ethernet arpPacket = createArpFor(targetIp, portAddresses);
List<Instruction> instructions = new ArrayList<>();
instructions.add(Instructions.createOutput(port.number()));
TrafficTreatment treatment =
new DefaultTrafficTreatment.Builder()
.add(Instructions.createOutput(port.number()))
.build();
OutboundPacket outboundPacket =
new DefaultOutboundPacket(deviceId, treatment,
ByteBuffer.wrap(arpPacket.serialize()));
packetService.emit(outboundPacket);
}
private Ethernet createArpFor(IpAddress targetIp, PortAddresses portAddresses) {
ARP arp = new ARP();
arp.setHardwareType(ARP.HW_TYPE_ETHERNET)
.setHardwareAddressLength((byte) Ethernet.DATALAYER_ADDRESS_LENGTH)
.setProtocolType(ARP.PROTO_TYPE_IP)
.setProtocolAddressLength((byte) IpPrefix.INET_LEN);
byte[] sourceMacAddress;
if (portAddresses.mac() == null) {
sourceMacAddress = DEFAULT_MAC_ADDRESS;
} else {
sourceMacAddress = portAddresses.mac().getAddress();
}
arp.setSenderHardwareAddress(sourceMacAddress)
.setSenderProtocolAddress(portAddresses.ip().toOctets())
.setTargetHardwareAddress(ZERO_MAC_ADDRESS)
.setTargetProtocolAddress(targetIp.toOctets());
Ethernet ethernet = new Ethernet();
ethernet.setEtherType(Ethernet.TYPE_ARP)
.setDestinationMACAddress(BROADCAST_MAC)
.setSourceMACAddress(sourceMacAddress)
.setPayload(arp);
return ethernet;
}
}
......
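For orientation, the reworked HostMonitor above is driven through addMonitoringFor() and stopMonitoring(). A minimal usage sketch follows; the injected service references and the IpAddress.valueOf(String) factory are assumptions for illustration, not part of this change:

    // Hypothetical caller inside an application component; the collaborating
    // services are assumed to be injected elsewhere.
    HostMonitor monitor = new HostMonitor(hostService, topologyService,
            deviceService, hostProvider, packetService, hostStore);

    // Ask the monitor to ARP-probe for a host that has not been detected yet.
    IpAddress target = IpAddress.valueOf("10.0.0.5");   // assumed factory method
    monitor.addMonitoringFor(target);

    // Once the host shows up (or interest is lost), stop probing for it.
    monitor.stopMonitoring(target);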
......@@ -3,6 +3,9 @@ package org.onlab.onos.net.device.impl;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.onlab.onos.cluster.MastershipListener;
import org.onlab.onos.cluster.MastershipService;
import org.onlab.onos.cluster.NodeId;
import org.onlab.onos.event.Event;
import org.onlab.onos.net.Device;
import org.onlab.onos.net.DeviceId;
......@@ -25,9 +28,12 @@ import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.event.impl.TestEventDispatcher;
import org.onlab.onos.net.trivial.impl.SimpleDeviceStore;
import com.google.common.collect.Sets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import static org.junit.Assert.*;
import static org.onlab.onos.net.Device.Type.SWITCH;
......@@ -69,6 +75,7 @@ public class DeviceManagerTest {
registry = mgr;
mgr.store = new SimpleDeviceStore();
mgr.eventDispatcher = new TestEventDispatcher();
mgr.mastershipService = new TestMastershipService();
mgr.activate();
service.addListener(listener);
......@@ -252,4 +259,31 @@ public class DeviceManagerTest {
}
}
private static class TestMastershipService implements MastershipService {
@Override
public NodeId getMasterFor(DeviceId deviceId) {
return null;
}
@Override
public Set<DeviceId> getDevicesOf(NodeId nodeId) {
return Sets.newHashSet(DID1, DID2);
}
@Override
public MastershipRole requestRoleFor(DeviceId deviceId) {
return MastershipRole.MASTER;
}
@Override
public void addListener(MastershipListener listener) {
}
@Override
public void removeListener(MastershipListener listener) {
}
}
}
......
package org.onlab.onos.net.device.impl;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.onlab.onos.cluster.MastershipListener;
import org.onlab.onos.cluster.MastershipService;
import org.onlab.onos.cluster.NodeId;
import org.onlab.onos.event.Event;
import org.onlab.onos.event.impl.TestEventDispatcher;
import org.onlab.onos.net.Device;
......@@ -34,6 +39,7 @@ import org.onlab.onos.store.impl.StoreManager;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import static org.junit.Assert.*;
......@@ -98,6 +104,7 @@ public class DistributedDeviceManagerTest {
dstore.activate();
mgr.store = dstore;
mgr.eventDispatcher = new TestEventDispatcher();
mgr.mastershipService = new TestMastershipService();
mgr.activate();
service.addListener(listener);
......@@ -302,4 +309,32 @@ public class DistributedDeviceManagerTest {
setupKryoPool();
}
}
private static class TestMastershipService implements MastershipService {
@Override
public NodeId getMasterFor(DeviceId deviceId) {
return null;
}
@Override
public Set<DeviceId> getDevicesOf(NodeId nodeId) {
return Sets.newHashSet(DID1, DID2);
}
@Override
public MastershipRole requestRoleFor(DeviceId deviceId) {
return MastershipRole.MASTER;
}
@Override
public void addListener(MastershipListener listener) {
}
@Override
public void removeListener(MastershipListener listener) {
}
}
}
......
package org.onlab.onos.store.impl;
import com.hazelcast.config.Config;
import com.hazelcast.config.FileSystemXmlConfig;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import de.javakaffee.kryoserializers.URISerializer;
......@@ -25,6 +27,7 @@ import org.onlab.util.KryoPool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileNotFoundException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
......@@ -36,6 +39,8 @@ import java.util.HashMap;
@Service
public class StoreManager implements StoreService {
private static final String HAZELCAST_XML_FILE = "etc/hazelcast.xml";
private final Logger log = LoggerFactory.getLogger(getClass());
protected HazelcastInstance instance;
......@@ -44,9 +49,14 @@ public class StoreManager implements StoreService {
@Activate
public void activate() {
instance = Hazelcast.newHazelcastInstance();
setupKryoPool();
log.info("Started");
try {
Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE);
instance = Hazelcast.newHazelcastInstance(config);
setupKryoPool();
log.info("Started");
} catch (FileNotFoundException e) {
log.error("Unable to configure Hazelcast", e);
}
}
/**
......
......@@ -98,7 +98,7 @@ public class SimpleDeviceStore implements DeviceStore {
availableDevices.add(deviceId);
// For now claim the device as a master automatically.
roles.put(deviceId, MastershipRole.MASTER);
// roles.put(deviceId, MastershipRole.MASTER);
}
return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, device, null);
}
......
package org.onlab.onos.net.trivial.impl;
import static org.slf4j.LoggerFactory.getLogger;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.cluster.ControllerNode;
import org.onlab.onos.cluster.DefaultControllerNode;
import org.onlab.onos.cluster.MastershipEvent;
import org.onlab.onos.cluster.MastershipStore;
import org.onlab.onos.cluster.NodeId;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.MastershipRole;
import org.onlab.packet.IpPrefix;
import org.slf4j.Logger;
import static org.onlab.onos.cluster.MastershipEvent.Type.*;
/**
* Manages the inventory of controller mastership over devices using a
* trivial in-memory structures implementation.
*/
@Component(immediate = true)
@Service
public class SimpleMastershipStore implements MastershipStore {
public static final IpPrefix LOCALHOST = IpPrefix.valueOf("127.0.0.1");
private final Logger log = getLogger(getClass());
private ControllerNode instance;
protected final ConcurrentMap<DeviceId, MastershipRole> roleMap =
new ConcurrentHashMap<DeviceId, MastershipRole>();
@Activate
public void activate() {
instance = new DefaultControllerNode(new NodeId("local"), LOCALHOST);
log.info("Started");
}
@Deactivate
public void deactivate() {
log.info("Stopped");
}
@Override
public MastershipEvent setRole(NodeId nodeId, DeviceId deviceId,
MastershipRole role) {
if (roleMap.get(deviceId) == null) {
return null;
}
roleMap.put(deviceId, role);
return new MastershipEvent(MASTER_CHANGED, deviceId, nodeId);
}
@Override
public MastershipEvent addOrUpdateDevice(NodeId instance,
DeviceId deviceId, MastershipRole role) {
//TODO refine when we do listeners
roleMap.put(deviceId, role);
return null;
}
@Override
public NodeId getMaster(DeviceId deviceId) {
return instance.id();
}
@Override
public Set<DeviceId> getDevices(NodeId nodeId) {
return Collections.unmodifiableSet(roleMap.keySet());
}
@Override
public MastershipRole getRole(NodeId nodeId, DeviceId deviceId) {
MastershipRole role = roleMap.get(deviceId);
if (role == null) {
//Default to MASTER. If clustered, we'd first check whether another node already has a claim here.
role = MastershipRole.MASTER;
roleMap.put(deviceId, role);
}
return role;
}
}
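Taken together with the MastershipManager change above (which posts an event only when setRole() returns one), the store's defaulting behaviour can be sketched as follows; this is a hypothetical snippet, and the DeviceId.deviceId(String) factory plus the MastershipRole.STANDBY value are assumed for illustration:

    // Hypothetical snippet, not part of this change.
    SimpleMastershipStore store = new SimpleMastershipStore();
    store.activate();

    DeviceId did = DeviceId.deviceId("of:0000000000000001");  // assumed factory
    NodeId local = new NodeId("local");

    // The first lookup claims the device for this node and answers MASTER.
    MastershipRole first = store.getRole(local, did);

    // With a role now recorded, setRole() updates it and returns an event for
    // MastershipManager to post; for an unknown device it would return null.
    MastershipEvent event = store.setRole(local, did, MastershipRole.STANDBY);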
......@@ -93,8 +93,8 @@
<feature name="onos-app-tvue" version="1.0.0"
description="ONOS sample topology viewer application">
<feature>onos-thirdparty-web</feature>
<feature>onos-api</feature>
<feature>onos-thirdparty-web</feature>
<bundle>mvn:org.onlab.onos/onos-app-tvue/1.0.0-SNAPSHOT</bundle>
</feature>
......
#!/bin/bash
#-------------------------------------------------------------------------------
# Builds ONOS from source.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
. $ONOS_ROOT/tools/build/envDefaults
cd $ONOS_ROOT
mvn clean install && mvn javadoc:aggregate
\ No newline at end of file
......@@ -51,7 +51,7 @@ perl -pi.old -e "s|^(featuresRepositories=.*)|\1,mvn:org.onlab.onos/onos-feature
$ONOS_STAGE/$KARAF_DIST/etc/org.apache.karaf.features.cfg
# Patch the Apache Karaf distribution file to load ONOS features
perl -pi.old -e 's|^(featuresBoot=.*)|\1,onos-api,onos-core,onos-cli,onos-rest,onos-gui,onos-openflow,onos-app-tvue,onos-app-fwd|' \
perl -pi.old -e 's|^(featuresBoot=.*)|\1,onos-api,onos-core-trivial,onos-cli,onos-rest,onos-gui,onos-openflow,onos-app-tvue,onos-app-fwd|' \
$ONOS_STAGE/$KARAF_DIST/etc/org.apache.karaf.features.cfg
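# For illustration only (not part of this change): with a stock Apache Karaf 3.0
# distribution the patched line would end up roughly as
#   featuresBoot=config,standard,region,package,kar,ssh,management,onos-api,onos-core-trivial,onos-cli,onos-rest,onos-gui,onos-openflow,onos-app-tvue,onos-app-fwd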
# Patch the Apache Karaf distribution with ONOS branding bundle
......
......@@ -9,5 +9,5 @@
nodes=$(env | sort | egrep "OC[0-9]+" | cut -d= -f2)
onos-package
for node in $nodes; do onos-install -f $node; done
for node in $nodes; do printf "%s: " $node; onos-install -f $node; done
for node in $nodes; do onos-wait-for-start $node; done
......
......@@ -28,8 +28,9 @@ function o {
# Short-hand for 'mvn clean install' for us lazy folk
alias mci='mvn clean install'
# Short-hand for ONOS build from the top of the source tree.
alias ob='o && mvn clean install javadoc:aggregate'
# Short-hand for ONOS build, package and test.
alias ob='onos-build'
alias op='onos-package'
alias ot='onos-test'
# Short-hand for tailing the ONOS (karaf) log
......@@ -41,7 +42,7 @@ alias pp='python -m json.tool'
# Short-hand to launch API docs and sample topology viewer GUI
alias docs='open $ONOS_ROOT/target/site/apidocs/index.html'
alias gui='open http://localhost:8181/onos/tvue'
alias gui='onos-gui'
# Test related conveniences
......
......@@ -16,6 +16,6 @@ env JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
script
[ -f /opt/onos/options ] && . /opt/onos/options
start-stop-daemon --signal INT --start --chuid sdn \
--exec /opt/onos/bin/onos-ctl -- $ONOS_OPTS \
--exec /opt/onos/bin/onos-service -- $ONOS_OPTS \
>/opt/onos/var/stdout.log 2>/opt/onos/var/stderr.log
end script
......
<?xml version="1.0" encoding="UTF-8"?>
<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.2.xsd"
<!--
~ Copyright (c) 2008-2013, Hazelcast, Inc. All Rights Reserved.
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!--
The default Hazelcast configuration. This is used when:
- no hazelcast.xml is present
-->
<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.3.xsd"
xmlns="http://www.hazelcast.com/schema/config"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<group>
......@@ -11,8 +33,8 @@
<port auto-increment="true" port-count="100">5701</port>
<outbound-ports>
<!--
Allowed port range when connecting to other nodes.
0 or * means use system provided port.
Allowed port range when connecting to other nodes.
0 or * means use system provided port.
-->
<ports>0</ports>
</outbound-ports>
......@@ -24,18 +46,6 @@
<tcp-ip enabled="false">
<interface>127.0.0.1</interface>
</tcp-ip>
<aws enabled="false">
<access-key>my-access-key</access-key>
<secret-key>my-secret-key</secret-key>
<!--optional, default is us-east-1 -->
<region>us-west-1</region>
<!--optional, default is ec2.amazonaws.com. If set, region shouldn't be set as it will override this property -->
<host-header>ec2.amazonaws.com</host-header>
<!-- optional, only instances belonging to this group will be discovered, default will try all running instances -->
<security-group-name>hazelcast-sg</security-group-name>
<tag-key>type</tag-key>
<tag-value>hz-nodes</tag-value>
</aws>
</join>
<interfaces enabled="true">
<interface>192.168.56.*</interface>
......@@ -61,9 +71,9 @@
</symmetric-encryption>
</network>
<partition-group enabled="false"/>
<executor-service>
<executor-service name="default">
<pool-size>16</pool-size>
<!-- Queue capacity. 0 means Integer.MAX_VALUE -->
<!--Queue capacity. 0 means Integer.MAX_VALUE.-->
<queue-capacity>0</queue-capacity>
</executor-service>
<queue name="default">
......@@ -81,22 +91,24 @@
fail-safety. 0 means no backup.
-->
<backup-count>1</backup-count>
<!--
Number of async backups. 0 means no backup.
-->
<async-backup-count>0</async-backup-count>
<empty-queue-ttl>-1</empty-queue-ttl>
</queue>
<map name="default">
<!--
Data type that will be used for storing recordMap.
Possible values:
BINARY (default): keys and values will be stored as binary data
OBJECT : values will be stored in their object forms
OFFHEAP : values will be stored in non-heap region of JVM
Data type that will be used for storing recordMap.
Possible values:
BINARY (default): keys and values will be stored as binary data
OBJECT : values will be stored in their object forms
OFFHEAP : values will be stored in non-heap region of JVM
-->
<in-memory-format>BINARY</in-memory-format>
<!--
Number of backups. If 1 is set as the backup-count for example,
then all entries of the map will be copied to another JVM for
......@@ -144,6 +156,12 @@
-->
<eviction-percentage>25</eviction-percentage>
<!--
Minimum time in milliseconds which should pass before checking
if a partition of this map is evictable or not.
Default value is 100 millis.
-->
<min-eviction-check-millis>100</min-eviction-check-millis>
<!--
While recovering from split-brain (network partitioning),
map entries in the small cluster will merge into the bigger cluster
based on the policy set here. When an entry merge into the
......@@ -159,6 +177,7 @@
com.hazelcast.map.merge.LatestUpdateMapMergePolicy ; entry with the latest update wins.
-->
<merge-policy>com.hazelcast.map.merge.PassThroughMergePolicy</merge-policy>
</map>
<multimap name="default">
......@@ -199,5 +218,6 @@
<portable-version>0</portable-version>
</serialization>
<services enable-defaults="true" />
<services enable-defaults="true"/>
</hazelcast>
......
#!/bin/bash
#-------------------------------------------------------------------------------
# ONOS remote command-line client
# ONOS remote command-line client.
#-------------------------------------------------------------------------------
[ -n "$1" ] && OCI=$1 && shift
......
#!/bin/bash
#-------------------------------------------------------------------------------
# Checks the logs of the remote ONOS instance and makes sure they are clean.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
. $ONOS_ROOT/tools/build/envDefaults
remote=$ONOS_USER@${1:-$OCI}
LOG=$ONOS_INSTALL_DIR/log/karaf.log
ssh $remote "egrep 'ERROR|Exception' $LOG"
#!/bin/bash
#-------------------------------------------------------------------------------
# Launches ONOS GUI on the specified node.
#-------------------------------------------------------------------------------
host=${1:-$OCI}
host=${host:-localhost}
open http://$host:8181/onos/tvue
\ No newline at end of file
#!/bin/bash
#-------------------------------------------------------------------------------
# Remotely pushes bits to a remote machine and installs ONOS.
# Remotely pushes bits to a remote node and installs ONOS on it.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
......
#!/bin/bash
#-------------------------------------------------------------------------------
# Monitors remote ONOS log file.
# Monitors remote ONOS log file on the specified node.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
......
#!/bin/bash
#-------------------------------------------------------------------------------
# Pushes the local id_rsa.pub to the remote ONOS host authorized_keys.
# Pushes the local id_rsa.pub to the authorized_keys on a remote ONOS node.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
......
#!/bin/bash
#-------------------------------------------------------------------------------
# Remotely administers the ONOS service on the specified node.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
. $ONOS_ROOT/tools/build/envDefaults
ssh $ONOS_USER@${1:-$OCI} "sudo service onos ${2:-status}"
\ No newline at end of file
#!/bin/bash
#-------------------------------------------------------------------------------
# Logs in to the remote ONOS instance.
# Logs in to the remote ONOS node.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
......
#!/bin/bash
#-------------------------------------------------------------------------------
# Remotely stops & uninstalls ONOS.
# Remotely stops & uninstalls ONOS on the specified node.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
......
#!/bin/bash
#-------------------------------------------------------------------------------
# Waits for ONOS to reach run-level 100.
# Waits for ONOS to reach run-level 100 on the specified remote node.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
......