Thomas Vachuska
Committed by Ray Milkey

Added bootstrap code to auto-generate cluster.json, tablets.json and hazelcast.xml using local site address.

Change-Id: I3210aadc63403022b4aac3bc3591736801240b50
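All three bootstrap paths follow the same pattern: at activation, if the definition file is missing, synthesize a default from the node's site-local address and write it before reading it back. A minimal standalone sketch of that check-then-write flow (the file name, payload and helper below are illustrative, not the ONOS definition-store API):

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

public final class BootstrapSketch {

    /** Loads the given config file, writing a default built from the local IP if it is absent. */
    static String loadOrCreate(File file, String localIp) throws IOException {
        if (!file.exists()) {
            // Illustrative payload only; the real change writes cluster.json, tablets.json
            // and hazelcast.xml through their respective definition stores and a filtered
            // copy of the bundled hazelcast.xml template.
            String defaultConfig = "{ \"node\": \"" + localIp + "\" }";
            Files.write(file.toPath(), defaultConfig.getBytes(StandardCharsets.UTF_8));
        }
        return new String(Files.readAllBytes(file.toPath()), StandardCharsets.UTF_8);
    }
}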
......@@ -71,15 +71,15 @@ public class ApplicationManager implements ApplicationService, ApplicationAdminS
@Activate
public void activate() {
store.setDelegate(delegate);
eventDispatcher.addSink(ApplicationEvent.class, listenerRegistry);
store.setDelegate(delegate);
log.info("Started");
}
@Deactivate
public void deactivate() {
store.unsetDelegate(delegate);
eventDispatcher.removeSink(ApplicationEvent.class);
store.unsetDelegate(delegate);
log.info("Stopped");
}
......
......@@ -27,6 +27,8 @@ import org.onosproject.core.CoreService;
import org.onosproject.core.IdBlockStore;
import org.onosproject.core.IdGenerator;
import org.onosproject.core.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.List;
......@@ -41,6 +43,8 @@ import static com.google.common.base.Preconditions.checkNotNull;
@Service
public class CoreManager implements CoreService {
private final Logger log = LoggerFactory.getLogger(getClass());
private static final File VERSION_FILE = new File("../VERSION");
private static Version version = Version.version("1.2.0-SNAPSHOT");
......
......@@ -57,6 +57,8 @@ import java.util.stream.Collectors;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.net.NetworkInterface.getNetworkInterfaces;
import static java.util.Collections.list;
import static org.onlab.util.Tools.groupedThreads;
import static org.slf4j.LoggerFactory.getLogger;
......@@ -70,7 +72,7 @@ public class DistributedClusterStore
extends AbstractStore<ClusterEvent, ClusterStoreDelegate>
implements ClusterStore {
private final Logger log = getLogger(DistributedClusterStore.class);
private static final Logger log = getLogger(DistributedClusterStore.class);
// TODO: make these configurable.
private static final int HEARTBEAT_FD_PORT = 2419;
......@@ -81,14 +83,16 @@ public class DistributedClusterStore
private static final String CLUSTER_DEFINITION_FILE = "cluster.json";
private static final String HEARTBEAT_MESSAGE = "onos-cluster-heartbeat";
public static final int DEFAULT_PORT = 9876;
private static final KryoSerializer SERIALIZER = new KryoSerializer() {
@Override
protected void setupKryoPool() {
serializerPool = KryoNamespace.newBuilder()
.register(KryoNamespaces.API)
.register(HeartbeatMessage.class)
.build()
.populate(1);
.register(KryoNamespaces.API)
.register(HeartbeatMessage.class)
.build()
.populate(1);
}
};
......@@ -112,18 +116,22 @@ public class DistributedClusterStore
@Activate
public void activate() {
File clusterDefinitionFile = new File(CONFIG_DIR,
CLUSTER_DEFINITION_FILE);
File clusterDefinitionFile = new File(CONFIG_DIR, CLUSTER_DEFINITION_FILE);
ClusterDefinitionStore clusterDefinitionStore =
new ClusterDefinitionStore(clusterDefinitionFile.getPath());
if (!clusterDefinitionFile.exists()) {
createDefaultClusterDefinition(clusterDefinitionStore);
}
try {
clusterDefinition = new ClusterDefinitionStore(
clusterDefinitionFile.getPath()).read();
clusterDefinition = clusterDefinitionStore.read();
seedNodes = ImmutableSet
.copyOf(clusterDefinition.getNodes())
.stream()
.map(nodeInfo -> new DefaultControllerNode(new NodeId(
nodeInfo.getId()), IpAddress.valueOf(nodeInfo
.getIp()), nodeInfo.getTcpPort()))
.map(nodeInfo -> new DefaultControllerNode(new NodeId(nodeInfo.getId()),
IpAddress.valueOf(nodeInfo.getIp()),
nodeInfo.getTcpPort()))
.collect(Collectors.toSet());
} catch (IOException e) {
throw new IllegalStateException(
......@@ -148,16 +156,51 @@ public class DistributedClusterStore
+ " failure detector communication channel.", e);
}
messagingService.registerHandler(HEARTBEAT_MESSAGE,
new HeartbeatMessageHandler(), heartBeatMessageHandler);
new HeartbeatMessageHandler(), heartBeatMessageHandler);
failureDetector = new PhiAccrualFailureDetector();
heartBeatSender.scheduleWithFixedDelay(this::heartbeat, 0,
HEARTBEAT_INTERVAL_MS, TimeUnit.MILLISECONDS);
HEARTBEAT_INTERVAL_MS, TimeUnit.MILLISECONDS);
log.info("Started");
}
private void createDefaultClusterDefinition(ClusterDefinitionStore store) {
// Assumes IPv4 is returned.
String ip = DistributedClusterStore.getSiteLocalAddress();
String ipPrefix = ip.replaceFirst("\\.[0-9]*$", ".*");
NodeInfo node = NodeInfo.from(ip, ip, DEFAULT_PORT);
try {
store.write(ClusterDefinition.from(ImmutableSet.of(node), ipPrefix));
} catch (IOException e) {
log.warn("Unable to write default cluster definition", e);
}
}
/**
* Returns the site local address if one can be found, loopback otherwise.
*
* @return site-local address in string form
*/
public static String getSiteLocalAddress() {
try {
for (NetworkInterface nif : list(getNetworkInterfaces())) {
for (InetAddress address : list(nif.getInetAddresses())) {
if (address.getAddress()[0] == (byte) 0xC0) {
return address.toString().substring(1);
}
}
}
return InetAddress.getLoopbackAddress().toString().substring(1);
} catch (SocketException e) {
log.error("Unable to get network interfaces", e);
}
return null;
}
@Deactivate
public void deactivate() {
try {
......@@ -300,7 +343,7 @@ public class DistributedClusterStore
NetworkInterface.getNetworkInterfaces();
while (interfaces.hasMoreElements()) {
NetworkInterface iface = interfaces.nextElement();
Enumeration<InetAddress> inetAddresses = iface.getInetAddresses();
Enumeration<InetAddress> inetAddresses = iface.getInetAddresses();
while (inetAddresses.hasMoreElements()) {
IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement());
if (AddressUtil.matchInterface(ip.toString(), clusterDefinition.getIpPrefix())) {
......
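A note on getSiteLocalAddress() above: the first-byte test against 0xC0 matches any address beginning with 192, and toString().substring(1) relies on the host-name part of InetAddress.toString() being empty, which is typically not the case for getLoopbackAddress() (whose string form is "localhost/127.0.0.1"). A sketch of an equivalent lookup using the JDK's own classification and textual accessor, not part of this change:

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;

import static java.net.NetworkInterface.getNetworkInterfaces;
import static java.util.Collections.list;

public final class SiteLocalAddressSketch {

    /** Returns a site-local address in string form, falling back to loopback. */
    public static String siteLocalOrLoopback() {
        try {
            for (NetworkInterface nif : list(getNetworkInterfaces())) {
                for (InetAddress address : list(nif.getInetAddresses())) {
                    // For IPv4, isSiteLocalAddress() matches the RFC 1918 ranges 10/8,
                    // 172.16/12 and 192.168/16; the 0xC0 check above matches any address
                    // whose first byte is 192.
                    if (address.isSiteLocalAddress()) {
                        return address.getHostAddress();
                    }
                }
            }
        } catch (SocketException e) {
            // Fall through to the loopback fallback below.
        }
        // getHostAddress() yields just the textual IP, avoiding "localhost/127.0.0.1".
        return InetAddress.getLoopbackAddress().getHostAddress();
    }
}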
......@@ -16,6 +16,8 @@
package org.onosproject.store.consistent.impl;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
......@@ -40,6 +42,7 @@ import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.onosproject.cluster.ClusterService;
import org.onosproject.store.cluster.impl.DistributedClusterStore;
import org.onosproject.store.cluster.impl.NodeInfo;
import org.onosproject.store.cluster.messaging.ClusterCommunicationService;
import org.onosproject.store.ecmap.EventuallyConsistentMapBuilderImpl;
......@@ -102,8 +105,11 @@ public class DatabaseManager implements StorageService, StorageAdminService {
Map<String, Set<NodeInfo>> partitionMap;
try {
DatabaseDefinitionStore databaseDef = new DatabaseDefinitionStore(file);
partitionMap = databaseDef.read().getPartitions();
DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(file);
if (!file.exists()) {
createDefaultDatabaseDefinition(databaseDefStore);
}
partitionMap = databaseDefStore.read().getPartitions();
} catch (IOException e) {
throw new IllegalStateException("Failed to load database config", e);
}
......@@ -180,6 +186,18 @@ public class DatabaseManager implements StorageService, StorageAdminService {
log.info("Started");
}
private void createDefaultDatabaseDefinition(DatabaseDefinitionStore store) {
// Assumes IPv4 is returned.
String ip = DistributedClusterStore.getSiteLocalAddress();
NodeInfo node = NodeInfo.from(ip, ip, DistributedClusterStore.DEFAULT_PORT);
try {
store.write(DatabaseDefinition.from(ImmutableMap.of("p1", ImmutableSet.of(node)),
ImmutableSet.of(node)));
} catch (IOException e) {
log.warn("Unable to write default cluster definition", e);
}
}
@Deactivate
public void deactivate() {
CompletableFuture.allOf(inMemoryDatabase.close(), partitionedDatabase.close())
......
......@@ -15,6 +15,8 @@
*/
package org.onosproject.store.hz;
import com.google.common.io.ByteStreams;
import com.google.common.io.Files;
import com.hazelcast.config.Config;
import com.hazelcast.config.FileSystemXmlConfig;
import com.hazelcast.core.Hazelcast;
......@@ -24,10 +26,14 @@ import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Service;
import org.onosproject.store.cluster.impl.DistributedClusterStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
/**
* Auxiliary bootstrap of distributed store.
......@@ -45,6 +51,11 @@ public class StoreManager implements StoreService {
@Activate
public void activate() {
try {
File hazelcastFile = new File(HAZELCAST_XML_FILE);
if (!hazelcastFile.exists()) {
createDefaultHazelcastFile(hazelcastFile);
}
Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE);
instance = Hazelcast.newHazelcastInstance(config);
......@@ -54,6 +65,20 @@ public class StoreManager implements StoreService {
}
}
private void createDefaultHazelcastFile(File hazelcastFile) {
String ip = DistributedClusterStore.getSiteLocalAddress();
String ipPrefix = ip.replaceFirst("\\.[0-9]*$", ".*");
InputStream his = getClass().getResourceAsStream("/hazelcast.xml");
try {
String hzCfg = new String(ByteStreams.toByteArray(his), "UTF-8");
hzCfg = hzCfg.replaceFirst("@NAME", ip);
hzCfg = hzCfg.replaceFirst("@PREFIX", ipPrefix);
Files.write(hzCfg.getBytes("UTF-8"), hazelcastFile);
} catch (IOException e) {
log.error("Unable to write default hazelcast file", e);
}
}
@Deactivate
public void deactivate() {
instance.shutdown();
......
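One detail of createDefaultHazelcastFile() above: String.replaceFirst() treats its first argument as a regular expression and its second as a replacement pattern. That is harmless here because "@NAME" and "@PREFIX" contain no regex metacharacters and the substituted IP strings contain no '$' or '\' characters. A sketch of the same substitution written to be literal-safe (class and method names are illustrative):

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.google.common.io.Files;

public final class HazelcastTemplateSketch {

    /** Writes the template with @NAME and @PREFIX replaced by literal values. */
    static void writeConfig(String template, String ip, String ipPrefix, File target) throws IOException {
        String cfg = template
                .replaceFirst(Pattern.quote("@NAME"), Matcher.quoteReplacement(ip))
                .replaceFirst(Pattern.quote("@PREFIX"), Matcher.quoteReplacement(ipPrefix));
        Files.write(cfg.getBytes(StandardCharsets.UTF_8), target);
    }
}

A plain String.replace(CharSequence, CharSequence) would achieve the same literal substitution without the quoting.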
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Copyright 2015 Open Networking Laboratory
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!--
The default Hazelcast configuration. This is used when:
- no hazelcast.xml is present
-->
<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.3.xsd"
xmlns="http://www.hazelcast.com/schema/config"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<group>
<name>@NAME</name>
<password>rocks</password>
</group>
<management-center enabled="false">http://localhost:8080/mancenter</management-center>
<properties>
<property name="hazelcast.max.no.heartbeat.seconds">30</property>
<property name="hazelcast.merge.first.run.delay.seconds">30</property>
<property name="hazelcast.merge.next.run.delay.seconds">30</property>
</properties>
<network>
<port auto-increment="true" port-count="100">5701</port>
<outbound-ports>
<!--
Allowed port range when connecting to other nodes.
0 or * means use system provided port.
-->
<ports>0</ports>
</outbound-ports>
<join>
<multicast enabled="true">
<multicast-group>224.2.2.3</multicast-group>
<multicast-port>54327</multicast-port>
</multicast>
<tcp-ip enabled="false">
<interface>127.0.0.1</interface>
</tcp-ip>
</join>
<interfaces enabled="true">
<interface>@PREFIX</interface>
</interfaces>
<ssl enabled="false"/>
<socket-interceptor enabled="false"/>
<symmetric-encryption enabled="false">
<!--
encryption algorithm such as
DES/ECB/PKCS5Padding,
PBEWithMD5AndDES,
AES/CBC/PKCS5Padding,
Blowfish,
DESede
-->
<algorithm>PBEWithMD5AndDES</algorithm>
<!-- salt value to use when generating the secret key -->
<salt>thesalt</salt>
<!-- pass phrase to use when generating the secret key -->
<password>thepass</password>
<!-- iteration count to use when generating the secret key -->
<iteration-count>19</iteration-count>
</symmetric-encryption>
</network>
<partition-group enabled="false"/>
<executor-service name="default">
<pool-size>16</pool-size>
<!--Queue capacity. 0 means Integer.MAX_VALUE.-->
<queue-capacity>0</queue-capacity>
</executor-service>
<queue name="default">
<!--
Maximum size of the queue. When a JVM's local queue size reaches the maximum,
all put/offer operations will get blocked until the queue size
of the JVM goes down below the maximum.
Any integer between 0 and Integer.MAX_VALUE. 0 means
Integer.MAX_VALUE. Default is 0.
-->
<max-size>0</max-size>
<!--
Number of backups. If 1 is set as the backup-count for example,
then all entries of the map will be copied to another JVM for
fail-safety. 0 means no backup.
-->
<backup-count>1</backup-count>
<!--
Number of async backups. 0 means no backup.
-->
<async-backup-count>0</async-backup-count>
<empty-queue-ttl>-1</empty-queue-ttl>
</queue>
<map name="default">
<!--
Data type that will be used for storing recordMap.
Possible values:
BINARY (default): keys and values will be stored as binary data
OBJECT : values will be stored in their object forms
OFFHEAP : values will be stored in non-heap region of JVM
-->
<in-memory-format>BINARY</in-memory-format>
<!--
Number of backups. If 1 is set as the backup-count for example,
then all entries of the map will be copied to another JVM for
fail-safety. 0 means no backup.
-->
<backup-count>1</backup-count>
<!--
Number of async backups. 0 means no backup.
-->
<async-backup-count>0</async-backup-count>
<!--
Maximum number of seconds for each entry to stay in the map. Entries that are
older than <time-to-live-seconds> and not updated for <time-to-live-seconds>
will get automatically evicted from the map.
Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
-->
<time-to-live-seconds>0</time-to-live-seconds>
<!--
Maximum number of seconds for each entry to stay idle in the map. Entries that are
idle(not touched) for more than <max-idle-seconds> will get
automatically evicted from the map. Entry is touched if get, put or containsKey is called.
Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
-->
<max-idle-seconds>0</max-idle-seconds>
<!--
Valid values are:
NONE (no eviction),
LRU (Least Recently Used),
LFU (Least Frequently Used).
NONE is the default.
-->
<eviction-policy>NONE</eviction-policy>
<!--
Maximum size of the map. When max size is reached,
map is evicted based on the policy defined.
Any integer between 0 and Integer.MAX_VALUE. 0 means
Integer.MAX_VALUE. Default is 0.
-->
<max-size policy="PER_NODE">0</max-size>
<!--
When max. size is reached, specified percentage of
the map will be evicted. Any integer between 0 and 100.
If 25 is set for example, 25% of the entries will
get evicted.
-->
<eviction-percentage>25</eviction-percentage>
<!--
Minimum time in milliseconds which should pass before checking
if a partition of this map is evictable or not.
Default value is 100 millis.
-->
<min-eviction-check-millis>100</min-eviction-check-millis>
<!--
While recovering from split-brain (network partitioning),
map entries in the small cluster will merge into the bigger cluster
based on the policy set here. When an entry merges into the
cluster, there might already be an existing entry with the same key.
Values of these entries might be different for that same key.
Which value should be set for the key? Conflict is resolved by
the policy set here. Default policy is PutIfAbsentMapMergePolicy
There are built-in merge policies such as
com.hazelcast.map.merge.PassThroughMergePolicy; entry will be added if there is no existing entry for the key.
com.hazelcast.map.merge.PutIfAbsentMapMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster.
com.hazelcast.map.merge.HigherHitsMapMergePolicy ; entry with the higher hits wins.
com.hazelcast.map.merge.LatestUpdateMapMergePolicy ; entry with the latest update wins.
-->
<merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy>
</map>
<multimap name="default">
<backup-count>1</backup-count>
<value-collection-type>SET</value-collection-type>
</multimap>
<multimap name="default">
<backup-count>1</backup-count>
<value-collection-type>SET</value-collection-type>
</multimap>
<list name="default">
<backup-count>1</backup-count>
</list>
<set name="default">
<backup-count>1</backup-count>
</set>
<jobtracker name="default">
<max-thread-size>0</max-thread-size>
<!-- Queue size 0 means number of partitions * 2 -->
<queue-size>0</queue-size>
<retry-count>0</retry-count>
<chunk-size>1000</chunk-size>
<communicate-stats>true</communicate-stats>
<topology-changed-strategy>CANCEL_RUNNING_OPERATION</topology-changed-strategy>
</jobtracker>
<semaphore name="default">
<initial-permits>0</initial-permits>
<backup-count>1</backup-count>
<async-backup-count>0</async-backup-count>
</semaphore>
<serialization>
<portable-version>0</portable-version>
</serialization>
<services enable-defaults="true"/>
</hazelcast>