Added support for firing up multiple raft partitions + Workaround for an issue where db calls timeout when a raft cluster node is down.
Change-Id: I67406da34c8a96b8ab9371d4d9b14653edfd2e2d
Showing 8 changed files with 242 additions and 165 deletions
1 | +/* | ||
2 | + * Copyright 2015 Open Networking Laboratory | ||
3 | + * | ||
4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | + * you may not use this file except in compliance with the License. | ||
6 | + * You may obtain a copy of the License at | ||
7 | + * | ||
8 | + * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | + * | ||
10 | + * Unless required by applicable law or agreed to in writing, software | ||
11 | + * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | + * See the License for the specific language governing permissions and | ||
14 | + * limitations under the License. | ||
15 | + */ | ||
1 | package org.onosproject.store.cluster.impl; | 16 | package org.onosproject.store.cluster.impl; |
2 | 17 | ||
3 | import java.util.Set; | 18 | import java.util.Set; |
4 | 19 | ||
5 | -import org.onosproject.cluster.DefaultControllerNode; | ||
6 | - | ||
7 | import com.google.common.collect.ImmutableSet; | 20 | import com.google.common.collect.ImmutableSet; |
8 | 21 | ||
9 | /** | 22 | /** |
... | @@ -11,16 +24,16 @@ import com.google.common.collect.ImmutableSet; | ... | @@ -11,16 +24,16 @@ import com.google.common.collect.ImmutableSet; |
11 | */ | 24 | */ |
12 | public class ClusterDefinition { | 25 | public class ClusterDefinition { |
13 | 26 | ||
14 | - private Set<DefaultControllerNode> nodes; | 27 | + private Set<NodeInfo> nodes; |
15 | private String ipPrefix; | 28 | private String ipPrefix; |
16 | 29 | ||
17 | /** | 30 | /** |
18 | * Creates a new cluster definition. | 31 | * Creates a new cluster definition. |
19 | - * @param nodes cluster nodes. | 32 | + * @param nodes cluster nodes information |
20 | - * @param ipPrefix ip prefix common to all cluster nodes. | 33 | + * @param ipPrefix ip prefix common to all cluster nodes |
21 | * @return cluster definition | 34 | * @return cluster definition |
22 | */ | 35 | */ |
23 | - public static ClusterDefinition from(Set<DefaultControllerNode> nodes, String ipPrefix) { | 36 | + public static ClusterDefinition from(Set<NodeInfo> nodes, String ipPrefix) { |
24 | ClusterDefinition definition = new ClusterDefinition(); | 37 | ClusterDefinition definition = new ClusterDefinition(); |
25 | definition.ipPrefix = ipPrefix; | 38 | definition.ipPrefix = ipPrefix; |
26 | definition.nodes = ImmutableSet.copyOf(nodes); | 39 | definition.nodes = ImmutableSet.copyOf(nodes); |
... | @@ -28,18 +41,18 @@ public class ClusterDefinition { | ... | @@ -28,18 +41,18 @@ public class ClusterDefinition { |
28 | } | 41 | } |
29 | 42 | ||
30 | /** | 43 | /** |
31 | - * Returns set of cluster nodes. | 44 | + * Returns set of cluster nodes info. |
32 | - * @return cluster nodes. | 45 | + * @return cluster nodes info |
33 | */ | 46 | */ |
34 | - public Set<DefaultControllerNode> nodes() { | 47 | + public Set<NodeInfo> getNodes() { |
35 | return ImmutableSet.copyOf(nodes); | 48 | return ImmutableSet.copyOf(nodes); |
36 | } | 49 | } |
37 | 50 | ||
38 | /** | 51 | /** |
39 | * Returns ipPrefix in dotted decimal notion. | 52 | * Returns ipPrefix in dotted decimal notion. |
40 | - * @return ip prefix. | 53 | + * @return ip prefix |
41 | */ | 54 | */ |
42 | - public String ipPrefix() { | 55 | + public String getIpPrefix() { |
43 | return ipPrefix; | 56 | return ipPrefix; |
44 | } | 57 | } |
45 | } | 58 | } |
... | \ No newline at end of file | ... | \ No newline at end of file | ... | ... |
... | @@ -15,25 +15,12 @@ | ... | @@ -15,25 +15,12 @@ |
15 | */ | 15 | */ |
16 | package org.onosproject.store.cluster.impl; | 16 | package org.onosproject.store.cluster.impl; |
17 | 17 | ||
18 | -import com.fasterxml.jackson.core.JsonEncoding; | 18 | +import static com.google.common.base.Preconditions.checkNotNull; |
19 | -import com.fasterxml.jackson.core.JsonFactory; | ||
20 | -import com.fasterxml.jackson.databind.JsonNode; | ||
21 | -import com.fasterxml.jackson.databind.ObjectMapper; | ||
22 | -import com.fasterxml.jackson.databind.node.ArrayNode; | ||
23 | -import com.fasterxml.jackson.databind.node.ObjectNode; | ||
24 | -import com.fasterxml.jackson.databind.node.TextNode; | ||
25 | - | ||
26 | -import org.onosproject.cluster.DefaultControllerNode; | ||
27 | -import org.onosproject.cluster.NodeId; | ||
28 | -import org.onlab.packet.IpAddress; | ||
29 | 19 | ||
20 | +import com.fasterxml.jackson.databind.ObjectMapper; | ||
30 | import java.io.File; | 21 | import java.io.File; |
31 | import java.io.IOException; | 22 | import java.io.IOException; |
32 | -import java.util.HashSet; | ||
33 | -import java.util.Iterator; | ||
34 | -import java.util.Set; | ||
35 | 23 | ||
36 | -//Not used right now | ||
37 | /** | 24 | /** |
38 | * Allows for reading and writing cluster definition as a JSON file. | 25 | * Allows for reading and writing cluster definition as a JSON file. |
39 | */ | 26 | */ |
... | @@ -43,54 +30,32 @@ public class ClusterDefinitionStore { | ... | @@ -43,54 +30,32 @@ public class ClusterDefinitionStore { |
43 | 30 | ||
44 | /** | 31 | /** |
45 | * Creates a reader/writer of the cluster definition file. | 32 | * Creates a reader/writer of the cluster definition file. |
46 | - * | ||
47 | * @param filePath location of the definition file | 33 | * @param filePath location of the definition file |
48 | */ | 34 | */ |
49 | public ClusterDefinitionStore(String filePath) { | 35 | public ClusterDefinitionStore(String filePath) { |
50 | file = new File(filePath); | 36 | file = new File(filePath); |
51 | } | 37 | } |
52 | 38 | ||
53 | - /* | 39 | + /** |
54 | - * Returns set of the controller nodes, including self. | 40 | + * Returns the cluster definition. |
55 | - * | 41 | + * @return cluster definition |
56 | - * @return set of controller nodes | 42 | + * @throws IOException when I/O exception of some sort has occurred |
57 | */ | 43 | */ |
58 | public ClusterDefinition read() throws IOException { | 44 | public ClusterDefinition read() throws IOException { |
59 | - Set<DefaultControllerNode> nodes = new HashSet<>(); | ||
60 | ObjectMapper mapper = new ObjectMapper(); | 45 | ObjectMapper mapper = new ObjectMapper(); |
61 | - ObjectNode clusterNodeDef = (ObjectNode) mapper.readTree(file); | 46 | + ClusterDefinition definition = mapper.readValue(file, ClusterDefinition.class); |
62 | - Iterator<JsonNode> it = ((ArrayNode) clusterNodeDef.get("nodes")).elements(); | 47 | + return definition; |
63 | - while (it.hasNext()) { | ||
64 | - ObjectNode nodeDef = (ObjectNode) it.next(); | ||
65 | - nodes.add(new DefaultControllerNode(new NodeId(nodeDef.get("id").asText()), | ||
66 | - IpAddress.valueOf(nodeDef.get("ip").asText()), | ||
67 | - nodeDef.get("tcpPort").asInt(9876))); | ||
68 | } | 48 | } |
69 | - String ipPrefix = clusterNodeDef.get("ipPrefix").asText(); | ||
70 | 49 | ||
71 | - return ClusterDefinition.from(nodes, ipPrefix); | 50 | + /** |
72 | - } | 51 | + * Writes the specified cluster definition to file. |
73 | - | 52 | + * @param definition cluster definition |
74 | - /* | 53 | + * @throws IOException when I/O exception of some sort has occurred |
75 | - * Writes the given cluster definition. | ||
76 | - * | ||
77 | - * @param cluster definition | ||
78 | */ | 54 | */ |
79 | public void write(ClusterDefinition definition) throws IOException { | 55 | public void write(ClusterDefinition definition) throws IOException { |
80 | - ObjectMapper mapper = new ObjectMapper(); | 56 | + checkNotNull(definition); |
81 | - ObjectNode clusterNodeDef = mapper.createObjectNode(); | 57 | + // write back to file |
82 | - clusterNodeDef.set("ipPrefix", new TextNode(definition.ipPrefix())); | 58 | + final ObjectMapper mapper = new ObjectMapper(); |
83 | - ArrayNode nodeDefs = mapper.createArrayNode(); | 59 | + mapper.writeValue(file, definition); |
84 | - clusterNodeDef.set("nodes", nodeDefs); | ||
85 | - for (DefaultControllerNode node : definition.nodes()) { | ||
86 | - ObjectNode nodeDef = mapper.createObjectNode(); | ||
87 | - nodeDef.put("id", node.id().toString()) | ||
88 | - .put("ip", node.ip().toString()) | ||
89 | - .put("tcpPort", node.tcpPort()); | ||
90 | - nodeDefs.add(nodeDef); | ||
91 | - } | ||
92 | - mapper.writeTree(new JsonFactory().createGenerator(file, JsonEncoding.UTF8), | ||
93 | - clusterNodeDef); | ||
94 | } | 60 | } |
95 | - | ||
96 | } | 61 | } |
... | \ No newline at end of file | ... | \ No newline at end of file | ... | ... |
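
Note: the rewritten store above drops the hand-rolled ObjectNode parsing in favour of plain Jackson data binding. Below is a minimal usage sketch, assuming the onos core classes are on the classpath; the node id, address, ip prefix, and file path are made up for illustration. Because the bean properties come from getNodes()/getIpPrefix() (and getId()/getIp()/getTcpPort() on NodeInfo), the keys written to disk stay compatible with the ones the old writer emitted by hand; read() performs the inverse binding and is what ClusterManager now calls at startup.

```java
import com.google.common.collect.ImmutableSet;

import org.onosproject.store.cluster.impl.ClusterDefinition;
import org.onosproject.store.cluster.impl.ClusterDefinitionStore;
import org.onosproject.store.cluster.impl.NodeInfo;

public class ClusterDefinitionExample {
    public static void main(String[] args) throws Exception {
        // One-node definition; id, ip and prefix are placeholders.
        ClusterDefinition definition = ClusterDefinition.from(
                ImmutableSet.of(NodeInfo.from("node1", "10.0.0.1", 9876)),
                "10.0.0.*");

        // Data binding serializes via the getters, producing roughly:
        //   {"nodes":[{"id":"node1","ip":"10.0.0.1","tcpPort":9876}],"ipPrefix":"10.0.0.*"}
        new ClusterDefinitionStore("/tmp/cluster.json").write(definition);
    }
}
```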
... | @@ -127,7 +127,13 @@ public class ClusterManager implements ClusterService, ClusterAdminService { | ... | @@ -127,7 +127,13 @@ public class ClusterManager implements ClusterService, ClusterAdminService { |
127 | 127 | ||
128 | try { | 128 | try { |
129 | clusterDefinition = new ClusterDefinitionStore(clusterDefinitionFile.getPath()).read(); | 129 | clusterDefinition = new ClusterDefinitionStore(clusterDefinitionFile.getPath()).read(); |
130 | - seedNodes = ImmutableSet.copyOf(clusterDefinition.nodes()); | 130 | + seedNodes = ImmutableSet.copyOf(clusterDefinition.getNodes()) |
131 | + .stream() | ||
132 | + .map(nodeInfo -> new DefaultControllerNode( | ||
133 | + new NodeId(nodeInfo.getId()), | ||
134 | + IpAddress.valueOf(nodeInfo.getIp()), | ||
135 | + nodeInfo.getTcpPort())) | ||
136 | + .collect(Collectors.toSet()); | ||
131 | } catch (IOException e) { | 137 | } catch (IOException e) { |
132 | log.warn("Failed to read cluster definition.", e); | 138 | log.warn("Failed to read cluster definition.", e); |
133 | } | 139 | } |
... | @@ -330,7 +336,7 @@ public class ClusterManager implements ClusterService, ClusterAdminService { | ... | @@ -330,7 +336,7 @@ public class ClusterManager implements ClusterService, ClusterAdminService { |
330 | Enumeration<InetAddress> inetAddresses = iface.getInetAddresses(); | 336 | Enumeration<InetAddress> inetAddresses = iface.getInetAddresses(); |
331 | while (inetAddresses.hasMoreElements()) { | 337 | while (inetAddresses.hasMoreElements()) { |
332 | IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement()); | 338 | IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement()); |
333 | - if (AddressUtil.matchInterface(ip.toString(), clusterDefinition.ipPrefix())) { | 339 | + if (AddressUtil.matchInterface(ip.toString(), clusterDefinition.getIpPrefix())) { |
334 | return ip; | 340 | return ip; |
335 | } | 341 | } |
336 | } | 342 | } | ... | ... |
1 | +/* | ||
2 | + * Copyright 2015 Open Networking Laboratory | ||
3 | + * | ||
4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | + * you may not use this file except in compliance with the License. | ||
6 | + * You may obtain a copy of the License at | ||
7 | + * | ||
8 | + * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | + * | ||
10 | + * Unless required by applicable law or agreed to in writing, software | ||
11 | + * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | + * See the License for the specific language governing permissions and | ||
14 | + * limitations under the License. | ||
15 | + */ | ||
16 | +package org.onosproject.store.cluster.impl; | ||
17 | + | ||
18 | +import static com.google.common.base.MoreObjects.toStringHelper; | ||
19 | + | ||
20 | +import java.util.Objects; | ||
21 | + | ||
22 | +import org.onosproject.cluster.ControllerNode; | ||
23 | + | ||
24 | +/** | ||
25 | + * Node info read from configuration files during bootstrap. | ||
26 | + */ | ||
27 | +public final class NodeInfo { | ||
28 | + private final String id; | ||
29 | + private final String ip; | ||
30 | + private final int tcpPort; | ||
31 | + | ||
32 | + private NodeInfo(String id, String ip, int port) { | ||
33 | + this.id = id; | ||
34 | + this.ip = ip; | ||
35 | + this.tcpPort = port; | ||
36 | + } | ||
37 | + | ||
38 | + /** | ||
39 | + * Creates a new instance. | ||
40 | + * @param id node id | ||
41 | + * @param ip node ip address | ||
42 | + * @param port tcp port | ||
43 | + * @return NodeInfo | ||
44 | + */ | ||
45 | + public static NodeInfo from(String id, String ip, int port) { | ||
46 | + NodeInfo node = new NodeInfo(id, ip, port); | ||
47 | + return node; | ||
48 | + } | ||
49 | + | ||
50 | + /** | ||
51 | + * Returns the NodeInfo for a controller node. | ||
52 | + * @param node controller node | ||
53 | + * @return NodeInfo | ||
54 | + */ | ||
55 | + public static NodeInfo of(ControllerNode node) { | ||
56 | + return NodeInfo.from(node.id().toString(), node.ip().toString(), node.tcpPort()); | ||
57 | + } | ||
58 | + | ||
59 | + /** | ||
60 | + * Returns node id. | ||
61 | + * @return node id | ||
62 | + */ | ||
63 | + public String getId() { | ||
64 | + return id; | ||
65 | + } | ||
66 | + | ||
67 | + /** | ||
68 | + * Returns node ip. | ||
69 | + * @return node ip | ||
70 | + */ | ||
71 | + public String getIp() { | ||
72 | + return ip; | ||
73 | + } | ||
74 | + | ||
75 | + /** | ||
76 | + * Returns node port. | ||
77 | + * @return port | ||
78 | + */ | ||
79 | + public int getTcpPort() { | ||
80 | + return tcpPort; | ||
81 | + } | ||
82 | + | ||
83 | + @Override | ||
84 | + public int hashCode() { | ||
85 | + return Objects.hash(id, ip, tcpPort); | ||
86 | + } | ||
87 | + | ||
88 | + @Override | ||
89 | + public boolean equals(Object o) { | ||
90 | + if (this == o) { | ||
91 | + return true; | ||
92 | + } | ||
93 | + if (o instanceof NodeInfo) { | ||
94 | + NodeInfo that = (NodeInfo) o; | ||
95 | + return Objects.equals(this.id, that.id) && | ||
96 | + Objects.equals(this.ip, that.ip) && | ||
97 | + Objects.equals(this.tcpPort, that.tcpPort); | ||
98 | + } | ||
99 | + return false; | ||
100 | + } | ||
101 | + | ||
102 | + @Override | ||
103 | + public String toString() { | ||
104 | + return toStringHelper(this) | ||
105 | + .add("id", id) | ||
106 | + .add("ip", ip) | ||
107 | + .add("tcpPort", tcpPort).toString(); | ||
108 | + } | ||
109 | +} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
core/store/dist/src/main/java/org/onosproject/store/consistent/impl/DatabaseDefinition.java
0 → 100644
1 | +/* | ||
2 | + * Copyright 2015 Open Networking Laboratory | ||
3 | + * | ||
4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | + * you may not use this file except in compliance with the License. | ||
6 | + * You may obtain a copy of the License at | ||
7 | + * | ||
8 | + * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | + * | ||
10 | + * Unless required by applicable law or agreed to in writing, software | ||
11 | + * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | + * See the License for the specific language governing permissions and | ||
14 | + * limitations under the License. | ||
15 | + */ | ||
16 | +package org.onosproject.store.consistent.impl; | ||
17 | + | ||
18 | +import java.util.Map; | ||
19 | +import java.util.Set; | ||
20 | + | ||
21 | +import org.onosproject.store.cluster.impl.NodeInfo; | ||
22 | + | ||
23 | +import static com.google.common.base.Preconditions.checkNotNull; | ||
24 | + | ||
25 | +import com.google.common.collect.ImmutableMap; | ||
26 | +import com.google.common.collect.ImmutableSet; | ||
27 | + | ||
28 | +/** | ||
29 | + * Partitioned database configuration. | ||
30 | + */ | ||
31 | +public class DatabaseDefinition { | ||
32 | + private Map<String, Set<NodeInfo>> partitions; | ||
33 | + private Set<NodeInfo> nodes; | ||
34 | + | ||
35 | + /** | ||
36 | + * Creates a new DatabaseDefinition. | ||
37 | + * @param partitions partition map | ||
38 | + * @param nodes set of nodes | ||
39 | + * @return database definition | ||
40 | + */ | ||
41 | + public static DatabaseDefinition from(Map<String, Set<NodeInfo>> partitions, Set<NodeInfo> nodes) { | ||
42 | + checkNotNull(partitions); | ||
43 | + checkNotNull(nodes); | ||
44 | + DatabaseDefinition definition = new DatabaseDefinition(); | ||
45 | + definition.partitions = ImmutableMap.copyOf(partitions); | ||
46 | + definition.nodes = ImmutableSet.copyOf(nodes); | ||
47 | + return definition; | ||
48 | + } | ||
49 | + | ||
50 | + /** | ||
51 | + * Returns the map of database partitions. | ||
52 | + * @return db partition map | ||
53 | + */ | ||
54 | + public Map<String, Set<NodeInfo>> getPartitions() { | ||
55 | + return partitions; | ||
56 | + } | ||
57 | + | ||
58 | + /** | ||
59 | + * Returns the set of nodes. | ||
60 | + * @return nodes | ||
61 | + */ | ||
62 | + public Set<NodeInfo> getNodes() { | ||
63 | + return nodes; | ||
64 | + } | ||
65 | +} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
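
Note: DatabaseDefinition above is what makes the "multiple raft partitions" from the commit message expressible: each entry of the partitions map names one partition and lists the NodeInfo set that replicates it, while nodes holds the full cluster membership. A small construction sketch follows; the partition names and addresses are made up for illustration.

```java
import java.util.Map;
import java.util.Set;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;

import org.onosproject.store.cluster.impl.NodeInfo;
import org.onosproject.store.consistent.impl.DatabaseDefinition;

public class PartitionLayoutExample {
    public static void main(String[] args) {
        NodeInfo n1 = NodeInfo.from("node1", "10.0.0.1", 9876);
        NodeInfo n2 = NodeInfo.from("node2", "10.0.0.2", 9876);
        NodeInfo n3 = NodeInfo.from("node3", "10.0.0.3", 9876);

        // Two partitions, each replicated on a different subset of the cluster.
        Map<String, Set<NodeInfo>> partitions = ImmutableMap.of(
                "p1", ImmutableSet.of(n1, n2),
                "p2", ImmutableSet.of(n2, n3));

        DatabaseDefinition definition =
                DatabaseDefinition.from(partitions, ImmutableSet.of(n1, n2, n3));
        System.out.println(definition.getPartitions().keySet());  // [p1, p2]
    }
}
```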
... | @@ -16,39 +16,16 @@ | ... | @@ -16,39 +16,16 @@ |
16 | 16 | ||
17 | package org.onosproject.store.consistent.impl; | 17 | package org.onosproject.store.consistent.impl; |
18 | 18 | ||
19 | -import static com.google.common.base.Preconditions.checkArgument; | ||
20 | import static com.google.common.base.Preconditions.checkNotNull; | 19 | import static com.google.common.base.Preconditions.checkNotNull; |
21 | -import static org.slf4j.LoggerFactory.getLogger; | ||
22 | - | ||
23 | import java.io.File; | 20 | import java.io.File; |
24 | import java.io.IOException; | 21 | import java.io.IOException; |
25 | -import java.util.HashMap; | ||
26 | -import java.util.HashSet; | ||
27 | -import java.util.Iterator; | ||
28 | -import java.util.Map; | ||
29 | -import java.util.Map.Entry; | ||
30 | -import java.util.Set; | ||
31 | - | ||
32 | -import org.onosproject.cluster.DefaultControllerNode; | ||
33 | -import org.onosproject.cluster.NodeId; | ||
34 | -import org.onlab.packet.IpAddress; | ||
35 | -import org.slf4j.Logger; | ||
36 | - | ||
37 | -import com.fasterxml.jackson.core.JsonEncoding; | ||
38 | -import com.fasterxml.jackson.core.JsonFactory; | ||
39 | -import com.fasterxml.jackson.databind.JsonNode; | ||
40 | import com.fasterxml.jackson.databind.ObjectMapper; | 22 | import com.fasterxml.jackson.databind.ObjectMapper; |
41 | -import com.fasterxml.jackson.databind.node.ArrayNode; | ||
42 | -import com.fasterxml.jackson.databind.node.ObjectNode; | ||
43 | -import com.google.common.collect.Maps; | ||
44 | 23 | ||
45 | /** | 24 | /** |
46 | * Allows for reading and writing partitioned database definition as a JSON file. | 25 | * Allows for reading and writing partitioned database definition as a JSON file. |
47 | */ | 26 | */ |
48 | public class DatabaseDefinitionStore { | 27 | public class DatabaseDefinitionStore { |
49 | 28 | ||
50 | - private final Logger log = getLogger(getClass()); | ||
51 | - | ||
52 | private final File definitionfile; | 29 | private final File definitionfile; |
53 | 30 | ||
54 | /** | 31 | /** |
... | @@ -57,7 +34,7 @@ public class DatabaseDefinitionStore { | ... | @@ -57,7 +34,7 @@ public class DatabaseDefinitionStore { |
57 | * @param filePath location of the definition file | 34 | * @param filePath location of the definition file |
58 | */ | 35 | */ |
59 | public DatabaseDefinitionStore(String filePath) { | 36 | public DatabaseDefinitionStore(String filePath) { |
60 | - definitionfile = new File(filePath); | 37 | + definitionfile = new File(checkNotNull(filePath)); |
61 | } | 38 | } |
62 | 39 | ||
63 | /** | 40 | /** |
... | @@ -70,72 +47,27 @@ public class DatabaseDefinitionStore { | ... | @@ -70,72 +47,27 @@ public class DatabaseDefinitionStore { |
70 | } | 47 | } |
71 | 48 | ||
72 | /** | 49 | /** |
73 | - * Returns the Map from database partition name to set of initial active member nodes. | 50 | + * Returns the database definition. |
74 | * | 51 | * |
75 | - * @return Map from partition name to set of active member nodes | 52 | + * @return database definition |
76 | * @throws IOException when I/O exception of some sort has occurred. | 53 | * @throws IOException when I/O exception of some sort has occurred. |
77 | */ | 54 | */ |
78 | - public Map<String, Set<DefaultControllerNode>> read() throws IOException { | 55 | + public DatabaseDefinition read() throws IOException { |
79 | - | 56 | + ObjectMapper mapper = new ObjectMapper(); |
80 | - final Map<String, Set<DefaultControllerNode>> partitions = Maps.newHashMap(); | 57 | + DatabaseDefinition definition = mapper.readValue(definitionfile, DatabaseDefinition.class); |
81 | - | 58 | + return definition; |
82 | - final ObjectMapper mapper = new ObjectMapper(); | ||
83 | - final ObjectNode tabletNodes = (ObjectNode) mapper.readTree(definitionfile); | ||
84 | - final Iterator<Entry<String, JsonNode>> fields = tabletNodes.fields(); | ||
85 | - while (fields.hasNext()) { | ||
86 | - final Entry<String, JsonNode> next = fields.next(); | ||
87 | - final Set<DefaultControllerNode> nodes = new HashSet<>(); | ||
88 | - final Iterator<JsonNode> elements = next.getValue().elements(); | ||
89 | - while (elements.hasNext()) { | ||
90 | - ObjectNode nodeDef = (ObjectNode) elements.next(); | ||
91 | - nodes.add(new DefaultControllerNode(new NodeId(nodeDef.get("id").asText()), | ||
92 | - IpAddress.valueOf(nodeDef.get("ip").asText()), | ||
93 | - nodeDef.get("tcpPort").asInt(DatabaseManager.COPYCAT_TCP_PORT))); | ||
94 | - } | ||
95 | - | ||
96 | - partitions.put(next.getKey(), nodes); | ||
97 | - } | ||
98 | - return partitions; | ||
99 | } | 59 | } |
100 | 60 | ||
101 | /** | 61 | /** |
102 | - * Updates the Map from database partition name to set of member nodes. | 62 | + * Writes the specified database definition to file. |
103 | * | 63 | * |
104 | - * @param partitionName name of the database partition to update | 64 | + * @param definition database definition |
105 | - * @param nodes set of initial member nodes | ||
106 | * @throws IOException when I/O exception of some sort has occurred. | 65 | * @throws IOException when I/O exception of some sort has occurred. |
107 | */ | 66 | */ |
108 | - public void write(String partitionName, Set<DefaultControllerNode> nodes) throws IOException { | 67 | + public void write(DatabaseDefinition definition) throws IOException { |
109 | - checkNotNull(partitionName); | 68 | + checkNotNull(definition); |
110 | - checkArgument(partitionName.isEmpty(), "Partition name cannot be empty"); | ||
111 | - | ||
112 | - // load current | ||
113 | - Map<String, Set<DefaultControllerNode>> config; | ||
114 | - try { | ||
115 | - config = read(); | ||
116 | - } catch (IOException e) { | ||
117 | - log.info("Reading partition config failed, assuming empty definition."); | ||
118 | - config = new HashMap<>(); | ||
119 | - } | ||
120 | - // update with specified | ||
121 | - config.put(partitionName, nodes); | ||
122 | - | ||
123 | // write back to file | 69 | // write back to file |
124 | final ObjectMapper mapper = new ObjectMapper(); | 70 | final ObjectMapper mapper = new ObjectMapper(); |
125 | - final ObjectNode partitionNodes = mapper.createObjectNode(); | 71 | + mapper.writeValue(definitionfile, definition); |
126 | - for (Entry<String, Set<DefaultControllerNode>> tablet : config.entrySet()) { | ||
127 | - ArrayNode nodeDefs = mapper.createArrayNode(); | ||
128 | - partitionNodes.set(tablet.getKey(), nodeDefs); | ||
129 | - | ||
130 | - for (DefaultControllerNode node : tablet.getValue()) { | ||
131 | - ObjectNode nodeDef = mapper.createObjectNode(); | ||
132 | - nodeDef.put("id", node.id().toString()) | ||
133 | - .put("ip", node.ip().toString()) | ||
134 | - .put("tcpPort", node.tcpPort()); | ||
135 | - nodeDefs.add(nodeDef); | ||
136 | - } | ||
137 | - } | ||
138 | - mapper.writeTree(new JsonFactory().createGenerator(definitionfile, JsonEncoding.UTF8), | ||
139 | - partitionNodes); | ||
140 | } | 72 | } |
141 | } | 73 | } | ... | ... |
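
Note: with the store above, tablets.json is read and written through the same data-binding approach as the cluster definition. One consequence worth flagging (inferred from the getter names, not stated in the patch): the serialized layout nests everything under "partitions" and "nodes" keys instead of the old top-level map of partition names, which is presumably why the install script further down stops emitting tablets.json by hand and delegates to onos-gen-partitions. A short write sketch with a made-up path and a single partition:

```java
import java.util.Set;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;

import org.onosproject.store.cluster.impl.NodeInfo;
import org.onosproject.store.consistent.impl.DatabaseDefinition;
import org.onosproject.store.consistent.impl.DatabaseDefinitionStore;

public class TabletsFileExample {
    public static void main(String[] args) throws Exception {
        Set<NodeInfo> members = ImmutableSet.of(
                NodeInfo.from("node1", "10.0.0.1", 9876),
                NodeInfo.from("node2", "10.0.0.2", 9876));

        // Serialized roughly as:
        //   {"partitions":{"p1":[{"id":"node1",...},{"id":"node2",...}]},
        //    "nodes":[{"id":"node1",...},{"id":"node2",...}]}
        new DatabaseDefinitionStore("/tmp/tablets.json")
                .write(DatabaseDefinition.from(ImmutableMap.of("p1", members), members));
    }
}
```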
... | @@ -31,8 +31,7 @@ import org.apache.felix.scr.annotations.Reference; | ... | @@ -31,8 +31,7 @@ import org.apache.felix.scr.annotations.Reference; |
31 | import org.apache.felix.scr.annotations.ReferenceCardinality; | 31 | import org.apache.felix.scr.annotations.ReferenceCardinality; |
32 | import org.apache.felix.scr.annotations.Service; | 32 | import org.apache.felix.scr.annotations.Service; |
33 | import org.onosproject.cluster.ClusterService; | 33 | import org.onosproject.cluster.ClusterService; |
34 | -import org.onosproject.cluster.ControllerNode; | 34 | +import org.onosproject.store.cluster.impl.NodeInfo; |
35 | -import org.onosproject.cluster.DefaultControllerNode; | ||
36 | import org.onosproject.store.service.ConsistentMap; | 35 | import org.onosproject.store.service.ConsistentMap; |
37 | import org.onosproject.store.service.PartitionInfo; | 36 | import org.onosproject.store.service.PartitionInfo; |
38 | import org.onosproject.store.service.Serializer; | 37 | import org.onosproject.store.service.Serializer; |
... | @@ -69,8 +68,8 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -69,8 +68,8 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
69 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | 68 | @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) |
70 | protected ClusterService clusterService; | 69 | protected ClusterService clusterService; |
71 | 70 | ||
72 | - protected String nodeToUri(ControllerNode node) { | 71 | + protected String nodeToUri(NodeInfo node) { |
73 | - return String.format("tcp://%s:%d", node.ip(), COPYCAT_TCP_PORT); | 72 | + return String.format("tcp://%s:%d", node.getIp(), COPYCAT_TCP_PORT); |
74 | } | 73 | } |
75 | 74 | ||
76 | @Activate | 75 | @Activate |
... | @@ -82,12 +81,11 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -82,12 +81,11 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
82 | File file = new File(CONFIG_DIR, PARTITION_DEFINITION_FILE); | 81 | File file = new File(CONFIG_DIR, PARTITION_DEFINITION_FILE); |
83 | log.info("Loading database definition: {}", file.getAbsolutePath()); | 82 | log.info("Loading database definition: {}", file.getAbsolutePath()); |
84 | 83 | ||
85 | - DatabaseDefinitionStore databaseDef = new DatabaseDefinitionStore(file); | 84 | + Map<String, Set<NodeInfo>> partitionMap; |
86 | - Map<String, Set<DefaultControllerNode>> partitionMap; | ||
87 | try { | 85 | try { |
88 | - partitionMap = databaseDef.read(); | 86 | + DatabaseDefinitionStore databaseDef = new DatabaseDefinitionStore(file); |
87 | + partitionMap = databaseDef.read().getPartitions(); | ||
89 | } catch (IOException e) { | 88 | } catch (IOException e) { |
90 | - log.error("Failed to load database config {}", file); | ||
91 | throw new IllegalStateException("Failed to load database config", e); | 89 | throw new IllegalStateException("Failed to load database config", e); |
92 | } | 90 | } |
93 | 91 | ||
... | @@ -99,7 +97,7 @@ public class DatabaseManager implements StorageService, StorageAdminService { | ... | @@ -99,7 +97,7 @@ public class DatabaseManager implements StorageService, StorageAdminService { |
99 | .map(this::nodeToUri) | 97 | .map(this::nodeToUri) |
100 | .toArray(String[]::new); | 98 | .toArray(String[]::new); |
101 | 99 | ||
102 | - String localNodeUri = nodeToUri(clusterService.getLocalNode()); | 100 | + String localNodeUri = nodeToUri(NodeInfo.of(clusterService.getLocalNode())); |
103 | 101 | ||
104 | ClusterConfig clusterConfig = new ClusterConfig() | 102 | ClusterConfig clusterConfig = new ClusterConfig() |
105 | .withProtocol(new NettyTcpProtocol() | 103 | .withProtocol(new NettyTcpProtocol() | ... | ... |
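
Note: a small but easy-to-miss detail in nodeToUri() above is that the copycat/raft transport address is built from the node's IP and the shared COPYCAT_TCP_PORT constant; the per-node tcpPort carried by NodeInfo is not used for this URI. The standalone sketch below mirrors that mapping — the port value is an assumption used only for illustration, since the real constant is defined in DatabaseManager and not shown in this diff.

```java
import org.onosproject.store.cluster.impl.NodeInfo;

public class UriMappingExample {
    // Illustrative stand-in for DatabaseManager.COPYCAT_TCP_PORT; the actual
    // value lives in DatabaseManager and is not part of this change.
    static final int COPYCAT_TCP_PORT = 7238;

    // Mirrors DatabaseManager.nodeToUri(NodeInfo): every member is addressed on
    // the shared copycat port, so NodeInfo.getTcpPort() is not consulted here.
    static String nodeToUri(NodeInfo node) {
        return String.format("tcp://%s:%d", node.getIp(), COPYCAT_TCP_PORT);
    }

    public static void main(String[] args) {
        System.out.println(nodeToUri(NodeInfo.from("node1", "10.0.0.1", 9876)));
        // prints: tcp://10.0.0.1:7238 (with the illustrative port above)
    }
}
```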
... | @@ -39,18 +39,7 @@ ssh $remote " | ... | @@ -39,18 +39,7 @@ ssh $remote " |
39 | 39 | ||
40 | # Generate a default tablets.json from the ON* environment variables | 40 | # Generate a default tablets.json from the ON* environment variables |
41 | TDEF_FILE=/tmp/${remote}.tablets.json | 41 | TDEF_FILE=/tmp/${remote}.tablets.json |
42 | -nodes=( $(env | sort | egrep "OC[0-9]+" | cut -d= -f2) ) | 42 | +onos-gen-partitions $TDEF_FILE |
43 | -echo "{ \"default\":[" > $TDEF_FILE | ||
44 | -while [ ${#nodes[@]} -gt 0 ]; do | ||
45 | - node=${nodes[0]} | ||
46 | - nodes=( ${nodes[@]:1} ) | ||
47 | - if [ "${#nodes[@]}" -ne "0" ]; then | ||
48 | - echo " { \"id\": \"$node\", \"ip\": \"$node\", \"tcpPort\": 9876 }," >> $TDEF_FILE | ||
49 | - else | ||
50 | - echo " { \"id\": \"$node\", \"ip\": \"$node\", \"tcpPort\": 9876 }" >> $TDEF_FILE | ||
51 | - fi | ||
52 | -done | ||
53 | -echo "]}" >> $TDEF_FILE | ||
54 | scp -q $TDEF_FILE $remote:$ONOS_INSTALL_DIR/config/tablets.json | 43 | scp -q $TDEF_FILE $remote:$ONOS_INSTALL_DIR/config/tablets.json |
55 | 44 | ||
56 | 45 | ... | ... |