mirror of
https://github.com/opennetworkinglab/onos.git
synced 2025-10-18 10:51:04 +02:00
Merge branch 'master' of ssh://gerrit.onlab.us:29418/onos-next
Conflicts: core/net/src/main/java/org/onlab/onos/net/proxyarp/impl/package-info.java Change-Id: I7bf076fae02c619ff0d57ffcbff4a4189716c474
This commit is contained in:
commit
cccd6b95a9
@ -27,10 +27,6 @@
|
|||||||
<artifactId>onlab-nio</artifactId>
|
<artifactId>onlab-nio</artifactId>
|
||||||
<version>${project.version}</version>
|
<version>${project.version}</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
|
||||||
<groupId>org.livetribe.slp</groupId>
|
|
||||||
<artifactId>livetribe-slp</artifactId>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.karaf.shell</groupId>
|
<groupId>org.apache.karaf.shell</groupId>
|
||||||
<artifactId>org.apache.karaf.shell.console</artifactId>
|
<artifactId>org.apache.karaf.shell.console</artifactId>
|
||||||
|
@ -233,7 +233,7 @@ public class IOLoopTestClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void connect(SelectionKey key) {
|
protected void connect(SelectionKey key) throws IOException {
|
||||||
super.connect(key);
|
super.connect(key);
|
||||||
TestMessageStream b = (TestMessageStream) key.attachment();
|
TestMessageStream b = (TestMessageStream) key.attachment();
|
||||||
Worker w = ((CustomIOLoop) b.loop()).worker;
|
Worker w = ((CustomIOLoop) b.loop()).worker;
|
||||||
|
@ -0,0 +1,34 @@
|
|||||||
|
livetribe.slp.da.expired.services.purge.period=60
|
||||||
|
livetribe.slp.sa.client.connect.address=127.0.0.1
|
||||||
|
livetribe.slp.sa.client.factory=org.livetribe.slp.sa.StandardServiceAgentClient$Factory
|
||||||
|
livetribe.slp.sa.factory=org.livetribe.slp.sa.StandardServiceAgent$Factory
|
||||||
|
livetribe.slp.sa.service.renewal.enabled=true
|
||||||
|
livetribe.slp.sa.unicast.prefer.tcp=false
|
||||||
|
livetribe.slp.tcp.connector.factory=org.livetribe.slp.spi.net.SocketTCPConnector$Factory
|
||||||
|
livetribe.slp.tcp.connector.server.factory=org.livetribe.slp.spi.net.SocketTCPConnectorServer$Factory
|
||||||
|
livetribe.slp.tcp.message.max.length=4096
|
||||||
|
livetribe.slp.tcp.read.timeout=300000
|
||||||
|
livetribe.slp.ua.client.factory=org.livetribe.slp.ua.StandardUserAgentClient$Factory
|
||||||
|
livetribe.slp.ua.factory=org.livetribe.slp.ua.StandardUserAgent$Factory
|
||||||
|
livetribe.slp.ua.unicast.prefer.tcp=false
|
||||||
|
livetribe.slp.udp.connector.factory=org.livetribe.slp.spi.net.SocketUDPConnector$Factory
|
||||||
|
livetribe.slp.udp.connector.server.factory=org.livetribe.slp.spi.net.SocketUDPConnectorServer$Factory
|
||||||
|
net.slp.DAAddresses=
|
||||||
|
net.slp.DAAttributes=
|
||||||
|
net.slp.DAHeartBeat=10800
|
||||||
|
net.slp.MTU=1400
|
||||||
|
net.slp.SAAttributes=
|
||||||
|
net.slp.broadcastAddress=255.255.255.255
|
||||||
|
net.slp.datagramTimeouts=150,250,400
|
||||||
|
net.slp.interfaces=0.0.0.0
|
||||||
|
net.slp.isBroadcastOnly=false
|
||||||
|
net.slp.locale=en
|
||||||
|
net.slp.multicastAddress=239.255.255.253
|
||||||
|
net.slp.multicastMaximumWait=15000
|
||||||
|
net.slp.multicastTTL=255
|
||||||
|
net.slp.multicastTimeouts=150,250,400,600,1000
|
||||||
|
net.slp.notificationPort=1847
|
||||||
|
net.slp.port=427
|
||||||
|
net.slp.useScopes=default
|
||||||
|
|
||||||
|
org.onlab.cluster.name = TV-ONOS
|
34
cli/src/main/java/org/onlab/onos/cli/NodeAddCommand.java
Normal file
34
cli/src/main/java/org/onlab/onos/cli/NodeAddCommand.java
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
package org.onlab.onos.cli;
|
||||||
|
|
||||||
|
import org.apache.karaf.shell.commands.Argument;
|
||||||
|
import org.apache.karaf.shell.commands.Command;
|
||||||
|
import org.onlab.onos.cluster.ClusterAdminService;
|
||||||
|
import org.onlab.onos.cluster.NodeId;
|
||||||
|
import org.onlab.packet.IpPrefix;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds a new controller cluster node.
|
||||||
|
*/
|
||||||
|
@Command(scope = "onos", name = "add-node",
|
||||||
|
description = "Adds a new controller cluster node")
|
||||||
|
public class NodeAddCommand extends AbstractShellCommand {
|
||||||
|
|
||||||
|
@Argument(index = 0, name = "nodeId", description = "Node ID",
|
||||||
|
required = true, multiValued = false)
|
||||||
|
String nodeId = null;
|
||||||
|
|
||||||
|
@Argument(index = 1, name = "ip", description = "Node IP address",
|
||||||
|
required = true, multiValued = false)
|
||||||
|
String ip = null;
|
||||||
|
|
||||||
|
@Argument(index = 2, name = "tcpPort", description = "Node TCP listen port",
|
||||||
|
required = false, multiValued = false)
|
||||||
|
int tcpPort = 9876;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected void execute() {
|
||||||
|
ClusterAdminService service = get(ClusterAdminService.class);
|
||||||
|
service.addNode(new NodeId(nodeId), IpPrefix.valueOf(ip), tcpPort);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
25
cli/src/main/java/org/onlab/onos/cli/NodeRemoveCommand.java
Normal file
25
cli/src/main/java/org/onlab/onos/cli/NodeRemoveCommand.java
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
package org.onlab.onos.cli;
|
||||||
|
|
||||||
|
import org.apache.karaf.shell.commands.Argument;
|
||||||
|
import org.apache.karaf.shell.commands.Command;
|
||||||
|
import org.onlab.onos.cluster.ClusterAdminService;
|
||||||
|
import org.onlab.onos.cluster.NodeId;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Removes a controller cluster node.
|
||||||
|
*/
|
||||||
|
@Command(scope = "onos", name = "remove-node",
|
||||||
|
description = "Removes a new controller cluster node")
|
||||||
|
public class NodeRemoveCommand extends AbstractShellCommand {
|
||||||
|
|
||||||
|
@Argument(index = 0, name = "nodeId", description = "Node ID",
|
||||||
|
required = true, multiValued = false)
|
||||||
|
String nodeId = null;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected void execute() {
|
||||||
|
ClusterAdminService service = get(ClusterAdminService.class);
|
||||||
|
service.removeNode(new NodeId(nodeId));
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
@ -17,7 +17,7 @@ import static com.google.common.collect.Lists.newArrayList;
|
|||||||
public class NodesListCommand extends AbstractShellCommand {
|
public class NodesListCommand extends AbstractShellCommand {
|
||||||
|
|
||||||
private static final String FMT =
|
private static final String FMT =
|
||||||
"id=%s, ip=%s, state=%s %s";
|
"id=%s, address=%s:%s, state=%s %s";
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void execute() {
|
protected void execute() {
|
||||||
@ -26,7 +26,7 @@ public class NodesListCommand extends AbstractShellCommand {
|
|||||||
Collections.sort(nodes, Comparators.NODE_COMPARATOR);
|
Collections.sort(nodes, Comparators.NODE_COMPARATOR);
|
||||||
ControllerNode self = service.getLocalNode();
|
ControllerNode self = service.getLocalNode();
|
||||||
for (ControllerNode node : nodes) {
|
for (ControllerNode node : nodes) {
|
||||||
print(FMT, node.id(), node.ip(),
|
print(FMT, node.id(), node.ip(), node.tcpPort(),
|
||||||
service.getState(node.id()),
|
service.getState(node.id()),
|
||||||
node.equals(self) ? "*" : "");
|
node.equals(self) ? "*" : "");
|
||||||
}
|
}
|
||||||
|
@ -4,6 +4,12 @@
|
|||||||
<command>
|
<command>
|
||||||
<action class="org.onlab.onos.cli.NodesListCommand"/>
|
<action class="org.onlab.onos.cli.NodesListCommand"/>
|
||||||
</command>
|
</command>
|
||||||
|
<command>
|
||||||
|
<action class="org.onlab.onos.cli.NodeAddCommand"/>
|
||||||
|
</command>
|
||||||
|
<command>
|
||||||
|
<action class="org.onlab.onos.cli.NodeRemoveCommand"/>
|
||||||
|
</command>
|
||||||
<command>
|
<command>
|
||||||
<action class="org.onlab.onos.cli.MastersListCommand"/>
|
<action class="org.onlab.onos.cli.MastersListCommand"/>
|
||||||
<completers>
|
<completers>
|
||||||
|
@ -1,10 +1,22 @@
|
|||||||
package org.onlab.onos.cluster;
|
package org.onlab.onos.cluster;
|
||||||
|
|
||||||
|
import org.onlab.packet.IpPrefix;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Service for administering the cluster node membership.
|
* Service for administering the cluster node membership.
|
||||||
*/
|
*/
|
||||||
public interface ClusterAdminService {
|
public interface ClusterAdminService {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds a new controller node to the cluster.
|
||||||
|
*
|
||||||
|
* @param nodeId controller node identifier
|
||||||
|
* @param ip node IP listen address
|
||||||
|
* @param tcpPort tcp listen port
|
||||||
|
* @return newly added node
|
||||||
|
*/
|
||||||
|
ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Removes the specified node from the cluster node list.
|
* Removes the specified node from the cluster node list.
|
||||||
*
|
*
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
package org.onlab.onos.cluster;
|
package org.onlab.onos.cluster;
|
||||||
|
|
||||||
import org.onlab.onos.store.Store;
|
import org.onlab.onos.store.Store;
|
||||||
|
import org.onlab.packet.IpPrefix;
|
||||||
|
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
|
||||||
@ -39,6 +40,16 @@ public interface ClusterStore extends Store<ClusterEvent, ClusterStoreDelegate>
|
|||||||
*/
|
*/
|
||||||
ControllerNode.State getState(NodeId nodeId);
|
ControllerNode.State getState(NodeId nodeId);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds a new controller node to the cluster.
|
||||||
|
*
|
||||||
|
* @param nodeId controller node identifier
|
||||||
|
* @param ip node IP listen address
|
||||||
|
* @param tcpPort tcp listen port
|
||||||
|
* @return newly added node
|
||||||
|
*/
|
||||||
|
ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Removes the specified node from the inventory of cluster nodes.
|
* Removes the specified node from the inventory of cluster nodes.
|
||||||
*
|
*
|
||||||
|
@ -35,4 +35,12 @@ public interface ControllerNode {
|
|||||||
*/
|
*/
|
||||||
IpPrefix ip();
|
IpPrefix ip();
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the TCP port on which the node listens for connections.
|
||||||
|
*
|
||||||
|
* @return TCP port
|
||||||
|
*/
|
||||||
|
int tcpPort();
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -11,13 +11,17 @@ import static com.google.common.base.MoreObjects.toStringHelper;
|
|||||||
*/
|
*/
|
||||||
public class DefaultControllerNode implements ControllerNode {
|
public class DefaultControllerNode implements ControllerNode {
|
||||||
|
|
||||||
|
private static final int DEFAULT_PORT = 9876;
|
||||||
|
|
||||||
private final NodeId id;
|
private final NodeId id;
|
||||||
private final IpPrefix ip;
|
private final IpPrefix ip;
|
||||||
|
private final int tcpPort;
|
||||||
|
|
||||||
// For serialization
|
// For serialization
|
||||||
private DefaultControllerNode() {
|
private DefaultControllerNode() {
|
||||||
this.id = null;
|
this.id = null;
|
||||||
this.ip = null;
|
this.ip = null;
|
||||||
|
this.tcpPort = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -27,8 +31,19 @@ public class DefaultControllerNode implements ControllerNode {
|
|||||||
* @param ip instance IP address
|
* @param ip instance IP address
|
||||||
*/
|
*/
|
||||||
public DefaultControllerNode(NodeId id, IpPrefix ip) {
|
public DefaultControllerNode(NodeId id, IpPrefix ip) {
|
||||||
|
this(id, ip, DEFAULT_PORT);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new instance with the specified id and IP address and TCP port.
|
||||||
|
*
|
||||||
|
* @param id instance identifier
|
||||||
|
* @param ip instance IP address
|
||||||
|
*/
|
||||||
|
public DefaultControllerNode(NodeId id, IpPrefix ip, int tcpPort) {
|
||||||
this.id = id;
|
this.id = id;
|
||||||
this.ip = ip;
|
this.ip = ip;
|
||||||
|
this.tcpPort = tcpPort;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -41,6 +56,11 @@ public class DefaultControllerNode implements ControllerNode {
|
|||||||
return ip;
|
return ip;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int tcpPort() {
|
||||||
|
return tcpPort;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public int hashCode() {
|
public int hashCode() {
|
||||||
return Objects.hash(id);
|
return Objects.hash(id);
|
||||||
@ -60,7 +80,8 @@ public class DefaultControllerNode implements ControllerNode {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return toStringHelper(this).add("id", id).add("ip", ip).toString();
|
return toStringHelper(this).add("id", id)
|
||||||
|
.add("ip", ip).add("tcpPort", tcpPort).toString();
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -16,10 +16,12 @@ import org.onlab.onos.cluster.ControllerNode;
|
|||||||
import org.onlab.onos.cluster.NodeId;
|
import org.onlab.onos.cluster.NodeId;
|
||||||
import org.onlab.onos.event.AbstractListenerRegistry;
|
import org.onlab.onos.event.AbstractListenerRegistry;
|
||||||
import org.onlab.onos.event.EventDeliveryService;
|
import org.onlab.onos.event.EventDeliveryService;
|
||||||
|
import org.onlab.packet.IpPrefix;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
|
||||||
|
import static com.google.common.base.Preconditions.checkArgument;
|
||||||
import static com.google.common.base.Preconditions.checkNotNull;
|
import static com.google.common.base.Preconditions.checkNotNull;
|
||||||
import static org.slf4j.LoggerFactory.getLogger;
|
import static org.slf4j.LoggerFactory.getLogger;
|
||||||
|
|
||||||
@ -80,6 +82,14 @@ public class ClusterManager implements ClusterService, ClusterAdminService {
|
|||||||
return store.getState(nodeId);
|
return store.getState(nodeId);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
|
||||||
|
checkNotNull(nodeId, INSTANCE_ID_NULL);
|
||||||
|
checkNotNull(ip, "IP address cannot be null");
|
||||||
|
checkArgument(tcpPort > 5000, "TCP port must be > 5000");
|
||||||
|
return store.addNode(nodeId, ip, tcpPort);
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void removeNode(NodeId nodeId) {
|
public void removeNode(NodeId nodeId) {
|
||||||
checkNotNull(nodeId, INSTANCE_ID_NULL);
|
checkNotNull(nodeId, INSTANCE_ID_NULL);
|
||||||
|
@ -33,8 +33,11 @@ import org.onlab.onos.net.device.PortDescription;
|
|||||||
import org.onlab.onos.net.provider.AbstractProvider;
|
import org.onlab.onos.net.provider.AbstractProvider;
|
||||||
import org.onlab.onos.net.provider.ProviderId;
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
import org.onlab.onos.store.common.StoreManager;
|
import org.onlab.onos.store.common.StoreManager;
|
||||||
|
import org.onlab.onos.store.common.StoreService;
|
||||||
import org.onlab.onos.store.common.TestStoreManager;
|
import org.onlab.onos.store.common.TestStoreManager;
|
||||||
import org.onlab.onos.store.device.impl.DistributedDeviceStore;
|
import org.onlab.onos.store.device.impl.DistributedDeviceStore;
|
||||||
|
import org.onlab.onos.store.serializers.KryoSerializationManager;
|
||||||
|
import org.onlab.onos.store.serializers.KryoSerializationService;
|
||||||
import org.onlab.packet.IpPrefix;
|
import org.onlab.packet.IpPrefix;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
@ -92,6 +95,7 @@ public class DistributedDeviceManagerTest {
|
|||||||
private DistributedDeviceStore dstore;
|
private DistributedDeviceStore dstore;
|
||||||
private TestMastershipManager masterManager;
|
private TestMastershipManager masterManager;
|
||||||
private EventDeliveryService eventService;
|
private EventDeliveryService eventService;
|
||||||
|
private KryoSerializationManager serializationMgr;
|
||||||
|
|
||||||
@Before
|
@Before
|
||||||
public void setUp() {
|
public void setUp() {
|
||||||
@ -107,7 +111,10 @@ public class DistributedDeviceManagerTest {
|
|||||||
storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
|
storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
|
||||||
storeManager.activate();
|
storeManager.activate();
|
||||||
|
|
||||||
dstore = new TestDistributedDeviceStore();
|
serializationMgr = new KryoSerializationManager();
|
||||||
|
serializationMgr.activate();
|
||||||
|
|
||||||
|
dstore = new TestDistributedDeviceStore(storeManager, serializationMgr);
|
||||||
dstore.activate();
|
dstore.activate();
|
||||||
|
|
||||||
mgr.store = dstore;
|
mgr.store = dstore;
|
||||||
@ -133,6 +140,7 @@ public class DistributedDeviceManagerTest {
|
|||||||
mgr.deactivate();
|
mgr.deactivate();
|
||||||
|
|
||||||
dstore.deactivate();
|
dstore.deactivate();
|
||||||
|
serializationMgr.deactivate();
|
||||||
storeManager.deactivate();
|
storeManager.deactivate();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -163,7 +171,7 @@ public class DistributedDeviceManagerTest {
|
|||||||
public void deviceDisconnected() {
|
public void deviceDisconnected() {
|
||||||
connectDevice(DID1, SW1);
|
connectDevice(DID1, SW1);
|
||||||
connectDevice(DID2, SW1);
|
connectDevice(DID2, SW1);
|
||||||
validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED);
|
validateEvents(DEVICE_ADDED, DEVICE_ADDED);
|
||||||
assertTrue("device should be available", service.isAvailable(DID1));
|
assertTrue("device should be available", service.isAvailable(DID1));
|
||||||
|
|
||||||
// Disconnect
|
// Disconnect
|
||||||
@ -182,10 +190,10 @@ public class DistributedDeviceManagerTest {
|
|||||||
@Test
|
@Test
|
||||||
public void deviceUpdated() {
|
public void deviceUpdated() {
|
||||||
connectDevice(DID1, SW1);
|
connectDevice(DID1, SW1);
|
||||||
validateEvents(DEVICE_ADDED, DEVICE_ADDED);
|
validateEvents(DEVICE_ADDED);
|
||||||
|
|
||||||
connectDevice(DID1, SW2);
|
connectDevice(DID1, SW2);
|
||||||
validateEvents(DEVICE_UPDATED, DEVICE_UPDATED);
|
validateEvents(DEVICE_UPDATED);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
@ -202,7 +210,7 @@ public class DistributedDeviceManagerTest {
|
|||||||
pds.add(new DefaultPortDescription(P2, true));
|
pds.add(new DefaultPortDescription(P2, true));
|
||||||
pds.add(new DefaultPortDescription(P3, true));
|
pds.add(new DefaultPortDescription(P3, true));
|
||||||
providerService.updatePorts(DID1, pds);
|
providerService.updatePorts(DID1, pds);
|
||||||
validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED, PORT_ADDED);
|
validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED, PORT_ADDED);
|
||||||
pds.clear();
|
pds.clear();
|
||||||
|
|
||||||
pds.add(new DefaultPortDescription(P1, false));
|
pds.add(new DefaultPortDescription(P1, false));
|
||||||
@ -218,7 +226,7 @@ public class DistributedDeviceManagerTest {
|
|||||||
pds.add(new DefaultPortDescription(P1, true));
|
pds.add(new DefaultPortDescription(P1, true));
|
||||||
pds.add(new DefaultPortDescription(P2, true));
|
pds.add(new DefaultPortDescription(P2, true));
|
||||||
providerService.updatePorts(DID1, pds);
|
providerService.updatePorts(DID1, pds);
|
||||||
validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
|
validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
|
||||||
|
|
||||||
providerService.portStatusChanged(DID1, new DefaultPortDescription(P1, false));
|
providerService.portStatusChanged(DID1, new DefaultPortDescription(P1, false));
|
||||||
validateEvents(PORT_UPDATED);
|
validateEvents(PORT_UPDATED);
|
||||||
@ -233,7 +241,7 @@ public class DistributedDeviceManagerTest {
|
|||||||
pds.add(new DefaultPortDescription(P1, true));
|
pds.add(new DefaultPortDescription(P1, true));
|
||||||
pds.add(new DefaultPortDescription(P2, true));
|
pds.add(new DefaultPortDescription(P2, true));
|
||||||
providerService.updatePorts(DID1, pds);
|
providerService.updatePorts(DID1, pds);
|
||||||
validateEvents(DEVICE_ADDED, DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
|
validateEvents(DEVICE_ADDED, PORT_ADDED, PORT_ADDED);
|
||||||
assertEquals("wrong port count", 2, service.getPorts(DID1).size());
|
assertEquals("wrong port count", 2, service.getPorts(DID1).size());
|
||||||
|
|
||||||
Port port = service.getPort(DID1, P1);
|
Port port = service.getPort(DID1, P1);
|
||||||
@ -247,7 +255,7 @@ public class DistributedDeviceManagerTest {
|
|||||||
connectDevice(DID2, SW2);
|
connectDevice(DID2, SW2);
|
||||||
assertEquals("incorrect device count", 2, service.getDeviceCount());
|
assertEquals("incorrect device count", 2, service.getDeviceCount());
|
||||||
admin.removeDevice(DID1);
|
admin.removeDevice(DID1);
|
||||||
validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_ADDED, DEVICE_REMOVED, DEVICE_REMOVED);
|
validateEvents(DEVICE_ADDED, DEVICE_ADDED, DEVICE_REMOVED);
|
||||||
assertNull("device should not be found", service.getDevice(DID1));
|
assertNull("device should not be found", service.getDevice(DID1));
|
||||||
assertNotNull("device should be found", service.getDevice(DID2));
|
assertNotNull("device should be found", service.getDevice(DID2));
|
||||||
assertEquals("incorrect device count", 1, service.getDeviceCount());
|
assertEquals("incorrect device count", 1, service.getDeviceCount());
|
||||||
@ -298,8 +306,10 @@ public class DistributedDeviceManagerTest {
|
|||||||
|
|
||||||
private class TestDistributedDeviceStore extends DistributedDeviceStore {
|
private class TestDistributedDeviceStore extends DistributedDeviceStore {
|
||||||
|
|
||||||
public TestDistributedDeviceStore() {
|
public TestDistributedDeviceStore(StoreService storeService,
|
||||||
this.storeService = storeManager;
|
KryoSerializationService kryoSerializationService) {
|
||||||
|
this.storeService = storeService;
|
||||||
|
this.kryoSerializationService = kryoSerializationService;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
17
core/store/dist/pom.xml
vendored
17
core/store/dist/pom.xml
vendored
@ -26,6 +26,23 @@
|
|||||||
<artifactId>onos-core-serializers</artifactId>
|
<artifactId>onos-core-serializers</artifactId>
|
||||||
<version>${project.version}</version>
|
<version>${project.version}</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onlab-nio</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.fasterxml.jackson.core</groupId>
|
||||||
|
<artifactId>jackson-databind</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.fasterxml.jackson.core</groupId>
|
||||||
|
<artifactId>jackson-annotations</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.felix</groupId>
|
<groupId>org.apache.felix</groupId>
|
||||||
<artifactId>org.apache.felix.scr.annotations</artifactId>
|
<artifactId>org.apache.felix.scr.annotations</artifactId>
|
||||||
|
75
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/ClusterDefinitionStore.java
vendored
Normal file
75
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/ClusterDefinitionStore.java
vendored
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
package org.onlab.onos.store.cluster.impl;
|
||||||
|
|
||||||
|
import com.fasterxml.jackson.core.JsonEncoding;
|
||||||
|
import com.fasterxml.jackson.core.JsonFactory;
|
||||||
|
import com.fasterxml.jackson.databind.JsonNode;
|
||||||
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
|
import com.fasterxml.jackson.databind.node.ArrayNode;
|
||||||
|
import com.fasterxml.jackson.databind.node.ObjectNode;
|
||||||
|
import org.onlab.onos.cluster.DefaultControllerNode;
|
||||||
|
import org.onlab.onos.cluster.NodeId;
|
||||||
|
import org.onlab.packet.IpPrefix;
|
||||||
|
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.Iterator;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Allows for reading and writing cluster definition as a JSON file.
|
||||||
|
*/
|
||||||
|
public class ClusterDefinitionStore {
|
||||||
|
|
||||||
|
private final File file;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a reader/writer of the cluster definition file.
|
||||||
|
*
|
||||||
|
* @param filePath location of the definition file
|
||||||
|
*/
|
||||||
|
public ClusterDefinitionStore(String filePath) {
|
||||||
|
file = new File(filePath);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns set of the controller nodes, including self.
|
||||||
|
*
|
||||||
|
* @return set of controller nodes
|
||||||
|
*/
|
||||||
|
public Set<DefaultControllerNode> read() throws IOException {
|
||||||
|
Set<DefaultControllerNode> nodes = new HashSet<>();
|
||||||
|
ObjectMapper mapper = new ObjectMapper();
|
||||||
|
ObjectNode clusterNodeDef = (ObjectNode) mapper.readTree(file);
|
||||||
|
Iterator<JsonNode> it = ((ArrayNode) clusterNodeDef.get("nodes")).elements();
|
||||||
|
while (it.hasNext()) {
|
||||||
|
ObjectNode nodeDef = (ObjectNode) it.next();
|
||||||
|
nodes.add(new DefaultControllerNode(new NodeId(nodeDef.get("id").asText()),
|
||||||
|
IpPrefix.valueOf(nodeDef.get("ip").asText()),
|
||||||
|
nodeDef.get("tcpPort").asInt(9876)));
|
||||||
|
}
|
||||||
|
return nodes;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writes the given set of the controller nodes.
|
||||||
|
*
|
||||||
|
* @param nodes set of controller nodes
|
||||||
|
*/
|
||||||
|
public void write(Set<DefaultControllerNode> nodes) throws IOException {
|
||||||
|
ObjectMapper mapper = new ObjectMapper();
|
||||||
|
ObjectNode clusterNodeDef = mapper.createObjectNode();
|
||||||
|
ArrayNode nodeDefs = mapper.createArrayNode();
|
||||||
|
clusterNodeDef.set("nodes", nodeDefs);
|
||||||
|
for (DefaultControllerNode node : nodes) {
|
||||||
|
ObjectNode nodeDef = mapper.createObjectNode();
|
||||||
|
nodeDef.put("id", node.id().toString())
|
||||||
|
.put("ip", node.ip().toString())
|
||||||
|
.put("tcpPort", node.tcpPort());
|
||||||
|
nodeDefs.add(nodeDef);
|
||||||
|
}
|
||||||
|
mapper.writeTree(new JsonFactory().createGenerator(file, JsonEncoding.UTF8),
|
||||||
|
clusterNodeDef);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
393
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
vendored
Normal file
393
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java
vendored
Normal file
@ -0,0 +1,393 @@
|
|||||||
|
package org.onlab.onos.store.cluster.impl;
|
||||||
|
|
||||||
|
import com.google.common.collect.ImmutableSet;
|
||||||
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
|
import org.apache.felix.scr.annotations.Component;
|
||||||
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
|
import org.apache.felix.scr.annotations.Service;
|
||||||
|
import org.onlab.nio.AcceptorLoop;
|
||||||
|
import org.onlab.nio.IOLoop;
|
||||||
|
import org.onlab.nio.MessageStream;
|
||||||
|
import org.onlab.onos.cluster.ClusterEvent;
|
||||||
|
import org.onlab.onos.cluster.ClusterStore;
|
||||||
|
import org.onlab.onos.cluster.ClusterStoreDelegate;
|
||||||
|
import org.onlab.onos.cluster.ControllerNode;
|
||||||
|
import org.onlab.onos.cluster.DefaultControllerNode;
|
||||||
|
import org.onlab.onos.cluster.NodeId;
|
||||||
|
import org.onlab.onos.store.AbstractStore;
|
||||||
|
import org.onlab.packet.IpPrefix;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.net.InetSocketAddress;
|
||||||
|
import java.net.Socket;
|
||||||
|
import java.net.SocketAddress;
|
||||||
|
import java.nio.channels.ByteChannel;
|
||||||
|
import java.nio.channels.SelectionKey;
|
||||||
|
import java.nio.channels.ServerSocketChannel;
|
||||||
|
import java.nio.channels.SocketChannel;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Objects;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.Timer;
|
||||||
|
import java.util.TimerTask;
|
||||||
|
import java.util.concurrent.ConcurrentHashMap;
|
||||||
|
import java.util.concurrent.ExecutorService;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
|
||||||
|
import static java.net.InetAddress.getByAddress;
|
||||||
|
import static org.onlab.onos.cluster.ControllerNode.State;
|
||||||
|
import static org.onlab.packet.IpPrefix.valueOf;
|
||||||
|
import static org.onlab.util.Tools.namedThreads;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Distributed implementation of the cluster nodes store.
|
||||||
|
*/
|
||||||
|
@Component(immediate = true)
|
||||||
|
@Service
|
||||||
|
public class DistributedClusterStore
|
||||||
|
extends AbstractStore<ClusterEvent, ClusterStoreDelegate>
|
||||||
|
implements ClusterStore {
|
||||||
|
|
||||||
|
private static final int HELLO_MSG = 1;
|
||||||
|
private static final int ECHO_MSG = 2;
|
||||||
|
|
||||||
|
private final Logger log = LoggerFactory.getLogger(getClass());
|
||||||
|
|
||||||
|
private static final long CONNECTION_CUSTODIAN_DELAY = 1000L;
|
||||||
|
private static final long CONNECTION_CUSTODIAN_FREQUENCY = 5000;
|
||||||
|
|
||||||
|
private static final long START_TIMEOUT = 1000;
|
||||||
|
private static final long SELECT_TIMEOUT = 50;
|
||||||
|
private static final int WORKERS = 3;
|
||||||
|
private static final int COMM_BUFFER_SIZE = 32 * 1024;
|
||||||
|
private static final int COMM_IDLE_TIME = 500;
|
||||||
|
|
||||||
|
private static final boolean SO_NO_DELAY = false;
|
||||||
|
private static final int SO_SEND_BUFFER_SIZE = COMM_BUFFER_SIZE;
|
||||||
|
private static final int SO_RCV_BUFFER_SIZE = COMM_BUFFER_SIZE;
|
||||||
|
|
||||||
|
private DefaultControllerNode self;
|
||||||
|
private final Map<NodeId, DefaultControllerNode> nodes = new ConcurrentHashMap<>();
|
||||||
|
private final Map<NodeId, State> states = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
|
// Means to track message streams to other nodes.
|
||||||
|
private final Map<NodeId, TLVMessageStream> streams = new ConcurrentHashMap<>();
|
||||||
|
private final Map<SocketChannel, DefaultControllerNode> nodesByChannel = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
|
// Executor pools for listening and managing connections to other nodes.
|
||||||
|
private final ExecutorService listenExecutor =
|
||||||
|
Executors.newSingleThreadExecutor(namedThreads("onos-comm-listen"));
|
||||||
|
private final ExecutorService commExecutors =
|
||||||
|
Executors.newFixedThreadPool(WORKERS, namedThreads("onos-comm-cluster"));
|
||||||
|
private final ExecutorService heartbeatExecutor =
|
||||||
|
Executors.newSingleThreadExecutor(namedThreads("onos-comm-heartbeat"));
|
||||||
|
|
||||||
|
private final Timer timer = new Timer("onos-comm-initiator");
|
||||||
|
private final TimerTask connectionCustodian = new ConnectionCustodian();
|
||||||
|
|
||||||
|
private ListenLoop listenLoop;
|
||||||
|
private List<CommLoop> commLoops = new ArrayList<>(WORKERS);
|
||||||
|
|
||||||
|
@Activate
|
||||||
|
public void activate() {
|
||||||
|
loadClusterDefinition();
|
||||||
|
startCommunications();
|
||||||
|
startListening();
|
||||||
|
startInitiating();
|
||||||
|
log.info("Started");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Deactivate
|
||||||
|
public void deactivate() {
|
||||||
|
listenLoop.shutdown();
|
||||||
|
for (CommLoop loop : commLoops) {
|
||||||
|
loop.shutdown();
|
||||||
|
}
|
||||||
|
log.info("Stopped");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Loads the cluster definition file
|
||||||
|
private void loadClusterDefinition() {
|
||||||
|
// ClusterDefinitionStore cds = new ClusterDefinitionStore("../config/cluster.json");
|
||||||
|
// try {
|
||||||
|
// Set<DefaultControllerNode> storedNodes = cds.read();
|
||||||
|
// for (DefaultControllerNode node : storedNodes) {
|
||||||
|
// nodes.put(node.id(), node);
|
||||||
|
// }
|
||||||
|
// } catch (IOException e) {
|
||||||
|
// log.error("Unable to read cluster definitions", e);
|
||||||
|
// }
|
||||||
|
|
||||||
|
// Establishes the controller's own identity.
|
||||||
|
IpPrefix ip = valueOf(System.getProperty("onos.ip", "127.0.1.1"));
|
||||||
|
self = nodes.get(new NodeId(ip.toString()));
|
||||||
|
|
||||||
|
// As a fall-back, let's make sure we at least know who we are.
|
||||||
|
if (self == null) {
|
||||||
|
self = new DefaultControllerNode(new NodeId(ip.toString()), ip);
|
||||||
|
nodes.put(self.id(), self);
|
||||||
|
states.put(self.id(), State.ACTIVE);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Kicks off the IO loops.
|
||||||
|
private void startCommunications() {
|
||||||
|
for (int i = 0; i < WORKERS; i++) {
|
||||||
|
try {
|
||||||
|
CommLoop loop = new CommLoop();
|
||||||
|
commLoops.add(loop);
|
||||||
|
commExecutors.execute(loop);
|
||||||
|
} catch (IOException e) {
|
||||||
|
log.warn("Unable to start comm IO loop", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for the IO loops to start
|
||||||
|
for (CommLoop loop : commLoops) {
|
||||||
|
if (!loop.awaitStart(START_TIMEOUT)) {
|
||||||
|
log.warn("Comm loop did not start on-time; moving on...");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Starts listening for connections from peer cluster members.
|
||||||
|
private void startListening() {
|
||||||
|
try {
|
||||||
|
listenLoop = new ListenLoop(self.ip(), self.tcpPort());
|
||||||
|
listenExecutor.execute(listenLoop);
|
||||||
|
if (!listenLoop.awaitStart(START_TIMEOUT)) {
|
||||||
|
log.warn("Listen loop did not start on-time; moving on...");
|
||||||
|
}
|
||||||
|
} catch (IOException e) {
|
||||||
|
log.error("Unable to listen for cluster connections", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initiates open connection request and registers the pending socket
|
||||||
|
* channel with the given IO loop.
|
||||||
|
*
|
||||||
|
* @param loop loop with which the channel should be registered
|
||||||
|
* @throws java.io.IOException if the socket could not be open or connected
|
||||||
|
*/
|
||||||
|
private void openConnection(DefaultControllerNode node, CommLoop loop) throws IOException {
|
||||||
|
SocketAddress sa = new InetSocketAddress(getByAddress(node.ip().toOctets()), node.tcpPort());
|
||||||
|
SocketChannel ch = SocketChannel.open();
|
||||||
|
nodesByChannel.put(ch, node);
|
||||||
|
ch.configureBlocking(false);
|
||||||
|
ch.connect(sa);
|
||||||
|
loop.connectStream(ch);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// Attempts to connect to any nodes that do not have an associated connection.
|
||||||
|
private void startInitiating() {
|
||||||
|
timer.schedule(connectionCustodian, CONNECTION_CUSTODIAN_DELAY, CONNECTION_CUSTODIAN_FREQUENCY);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ControllerNode getLocalNode() {
|
||||||
|
return self;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<ControllerNode> getNodes() {
|
||||||
|
ImmutableSet.Builder<ControllerNode> builder = ImmutableSet.builder();
|
||||||
|
return builder.addAll(nodes.values()).build();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ControllerNode getNode(NodeId nodeId) {
|
||||||
|
return nodes.get(nodeId);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public State getState(NodeId nodeId) {
|
||||||
|
State state = states.get(nodeId);
|
||||||
|
return state == null ? State.INACTIVE : state;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
|
||||||
|
DefaultControllerNode node = new DefaultControllerNode(nodeId, ip, tcpPort);
|
||||||
|
nodes.put(nodeId, node);
|
||||||
|
return node;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void removeNode(NodeId nodeId) {
|
||||||
|
nodes.remove(nodeId);
|
||||||
|
TLVMessageStream stream = streams.remove(nodeId);
|
||||||
|
if (stream != null) {
|
||||||
|
stream.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Listens and accepts inbound connections from other cluster nodes.
|
||||||
|
private class ListenLoop extends AcceptorLoop {
|
||||||
|
ListenLoop(IpPrefix ip, int tcpPort) throws IOException {
|
||||||
|
super(SELECT_TIMEOUT, new InetSocketAddress(getByAddress(ip.toOctets()), tcpPort));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected void acceptConnection(ServerSocketChannel channel) throws IOException {
|
||||||
|
SocketChannel sc = channel.accept();
|
||||||
|
sc.configureBlocking(false);
|
||||||
|
|
||||||
|
Socket so = sc.socket();
|
||||||
|
so.setTcpNoDelay(SO_NO_DELAY);
|
||||||
|
so.setReceiveBufferSize(SO_RCV_BUFFER_SIZE);
|
||||||
|
so.setSendBufferSize(SO_SEND_BUFFER_SIZE);
|
||||||
|
|
||||||
|
findLeastUtilizedLoop().acceptStream(sc);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private class CommLoop extends IOLoop<TLVMessage, TLVMessageStream> {
|
||||||
|
CommLoop() throws IOException {
|
||||||
|
super(SELECT_TIMEOUT);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected TLVMessageStream createStream(ByteChannel byteChannel) {
|
||||||
|
return new TLVMessageStream(this, byteChannel, COMM_BUFFER_SIZE, COMM_IDLE_TIME);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected void processMessages(List<TLVMessage> messages, MessageStream<TLVMessage> stream) {
|
||||||
|
TLVMessageStream tlvStream = (TLVMessageStream) stream;
|
||||||
|
for (TLVMessage message : messages) {
|
||||||
|
// TODO: add type-based dispatching here... this is just a hack to get going
|
||||||
|
if (message.type() == HELLO_MSG) {
|
||||||
|
processHello(message, tlvStream);
|
||||||
|
} else if (message.type() == ECHO_MSG) {
|
||||||
|
processEcho(message, tlvStream);
|
||||||
|
} else {
|
||||||
|
log.info("Deal with other messages");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public TLVMessageStream acceptStream(SocketChannel channel) {
|
||||||
|
TLVMessageStream stream = super.acceptStream(channel);
|
||||||
|
try {
|
||||||
|
InetSocketAddress sa = (InetSocketAddress) channel.getRemoteAddress();
|
||||||
|
log.info("Accepted connection from node {}", valueOf(sa.getAddress().getAddress()));
|
||||||
|
stream.write(createHello(self));
|
||||||
|
|
||||||
|
} catch (IOException e) {
|
||||||
|
log.warn("Unable to accept connection from an unknown end-point", e);
|
||||||
|
}
|
||||||
|
return stream;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public TLVMessageStream connectStream(SocketChannel channel) {
|
||||||
|
TLVMessageStream stream = super.connectStream(channel);
|
||||||
|
DefaultControllerNode node = nodesByChannel.get(channel);
|
||||||
|
if (node != null) {
|
||||||
|
log.debug("Opened connection to node {}", node.id());
|
||||||
|
nodesByChannel.remove(channel);
|
||||||
|
}
|
||||||
|
return stream;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected void connect(SelectionKey key) throws IOException {
|
||||||
|
try {
|
||||||
|
super.connect(key);
|
||||||
|
TLVMessageStream stream = (TLVMessageStream) key.attachment();
|
||||||
|
send(stream, createHello(self));
|
||||||
|
} catch (IOException e) {
|
||||||
|
if (!Objects.equals(e.getMessage(), "Connection refused")) {
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected void removeStream(MessageStream<TLVMessage> stream) {
|
||||||
|
DefaultControllerNode node = ((TLVMessageStream) stream).node();
|
||||||
|
if (node != null) {
|
||||||
|
log.info("Closed connection to node {}", node.id());
|
||||||
|
states.put(node.id(), State.INACTIVE);
|
||||||
|
streams.remove(node.id());
|
||||||
|
}
|
||||||
|
super.removeStream(stream);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Processes a HELLO message from a peer controller node.
|
||||||
|
private void processHello(TLVMessage message, TLVMessageStream stream) {
|
||||||
|
// FIXME: pure hack for now
|
||||||
|
String data = new String(message.data());
|
||||||
|
String[] fields = data.split(":");
|
||||||
|
DefaultControllerNode node = new DefaultControllerNode(new NodeId(fields[0]),
|
||||||
|
valueOf(fields[1]),
|
||||||
|
Integer.parseInt(fields[2]));
|
||||||
|
stream.setNode(node);
|
||||||
|
nodes.put(node.id(), node);
|
||||||
|
streams.put(node.id(), stream);
|
||||||
|
states.put(node.id(), State.ACTIVE);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Processes an ECHO message from a peer controller node.
|
||||||
|
private void processEcho(TLVMessage message, TLVMessageStream tlvStream) {
|
||||||
|
// TODO: implement heart-beat refresh
|
||||||
|
log.info("Dealing with echoes...");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sends message to the specified stream.
|
||||||
|
private void send(TLVMessageStream stream, TLVMessage message) {
|
||||||
|
try {
|
||||||
|
stream.write(message);
|
||||||
|
} catch (IOException e) {
|
||||||
|
log.warn("Unable to send message to {}", stream.node().id());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates a hello message to be sent to a peer controller node.
|
||||||
|
private TLVMessage createHello(DefaultControllerNode self) {
|
||||||
|
return new TLVMessage(HELLO_MSG, (self.id() + ":" + self.ip() + ":" + self.tcpPort()).getBytes());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sweeps through all controller nodes and attempts to open connection to
|
||||||
|
// those that presently do not have one.
|
||||||
|
private class ConnectionCustodian extends TimerTask {
|
||||||
|
@Override
|
||||||
|
public void run() {
|
||||||
|
for (DefaultControllerNode node : nodes.values()) {
|
||||||
|
if (node != self && !streams.containsKey(node.id())) {
|
||||||
|
try {
|
||||||
|
openConnection(node, findLeastUtilizedLoop());
|
||||||
|
} catch (IOException e) {
|
||||||
|
log.debug("Unable to connect", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finds the least utilities IO loop.
|
||||||
|
private CommLoop findLeastUtilizedLoop() {
|
||||||
|
CommLoop leastUtilized = null;
|
||||||
|
int minCount = Integer.MAX_VALUE;
|
||||||
|
for (CommLoop loop : commLoops) {
|
||||||
|
int count = loop.streamCount();
|
||||||
|
if (count == 0) {
|
||||||
|
return loop;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (count < minCount) {
|
||||||
|
leastUtilized = loop;
|
||||||
|
minCount = count;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return leastUtilized;
|
||||||
|
}
|
||||||
|
}
|
70
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/TLVMessage.java
vendored
Normal file
70
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/TLVMessage.java
vendored
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
package org.onlab.onos.store.cluster.impl;

import org.onlab.nio.AbstractMessage;

import java.util.Arrays;
import java.util.Objects;

import static com.google.common.base.MoreObjects.toStringHelper;

/**
 * Base message for cluster-wide communications using TLVs.
 */
public class TLVMessage extends AbstractMessage {

    private final int type;
    private final byte[] data;

    /**
     * Creates an immutable TLV message.
     *
     * @param type message type
     * @param data message data bytes
     */
    public TLVMessage(int type, byte[] data) {
        // Total wire length = payload + fixed TLV header.
        this.length = data.length + TLVMessageStream.METADATA_LENGTH;
        this.type = type;
        this.data = data;
    }

    /**
     * Returns the message type indicator.
     *
     * @return message type
     */
    public int type() {
        return type;
    }

    /**
     * Returns the data bytes.
     *
     * @return message data
     */
    public byte[] data() {
        return data;
    }

    @Override
    public int hashCode() {
        // Arrays.hashCode gives content-based hashing; the previous
        // Objects.hash(type, data) hashed the array by identity.
        return Objects.hash(type, Arrays.hashCode(data));
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        final TLVMessage other = (TLVMessage) obj;
        // Objects.equals on byte[] compares references only; Arrays.equals
        // makes messages with identical payloads compare equal.
        return this.type == other.type && Arrays.equals(this.data, other.data);
    }

    @Override
    public String toString() {
        return toStringHelper(this).add("type", type).add("length", length).toString();
    }

}
|
95
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/TLVMessageStream.java
vendored
Normal file
95
core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/TLVMessageStream.java
vendored
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
package org.onlab.onos.store.cluster.impl;

import org.onlab.nio.IOLoop;
import org.onlab.nio.MessageStream;
import org.onlab.onos.cluster.DefaultControllerNode;

import java.nio.ByteBuffer;
import java.nio.channels.ByteChannel;

import static com.google.common.base.Preconditions.checkState;

/**
 * Stream for transferring TLV messages between cluster members.
 *
 * Wire format of a message: 8-byte marker, 4-byte type, 4-byte total length
 * (header included), followed by the payload bytes.
 */
public class TLVMessageStream extends MessageStream<TLVMessage> {

    public static final int METADATA_LENGTH = 16; // 8 + 4 + 4

    // Byte offset of the length field within the header (marker + type).
    private static final int LENGTH_OFFSET = 12;
    private static final long MARKER = 0xfeedcafecafefeedL;

    private DefaultControllerNode node;

    /**
     * Creates a message stream associated with the specified IO loop and
     * backed by the given byte channel.
     *
     * @param loop          IO loop
     * @param byteChannel   backing byte channel
     * @param bufferSize    size of the backing byte buffers
     * @param maxIdleMillis maximum number of millis the stream can be idle
     */
    protected TLVMessageStream(IOLoop<TLVMessage, ?> loop, ByteChannel byteChannel,
                               int bufferSize, int maxIdleMillis) {
        super(loop, byteChannel, bufferSize, maxIdleMillis);
    }

    /**
     * Returns the node with which this stream is associated.
     *
     * @return controller node, or null if the stream is not yet bound
     */
    DefaultControllerNode node() {
        return node;
    }

    /**
     * Sets the node with which this stream is affiliated.
     *
     * @param node controller node
     */
    void setNode(DefaultControllerNode node) {
        checkState(this.node == null, "Stream is already bound to a node");
        this.node = node;
    }

    @Override
    protected TLVMessage read(ByteBuffer buffer) {
        // Bail out unless the complete fixed-size header has arrived.
        if (buffer.remaining() < METADATA_LENGTH) {
            return null;
        }

        // Peek (without consuming) at the total message length and bail
        // unless the entire message is available.
        int peekedLength = buffer.getInt(buffer.position() + LENGTH_OFFSET);
        if (buffer.remaining() < peekedLength) {
            return null;
        }

        // A complete message is present; consume the header...
        long marker = buffer.getLong();
        checkState(marker == MARKER, "Incorrect message marker");
        int type = buffer.getInt();
        int length = buffer.getInt();

        // ...and then the payload.
        // TODO: add deserialization hook here
        byte[] payload = new byte[length - METADATA_LENGTH];
        buffer.get(payload);

        return new TLVMessage(type, payload);
    }

    @Override
    protected void write(TLVMessage message, ByteBuffer buffer) {
        buffer.putLong(MARKER);
        buffer.putInt(message.type());
        buffer.putInt(message.length());

        // TODO: add serialization hook here
        buffer.put(message.data());
    }

}
|
@ -86,46 +86,48 @@ public class OnosDistributedDeviceStore
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Iterable<Device> getDevices() {
|
public Iterable<Device> getDevices() {
|
||||||
// TODO builder v.s. copyOf. Guava semms to be using copyOf?
|
|
||||||
// FIXME: synchronize.
|
|
||||||
Builder<Device> builder = ImmutableSet.builder();
|
Builder<Device> builder = ImmutableSet.builder();
|
||||||
for (VersionedValue<? extends Device> device : devices.values()) {
|
synchronized (this) {
|
||||||
|
for (VersionedValue<Device> device : devices.values()) {
|
||||||
builder.add(device.entity());
|
builder.add(device.entity());
|
||||||
}
|
}
|
||||||
return builder.build();
|
return builder.build();
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Device getDevice(DeviceId deviceId) {
|
public Device getDevice(DeviceId deviceId) {
|
||||||
return devices.get(deviceId).entity();
|
VersionedValue<Device> device = devices.get(deviceId);
|
||||||
|
checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
|
||||||
|
return device.entity();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId,
|
public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId,
|
||||||
DeviceDescription deviceDescription) {
|
DeviceDescription deviceDescription) {
|
||||||
Timestamp now = clockService.getTimestamp(deviceId);
|
Timestamp newTimestamp = clockService.getTimestamp(deviceId);
|
||||||
VersionedValue<Device> device = devices.get(deviceId);
|
VersionedValue<Device> device = devices.get(deviceId);
|
||||||
|
|
||||||
if (device == null) {
|
if (device == null) {
|
||||||
return createDevice(providerId, deviceId, deviceDescription, now);
|
return createDevice(providerId, deviceId, deviceDescription, newTimestamp);
|
||||||
}
|
}
|
||||||
|
|
||||||
checkState(now.compareTo(device.timestamp()) > 0,
|
checkState(newTimestamp.compareTo(device.timestamp()) > 0,
|
||||||
"Existing device has a timestamp in the future!");
|
"Existing device has a timestamp in the future!");
|
||||||
|
|
||||||
return updateDevice(providerId, device.entity(), deviceDescription, now);
|
return updateDevice(providerId, device.entity(), deviceDescription, newTimestamp);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates the device and returns the appropriate event if necessary.
|
// Creates the device and returns the appropriate event if necessary.
|
||||||
private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId,
|
private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId,
|
||||||
DeviceDescription desc, Timestamp timestamp) {
|
DeviceDescription desc, Timestamp timestamp) {
|
||||||
DefaultDevice device = new DefaultDevice(providerId, deviceId, desc.type(),
|
Device device = new DefaultDevice(providerId, deviceId, desc.type(),
|
||||||
desc.manufacturer(),
|
desc.manufacturer(),
|
||||||
desc.hwVersion(), desc.swVersion(),
|
desc.hwVersion(), desc.swVersion(),
|
||||||
desc.serialNumber());
|
desc.serialNumber());
|
||||||
|
|
||||||
devices.put(deviceId, new VersionedValue<Device>(device, true, timestamp));
|
devices.put(deviceId, new VersionedValue<>(device, true, timestamp));
|
||||||
// FIXME: broadcast a message telling peers of a device event.
|
// TODO,FIXME: broadcast a message telling peers of a device event.
|
||||||
return new DeviceEvent(DEVICE_ADDED, device, null);
|
return new DeviceEvent(DEVICE_ADDED, device, null);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -148,7 +150,7 @@ public class OnosDistributedDeviceStore
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Otherwise merely attempt to change availability
|
// Otherwise merely attempt to change availability
|
||||||
DefaultDevice updated = new DefaultDevice(providerId, device.id(),
|
Device updated = new DefaultDevice(providerId, device.id(),
|
||||||
desc.type(),
|
desc.type(),
|
||||||
desc.manufacturer(),
|
desc.manufacturer(),
|
||||||
desc.hwVersion(),
|
desc.hwVersion(),
|
||||||
@ -196,18 +198,18 @@ public class OnosDistributedDeviceStore
|
|||||||
VersionedValue<Device> device = devices.get(deviceId);
|
VersionedValue<Device> device = devices.get(deviceId);
|
||||||
checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
|
checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
|
||||||
Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId);
|
Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId);
|
||||||
Timestamp timestamp = clockService.getTimestamp(deviceId);
|
Timestamp newTimestamp = clockService.getTimestamp(deviceId);
|
||||||
|
|
||||||
// Add new ports
|
// Add new ports
|
||||||
Set<PortNumber> processed = new HashSet<>();
|
Set<PortNumber> processed = new HashSet<>();
|
||||||
for (PortDescription portDescription : portDescriptions) {
|
for (PortDescription portDescription : portDescriptions) {
|
||||||
VersionedValue<Port> port = ports.get(portDescription.portNumber());
|
VersionedValue<Port> port = ports.get(portDescription.portNumber());
|
||||||
if (port == null) {
|
if (port == null) {
|
||||||
events.add(createPort(device, portDescription, ports, timestamp));
|
events.add(createPort(device, portDescription, ports, newTimestamp));
|
||||||
}
|
}
|
||||||
checkState(timestamp.compareTo(port.timestamp()) > 0,
|
checkState(newTimestamp.compareTo(port.timestamp()) > 0,
|
||||||
"Existing port state has a timestamp in the future!");
|
"Existing port state has a timestamp in the future!");
|
||||||
events.add(updatePort(device, port, portDescription, ports, timestamp));
|
events.add(updatePort(device.entity(), port.entity(), portDescription, ports, newTimestamp));
|
||||||
processed.add(portDescription.portNumber());
|
processed.add(portDescription.portNumber());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -233,19 +235,19 @@ public class OnosDistributedDeviceStore
|
|||||||
// Checks if the specified port requires update and if so, it replaces the
|
// Checks if the specified port requires update and if so, it replaces the
|
||||||
// existing entry in the map and returns corresponding event.
|
// existing entry in the map and returns corresponding event.
|
||||||
//@GuardedBy("this")
|
//@GuardedBy("this")
|
||||||
private DeviceEvent updatePort(VersionedValue<Device> device, VersionedValue<Port> port,
|
private DeviceEvent updatePort(Device device, Port port,
|
||||||
PortDescription portDescription,
|
PortDescription portDescription,
|
||||||
Map<PortNumber, VersionedValue<Port>> ports,
|
Map<PortNumber, VersionedValue<Port>> ports,
|
||||||
Timestamp timestamp) {
|
Timestamp timestamp) {
|
||||||
if (port.entity().isEnabled() != portDescription.isEnabled()) {
|
if (port.isEnabled() != portDescription.isEnabled()) {
|
||||||
VersionedValue<Port> updatedPort = new VersionedValue<Port>(
|
VersionedValue<Port> updatedPort = new VersionedValue<Port>(
|
||||||
new DefaultPort(device.entity(), portDescription.portNumber(),
|
new DefaultPort(device, portDescription.portNumber(),
|
||||||
portDescription.isEnabled()),
|
portDescription.isEnabled()),
|
||||||
portDescription.isEnabled(),
|
portDescription.isEnabled(),
|
||||||
timestamp);
|
timestamp);
|
||||||
ports.put(port.entity().number(), updatedPort);
|
ports.put(port.number(), updatedPort);
|
||||||
updatePortMap(device.entity().id(), ports);
|
updatePortMap(device.id(), ports);
|
||||||
return new DeviceEvent(PORT_UPDATED, device.entity(), updatedPort.entity());
|
return new DeviceEvent(PORT_UPDATED, device, updatedPort.entity());
|
||||||
}
|
}
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
@ -300,7 +302,7 @@ public class OnosDistributedDeviceStore
|
|||||||
Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId);
|
Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId);
|
||||||
VersionedValue<Port> port = ports.get(portDescription.portNumber());
|
VersionedValue<Port> port = ports.get(portDescription.portNumber());
|
||||||
Timestamp timestamp = clockService.getTimestamp(deviceId);
|
Timestamp timestamp = clockService.getTimestamp(deviceId);
|
||||||
return updatePort(device, port, portDescription, ports, timestamp);
|
return updatePort(device.entity(), port.entity(), portDescription, ports, timestamp);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
246
core/store/dist/src/main/java/org/onlab/onos/store/link/impl/OnosDistributedLinkStore.java
vendored
Normal file
246
core/store/dist/src/main/java/org/onlab/onos/store/link/impl/OnosDistributedLinkStore.java
vendored
Normal file
@ -0,0 +1,246 @@
|
|||||||
|
package org.onlab.onos.store.link.impl;
|
||||||
|
|
||||||
|
import static org.onlab.onos.net.Link.Type.DIRECT;
|
||||||
|
import static org.onlab.onos.net.Link.Type.INDIRECT;
|
||||||
|
import static org.onlab.onos.net.link.LinkEvent.Type.LINK_ADDED;
|
||||||
|
import static org.onlab.onos.net.link.LinkEvent.Type.LINK_REMOVED;
|
||||||
|
import static org.onlab.onos.net.link.LinkEvent.Type.LINK_UPDATED;
|
||||||
|
import static org.slf4j.LoggerFactory.getLogger;
|
||||||
|
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.concurrent.ConcurrentHashMap;
|
||||||
|
import java.util.concurrent.ConcurrentMap;
|
||||||
|
|
||||||
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
|
import org.apache.felix.scr.annotations.Component;
|
||||||
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
|
import org.apache.felix.scr.annotations.Reference;
|
||||||
|
import org.apache.felix.scr.annotations.ReferenceCardinality;
|
||||||
|
import org.apache.felix.scr.annotations.Service;
|
||||||
|
import org.onlab.onos.net.ConnectPoint;
|
||||||
|
import org.onlab.onos.net.DefaultLink;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.net.Link;
|
||||||
|
import org.onlab.onos.net.LinkKey;
|
||||||
|
import org.onlab.onos.net.link.LinkDescription;
|
||||||
|
import org.onlab.onos.net.link.LinkEvent;
|
||||||
|
import org.onlab.onos.net.link.LinkStore;
|
||||||
|
import org.onlab.onos.net.link.LinkStoreDelegate;
|
||||||
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
import org.onlab.onos.store.AbstractStore;
|
||||||
|
import org.onlab.onos.store.ClockService;
|
||||||
|
import org.onlab.onos.store.Timestamp;
|
||||||
|
import org.onlab.onos.store.device.impl.VersionedValue;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
|
import com.google.common.collect.HashMultimap;
|
||||||
|
import com.google.common.collect.ImmutableSet;
|
||||||
|
import com.google.common.collect.Multimap;
|
||||||
|
import com.google.common.collect.ImmutableSet.Builder;
|
||||||
|
|
||||||
|
import static com.google.common.base.Preconditions.checkArgument;
|
||||||
|
import static com.google.common.base.Preconditions.checkState;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manages inventory of infrastructure links using a protocol that takes into consideration
|
||||||
|
* the order in which events occur.
|
||||||
|
*/
|
||||||
|
// FIXME: This does not yet implement the full protocol.
|
||||||
|
// The full protocol requires the sender of LLDP message to include the
|
||||||
|
// version information of src device/port and the receiver to
|
||||||
|
// take that into account when figuring out if a more recent src
|
||||||
|
// device/port down event renders the link discovery obsolete.
|
||||||
|
@Component(immediate = true)
|
||||||
|
@Service
|
||||||
|
public class OnosDistributedLinkStore
|
||||||
|
extends AbstractStore<LinkEvent, LinkStoreDelegate>
|
||||||
|
implements LinkStore {
|
||||||
|
|
||||||
|
private final Logger log = getLogger(getClass());
|
||||||
|
|
||||||
|
// Link inventory
|
||||||
|
private ConcurrentMap<LinkKey, VersionedValue<Link>> links;
|
||||||
|
|
||||||
|
public static final String LINK_NOT_FOUND = "Link between %s and %s not found";
|
||||||
|
|
||||||
|
// TODO synchronize?
|
||||||
|
// Egress and ingress link sets
|
||||||
|
private final Multimap<DeviceId, VersionedValue<Link>> srcLinks = HashMultimap.create();
|
||||||
|
private final Multimap<DeviceId, VersionedValue<Link>> dstLinks = HashMultimap.create();
|
||||||
|
|
||||||
|
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
||||||
|
protected ClockService clockService;
|
||||||
|
|
||||||
|
@Activate
|
||||||
|
public void activate() {
|
||||||
|
|
||||||
|
links = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
|
log.info("Started");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Deactivate
|
||||||
|
public void deactivate() {
|
||||||
|
log.info("Stopped");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getLinkCount() {
|
||||||
|
return links.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Iterable<Link> getLinks() {
|
||||||
|
Builder<Link> builder = ImmutableSet.builder();
|
||||||
|
synchronized (this) {
|
||||||
|
for (VersionedValue<Link> link : links.values()) {
|
||||||
|
builder.add(link.entity());
|
||||||
|
}
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<Link> getDeviceEgressLinks(DeviceId deviceId) {
|
||||||
|
Set<VersionedValue<Link>> egressLinks = ImmutableSet.copyOf(srcLinks.get(deviceId));
|
||||||
|
Set<Link> rawEgressLinks = new HashSet<>();
|
||||||
|
for (VersionedValue<Link> link : egressLinks) {
|
||||||
|
rawEgressLinks.add(link.entity());
|
||||||
|
}
|
||||||
|
return rawEgressLinks;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<Link> getDeviceIngressLinks(DeviceId deviceId) {
|
||||||
|
Set<VersionedValue<Link>> ingressLinks = ImmutableSet.copyOf(dstLinks.get(deviceId));
|
||||||
|
Set<Link> rawIngressLinks = new HashSet<>();
|
||||||
|
for (VersionedValue<Link> link : ingressLinks) {
|
||||||
|
rawIngressLinks.add(link.entity());
|
||||||
|
}
|
||||||
|
return rawIngressLinks;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Link getLink(ConnectPoint src, ConnectPoint dst) {
|
||||||
|
VersionedValue<Link> link = links.get(new LinkKey(src, dst));
|
||||||
|
checkArgument(link != null, "LINK_NOT_FOUND", src, dst);
|
||||||
|
return link.entity();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<Link> getEgressLinks(ConnectPoint src) {
|
||||||
|
Set<Link> egressLinks = new HashSet<>();
|
||||||
|
for (VersionedValue<Link> link : srcLinks.get(src.deviceId())) {
|
||||||
|
if (link.entity().src().equals(src)) {
|
||||||
|
egressLinks.add(link.entity());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return egressLinks;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<Link> getIngressLinks(ConnectPoint dst) {
|
||||||
|
Set<Link> ingressLinks = new HashSet<>();
|
||||||
|
for (VersionedValue<Link> link : dstLinks.get(dst.deviceId())) {
|
||||||
|
if (link.entity().dst().equals(dst)) {
|
||||||
|
ingressLinks.add(link.entity());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ingressLinks;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public LinkEvent createOrUpdateLink(ProviderId providerId,
|
||||||
|
LinkDescription linkDescription) {
|
||||||
|
|
||||||
|
final DeviceId destinationDeviceId = linkDescription.dst().deviceId();
|
||||||
|
final Timestamp newTimestamp = clockService.getTimestamp(destinationDeviceId);
|
||||||
|
|
||||||
|
LinkKey key = new LinkKey(linkDescription.src(), linkDescription.dst());
|
||||||
|
VersionedValue<Link> link = links.get(key);
|
||||||
|
if (link == null) {
|
||||||
|
return createLink(providerId, key, linkDescription, newTimestamp);
|
||||||
|
}
|
||||||
|
|
||||||
|
checkState(newTimestamp.compareTo(link.timestamp()) > 0,
|
||||||
|
"Existing Link has a timestamp in the future!");
|
||||||
|
|
||||||
|
return updateLink(providerId, link, key, linkDescription, newTimestamp);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates and stores the link and returns the appropriate event.
|
||||||
|
private LinkEvent createLink(ProviderId providerId, LinkKey key,
|
||||||
|
LinkDescription linkDescription, Timestamp timestamp) {
|
||||||
|
VersionedValue<Link> link = new VersionedValue<Link>(new DefaultLink(providerId, key.src(), key.dst(),
|
||||||
|
linkDescription.type()), true, timestamp);
|
||||||
|
synchronized (this) {
|
||||||
|
links.put(key, link);
|
||||||
|
addNewLink(link, timestamp);
|
||||||
|
}
|
||||||
|
// FIXME: notify peers.
|
||||||
|
return new LinkEvent(LINK_ADDED, link.entity());
|
||||||
|
}
|
||||||
|
|
||||||
|
// update Egress and ingress link sets
|
||||||
|
private void addNewLink(VersionedValue<Link> link, Timestamp timestamp) {
|
||||||
|
Link rawLink = link.entity();
|
||||||
|
synchronized (this) {
|
||||||
|
srcLinks.put(rawLink.src().deviceId(), link);
|
||||||
|
dstLinks.put(rawLink.dst().deviceId(), link);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Updates, if necessary the specified link and returns the appropriate event.
|
||||||
|
private LinkEvent updateLink(ProviderId providerId, VersionedValue<Link> existingLink,
|
||||||
|
LinkKey key, LinkDescription linkDescription, Timestamp timestamp) {
|
||||||
|
// FIXME confirm Link update condition is OK
|
||||||
|
if (existingLink.entity().type() == INDIRECT && linkDescription.type() == DIRECT) {
|
||||||
|
synchronized (this) {
|
||||||
|
|
||||||
|
VersionedValue<Link> updatedLink = new VersionedValue<Link>(
|
||||||
|
new DefaultLink(providerId, existingLink.entity().src(), existingLink.entity().dst(),
|
||||||
|
linkDescription.type()), true, timestamp);
|
||||||
|
links.replace(key, existingLink, updatedLink);
|
||||||
|
|
||||||
|
replaceLink(existingLink, updatedLink);
|
||||||
|
// FIXME: notify peers.
|
||||||
|
return new LinkEvent(LINK_UPDATED, updatedLink.entity());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// update Egress and ingress link sets
|
||||||
|
private void replaceLink(VersionedValue<Link> current, VersionedValue<Link> updated) {
|
||||||
|
synchronized (this) {
|
||||||
|
srcLinks.remove(current.entity().src().deviceId(), current);
|
||||||
|
dstLinks.remove(current.entity().dst().deviceId(), current);
|
||||||
|
|
||||||
|
srcLinks.put(current.entity().src().deviceId(), updated);
|
||||||
|
dstLinks.put(current.entity().dst().deviceId(), updated);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) {
|
||||||
|
synchronized (this) {
|
||||||
|
LinkKey key = new LinkKey(src, dst);
|
||||||
|
VersionedValue<Link> link = links.remove(key);
|
||||||
|
if (link != null) {
|
||||||
|
removeLink(link);
|
||||||
|
// notify peers
|
||||||
|
return new LinkEvent(LINK_REMOVED, link.entity());
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// update Egress and ingress link sets
|
||||||
|
private void removeLink(VersionedValue<Link> link) {
|
||||||
|
synchronized (this) {
|
||||||
|
srcLinks.remove(link.entity().src().deviceId(), link);
|
||||||
|
dstLinks.remove(link.entity().dst().deviceId(), link);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -49,6 +49,7 @@ public class DistributedClusterStore
|
|||||||
private final MembershipListener listener = new InternalMembershipListener();
|
private final MembershipListener listener = new InternalMembershipListener();
|
||||||
private final Map<NodeId, State> states = new ConcurrentHashMap<>();
|
private final Map<NodeId, State> states = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
|
@Override
|
||||||
@Activate
|
@Activate
|
||||||
public void activate() {
|
public void activate() {
|
||||||
super.activate();
|
super.activate();
|
||||||
@ -56,9 +57,9 @@ public class DistributedClusterStore
|
|||||||
|
|
||||||
rawNodes = theInstance.getMap("nodes");
|
rawNodes = theInstance.getMap("nodes");
|
||||||
OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader
|
OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader
|
||||||
= new OptionalCacheLoader<>(storeService, rawNodes);
|
= new OptionalCacheLoader<>(kryoSerializationService, rawNodes);
|
||||||
nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
|
nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
|
||||||
rawNodes.addEntryListener(new RemoteEventHandler<>(nodes), true);
|
rawNodes.addEntryListener(new RemoteCacheEventHandler<>(nodes), true);
|
||||||
|
|
||||||
loadClusterNodes();
|
loadClusterNodes();
|
||||||
|
|
||||||
@ -68,7 +69,7 @@ public class DistributedClusterStore
|
|||||||
// Loads the initial set of cluster nodes
|
// Loads the initial set of cluster nodes
|
||||||
private void loadClusterNodes() {
|
private void loadClusterNodes() {
|
||||||
for (Member member : theInstance.getCluster().getMembers()) {
|
for (Member member : theInstance.getCluster().getMembers()) {
|
||||||
addMember(member);
|
addNode(node(member));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -103,6 +104,11 @@ public class DistributedClusterStore
|
|||||||
return state == null ? State.INACTIVE : state;
|
return state == null ? State.INACTIVE : state;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
|
||||||
|
return addNode(new DefaultControllerNode(nodeId, ip, tcpPort));
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void removeNode(NodeId nodeId) {
|
public void removeNode(NodeId nodeId) {
|
||||||
synchronized (this) {
|
synchronized (this) {
|
||||||
@ -112,8 +118,7 @@ public class DistributedClusterStore
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Adds a new node based on the specified member
|
// Adds a new node based on the specified member
|
||||||
private synchronized ControllerNode addMember(Member member) {
|
private synchronized ControllerNode addNode(DefaultControllerNode node) {
|
||||||
DefaultControllerNode node = node(member);
|
|
||||||
rawNodes.put(serialize(node.id()), serialize(node));
|
rawNodes.put(serialize(node.id()), serialize(node));
|
||||||
nodes.put(node.id(), Optional.of(node));
|
nodes.put(node.id(), Optional.of(node));
|
||||||
states.put(node.id(), State.ACTIVE);
|
states.put(node.id(), State.ACTIVE);
|
||||||
@ -136,7 +141,7 @@ public class DistributedClusterStore
|
|||||||
@Override
|
@Override
|
||||||
public void memberAdded(MembershipEvent membershipEvent) {
|
public void memberAdded(MembershipEvent membershipEvent) {
|
||||||
log.info("Member {} added", membershipEvent.getMember());
|
log.info("Member {} added", membershipEvent.getMember());
|
||||||
ControllerNode node = addMember(membershipEvent.getMember());
|
ControllerNode node = addNode(node(membershipEvent.getMember()));
|
||||||
notifyDelegate(new ClusterEvent(INSTANCE_ACTIVATED, node));
|
notifyDelegate(new ClusterEvent(INSTANCE_ACTIVATED, node));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -52,7 +52,7 @@ implements MastershipStore {
|
|||||||
|
|
||||||
rawMasters = theInstance.getMap("masters");
|
rawMasters = theInstance.getMap("masters");
|
||||||
OptionalCacheLoader<DeviceId, NodeId> nodeLoader
|
OptionalCacheLoader<DeviceId, NodeId> nodeLoader
|
||||||
= new OptionalCacheLoader<>(storeService, rawMasters);
|
= new OptionalCacheLoader<>(kryoSerializationService, rawMasters);
|
||||||
masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
|
masters = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader));
|
||||||
rawMasters.addEntryListener(new RemoteMasterShipEventHandler(masters), true);
|
rawMasters.addEntryListener(new RemoteMasterShipEventHandler(masters), true);
|
||||||
|
|
||||||
@ -123,7 +123,7 @@ implements MastershipStore {
|
|||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
private class RemoteMasterShipEventHandler extends RemoteEventHandler<DeviceId, NodeId> {
|
private class RemoteMasterShipEventHandler extends RemoteCacheEventHandler<DeviceId, NodeId> {
|
||||||
public RemoteMasterShipEventHandler(LoadingCache<DeviceId, Optional<NodeId>> cache) {
|
public RemoteMasterShipEventHandler(LoadingCache<DeviceId, Optional<NodeId>> cache) {
|
||||||
super(cache);
|
super(cache);
|
||||||
}
|
}
|
||||||
|
@ -6,6 +6,7 @@ import com.hazelcast.core.EntryAdapter;
|
|||||||
import com.hazelcast.core.EntryEvent;
|
import com.hazelcast.core.EntryEvent;
|
||||||
import com.hazelcast.core.HazelcastInstance;
|
import com.hazelcast.core.HazelcastInstance;
|
||||||
import com.hazelcast.core.MapEvent;
|
import com.hazelcast.core.MapEvent;
|
||||||
|
import com.hazelcast.core.Member;
|
||||||
|
|
||||||
import org.apache.felix.scr.annotations.Activate;
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
import org.apache.felix.scr.annotations.Component;
|
import org.apache.felix.scr.annotations.Component;
|
||||||
@ -14,6 +15,7 @@ import org.apache.felix.scr.annotations.ReferenceCardinality;
|
|||||||
import org.onlab.onos.event.Event;
|
import org.onlab.onos.event.Event;
|
||||||
import org.onlab.onos.store.AbstractStore;
|
import org.onlab.onos.store.AbstractStore;
|
||||||
import org.onlab.onos.store.StoreDelegate;
|
import org.onlab.onos.store.StoreDelegate;
|
||||||
|
import org.onlab.onos.store.serializers.KryoSerializationService;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
import static com.google.common.base.Preconditions.checkNotNull;
|
import static com.google.common.base.Preconditions.checkNotNull;
|
||||||
@ -31,6 +33,9 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel
|
|||||||
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
||||||
protected StoreService storeService;
|
protected StoreService storeService;
|
||||||
|
|
||||||
|
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
||||||
|
protected KryoSerializationService kryoSerializationService;
|
||||||
|
|
||||||
protected HazelcastInstance theInstance;
|
protected HazelcastInstance theInstance;
|
||||||
|
|
||||||
@Activate
|
@Activate
|
||||||
@ -45,7 +50,7 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel
|
|||||||
* @return serialized object
|
* @return serialized object
|
||||||
*/
|
*/
|
||||||
protected byte[] serialize(Object obj) {
|
protected byte[] serialize(Object obj) {
|
||||||
return storeService.serialize(obj);
|
return kryoSerializationService.serialize(obj);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -56,7 +61,7 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel
|
|||||||
* @return deserialized object
|
* @return deserialized object
|
||||||
*/
|
*/
|
||||||
protected <T> T deserialize(byte[] bytes) {
|
protected <T> T deserialize(byte[] bytes) {
|
||||||
return storeService.deserialize(bytes);
|
return kryoSerializationService.deserialize(bytes);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -66,8 +71,9 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel
|
|||||||
* @param <K> IMap key type after deserialization
|
* @param <K> IMap key type after deserialization
|
||||||
* @param <V> IMap value type after deserialization
|
* @param <V> IMap value type after deserialization
|
||||||
*/
|
*/
|
||||||
public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> {
|
public class RemoteCacheEventHandler<K, V> extends EntryAdapter<byte[], byte[]> {
|
||||||
|
|
||||||
|
private final Member localMember;
|
||||||
private LoadingCache<K, Optional<V>> cache;
|
private LoadingCache<K, Optional<V>> cache;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -75,17 +81,26 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel
|
|||||||
*
|
*
|
||||||
* @param cache cache to update
|
* @param cache cache to update
|
||||||
*/
|
*/
|
||||||
public RemoteEventHandler(LoadingCache<K, Optional<V>> cache) {
|
public RemoteCacheEventHandler(LoadingCache<K, Optional<V>> cache) {
|
||||||
|
this.localMember = theInstance.getCluster().getLocalMember();
|
||||||
this.cache = checkNotNull(cache);
|
this.cache = checkNotNull(cache);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void mapCleared(MapEvent event) {
|
public void mapCleared(MapEvent event) {
|
||||||
|
if (localMember.equals(event.getMember())) {
|
||||||
|
// ignore locally triggered event
|
||||||
|
return;
|
||||||
|
}
|
||||||
cache.invalidateAll();
|
cache.invalidateAll();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void entryAdded(EntryEvent<byte[], byte[]> event) {
|
public void entryAdded(EntryEvent<byte[], byte[]> event) {
|
||||||
|
if (localMember.equals(event.getMember())) {
|
||||||
|
// ignore locally triggered event
|
||||||
|
return;
|
||||||
|
}
|
||||||
K key = deserialize(event.getKey());
|
K key = deserialize(event.getKey());
|
||||||
V newVal = deserialize(event.getValue());
|
V newVal = deserialize(event.getValue());
|
||||||
Optional<V> newValue = Optional.of(newVal);
|
Optional<V> newValue = Optional.of(newVal);
|
||||||
@ -95,6 +110,10 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void entryUpdated(EntryEvent<byte[], byte[]> event) {
|
public void entryUpdated(EntryEvent<byte[], byte[]> event) {
|
||||||
|
if (localMember.equals(event.getMember())) {
|
||||||
|
// ignore locally triggered event
|
||||||
|
return;
|
||||||
|
}
|
||||||
K key = deserialize(event.getKey());
|
K key = deserialize(event.getKey());
|
||||||
V oldVal = deserialize(event.getOldValue());
|
V oldVal = deserialize(event.getOldValue());
|
||||||
Optional<V> oldValue = Optional.fromNullable(oldVal);
|
Optional<V> oldValue = Optional.fromNullable(oldVal);
|
||||||
@ -106,6 +125,10 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void entryRemoved(EntryEvent<byte[], byte[]> event) {
|
public void entryRemoved(EntryEvent<byte[], byte[]> event) {
|
||||||
|
if (localMember.equals(event.getMember())) {
|
||||||
|
// ignore locally triggered event
|
||||||
|
return;
|
||||||
|
}
|
||||||
K key = deserialize(event.getKey());
|
K key = deserialize(event.getKey());
|
||||||
V val = deserialize(event.getOldValue());
|
V val = deserialize(event.getOldValue());
|
||||||
cache.invalidate(key);
|
cache.invalidate(key);
|
||||||
@ -141,4 +164,80 @@ public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDel
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Distributed object remote event entry listener.
|
||||||
|
*
|
||||||
|
* @param <K> Entry key type after deserialization
|
||||||
|
* @param <V> Entry value type after deserialization
|
||||||
|
*/
|
||||||
|
public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> {
|
||||||
|
|
||||||
|
private final Member localMember;
|
||||||
|
|
||||||
|
public RemoteEventHandler() {
|
||||||
|
this.localMember = theInstance.getCluster().getLocalMember();
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public void entryAdded(EntryEvent<byte[], byte[]> event) {
|
||||||
|
if (localMember.equals(event.getMember())) {
|
||||||
|
// ignore locally triggered event
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
K key = deserialize(event.getKey());
|
||||||
|
V newVal = deserialize(event.getValue());
|
||||||
|
onAdd(key, newVal);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void entryRemoved(EntryEvent<byte[], byte[]> event) {
|
||||||
|
if (localMember.equals(event.getMember())) {
|
||||||
|
// ignore locally triggered event
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
K key = deserialize(event.getKey());
|
||||||
|
V val = deserialize(event.getValue());
|
||||||
|
onRemove(key, val);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void entryUpdated(EntryEvent<byte[], byte[]> event) {
|
||||||
|
if (localMember.equals(event.getMember())) {
|
||||||
|
// ignore locally triggered event
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
K key = deserialize(event.getKey());
|
||||||
|
V oldVal = deserialize(event.getOldValue());
|
||||||
|
V newVal = deserialize(event.getValue());
|
||||||
|
onUpdate(key, oldVal, newVal);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remote entry addition hook.
|
||||||
|
*
|
||||||
|
* @param key new key
|
||||||
|
* @param newVal new value
|
||||||
|
*/
|
||||||
|
protected void onAdd(K key, V newVal) {
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remote entry update hook.
|
||||||
|
*
|
||||||
|
* @param key new key
|
||||||
|
* @param oldValue old value
|
||||||
|
* @param newVal new value
|
||||||
|
*/
|
||||||
|
protected void onUpdate(K key, V oldValue, V newVal) {
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remote entry remove hook.
|
||||||
|
*
|
||||||
|
* @param key new key
|
||||||
|
* @param val old value
|
||||||
|
*/
|
||||||
|
protected void onRemove(K key, V val) {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -2,6 +2,8 @@ package org.onlab.onos.store.common;
|
|||||||
|
|
||||||
import static com.google.common.base.Preconditions.checkNotNull;
|
import static com.google.common.base.Preconditions.checkNotNull;
|
||||||
|
|
||||||
|
import org.onlab.onos.store.serializers.KryoSerializationService;
|
||||||
|
|
||||||
import com.google.common.base.Optional;
|
import com.google.common.base.Optional;
|
||||||
import com.google.common.cache.CacheLoader;
|
import com.google.common.cache.CacheLoader;
|
||||||
import com.hazelcast.core.IMap;
|
import com.hazelcast.core.IMap;
|
||||||
@ -16,28 +18,28 @@ import com.hazelcast.core.IMap;
|
|||||||
public final class OptionalCacheLoader<K, V> extends
|
public final class OptionalCacheLoader<K, V> extends
|
||||||
CacheLoader<K, Optional<V>> {
|
CacheLoader<K, Optional<V>> {
|
||||||
|
|
||||||
private final StoreService storeService;
|
private final KryoSerializationService kryoSerializationService;
|
||||||
private IMap<byte[], byte[]> rawMap;
|
private IMap<byte[], byte[]> rawMap;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructor.
|
* Constructor.
|
||||||
*
|
*
|
||||||
* @param storeService to use for serialization
|
* @param kryoSerializationService to use for serialization
|
||||||
* @param rawMap underlying IMap
|
* @param rawMap underlying IMap
|
||||||
*/
|
*/
|
||||||
public OptionalCacheLoader(StoreService storeService, IMap<byte[], byte[]> rawMap) {
|
public OptionalCacheLoader(KryoSerializationService kryoSerializationService, IMap<byte[], byte[]> rawMap) {
|
||||||
this.storeService = checkNotNull(storeService);
|
this.kryoSerializationService = checkNotNull(kryoSerializationService);
|
||||||
this.rawMap = checkNotNull(rawMap);
|
this.rawMap = checkNotNull(rawMap);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Optional<V> load(K key) throws Exception {
|
public Optional<V> load(K key) throws Exception {
|
||||||
byte[] keyBytes = storeService.serialize(key);
|
byte[] keyBytes = kryoSerializationService.serialize(key);
|
||||||
byte[] valBytes = rawMap.get(keyBytes);
|
byte[] valBytes = rawMap.get(keyBytes);
|
||||||
if (valBytes == null) {
|
if (valBytes == null) {
|
||||||
return Optional.absent();
|
return Optional.absent();
|
||||||
}
|
}
|
||||||
V dev = storeService.deserialize(valBytes);
|
V dev = kryoSerializationService.deserialize(valBytes);
|
||||||
return Optional.of(dev);
|
return Optional.of(dev);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -5,46 +5,14 @@ import com.hazelcast.config.FileSystemXmlConfig;
|
|||||||
import com.hazelcast.core.Hazelcast;
|
import com.hazelcast.core.Hazelcast;
|
||||||
import com.hazelcast.core.HazelcastInstance;
|
import com.hazelcast.core.HazelcastInstance;
|
||||||
|
|
||||||
import de.javakaffee.kryoserializers.URISerializer;
|
|
||||||
|
|
||||||
import org.apache.felix.scr.annotations.Activate;
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
import org.apache.felix.scr.annotations.Component;
|
import org.apache.felix.scr.annotations.Component;
|
||||||
import org.apache.felix.scr.annotations.Deactivate;
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
import org.apache.felix.scr.annotations.Service;
|
import org.apache.felix.scr.annotations.Service;
|
||||||
import org.onlab.onos.cluster.ControllerNode;
|
|
||||||
import org.onlab.onos.cluster.DefaultControllerNode;
|
|
||||||
import org.onlab.onos.cluster.NodeId;
|
|
||||||
import org.onlab.onos.net.ConnectPoint;
|
|
||||||
import org.onlab.onos.net.DefaultDevice;
|
|
||||||
import org.onlab.onos.net.DefaultLink;
|
|
||||||
import org.onlab.onos.net.DefaultPort;
|
|
||||||
import org.onlab.onos.net.Device;
|
|
||||||
import org.onlab.onos.net.DeviceId;
|
|
||||||
import org.onlab.onos.net.Element;
|
|
||||||
import org.onlab.onos.net.Link;
|
|
||||||
import org.onlab.onos.net.LinkKey;
|
|
||||||
import org.onlab.onos.net.MastershipRole;
|
|
||||||
import org.onlab.onos.net.Port;
|
|
||||||
import org.onlab.onos.net.PortNumber;
|
|
||||||
import org.onlab.onos.net.provider.ProviderId;
|
|
||||||
import org.onlab.onos.store.serializers.ConnectPointSerializer;
|
|
||||||
import org.onlab.onos.store.serializers.DefaultLinkSerializer;
|
|
||||||
import org.onlab.onos.store.serializers.DefaultPortSerializer;
|
|
||||||
import org.onlab.onos.store.serializers.DeviceIdSerializer;
|
|
||||||
import org.onlab.onos.store.serializers.IpPrefixSerializer;
|
|
||||||
import org.onlab.onos.store.serializers.LinkKeySerializer;
|
|
||||||
import org.onlab.onos.store.serializers.NodeIdSerializer;
|
|
||||||
import org.onlab.onos.store.serializers.PortNumberSerializer;
|
|
||||||
import org.onlab.onos.store.serializers.ProviderIdSerializer;
|
|
||||||
import org.onlab.packet.IpPrefix;
|
|
||||||
import org.onlab.util.KryoPool;
|
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
import java.io.FileNotFoundException;
|
import java.io.FileNotFoundException;
|
||||||
import java.net.URI;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.HashMap;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Auxiliary bootstrap of distributed store.
|
* Auxiliary bootstrap of distributed store.
|
||||||
@ -58,55 +26,18 @@ public class StoreManager implements StoreService {
|
|||||||
private final Logger log = LoggerFactory.getLogger(getClass());
|
private final Logger log = LoggerFactory.getLogger(getClass());
|
||||||
|
|
||||||
protected HazelcastInstance instance;
|
protected HazelcastInstance instance;
|
||||||
private KryoPool serializerPool;
|
|
||||||
|
|
||||||
|
|
||||||
@Activate
|
@Activate
|
||||||
public void activate() {
|
public void activate() {
|
||||||
try {
|
try {
|
||||||
Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE);
|
Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE);
|
||||||
instance = Hazelcast.newHazelcastInstance(config);
|
instance = Hazelcast.newHazelcastInstance(config);
|
||||||
setupKryoPool();
|
|
||||||
log.info("Started");
|
log.info("Started");
|
||||||
} catch (FileNotFoundException e) {
|
} catch (FileNotFoundException e) {
|
||||||
log.error("Unable to configure Hazelcast", e);
|
log.error("Unable to configure Hazelcast", e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Sets up the common serialzers pool.
|
|
||||||
*/
|
|
||||||
protected void setupKryoPool() {
|
|
||||||
// FIXME Slice out types used in common to separate pool/namespace.
|
|
||||||
serializerPool = KryoPool.newBuilder()
|
|
||||||
.register(ArrayList.class,
|
|
||||||
HashMap.class,
|
|
||||||
|
|
||||||
ControllerNode.State.class,
|
|
||||||
Device.Type.class,
|
|
||||||
|
|
||||||
DefaultControllerNode.class,
|
|
||||||
DefaultDevice.class,
|
|
||||||
MastershipRole.class,
|
|
||||||
Port.class,
|
|
||||||
Element.class,
|
|
||||||
|
|
||||||
Link.Type.class
|
|
||||||
)
|
|
||||||
.register(IpPrefix.class, new IpPrefixSerializer())
|
|
||||||
.register(URI.class, new URISerializer())
|
|
||||||
.register(NodeId.class, new NodeIdSerializer())
|
|
||||||
.register(ProviderId.class, new ProviderIdSerializer())
|
|
||||||
.register(DeviceId.class, new DeviceIdSerializer())
|
|
||||||
.register(PortNumber.class, new PortNumberSerializer())
|
|
||||||
.register(DefaultPort.class, new DefaultPortSerializer())
|
|
||||||
.register(LinkKey.class, new LinkKeySerializer())
|
|
||||||
.register(ConnectPoint.class, new ConnectPointSerializer())
|
|
||||||
.register(DefaultLink.class, new DefaultLinkSerializer())
|
|
||||||
.build()
|
|
||||||
.populate(10);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Deactivate
|
@Deactivate
|
||||||
public void deactivate() {
|
public void deactivate() {
|
||||||
instance.shutdown();
|
instance.shutdown();
|
||||||
@ -118,18 +49,4 @@ public class StoreManager implements StoreService {
|
|||||||
return instance;
|
return instance;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public byte[] serialize(final Object obj) {
|
|
||||||
return serializerPool.serialize(obj);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public <T> T deserialize(final byte[] bytes) {
|
|
||||||
if (bytes == null) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
return serializerPool.deserialize(bytes);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -15,22 +15,4 @@ public interface StoreService {
|
|||||||
*/
|
*/
|
||||||
HazelcastInstance getHazelcastInstance();
|
HazelcastInstance getHazelcastInstance();
|
||||||
|
|
||||||
/**
|
|
||||||
* Serializes the specified object into bytes using one of the
|
|
||||||
* pre-registered serializers.
|
|
||||||
*
|
|
||||||
* @param obj object to be serialized
|
|
||||||
* @return serialized bytes
|
|
||||||
*/
|
|
||||||
public byte[] serialize(final Object obj);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Deserializes the specified bytes into an object using one of the
|
|
||||||
* pre-registered serializers.
|
|
||||||
*
|
|
||||||
* @param bytes bytes to be deserialized
|
|
||||||
* @return deserialized object
|
|
||||||
*/
|
|
||||||
public <T> T deserialize(final byte[] bytes);
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -46,9 +46,8 @@ public class TestStoreManager extends StoreManager {
|
|||||||
this.instance = instance;
|
this.instance = instance;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Hazelcast setup removed from original code.
|
|
||||||
@Override
|
@Override
|
||||||
public void activate() {
|
public void activate() {
|
||||||
setupKryoPool();
|
// Hazelcast setup removed from original code.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -72,6 +72,10 @@ public class DistributedDeviceStore
|
|||||||
private IMap<byte[], byte[]> rawDevicePorts;
|
private IMap<byte[], byte[]> rawDevicePorts;
|
||||||
private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts;
|
private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts;
|
||||||
|
|
||||||
|
private String devicesListener;
|
||||||
|
|
||||||
|
private String portsListener;
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@Activate
|
@Activate
|
||||||
public void activate() {
|
public void activate() {
|
||||||
@ -83,20 +87,20 @@ public class DistributedDeviceStore
|
|||||||
// TODO decide on Map name scheme to avoid collision
|
// TODO decide on Map name scheme to avoid collision
|
||||||
rawDevices = theInstance.getMap("devices");
|
rawDevices = theInstance.getMap("devices");
|
||||||
final OptionalCacheLoader<DeviceId, DefaultDevice> deviceLoader
|
final OptionalCacheLoader<DeviceId, DefaultDevice> deviceLoader
|
||||||
= new OptionalCacheLoader<>(storeService, rawDevices);
|
= new OptionalCacheLoader<>(kryoSerializationService, rawDevices);
|
||||||
devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader));
|
devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader));
|
||||||
// refresh/populate cache based on notification from other instance
|
// refresh/populate cache based on notification from other instance
|
||||||
rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue);
|
devicesListener = rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue);
|
||||||
|
|
||||||
// TODO cache availableDevices
|
// TODO cache availableDevices
|
||||||
availableDevices = theInstance.getSet("availableDevices");
|
availableDevices = theInstance.getSet("availableDevices");
|
||||||
|
|
||||||
rawDevicePorts = theInstance.getMap("devicePorts");
|
rawDevicePorts = theInstance.getMap("devicePorts");
|
||||||
final OptionalCacheLoader<DeviceId, Map<PortNumber, Port>> devicePortLoader
|
final OptionalCacheLoader<DeviceId, Map<PortNumber, Port>> devicePortLoader
|
||||||
= new OptionalCacheLoader<>(storeService, rawDevicePorts);
|
= new OptionalCacheLoader<>(kryoSerializationService, rawDevicePorts);
|
||||||
devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader));
|
devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader));
|
||||||
// refresh/populate cache based on notification from other instance
|
// refresh/populate cache based on notification from other instance
|
||||||
rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue);
|
portsListener = rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue);
|
||||||
|
|
||||||
loadDeviceCache();
|
loadDeviceCache();
|
||||||
loadDevicePortsCache();
|
loadDevicePortsCache();
|
||||||
@ -106,6 +110,8 @@ public class DistributedDeviceStore
|
|||||||
|
|
||||||
@Deactivate
|
@Deactivate
|
||||||
public void deactivate() {
|
public void deactivate() {
|
||||||
|
rawDevicePorts.removeEntryListener(portsListener);
|
||||||
|
rawDevices.removeEntryListener(devicesListener);
|
||||||
log.info("Stopped");
|
log.info("Stopped");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -354,7 +360,7 @@ public class DistributedDeviceStore
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private class RemoteDeviceEventHandler extends RemoteEventHandler<DeviceId, DefaultDevice> {
|
private class RemoteDeviceEventHandler extends RemoteCacheEventHandler<DeviceId, DefaultDevice> {
|
||||||
public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) {
|
public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) {
|
||||||
super(cache);
|
super(cache);
|
||||||
}
|
}
|
||||||
@ -375,7 +381,7 @@ public class DistributedDeviceStore
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private class RemotePortEventHandler extends RemoteEventHandler<DeviceId, Map<PortNumber, Port>> {
|
private class RemotePortEventHandler extends RemoteCacheEventHandler<DeviceId, Map<PortNumber, Port>> {
|
||||||
public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) {
|
public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) {
|
||||||
super(cache);
|
super(cache);
|
||||||
}
|
}
|
||||||
|
@ -58,6 +58,8 @@ public class DistributedLinkStore
|
|||||||
private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create();
|
private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create();
|
||||||
private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create();
|
private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create();
|
||||||
|
|
||||||
|
private String linksListener;
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@Activate
|
@Activate
|
||||||
public void activate() {
|
public void activate() {
|
||||||
@ -68,10 +70,10 @@ public class DistributedLinkStore
|
|||||||
// TODO decide on Map name scheme to avoid collision
|
// TODO decide on Map name scheme to avoid collision
|
||||||
rawLinks = theInstance.getMap("links");
|
rawLinks = theInstance.getMap("links");
|
||||||
final OptionalCacheLoader<LinkKey, DefaultLink> linkLoader
|
final OptionalCacheLoader<LinkKey, DefaultLink> linkLoader
|
||||||
= new OptionalCacheLoader<>(storeService, rawLinks);
|
= new OptionalCacheLoader<>(kryoSerializationService, rawLinks);
|
||||||
links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader));
|
links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader));
|
||||||
// refresh/populate cache based on notification from other instance
|
// refresh/populate cache based on notification from other instance
|
||||||
rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue);
|
linksListener = rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue);
|
||||||
|
|
||||||
loadLinkCache();
|
loadLinkCache();
|
||||||
|
|
||||||
@ -80,7 +82,7 @@ public class DistributedLinkStore
|
|||||||
|
|
||||||
@Deactivate
|
@Deactivate
|
||||||
public void deactivate() {
|
public void deactivate() {
|
||||||
super.activate();
|
rawLinks.removeEntryListener(linksListener);
|
||||||
log.info("Stopped");
|
log.info("Stopped");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -233,7 +235,7 @@ public class DistributedLinkStore
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private class RemoteLinkEventHandler extends RemoteEventHandler<LinkKey, DefaultLink> {
|
private class RemoteLinkEventHandler extends RemoteCacheEventHandler<LinkKey, DefaultLink> {
|
||||||
public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) {
|
public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) {
|
||||||
super(cache);
|
super(cache);
|
||||||
}
|
}
|
||||||
|
@ -20,6 +20,7 @@ import org.junit.After;
|
|||||||
import org.junit.AfterClass;
|
import org.junit.AfterClass;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.junit.BeforeClass;
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Ignore;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.onlab.onos.net.Device;
|
import org.onlab.onos.net.Device;
|
||||||
import org.onlab.onos.net.DeviceId;
|
import org.onlab.onos.net.DeviceId;
|
||||||
@ -35,12 +36,17 @@ import org.onlab.onos.net.provider.ProviderId;
|
|||||||
import org.onlab.onos.store.common.StoreManager;
|
import org.onlab.onos.store.common.StoreManager;
|
||||||
import org.onlab.onos.store.common.StoreService;
|
import org.onlab.onos.store.common.StoreService;
|
||||||
import org.onlab.onos.store.common.TestStoreManager;
|
import org.onlab.onos.store.common.TestStoreManager;
|
||||||
|
import org.onlab.onos.store.serializers.KryoSerializationManager;
|
||||||
|
import org.onlab.onos.store.serializers.KryoSerializationService;
|
||||||
|
|
||||||
import com.google.common.collect.Iterables;
|
import com.google.common.collect.Iterables;
|
||||||
import com.google.common.collect.Sets;
|
import com.google.common.collect.Sets;
|
||||||
import com.hazelcast.config.Config;
|
import com.hazelcast.config.Config;
|
||||||
import com.hazelcast.core.Hazelcast;
|
import com.hazelcast.core.Hazelcast;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test of the Hazelcast based distributed DeviceStore implementation.
|
||||||
|
*/
|
||||||
public class DistributedDeviceStoreTest {
|
public class DistributedDeviceStoreTest {
|
||||||
|
|
||||||
private static final ProviderId PID = new ProviderId("of", "foo");
|
private static final ProviderId PID = new ProviderId("of", "foo");
|
||||||
@ -57,6 +63,7 @@ public class DistributedDeviceStoreTest {
|
|||||||
private static final PortNumber P3 = PortNumber.portNumber(3);
|
private static final PortNumber P3 = PortNumber.portNumber(3);
|
||||||
|
|
||||||
private DistributedDeviceStore deviceStore;
|
private DistributedDeviceStore deviceStore;
|
||||||
|
private KryoSerializationManager serializationMgr;
|
||||||
|
|
||||||
private StoreManager storeManager;
|
private StoreManager storeManager;
|
||||||
|
|
||||||
@ -78,7 +85,10 @@ public class DistributedDeviceStoreTest {
|
|||||||
storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
|
storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
|
||||||
storeManager.activate();
|
storeManager.activate();
|
||||||
|
|
||||||
deviceStore = new TestDistributedDeviceStore(storeManager);
|
serializationMgr = new KryoSerializationManager();
|
||||||
|
serializationMgr.activate();
|
||||||
|
|
||||||
|
deviceStore = new TestDistributedDeviceStore(storeManager, serializationMgr);
|
||||||
deviceStore.activate();
|
deviceStore.activate();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -86,6 +96,8 @@ public class DistributedDeviceStoreTest {
|
|||||||
public void tearDown() throws Exception {
|
public void tearDown() throws Exception {
|
||||||
deviceStore.deactivate();
|
deviceStore.deactivate();
|
||||||
|
|
||||||
|
serializationMgr.deactivate();
|
||||||
|
|
||||||
storeManager.deactivate();
|
storeManager.deactivate();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -326,6 +338,7 @@ public class DistributedDeviceStoreTest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// TODO add test for Port events when we have them
|
// TODO add test for Port events when we have them
|
||||||
|
@Ignore("Ignore until Delegate spec. is clear.")
|
||||||
@Test
|
@Test
|
||||||
public final void testEvents() throws InterruptedException {
|
public final void testEvents() throws InterruptedException {
|
||||||
final CountDownLatch addLatch = new CountDownLatch(1);
|
final CountDownLatch addLatch = new CountDownLatch(1);
|
||||||
@ -379,8 +392,10 @@ public class DistributedDeviceStoreTest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private class TestDistributedDeviceStore extends DistributedDeviceStore {
|
private class TestDistributedDeviceStore extends DistributedDeviceStore {
|
||||||
public TestDistributedDeviceStore(StoreService storeService) {
|
public TestDistributedDeviceStore(StoreService storeService,
|
||||||
|
KryoSerializationService kryoSerializationService) {
|
||||||
this.storeService = storeService;
|
this.storeService = storeService;
|
||||||
|
this.kryoSerializationService = kryoSerializationService;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -15,6 +15,7 @@ import org.junit.After;
|
|||||||
import org.junit.AfterClass;
|
import org.junit.AfterClass;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.junit.BeforeClass;
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Ignore;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.onlab.onos.net.ConnectPoint;
|
import org.onlab.onos.net.ConnectPoint;
|
||||||
import org.onlab.onos.net.DeviceId;
|
import org.onlab.onos.net.DeviceId;
|
||||||
@ -29,27 +30,28 @@ import org.onlab.onos.net.provider.ProviderId;
|
|||||||
import org.onlab.onos.store.common.StoreManager;
|
import org.onlab.onos.store.common.StoreManager;
|
||||||
import org.onlab.onos.store.common.StoreService;
|
import org.onlab.onos.store.common.StoreService;
|
||||||
import org.onlab.onos.store.common.TestStoreManager;
|
import org.onlab.onos.store.common.TestStoreManager;
|
||||||
|
import org.onlab.onos.store.serializers.KryoSerializationManager;
|
||||||
|
import org.onlab.onos.store.serializers.KryoSerializationService;
|
||||||
|
|
||||||
import com.google.common.collect.Iterables;
|
import com.google.common.collect.Iterables;
|
||||||
import com.hazelcast.config.Config;
|
import com.hazelcast.config.Config;
|
||||||
import com.hazelcast.core.Hazelcast;
|
import com.hazelcast.core.Hazelcast;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test of the Hazelcast based distributed LinkStore implementation.
|
||||||
|
*/
|
||||||
public class DistributedLinkStoreTest {
|
public class DistributedLinkStoreTest {
|
||||||
|
|
||||||
private static final ProviderId PID = new ProviderId("of", "foo");
|
private static final ProviderId PID = new ProviderId("of", "foo");
|
||||||
private static final DeviceId DID1 = deviceId("of:foo");
|
private static final DeviceId DID1 = deviceId("of:foo");
|
||||||
private static final DeviceId DID2 = deviceId("of:bar");
|
private static final DeviceId DID2 = deviceId("of:bar");
|
||||||
// private static final String MFR = "whitebox";
|
|
||||||
// private static final String HW = "1.1.x";
|
|
||||||
// private static final String SW1 = "3.8.1";
|
|
||||||
// private static final String SW2 = "3.9.5";
|
|
||||||
// private static final String SN = "43311-12345";
|
|
||||||
|
|
||||||
private static final PortNumber P1 = PortNumber.portNumber(1);
|
private static final PortNumber P1 = PortNumber.portNumber(1);
|
||||||
private static final PortNumber P2 = PortNumber.portNumber(2);
|
private static final PortNumber P2 = PortNumber.portNumber(2);
|
||||||
private static final PortNumber P3 = PortNumber.portNumber(3);
|
private static final PortNumber P3 = PortNumber.portNumber(3);
|
||||||
|
|
||||||
private StoreManager storeManager;
|
private StoreManager storeManager;
|
||||||
|
private KryoSerializationManager serializationMgr;
|
||||||
|
|
||||||
private DistributedLinkStore linkStore;
|
private DistributedLinkStore linkStore;
|
||||||
|
|
||||||
@ -69,13 +71,17 @@ public class DistributedLinkStoreTest {
|
|||||||
storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
|
storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
|
||||||
storeManager.activate();
|
storeManager.activate();
|
||||||
|
|
||||||
linkStore = new TestDistributedLinkStore(storeManager);
|
serializationMgr = new KryoSerializationManager();
|
||||||
|
serializationMgr.activate();
|
||||||
|
|
||||||
|
linkStore = new TestDistributedLinkStore(storeManager, serializationMgr);
|
||||||
linkStore.activate();
|
linkStore.activate();
|
||||||
}
|
}
|
||||||
|
|
||||||
@After
|
@After
|
||||||
public void tearDown() throws Exception {
|
public void tearDown() throws Exception {
|
||||||
linkStore.deactivate();
|
linkStore.deactivate();
|
||||||
|
serializationMgr.deactivate();
|
||||||
storeManager.deactivate();
|
storeManager.deactivate();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -302,6 +308,7 @@ public class DistributedLinkStoreTest {
|
|||||||
assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1));
|
assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Ignore("Ignore until Delegate spec. is clear.")
|
||||||
@Test
|
@Test
|
||||||
public final void testEvents() throws InterruptedException {
|
public final void testEvents() throws InterruptedException {
|
||||||
|
|
||||||
@ -354,8 +361,10 @@ public class DistributedLinkStoreTest {
|
|||||||
|
|
||||||
|
|
||||||
class TestDistributedLinkStore extends DistributedLinkStore {
|
class TestDistributedLinkStore extends DistributedLinkStore {
|
||||||
TestDistributedLinkStore(StoreService storeService) {
|
TestDistributedLinkStore(StoreService storeService,
|
||||||
|
KryoSerializationService kryoSerializationService) {
|
||||||
this.storeService = storeService;
|
this.storeService = storeService;
|
||||||
|
this.kryoSerializationService = kryoSerializationService;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -0,0 +1,103 @@
|
|||||||
|
package org.onlab.onos.store.serializers;
|
||||||
|
|
||||||
|
import java.net.URI;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.HashMap;
|
||||||
|
|
||||||
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
|
import org.apache.felix.scr.annotations.Component;
|
||||||
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
|
import org.apache.felix.scr.annotations.Service;
|
||||||
|
import org.onlab.onos.cluster.ControllerNode;
|
||||||
|
import org.onlab.onos.cluster.DefaultControllerNode;
|
||||||
|
import org.onlab.onos.cluster.NodeId;
|
||||||
|
import org.onlab.onos.net.ConnectPoint;
|
||||||
|
import org.onlab.onos.net.DefaultDevice;
|
||||||
|
import org.onlab.onos.net.DefaultLink;
|
||||||
|
import org.onlab.onos.net.DefaultPort;
|
||||||
|
import org.onlab.onos.net.Device;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.net.Element;
|
||||||
|
import org.onlab.onos.net.Link;
|
||||||
|
import org.onlab.onos.net.LinkKey;
|
||||||
|
import org.onlab.onos.net.MastershipRole;
|
||||||
|
import org.onlab.onos.net.Port;
|
||||||
|
import org.onlab.onos.net.PortNumber;
|
||||||
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
import org.onlab.packet.IpPrefix;
|
||||||
|
import org.onlab.util.KryoPool;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import de.javakaffee.kryoserializers.URISerializer;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Serialization service using Kryo.
|
||||||
|
*/
|
||||||
|
@Component(immediate = true)
|
||||||
|
@Service
|
||||||
|
public class KryoSerializationManager implements KryoSerializationService {
|
||||||
|
|
||||||
|
private final Logger log = LoggerFactory.getLogger(getClass());
|
||||||
|
private KryoPool serializerPool;
|
||||||
|
|
||||||
|
|
||||||
|
@Activate
|
||||||
|
public void activate() {
|
||||||
|
setupKryoPool();
|
||||||
|
log.info("Started");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Deactivate
|
||||||
|
public void deactivate() {
|
||||||
|
log.info("Stopped");
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets up the common serialzers pool.
|
||||||
|
*/
|
||||||
|
protected void setupKryoPool() {
|
||||||
|
// FIXME Slice out types used in common to separate pool/namespace.
|
||||||
|
serializerPool = KryoPool.newBuilder()
|
||||||
|
.register(ArrayList.class,
|
||||||
|
HashMap.class,
|
||||||
|
|
||||||
|
ControllerNode.State.class,
|
||||||
|
Device.Type.class,
|
||||||
|
|
||||||
|
DefaultControllerNode.class,
|
||||||
|
DefaultDevice.class,
|
||||||
|
MastershipRole.class,
|
||||||
|
Port.class,
|
||||||
|
Element.class,
|
||||||
|
|
||||||
|
Link.Type.class
|
||||||
|
)
|
||||||
|
.register(IpPrefix.class, new IpPrefixSerializer())
|
||||||
|
.register(URI.class, new URISerializer())
|
||||||
|
.register(NodeId.class, new NodeIdSerializer())
|
||||||
|
.register(ProviderId.class, new ProviderIdSerializer())
|
||||||
|
.register(DeviceId.class, new DeviceIdSerializer())
|
||||||
|
.register(PortNumber.class, new PortNumberSerializer())
|
||||||
|
.register(DefaultPort.class, new DefaultPortSerializer())
|
||||||
|
.register(LinkKey.class, new LinkKeySerializer())
|
||||||
|
.register(ConnectPoint.class, new ConnectPointSerializer())
|
||||||
|
.register(DefaultLink.class, new DefaultLinkSerializer())
|
||||||
|
.build()
|
||||||
|
.populate(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public byte[] serialize(final Object obj) {
|
||||||
|
return serializerPool.serialize(obj);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public <T> T deserialize(final byte[] bytes) {
|
||||||
|
if (bytes == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
return serializerPool.deserialize(bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
@ -0,0 +1,27 @@
|
|||||||
|
package org.onlab.onos.store.serializers;
|
||||||
|
|
||||||
|
// TODO: To be replaced with SerializationService from IOLoop activity
|
||||||
|
/**
|
||||||
|
* Service to serialize Objects into byte array.
|
||||||
|
*/
|
||||||
|
public interface KryoSerializationService {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Serializes the specified object into bytes using one of the
|
||||||
|
* pre-registered serializers.
|
||||||
|
*
|
||||||
|
* @param obj object to be serialized
|
||||||
|
* @return serialized bytes
|
||||||
|
*/
|
||||||
|
public byte[] serialize(final Object obj);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Deserializes the specified bytes into an object using one of the
|
||||||
|
* pre-registered serializers.
|
||||||
|
*
|
||||||
|
* @param bytes bytes to be deserialized
|
||||||
|
* @return deserialized object
|
||||||
|
*/
|
||||||
|
public <T> T deserialize(final byte[] bytes);
|
||||||
|
|
||||||
|
}
|
@ -20,7 +20,7 @@ import java.util.Set;
|
|||||||
import static org.slf4j.LoggerFactory.getLogger;
|
import static org.slf4j.LoggerFactory.getLogger;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Manages inventory of infrastructure DEVICES using trivial in-memory
|
* Manages inventory of infrastructure devices using trivial in-memory
|
||||||
* structures implementation.
|
* structures implementation.
|
||||||
*/
|
*/
|
||||||
@Component(immediate = true)
|
@Component(immediate = true)
|
||||||
@ -67,6 +67,11 @@ public class SimpleClusterStore
|
|||||||
return ControllerNode.State.ACTIVE;
|
return ControllerNode.State.ACTIVE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ControllerNode addNode(NodeId nodeId, IpPrefix ip, int tcpPort) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void removeNode(NodeId nodeId) {
|
public void removeNode(NodeId nodeId) {
|
||||||
}
|
}
|
||||||
|
@ -101,9 +101,6 @@ public class SimpleDeviceStore
|
|||||||
synchronized (this) {
|
synchronized (this) {
|
||||||
devices.put(deviceId, device);
|
devices.put(deviceId, device);
|
||||||
availableDevices.add(deviceId);
|
availableDevices.add(deviceId);
|
||||||
|
|
||||||
// For now claim the device as a master automatically.
|
|
||||||
// roles.put(deviceId, MastershipRole.MASTER);
|
|
||||||
}
|
}
|
||||||
return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, device, null);
|
return new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, device, null);
|
||||||
}
|
}
|
||||||
@ -189,7 +186,7 @@ public class SimpleDeviceStore
|
|||||||
new DefaultPort(device, portDescription.portNumber(),
|
new DefaultPort(device, portDescription.portNumber(),
|
||||||
portDescription.isEnabled());
|
portDescription.isEnabled());
|
||||||
ports.put(port.number(), updatedPort);
|
ports.put(port.number(), updatedPort);
|
||||||
return new DeviceEvent(PORT_UPDATED, device, port);
|
return new DeviceEvent(PORT_UPDATED, device, updatedPort);
|
||||||
}
|
}
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
@ -51,8 +51,6 @@ public class SimpleLinkStore
|
|||||||
private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create();
|
private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create();
|
||||||
private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create();
|
private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create();
|
||||||
|
|
||||||
private static final Set<Link> EMPTY = ImmutableSet.of();
|
|
||||||
|
|
||||||
@Activate
|
@Activate
|
||||||
public void activate() {
|
public void activate() {
|
||||||
log.info("Started");
|
log.info("Started");
|
||||||
|
@ -0,0 +1,376 @@
|
|||||||
|
/**
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
package org.onlab.onos.net.trivial.impl;
|
||||||
|
|
||||||
|
import static org.junit.Assert.*;
|
||||||
|
import static org.onlab.onos.net.Device.Type.SWITCH;
|
||||||
|
import static org.onlab.onos.net.DeviceId.deviceId;
|
||||||
|
import static org.onlab.onos.net.device.DeviceEvent.Type.*;
|
||||||
|
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.concurrent.CountDownLatch;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
|
import org.junit.After;
|
||||||
|
import org.junit.AfterClass;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Ignore;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.onlab.onos.net.Device;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.net.Port;
|
||||||
|
import org.onlab.onos.net.PortNumber;
|
||||||
|
import org.onlab.onos.net.device.DefaultDeviceDescription;
|
||||||
|
import org.onlab.onos.net.device.DefaultPortDescription;
|
||||||
|
import org.onlab.onos.net.device.DeviceDescription;
|
||||||
|
import org.onlab.onos.net.device.DeviceEvent;
|
||||||
|
import org.onlab.onos.net.device.DeviceStore;
|
||||||
|
import org.onlab.onos.net.device.DeviceStoreDelegate;
|
||||||
|
import org.onlab.onos.net.device.PortDescription;
|
||||||
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
|
||||||
|
import com.google.common.collect.Iterables;
|
||||||
|
import com.google.common.collect.Sets;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test of the simple DeviceStore implementation.
|
||||||
|
*/
|
||||||
|
public class SimpleDeviceStoreTest {
|
||||||
|
|
||||||
|
private static final ProviderId PID = new ProviderId("of", "foo");
|
||||||
|
private static final DeviceId DID1 = deviceId("of:foo");
|
||||||
|
private static final DeviceId DID2 = deviceId("of:bar");
|
||||||
|
private static final String MFR = "whitebox";
|
||||||
|
private static final String HW = "1.1.x";
|
||||||
|
private static final String SW1 = "3.8.1";
|
||||||
|
private static final String SW2 = "3.9.5";
|
||||||
|
private static final String SN = "43311-12345";
|
||||||
|
|
||||||
|
private static final PortNumber P1 = PortNumber.portNumber(1);
|
||||||
|
private static final PortNumber P2 = PortNumber.portNumber(2);
|
||||||
|
private static final PortNumber P3 = PortNumber.portNumber(3);
|
||||||
|
|
||||||
|
private SimpleDeviceStore simpleDeviceStore;
|
||||||
|
private DeviceStore deviceStore;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUpBeforeClass() throws Exception {
|
||||||
|
}
|
||||||
|
|
||||||
|
@AfterClass
|
||||||
|
public static void tearDownAfterClass() throws Exception {
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setUp() throws Exception {
|
||||||
|
simpleDeviceStore = new SimpleDeviceStore();
|
||||||
|
simpleDeviceStore.activate();
|
||||||
|
deviceStore = simpleDeviceStore;
|
||||||
|
}
|
||||||
|
|
||||||
|
@After
|
||||||
|
public void tearDown() throws Exception {
|
||||||
|
simpleDeviceStore.deactivate();
|
||||||
|
}
|
||||||
|
|
||||||
|
private void putDevice(DeviceId deviceId, String swVersion) {
|
||||||
|
DeviceDescription description =
|
||||||
|
new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR,
|
||||||
|
HW, swVersion, SN);
|
||||||
|
deviceStore.createOrUpdateDevice(PID, deviceId, description);
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void assertDevice(DeviceId id, String swVersion, Device device) {
|
||||||
|
assertNotNull(device);
|
||||||
|
assertEquals(id, device.id());
|
||||||
|
assertEquals(MFR, device.manufacturer());
|
||||||
|
assertEquals(HW, device.hwVersion());
|
||||||
|
assertEquals(swVersion, device.swVersion());
|
||||||
|
assertEquals(SN, device.serialNumber());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetDeviceCount() {
|
||||||
|
assertEquals("initialy empty", 0, deviceStore.getDeviceCount());
|
||||||
|
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
putDevice(DID2, SW2);
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
|
||||||
|
assertEquals("expect 2 uniq devices", 2, deviceStore.getDeviceCount());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetDevices() {
|
||||||
|
assertEquals("initialy empty", 0, Iterables.size(deviceStore.getDevices()));
|
||||||
|
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
putDevice(DID2, SW2);
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
|
||||||
|
assertEquals("expect 2 uniq devices",
|
||||||
|
2, Iterables.size(deviceStore.getDevices()));
|
||||||
|
|
||||||
|
Map<DeviceId, Device> devices = new HashMap<>();
|
||||||
|
for (Device device : deviceStore.getDevices()) {
|
||||||
|
devices.put(device.id(), device);
|
||||||
|
}
|
||||||
|
|
||||||
|
assertDevice(DID1, SW1, devices.get(DID1));
|
||||||
|
assertDevice(DID2, SW2, devices.get(DID2));
|
||||||
|
|
||||||
|
// add case for new node?
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetDevice() {
|
||||||
|
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
|
||||||
|
assertDevice(DID1, SW1, deviceStore.getDevice(DID1));
|
||||||
|
assertNull("DID2 shouldn't be there", deviceStore.getDevice(DID2));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testCreateOrUpdateDevice() {
|
||||||
|
DeviceDescription description =
|
||||||
|
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
|
||||||
|
HW, SW1, SN);
|
||||||
|
DeviceEvent event = deviceStore.createOrUpdateDevice(PID, DID1, description);
|
||||||
|
assertEquals(DEVICE_ADDED, event.type());
|
||||||
|
assertDevice(DID1, SW1, event.subject());
|
||||||
|
|
||||||
|
DeviceDescription description2 =
|
||||||
|
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
|
||||||
|
HW, SW2, SN);
|
||||||
|
DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2);
|
||||||
|
assertEquals(DEVICE_UPDATED, event2.type());
|
||||||
|
assertDevice(DID1, SW2, event2.subject());
|
||||||
|
|
||||||
|
assertNull("No change expected", deviceStore.createOrUpdateDevice(PID, DID1, description2));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testMarkOffline() {
|
||||||
|
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
assertTrue(deviceStore.isAvailable(DID1));
|
||||||
|
|
||||||
|
DeviceEvent event = deviceStore.markOffline(DID1);
|
||||||
|
assertEquals(DEVICE_AVAILABILITY_CHANGED, event.type());
|
||||||
|
assertDevice(DID1, SW1, event.subject());
|
||||||
|
assertFalse(deviceStore.isAvailable(DID1));
|
||||||
|
|
||||||
|
DeviceEvent event2 = deviceStore.markOffline(DID1);
|
||||||
|
assertNull("No change, no event", event2);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testUpdatePorts() {
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
List<PortDescription> pds = Arrays.<PortDescription>asList(
|
||||||
|
new DefaultPortDescription(P1, true),
|
||||||
|
new DefaultPortDescription(P2, true)
|
||||||
|
);
|
||||||
|
|
||||||
|
List<DeviceEvent> events = deviceStore.updatePorts(DID1, pds);
|
||||||
|
|
||||||
|
Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2);
|
||||||
|
for (DeviceEvent event : events) {
|
||||||
|
assertEquals(PORT_ADDED, event.type());
|
||||||
|
assertDevice(DID1, SW1, event.subject());
|
||||||
|
assertTrue("PortNumber is one of expected",
|
||||||
|
expectedPorts.remove(event.port().number()));
|
||||||
|
assertTrue("Port is enabled", event.port().isEnabled());
|
||||||
|
}
|
||||||
|
assertTrue("Event for all expectedport appeared", expectedPorts.isEmpty());
|
||||||
|
|
||||||
|
|
||||||
|
List<PortDescription> pds2 = Arrays.<PortDescription>asList(
|
||||||
|
new DefaultPortDescription(P1, false),
|
||||||
|
new DefaultPortDescription(P2, true),
|
||||||
|
new DefaultPortDescription(P3, true)
|
||||||
|
);
|
||||||
|
|
||||||
|
events = deviceStore.updatePorts(DID1, pds2);
|
||||||
|
assertFalse("event should be triggered", events.isEmpty());
|
||||||
|
for (DeviceEvent event : events) {
|
||||||
|
PortNumber num = event.port().number();
|
||||||
|
if (P1.equals(num)) {
|
||||||
|
assertEquals(PORT_UPDATED, event.type());
|
||||||
|
assertDevice(DID1, SW1, event.subject());
|
||||||
|
assertFalse("Port is disabled", event.port().isEnabled());
|
||||||
|
} else if (P2.equals(num)) {
|
||||||
|
fail("P2 event not expected.");
|
||||||
|
} else if (P3.equals(num)) {
|
||||||
|
assertEquals(PORT_ADDED, event.type());
|
||||||
|
assertDevice(DID1, SW1, event.subject());
|
||||||
|
assertTrue("Port is enabled", event.port().isEnabled());
|
||||||
|
} else {
|
||||||
|
fail("Unknown port number encountered: " + num);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
List<PortDescription> pds3 = Arrays.<PortDescription>asList(
|
||||||
|
new DefaultPortDescription(P1, false),
|
||||||
|
new DefaultPortDescription(P2, true)
|
||||||
|
);
|
||||||
|
events = deviceStore.updatePorts(DID1, pds3);
|
||||||
|
assertFalse("event should be triggered", events.isEmpty());
|
||||||
|
for (DeviceEvent event : events) {
|
||||||
|
PortNumber num = event.port().number();
|
||||||
|
if (P1.equals(num)) {
|
||||||
|
fail("P1 event not expected.");
|
||||||
|
} else if (P2.equals(num)) {
|
||||||
|
fail("P2 event not expected.");
|
||||||
|
} else if (P3.equals(num)) {
|
||||||
|
assertEquals(PORT_REMOVED, event.type());
|
||||||
|
assertDevice(DID1, SW1, event.subject());
|
||||||
|
assertTrue("Port was enabled", event.port().isEnabled());
|
||||||
|
} else {
|
||||||
|
fail("Unknown port number encountered: " + num);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testUpdatePortStatus() {
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
List<PortDescription> pds = Arrays.<PortDescription>asList(
|
||||||
|
new DefaultPortDescription(P1, true)
|
||||||
|
);
|
||||||
|
deviceStore.updatePorts(DID1, pds);
|
||||||
|
|
||||||
|
DeviceEvent event = deviceStore.updatePortStatus(DID1,
|
||||||
|
new DefaultPortDescription(P1, false));
|
||||||
|
assertEquals(PORT_UPDATED, event.type());
|
||||||
|
assertDevice(DID1, SW1, event.subject());
|
||||||
|
assertEquals(P1, event.port().number());
|
||||||
|
assertFalse("Port is disabled", event.port().isEnabled());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetPorts() {
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
putDevice(DID2, SW1);
|
||||||
|
List<PortDescription> pds = Arrays.<PortDescription>asList(
|
||||||
|
new DefaultPortDescription(P1, true),
|
||||||
|
new DefaultPortDescription(P2, true)
|
||||||
|
);
|
||||||
|
deviceStore.updatePorts(DID1, pds);
|
||||||
|
|
||||||
|
Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2);
|
||||||
|
List<Port> ports = deviceStore.getPorts(DID1);
|
||||||
|
for (Port port : ports) {
|
||||||
|
assertTrue("Port is enabled", port.isEnabled());
|
||||||
|
assertTrue("PortNumber is one of expected",
|
||||||
|
expectedPorts.remove(port.number()));
|
||||||
|
}
|
||||||
|
assertTrue("Event for all expectedport appeared", expectedPorts.isEmpty());
|
||||||
|
|
||||||
|
|
||||||
|
assertTrue("DID2 has no ports", deviceStore.getPorts(DID2).isEmpty());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetPort() {
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
putDevice(DID2, SW1);
|
||||||
|
List<PortDescription> pds = Arrays.<PortDescription>asList(
|
||||||
|
new DefaultPortDescription(P1, true),
|
||||||
|
new DefaultPortDescription(P2, false)
|
||||||
|
);
|
||||||
|
deviceStore.updatePorts(DID1, pds);
|
||||||
|
|
||||||
|
Port port1 = deviceStore.getPort(DID1, P1);
|
||||||
|
assertEquals(P1, port1.number());
|
||||||
|
assertTrue("Port is enabled", port1.isEnabled());
|
||||||
|
|
||||||
|
Port port2 = deviceStore.getPort(DID1, P2);
|
||||||
|
assertEquals(P2, port2.number());
|
||||||
|
assertFalse("Port is disabled", port2.isEnabled());
|
||||||
|
|
||||||
|
Port port3 = deviceStore.getPort(DID1, P3);
|
||||||
|
assertNull("P3 not expected", port3);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testRemoveDevice() {
|
||||||
|
putDevice(DID1, SW1);
|
||||||
|
putDevice(DID2, SW1);
|
||||||
|
|
||||||
|
assertEquals(2, deviceStore.getDeviceCount());
|
||||||
|
|
||||||
|
DeviceEvent event = deviceStore.removeDevice(DID1);
|
||||||
|
assertEquals(DEVICE_REMOVED, event.type());
|
||||||
|
assertDevice(DID1, SW1, event.subject());
|
||||||
|
|
||||||
|
assertEquals(1, deviceStore.getDeviceCount());
|
||||||
|
}
|
||||||
|
|
||||||
|
// If Delegates should be called only on remote events,
|
||||||
|
// then Simple* should never call them, thus not test required.
|
||||||
|
// TODO add test for Port events when we have them
|
||||||
|
@Ignore("Ignore until Delegate spec. is clear.")
|
||||||
|
@Test
|
||||||
|
public final void testEvents() throws InterruptedException {
|
||||||
|
final CountDownLatch addLatch = new CountDownLatch(1);
|
||||||
|
DeviceStoreDelegate checkAdd = new DeviceStoreDelegate() {
|
||||||
|
@Override
|
||||||
|
public void notify(DeviceEvent event) {
|
||||||
|
assertEquals(DEVICE_ADDED, event.type());
|
||||||
|
assertDevice(DID1, SW1, event.subject());
|
||||||
|
addLatch.countDown();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
final CountDownLatch updateLatch = new CountDownLatch(1);
|
||||||
|
DeviceStoreDelegate checkUpdate = new DeviceStoreDelegate() {
|
||||||
|
@Override
|
||||||
|
public void notify(DeviceEvent event) {
|
||||||
|
assertEquals(DEVICE_UPDATED, event.type());
|
||||||
|
assertDevice(DID1, SW2, event.subject());
|
||||||
|
updateLatch.countDown();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
final CountDownLatch removeLatch = new CountDownLatch(1);
|
||||||
|
DeviceStoreDelegate checkRemove = new DeviceStoreDelegate() {
|
||||||
|
@Override
|
||||||
|
public void notify(DeviceEvent event) {
|
||||||
|
assertEquals(DEVICE_REMOVED, event.type());
|
||||||
|
assertDevice(DID1, SW2, event.subject());
|
||||||
|
removeLatch.countDown();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
DeviceDescription description =
|
||||||
|
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
|
||||||
|
HW, SW1, SN);
|
||||||
|
deviceStore.setDelegate(checkAdd);
|
||||||
|
deviceStore.createOrUpdateDevice(PID, DID1, description);
|
||||||
|
assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS));
|
||||||
|
|
||||||
|
|
||||||
|
DeviceDescription description2 =
|
||||||
|
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
|
||||||
|
HW, SW2, SN);
|
||||||
|
deviceStore.unsetDelegate(checkAdd);
|
||||||
|
deviceStore.setDelegate(checkUpdate);
|
||||||
|
deviceStore.createOrUpdateDevice(PID, DID1, description2);
|
||||||
|
assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS));
|
||||||
|
|
||||||
|
deviceStore.unsetDelegate(checkUpdate);
|
||||||
|
deviceStore.setDelegate(checkRemove);
|
||||||
|
deviceStore.removeDevice(DID1);
|
||||||
|
assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS));
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,346 @@
|
|||||||
|
package org.onlab.onos.net.trivial.impl;
|
||||||
|
|
||||||
|
import static org.junit.Assert.*;
|
||||||
|
import static org.onlab.onos.net.DeviceId.deviceId;
|
||||||
|
import static org.onlab.onos.net.Link.Type.*;
|
||||||
|
import static org.onlab.onos.net.link.LinkEvent.Type.*;
|
||||||
|
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.concurrent.CountDownLatch;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
|
import org.junit.After;
|
||||||
|
import org.junit.AfterClass;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Ignore;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.onlab.onos.net.ConnectPoint;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.net.Link;
|
||||||
|
import org.onlab.onos.net.LinkKey;
|
||||||
|
import org.onlab.onos.net.PortNumber;
|
||||||
|
import org.onlab.onos.net.Link.Type;
|
||||||
|
import org.onlab.onos.net.link.DefaultLinkDescription;
|
||||||
|
import org.onlab.onos.net.link.LinkEvent;
|
||||||
|
import org.onlab.onos.net.link.LinkStore;
|
||||||
|
import org.onlab.onos.net.link.LinkStoreDelegate;
|
||||||
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
|
||||||
|
import com.google.common.collect.Iterables;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test of the simple LinkStore implementation.
|
||||||
|
*/
|
||||||
|
public class SimpleLinkStoreTest {
|
||||||
|
|
||||||
|
private static final ProviderId PID = new ProviderId("of", "foo");
|
||||||
|
private static final DeviceId DID1 = deviceId("of:foo");
|
||||||
|
private static final DeviceId DID2 = deviceId("of:bar");
|
||||||
|
|
||||||
|
private static final PortNumber P1 = PortNumber.portNumber(1);
|
||||||
|
private static final PortNumber P2 = PortNumber.portNumber(2);
|
||||||
|
private static final PortNumber P3 = PortNumber.portNumber(3);
|
||||||
|
|
||||||
|
|
||||||
|
private SimpleLinkStore simpleLinkStore;
|
||||||
|
private LinkStore linkStore;
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUpBeforeClass() throws Exception {
|
||||||
|
}
|
||||||
|
|
||||||
|
@AfterClass
|
||||||
|
public static void tearDownAfterClass() throws Exception {
|
||||||
|
}
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setUp() throws Exception {
|
||||||
|
simpleLinkStore = new SimpleLinkStore();
|
||||||
|
simpleLinkStore.activate();
|
||||||
|
linkStore = simpleLinkStore;
|
||||||
|
}
|
||||||
|
|
||||||
|
@After
|
||||||
|
public void tearDown() throws Exception {
|
||||||
|
simpleLinkStore.deactivate();
|
||||||
|
}
|
||||||
|
|
||||||
|
private void putLink(DeviceId srcId, PortNumber srcNum,
|
||||||
|
DeviceId dstId, PortNumber dstNum, Type type) {
|
||||||
|
ConnectPoint src = new ConnectPoint(srcId, srcNum);
|
||||||
|
ConnectPoint dst = new ConnectPoint(dstId, dstNum);
|
||||||
|
linkStore.createOrUpdateLink(PID, new DefaultLinkDescription(src, dst, type));
|
||||||
|
}
|
||||||
|
|
||||||
|
private void putLink(LinkKey key, Type type) {
|
||||||
|
putLink(key.src().deviceId(), key.src().port(),
|
||||||
|
key.dst().deviceId(), key.dst().port(),
|
||||||
|
type);
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void assertLink(DeviceId srcId, PortNumber srcNum,
|
||||||
|
DeviceId dstId, PortNumber dstNum, Type type,
|
||||||
|
Link link) {
|
||||||
|
assertEquals(srcId, link.src().deviceId());
|
||||||
|
assertEquals(srcNum, link.src().port());
|
||||||
|
assertEquals(dstId, link.dst().deviceId());
|
||||||
|
assertEquals(dstNum, link.dst().port());
|
||||||
|
assertEquals(type, link.type());
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void assertLink(LinkKey key, Type type, Link link) {
|
||||||
|
assertLink(key.src().deviceId(), key.src().port(),
|
||||||
|
key.dst().deviceId(), key.dst().port(),
|
||||||
|
type, link);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetLinkCount() {
|
||||||
|
assertEquals("initialy empty", 0, linkStore.getLinkCount());
|
||||||
|
|
||||||
|
putLink(DID1, P1, DID2, P2, DIRECT);
|
||||||
|
putLink(DID2, P2, DID1, P1, DIRECT);
|
||||||
|
putLink(DID1, P1, DID2, P2, DIRECT);
|
||||||
|
|
||||||
|
assertEquals("expecting 2 unique link", 2, linkStore.getLinkCount());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetLinks() {
|
||||||
|
assertEquals("initialy empty", 0,
|
||||||
|
Iterables.size(linkStore.getLinks()));
|
||||||
|
|
||||||
|
LinkKey linkId1 = new LinkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
|
||||||
|
LinkKey linkId2 = new LinkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
|
||||||
|
|
||||||
|
putLink(linkId1, DIRECT);
|
||||||
|
putLink(linkId2, DIRECT);
|
||||||
|
putLink(linkId1, DIRECT);
|
||||||
|
|
||||||
|
assertEquals("expecting 2 unique link", 2,
|
||||||
|
Iterables.size(linkStore.getLinks()));
|
||||||
|
|
||||||
|
Map<LinkKey, Link> links = new HashMap<>();
|
||||||
|
for (Link link : linkStore.getLinks()) {
|
||||||
|
links.put(new LinkKey(link.src(), link.dst()), link);
|
||||||
|
}
|
||||||
|
|
||||||
|
assertLink(linkId1, DIRECT, links.get(linkId1));
|
||||||
|
assertLink(linkId2, DIRECT, links.get(linkId2));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetDeviceEgressLinks() {
|
||||||
|
LinkKey linkId1 = new LinkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
|
||||||
|
LinkKey linkId2 = new LinkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
|
||||||
|
LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
|
||||||
|
|
||||||
|
putLink(linkId1, DIRECT);
|
||||||
|
putLink(linkId2, DIRECT);
|
||||||
|
putLink(linkId3, DIRECT);
|
||||||
|
|
||||||
|
// DID1,P1 => DID2,P2
|
||||||
|
// DID2,P2 => DID1,P1
|
||||||
|
// DID1,P2 => DID2,P3
|
||||||
|
|
||||||
|
Set<Link> links1 = linkStore.getDeviceEgressLinks(DID1);
|
||||||
|
assertEquals(2, links1.size());
|
||||||
|
// check
|
||||||
|
|
||||||
|
Set<Link> links2 = linkStore.getDeviceEgressLinks(DID2);
|
||||||
|
assertEquals(1, links2.size());
|
||||||
|
assertLink(linkId2, DIRECT, links2.iterator().next());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetDeviceIngressLinks() {
|
||||||
|
LinkKey linkId1 = new LinkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
|
||||||
|
LinkKey linkId2 = new LinkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
|
||||||
|
LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
|
||||||
|
|
||||||
|
putLink(linkId1, DIRECT);
|
||||||
|
putLink(linkId2, DIRECT);
|
||||||
|
putLink(linkId3, DIRECT);
|
||||||
|
|
||||||
|
// DID1,P1 => DID2,P2
|
||||||
|
// DID2,P2 => DID1,P1
|
||||||
|
// DID1,P2 => DID2,P3
|
||||||
|
|
||||||
|
Set<Link> links1 = linkStore.getDeviceIngressLinks(DID2);
|
||||||
|
assertEquals(2, links1.size());
|
||||||
|
// check
|
||||||
|
|
||||||
|
Set<Link> links2 = linkStore.getDeviceIngressLinks(DID1);
|
||||||
|
assertEquals(1, links2.size());
|
||||||
|
assertLink(linkId2, DIRECT, links2.iterator().next());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetLink() {
|
||||||
|
ConnectPoint src = new ConnectPoint(DID1, P1);
|
||||||
|
ConnectPoint dst = new ConnectPoint(DID2, P2);
|
||||||
|
LinkKey linkId1 = new LinkKey(src, dst);
|
||||||
|
|
||||||
|
putLink(linkId1, DIRECT);
|
||||||
|
|
||||||
|
Link link = linkStore.getLink(src, dst);
|
||||||
|
assertLink(linkId1, DIRECT, link);
|
||||||
|
|
||||||
|
assertNull("There shouldn't be reverese link",
|
||||||
|
linkStore.getLink(dst, src));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetEgressLinks() {
|
||||||
|
final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
|
||||||
|
final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
|
||||||
|
LinkKey linkId1 = new LinkKey(d1P1, d2P2);
|
||||||
|
LinkKey linkId2 = new LinkKey(d2P2, d1P1);
|
||||||
|
LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
|
||||||
|
|
||||||
|
putLink(linkId1, DIRECT);
|
||||||
|
putLink(linkId2, DIRECT);
|
||||||
|
putLink(linkId3, DIRECT);
|
||||||
|
|
||||||
|
// DID1,P1 => DID2,P2
|
||||||
|
// DID2,P2 => DID1,P1
|
||||||
|
// DID1,P2 => DID2,P3
|
||||||
|
|
||||||
|
Set<Link> links1 = linkStore.getEgressLinks(d1P1);
|
||||||
|
assertEquals(1, links1.size());
|
||||||
|
assertLink(linkId1, DIRECT, links1.iterator().next());
|
||||||
|
|
||||||
|
Set<Link> links2 = linkStore.getEgressLinks(d2P2);
|
||||||
|
assertEquals(1, links2.size());
|
||||||
|
assertLink(linkId2, DIRECT, links2.iterator().next());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testGetIngressLinks() {
|
||||||
|
final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
|
||||||
|
final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
|
||||||
|
LinkKey linkId1 = new LinkKey(d1P1, d2P2);
|
||||||
|
LinkKey linkId2 = new LinkKey(d2P2, d1P1);
|
||||||
|
LinkKey linkId3 = new LinkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
|
||||||
|
|
||||||
|
putLink(linkId1, DIRECT);
|
||||||
|
putLink(linkId2, DIRECT);
|
||||||
|
putLink(linkId3, DIRECT);
|
||||||
|
|
||||||
|
// DID1,P1 => DID2,P2
|
||||||
|
// DID2,P2 => DID1,P1
|
||||||
|
// DID1,P2 => DID2,P3
|
||||||
|
|
||||||
|
Set<Link> links1 = linkStore.getIngressLinks(d2P2);
|
||||||
|
assertEquals(1, links1.size());
|
||||||
|
assertLink(linkId1, DIRECT, links1.iterator().next());
|
||||||
|
|
||||||
|
Set<Link> links2 = linkStore.getIngressLinks(d1P1);
|
||||||
|
assertEquals(1, links2.size());
|
||||||
|
assertLink(linkId2, DIRECT, links2.iterator().next());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testCreateOrUpdateLink() {
|
||||||
|
ConnectPoint src = new ConnectPoint(DID1, P1);
|
||||||
|
ConnectPoint dst = new ConnectPoint(DID2, P2);
|
||||||
|
|
||||||
|
// add link
|
||||||
|
LinkEvent event = linkStore.createOrUpdateLink(PID,
|
||||||
|
new DefaultLinkDescription(src, dst, INDIRECT));
|
||||||
|
|
||||||
|
assertLink(DID1, P1, DID2, P2, INDIRECT, event.subject());
|
||||||
|
assertEquals(LINK_ADDED, event.type());
|
||||||
|
|
||||||
|
// update link type
|
||||||
|
LinkEvent event2 = linkStore.createOrUpdateLink(PID,
|
||||||
|
new DefaultLinkDescription(src, dst, DIRECT));
|
||||||
|
|
||||||
|
assertLink(DID1, P1, DID2, P2, DIRECT, event2.subject());
|
||||||
|
assertEquals(LINK_UPDATED, event2.type());
|
||||||
|
|
||||||
|
// no change
|
||||||
|
LinkEvent event3 = linkStore.createOrUpdateLink(PID,
|
||||||
|
new DefaultLinkDescription(src, dst, DIRECT));
|
||||||
|
|
||||||
|
assertNull("No change event expected", event3);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testRemoveLink() {
|
||||||
|
final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
|
||||||
|
final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
|
||||||
|
LinkKey linkId1 = new LinkKey(d1P1, d2P2);
|
||||||
|
LinkKey linkId2 = new LinkKey(d2P2, d1P1);
|
||||||
|
|
||||||
|
putLink(linkId1, DIRECT);
|
||||||
|
putLink(linkId2, DIRECT);
|
||||||
|
|
||||||
|
// DID1,P1 => DID2,P2
|
||||||
|
// DID2,P2 => DID1,P1
|
||||||
|
// DID1,P2 => DID2,P3
|
||||||
|
|
||||||
|
LinkEvent event = linkStore.removeLink(d1P1, d2P2);
|
||||||
|
assertEquals(LINK_REMOVED, event.type());
|
||||||
|
LinkEvent event2 = linkStore.removeLink(d1P1, d2P2);
|
||||||
|
assertNull(event2);
|
||||||
|
|
||||||
|
assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1));
|
||||||
|
}
|
||||||
|
|
||||||
|
// If Delegates should be called only on remote events,
|
||||||
|
// then Simple* should never call them, thus not test required.
|
||||||
|
@Ignore("Ignore until Delegate spec. is clear.")
|
||||||
|
@Test
|
||||||
|
public final void testEvents() throws InterruptedException {
|
||||||
|
|
||||||
|
final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
|
||||||
|
final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
|
||||||
|
final LinkKey linkId1 = new LinkKey(d1P1, d2P2);
|
||||||
|
|
||||||
|
final CountDownLatch addLatch = new CountDownLatch(1);
|
||||||
|
LinkStoreDelegate checkAdd = new LinkStoreDelegate() {
|
||||||
|
@Override
|
||||||
|
public void notify(LinkEvent event) {
|
||||||
|
assertEquals(LINK_ADDED, event.type());
|
||||||
|
assertLink(linkId1, INDIRECT, event.subject());
|
||||||
|
addLatch.countDown();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
final CountDownLatch updateLatch = new CountDownLatch(1);
|
||||||
|
LinkStoreDelegate checkUpdate = new LinkStoreDelegate() {
|
||||||
|
@Override
|
||||||
|
public void notify(LinkEvent event) {
|
||||||
|
assertEquals(LINK_UPDATED, event.type());
|
||||||
|
assertLink(linkId1, DIRECT, event.subject());
|
||||||
|
updateLatch.countDown();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
final CountDownLatch removeLatch = new CountDownLatch(1);
|
||||||
|
LinkStoreDelegate checkRemove = new LinkStoreDelegate() {
|
||||||
|
@Override
|
||||||
|
public void notify(LinkEvent event) {
|
||||||
|
assertEquals(LINK_REMOVED, event.type());
|
||||||
|
assertLink(linkId1, DIRECT, event.subject());
|
||||||
|
removeLatch.countDown();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
linkStore.setDelegate(checkAdd);
|
||||||
|
putLink(linkId1, INDIRECT);
|
||||||
|
assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS));
|
||||||
|
|
||||||
|
linkStore.unsetDelegate(checkAdd);
|
||||||
|
linkStore.setDelegate(checkUpdate);
|
||||||
|
putLink(linkId1, DIRECT);
|
||||||
|
assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS));
|
||||||
|
|
||||||
|
linkStore.unsetDelegate(checkUpdate);
|
||||||
|
linkStore.setDelegate(checkRemove);
|
||||||
|
linkStore.removeLink(d1P1, d2P2);
|
||||||
|
assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS));
|
||||||
|
}
|
||||||
|
}
|
@ -48,20 +48,17 @@
|
|||||||
description="ONOS core components">
|
description="ONOS core components">
|
||||||
<feature>onos-api</feature>
|
<feature>onos-api</feature>
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
|
<bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle>
|
<bundle>mvn:org.onlab.onos/onos-core-dist/1.0.0-SNAPSHOT</bundle>
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle>
|
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle>
|
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-hz-net/1.0.0-SNAPSHOT</bundle>
|
|
||||||
</feature>
|
</feature>
|
||||||
|
|
||||||
<feature name="onos-core-dist" version="1.0.0"
|
<feature name="onos-core-hazelcast" version="1.0.0"
|
||||||
description="ONOS core components">
|
description="ONOS core components built on hazelcast">
|
||||||
<feature>onos-api</feature>
|
<feature>onos-api</feature>
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
|
<bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle>
|
<bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle>
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle>
|
<bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle>
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle>
|
<bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle>
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-dist/1.0.0-SNAPSHOT</bundle>
|
<bundle>mvn:org.onlab.onos/onos-core-hz-net/1.0.0-SNAPSHOT</bundle>
|
||||||
</feature>
|
</feature>
|
||||||
|
|
||||||
<feature name="onos-core-trivial" version="1.0.0"
|
<feature name="onos-core-trivial" version="1.0.0"
|
||||||
|
@ -9,5 +9,5 @@
|
|||||||
nodes=$(env | sort | egrep "OC[0-9]+" | cut -d= -f2)
|
nodes=$(env | sort | egrep "OC[0-9]+" | cut -d= -f2)
|
||||||
|
|
||||||
onos-package
|
onos-package
|
||||||
for node in $nodes; do printf "%s: " $node; onos-install -f $node; done
|
for node in $nodes; do (printf "%s: %s\n" "$node" "`onos-install -f $node`")& done
|
||||||
for node in $nodes; do onos-wait-for-start $node; done
|
for node in $nodes; do onos-wait-for-start $node; done
|
||||||
|
@ -15,7 +15,7 @@ env JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
|
|||||||
|
|
||||||
pre-stop script
|
pre-stop script
|
||||||
/opt/onos/bin/onos halt 2>/opt/onos/var/stderr.log
|
/opt/onos/bin/onos halt 2>/opt/onos/var/stderr.log
|
||||||
sleep 3
|
sleep 2
|
||||||
end script
|
end script
|
||||||
|
|
||||||
script
|
script
|
||||||
|
@ -8,7 +8,21 @@
|
|||||||
|
|
||||||
remote=$ONOS_USER@${1:-$OCI}
|
remote=$ONOS_USER@${1:-$OCI}
|
||||||
|
|
||||||
|
# Generate a cluster.json from the ON* environment variables
|
||||||
|
CDEF_FILE=/tmp/cluster.json
|
||||||
|
echo "{ \"nodes\":[" > $CDEF_FILE
|
||||||
|
for node in $(env | sort | egrep "OC[2-9]+" | cut -d= -f2); do
|
||||||
|
echo " { \"id\": \"$node\", \"ip\": \"$node\", \"tcpPort\": 9876 }," >> $CDEF_FILE
|
||||||
|
done
|
||||||
|
echo " { \"id\": \"$OC1\", \"ip\": \"$OC1\", \"tcpPort\": 9876 }" >> $CDEF_FILE
|
||||||
|
echo "]}" >> $CDEF_FILE
|
||||||
|
|
||||||
ssh $remote "
|
ssh $remote "
|
||||||
sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \
|
sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \
|
||||||
$ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml
|
$ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml
|
||||||
|
|
||||||
|
echo \"onos.ip = \$(ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \
|
||||||
|
>> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties
|
||||||
"
|
"
|
||||||
|
|
||||||
|
scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/
|
@ -24,6 +24,7 @@ ssh $remote "
|
|||||||
# Make a link to the log file directory and make a home for auxiliaries
|
# Make a link to the log file directory and make a home for auxiliaries
|
||||||
ln -s $ONOS_INSTALL_DIR/$KARAF_DIST/data/log /opt/onos/log
|
ln -s $ONOS_INSTALL_DIR/$KARAF_DIST/data/log /opt/onos/log
|
||||||
mkdir $ONOS_INSTALL_DIR/var
|
mkdir $ONOS_INSTALL_DIR/var
|
||||||
|
mkdir $ONOS_INSTALL_DIR/config
|
||||||
|
|
||||||
# Install the upstart configuration file and setup options for debugging
|
# Install the upstart configuration file and setup options for debugging
|
||||||
sudo cp $ONOS_INSTALL_DIR/debian/onos.conf /etc/init/onos.conf
|
sudo cp $ONOS_INSTALL_DIR/debian/onos.conf /etc/init/onos.conf
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
# Default virtual box ONOS instances 1,2 & ONOS mininet box
|
# Default virtual box ONOS instances 1,2 & ONOS mininet box
|
||||||
. $ONOS_ROOT/tools/test/cells/.reset
|
. $ONOS_ROOT/tools/test/cells/.reset
|
||||||
|
|
||||||
|
export ONOS_NIC=192.168.56.*
|
||||||
|
|
||||||
export OC1="192.168.56.101"
|
export OC1="192.168.56.101"
|
||||||
export OC2="192.168.56.102"
|
export OC2="192.168.56.102"
|
||||||
|
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
package org.onlab.util;
|
package org.onlab.util;
|
||||||
|
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.concurrent.ConcurrentLinkedQueue;
|
import java.util.concurrent.ConcurrentLinkedQueue;
|
||||||
@ -8,6 +9,8 @@ import org.apache.commons.lang3.tuple.Pair;
|
|||||||
|
|
||||||
import com.esotericsoftware.kryo.Kryo;
|
import com.esotericsoftware.kryo.Kryo;
|
||||||
import com.esotericsoftware.kryo.Serializer;
|
import com.esotericsoftware.kryo.Serializer;
|
||||||
|
import com.esotericsoftware.kryo.io.ByteBufferInput;
|
||||||
|
import com.esotericsoftware.kryo.io.ByteBufferOutput;
|
||||||
import com.esotericsoftware.kryo.io.Input;
|
import com.esotericsoftware.kryo.io.Input;
|
||||||
import com.esotericsoftware.kryo.io.Output;
|
import com.esotericsoftware.kryo.io.Output;
|
||||||
import com.google.common.collect.ImmutableList;
|
import com.google.common.collect.ImmutableList;
|
||||||
@ -173,6 +176,22 @@ public final class KryoPool {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Serializes given object to byte buffer using Kryo instance in pool.
|
||||||
|
*
|
||||||
|
* @param obj Object to serialize
|
||||||
|
* @param buffer to write to
|
||||||
|
*/
|
||||||
|
public void serialize(final Object obj, final ByteBuffer buffer) {
|
||||||
|
ByteBufferOutput out = new ByteBufferOutput(buffer);
|
||||||
|
Kryo kryo = getKryo();
|
||||||
|
try {
|
||||||
|
kryo.writeClassAndObject(out, obj);
|
||||||
|
} finally {
|
||||||
|
putKryo(kryo);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Deserializes given byte array to Object using Kryo instance in pool.
|
* Deserializes given byte array to Object using Kryo instance in pool.
|
||||||
*
|
*
|
||||||
@ -192,6 +211,24 @@ public final class KryoPool {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Deserializes given byte buffer to Object using Kryo instance in pool.
|
||||||
|
*
|
||||||
|
* @param buffer input with serialized bytes
|
||||||
|
* @param <T> deserialized Object type
|
||||||
|
* @return deserialized Object
|
||||||
|
*/
|
||||||
|
public <T> T deserialize(final ByteBuffer buffer) {
|
||||||
|
ByteBufferInput in = new ByteBufferInput(buffer);
|
||||||
|
Kryo kryo = getKryo();
|
||||||
|
try {
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
T obj = (T) kryo.readClassAndObject(in);
|
||||||
|
return obj;
|
||||||
|
} finally {
|
||||||
|
putKryo(kryo);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a Kryo instance with {@link #registeredTypes} pre-registered.
|
* Creates a Kryo instance with {@link #registeredTypes} pre-registered.
|
||||||
|
@ -53,6 +53,15 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
|
|||||||
super(timeout);
|
super(timeout);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the number of message stream in custody of the loop.
|
||||||
|
*
|
||||||
|
* @return number of message streams
|
||||||
|
*/
|
||||||
|
public int streamCount() {
|
||||||
|
return streams.size();
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a new message stream backed by the specified socket channel.
|
* Creates a new message stream backed by the specified socket channel.
|
||||||
*
|
*
|
||||||
@ -84,14 +93,9 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
|
|||||||
*
|
*
|
||||||
* @param key selection key holding the pending connect operation.
|
* @param key selection key holding the pending connect operation.
|
||||||
*/
|
*/
|
||||||
protected void connect(SelectionKey key) {
|
protected void connect(SelectionKey key) throws IOException {
|
||||||
try {
|
|
||||||
SocketChannel ch = (SocketChannel) key.channel();
|
SocketChannel ch = (SocketChannel) key.channel();
|
||||||
ch.finishConnect();
|
ch.finishConnect();
|
||||||
} catch (IOException | IllegalStateException e) {
|
|
||||||
log.warn("Unable to complete connection", e);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (key.isValid()) {
|
if (key.isValid()) {
|
||||||
key.interestOps(SelectionKey.OP_READ);
|
key.interestOps(SelectionKey.OP_READ);
|
||||||
}
|
}
|
||||||
@ -115,7 +119,11 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
|
|||||||
|
|
||||||
// If there is a pending connect operation, complete it.
|
// If there is a pending connect operation, complete it.
|
||||||
if (key.isConnectable()) {
|
if (key.isConnectable()) {
|
||||||
|
try {
|
||||||
connect(key);
|
connect(key);
|
||||||
|
} catch (IOException | IllegalStateException e) {
|
||||||
|
log.warn("Unable to complete connection", e);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If there is a read operation, slurp as much data as possible.
|
// If there is a read operation, slurp as much data as possible.
|
||||||
@ -182,9 +190,10 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
|
|||||||
* with a pending accept operation.
|
* with a pending accept operation.
|
||||||
*
|
*
|
||||||
* @param channel backing socket channel
|
* @param channel backing socket channel
|
||||||
|
* @return newly accepted message stream
|
||||||
*/
|
*/
|
||||||
public void acceptStream(SocketChannel channel) {
|
public S acceptStream(SocketChannel channel) {
|
||||||
createAndAdmit(channel, SelectionKey.OP_READ);
|
return createAndAdmit(channel, SelectionKey.OP_READ);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -193,9 +202,10 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
|
|||||||
* with a pending connect operation.
|
* with a pending connect operation.
|
||||||
*
|
*
|
||||||
* @param channel backing socket channel
|
* @param channel backing socket channel
|
||||||
|
* @return newly connected message stream
|
||||||
*/
|
*/
|
||||||
public void connectStream(SocketChannel channel) {
|
public S connectStream(SocketChannel channel) {
|
||||||
createAndAdmit(channel, SelectionKey.OP_CONNECT);
|
return createAndAdmit(channel, SelectionKey.OP_CONNECT);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -205,12 +215,14 @@ public abstract class IOLoop<M extends Message, S extends MessageStream<M>>
|
|||||||
* @param channel socket channel
|
* @param channel socket channel
|
||||||
* @param op pending operations mask to be applied to the selection
|
* @param op pending operations mask to be applied to the selection
|
||||||
* key as a set of initial interestedOps
|
* key as a set of initial interestedOps
|
||||||
|
* @return newly created message stream
|
||||||
*/
|
*/
|
||||||
private synchronized void createAndAdmit(SocketChannel channel, int op) {
|
private synchronized S createAndAdmit(SocketChannel channel, int op) {
|
||||||
S stream = createStream(channel);
|
S stream = createStream(channel);
|
||||||
streams.add(stream);
|
streams.add(stream);
|
||||||
newStreamRequests.add(new NewStreamRequest(stream, channel, op));
|
newStreamRequests.add(new NewStreamRequest(stream, channel, op));
|
||||||
selector.wakeup();
|
selector.wakeup();
|
||||||
|
return stream;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -10,6 +10,7 @@ import java.nio.channels.ByteChannel;
|
|||||||
import java.nio.channels.SelectionKey;
|
import java.nio.channels.SelectionKey;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
import java.util.Objects;
|
||||||
|
|
||||||
import static com.google.common.base.Preconditions.checkArgument;
|
import static com.google.common.base.Preconditions.checkArgument;
|
||||||
import static com.google.common.base.Preconditions.checkNotNull;
|
import static com.google.common.base.Preconditions.checkNotNull;
|
||||||
@ -170,7 +171,7 @@ public abstract class MessageStream<M extends Message> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Reads, withouth blocking, a list of messages from the stream.
|
* Reads, without blocking, a list of messages from the stream.
|
||||||
* The list will be empty if there were not messages pending.
|
* The list will be empty if there were not messages pending.
|
||||||
*
|
*
|
||||||
* @return list of messages or null if backing channel has been closed
|
* @return list of messages or null if backing channel has been closed
|
||||||
@ -262,7 +263,7 @@ public abstract class MessageStream<M extends Message> {
|
|||||||
try {
|
try {
|
||||||
channel.write(outbound);
|
channel.write(outbound);
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
if (!closed && !e.getMessage().equals("Broken pipe")) {
|
if (!closed && !Objects.equals(e.getMessage(), "Broken pipe")) {
|
||||||
log.warn("Unable to write data", e);
|
log.warn("Unable to write data", e);
|
||||||
ioError = e;
|
ioError = e;
|
||||||
}
|
}
|
||||||
|
@ -230,7 +230,7 @@ public class IOLoopTestClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void connect(SelectionKey key) {
|
protected void connect(SelectionKey key) throws IOException {
|
||||||
super.connect(key);
|
super.connect(key);
|
||||||
TestMessageStream b = (TestMessageStream) key.attachment();
|
TestMessageStream b = (TestMessageStream) key.attachment();
|
||||||
Worker w = ((CustomIOLoop) b.loop()).worker;
|
Worker w = ((CustomIOLoop) b.loop()).worker;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user