mirror of
https://github.com/opennetworkinglab/onos.git
synced 2025-10-16 18:02:05 +02:00
Merge remote-tracking branch 'origin/master'
This commit is contained in:
commit
c52a8a9e22
@ -1,4 +1,4 @@
|
|||||||
/**
|
/**
|
||||||
* Sample application for use in various experiments.
|
* Sample application for use in various experiments.
|
||||||
*/
|
*/
|
||||||
package org.onlab.onos.foo;
|
package org.onlab.onos.foo;
|
||||||
|
@ -103,4 +103,4 @@ public class FlowsListCommand extends AbstractShellCommand {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -0,0 +1,29 @@
|
|||||||
|
package org.onlab.onos.net.proxyarp;
|
||||||
|
|
||||||
|
import org.onlab.packet.Ethernet;
|
||||||
|
import org.onlab.packet.IpPrefix;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Service for processing arp requests on behalf of applications.
|
||||||
|
*/
|
||||||
|
public interface ProxyArpService {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns whether this particular ip address is known to the system.
|
||||||
|
*
|
||||||
|
* @param addr
|
||||||
|
* a ip address
|
||||||
|
* @return true if know, false otherwise
|
||||||
|
*/
|
||||||
|
boolean known(IpPrefix addr);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sends a reply for a given request. If the host is not known then the arp
|
||||||
|
* will be flooded at all edge ports.
|
||||||
|
*
|
||||||
|
* @param request
|
||||||
|
* an arp request
|
||||||
|
*/
|
||||||
|
void reply(Ethernet request);
|
||||||
|
|
||||||
|
}
|
@ -0,0 +1,4 @@
|
|||||||
|
/**
|
||||||
|
* Base abstractions related to the proxy arp service.
|
||||||
|
*/
|
||||||
|
package org.onlab.onos.net.proxyarp;
|
@ -0,0 +1,26 @@
|
|||||||
|
package org.onlab.onos.store;
|
||||||
|
|
||||||
|
import org.onlab.onos.cluster.MastershipTerm;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
|
||||||
|
// TODO: Consider renaming to DeviceClockService?
|
||||||
|
/**
|
||||||
|
* Interface for a logical clock service that vends per device timestamps.
|
||||||
|
*/
|
||||||
|
public interface ClockService {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a new timestamp for the specified deviceId.
|
||||||
|
* @param deviceId device identifier.
|
||||||
|
* @return timestamp.
|
||||||
|
*/
|
||||||
|
public Timestamp getTimestamp(DeviceId deviceId);
|
||||||
|
|
||||||
|
// TODO: Should this be here or separate as Admin service, etc.?
|
||||||
|
/**
|
||||||
|
* Updates the mastership term for the specified deviceId.
|
||||||
|
* @param deviceId device identifier.
|
||||||
|
* @param term mastership term.
|
||||||
|
*/
|
||||||
|
public void setMastershipTerm(DeviceId deviceId, MastershipTerm term);
|
||||||
|
}
|
@ -1,4 +1,4 @@
|
|||||||
/**
|
/**
|
||||||
* Abstractions for creating and interacting with distributed stores.
|
* Abstractions for creating and interacting with distributed stores.
|
||||||
*/
|
*/
|
||||||
package org.onlab.onos.store;
|
package org.onlab.onos.store;
|
||||||
|
@ -40,13 +40,14 @@
|
|||||||
Currently required for DistributedDeviceManagerTest. -->
|
Currently required for DistributedDeviceManagerTest. -->
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.onlab.onos</groupId>
|
<groupId>org.onlab.onos</groupId>
|
||||||
<artifactId>onos-core-store</artifactId>
|
<artifactId>onos-core-hz-net</artifactId>
|
||||||
<version>${project.version}</version>
|
<version>${project.version}</version>
|
||||||
<scope>test</scope>
|
<scope>test</scope>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.onlab.onos</groupId>
|
<groupId>org.onlab.onos</groupId>
|
||||||
<artifactId>onos-core-store</artifactId>
|
<!-- FIXME: should be somewhere else -->
|
||||||
|
<artifactId>onos-core-hz-common</artifactId>
|
||||||
<version>${project.version}</version>
|
<version>${project.version}</version>
|
||||||
<classifier>tests</classifier>
|
<classifier>tests</classifier>
|
||||||
<scope>test</scope>
|
<scope>test</scope>
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
/**
|
/**
|
||||||
* Subsystem for tracking controller cluster nodes.
|
* Subsystem for tracking controller cluster nodes.
|
||||||
*/
|
*/
|
||||||
package org.onlab.onos.cluster.impl;
|
package org.onlab.onos.cluster.impl;
|
||||||
|
@ -16,6 +16,7 @@ import org.onlab.onos.cluster.ClusterService;
|
|||||||
import org.onlab.onos.cluster.MastershipEvent;
|
import org.onlab.onos.cluster.MastershipEvent;
|
||||||
import org.onlab.onos.cluster.MastershipListener;
|
import org.onlab.onos.cluster.MastershipListener;
|
||||||
import org.onlab.onos.cluster.MastershipService;
|
import org.onlab.onos.cluster.MastershipService;
|
||||||
|
import org.onlab.onos.cluster.MastershipTerm;
|
||||||
import org.onlab.onos.event.AbstractListenerRegistry;
|
import org.onlab.onos.event.AbstractListenerRegistry;
|
||||||
import org.onlab.onos.event.EventDeliveryService;
|
import org.onlab.onos.event.EventDeliveryService;
|
||||||
import org.onlab.onos.net.Device;
|
import org.onlab.onos.net.Device;
|
||||||
@ -36,6 +37,7 @@ import org.onlab.onos.net.device.DeviceStoreDelegate;
|
|||||||
import org.onlab.onos.net.device.PortDescription;
|
import org.onlab.onos.net.device.PortDescription;
|
||||||
import org.onlab.onos.net.provider.AbstractProviderRegistry;
|
import org.onlab.onos.net.provider.AbstractProviderRegistry;
|
||||||
import org.onlab.onos.net.provider.AbstractProviderService;
|
import org.onlab.onos.net.provider.AbstractProviderService;
|
||||||
|
import org.onlab.onos.store.ClockService;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -44,8 +46,8 @@ import org.slf4j.Logger;
|
|||||||
@Component(immediate = true)
|
@Component(immediate = true)
|
||||||
@Service
|
@Service
|
||||||
public class DeviceManager
|
public class DeviceManager
|
||||||
extends AbstractProviderRegistry<DeviceProvider, DeviceProviderService>
|
extends AbstractProviderRegistry<DeviceProvider, DeviceProviderService>
|
||||||
implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
|
implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
|
||||||
|
|
||||||
private static final String DEVICE_ID_NULL = "Device ID cannot be null";
|
private static final String DEVICE_ID_NULL = "Device ID cannot be null";
|
||||||
private static final String PORT_NUMBER_NULL = "Port number cannot be null";
|
private static final String PORT_NUMBER_NULL = "Port number cannot be null";
|
||||||
@ -55,8 +57,8 @@ implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
|
|||||||
|
|
||||||
private final Logger log = getLogger(getClass());
|
private final Logger log = getLogger(getClass());
|
||||||
|
|
||||||
protected final AbstractListenerRegistry<DeviceEvent, DeviceListener>
|
protected final AbstractListenerRegistry<DeviceEvent, DeviceListener> listenerRegistry =
|
||||||
listenerRegistry = new AbstractListenerRegistry<>();
|
new AbstractListenerRegistry<>();
|
||||||
|
|
||||||
private final DeviceStoreDelegate delegate = new InternalStoreDelegate();
|
private final DeviceStoreDelegate delegate = new InternalStoreDelegate();
|
||||||
|
|
||||||
@ -74,6 +76,9 @@ implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
|
|||||||
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
||||||
protected MastershipService mastershipService;
|
protected MastershipService mastershipService;
|
||||||
|
|
||||||
|
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
||||||
|
protected ClockService clockService;
|
||||||
|
|
||||||
@Activate
|
@Activate
|
||||||
public void activate() {
|
public void activate() {
|
||||||
store.setDelegate(delegate);
|
store.setDelegate(delegate);
|
||||||
@ -164,7 +169,8 @@ implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected DeviceProviderService createProviderService(DeviceProvider provider) {
|
protected DeviceProviderService createProviderService(
|
||||||
|
DeviceProvider provider) {
|
||||||
return new InternalDeviceProviderService(provider);
|
return new InternalDeviceProviderService(provider);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -178,14 +184,16 @@ implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void deviceConnected(DeviceId deviceId, DeviceDescription deviceDescription) {
|
public void deviceConnected(DeviceId deviceId,
|
||||||
|
DeviceDescription deviceDescription) {
|
||||||
checkNotNull(deviceId, DEVICE_ID_NULL);
|
checkNotNull(deviceId, DEVICE_ID_NULL);
|
||||||
checkNotNull(deviceDescription, DEVICE_DESCRIPTION_NULL);
|
checkNotNull(deviceDescription, DEVICE_DESCRIPTION_NULL);
|
||||||
checkValidity();
|
checkValidity();
|
||||||
DeviceEvent event = store.createOrUpdateDevice(provider().id(),
|
DeviceEvent event = store.createOrUpdateDevice(provider().id(),
|
||||||
deviceId, deviceDescription);
|
deviceId, deviceDescription);
|
||||||
|
|
||||||
// If there was a change of any kind, trigger role selection process.
|
// If there was a change of any kind, trigger role selection
|
||||||
|
// process.
|
||||||
if (event != null) {
|
if (event != null) {
|
||||||
log.info("Device {} connected", deviceId);
|
log.info("Device {} connected", deviceId);
|
||||||
mastershipService.requestRoleFor(deviceId);
|
mastershipService.requestRoleFor(deviceId);
|
||||||
@ -207,25 +215,30 @@ implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void updatePorts(DeviceId deviceId, List<PortDescription> portDescriptions) {
|
public void updatePorts(DeviceId deviceId,
|
||||||
|
List<PortDescription> portDescriptions) {
|
||||||
checkNotNull(deviceId, DEVICE_ID_NULL);
|
checkNotNull(deviceId, DEVICE_ID_NULL);
|
||||||
checkNotNull(portDescriptions, "Port descriptions list cannot be null");
|
checkNotNull(portDescriptions,
|
||||||
|
"Port descriptions list cannot be null");
|
||||||
checkValidity();
|
checkValidity();
|
||||||
List<DeviceEvent> events = store.updatePorts(deviceId, portDescriptions);
|
List<DeviceEvent> events = store.updatePorts(deviceId,
|
||||||
|
portDescriptions);
|
||||||
for (DeviceEvent event : events) {
|
for (DeviceEvent event : events) {
|
||||||
post(event);
|
post(event);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void portStatusChanged(DeviceId deviceId, PortDescription portDescription) {
|
public void portStatusChanged(DeviceId deviceId,
|
||||||
|
PortDescription portDescription) {
|
||||||
checkNotNull(deviceId, DEVICE_ID_NULL);
|
checkNotNull(deviceId, DEVICE_ID_NULL);
|
||||||
checkNotNull(portDescription, PORT_DESCRIPTION_NULL);
|
checkNotNull(portDescription, PORT_DESCRIPTION_NULL);
|
||||||
checkValidity();
|
checkValidity();
|
||||||
DeviceEvent event = store.updatePortStatus(deviceId, portDescription);
|
DeviceEvent event = store.updatePortStatus(deviceId,
|
||||||
|
portDescription);
|
||||||
if (event != null) {
|
if (event != null) {
|
||||||
log.info("Device {} port {} status changed", deviceId,
|
log.info("Device {} port {} status changed", deviceId, event
|
||||||
event.port().number());
|
.port().number());
|
||||||
post(event);
|
post(event);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -233,8 +246,8 @@ implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
|
|||||||
@Override
|
@Override
|
||||||
public void unableToAssertRole(DeviceId deviceId, MastershipRole role) {
|
public void unableToAssertRole(DeviceId deviceId, MastershipRole role) {
|
||||||
// FIXME: implement response to this notification
|
// FIXME: implement response to this notification
|
||||||
log.warn("Failed to assert role [{}] onto Device {}",
|
log.warn("Failed to assert role [{}] onto Device {}", role,
|
||||||
role, deviceId);
|
deviceId);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -246,10 +259,14 @@ implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Intercepts mastership events
|
// Intercepts mastership events
|
||||||
private class InternalMastershipListener implements MastershipListener {
|
private class InternalMastershipListener
|
||||||
|
implements MastershipListener {
|
||||||
@Override
|
@Override
|
||||||
public void event(MastershipEvent event) {
|
public void event(MastershipEvent event) {
|
||||||
if (event.master().equals(clusterService.getLocalNode().id())) {
|
if (event.master().equals(clusterService.getLocalNode().id())) {
|
||||||
|
MastershipTerm term = mastershipService.requestTermService()
|
||||||
|
.getMastershipTerm(event.subject());
|
||||||
|
clockService.setMastershipTerm(event.subject(), term);
|
||||||
applyRole(event.subject(), MastershipRole.MASTER);
|
applyRole(event.subject(), MastershipRole.MASTER);
|
||||||
} else {
|
} else {
|
||||||
applyRole(event.subject(), MastershipRole.STANDBY);
|
applyRole(event.subject(), MastershipRole.STANDBY);
|
||||||
@ -258,7 +275,8 @@ implements DeviceService, DeviceAdminService, DeviceProviderRegistry {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Store delegate to re-post events emitted from the store.
|
// Store delegate to re-post events emitted from the store.
|
||||||
private class InternalStoreDelegate implements DeviceStoreDelegate {
|
private class InternalStoreDelegate
|
||||||
|
implements DeviceStoreDelegate {
|
||||||
@Override
|
@Override
|
||||||
public void notify(DeviceEvent event) {
|
public void notify(DeviceEvent event) {
|
||||||
post(event);
|
post(event);
|
||||||
|
@ -0,0 +1,100 @@
|
|||||||
|
package org.onlab.onos.proxyarp.impl;
|
||||||
|
|
||||||
|
import static com.google.common.base.Preconditions.checkArgument;
|
||||||
|
import static com.google.common.base.Preconditions.checkNotNull;
|
||||||
|
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
|
import org.apache.felix.scr.annotations.Reference;
|
||||||
|
import org.apache.felix.scr.annotations.ReferenceCardinality;
|
||||||
|
import org.onlab.onos.net.Host;
|
||||||
|
import org.onlab.onos.net.flow.DefaultTrafficTreatment;
|
||||||
|
import org.onlab.onos.net.flow.TrafficTreatment;
|
||||||
|
import org.onlab.onos.net.host.HostService;
|
||||||
|
import org.onlab.onos.net.packet.DefaultOutboundPacket;
|
||||||
|
import org.onlab.onos.net.packet.PacketService;
|
||||||
|
import org.onlab.onos.net.proxyarp.ProxyArpService;
|
||||||
|
import org.onlab.onos.net.topology.TopologyService;
|
||||||
|
import org.onlab.packet.ARP;
|
||||||
|
import org.onlab.packet.Ethernet;
|
||||||
|
import org.onlab.packet.IpPrefix;
|
||||||
|
import org.onlab.packet.VlanId;
|
||||||
|
|
||||||
|
public class ProxyArpManager implements ProxyArpService {
|
||||||
|
|
||||||
|
private static final String MAC_ADDR_NULL = "Mac address cannot be null.";
|
||||||
|
private static final String REQUEST_NULL = "Arp request cannot be null.";
|
||||||
|
private static final String REQUEST_NOT_ARP = "Ethernet frame does not contain ARP request.";
|
||||||
|
private static final String NOT_ARP_REQUEST = "ARP is not a request.";
|
||||||
|
|
||||||
|
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
||||||
|
protected HostService hostService;
|
||||||
|
|
||||||
|
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
||||||
|
protected PacketService packetService;
|
||||||
|
|
||||||
|
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
||||||
|
protected TopologyService topologyService;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean known(IpPrefix addr) {
|
||||||
|
checkNotNull(MAC_ADDR_NULL, addr);
|
||||||
|
Set<Host> hosts = hostService.getHostsByIp(addr);
|
||||||
|
return !hosts.isEmpty();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void reply(Ethernet request) {
|
||||||
|
checkNotNull(REQUEST_NULL, request);
|
||||||
|
checkArgument(request.getEtherType() == Ethernet.TYPE_ARP,
|
||||||
|
REQUEST_NOT_ARP);
|
||||||
|
ARP arp = (ARP) request.getPayload();
|
||||||
|
checkArgument(arp.getOpCode() == ARP.OP_REQUEST, NOT_ARP_REQUEST);
|
||||||
|
|
||||||
|
VlanId vlan = VlanId.vlanId(request.getVlanID());
|
||||||
|
Set<Host> hosts = hostService.getHostsByIp(IpPrefix.valueOf(arp
|
||||||
|
.getTargetProtocolAddress()));
|
||||||
|
|
||||||
|
Host h = null;
|
||||||
|
for (Host host : hosts) {
|
||||||
|
if (host.vlan().equals(vlan)) {
|
||||||
|
h = host;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (h == null) {
|
||||||
|
flood(request);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ethernet arpReply = buildArpReply(h, request);
|
||||||
|
// TODO: check send status with host service.
|
||||||
|
TrafficTreatment.Builder builder = new DefaultTrafficTreatment.Builder();
|
||||||
|
builder.setOutput(h.location().port());
|
||||||
|
packetService.emit(new DefaultOutboundPacket(h.location().deviceId(),
|
||||||
|
builder.build(), ByteBuffer.wrap(arpReply.serialize())));
|
||||||
|
}
|
||||||
|
|
||||||
|
private void flood(Ethernet request) {
|
||||||
|
// TODO: flood on all edge ports.
|
||||||
|
}
|
||||||
|
|
||||||
|
private Ethernet buildArpReply(Host h, Ethernet request) {
|
||||||
|
Ethernet eth = new Ethernet();
|
||||||
|
eth.setDestinationMACAddress(request.getSourceMACAddress());
|
||||||
|
eth.setSourceMACAddress(h.mac().getAddress());
|
||||||
|
eth.setEtherType(Ethernet.TYPE_ARP);
|
||||||
|
ARP arp = new ARP();
|
||||||
|
arp.setOpCode(ARP.OP_REPLY);
|
||||||
|
arp.setSenderHardwareAddress(h.mac().getAddress());
|
||||||
|
arp.setTargetHardwareAddress(request.getSourceMACAddress());
|
||||||
|
|
||||||
|
arp.setTargetProtocolAddress(((ARP) request.getPayload())
|
||||||
|
.getSenderProtocolAddress());
|
||||||
|
arp.setSenderProtocolAddress(h.ipAddresses().iterator().next().toInt());
|
||||||
|
eth.setPayload(arp);
|
||||||
|
return eth;
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,4 @@
|
|||||||
|
/**
|
||||||
|
* Core subsystem for responding to arp requests.
|
||||||
|
*/
|
||||||
|
package org.onlab.onos.proxyarp.impl;
|
@ -32,9 +32,9 @@ import org.onlab.onos.net.device.DeviceService;
|
|||||||
import org.onlab.onos.net.device.PortDescription;
|
import org.onlab.onos.net.device.PortDescription;
|
||||||
import org.onlab.onos.net.provider.AbstractProvider;
|
import org.onlab.onos.net.provider.AbstractProvider;
|
||||||
import org.onlab.onos.net.provider.ProviderId;
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
import org.onlab.onos.store.common.StoreManager;
|
||||||
|
import org.onlab.onos.store.common.TestStoreManager;
|
||||||
import org.onlab.onos.store.device.impl.DistributedDeviceStore;
|
import org.onlab.onos.store.device.impl.DistributedDeviceStore;
|
||||||
import org.onlab.onos.store.impl.StoreManager;
|
|
||||||
import org.onlab.onos.store.impl.TestStoreManager;
|
|
||||||
import org.onlab.packet.IpPrefix;
|
import org.onlab.packet.IpPrefix;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
|
@ -20,7 +20,6 @@
|
|||||||
<module>api</module>
|
<module>api</module>
|
||||||
<module>net</module>
|
<module>net</module>
|
||||||
<module>store</module>
|
<module>store</module>
|
||||||
<module>trivial</module>
|
|
||||||
</modules>
|
</modules>
|
||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
|
48
core/store/dist/pom.xml
vendored
Normal file
48
core/store/dist/pom.xml
vendored
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
|
<parent>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-store</artifactId>
|
||||||
|
<version>1.0.0-SNAPSHOT</version>
|
||||||
|
<relativePath>../pom.xml</relativePath>
|
||||||
|
</parent>
|
||||||
|
|
||||||
|
<artifactId>onos-core-dist</artifactId>
|
||||||
|
<packaging>bundle</packaging>
|
||||||
|
|
||||||
|
<description>ONOS Gossip based distributed store subsystems</description>
|
||||||
|
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-api</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-serializers</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>org.apache.felix.scr.annotations</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>de.javakaffee</groupId>
|
||||||
|
<artifactId>kryo-serializers</artifactId>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>maven-scr-plugin</artifactId>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
|
||||||
|
</project>
|
53
core/store/dist/src/main/java/org/onlab/onos/store/device/impl/OnosClockService.java
vendored
Normal file
53
core/store/dist/src/main/java/org/onlab/onos/store/device/impl/OnosClockService.java
vendored
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
package org.onlab.onos.store.device.impl;
|
||||||
|
|
||||||
|
import static org.slf4j.LoggerFactory.getLogger;
|
||||||
|
|
||||||
|
import java.util.concurrent.ConcurrentHashMap;
|
||||||
|
import java.util.concurrent.ConcurrentMap;
|
||||||
|
import java.util.concurrent.atomic.AtomicInteger;
|
||||||
|
|
||||||
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
|
import org.apache.felix.scr.annotations.Component;
|
||||||
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
|
import org.apache.felix.scr.annotations.Service;
|
||||||
|
import org.onlab.onos.cluster.MastershipTerm;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.store.ClockService;
|
||||||
|
import org.onlab.onos.store.Timestamp;
|
||||||
|
import org.onlab.onos.store.impl.OnosTimestamp;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
|
@Component(immediate = true)
|
||||||
|
@Service
|
||||||
|
public class OnosClockService implements ClockService {
|
||||||
|
|
||||||
|
private final Logger log = getLogger(getClass());
|
||||||
|
|
||||||
|
// TODO: Implement per device ticker that is reset to 0 at the beginning of a new term.
|
||||||
|
private final AtomicInteger ticker = new AtomicInteger(0);
|
||||||
|
private ConcurrentMap<DeviceId, MastershipTerm> deviceMastershipTerms = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
|
@Activate
|
||||||
|
public void activate() {
|
||||||
|
log.info("Started");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Deactivate
|
||||||
|
public void deactivate() {
|
||||||
|
log.info("Stopped");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Timestamp getTimestamp(DeviceId deviceId) {
|
||||||
|
MastershipTerm term = deviceMastershipTerms.get(deviceId);
|
||||||
|
if (term == null) {
|
||||||
|
throw new IllegalStateException("Requesting timestamp for a deviceId without mastership");
|
||||||
|
}
|
||||||
|
return new OnosTimestamp(term.termNumber(), ticker.incrementAndGet());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
|
||||||
|
deviceMastershipTerms.put(deviceId, term);
|
||||||
|
}
|
||||||
|
}
|
336
core/store/dist/src/main/java/org/onlab/onos/store/device/impl/OnosDistributedDeviceStore.java
vendored
Normal file
336
core/store/dist/src/main/java/org/onlab/onos/store/device/impl/OnosDistributedDeviceStore.java
vendored
Normal file
@ -0,0 +1,336 @@
|
|||||||
|
package org.onlab.onos.store.device.impl;
|
||||||
|
|
||||||
|
import static com.google.common.base.Predicates.notNull;
|
||||||
|
import static com.google.common.base.Preconditions.checkState;
|
||||||
|
|
||||||
|
import com.google.common.collect.FluentIterable;
|
||||||
|
import com.google.common.collect.ImmutableSet;
|
||||||
|
import com.google.common.collect.ImmutableSet.Builder;
|
||||||
|
|
||||||
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
|
import org.apache.felix.scr.annotations.Component;
|
||||||
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
|
import org.apache.felix.scr.annotations.Reference;
|
||||||
|
import org.apache.felix.scr.annotations.ReferenceCardinality;
|
||||||
|
import org.apache.felix.scr.annotations.Service;
|
||||||
|
import org.onlab.onos.net.DefaultDevice;
|
||||||
|
import org.onlab.onos.net.DefaultPort;
|
||||||
|
import org.onlab.onos.net.Device;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.net.Port;
|
||||||
|
import org.onlab.onos.net.PortNumber;
|
||||||
|
import org.onlab.onos.net.device.DeviceDescription;
|
||||||
|
import org.onlab.onos.net.device.DeviceEvent;
|
||||||
|
import org.onlab.onos.net.device.DeviceStore;
|
||||||
|
import org.onlab.onos.net.device.DeviceStoreDelegate;
|
||||||
|
import org.onlab.onos.net.device.PortDescription;
|
||||||
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
import org.onlab.onos.store.AbstractStore;
|
||||||
|
import org.onlab.onos.store.ClockService;
|
||||||
|
import org.onlab.onos.store.Timestamp;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.Iterator;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Objects;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.concurrent.ConcurrentHashMap;
|
||||||
|
|
||||||
|
import static com.google.common.base.Preconditions.checkArgument;
|
||||||
|
import static org.onlab.onos.net.device.DeviceEvent.Type.*;
|
||||||
|
import static org.slf4j.LoggerFactory.getLogger;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manages inventory of infrastructure devices using a protocol that takes into consideration
|
||||||
|
* the order in which device events occur.
|
||||||
|
*/
|
||||||
|
@Component(immediate = true)
|
||||||
|
@Service
|
||||||
|
public class OnosDistributedDeviceStore
|
||||||
|
extends AbstractStore<DeviceEvent, DeviceStoreDelegate>
|
||||||
|
implements DeviceStore {
|
||||||
|
|
||||||
|
private final Logger log = getLogger(getClass());
|
||||||
|
|
||||||
|
public static final String DEVICE_NOT_FOUND = "Device with ID %s not found";
|
||||||
|
|
||||||
|
private ConcurrentHashMap<DeviceId, VersionedValue<Device>> devices;
|
||||||
|
private ConcurrentHashMap<DeviceId, Map<PortNumber, VersionedValue<Port>>> devicePorts;
|
||||||
|
|
||||||
|
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
|
||||||
|
protected ClockService clockService;
|
||||||
|
|
||||||
|
@Activate
|
||||||
|
public void activate() {
|
||||||
|
|
||||||
|
devices = new ConcurrentHashMap<>();
|
||||||
|
devicePorts = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
|
log.info("Started");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Deactivate
|
||||||
|
public void deactivate() {
|
||||||
|
log.info("Stopped");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getDeviceCount() {
|
||||||
|
return devices.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Iterable<Device> getDevices() {
|
||||||
|
// TODO builder v.s. copyOf. Guava semms to be using copyOf?
|
||||||
|
// FIXME: synchronize.
|
||||||
|
Builder<Device> builder = ImmutableSet.builder();
|
||||||
|
for (VersionedValue<? extends Device> device : devices.values()) {
|
||||||
|
builder.add(device.entity());
|
||||||
|
}
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Device getDevice(DeviceId deviceId) {
|
||||||
|
return devices.get(deviceId).entity();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId,
|
||||||
|
DeviceDescription deviceDescription) {
|
||||||
|
Timestamp now = clockService.getTimestamp(deviceId);
|
||||||
|
VersionedValue<Device> device = devices.get(deviceId);
|
||||||
|
|
||||||
|
if (device == null) {
|
||||||
|
return createDevice(providerId, deviceId, deviceDescription, now);
|
||||||
|
}
|
||||||
|
|
||||||
|
checkState(now.compareTo(device.timestamp()) > 0,
|
||||||
|
"Existing device has a timestamp in the future!");
|
||||||
|
|
||||||
|
return updateDevice(providerId, device.entity(), deviceDescription, now);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates the device and returns the appropriate event if necessary.
|
||||||
|
private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId,
|
||||||
|
DeviceDescription desc, Timestamp timestamp) {
|
||||||
|
DefaultDevice device = new DefaultDevice(providerId, deviceId, desc.type(),
|
||||||
|
desc.manufacturer(),
|
||||||
|
desc.hwVersion(), desc.swVersion(),
|
||||||
|
desc.serialNumber());
|
||||||
|
|
||||||
|
devices.put(deviceId, new VersionedValue<Device>(device, true, timestamp));
|
||||||
|
// FIXME: broadcast a message telling peers of a device event.
|
||||||
|
return new DeviceEvent(DEVICE_ADDED, device, null);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Updates the device and returns the appropriate event if necessary.
|
||||||
|
private DeviceEvent updateDevice(ProviderId providerId, Device device,
|
||||||
|
DeviceDescription desc, Timestamp timestamp) {
|
||||||
|
// We allow only certain attributes to trigger update
|
||||||
|
if (!Objects.equals(device.hwVersion(), desc.hwVersion()) ||
|
||||||
|
!Objects.equals(device.swVersion(), desc.swVersion())) {
|
||||||
|
|
||||||
|
Device updated = new DefaultDevice(providerId, device.id(),
|
||||||
|
desc.type(),
|
||||||
|
desc.manufacturer(),
|
||||||
|
desc.hwVersion(),
|
||||||
|
desc.swVersion(),
|
||||||
|
desc.serialNumber());
|
||||||
|
devices.put(device.id(), new VersionedValue<Device>(updated, true, timestamp));
|
||||||
|
// FIXME: broadcast a message telling peers of a device event.
|
||||||
|
return new DeviceEvent(DeviceEvent.Type.DEVICE_UPDATED, updated, null);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise merely attempt to change availability
|
||||||
|
DefaultDevice updated = new DefaultDevice(providerId, device.id(),
|
||||||
|
desc.type(),
|
||||||
|
desc.manufacturer(),
|
||||||
|
desc.hwVersion(),
|
||||||
|
desc.swVersion(),
|
||||||
|
desc.serialNumber());
|
||||||
|
|
||||||
|
VersionedValue<Device> oldDevice = devices.put(device.id(),
|
||||||
|
new VersionedValue<Device>(updated, true, timestamp));
|
||||||
|
if (!oldDevice.isUp()) {
|
||||||
|
return new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device, null);
|
||||||
|
} else {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public DeviceEvent markOffline(DeviceId deviceId) {
|
||||||
|
VersionedValue<Device> device = devices.get(deviceId);
|
||||||
|
boolean willRemove = device != null && device.isUp();
|
||||||
|
if (!willRemove) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
Timestamp timestamp = clockService.getTimestamp(deviceId);
|
||||||
|
if (replaceIfLatest(device.entity(), false, timestamp)) {
|
||||||
|
return new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device.entity(), null);
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Replace existing value if its timestamp is older.
|
||||||
|
private synchronized boolean replaceIfLatest(Device device, boolean isUp, Timestamp timestamp) {
|
||||||
|
VersionedValue<Device> existingValue = devices.get(device.id());
|
||||||
|
if (timestamp.compareTo(existingValue.timestamp()) > 0) {
|
||||||
|
devices.put(device.id(), new VersionedValue<Device>(device, isUp, timestamp));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<DeviceEvent> updatePorts(DeviceId deviceId,
|
||||||
|
List<PortDescription> portDescriptions) {
|
||||||
|
List<DeviceEvent> events = new ArrayList<>();
|
||||||
|
synchronized (this) {
|
||||||
|
VersionedValue<Device> device = devices.get(deviceId);
|
||||||
|
checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
|
||||||
|
Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId);
|
||||||
|
Timestamp timestamp = clockService.getTimestamp(deviceId);
|
||||||
|
|
||||||
|
// Add new ports
|
||||||
|
Set<PortNumber> processed = new HashSet<>();
|
||||||
|
for (PortDescription portDescription : portDescriptions) {
|
||||||
|
VersionedValue<Port> port = ports.get(portDescription.portNumber());
|
||||||
|
if (port == null) {
|
||||||
|
events.add(createPort(device, portDescription, ports, timestamp));
|
||||||
|
}
|
||||||
|
checkState(timestamp.compareTo(port.timestamp()) > 0,
|
||||||
|
"Existing port state has a timestamp in the future!");
|
||||||
|
events.add(updatePort(device, port, portDescription, ports, timestamp));
|
||||||
|
processed.add(portDescription.portNumber());
|
||||||
|
}
|
||||||
|
|
||||||
|
updatePortMap(deviceId, ports);
|
||||||
|
|
||||||
|
events.addAll(pruneOldPorts(device.entity(), ports, processed));
|
||||||
|
}
|
||||||
|
return FluentIterable.from(events).filter(notNull()).toList();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates a new port based on the port description adds it to the map and
|
||||||
|
// Returns corresponding event.
|
||||||
|
//@GuardedBy("this")
|
||||||
|
private DeviceEvent createPort(VersionedValue<Device> device, PortDescription portDescription,
|
||||||
|
Map<PortNumber, VersionedValue<Port>> ports, Timestamp timestamp) {
|
||||||
|
Port port = new DefaultPort(device.entity(), portDescription.portNumber(),
|
||||||
|
portDescription.isEnabled());
|
||||||
|
ports.put(port.number(), new VersionedValue<Port>(port, true, timestamp));
|
||||||
|
updatePortMap(device.entity().id(), ports);
|
||||||
|
return new DeviceEvent(PORT_ADDED, device.entity(), port);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checks if the specified port requires update and if so, it replaces the
|
||||||
|
// existing entry in the map and returns corresponding event.
|
||||||
|
//@GuardedBy("this")
|
||||||
|
private DeviceEvent updatePort(VersionedValue<Device> device, VersionedValue<Port> port,
|
||||||
|
PortDescription portDescription,
|
||||||
|
Map<PortNumber, VersionedValue<Port>> ports,
|
||||||
|
Timestamp timestamp) {
|
||||||
|
if (port.entity().isEnabled() != portDescription.isEnabled()) {
|
||||||
|
VersionedValue<Port> updatedPort = new VersionedValue<Port>(
|
||||||
|
new DefaultPort(device.entity(), portDescription.portNumber(),
|
||||||
|
portDescription.isEnabled()),
|
||||||
|
portDescription.isEnabled(),
|
||||||
|
timestamp);
|
||||||
|
ports.put(port.entity().number(), updatedPort);
|
||||||
|
updatePortMap(device.entity().id(), ports);
|
||||||
|
return new DeviceEvent(PORT_UPDATED, device.entity(), updatedPort.entity());
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prunes the specified list of ports based on which ports are in the
|
||||||
|
// processed list and returns list of corresponding events.
|
||||||
|
//@GuardedBy("this")
|
||||||
|
private List<DeviceEvent> pruneOldPorts(Device device,
|
||||||
|
Map<PortNumber, VersionedValue<Port>> ports,
|
||||||
|
Set<PortNumber> processed) {
|
||||||
|
List<DeviceEvent> events = new ArrayList<>();
|
||||||
|
Iterator<PortNumber> iterator = ports.keySet().iterator();
|
||||||
|
while (iterator.hasNext()) {
|
||||||
|
PortNumber portNumber = iterator.next();
|
||||||
|
if (!processed.contains(portNumber)) {
|
||||||
|
events.add(new DeviceEvent(PORT_REMOVED, device,
|
||||||
|
ports.get(portNumber).entity()));
|
||||||
|
iterator.remove();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (!events.isEmpty()) {
|
||||||
|
updatePortMap(device.id(), ports);
|
||||||
|
}
|
||||||
|
return events;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gets the map of ports for the specified device; if one does not already
|
||||||
|
// exist, it creates and registers a new one.
|
||||||
|
// WARN: returned value is a copy, changes made to the Map
|
||||||
|
// needs to be written back using updatePortMap
|
||||||
|
//@GuardedBy("this")
|
||||||
|
private Map<PortNumber, VersionedValue<Port>> getPortMap(DeviceId deviceId) {
|
||||||
|
Map<PortNumber, VersionedValue<Port>> ports = devicePorts.get(deviceId);
|
||||||
|
if (ports == null) {
|
||||||
|
ports = new HashMap<>();
|
||||||
|
// this probably is waste of time in most cases.
|
||||||
|
updatePortMap(deviceId, ports);
|
||||||
|
}
|
||||||
|
return ports;
|
||||||
|
}
|
||||||
|
|
||||||
|
//@GuardedBy("this")
|
||||||
|
private void updatePortMap(DeviceId deviceId, Map<PortNumber, VersionedValue<Port>> ports) {
|
||||||
|
devicePorts.put(deviceId, ports);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public DeviceEvent updatePortStatus(DeviceId deviceId,
|
||||||
|
PortDescription portDescription) {
|
||||||
|
VersionedValue<Device> device = devices.get(deviceId);
|
||||||
|
checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
|
||||||
|
Map<PortNumber, VersionedValue<Port>> ports = getPortMap(deviceId);
|
||||||
|
VersionedValue<Port> port = ports.get(portDescription.portNumber());
|
||||||
|
Timestamp timestamp = clockService.getTimestamp(deviceId);
|
||||||
|
return updatePort(device, port, portDescription, ports, timestamp);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<Port> getPorts(DeviceId deviceId) {
|
||||||
|
Map<PortNumber, VersionedValue<Port>> versionedPorts = devicePorts.get(deviceId);
|
||||||
|
if (versionedPorts == null) {
|
||||||
|
return Collections.emptyList();
|
||||||
|
}
|
||||||
|
List<Port> ports = new ArrayList<>();
|
||||||
|
for (VersionedValue<Port> port : versionedPorts.values()) {
|
||||||
|
ports.add(port.entity());
|
||||||
|
}
|
||||||
|
return ports;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Port getPort(DeviceId deviceId, PortNumber portNumber) {
|
||||||
|
Map<PortNumber, VersionedValue<Port>> ports = devicePorts.get(deviceId);
|
||||||
|
return ports == null ? null : ports.get(portNumber).entity();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isAvailable(DeviceId deviceId) {
|
||||||
|
return devices.get(deviceId).isUp();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public DeviceEvent removeDevice(DeviceId deviceId) {
|
||||||
|
VersionedValue<Device> previousDevice = devices.remove(deviceId);
|
||||||
|
return previousDevice == null ? null :
|
||||||
|
new DeviceEvent(DEVICE_REMOVED, previousDevice.entity(), null);
|
||||||
|
}
|
||||||
|
}
|
45
core/store/dist/src/main/java/org/onlab/onos/store/device/impl/VersionedValue.java
vendored
Normal file
45
core/store/dist/src/main/java/org/onlab/onos/store/device/impl/VersionedValue.java
vendored
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
package org.onlab.onos.store.device.impl;
|
||||||
|
|
||||||
|
import org.onlab.onos.store.Timestamp;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Wrapper class for a entity that is versioned
|
||||||
|
* and can either be up or down.
|
||||||
|
*
|
||||||
|
* @param <T> type of the value.
|
||||||
|
*/
|
||||||
|
public class VersionedValue<T> {
|
||||||
|
private final T entity;
|
||||||
|
private final Timestamp timestamp;
|
||||||
|
private final boolean isUp;
|
||||||
|
|
||||||
|
public VersionedValue(T entity, boolean isUp, Timestamp timestamp) {
|
||||||
|
this.entity = entity;
|
||||||
|
this.isUp = isUp;
|
||||||
|
this.timestamp = timestamp;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the value.
|
||||||
|
* @return value.
|
||||||
|
*/
|
||||||
|
public T entity() {
|
||||||
|
return entity;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tells whether the entity is up or down.
|
||||||
|
* @return true if up, false otherwise.
|
||||||
|
*/
|
||||||
|
public boolean isUp() {
|
||||||
|
return isUp;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the timestamp (version) associated with this entity.
|
||||||
|
* @return timestamp.
|
||||||
|
*/
|
||||||
|
public Timestamp timestamp() {
|
||||||
|
return timestamp;
|
||||||
|
}
|
||||||
|
}
|
4
core/store/dist/src/main/java/org/onlab/onos/store/device/impl/package-info.java
vendored
Normal file
4
core/store/dist/src/main/java/org/onlab/onos/store/device/impl/package-info.java
vendored
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
/**
|
||||||
|
* Implementation of device store using distributed structures.
|
||||||
|
*/
|
||||||
|
package org.onlab.onos.store.device.impl;
|
@ -1,11 +1,9 @@
|
|||||||
package org.onlab.onos.store.impl;
|
package org.onlab.onos.store.impl;
|
||||||
|
|
||||||
import static com.google.common.base.Preconditions.checkNotNull;
|
|
||||||
import static com.google.common.base.Preconditions.checkArgument;
|
import static com.google.common.base.Preconditions.checkArgument;
|
||||||
|
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
|
|
||||||
import org.onlab.onos.net.ElementId;
|
|
||||||
import org.onlab.onos.store.Timestamp;
|
import org.onlab.onos.store.Timestamp;
|
||||||
|
|
||||||
import com.google.common.base.MoreObjects;
|
import com.google.common.base.MoreObjects;
|
||||||
@ -14,22 +12,20 @@ import com.google.common.collect.ComparisonChain;
|
|||||||
// If it is store specific, implement serializable interfaces?
|
// If it is store specific, implement serializable interfaces?
|
||||||
/**
|
/**
|
||||||
* Default implementation of Timestamp.
|
* Default implementation of Timestamp.
|
||||||
|
* TODO: Better documentation.
|
||||||
*/
|
*/
|
||||||
public final class OnosTimestamp implements Timestamp {
|
public final class OnosTimestamp implements Timestamp {
|
||||||
|
|
||||||
private final ElementId id;
|
|
||||||
private final int termNumber;
|
private final int termNumber;
|
||||||
private final int sequenceNumber;
|
private final int sequenceNumber;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Default version tuple.
|
* Default version tuple.
|
||||||
*
|
*
|
||||||
* @param id identifier of the element
|
|
||||||
* @param termNumber the mastership termNumber
|
* @param termNumber the mastership termNumber
|
||||||
* @param sequenceNumber the sequenceNumber number within the termNumber
|
* @param sequenceNumber the sequenceNumber number within the termNumber
|
||||||
*/
|
*/
|
||||||
public OnosTimestamp(ElementId id, int termNumber, int sequenceNumber) {
|
public OnosTimestamp(int termNumber, int sequenceNumber) {
|
||||||
this.id = checkNotNull(id);
|
|
||||||
this.termNumber = termNumber;
|
this.termNumber = termNumber;
|
||||||
this.sequenceNumber = sequenceNumber;
|
this.sequenceNumber = sequenceNumber;
|
||||||
}
|
}
|
||||||
@ -38,9 +34,6 @@ public final class OnosTimestamp implements Timestamp {
|
|||||||
public int compareTo(Timestamp o) {
|
public int compareTo(Timestamp o) {
|
||||||
checkArgument(o instanceof OnosTimestamp, "Must be OnosTimestamp", o);
|
checkArgument(o instanceof OnosTimestamp, "Must be OnosTimestamp", o);
|
||||||
OnosTimestamp that = (OnosTimestamp) o;
|
OnosTimestamp that = (OnosTimestamp) o;
|
||||||
checkArgument(this.id.equals(that.id),
|
|
||||||
"Cannot compare version for different element this:%s, that:%s",
|
|
||||||
this, that);
|
|
||||||
|
|
||||||
return ComparisonChain.start()
|
return ComparisonChain.start()
|
||||||
.compare(this.termNumber, that.termNumber)
|
.compare(this.termNumber, that.termNumber)
|
||||||
@ -50,7 +43,7 @@ public final class OnosTimestamp implements Timestamp {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public int hashCode() {
|
public int hashCode() {
|
||||||
return Objects.hash(id, termNumber, sequenceNumber);
|
return Objects.hash(termNumber, sequenceNumber);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -62,29 +55,18 @@ public final class OnosTimestamp implements Timestamp {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
OnosTimestamp that = (OnosTimestamp) obj;
|
OnosTimestamp that = (OnosTimestamp) obj;
|
||||||
return Objects.equals(this.id, that.id) &&
|
return Objects.equals(this.termNumber, that.termNumber) &&
|
||||||
Objects.equals(this.termNumber, that.termNumber) &&
|
|
||||||
Objects.equals(this.sequenceNumber, that.sequenceNumber);
|
Objects.equals(this.sequenceNumber, that.sequenceNumber);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return MoreObjects.toStringHelper(getClass())
|
return MoreObjects.toStringHelper(getClass())
|
||||||
.add("id", id)
|
|
||||||
.add("termNumber", termNumber)
|
.add("termNumber", termNumber)
|
||||||
.add("sequenceNumber", sequenceNumber)
|
.add("sequenceNumber", sequenceNumber)
|
||||||
.toString();
|
.toString();
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the element.
|
|
||||||
*
|
|
||||||
* @return element identifier
|
|
||||||
*/
|
|
||||||
public ElementId id() {
|
|
||||||
return id;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns the termNumber.
|
* Returns the termNumber.
|
||||||
*
|
*
|
@ -1,6 +1,5 @@
|
|||||||
package org.onlab.onos.store.serializers;
|
package org.onlab.onos.store.serializers;
|
||||||
|
|
||||||
import org.onlab.onos.net.ElementId;
|
|
||||||
import org.onlab.onos.store.impl.OnosTimestamp;
|
import org.onlab.onos.store.impl.OnosTimestamp;
|
||||||
|
|
||||||
import com.esotericsoftware.kryo.Kryo;
|
import com.esotericsoftware.kryo.Kryo;
|
||||||
@ -20,18 +19,17 @@ public class OnosTimestampSerializer extends Serializer<OnosTimestamp> {
|
|||||||
// non-null, immutable
|
// non-null, immutable
|
||||||
super(false, true);
|
super(false, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void write(Kryo kryo, Output output, OnosTimestamp object) {
|
public void write(Kryo kryo, Output output, OnosTimestamp object) {
|
||||||
kryo.writeClassAndObject(output, object.id());
|
|
||||||
output.writeInt(object.termNumber());
|
output.writeInt(object.termNumber());
|
||||||
output.writeInt(object.sequenceNumber());
|
output.writeInt(object.sequenceNumber());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public OnosTimestamp read(Kryo kryo, Input input, Class<OnosTimestamp> type) {
|
public OnosTimestamp read(Kryo kryo, Input input, Class<OnosTimestamp> type) {
|
||||||
ElementId id = (ElementId) kryo.readClassAndObject(input);
|
|
||||||
final int term = input.readInt();
|
final int term = input.readInt();
|
||||||
final int sequence = input.readInt();
|
final int sequence = input.readInt();
|
||||||
return new OnosTimestamp(id, term, sequence);
|
return new OnosTimestamp(term, sequence);
|
||||||
}
|
}
|
||||||
}
|
}
|
64
core/store/hz/cluster/pom.xml
Normal file
64
core/store/hz/cluster/pom.xml
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
|
<parent>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-hz</artifactId>
|
||||||
|
<version>1.0.0-SNAPSHOT</version>
|
||||||
|
<relativePath>../pom.xml</relativePath>
|
||||||
|
</parent>
|
||||||
|
|
||||||
|
<artifactId>onos-core-hz-cluster</artifactId>
|
||||||
|
<packaging>bundle</packaging>
|
||||||
|
|
||||||
|
<description>ONOS Hazelcast based distributed store subsystems</description>
|
||||||
|
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-api</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-serializers</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-hz-common</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-hz-common</artifactId>
|
||||||
|
<classifier>tests</classifier>
|
||||||
|
<scope>test</scope>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>org.apache.felix.scr.annotations</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.hazelcast</groupId>
|
||||||
|
<artifactId>hazelcast</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>de.javakaffee</groupId>
|
||||||
|
<artifactId>kryo-serializers</artifactId>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>maven-scr-plugin</artifactId>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
|
||||||
|
</project>
|
@ -8,6 +8,7 @@ import com.hazelcast.core.Member;
|
|||||||
import com.hazelcast.core.MemberAttributeEvent;
|
import com.hazelcast.core.MemberAttributeEvent;
|
||||||
import com.hazelcast.core.MembershipEvent;
|
import com.hazelcast.core.MembershipEvent;
|
||||||
import com.hazelcast.core.MembershipListener;
|
import com.hazelcast.core.MembershipListener;
|
||||||
|
|
||||||
import org.apache.felix.scr.annotations.Activate;
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
import org.apache.felix.scr.annotations.Component;
|
import org.apache.felix.scr.annotations.Component;
|
||||||
import org.apache.felix.scr.annotations.Deactivate;
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
@ -18,9 +19,9 @@ import org.onlab.onos.cluster.ClusterStoreDelegate;
|
|||||||
import org.onlab.onos.cluster.ControllerNode;
|
import org.onlab.onos.cluster.ControllerNode;
|
||||||
import org.onlab.onos.cluster.DefaultControllerNode;
|
import org.onlab.onos.cluster.DefaultControllerNode;
|
||||||
import org.onlab.onos.cluster.NodeId;
|
import org.onlab.onos.cluster.NodeId;
|
||||||
import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache;
|
import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
|
||||||
import org.onlab.onos.store.impl.AbstractDistributedStore;
|
import org.onlab.onos.store.common.AbstractHazelcastStore;
|
||||||
import org.onlab.onos.store.impl.OptionalCacheLoader;
|
import org.onlab.onos.store.common.OptionalCacheLoader;
|
||||||
import org.onlab.packet.IpPrefix;
|
import org.onlab.packet.IpPrefix;
|
||||||
|
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
@ -38,7 +39,7 @@ import static org.onlab.onos.cluster.ControllerNode.State;
|
|||||||
@Component(immediate = true)
|
@Component(immediate = true)
|
||||||
@Service
|
@Service
|
||||||
public class DistributedClusterStore
|
public class DistributedClusterStore
|
||||||
extends AbstractDistributedStore<ClusterEvent, ClusterStoreDelegate>
|
extends AbstractHazelcastStore<ClusterEvent, ClusterStoreDelegate>
|
||||||
implements ClusterStore {
|
implements ClusterStore {
|
||||||
|
|
||||||
private IMap<byte[], byte[]> rawNodes;
|
private IMap<byte[], byte[]> rawNodes;
|
@ -21,9 +21,9 @@ import org.onlab.onos.cluster.MastershipTerm;
|
|||||||
import org.onlab.onos.cluster.NodeId;
|
import org.onlab.onos.cluster.NodeId;
|
||||||
import org.onlab.onos.net.DeviceId;
|
import org.onlab.onos.net.DeviceId;
|
||||||
import org.onlab.onos.net.MastershipRole;
|
import org.onlab.onos.net.MastershipRole;
|
||||||
import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache;
|
import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
|
||||||
import org.onlab.onos.store.impl.AbstractDistributedStore;
|
import org.onlab.onos.store.common.AbstractHazelcastStore;
|
||||||
import org.onlab.onos.store.impl.OptionalCacheLoader;
|
import org.onlab.onos.store.common.OptionalCacheLoader;
|
||||||
|
|
||||||
import com.google.common.base.Optional;
|
import com.google.common.base.Optional;
|
||||||
import com.google.common.cache.LoadingCache;
|
import com.google.common.cache.LoadingCache;
|
||||||
@ -36,7 +36,7 @@ import com.hazelcast.core.IMap;
|
|||||||
@Component(immediate = true)
|
@Component(immediate = true)
|
||||||
@Service
|
@Service
|
||||||
public class DistributedMastershipStore
|
public class DistributedMastershipStore
|
||||||
extends AbstractDistributedStore<MastershipEvent, MastershipStoreDelegate>
|
extends AbstractHazelcastStore<MastershipEvent, MastershipStoreDelegate>
|
||||||
implements MastershipStore {
|
implements MastershipStore {
|
||||||
|
|
||||||
private IMap<byte[], byte[]> rawMasters;
|
private IMap<byte[], byte[]> rawMasters;
|
52
core/store/hz/common/pom.xml
Normal file
52
core/store/hz/common/pom.xml
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
|
<parent>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-hz</artifactId>
|
||||||
|
<version>1.0.0-SNAPSHOT</version>
|
||||||
|
<relativePath>../pom.xml</relativePath>
|
||||||
|
</parent>
|
||||||
|
|
||||||
|
<artifactId>onos-core-hz-common</artifactId>
|
||||||
|
<packaging>bundle</packaging>
|
||||||
|
|
||||||
|
<description>ONOS Hazelcast based distributed store subsystems</description>
|
||||||
|
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-api</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-serializers</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>org.apache.felix.scr.annotations</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.hazelcast</groupId>
|
||||||
|
<artifactId>hazelcast</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>de.javakaffee</groupId>
|
||||||
|
<artifactId>kryo-serializers</artifactId>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>maven-scr-plugin</artifactId>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
|
||||||
|
</project>
|
@ -1,4 +1,4 @@
|
|||||||
package org.onlab.onos.store.impl;
|
package org.onlab.onos.store.common;
|
||||||
|
|
||||||
import java.util.concurrent.Callable;
|
import java.util.concurrent.Callable;
|
||||||
import java.util.concurrent.ExecutionException;
|
import java.util.concurrent.ExecutionException;
|
@ -1,4 +1,4 @@
|
|||||||
package org.onlab.onos.store.impl;
|
package org.onlab.onos.store.common;
|
||||||
|
|
||||||
import com.google.common.base.Optional;
|
import com.google.common.base.Optional;
|
||||||
import com.google.common.cache.LoadingCache;
|
import com.google.common.cache.LoadingCache;
|
||||||
@ -6,6 +6,7 @@ import com.hazelcast.core.EntryAdapter;
|
|||||||
import com.hazelcast.core.EntryEvent;
|
import com.hazelcast.core.EntryEvent;
|
||||||
import com.hazelcast.core.HazelcastInstance;
|
import com.hazelcast.core.HazelcastInstance;
|
||||||
import com.hazelcast.core.MapEvent;
|
import com.hazelcast.core.MapEvent;
|
||||||
|
|
||||||
import org.apache.felix.scr.annotations.Activate;
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
import org.apache.felix.scr.annotations.Component;
|
import org.apache.felix.scr.annotations.Component;
|
||||||
import org.apache.felix.scr.annotations.Reference;
|
import org.apache.felix.scr.annotations.Reference;
|
||||||
@ -13,7 +14,6 @@ import org.apache.felix.scr.annotations.ReferenceCardinality;
|
|||||||
import org.onlab.onos.event.Event;
|
import org.onlab.onos.event.Event;
|
||||||
import org.onlab.onos.store.AbstractStore;
|
import org.onlab.onos.store.AbstractStore;
|
||||||
import org.onlab.onos.store.StoreDelegate;
|
import org.onlab.onos.store.StoreDelegate;
|
||||||
import org.onlab.onos.store.common.StoreService;
|
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
import static com.google.common.base.Preconditions.checkNotNull;
|
import static com.google.common.base.Preconditions.checkNotNull;
|
||||||
@ -23,7 +23,7 @@ import static org.slf4j.LoggerFactory.getLogger;
|
|||||||
* Abstraction of a distributed store based on Hazelcast.
|
* Abstraction of a distributed store based on Hazelcast.
|
||||||
*/
|
*/
|
||||||
@Component(componentAbstract = true)
|
@Component(componentAbstract = true)
|
||||||
public abstract class AbstractDistributedStore<E extends Event, D extends StoreDelegate<E>>
|
public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDelegate<E>>
|
||||||
extends AbstractStore<E, D> {
|
extends AbstractStore<E, D> {
|
||||||
|
|
||||||
protected final Logger log = getLogger(getClass());
|
protected final Logger log = getLogger(getClass());
|
@ -1,9 +1,7 @@
|
|||||||
package org.onlab.onos.store.impl;
|
package org.onlab.onos.store.common;
|
||||||
|
|
||||||
import static com.google.common.base.Preconditions.checkNotNull;
|
import static com.google.common.base.Preconditions.checkNotNull;
|
||||||
|
|
||||||
import org.onlab.onos.store.common.StoreService;
|
|
||||||
|
|
||||||
import com.google.common.base.Optional;
|
import com.google.common.base.Optional;
|
||||||
import com.google.common.cache.CacheLoader;
|
import com.google.common.cache.CacheLoader;
|
||||||
import com.hazelcast.core.IMap;
|
import com.hazelcast.core.IMap;
|
@ -1,4 +1,4 @@
|
|||||||
package org.onlab.onos.store.impl;
|
package org.onlab.onos.store.common;
|
||||||
|
|
||||||
import com.hazelcast.config.Config;
|
import com.hazelcast.config.Config;
|
||||||
import com.hazelcast.config.FileSystemXmlConfig;
|
import com.hazelcast.config.FileSystemXmlConfig;
|
||||||
@ -27,7 +27,6 @@ import org.onlab.onos.net.MastershipRole;
|
|||||||
import org.onlab.onos.net.Port;
|
import org.onlab.onos.net.Port;
|
||||||
import org.onlab.onos.net.PortNumber;
|
import org.onlab.onos.net.PortNumber;
|
||||||
import org.onlab.onos.net.provider.ProviderId;
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
import org.onlab.onos.store.common.StoreService;
|
|
||||||
import org.onlab.onos.store.serializers.ConnectPointSerializer;
|
import org.onlab.onos.store.serializers.ConnectPointSerializer;
|
||||||
import org.onlab.onos.store.serializers.DefaultLinkSerializer;
|
import org.onlab.onos.store.serializers.DefaultLinkSerializer;
|
||||||
import org.onlab.onos.store.serializers.DefaultPortSerializer;
|
import org.onlab.onos.store.serializers.DefaultPortSerializer;
|
||||||
@ -35,7 +34,6 @@ import org.onlab.onos.store.serializers.DeviceIdSerializer;
|
|||||||
import org.onlab.onos.store.serializers.IpPrefixSerializer;
|
import org.onlab.onos.store.serializers.IpPrefixSerializer;
|
||||||
import org.onlab.onos.store.serializers.LinkKeySerializer;
|
import org.onlab.onos.store.serializers.LinkKeySerializer;
|
||||||
import org.onlab.onos.store.serializers.NodeIdSerializer;
|
import org.onlab.onos.store.serializers.NodeIdSerializer;
|
||||||
import org.onlab.onos.store.serializers.OnosTimestampSerializer;
|
|
||||||
import org.onlab.onos.store.serializers.PortNumberSerializer;
|
import org.onlab.onos.store.serializers.PortNumberSerializer;
|
||||||
import org.onlab.onos.store.serializers.ProviderIdSerializer;
|
import org.onlab.onos.store.serializers.ProviderIdSerializer;
|
||||||
import org.onlab.packet.IpPrefix;
|
import org.onlab.packet.IpPrefix;
|
||||||
@ -102,7 +100,6 @@ public class StoreManager implements StoreService {
|
|||||||
.register(DeviceId.class, new DeviceIdSerializer())
|
.register(DeviceId.class, new DeviceIdSerializer())
|
||||||
.register(PortNumber.class, new PortNumberSerializer())
|
.register(PortNumber.class, new PortNumberSerializer())
|
||||||
.register(DefaultPort.class, new DefaultPortSerializer())
|
.register(DefaultPort.class, new DefaultPortSerializer())
|
||||||
.register(OnosTimestamp.class, new OnosTimestampSerializer())
|
|
||||||
.register(LinkKey.class, new LinkKeySerializer())
|
.register(LinkKey.class, new LinkKeySerializer())
|
||||||
.register(ConnectPoint.class, new ConnectPointSerializer())
|
.register(ConnectPoint.class, new ConnectPointSerializer())
|
||||||
.register(DefaultLink.class, new DefaultLinkSerializer())
|
.register(DefaultLink.class, new DefaultLinkSerializer())
|
@ -1,4 +1,4 @@
|
|||||||
package org.onlab.onos.store.impl;
|
package org.onlab.onos.store.common;
|
||||||
|
|
||||||
import java.io.FileNotFoundException;
|
import java.io.FileNotFoundException;
|
||||||
import java.util.UUID;
|
import java.util.UUID;
|
64
core/store/hz/net/pom.xml
Normal file
64
core/store/hz/net/pom.xml
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
|
<parent>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-hz</artifactId>
|
||||||
|
<version>1.0.0-SNAPSHOT</version>
|
||||||
|
<relativePath>../pom.xml</relativePath>
|
||||||
|
</parent>
|
||||||
|
|
||||||
|
<artifactId>onos-core-hz-net</artifactId>
|
||||||
|
<packaging>bundle</packaging>
|
||||||
|
|
||||||
|
<description>ONOS Hazelcast based distributed store subsystems</description>
|
||||||
|
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-api</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-serializers</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-hz-common</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-hz-common</artifactId>
|
||||||
|
<classifier>tests</classifier>
|
||||||
|
<scope>test</scope>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>org.apache.felix.scr.annotations</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.hazelcast</groupId>
|
||||||
|
<artifactId>hazelcast</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>de.javakaffee</groupId>
|
||||||
|
<artifactId>kryo-serializers</artifactId>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>maven-scr-plugin</artifactId>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
|
||||||
|
</project>
|
@ -1,6 +1,7 @@
|
|||||||
package org.onlab.onos.store.device.impl;
|
package org.onlab.onos.store.device.impl;
|
||||||
|
|
||||||
import static com.google.common.base.Predicates.notNull;
|
import static com.google.common.base.Predicates.notNull;
|
||||||
|
|
||||||
import com.google.common.base.Optional;
|
import com.google.common.base.Optional;
|
||||||
import com.google.common.cache.LoadingCache;
|
import com.google.common.cache.LoadingCache;
|
||||||
import com.google.common.collect.FluentIterable;
|
import com.google.common.collect.FluentIterable;
|
||||||
@ -26,9 +27,9 @@ import org.onlab.onos.net.device.DeviceStore;
|
|||||||
import org.onlab.onos.net.device.DeviceStoreDelegate;
|
import org.onlab.onos.net.device.DeviceStoreDelegate;
|
||||||
import org.onlab.onos.net.device.PortDescription;
|
import org.onlab.onos.net.device.PortDescription;
|
||||||
import org.onlab.onos.net.provider.ProviderId;
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache;
|
import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
|
||||||
import org.onlab.onos.store.impl.AbstractDistributedStore;
|
import org.onlab.onos.store.common.AbstractHazelcastStore;
|
||||||
import org.onlab.onos.store.impl.OptionalCacheLoader;
|
import org.onlab.onos.store.common.OptionalCacheLoader;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
@ -52,7 +53,7 @@ import static org.slf4j.LoggerFactory.getLogger;
|
|||||||
@Component(immediate = true)
|
@Component(immediate = true)
|
||||||
@Service
|
@Service
|
||||||
public class DistributedDeviceStore
|
public class DistributedDeviceStore
|
||||||
extends AbstractDistributedStore<DeviceEvent, DeviceStoreDelegate>
|
extends AbstractHazelcastStore<DeviceEvent, DeviceStoreDelegate>
|
||||||
implements DeviceStore {
|
implements DeviceStore {
|
||||||
|
|
||||||
private final Logger log = getLogger(getClass());
|
private final Logger log = getLogger(getClass());
|
@ -0,0 +1,32 @@
|
|||||||
|
package org.onlab.onos.store.device.impl;
|
||||||
|
|
||||||
|
import org.apache.felix.scr.annotations.Component;
|
||||||
|
import org.apache.felix.scr.annotations.Service;
|
||||||
|
import org.onlab.onos.cluster.MastershipTerm;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.store.ClockService;
|
||||||
|
import org.onlab.onos.store.Timestamp;
|
||||||
|
|
||||||
|
// FIXME: Code clone in onos-core-trivial, onos-core-hz-net
|
||||||
|
/**
|
||||||
|
* Dummy implementation of {@link ClockService}.
|
||||||
|
*/
|
||||||
|
@Component(immediate = true)
|
||||||
|
@Service
|
||||||
|
public class NoOpClockService implements ClockService {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Timestamp getTimestamp(DeviceId deviceId) {
|
||||||
|
return new Timestamp() {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int compareTo(Timestamp o) {
|
||||||
|
throw new IllegalStateException("Never expected to be used.");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,153 @@
|
|||||||
|
package org.onlab.onos.store.flow.impl;
|
||||||
|
|
||||||
|
import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_ADDED;
|
||||||
|
import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
|
||||||
|
import static org.slf4j.LoggerFactory.getLogger;
|
||||||
|
|
||||||
|
import java.util.Collection;
|
||||||
|
import java.util.Collections;
|
||||||
|
|
||||||
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
|
import org.apache.felix.scr.annotations.Component;
|
||||||
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
|
import org.apache.felix.scr.annotations.Service;
|
||||||
|
import org.onlab.onos.ApplicationId;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.net.flow.DefaultFlowRule;
|
||||||
|
import org.onlab.onos.net.flow.FlowRule;
|
||||||
|
import org.onlab.onos.net.flow.FlowRule.FlowRuleState;
|
||||||
|
import org.onlab.onos.net.flow.FlowRuleEvent;
|
||||||
|
import org.onlab.onos.net.flow.FlowRuleEvent.Type;
|
||||||
|
import org.onlab.onos.net.flow.FlowRuleStore;
|
||||||
|
import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
|
||||||
|
import org.onlab.onos.store.AbstractStore;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
|
import com.google.common.collect.ArrayListMultimap;
|
||||||
|
import com.google.common.collect.ImmutableSet;
|
||||||
|
import com.google.common.collect.Multimap;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manages inventory of flow rules using trivial in-memory implementation.
|
||||||
|
*/
|
||||||
|
//FIXME: I LIE I AM NOT DISTRIBUTED
|
||||||
|
@Component(immediate = true)
|
||||||
|
@Service
|
||||||
|
public class DistributedFlowRuleStore
|
||||||
|
extends AbstractStore<FlowRuleEvent, FlowRuleStoreDelegate>
|
||||||
|
implements FlowRuleStore {
|
||||||
|
|
||||||
|
private final Logger log = getLogger(getClass());
|
||||||
|
|
||||||
|
// store entries as a pile of rules, no info about device tables
|
||||||
|
private final Multimap<DeviceId, FlowRule> flowEntries =
|
||||||
|
ArrayListMultimap.<DeviceId, FlowRule>create();
|
||||||
|
|
||||||
|
private final Multimap<ApplicationId, FlowRule> flowEntriesById =
|
||||||
|
ArrayListMultimap.<ApplicationId, FlowRule>create();
|
||||||
|
|
||||||
|
@Activate
|
||||||
|
public void activate() {
|
||||||
|
log.info("Started");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Deactivate
|
||||||
|
public void deactivate() {
|
||||||
|
log.info("Stopped");
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized FlowRule getFlowRule(FlowRule rule) {
|
||||||
|
for (FlowRule f : flowEntries.get(rule.deviceId())) {
|
||||||
|
if (f.equals(rule)) {
|
||||||
|
return f;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized Iterable<FlowRule> getFlowEntries(DeviceId deviceId) {
|
||||||
|
Collection<FlowRule> rules = flowEntries.get(deviceId);
|
||||||
|
if (rules == null) {
|
||||||
|
return Collections.emptyList();
|
||||||
|
}
|
||||||
|
return ImmutableSet.copyOf(rules);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized Iterable<FlowRule> getFlowEntriesByAppId(ApplicationId appId) {
|
||||||
|
Collection<FlowRule> rules = flowEntriesById.get(appId);
|
||||||
|
if (rules == null) {
|
||||||
|
return Collections.emptyList();
|
||||||
|
}
|
||||||
|
return ImmutableSet.copyOf(rules);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized void storeFlowRule(FlowRule rule) {
|
||||||
|
FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_ADD);
|
||||||
|
DeviceId did = f.deviceId();
|
||||||
|
if (!flowEntries.containsEntry(did, f)) {
|
||||||
|
flowEntries.put(did, f);
|
||||||
|
flowEntriesById.put(rule.appId(), f);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized void deleteFlowRule(FlowRule rule) {
|
||||||
|
FlowRule f = new DefaultFlowRule(rule, FlowRuleState.PENDING_REMOVE);
|
||||||
|
DeviceId did = f.deviceId();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* find the rule and mark it for deletion.
|
||||||
|
* Ultimately a flow removed will come remove it.
|
||||||
|
*/
|
||||||
|
|
||||||
|
if (flowEntries.containsEntry(did, f)) {
|
||||||
|
//synchronized (flowEntries) {
|
||||||
|
flowEntries.remove(did, f);
|
||||||
|
flowEntries.put(did, f);
|
||||||
|
flowEntriesById.remove(rule.appId(), rule);
|
||||||
|
//}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowRule rule) {
|
||||||
|
DeviceId did = rule.deviceId();
|
||||||
|
|
||||||
|
// check if this new rule is an update to an existing entry
|
||||||
|
if (flowEntries.containsEntry(did, rule)) {
|
||||||
|
//synchronized (flowEntries) {
|
||||||
|
// Multimaps support duplicates so we have to remove our rule
|
||||||
|
// and replace it with the current version.
|
||||||
|
flowEntries.remove(did, rule);
|
||||||
|
flowEntries.put(did, rule);
|
||||||
|
//}
|
||||||
|
return new FlowRuleEvent(Type.RULE_UPDATED, rule);
|
||||||
|
}
|
||||||
|
|
||||||
|
flowEntries.put(did, rule);
|
||||||
|
return new FlowRuleEvent(RULE_ADDED, rule);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized FlowRuleEvent removeFlowRule(FlowRule rule) {
|
||||||
|
//synchronized (this) {
|
||||||
|
if (flowEntries.remove(rule.deviceId(), rule)) {
|
||||||
|
return new FlowRuleEvent(RULE_REMOVED, rule);
|
||||||
|
} else {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
//}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
}
|
@ -0,0 +1,278 @@
|
|||||||
|
package org.onlab.onos.store.host.impl;
|
||||||
|
|
||||||
|
import static org.onlab.onos.net.host.HostEvent.Type.HOST_ADDED;
|
||||||
|
import static org.onlab.onos.net.host.HostEvent.Type.HOST_MOVED;
|
||||||
|
import static org.onlab.onos.net.host.HostEvent.Type.HOST_REMOVED;
|
||||||
|
import static org.onlab.onos.net.host.HostEvent.Type.HOST_UPDATED;
|
||||||
|
import static org.slf4j.LoggerFactory.getLogger;
|
||||||
|
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.concurrent.ConcurrentHashMap;
|
||||||
|
|
||||||
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
|
import org.apache.felix.scr.annotations.Component;
|
||||||
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
|
import org.apache.felix.scr.annotations.Service;
|
||||||
|
import org.onlab.onos.net.ConnectPoint;
|
||||||
|
import org.onlab.onos.net.DefaultHost;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.net.Host;
|
||||||
|
import org.onlab.onos.net.HostId;
|
||||||
|
import org.onlab.onos.net.host.HostDescription;
|
||||||
|
import org.onlab.onos.net.host.HostEvent;
|
||||||
|
import org.onlab.onos.net.host.HostStore;
|
||||||
|
import org.onlab.onos.net.host.HostStoreDelegate;
|
||||||
|
import org.onlab.onos.net.host.PortAddresses;
|
||||||
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
import org.onlab.onos.store.AbstractStore;
|
||||||
|
import org.onlab.packet.IpPrefix;
|
||||||
|
import org.onlab.packet.MacAddress;
|
||||||
|
import org.onlab.packet.VlanId;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
|
import com.google.common.collect.HashMultimap;
|
||||||
|
import com.google.common.collect.ImmutableSet;
|
||||||
|
import com.google.common.collect.Multimap;
|
||||||
|
import com.google.common.collect.Sets;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manages inventory of end-station hosts using trivial in-memory
|
||||||
|
* implementation.
|
||||||
|
*/
|
||||||
|
//FIXME: I LIE I AM NOT DISTRIBUTED
|
||||||
|
@Component(immediate = true)
|
||||||
|
@Service
|
||||||
|
public class DistributedHostStore
|
||||||
|
extends AbstractStore<HostEvent, HostStoreDelegate>
|
||||||
|
implements HostStore {
|
||||||
|
|
||||||
|
private final Logger log = getLogger(getClass());
|
||||||
|
|
||||||
|
// Host inventory
|
||||||
|
private final Map<HostId, Host> hosts = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
|
// Hosts tracked by their location
|
||||||
|
private final Multimap<ConnectPoint, Host> locations = HashMultimap.create();
|
||||||
|
|
||||||
|
private final Map<ConnectPoint, PortAddresses> portAddresses =
|
||||||
|
new ConcurrentHashMap<>();
|
||||||
|
|
||||||
|
@Activate
|
||||||
|
public void activate() {
|
||||||
|
log.info("Started");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Deactivate
|
||||||
|
public void deactivate() {
|
||||||
|
log.info("Stopped");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public HostEvent createOrUpdateHost(ProviderId providerId, HostId hostId,
|
||||||
|
HostDescription hostDescription) {
|
||||||
|
Host host = hosts.get(hostId);
|
||||||
|
if (host == null) {
|
||||||
|
return createHost(providerId, hostId, hostDescription);
|
||||||
|
}
|
||||||
|
return updateHost(providerId, host, hostDescription);
|
||||||
|
}
|
||||||
|
|
||||||
|
// creates a new host and sends HOST_ADDED
|
||||||
|
private HostEvent createHost(ProviderId providerId, HostId hostId,
|
||||||
|
HostDescription descr) {
|
||||||
|
DefaultHost newhost = new DefaultHost(providerId, hostId,
|
||||||
|
descr.hwAddress(),
|
||||||
|
descr.vlan(),
|
||||||
|
descr.location(),
|
||||||
|
descr.ipAddresses());
|
||||||
|
synchronized (this) {
|
||||||
|
hosts.put(hostId, newhost);
|
||||||
|
locations.put(descr.location(), newhost);
|
||||||
|
}
|
||||||
|
return new HostEvent(HOST_ADDED, newhost);
|
||||||
|
}
|
||||||
|
|
||||||
|
// checks for type of update to host, sends appropriate event
|
||||||
|
private HostEvent updateHost(ProviderId providerId, Host host,
|
||||||
|
HostDescription descr) {
|
||||||
|
DefaultHost updated;
|
||||||
|
HostEvent event;
|
||||||
|
if (!host.location().equals(descr.location())) {
|
||||||
|
updated = new DefaultHost(providerId, host.id(),
|
||||||
|
host.mac(),
|
||||||
|
host.vlan(),
|
||||||
|
descr.location(),
|
||||||
|
host.ipAddresses());
|
||||||
|
event = new HostEvent(HOST_MOVED, updated);
|
||||||
|
|
||||||
|
} else if (!(host.ipAddresses().equals(descr.ipAddresses()))) {
|
||||||
|
updated = new DefaultHost(providerId, host.id(),
|
||||||
|
host.mac(),
|
||||||
|
host.vlan(),
|
||||||
|
descr.location(),
|
||||||
|
descr.ipAddresses());
|
||||||
|
event = new HostEvent(HOST_UPDATED, updated);
|
||||||
|
} else {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
synchronized (this) {
|
||||||
|
hosts.put(host.id(), updated);
|
||||||
|
locations.remove(host.location(), host);
|
||||||
|
locations.put(updated.location(), updated);
|
||||||
|
}
|
||||||
|
return event;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public HostEvent removeHost(HostId hostId) {
|
||||||
|
synchronized (this) {
|
||||||
|
Host host = hosts.remove(hostId);
|
||||||
|
if (host != null) {
|
||||||
|
locations.remove((host.location()), host);
|
||||||
|
return new HostEvent(HOST_REMOVED, host);
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getHostCount() {
|
||||||
|
return hosts.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Iterable<Host> getHosts() {
|
||||||
|
return Collections.unmodifiableSet(new HashSet<>(hosts.values()));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Host getHost(HostId hostId) {
|
||||||
|
return hosts.get(hostId);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<Host> getHosts(VlanId vlanId) {
|
||||||
|
Set<Host> vlanset = new HashSet<>();
|
||||||
|
for (Host h : hosts.values()) {
|
||||||
|
if (h.vlan().equals(vlanId)) {
|
||||||
|
vlanset.add(h);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return vlanset;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<Host> getHosts(MacAddress mac) {
|
||||||
|
Set<Host> macset = new HashSet<>();
|
||||||
|
for (Host h : hosts.values()) {
|
||||||
|
if (h.mac().equals(mac)) {
|
||||||
|
macset.add(h);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return macset;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<Host> getHosts(IpPrefix ip) {
|
||||||
|
Set<Host> ipset = new HashSet<>();
|
||||||
|
for (Host h : hosts.values()) {
|
||||||
|
if (h.ipAddresses().contains(ip)) {
|
||||||
|
ipset.add(h);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ipset;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<Host> getConnectedHosts(ConnectPoint connectPoint) {
|
||||||
|
return ImmutableSet.copyOf(locations.get(connectPoint));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<Host> getConnectedHosts(DeviceId deviceId) {
|
||||||
|
Set<Host> hostset = new HashSet<>();
|
||||||
|
for (ConnectPoint p : locations.keySet()) {
|
||||||
|
if (p.deviceId().equals(deviceId)) {
|
||||||
|
hostset.addAll(locations.get(p));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return hostset;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void updateAddressBindings(PortAddresses addresses) {
|
||||||
|
synchronized (portAddresses) {
|
||||||
|
PortAddresses existing = portAddresses.get(addresses.connectPoint());
|
||||||
|
if (existing == null) {
|
||||||
|
portAddresses.put(addresses.connectPoint(), addresses);
|
||||||
|
} else {
|
||||||
|
Set<IpPrefix> union = Sets.union(existing.ips(), addresses.ips())
|
||||||
|
.immutableCopy();
|
||||||
|
|
||||||
|
MacAddress newMac = (addresses.mac() == null) ? existing.mac()
|
||||||
|
: addresses.mac();
|
||||||
|
|
||||||
|
PortAddresses newAddresses =
|
||||||
|
new PortAddresses(addresses.connectPoint(), union, newMac);
|
||||||
|
|
||||||
|
portAddresses.put(newAddresses.connectPoint(), newAddresses);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void removeAddressBindings(PortAddresses addresses) {
|
||||||
|
synchronized (portAddresses) {
|
||||||
|
PortAddresses existing = portAddresses.get(addresses.connectPoint());
|
||||||
|
if (existing != null) {
|
||||||
|
Set<IpPrefix> difference =
|
||||||
|
Sets.difference(existing.ips(), addresses.ips()).immutableCopy();
|
||||||
|
|
||||||
|
// If they removed the existing mac, set the new mac to null.
|
||||||
|
// Otherwise, keep the existing mac.
|
||||||
|
MacAddress newMac = existing.mac();
|
||||||
|
if (addresses.mac() != null && addresses.mac().equals(existing.mac())) {
|
||||||
|
newMac = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
PortAddresses newAddresses =
|
||||||
|
new PortAddresses(addresses.connectPoint(), difference, newMac);
|
||||||
|
|
||||||
|
portAddresses.put(newAddresses.connectPoint(), newAddresses);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void clearAddressBindings(ConnectPoint connectPoint) {
|
||||||
|
synchronized (portAddresses) {
|
||||||
|
portAddresses.remove(connectPoint);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<PortAddresses> getAddressBindings() {
|
||||||
|
synchronized (portAddresses) {
|
||||||
|
return new HashSet<>(portAddresses.values());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public PortAddresses getAddressBindingsForPort(ConnectPoint connectPoint) {
|
||||||
|
PortAddresses addresses;
|
||||||
|
|
||||||
|
synchronized (portAddresses) {
|
||||||
|
addresses = portAddresses.get(connectPoint);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (addresses == null) {
|
||||||
|
addresses = new PortAddresses(connectPoint, null, null);
|
||||||
|
}
|
||||||
|
|
||||||
|
return addresses;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
@ -10,6 +10,7 @@ import static org.slf4j.LoggerFactory.getLogger;
|
|||||||
|
|
||||||
import java.util.HashSet;
|
import java.util.HashSet;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
|
||||||
import org.apache.felix.scr.annotations.Activate;
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
import org.apache.felix.scr.annotations.Component;
|
import org.apache.felix.scr.annotations.Component;
|
||||||
import org.apache.felix.scr.annotations.Deactivate;
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
@ -24,9 +25,9 @@ import org.onlab.onos.net.link.LinkEvent;
|
|||||||
import org.onlab.onos.net.link.LinkStore;
|
import org.onlab.onos.net.link.LinkStore;
|
||||||
import org.onlab.onos.net.link.LinkStoreDelegate;
|
import org.onlab.onos.net.link.LinkStoreDelegate;
|
||||||
import org.onlab.onos.net.provider.ProviderId;
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
import org.onlab.onos.store.impl.AbsentInvalidatingLoadingCache;
|
import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
|
||||||
import org.onlab.onos.store.impl.AbstractDistributedStore;
|
import org.onlab.onos.store.common.AbstractHazelcastStore;
|
||||||
import org.onlab.onos.store.impl.OptionalCacheLoader;
|
import org.onlab.onos.store.common.OptionalCacheLoader;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
import com.google.common.base.Optional;
|
import com.google.common.base.Optional;
|
||||||
@ -43,7 +44,7 @@ import com.hazelcast.core.IMap;
|
|||||||
@Component(immediate = true)
|
@Component(immediate = true)
|
||||||
@Service
|
@Service
|
||||||
public class DistributedLinkStore
|
public class DistributedLinkStore
|
||||||
extends AbstractDistributedStore<LinkEvent, LinkStoreDelegate>
|
extends AbstractHazelcastStore<LinkEvent, LinkStoreDelegate>
|
||||||
implements LinkStore {
|
implements LinkStore {
|
||||||
|
|
||||||
private final Logger log = getLogger(getClass());
|
private final Logger log = getLogger(getClass());
|
@ -0,0 +1,444 @@
|
|||||||
|
package org.onlab.onos.store.topology.impl;
|
||||||
|
|
||||||
|
import com.google.common.collect.ImmutableMap;
|
||||||
|
import com.google.common.collect.ImmutableSet;
|
||||||
|
import com.google.common.collect.ImmutableSetMultimap;
|
||||||
|
import org.onlab.graph.DijkstraGraphSearch;
|
||||||
|
import org.onlab.graph.GraphPathSearch;
|
||||||
|
import org.onlab.graph.TarjanGraphSearch;
|
||||||
|
import org.onlab.onos.net.AbstractModel;
|
||||||
|
import org.onlab.onos.net.ConnectPoint;
|
||||||
|
import org.onlab.onos.net.DefaultPath;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.net.Link;
|
||||||
|
import org.onlab.onos.net.Path;
|
||||||
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
import org.onlab.onos.net.topology.ClusterId;
|
||||||
|
import org.onlab.onos.net.topology.DefaultTopologyCluster;
|
||||||
|
import org.onlab.onos.net.topology.DefaultTopologyVertex;
|
||||||
|
import org.onlab.onos.net.topology.GraphDescription;
|
||||||
|
import org.onlab.onos.net.topology.LinkWeight;
|
||||||
|
import org.onlab.onos.net.topology.Topology;
|
||||||
|
import org.onlab.onos.net.topology.TopologyCluster;
|
||||||
|
import org.onlab.onos.net.topology.TopologyEdge;
|
||||||
|
import org.onlab.onos.net.topology.TopologyGraph;
|
||||||
|
import org.onlab.onos.net.topology.TopologyVertex;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
|
import static com.google.common.base.MoreObjects.toStringHelper;
|
||||||
|
import static com.google.common.collect.ImmutableSetMultimap.Builder;
|
||||||
|
import static org.onlab.graph.GraphPathSearch.Result;
|
||||||
|
import static org.onlab.graph.TarjanGraphSearch.SCCResult;
|
||||||
|
import static org.onlab.onos.net.Link.Type.INDIRECT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default implementation of the topology descriptor. This carries the
|
||||||
|
* backing topology data.
|
||||||
|
*/
|
||||||
|
public class DefaultTopology extends AbstractModel implements Topology {
|
||||||
|
|
||||||
|
private static final DijkstraGraphSearch<TopologyVertex, TopologyEdge> DIJKSTRA =
|
||||||
|
new DijkstraGraphSearch<>();
|
||||||
|
private static final TarjanGraphSearch<TopologyVertex, TopologyEdge> TARJAN =
|
||||||
|
new TarjanGraphSearch<>();
|
||||||
|
|
||||||
|
private static final ProviderId PID = new ProviderId("core", "org.onlab.onos.net");
|
||||||
|
|
||||||
|
private final long time;
|
||||||
|
private final TopologyGraph graph;
|
||||||
|
|
||||||
|
private final SCCResult<TopologyVertex, TopologyEdge> clusterResults;
|
||||||
|
private final ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> results;
|
||||||
|
private final ImmutableSetMultimap<PathKey, Path> paths;
|
||||||
|
|
||||||
|
private final ImmutableMap<ClusterId, TopologyCluster> clusters;
|
||||||
|
private final ImmutableSet<ConnectPoint> infrastructurePoints;
|
||||||
|
private final ImmutableSetMultimap<ClusterId, ConnectPoint> broadcastSets;
|
||||||
|
|
||||||
|
private ImmutableMap<DeviceId, TopologyCluster> clustersByDevice;
|
||||||
|
private ImmutableSetMultimap<TopologyCluster, DeviceId> devicesByCluster;
|
||||||
|
private ImmutableSetMultimap<TopologyCluster, Link> linksByCluster;
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a topology descriptor attributed to the specified provider.
|
||||||
|
*
|
||||||
|
* @param providerId identity of the provider
|
||||||
|
* @param description data describing the new topology
|
||||||
|
*/
|
||||||
|
DefaultTopology(ProviderId providerId, GraphDescription description) {
|
||||||
|
super(providerId);
|
||||||
|
this.time = description.timestamp();
|
||||||
|
|
||||||
|
// Build the graph
|
||||||
|
this.graph = new DefaultTopologyGraph(description.vertexes(),
|
||||||
|
description.edges());
|
||||||
|
|
||||||
|
this.results = searchForShortestPaths();
|
||||||
|
this.paths = buildPaths();
|
||||||
|
|
||||||
|
this.clusterResults = searchForClusters();
|
||||||
|
this.clusters = buildTopologyClusters();
|
||||||
|
|
||||||
|
buildIndexes();
|
||||||
|
|
||||||
|
this.broadcastSets = buildBroadcastSets();
|
||||||
|
this.infrastructurePoints = findInfrastructurePoints();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long time() {
|
||||||
|
return time;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int clusterCount() {
|
||||||
|
return clusters.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int deviceCount() {
|
||||||
|
return graph.getVertexes().size();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int linkCount() {
|
||||||
|
return graph.getEdges().size();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int pathCount() {
|
||||||
|
return paths.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the backing topology graph.
|
||||||
|
*
|
||||||
|
* @return topology graph
|
||||||
|
*/
|
||||||
|
TopologyGraph getGraph() {
|
||||||
|
return graph;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the set of topology clusters.
|
||||||
|
*
|
||||||
|
* @return set of clusters
|
||||||
|
*/
|
||||||
|
Set<TopologyCluster> getClusters() {
|
||||||
|
return ImmutableSet.copyOf(clusters.values());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the specified topology cluster.
|
||||||
|
*
|
||||||
|
* @param clusterId cluster identifier
|
||||||
|
* @return topology cluster
|
||||||
|
*/
|
||||||
|
TopologyCluster getCluster(ClusterId clusterId) {
|
||||||
|
return clusters.get(clusterId);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the topology cluster that contains the given device.
|
||||||
|
*
|
||||||
|
* @param deviceId device identifier
|
||||||
|
* @return topology cluster
|
||||||
|
*/
|
||||||
|
TopologyCluster getCluster(DeviceId deviceId) {
|
||||||
|
return clustersByDevice.get(deviceId);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the set of cluster devices.
|
||||||
|
*
|
||||||
|
* @param cluster topology cluster
|
||||||
|
* @return cluster devices
|
||||||
|
*/
|
||||||
|
Set<DeviceId> getClusterDevices(TopologyCluster cluster) {
|
||||||
|
return devicesByCluster.get(cluster);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the set of cluster links.
|
||||||
|
*
|
||||||
|
* @param cluster topology cluster
|
||||||
|
* @return cluster links
|
||||||
|
*/
|
||||||
|
Set<Link> getClusterLinks(TopologyCluster cluster) {
|
||||||
|
return linksByCluster.get(cluster);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Indicates whether the given point is an infrastructure link end-point.
|
||||||
|
*
|
||||||
|
* @param connectPoint connection point
|
||||||
|
* @return true if infrastructure
|
||||||
|
*/
|
||||||
|
boolean isInfrastructure(ConnectPoint connectPoint) {
|
||||||
|
return infrastructurePoints.contains(connectPoint);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Indicates whether the given point is part of a broadcast set.
|
||||||
|
*
|
||||||
|
* @param connectPoint connection point
|
||||||
|
* @return true if in broadcast set
|
||||||
|
*/
|
||||||
|
boolean isBroadcastPoint(ConnectPoint connectPoint) {
|
||||||
|
// Any non-infrastructure, i.e. edge points are assumed to be OK.
|
||||||
|
if (!isInfrastructure(connectPoint)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the cluster to which the device belongs.
|
||||||
|
TopologyCluster cluster = clustersByDevice.get(connectPoint.deviceId());
|
||||||
|
if (cluster == null) {
|
||||||
|
throw new IllegalArgumentException("No cluster found for device " + connectPoint.deviceId());
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the broadcast set is null or empty, or if the point explicitly
|
||||||
|
// belongs to it, return true;
|
||||||
|
Set<ConnectPoint> points = broadcastSets.get(cluster.id());
|
||||||
|
return points == null || points.isEmpty() || points.contains(connectPoint);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the size of the cluster broadcast set.
|
||||||
|
*
|
||||||
|
* @param clusterId cluster identifier
|
||||||
|
* @return size of the cluster broadcast set
|
||||||
|
*/
|
||||||
|
int broadcastSetSize(ClusterId clusterId) {
|
||||||
|
return broadcastSets.get(clusterId).size();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the set of pre-computed shortest paths between source and
|
||||||
|
* destination devices.
|
||||||
|
*
|
||||||
|
* @param src source device
|
||||||
|
* @param dst destination device
|
||||||
|
* @return set of shortest paths
|
||||||
|
*/
|
||||||
|
Set<Path> getPaths(DeviceId src, DeviceId dst) {
|
||||||
|
return paths.get(new PathKey(src, dst));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Computes on-demand the set of shortest paths between source and
|
||||||
|
* destination devices.
|
||||||
|
*
|
||||||
|
* @param src source device
|
||||||
|
* @param dst destination device
|
||||||
|
* @return set of shortest paths
|
||||||
|
*/
|
||||||
|
Set<Path> getPaths(DeviceId src, DeviceId dst, LinkWeight weight) {
|
||||||
|
GraphPathSearch.Result<TopologyVertex, TopologyEdge> result =
|
||||||
|
DIJKSTRA.search(graph, new DefaultTopologyVertex(src),
|
||||||
|
new DefaultTopologyVertex(dst), weight);
|
||||||
|
ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
|
||||||
|
for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
|
||||||
|
builder.add(networkPath(path));
|
||||||
|
}
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// Searches the graph for all shortest paths and returns the search results.
|
||||||
|
private ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> searchForShortestPaths() {
|
||||||
|
ImmutableMap.Builder<DeviceId, Result<TopologyVertex, TopologyEdge>> builder = ImmutableMap.builder();
|
||||||
|
|
||||||
|
// Search graph paths for each source to all destinations.
|
||||||
|
LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size());
|
||||||
|
for (TopologyVertex src : graph.getVertexes()) {
|
||||||
|
builder.put(src.deviceId(), DIJKSTRA.search(graph, src, null, weight));
|
||||||
|
}
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Builds network paths from the graph path search results
|
||||||
|
private ImmutableSetMultimap<PathKey, Path> buildPaths() {
|
||||||
|
Builder<PathKey, Path> builder = ImmutableSetMultimap.builder();
|
||||||
|
for (DeviceId deviceId : results.keySet()) {
|
||||||
|
Result<TopologyVertex, TopologyEdge> result = results.get(deviceId);
|
||||||
|
for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
|
||||||
|
builder.put(new PathKey(path.src().deviceId(), path.dst().deviceId()),
|
||||||
|
networkPath(path));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Converts graph path to a network path with the same cost.
|
||||||
|
private Path networkPath(org.onlab.graph.Path<TopologyVertex, TopologyEdge> path) {
|
||||||
|
List<Link> links = new ArrayList<>();
|
||||||
|
for (TopologyEdge edge : path.edges()) {
|
||||||
|
links.add(edge.link());
|
||||||
|
}
|
||||||
|
return new DefaultPath(PID, links, path.cost());
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// Searches for SCC (strongly-connected component) clusters in the network
// topology graph using the Tarjan algorithm; indirect links are excluded
// from traversal by the weight function.
private SCCResult<TopologyVertex, TopologyEdge> searchForClusters() {
    return TARJAN.search(graph, new NoIndirectLinksWeight());
}
|
||||||
|
|
||||||
|
// Builds the topology clusters and returns the id-cluster bindings.
|
||||||
|
private ImmutableMap<ClusterId, TopologyCluster> buildTopologyClusters() {
|
||||||
|
ImmutableMap.Builder<ClusterId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
|
||||||
|
SCCResult<TopologyVertex, TopologyEdge> result =
|
||||||
|
TARJAN.search(graph, new NoIndirectLinksWeight());
|
||||||
|
|
||||||
|
// Extract both vertexes and edges from the results; the lists form
|
||||||
|
// pairs along the same index.
|
||||||
|
List<Set<TopologyVertex>> clusterVertexes = result.clusterVertexes();
|
||||||
|
List<Set<TopologyEdge>> clusterEdges = result.clusterEdges();
|
||||||
|
|
||||||
|
// Scan over the lists and create a cluster from the results.
|
||||||
|
for (int i = 0, n = result.clusterCount(); i < n; i++) {
|
||||||
|
Set<TopologyVertex> vertexSet = clusterVertexes.get(i);
|
||||||
|
Set<TopologyEdge> edgeSet = clusterEdges.get(i);
|
||||||
|
|
||||||
|
ClusterId cid = ClusterId.clusterId(i);
|
||||||
|
DefaultTopologyCluster cluster =
|
||||||
|
new DefaultTopologyCluster(cid, vertexSet.size(), edgeSet.size(),
|
||||||
|
findRoot(vertexSet).deviceId());
|
||||||
|
clusterBuilder.put(cid, cluster);
|
||||||
|
}
|
||||||
|
return clusterBuilder.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finds the vertex whose device id is the lexicographical minimum in the
|
||||||
|
// specified set.
|
||||||
|
private TopologyVertex findRoot(Set<TopologyVertex> vertexSet) {
|
||||||
|
TopologyVertex minVertex = null;
|
||||||
|
for (TopologyVertex vertex : vertexSet) {
|
||||||
|
if (minVertex == null ||
|
||||||
|
minVertex.deviceId().toString()
|
||||||
|
.compareTo(minVertex.deviceId().toString()) < 0) {
|
||||||
|
minVertex = vertex;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return minVertex;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Processes a map of broadcast sets for each cluster.
|
||||||
|
private ImmutableSetMultimap<ClusterId, ConnectPoint> buildBroadcastSets() {
|
||||||
|
Builder<ClusterId, ConnectPoint> builder = ImmutableSetMultimap.builder();
|
||||||
|
for (TopologyCluster cluster : clusters.values()) {
|
||||||
|
addClusterBroadcastSet(cluster, builder);
|
||||||
|
}
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finds all broadcast points for the cluster. These are those connection
|
||||||
|
// points which lie along the shortest paths between the cluster root and
|
||||||
|
// all other devices within the cluster.
|
||||||
|
private void addClusterBroadcastSet(TopologyCluster cluster,
|
||||||
|
Builder<ClusterId, ConnectPoint> builder) {
|
||||||
|
// Use the graph root search results to build the broadcast set.
|
||||||
|
Result<TopologyVertex, TopologyEdge> result = results.get(cluster.root());
|
||||||
|
for (Map.Entry<TopologyVertex, Set<TopologyEdge>> entry : result.parents().entrySet()) {
|
||||||
|
TopologyVertex vertex = entry.getKey();
|
||||||
|
|
||||||
|
// Ignore any parents that lead outside the cluster.
|
||||||
|
if (clustersByDevice.get(vertex.deviceId()) != cluster) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore any back-link sets that are empty.
|
||||||
|
Set<TopologyEdge> parents = entry.getValue();
|
||||||
|
if (parents.isEmpty()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use the first back-link source and destinations to add to the
|
||||||
|
// broadcast set.
|
||||||
|
Link link = parents.iterator().next().link();
|
||||||
|
builder.put(cluster.id(), link.src());
|
||||||
|
builder.put(cluster.id(), link.dst());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collects and returns an set of all infrastructure link end-points.
|
||||||
|
private ImmutableSet<ConnectPoint> findInfrastructurePoints() {
|
||||||
|
ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder();
|
||||||
|
for (TopologyEdge edge : graph.getEdges()) {
|
||||||
|
builder.add(edge.link().src());
|
||||||
|
builder.add(edge.link().dst());
|
||||||
|
}
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Builds cluster-devices, cluster-links and device-cluster indexes.
private void buildIndexes() {
    // Prepare the index builders
    ImmutableMap.Builder<DeviceId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
    ImmutableSetMultimap.Builder<TopologyCluster, DeviceId> devicesBuilder = ImmutableSetMultimap.builder();
    ImmutableSetMultimap.Builder<TopologyCluster, Link> linksBuilder = ImmutableSetMultimap.builder();

    // Now scan through all the clusters
    for (TopologyCluster cluster : clusters.values()) {
        // The cluster id index doubles as the offset into the parallel
        // clusterVertexes/clusterEdges lists of the Tarjan results.
        int i = cluster.id().index();

        // Scan through all the cluster vertexes.
        for (TopologyVertex vertex : clusterResults.clusterVertexes().get(i)) {
            devicesBuilder.put(cluster, vertex.deviceId());
            clusterBuilder.put(vertex.deviceId(), cluster);
        }

        // Scan through all the cluster edges.
        for (TopologyEdge edge : clusterResults.clusterEdges().get(i)) {
            linksBuilder.put(cluster, edge.link());
        }
    }

    // Finalize all indexes.
    clustersByDevice = clusterBuilder.build();
    devicesByCluster = devicesBuilder.build();
    linksByCluster = linksBuilder.build();
}
|
||||||
|
|
||||||
|
// Link weight for measuring link cost as hop count with indirect links
|
||||||
|
// being as expensive as traversing the entire graph to assume the worst.
|
||||||
|
private static class HopCountLinkWeight implements LinkWeight {
|
||||||
|
private final int indirectLinkCost;
|
||||||
|
|
||||||
|
HopCountLinkWeight(int indirectLinkCost) {
|
||||||
|
this.indirectLinkCost = indirectLinkCost;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public double weight(TopologyEdge edge) {
|
||||||
|
// To force preference to use direct paths first, make indirect
|
||||||
|
// links as expensive as the linear vertex traversal.
|
||||||
|
return edge.link().type() == INDIRECT ? indirectLinkCost : 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Link weight for preventing traversal over indirect links.
|
||||||
|
private static class NoIndirectLinksWeight implements LinkWeight {
|
||||||
|
@Override
|
||||||
|
public double weight(TopologyEdge edge) {
|
||||||
|
return edge.link().type() == INDIRECT ? -1 : 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Renders a summary of the topology snapshot: creation time plus
// cluster, device, link and pre-computed path counts.
@Override
public String toString() {
    return toStringHelper(this)
            .add("time", time)
            .add("clusters", clusterCount())
            .add("devices", deviceCount())
            .add("links", linkCount())
            .add("pathCount", pathCount())
            .toString();
}
|
||||||
|
}
|
@ -0,0 +1,28 @@
|
|||||||
|
package org.onlab.onos.store.topology.impl;
|
||||||
|
|
||||||
|
import org.onlab.graph.AdjacencyListsGraph;
|
||||||
|
import org.onlab.onos.net.topology.TopologyEdge;
|
||||||
|
import org.onlab.onos.net.topology.TopologyGraph;
|
||||||
|
import org.onlab.onos.net.topology.TopologyVertex;
|
||||||
|
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
|
/**
 * Default implementation of an immutable topology graph based on a generic
 * implementation of adjacency lists graph.
 */
public class DefaultTopologyGraph
        extends AdjacencyListsGraph<TopologyVertex, TopologyEdge>
        implements TopologyGraph {

    /**
     * Creates a topology graph comprising of the specified vertexes and edges.
     *
     * @param vertexes set of graph vertexes
     * @param edges    set of graph edges
     */
    public DefaultTopologyGraph(Set<TopologyVertex> vertexes, Set<TopologyEdge> edges) {
        // All structure is inherited from the generic adjacency-lists graph.
        super(vertexes, edges);
    }

}
|
@ -0,0 +1,141 @@
|
|||||||
|
package org.onlab.onos.store.topology.impl;
|
||||||
|
|
||||||
|
import static org.slf4j.LoggerFactory.getLogger;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
|
import org.apache.felix.scr.annotations.Activate;
|
||||||
|
import org.apache.felix.scr.annotations.Component;
|
||||||
|
import org.apache.felix.scr.annotations.Deactivate;
|
||||||
|
import org.apache.felix.scr.annotations.Service;
|
||||||
|
import org.onlab.onos.event.Event;
|
||||||
|
import org.onlab.onos.net.ConnectPoint;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.net.Link;
|
||||||
|
import org.onlab.onos.net.Path;
|
||||||
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
import org.onlab.onos.net.topology.ClusterId;
|
||||||
|
import org.onlab.onos.net.topology.GraphDescription;
|
||||||
|
import org.onlab.onos.net.topology.LinkWeight;
|
||||||
|
import org.onlab.onos.net.topology.Topology;
|
||||||
|
import org.onlab.onos.net.topology.TopologyCluster;
|
||||||
|
import org.onlab.onos.net.topology.TopologyEvent;
|
||||||
|
import org.onlab.onos.net.topology.TopologyGraph;
|
||||||
|
import org.onlab.onos.net.topology.TopologyStore;
|
||||||
|
import org.onlab.onos.net.topology.TopologyStoreDelegate;
|
||||||
|
import org.onlab.onos.store.AbstractStore;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
|
||||||
|
/**
 * Manages inventory of topology snapshots using trivial in-memory
 * structures implementation.
 */
//FIXME: I LIE I AM NOT DISTRIBUTED
@Component(immediate = true)
@Service
public class DistributedTopologyStore
        extends AbstractStore<TopologyEvent, TopologyStoreDelegate>
        implements TopologyStore {

    private final Logger log = getLogger(getClass());

    // Most recent topology snapshot; volatile so readers always observe
    // the latest promoted reference without locking.
    private volatile DefaultTopology current;

    @Activate
    public void activate() {
        log.info("Started");
    }

    @Deactivate
    public void deactivate() {
        log.info("Stopped");
    }

    @Override
    public Topology currentTopology() {
        return current;
    }

    @Override
    public boolean isLatest(Topology topology) {
        // Topology is current only if it is the same as our current topology
        return topology == current;
    }

    @Override
    public TopologyGraph getGraph(Topology topology) {
        return defaultTopology(topology).getGraph();
    }

    @Override
    public Set<TopologyCluster> getClusters(Topology topology) {
        return defaultTopology(topology).getClusters();
    }

    @Override
    public TopologyCluster getCluster(Topology topology, ClusterId clusterId) {
        return defaultTopology(topology).getCluster(clusterId);
    }

    @Override
    public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) {
        return defaultTopology(topology).getClusterDevices(cluster);
    }

    @Override
    public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) {
        return defaultTopology(topology).getClusterLinks(cluster);
    }

    @Override
    public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) {
        return defaultTopology(topology).getPaths(src, dst);
    }

    @Override
    public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst,
                              LinkWeight weight) {
        return defaultTopology(topology).getPaths(src, dst, weight);
    }

    @Override
    public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) {
        return defaultTopology(topology).isInfrastructure(connectPoint);
    }

    @Override
    public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
        return defaultTopology(topology).isBroadcastPoint(connectPoint);
    }

    @Override
    public TopologyEvent updateTopology(ProviderId providerId,
                                        GraphDescription graphDescription,
                                        List<Event> reasons) {
        // First off, make sure that what we're given is indeed newer than
        // what we already have.
        if (current != null && graphDescription.timestamp() < current.time()) {
            return null;
        }

        // Have the default topology construct self from the description data.
        DefaultTopology newTopology =
                new DefaultTopology(providerId, graphDescription);

        // Promote the new topology to current and return a ready-to-send event.
        // Synchronized so concurrent updates promote and report atomically.
        synchronized (this) {
            current = newTopology;
            return new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED, current);
        }
    }

    // Validates the specified topology and returns it as a default
    // implementation; rejects any foreign Topology implementation.
    private DefaultTopology defaultTopology(Topology topology) {
        if (topology instanceof DefaultTopology) {
            return (DefaultTopology) topology;
        }
        throw new IllegalArgumentException("Topology class " + topology.getClass() +
                " not supported");
    }

}
|
@ -0,0 +1,40 @@
|
|||||||
|
package org.onlab.onos.store.topology.impl;
|
||||||
|
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
|
||||||
|
import java.util.Objects;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Key for filing pre-computed paths between source and destination devices.
|
||||||
|
*/
|
||||||
|
class PathKey {
|
||||||
|
private final DeviceId src;
|
||||||
|
private final DeviceId dst;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a path key from the given source/dest pair.
|
||||||
|
* @param src source device
|
||||||
|
* @param dst destination device
|
||||||
|
*/
|
||||||
|
PathKey(DeviceId src, DeviceId dst) {
|
||||||
|
this.src = src;
|
||||||
|
this.dst = dst;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int hashCode() {
|
||||||
|
return Objects.hash(src, dst);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean equals(Object obj) {
|
||||||
|
if (this == obj) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (obj instanceof PathKey) {
|
||||||
|
final PathKey other = (PathKey) obj;
|
||||||
|
return Objects.equals(this.src, other.src) && Objects.equals(this.dst, other.dst);
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
@ -32,9 +32,9 @@ import org.onlab.onos.net.device.DeviceEvent;
|
|||||||
import org.onlab.onos.net.device.DeviceStoreDelegate;
|
import org.onlab.onos.net.device.DeviceStoreDelegate;
|
||||||
import org.onlab.onos.net.device.PortDescription;
|
import org.onlab.onos.net.device.PortDescription;
|
||||||
import org.onlab.onos.net.provider.ProviderId;
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
import org.onlab.onos.store.common.StoreManager;
|
||||||
import org.onlab.onos.store.common.StoreService;
|
import org.onlab.onos.store.common.StoreService;
|
||||||
import org.onlab.onos.store.impl.StoreManager;
|
import org.onlab.onos.store.common.TestStoreManager;
|
||||||
import org.onlab.onos.store.impl.TestStoreManager;
|
|
||||||
|
|
||||||
import com.google.common.collect.Iterables;
|
import com.google.common.collect.Iterables;
|
||||||
import com.google.common.collect.Sets;
|
import com.google.common.collect.Sets;
|
@ -26,9 +26,9 @@ import org.onlab.onos.net.link.DefaultLinkDescription;
|
|||||||
import org.onlab.onos.net.link.LinkEvent;
|
import org.onlab.onos.net.link.LinkEvent;
|
||||||
import org.onlab.onos.net.link.LinkStoreDelegate;
|
import org.onlab.onos.net.link.LinkStoreDelegate;
|
||||||
import org.onlab.onos.net.provider.ProviderId;
|
import org.onlab.onos.net.provider.ProviderId;
|
||||||
|
import org.onlab.onos.store.common.StoreManager;
|
||||||
import org.onlab.onos.store.common.StoreService;
|
import org.onlab.onos.store.common.StoreService;
|
||||||
import org.onlab.onos.store.impl.StoreManager;
|
import org.onlab.onos.store.common.TestStoreManager;
|
||||||
import org.onlab.onos.store.impl.TestStoreManager;
|
|
||||||
|
|
||||||
import com.google.common.collect.Iterables;
|
import com.google.common.collect.Iterables;
|
||||||
import com.hazelcast.config.Config;
|
import com.hazelcast.config.Config;
|
51
core/store/hz/pom.xml
Normal file
51
core/store/hz/pom.xml
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
|
<parent>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-store</artifactId>
|
||||||
|
<version>1.0.0-SNAPSHOT</version>
|
||||||
|
<relativePath>../pom.xml</relativePath>
|
||||||
|
</parent>
|
||||||
|
|
||||||
|
<artifactId>onos-core-hz</artifactId>
|
||||||
|
<packaging>pom</packaging>
|
||||||
|
|
||||||
|
<description>ONOS Core Hazelcast Store subsystem</description>
|
||||||
|
|
||||||
|
<modules>
|
||||||
|
<module>common</module>
|
||||||
|
<module>cluster</module>
|
||||||
|
<module>net</module>
|
||||||
|
</modules>
|
||||||
|
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.google.guava</groupId>
|
||||||
|
<artifactId>guava</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onlab-misc</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onlab-junit</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.hazelcast</groupId>
|
||||||
|
<artifactId>hazelcast</artifactId>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>maven-bundle-plugin</artifactId>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
|
||||||
|
</project>
|
@ -1,7 +1,5 @@
|
|||||||
<?xml version="1.0" encoding="UTF-8"?>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
|
||||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
<parent>
|
<parent>
|
||||||
@ -12,34 +10,41 @@
|
|||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
<artifactId>onos-core-store</artifactId>
|
<artifactId>onos-core-store</artifactId>
|
||||||
<packaging>bundle</packaging>
|
<packaging>pom</packaging>
|
||||||
|
|
||||||
<description>ONOS distributed store subsystems</description>
|
<description>ONOS Core Store subsystem</description>
|
||||||
|
|
||||||
|
<modules>
|
||||||
|
<module>trivial</module>
|
||||||
|
<module>dist</module>
|
||||||
|
<module>hz</module>
|
||||||
|
<module>serializers</module>
|
||||||
|
</modules>
|
||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.onlab.onos</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
<artifactId>onos-api</artifactId>
|
<artifactId>guava</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.felix</groupId>
|
<groupId>org.onlab.onos</groupId>
|
||||||
<artifactId>org.apache.felix.scr.annotations</artifactId>
|
<artifactId>onlab-misc</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onlab-junit</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.hazelcast</groupId>
|
<groupId>com.hazelcast</groupId>
|
||||||
<artifactId>hazelcast</artifactId>
|
<artifactId>hazelcast</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
|
||||||
<groupId>de.javakaffee</groupId>
|
|
||||||
<artifactId>kryo-serializers</artifactId>
|
|
||||||
</dependency>
|
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
|
||||||
<build>
|
<build>
|
||||||
<plugins>
|
<plugins>
|
||||||
<plugin>
|
<plugin>
|
||||||
<groupId>org.apache.felix</groupId>
|
<groupId>org.apache.felix</groupId>
|
||||||
<artifactId>maven-scr-plugin</artifactId>
|
<artifactId>maven-bundle-plugin</artifactId>
|
||||||
</plugin>
|
</plugin>
|
||||||
</plugins>
|
</plugins>
|
||||||
</build>
|
</build>
|
||||||
|
43
core/store/serializers/pom.xml
Normal file
43
core/store/serializers/pom.xml
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
|
<parent>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-core-store</artifactId>
|
||||||
|
<version>1.0.0-SNAPSHOT</version>
|
||||||
|
<relativePath>../pom.xml</relativePath>
|
||||||
|
</parent>
|
||||||
|
|
||||||
|
<artifactId>onos-core-serializers</artifactId>
|
||||||
|
<packaging>bundle</packaging>
|
||||||
|
|
||||||
|
<description>Serializers for ONOS classes</description>
|
||||||
|
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.onlab.onos</groupId>
|
||||||
|
<artifactId>onos-api</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>org.apache.felix.scr.annotations</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>de.javakaffee</groupId>
|
||||||
|
<artifactId>kryo-serializers</artifactId>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.felix</groupId>
|
||||||
|
<artifactId>maven-scr-plugin</artifactId>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
|
||||||
|
</project>
|
@ -6,7 +6,7 @@
|
|||||||
|
|
||||||
<parent>
|
<parent>
|
||||||
<groupId>org.onlab.onos</groupId>
|
<groupId>org.onlab.onos</groupId>
|
||||||
<artifactId>onos-core</artifactId>
|
<artifactId>onos-core-store</artifactId>
|
||||||
<version>1.0.0-SNAPSHOT</version>
|
<version>1.0.0-SNAPSHOT</version>
|
||||||
<relativePath>../pom.xml</relativePath>
|
<relativePath>../pom.xml</relativePath>
|
||||||
</parent>
|
</parent>
|
@ -0,0 +1,32 @@
|
|||||||
|
package org.onlab.onos.net.trivial.impl;
|
||||||
|
|
||||||
|
import org.apache.felix.scr.annotations.Component;
|
||||||
|
import org.apache.felix.scr.annotations.Service;
|
||||||
|
import org.onlab.onos.cluster.MastershipTerm;
|
||||||
|
import org.onlab.onos.net.DeviceId;
|
||||||
|
import org.onlab.onos.store.ClockService;
|
||||||
|
import org.onlab.onos.store.Timestamp;
|
||||||
|
|
||||||
|
//FIXME: Code clone in onos-core-trivial, onos-core-hz-net
/**
 * Dummy implementation of {@link ClockService}.
 */
@Component(immediate = true)
@Service
public class NoOpClockService implements ClockService {

    @Override
    public Timestamp getTimestamp(DeviceId deviceId) {
        // Hands out a placeholder timestamp that must never actually be
        // compared; compareTo fails fast to flag unintended use.
        return new Timestamp() {

            @Override
            public int compareTo(Timestamp o) {
                throw new IllegalStateException("Never expected to be used.");
            }
        };
    }

    @Override
    public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
        // Intentionally a no-op: this implementation tracks no terms.
    }
}
|
@ -49,7 +49,20 @@
|
|||||||
description="ONOS core components">
|
description="ONOS core components">
|
||||||
<feature>onos-api</feature>
|
<feature>onos-api</feature>
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
|
<bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
|
||||||
<bundle>mvn:org.onlab.onos/onos-core-store/1.0.0-SNAPSHOT</bundle>
|
<bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle>
|
||||||
|
<bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle>
|
||||||
|
<bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle>
|
||||||
|
<bundle>mvn:org.onlab.onos/onos-core-hz-net/1.0.0-SNAPSHOT</bundle>
|
||||||
|
</feature>
|
||||||
|
|
||||||
|
<feature name="onos-core-dist" version="1.0.0"
|
||||||
|
description="ONOS core components">
|
||||||
|
<feature>onos-api</feature>
|
||||||
|
<bundle>mvn:org.onlab.onos/onos-core-net/1.0.0-SNAPSHOT</bundle>
|
||||||
|
<bundle>mvn:org.onlab.onos/onos-core-hz-common/1.0.0-SNAPSHOT</bundle>
|
||||||
|
<bundle>mvn:org.onlab.onos/onos-core-serializers/1.0.0-SNAPSHOT</bundle>
|
||||||
|
<bundle>mvn:org.onlab.onos/onos-core-hz-cluster/1.0.0-SNAPSHOT</bundle>
|
||||||
|
<bundle>mvn:org.onlab.onos/onos-core-dist/1.0.0-SNAPSHOT</bundle>
|
||||||
</feature>
|
</feature>
|
||||||
|
|
||||||
<feature name="onos-core-trivial" version="1.0.0"
|
<feature name="onos-core-trivial" version="1.0.0"
|
||||||
|
@ -1,4 +1,7 @@
|
|||||||
<project>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4_0_0.xsd">
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<groupId>org.onlab.tools</groupId>
|
<groupId>org.onlab.tools</groupId>
|
||||||
<artifactId>onos-build-conf</artifactId>
|
<artifactId>onos-build-conf</artifactId>
|
||||||
|
17
tools/test/bin/onos-start-network
Executable file
17
tools/test/bin/onos-start-network
Executable file
@ -0,0 +1,17 @@
|
|||||||
|
#!/bin/bash
#-------------------------------------------------------------------------------
# Copies the test topology files to the mininet VM in the ONOS cell and
# starts the network there.
# (The previous header, "Verifies connectivity to each node in ONOS cell",
# described a different script.)
#-------------------------------------------------------------------------------

[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
. $ONOS_ROOT/tools/build/envDefaults

SSHCMD="ssh -o PasswordAuthentication=no"
SCPCMD="scp -q -o PasswordAuthentication=no"

echo "Copying topology files to mininet vm."
$SSHCMD -n $ONOS_USER@$OCN mkdir -p topos
$SCPCMD $ONOS_ROOT/tools/test/topos/* $ONOS_USER@$OCN:topos/

echo "Starting Network."
# Pass every OC# cell address (sorted) as controller arguments to the topology.
$SSHCMD -t $ONOS_USER@$OCN sudo python topos/sol.py $(env | sort | egrep "OC[0-9]+" | cut -d= -f2)
|
@ -2,4 +2,4 @@
|
|||||||
* Mechanism to transfer messages over network using IO loop and
|
* Mechanism to transfer messages over network using IO loop and
|
||||||
* message stream, backed by NIO byte buffers.
|
* message stream, backed by NIO byte buffers.
|
||||||
*/
|
*/
|
||||||
package org.onlab.nio;
|
package org.onlab.nio;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user