[CORD-2607] Mcast buckets correction

Change-Id: Ib47b2d8e40babdbb2ccdba61b48365a141752016

parent ec6ac42337
commit b72201bcda
@@ -50,6 +50,7 @@ import org.onosproject.net.flowobjective.ForwardingObjective;
 import org.onosproject.net.flowobjective.NextObjective;
 import org.onosproject.net.flowobjective.ObjectiveContext;
 import org.onosproject.net.mcast.McastEvent;
+import org.onosproject.net.mcast.McastRoute;
 import org.onosproject.net.mcast.McastRouteInfo;
 import org.onosproject.net.topology.TopologyService;
 import org.onosproject.segmentrouting.config.SegmentRoutingAppConfig;
@@ -62,6 +63,7 @@ import org.onosproject.store.service.Versioned;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import java.time.Instant;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
@@ -69,9 +71,15 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
 import java.util.stream.Collectors;

 import static com.google.common.base.Preconditions.checkState;
+import static java.util.concurrent.Executors.newScheduledThreadPool;
+import static org.onlab.util.Tools.groupedThreads;
 import static org.onosproject.segmentrouting.SegmentRoutingManager.INTERNAL_VLAN;

 /**
@@ -87,6 +95,49 @@ public class McastHandler {
     private final KryoNamespace.Builder mcastKryo;
     private final ConsistentMap<McastStoreKey, McastRole> mcastRoleStore;

+    // Mcast lock to serialize local operations
+    private final Lock mcastLock = new ReentrantLock();
+
+    /**
+     * Acquires the lock used when making mcast changes.
+     */
+    private void mcastLock() {
+        mcastLock.lock();
+    }
+
+    /**
+     * Releases the lock used when making mcast changes.
+     */
+    private void mcastUnlock() {
+        mcastLock.unlock();
+    }
+
+    // Stability threshold for Mcast. Seconds
+    private static final long MCAST_STABLITY_THRESHOLD = 5;
+    // Last change done
+    private Instant lastMcastChange = Instant.now();
+
+    /**
+     * Determines if mcast in the network has been stable in the last
+     * MCAST_STABLITY_THRESHOLD seconds, by comparing the current time
+     * to the last mcast change timestamp.
+     *
+     * @return true if stable
+     */
+    private boolean isMcastStable() {
+        long last = (long) (lastMcastChange.toEpochMilli() / 1000.0);
+        long now = (long) (Instant.now().toEpochMilli() / 1000.0);
+        log.debug("Mcast stable since {}s", now - last);
+        return (now - last) > MCAST_STABLITY_THRESHOLD;
+    }
+
+    // Verify interval for Mcast
+    private static final long MCAST_VERIFY_INTERVAL = 30;
+
+    // Executor for mcast bucket corrector
+    private ScheduledExecutorService executorService
+            = newScheduledThreadPool(1, groupedThreads("mcastBktCorrector", "mcastbktC-%d", log));
+
     /**
      * Role in the multicast tree.
      */
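
Aside: a minimal stand-alone sketch of the pattern the hunk above introduces — a lock serializing mcast mutations, a timestamp of the last change, and a scheduled corrector that only runs after the state has been quiet for a threshold. The class and member names below (StabilityGatedCorrector, recordChange, verifyIfStable) are illustrative only and not part of the ONOS code.

import java.time.Instant;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class StabilityGatedCorrector {
    private static final long STABILITY_THRESHOLD_SECONDS = 5;
    private static final long VERIFY_INTERVAL_SECONDS = 30;

    private final Lock lock = new ReentrantLock();
    private volatile Instant lastChange = Instant.now();
    private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);

    public void start() {
        // Same shape as scheduleWithFixedDelay(new McastBucketCorrector(), 10, ...)
        executor.scheduleWithFixedDelay(this::verifyIfStable, 10,
                VERIFY_INTERVAL_SECONDS, TimeUnit.SECONDS);
    }

    public void recordChange() {
        lock.lock();
        try {
            lastChange = Instant.now();   // every mutation refreshes the timestamp
        } finally {
            lock.unlock();
        }
    }

    private boolean isStable() {
        long quietSeconds = Instant.now().getEpochSecond() - lastChange.getEpochSecond();
        return quietSeconds > STABILITY_THRESHOLD_SECONDS;
    }

    private void verifyIfStable() {
        if (!isStable()) {
            return;                       // skip this round while changes are still landing
        }
        lock.lock();
        try {
            // ... re-read state and reconcile it here, as the bucket corrector does ...
        } finally {
            lock.unlock();
        }
    }

    public void stop() {
        executor.shutdown();
    }
}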
@@ -129,6 +180,10 @@ public class McastHandler {
                 .withName("onos-mcast-role-store")
                 .withSerializer(Serializer.using(mcastKryo.build("McastHandler-Role")))
                 .build();
+        // Init the executor service and the buckets corrector
+        executorService.scheduleWithFixedDelay(new McastBucketCorrector(), 10,
+                                               MCAST_VERIFY_INTERVAL,
+                                               TimeUnit.SECONDS);
     }

     /**
@@ -144,6 +199,13 @@ public class McastHandler {
         });
     }

+    /**
+     * Clean up when deactivating the application.
+     */
+    protected void terminate() {
+        executorService.shutdown();
+    }
+
     /**
      * Processes the SOURCE_ADDED event.
      *
@@ -160,9 +222,7 @@
         Set<ConnectPoint> sinks = mcastRouteInfo.sinks();
         IpAddress mcastIp = mcastRouteInfo.route().group();

-        sinks.forEach(sink -> {
-            processSinkAddedInternal(source, sink, mcastIp);
-        });
+        sinks.forEach(sink -> processSinkAddedInternal(source, sink, mcastIp));
     }

     /**
@@ -200,43 +260,65 @@
         ConnectPoint sink = mcastRouteInfo.sink().orElse(null);
         IpAddress mcastIp = mcastRouteInfo.route().group();

-        // Continue only when this instance is the master of source device
-        if (!srManager.mastershipService.isLocalMaster(source.deviceId())) {
-            log.info("Skip {} due to lack of mastership of the source device {}",
-                    mcastIp, source.deviceId());
-            return;
-        }
-
-        // When source and sink are on the same device
-        if (source.deviceId().equals(sink.deviceId())) {
-            // Source and sink are on even the same port. There must be something wrong.
-            if (source.port().equals(sink.port())) {
-                log.warn("Sink is on the same port of source. Abort");
-                return;
-            }
-            removePortFromDevice(sink.deviceId(), sink.port(), mcastIp, assignedVlan(source));
-            return;
-        }
-
-        // Process the egress device
-        boolean isLast = removePortFromDevice(sink.deviceId(), sink.port(), mcastIp, assignedVlan(null));
-        if (isLast) {
-            mcastRoleStore.remove(new McastStoreKey(mcastIp, sink.deviceId()));
-        }
-
-        // If this is the last sink on the device, also update upstream
-        Optional<Path> mcastPath = getPath(source.deviceId(), sink.deviceId(), mcastIp);
-        if (mcastPath.isPresent()) {
-            List<Link> links = Lists.newArrayList(mcastPath.get().links());
-            Collections.reverse(links);
-            for (Link link : links) {
-                if (isLast) {
-                    isLast = removePortFromDevice(link.src().deviceId(), link.src().port(),
-                            mcastIp,
-                            assignedVlan(link.src().deviceId().equals(source.deviceId()) ? source : null));
-                    mcastRoleStore.remove(new McastStoreKey(mcastIp, link.src().deviceId()));
-                }
-            }
-        }
+        processSinkRemovedInternal(source, sink, mcastIp);
+    }
+
+    /**
+     * Removes a path from source to sink for given multicast group.
+     *
+     * @param source connect point of the multicast source
+     * @param sink connection point of the multicast sink
+     * @param mcastIp multicast group IP address
+     */
+    private void processSinkRemovedInternal(ConnectPoint source, ConnectPoint sink,
+                                            IpAddress mcastIp) {
+        lastMcastChange = Instant.now();
+        mcastLock();
+        try {
+            // Continue only when this instance is the master of source device
+            if (!srManager.mastershipService.isLocalMaster(source.deviceId())) {
+                log.debug("Skip {} due to lack of mastership of the source device {}",
+                        mcastIp, source.deviceId());
+                return;
+            }
+
+            // When source and sink are on the same device
+            if (source.deviceId().equals(sink.deviceId())) {
+                // Source and sink are on even the same port. There must be something wrong.
+                if (source.port().equals(sink.port())) {
+                    log.warn("Skip {} since sink {} is on the same port of source {}. Abort",
+                            mcastIp, sink, source);
+                    return;
+                }
+                removePortFromDevice(sink.deviceId(), sink.port(), mcastIp, assignedVlan(source));
+                return;
+            }
+
+            // Process the egress device
+            boolean isLast = removePortFromDevice(sink.deviceId(), sink.port(), mcastIp, assignedVlan(null));
+            if (isLast) {
+                mcastRoleStore.remove(new McastStoreKey(mcastIp, sink.deviceId()));
+            }
+
+            // If this is the last sink on the device, also update upstream
+            Optional<Path> mcastPath = getPath(source.deviceId(), sink.deviceId(), mcastIp);
+            if (mcastPath.isPresent()) {
+                List<Link> links = Lists.newArrayList(mcastPath.get().links());
+                Collections.reverse(links);
+                for (Link link : links) {
+                    if (isLast) {
+                        isLast = removePortFromDevice(
+                                link.src().deviceId(),
+                                link.src().port(),
+                                mcastIp,
+                                assignedVlan(link.src().deviceId().equals(source.deviceId()) ? source : null)
+                        );
+                        mcastRoleStore.remove(new McastStoreKey(mcastIp, link.src().deviceId()));
+                    }
+                }
+            }
+        } finally {
+            mcastUnlock();
+        }
     }

@@ -249,54 +331,61 @@
      */
     private void processSinkAddedInternal(ConnectPoint source, ConnectPoint sink,
             IpAddress mcastIp) {
-        // Continue only when this instance is the master of source device
-        if (!srManager.mastershipService.isLocalMaster(source.deviceId())) {
-            log.info("Skip {} due to lack of mastership of the source device {}",
-                    source.deviceId());
-            return;
-        }
-
-        // Process the ingress device
-        addFilterToDevice(source.deviceId(), source.port(), assignedVlan(source), mcastIp);
-
-        // When source and sink are on the same device
-        if (source.deviceId().equals(sink.deviceId())) {
-            // Source and sink are on even the same port. There must be something wrong.
-            if (source.port().equals(sink.port())) {
-                log.warn("Sink is on the same port of source. Abort");
-                return;
-            }
-            addPortToDevice(sink.deviceId(), sink.port(), mcastIp, assignedVlan(source));
-            mcastRoleStore.put(new McastStoreKey(mcastIp, sink.deviceId()), McastRole.INGRESS);
-            return;
-        }
-
-        // Find a path. If present, create/update groups and flows for each hop
-        Optional<Path> mcastPath = getPath(source.deviceId(), sink.deviceId(), mcastIp);
-        if (mcastPath.isPresent()) {
-            List<Link> links = mcastPath.get().links();
-            checkState(links.size() == 2,
-                    "Path in leaf-spine topology should always be two hops: ", links);
-
-            links.forEach(link -> {
-                addPortToDevice(link.src().deviceId(), link.src().port(), mcastIp,
-                        assignedVlan(link.src().deviceId().equals(source.deviceId()) ? source : null));
-                addFilterToDevice(link.dst().deviceId(), link.dst().port(), assignedVlan(null), mcastIp);
-            });
-
-            // Process the egress device
-            addPortToDevice(sink.deviceId(), sink.port(), mcastIp, assignedVlan(null));
-
-            // Setup mcast roles
-            mcastRoleStore.put(new McastStoreKey(mcastIp, source.deviceId()),
-                    McastRole.INGRESS);
-            mcastRoleStore.put(new McastStoreKey(mcastIp, links.get(0).dst().deviceId()),
-                    McastRole.TRANSIT);
-            mcastRoleStore.put(new McastStoreKey(mcastIp, sink.deviceId()),
-                    McastRole.EGRESS);
-        } else {
-            log.warn("Unable to find a path from {} to {}. Abort sinkAdded",
-                    source.deviceId(), sink.deviceId());
-        }
+        lastMcastChange = Instant.now();
+        mcastLock();
+        try {
+            // Continue only when this instance is the master of source device
+            if (!srManager.mastershipService.isLocalMaster(source.deviceId())) {
+                log.debug("Skip {} due to lack of mastership of the source device {}",
+                        mcastIp, source.deviceId());
+                return;
+            }
+
+            // Process the ingress device
+            addFilterToDevice(source.deviceId(), source.port(), assignedVlan(source), mcastIp);
+
+            // When source and sink are on the same device
+            if (source.deviceId().equals(sink.deviceId())) {
+                // Source and sink are on even the same port. There must be something wrong.
+                if (source.port().equals(sink.port())) {
+                    log.warn("Skip {} since sink {} is on the same port of source {}. Abort",
+                            mcastIp, sink, source);
+                    return;
+                }
+                addPortToDevice(sink.deviceId(), sink.port(), mcastIp, assignedVlan(source));
+                mcastRoleStore.put(new McastStoreKey(mcastIp, sink.deviceId()), McastRole.INGRESS);
+                return;
+            }
+
+            // Find a path. If present, create/update groups and flows for each hop
+            Optional<Path> mcastPath = getPath(source.deviceId(), sink.deviceId(), mcastIp);
+            if (mcastPath.isPresent()) {
+                List<Link> links = mcastPath.get().links();
+                checkState(links.size() == 2,
+                        "Path in leaf-spine topology should always be two hops: ", links);
+
+                links.forEach(link -> {
+                    addPortToDevice(link.src().deviceId(), link.src().port(), mcastIp,
+                            assignedVlan(link.src().deviceId().equals(source.deviceId()) ? source : null));
+                    addFilterToDevice(link.dst().deviceId(), link.dst().port(), assignedVlan(null), mcastIp);
+                });
+
+                // Process the egress device
+                addPortToDevice(sink.deviceId(), sink.port(), mcastIp, assignedVlan(null));
+
+                // Setup mcast roles
+                mcastRoleStore.put(new McastStoreKey(mcastIp, source.deviceId()),
+                        McastRole.INGRESS);
+                mcastRoleStore.put(new McastStoreKey(mcastIp, links.get(0).dst().deviceId()),
+                        McastRole.TRANSIT);
+                mcastRoleStore.put(new McastStoreKey(mcastIp, sink.deviceId()),
+                        McastRole.EGRESS);
+            } else {
+                log.warn("Unable to find a path from {} to {}. Abort sinkAdded",
+                        source.deviceId(), sink.deviceId());
+            }
+        } finally {
+            mcastUnlock();
+        }
     }

@@ -306,56 +395,63 @@
      * @param affectedLink Link that is going down
      */
     protected void processLinkDown(Link affectedLink) {
-        getAffectedGroups(affectedLink).forEach(mcastIp -> {
-            // TODO Optimize when the group editing is in place
-            log.debug("Processing link down {} for group {}",
-                    affectedLink, mcastIp);
-
-            // Find out the ingress, transit and egress device of affected group
-            DeviceId ingressDevice = getDevice(mcastIp, McastRole.INGRESS)
-                    .stream().findAny().orElse(null);
-            DeviceId transitDevice = getDevice(mcastIp, McastRole.TRANSIT)
-                    .stream().findAny().orElse(null);
-            Set<DeviceId> egressDevices = getDevice(mcastIp, McastRole.EGRESS);
-            ConnectPoint source = getSource(mcastIp);
-
-            // Do not proceed if any of these info is missing
-            if (ingressDevice == null || transitDevice == null
-                    || egressDevices == null || source == null) {
-                log.warn("Missing ingress {}, transit {}, egress {} devices or source {}",
-                        ingressDevice, transitDevice, egressDevices, source);
-                return;
-            }
-
-            // Continue only when this instance is the master of source device
-            if (!srManager.mastershipService.isLocalMaster(source.deviceId())) {
-                log.info("Skip {} due to lack of mastership of the source device {}",
-                        source.deviceId());
-                return;
-            }
-
-            // Remove entire transit
-            removeGroupFromDevice(transitDevice, mcastIp, assignedVlan(null));
-
-            // Remove transit-facing port on ingress device
-            PortNumber ingressTransitPort = ingressTransitPort(mcastIp);
-            if (ingressTransitPort != null) {
-                removePortFromDevice(ingressDevice, ingressTransitPort, mcastIp, assignedVlan(source));
-                mcastRoleStore.remove(new McastStoreKey(mcastIp, transitDevice));
-            }
-
-            // Construct a new path for each egress device
-            egressDevices.forEach(egressDevice -> {
-                Optional<Path> mcastPath = getPath(ingressDevice, egressDevice, mcastIp);
-                if (mcastPath.isPresent()) {
-                    installPath(mcastIp, source, mcastPath.get());
-                } else {
-                    log.warn("Fail to recover egress device {} from link failure {}",
-                            egressDevice, affectedLink);
-                    removeGroupFromDevice(egressDevice, mcastIp, assignedVlan(null));
-                }
-            });
-        });
+        lastMcastChange = Instant.now();
+        mcastLock();
+        try {
+            // Get groups affected by the link down event
+            getAffectedGroups(affectedLink).forEach(mcastIp -> {
+                // TODO Optimize when the group editing is in place
+                log.debug("Processing link down {} for group {}",
+                        affectedLink, mcastIp);
+
+                // Find out the ingress, transit and egress device of affected group
+                DeviceId ingressDevice = getDevice(mcastIp, McastRole.INGRESS)
+                        .stream().findAny().orElse(null);
+                DeviceId transitDevice = getDevice(mcastIp, McastRole.TRANSIT)
+                        .stream().findAny().orElse(null);
+                Set<DeviceId> egressDevices = getDevice(mcastIp, McastRole.EGRESS);
+                ConnectPoint source = getSource(mcastIp);
+
+                // Do not proceed if any of these info is missing
+                if (ingressDevice == null || transitDevice == null
+                        || egressDevices == null || source == null) {
+                    log.warn("Missing ingress {}, transit {}, egress {} devices or source {}",
+                            ingressDevice, transitDevice, egressDevices, source);
+                    return;
+                }
+
+                // Continue only when this instance is the master of source device
+                if (!srManager.mastershipService.isLocalMaster(source.deviceId())) {
+                    log.debug("Skip {} due to lack of mastership of the source device {}",
+                            source.deviceId());
+                    return;
+                }
+
+                // Remove entire transit
+                removeGroupFromDevice(transitDevice, mcastIp, assignedVlan(null));
+
+                // Remove transit-facing port on ingress device
+                PortNumber ingressTransitPort = ingressTransitPort(mcastIp);
+                if (ingressTransitPort != null) {
+                    removePortFromDevice(ingressDevice, ingressTransitPort, mcastIp, assignedVlan(source));
+                    mcastRoleStore.remove(new McastStoreKey(mcastIp, transitDevice));
+                }
+
+                // Construct a new path for each egress device
+                egressDevices.forEach(egressDevice -> {
+                    Optional<Path> mcastPath = getPath(ingressDevice, egressDevice, mcastIp);
+                    if (mcastPath.isPresent()) {
+                        installPath(mcastIp, source, mcastPath.get());
+                    } else {
+                        log.warn("Fail to recover egress device {} from link failure {}",
+                                egressDevice, affectedLink);
+                        removeGroupFromDevice(egressDevice, mcastIp, assignedVlan(null));
+                    }
+                });
+            });
+        } finally {
+            mcastUnlock();
+        }
     }

     /**
@@ -364,103 +460,109 @@
      * @param deviceDown device going down
      */
     protected void processDeviceDown(DeviceId deviceDown) {
-        // Get the mcast groups affected by the device going down
-        getAffectedGroups(deviceDown).forEach(mcastIp -> {
-            // TODO Optimize when the group editing is in place
-            log.debug("Processing device down {} for group {}",
-                    deviceDown, mcastIp);
-
-            // Find out the ingress, transit and egress device of affected group
-            DeviceId ingressDevice = getDevice(mcastIp, McastRole.INGRESS)
-                    .stream().findAny().orElse(null);
-            DeviceId transitDevice = getDevice(mcastIp, McastRole.TRANSIT)
-                    .stream().findAny().orElse(null);
-            Set<DeviceId> egressDevices = getDevice(mcastIp, McastRole.EGRESS);
-            ConnectPoint source = getSource(mcastIp);
-
-            // Do not proceed if ingress device or source of this group are missing
-            // If sinks are in other leafs, we have ingress, transit, egress, and source
-            // If sinks are in the same leaf, we have just ingress and source
-            if (ingressDevice == null || source == null) {
-                log.warn("Missing ingress {} or source {} for group {}",
-                        ingressDevice, source, mcastIp);
-                return;
-            }
-
-            // Continue only when we have the mastership on the operation
-            if (!srManager.mastershipService.isLocalMaster(source.deviceId())) {
-                // When the source is available we just check the mastership
-                if (srManager.deviceService.isAvailable(source.deviceId())) {
-                    log.info("Skip {} due to lack of mastership of the source device {}",
-                            mcastIp, source.deviceId());
-                    return;
-                }
-                // Fallback with Leadership service
-                // source id is used a topic
-                NodeId leader = srManager.leadershipService.runForLeadership(
-                        source.deviceId().toString()).leaderNodeId();
-                // Verify if this node is the leader
-                if (!srManager.clusterService.getLocalNode().id().equals(leader)) {
-                    log.info("Skip {} due to lack of leadership on the topic {}",
-                            mcastIp, source.deviceId());
-                    return;
-                }
-            }
-
-            // If it exists, we have to remove it in any case
-            if (transitDevice != null) {
-                // Remove entire transit
-                removeGroupFromDevice(transitDevice, mcastIp, assignedVlan(null));
-            }
-            // If the ingress is down
-            if (ingressDevice.equals(deviceDown)) {
-                // Remove entire ingress
-                removeGroupFromDevice(ingressDevice, mcastIp, assignedVlan(source));
-                // If other sinks different from the ingress exist
-                if (!egressDevices.isEmpty()) {
-                    // Remove all the remaining egress
-                    egressDevices.forEach(
-                            egressDevice -> removeGroupFromDevice(egressDevice, mcastIp, assignedVlan(null))
-                    );
-                }
-            } else {
-                // Egress or transit could be down at this point
-                // Get the ingress-transit port if it exists
-                PortNumber ingressTransitPort = ingressTransitPort(mcastIp);
-                if (ingressTransitPort != null) {
-                    // Remove transit-facing port on ingress device
-                    removePortFromDevice(ingressDevice, ingressTransitPort, mcastIp, assignedVlan(source));
-                }
-                // One of the egress device is down
-                if (egressDevices.contains(deviceDown)) {
-                    // Remove entire device down
-                    removeGroupFromDevice(deviceDown, mcastIp, assignedVlan(null));
-                    // Remove the device down from egress
-                    egressDevices.remove(deviceDown);
-                    // If there are no more egress and ingress does not have sinks
-                    if (egressDevices.isEmpty() && !hasSinks(ingressDevice, mcastIp)) {
-                        // Remove entire ingress
-                        mcastRoleStore.remove(new McastStoreKey(mcastIp, ingressDevice));
-                        // We have done
-                        return;
-                    }
-                }
-                // Construct a new path for each egress device
-                egressDevices.forEach(egressDevice -> {
-                    Optional<Path> mcastPath = getPath(ingressDevice, egressDevice, mcastIp);
-                    // If there is a new path
-                    if (mcastPath.isPresent()) {
-                        // Let's install the new mcast path for this egress
-                        installPath(mcastIp, source, mcastPath.get());
-                    } else {
-                        // We were not able to find an alternative path for this egress
-                        log.warn("Fail to recover egress device {} from device down {}",
-                                egressDevice, deviceDown);
-                        removeGroupFromDevice(egressDevice, mcastIp, assignedVlan(null));
-                    }
-                });
-            }
-        });
+        lastMcastChange = Instant.now();
+        mcastLock();
+        try {
+            // Get the mcast groups affected by the device going down
+            getAffectedGroups(deviceDown).forEach(mcastIp -> {
+                // TODO Optimize when the group editing is in place
+                log.debug("Processing device down {} for group {}",
+                        deviceDown, mcastIp);
+
+                // Find out the ingress, transit and egress device of affected group
+                DeviceId ingressDevice = getDevice(mcastIp, McastRole.INGRESS)
+                        .stream().findAny().orElse(null);
+                DeviceId transitDevice = getDevice(mcastIp, McastRole.TRANSIT)
+                        .stream().findAny().orElse(null);
+                Set<DeviceId> egressDevices = getDevice(mcastIp, McastRole.EGRESS);
+                ConnectPoint source = getSource(mcastIp);
+
+                // Do not proceed if ingress device or source of this group are missing
+                // If sinks are in other leafs, we have ingress, transit, egress, and source
+                // If sinks are in the same leaf, we have just ingress and source
+                if (ingressDevice == null || source == null) {
+                    log.warn("Missing ingress {} or source {} for group {}",
+                            ingressDevice, source, mcastIp);
+                    return;
+                }
+
+                // Continue only when we have the mastership on the operation
+                if (!srManager.mastershipService.isLocalMaster(source.deviceId())) {
+                    // When the source is available we just check the mastership
+                    if (srManager.deviceService.isAvailable(source.deviceId())) {
+                        log.debug("Skip {} due to lack of mastership of the source device {}",
+                                mcastIp, source.deviceId());
+                        return;
+                    }
+                    // Fallback with Leadership service
+                    // source id is used a topic
+                    NodeId leader = srManager.leadershipService.runForLeadership(
+                            source.deviceId().toString()).leaderNodeId();
+                    // Verify if this node is the leader
+                    if (!srManager.clusterService.getLocalNode().id().equals(leader)) {
+                        log.debug("Skip {} due to lack of leadership on the topic {}",
+                                mcastIp, source.deviceId());
+                        return;
+                    }
+                }
+
+                // If it exists, we have to remove it in any case
+                if (transitDevice != null) {
+                    // Remove entire transit
+                    removeGroupFromDevice(transitDevice, mcastIp, assignedVlan(null));
+                }
+                // If the ingress is down
+                if (ingressDevice.equals(deviceDown)) {
+                    // Remove entire ingress
+                    removeGroupFromDevice(ingressDevice, mcastIp, assignedVlan(source));
+                    // If other sinks different from the ingress exist
+                    if (!egressDevices.isEmpty()) {
+                        // Remove all the remaining egress
+                        egressDevices.forEach(
+                                egressDevice -> removeGroupFromDevice(egressDevice, mcastIp, assignedVlan(null))
+                        );
+                    }
+                } else {
+                    // Egress or transit could be down at this point
+                    // Get the ingress-transit port if it exists
+                    PortNumber ingressTransitPort = ingressTransitPort(mcastIp);
+                    if (ingressTransitPort != null) {
+                        // Remove transit-facing port on ingress device
+                        removePortFromDevice(ingressDevice, ingressTransitPort, mcastIp, assignedVlan(source));
+                    }
+                    // One of the egress device is down
+                    if (egressDevices.contains(deviceDown)) {
+                        // Remove entire device down
+                        removeGroupFromDevice(deviceDown, mcastIp, assignedVlan(null));
+                        // Remove the device down from egress
+                        egressDevices.remove(deviceDown);
+                        // If there are no more egress and ingress does not have sinks
+                        if (egressDevices.isEmpty() && !hasSinks(ingressDevice, mcastIp)) {
+                            // Remove entire ingress
+                            mcastRoleStore.remove(new McastStoreKey(mcastIp, ingressDevice));
+                            // We have done
+                            return;
+                        }
+                    }
+                    // Construct a new path for each egress device
+                    egressDevices.forEach(egressDevice -> {
+                        Optional<Path> mcastPath = getPath(ingressDevice, egressDevice, mcastIp);
+                        // If there is a new path
+                        if (mcastPath.isPresent()) {
+                            // Let's install the new mcast path for this egress
+                            installPath(mcastIp, source, mcastPath.get());
+                        } else {
+                            // We were not able to find an alternative path for this egress
+                            log.warn("Fail to recover egress device {} from device down {}",
+                                    egressDevice, deviceDown);
+                            removeGroupFromDevice(egressDevice, mcastIp, assignedVlan(null));
+                        }
+                    });
+                }
+            });
+        } finally {
+            mcastUnlock();
+        }
     }

     /**
@@ -1056,7 +1158,9 @@
     }

     /**
-     * Adds or removes filtering objective for given device and port.
+     * Updates filtering objective for given device and port.
+     * It is called in general when the mcast config has been
+     * changed.
      *
      * @param deviceId device ID
      * @param portNum ingress port number
@@ -1065,15 +1169,118 @@
      */
     protected void updateFilterToDevice(DeviceId deviceId, PortNumber portNum,
                                         VlanId vlanId, boolean install) {
-        srManager.multicastRouteService.getRoutes().forEach(mcastRoute -> {
-            ConnectPoint source = srManager.multicastRouteService.fetchSource(mcastRoute);
-            if (source.deviceId().equals(deviceId) && source.port().equals(portNum)) {
-                if (install) {
-                    addFilterToDevice(deviceId, portNum, vlanId, mcastRoute.group());
-                } else {
-                    removeFilterToDevice(deviceId, portNum, vlanId, mcastRoute.group());
-                }
-            }
-        });
+        lastMcastChange = Instant.now();
+        mcastLock();
+        try {
+            // Iterates over the route and updates properly the filtering objective
+            // on the source device.
+            srManager.multicastRouteService.getRoutes().forEach(mcastRoute -> {
+                ConnectPoint source = srManager.multicastRouteService.fetchSource(mcastRoute);
+                if (source.deviceId().equals(deviceId) && source.port().equals(portNum)) {
+                    if (install) {
+                        addFilterToDevice(deviceId, portNum, vlanId, mcastRoute.group());
+                    } else {
+                        removeFilterToDevice(deviceId, portNum, vlanId, mcastRoute.group());
+                    }
+                }
+            });
+        } finally {
+            mcastUnlock();
+        }
+    }
+
+    /**
+     * Performs bucket verification operation for all mcast groups in the devices.
+     * Firstly, it verifies that mcast is stable before trying verification operation.
+     * Verification consists in creating new nexts with VERIFY operation. Actually,
+     * the operation is totally delegated to the driver.
+     */
+    private final class McastBucketCorrector implements Runnable {
+
+        @Override
+        public void run() {
+            // Verify if the Mcast has been stable for MCAST_STABLITY_THRESHOLD
+            if (!isMcastStable()) {
+                return;
+            }
+            // Acquires lock
+            mcastLock();
+            try {
+                // Iterates over the routes and verify the related next objectives
+                srManager.multicastRouteService.getRoutes()
+                        .stream()
+                        .map(McastRoute::group)
+                        .forEach(mcastIp -> {
+                            log.trace("Running mcast buckets corrector for mcast group: {}",
+                                      mcastIp);
+
+                            // For each group we get current information in the store
+                            // and issue a check of the next objectives in place
+                            DeviceId ingressDevice = getDevice(mcastIp, McastRole.INGRESS)
+                                    .stream().findAny().orElse(null);
+                            DeviceId transitDevice = getDevice(mcastIp, McastRole.TRANSIT)
+                                    .stream().findAny().orElse(null);
+                            Set<DeviceId> egressDevices = getDevice(mcastIp, McastRole.EGRESS);
+                            ConnectPoint source = getSource(mcastIp);
+
+                            // Do not proceed if ingress device or source of this group are missing
+                            if (ingressDevice == null || source == null) {
+                                log.warn("Unable to run buckets corrector. " +
+                                         "Missing ingress {} or source {} for group {}",
+                                         ingressDevice, source, mcastIp);
+                                return;
+                            }
+
+                            // Continue only when this instance is the master of source device
+                            if (!srManager.mastershipService.isLocalMaster(source.deviceId())) {
+                                log.trace("Unable to run buckets corrector. " +
+                                          "Skip {} due to lack of mastership " +
+                                          "of the source device {}",
+                                          mcastIp, source.deviceId());
+                                return;
+                            }
+
+                            // Create the set of the devices to be processed
+                            ImmutableSet.Builder<DeviceId> devicesBuilder = ImmutableSet.builder();
+                            devicesBuilder.add(ingressDevice);
+                            if (transitDevice != null) {
+                                devicesBuilder.add(transitDevice);
+                            }
+                            if (!egressDevices.isEmpty()) {
+                                devicesBuilder.addAll(egressDevices);
+                            }
+                            Set<DeviceId> devicesToProcess = devicesBuilder.build();
+
+                            // Iterate over the devices
+                            devicesToProcess.forEach(deviceId -> {
+                                McastStoreKey currentKey = new McastStoreKey(mcastIp, deviceId);
+                                // If next exists in our store verify related next objective
+                                if (mcastNextObjStore.containsKey(currentKey)) {
+                                    NextObjective currentNext = mcastNextObjStore.get(currentKey).value();
+                                    // Get current ports
+                                    Set<PortNumber> currentPorts = getPorts(currentNext.next());
+                                    // Rebuild the next objective
+                                    currentNext = nextObjBuilder(
+                                            mcastIp,
+                                            assignedVlan(deviceId.equals(source.deviceId()) ? source : null),
+                                            currentPorts,
+                                            currentNext.id()
+                                    ).verify();
+                                    // Send to the flowobjective service
+                                    srManager.flowObjectiveService.next(deviceId, currentNext);
+                                } else {
+                                    log.warn("Unable to run buckets corrector." +
+                                             "Missing next for {} and group {}",
+                                             deviceId, mcastIp);
+                                }
+                            });
+
+                        });
+            } finally {
+                // Finally, it releases the lock
+                mcastUnlock();
+            }
+
+        }
     }
 }
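
Aside: the bucket corrector above delegates the actual check to the driver (verifyGroup in Ofdpa2GroupHandler, further down), which in essence is a set reconciliation between the buckets the next objective expects and the buckets the device group actually holds. A minimal, generic sketch of that reconciliation follows; the names (BucketReconciler, Plan) are hypothetical and not ONOS APIs.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public final class BucketReconciler {

    /** Buckets to add to and remove from the device group (illustrative type). */
    public static final class Plan<T> {
        public final Set<T> toAdd;
        public final Set<T> toRemove;

        Plan(Set<T> toAdd, Set<T> toRemove) {
            this.toAdd = toAdd;
            this.toRemove = toRemove;
        }
    }

    public static <T> Plan<T> reconcile(List<T> expectedBuckets, List<T> actualBuckets) {
        Set<T> toAdd = new HashSet<>(expectedBuckets);
        toAdd.removeAll(actualBuckets);      // expected but missing on the device
        Set<T> toRemove = new HashSet<>(actualBuckets);
        toRemove.removeAll(expectedBuckets); // on the device but no longer expected
        return new Plan<>(toAdd, toRemove);
    }

    public static void main(String[] args) {
        // Integers stand in for buckets; the real code compares group-bucket chains.
        Plan<Integer> plan = reconcile(List.of(1, 2, 3), List.of(2, 3, 4));
        System.out.println("add=" + plan.toAdd + " remove=" + plan.toRemove); // add=[1] remove=[4]
    }
}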
@@ -490,6 +490,8 @@ public class SegmentRoutingManager implements SegmentRoutingService {
         portNextObjStore.destroy();
         tunnelStore.destroy();
         policyStore.destroy();
+
+        mcastHandler.terminate();
         log.info("Stopped");
     }

@@ -1588,8 +1588,8 @@ public class Ofdpa2GroupHandler {
      * modified to match the given next objective
      */
     protected void verifyGroup(NextObjective nextObjective, NextGroup next) {
-        if (nextObjective.type() != NextObjective.Type.HASHED) {
-            log.warn("verification not supported for {} group", nextObjective.type());
+        if (nextObjective.type() == NextObjective.Type.SIMPLE) {
+            log.warn("verification not supported for indirect group");
             fail(nextObjective, ObjectiveError.UNSUPPORTED);
             return;
         }
@@ -1640,17 +1640,25 @@ public class Ofdpa2GroupHandler {
             indicesToRemove.addAll(otherIndices);
         }

+        log.debug("Buckets to create {}", bucketsToCreate);
+        log.debug("Indices to remove {}", indicesToRemove);
+
         if (!bucketsToCreate.isEmpty()) {
             log.info("creating {} buckets as part of nextId: {} verification",
                     bucketsToCreate.size(), nextObjective.id());
             //create a nextObjective only with these buckets
             NextObjective.Builder nextObjBuilder = DefaultNextObjective.builder()
                     .withId(nextObjective.id())
-                    .withType(NextObjective.Type.HASHED)
+                    .withType(nextObjective.type())
                     .withMeta(nextObjective.meta())
                     .fromApp(nextObjective.appId());
-            bucketsToCreate.forEach(bucket -> nextObjBuilder.addTreatment(bucket));
-            addBucketToHashGroup(nextObjBuilder.addToExisting(), allActiveKeys);
+            bucketsToCreate.forEach(nextObjBuilder::addTreatment);
+            // According to the next type we call the proper add function
+            if (nextObjective.type() == NextObjective.Type.HASHED) {
+                addBucketToHashGroup(nextObjBuilder.addToExisting(), allActiveKeys);
+            } else {
+                addBucketToBroadcastGroup(nextObjBuilder.addToExisting(), allActiveKeys);
+            }
         }

         if (!indicesToRemove.isEmpty()) {
@@ -1667,9 +1675,9 @@ public class Ofdpa2GroupHandler {
         // Nevertheless groupStore may not be in sync due to bug in the store
         // - see CORD-1844. XXX When this bug is fixed, the rest of this verify
         // method will not be required.
-        GroupKey hashGroupKey = allActiveKeys.get(0).peekFirst();
-        Group hashGroup = groupService.getGroup(deviceId, hashGroupKey);
-        int actualGroupSize = hashGroup.buckets().buckets().size();
+        GroupKey topGroupKey = allActiveKeys.get(0).peekFirst();
+        Group topGroup = groupService.getGroup(deviceId, topGroupKey);
+        int actualGroupSize = topGroup.buckets().buckets().size();
         int objGroupSize = nextObjective.next().size();
         if (actualGroupSize != objGroupSize) {
             log.warn("Mismatch detected in device:{}, nextId:{}, nextObjective-size"
@@ -1677,9 +1685,10 @@ public class Ofdpa2GroupHandler {
                      objGroupSize, actualGroupSize);
         }
         if (actualGroupSize > objGroupSize) {
+            // Group in the device has more chains
             List<GroupBucket> bucketsToRemove = Lists.newArrayList();
             //check every bucket in the actual group
-            for (GroupBucket bucket : hashGroup.buckets().buckets()) {
+            for (GroupBucket bucket : topGroup.buckets().buckets()) {
                 GroupInstruction g = (GroupInstruction) bucket.treatment()
                         .allInstructions().iterator().next();
                 GroupId gidToCheck = g.groupId(); // the group pointed to
@@ -1707,11 +1716,12 @@ public class Ofdpa2GroupHandler {
                          + "buckets to remove");
             } else {
                 GroupBuckets removeBuckets = new GroupBuckets(bucketsToRemove);
-                groupService.removeBucketsFromGroup(deviceId, hashGroupKey,
-                                                    removeBuckets, hashGroupKey,
+                groupService.removeBucketsFromGroup(deviceId, topGroupKey,
+                                                    removeBuckets, topGroupKey,
                                                     nextObjective.appId());
             }
         } else if (actualGroupSize < objGroupSize) {
+            // Group in the device has less chains
             // should also add buckets not in group-store but in obj-store
             List<GroupBucket> bucketsToAdd = Lists.newArrayList();
             //check every bucket in the obj
@@ -1727,7 +1737,7 @@ public class Ofdpa2GroupHandler {
                     continue;
                 }
                 boolean matches = false;
-                for (GroupBucket bucket : hashGroup.buckets().buckets()) {
+                for (GroupBucket bucket : topGroup.buckets().buckets()) {
                     GroupInstruction g = (GroupInstruction) bucket.treatment()
                             .allInstructions().iterator().next();
                     GroupId gidToCheck = g.groupId(); // the group pointed to
@@ -1741,7 +1751,12 @@ public class Ofdpa2GroupHandler {
                     TrafficTreatment t = DefaultTrafficTreatment.builder()
                             .group(pointedGroup.id())
                             .build();
-                    bucketsToAdd.add(DefaultGroupBucket.createSelectGroupBucket(t));
+                    // Create the proper bucket according to the next type
+                    if (nextObjective.type() == NextObjective.Type.HASHED) {
+                        bucketsToAdd.add(DefaultGroupBucket.createSelectGroupBucket(t));
+                    } else {
+                        bucketsToAdd.add(DefaultGroupBucket.createAllGroupBucket(t));
+                    }
                 }
             }
             if (bucketsToAdd.isEmpty()) {
@@ -1749,8 +1764,8 @@ public class Ofdpa2GroupHandler {
                          + "buckets to add");
             } else {
                 GroupBuckets addBuckets = new GroupBuckets(bucketsToAdd);
-                groupService.addBucketsToGroup(deviceId, hashGroupKey,
-                                               addBuckets, hashGroupKey,
+                groupService.addBucketsToGroup(deviceId, topGroupKey,
+                                               addBuckets, topGroupKey,
                                                nextObjective.appId());
             }
         }
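
Aside: the driver-side part of the fix is that the rebuilt next objective and its buckets now follow the objective's own type instead of always assuming HASHED — SELECT buckets for hashed groups (load balancing), ALL buckets for broadcast groups, which is the multicast case. A tiny illustrative helper, with hypothetical enum and class names, capturing that rule:

public final class BucketFlavours {

    public enum NextType { HASHED, BROADCAST }
    public enum BucketType { SELECT, ALL }

    private BucketFlavours() {
    }

    public static BucketType forNext(NextType type) {
        // HASHED nexts map to SELECT buckets; anything else handled here maps to ALL buckets.
        return type == NextType.HASHED ? BucketType.SELECT : BucketType.ALL;
    }
}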