mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2025-08-06 15:17:01 +02:00
[MINOR] update ebtree to version 4.1
Ebtree version 4.1 brings lookup by ranges. This will be useful for the scheduler.
This commit is contained in:
parent
c820300adf
commit
5804434a0f
@ -100,6 +100,7 @@ static inline void eb32_delete(struct eb32_node *eb32)
|
||||
*/
|
||||
REGPRM2 struct eb32_node *eb32_lookup(struct eb_root *root, u32 x);
|
||||
REGPRM2 struct eb32_node *eb32i_lookup(struct eb_root *root, s32 x);
|
||||
REGPRM2 struct eb32_node *eb32_lookup_ge(struct eb_root *root, u32 x);
|
||||
REGPRM2 struct eb32_node *eb32_insert(struct eb_root *root, struct eb32_node *new);
|
||||
REGPRM2 struct eb32_node *eb32i_insert(struct eb_root *root, struct eb32_node *new);
|
||||
|
||||
@ -122,6 +123,7 @@ static forceinline struct eb32_node *__eb32_lookup(struct eb_root *root, u32 x)
|
||||
{
|
||||
struct eb32_node *node;
|
||||
eb_troot_t *troot;
|
||||
u32 y;
|
||||
|
||||
troot = root->b[EB_LEFT];
|
||||
if (unlikely(troot == NULL))
|
||||
@ -139,7 +141,8 @@ static forceinline struct eb32_node *__eb32_lookup(struct eb_root *root, u32 x)
|
||||
node = container_of(eb_untag(troot, EB_NODE),
|
||||
struct eb32_node, node.branches);
|
||||
|
||||
if (x == node->key) {
|
||||
y = node->key ^ x;
|
||||
if (!y) {
|
||||
/* Either we found the node which holds the key, or
|
||||
* we have a dup tree. In the latter case, we have to
|
||||
* walk it down left to get the first entry.
|
||||
@ -154,6 +157,9 @@ static forceinline struct eb32_node *__eb32_lookup(struct eb_root *root, u32 x)
|
||||
return node;
|
||||
}
|
||||
|
||||
if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
|
||||
return NULL; /* no more common bits */
|
||||
|
||||
troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
|
||||
}
|
||||
}
|
||||
@ -167,6 +173,7 @@ static forceinline struct eb32_node *__eb32i_lookup(struct eb_root *root, s32 x)
|
||||
struct eb32_node *node;
|
||||
eb_troot_t *troot;
|
||||
u32 key = x ^ 0x80000000;
|
||||
u32 y;
|
||||
|
||||
troot = root->b[EB_LEFT];
|
||||
if (unlikely(troot == NULL))
|
||||
@ -184,7 +191,8 @@ static forceinline struct eb32_node *__eb32i_lookup(struct eb_root *root, s32 x)
|
||||
node = container_of(eb_untag(troot, EB_NODE),
|
||||
struct eb32_node, node.branches);
|
||||
|
||||
if (x == node->key) {
|
||||
y = node->key ^ x;
|
||||
if (!y) {
|
||||
/* Either we found the node which holds the key, or
|
||||
* we have a dup tree. In the latter case, we have to
|
||||
* walk it down left to get the first entry.
|
||||
@ -199,6 +207,9 @@ static forceinline struct eb32_node *__eb32i_lookup(struct eb_root *root, s32 x)
|
||||
return node;
|
||||
}
|
||||
|
||||
if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
|
||||
return NULL; /* no more common bits */
|
||||
|
||||
troot = node->node.branches.b[(key >> node->node.bit) & EB_NODE_BRANCH_MASK];
|
||||
}
|
||||
}
|
||||
|
@ -122,6 +122,7 @@ static forceinline struct eb64_node *__eb64_lookup(struct eb_root *root, u64 x)
|
||||
{
|
||||
struct eb64_node *node;
|
||||
eb_troot_t *troot;
|
||||
u64 y;
|
||||
|
||||
troot = root->b[EB_LEFT];
|
||||
if (unlikely(troot == NULL))
|
||||
@ -139,7 +140,8 @@ static forceinline struct eb64_node *__eb64_lookup(struct eb_root *root, u64 x)
|
||||
node = container_of(eb_untag(troot, EB_NODE),
|
||||
struct eb64_node, node.branches);
|
||||
|
||||
if (x == node->key) {
|
||||
y = node->key ^ x;
|
||||
if (!y) {
|
||||
/* Either we found the node which holds the key, or
|
||||
* we have a dup tree. In the latter case, we have to
|
||||
* walk it down left to get the first entry.
|
||||
@ -154,6 +156,9 @@ static forceinline struct eb64_node *__eb64_lookup(struct eb_root *root, u64 x)
|
||||
return node;
|
||||
}
|
||||
|
||||
if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
|
||||
return NULL; /* no more common bits */
|
||||
|
||||
troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
|
||||
}
|
||||
}
|
||||
@ -167,6 +172,7 @@ static forceinline struct eb64_node *__eb64i_lookup(struct eb_root *root, s64 x)
|
||||
struct eb64_node *node;
|
||||
eb_troot_t *troot;
|
||||
u64 key = x ^ (1ULL << 63);
|
||||
u64 y;
|
||||
|
||||
troot = root->b[EB_LEFT];
|
||||
if (unlikely(troot == NULL))
|
||||
@ -184,7 +190,8 @@ static forceinline struct eb64_node *__eb64i_lookup(struct eb_root *root, s64 x)
|
||||
node = container_of(eb_untag(troot, EB_NODE),
|
||||
struct eb64_node, node.branches);
|
||||
|
||||
if (x == node->key) {
|
||||
y = node->key ^ x;
|
||||
if (!y) {
|
||||
/* Either we found the node which holds the key, or
|
||||
* we have a dup tree. In the latter case, we have to
|
||||
* walk it down left to get the first entry.
|
||||
@ -199,6 +206,9 @@ static forceinline struct eb64_node *__eb64i_lookup(struct eb_root *root, s64 x)
|
||||
return node;
|
||||
}
|
||||
|
||||
if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
|
||||
return NULL; /* no more common bits */
|
||||
|
||||
troot = node->node.branches.b[(key >> node->node.bit) & EB_NODE_BRANCH_MASK];
|
||||
}
|
||||
}
|
||||
|
@ -123,6 +123,7 @@ static forceinline struct ebpt_node *__ebpt_lookup(struct eb_root *root, void *x
|
||||
{
|
||||
struct ebpt_node *node;
|
||||
eb_troot_t *troot;
|
||||
ptr_t y;
|
||||
|
||||
troot = root->b[EB_LEFT];
|
||||
if (unlikely(troot == NULL))
|
||||
@ -140,7 +141,8 @@ static forceinline struct ebpt_node *__ebpt_lookup(struct eb_root *root, void *x
|
||||
node = container_of(eb_untag(troot, EB_NODE),
|
||||
struct ebpt_node, node.branches);
|
||||
|
||||
if (x == node->key) {
|
||||
y = (ptr_t)node->key ^ (ptr_t)x;
|
||||
if (!y) {
|
||||
/* Either we found the node which holds the key, or
|
||||
* we have a dup tree. In the latter case, we have to
|
||||
* walk it down left to get the first entry.
|
||||
@ -155,6 +157,9 @@ static forceinline struct ebpt_node *__ebpt_lookup(struct eb_root *root, void *x
|
||||
return node;
|
||||
}
|
||||
|
||||
if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
|
||||
return NULL; /* no more common bits */
|
||||
|
||||
troot = node->node.branches.b[((ptr_t)x >> node->node.bit) & EB_NODE_BRANCH_MASK];
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Elastic Binary Trees - exported functions for operations on 32bit nodes.
|
||||
* (C) 2002-2007 - Willy Tarreau <w@1wt.eu>
|
||||
* (C) 2002-2009 - Willy Tarreau <w@1wt.eu>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -40,3 +40,90 @@ REGPRM2 struct eb32_node *eb32i_lookup(struct eb_root *root, s32 x)
|
||||
{
|
||||
return __eb32i_lookup(root, x);
|
||||
}
|
||||
|
||||
/*
 * Find the first occurrence of the lowest key in the tree <root>, which is
 * equal to or greater than <x>. NULL is returned if no key matches.
 */
REGPRM2 struct eb32_node *eb32_lookup_ge(struct eb_root *root, u32 x)
{
	struct eb32_node *node;
	eb_troot_t *troot;

	troot = root->b[EB_LEFT];
	if (unlikely(troot == NULL))
		return NULL;

	/* Walk down the tree following the bits of <x>. The loop exits in one
	 * of three ways: an exact-or-greater match is returned directly, or a
	 * "break" leaves <troot> pointing at a parent link so the code after
	 * the loop can climb up and report the next (greater) node instead.
	 */
	while (1) {
		if ((eb_gettag(troot) == EB_LEAF)) {
			/* We reached a leaf, which means that the whole upper
			 * parts were common. We will return either the current
			 * node or its next one if the former is too small.
			 */
			node = container_of(eb_untag(troot, EB_LEAF),
					    struct eb32_node, node.branches);
			if (node->key >= x)
				return node;
			/* return next */
			troot = node->node.leaf_p;
			break;
		}
		node = container_of(eb_untag(troot, EB_NODE),
				    struct eb32_node, node.branches);

		if (node->node.bit < 0) {
			/* We're at the top of a dup tree. Either we got a
			 * matching value and we return the leftmost node, or
			 * we don't and we skip the whole subtree to return the
			 * next node after the subtree. Note that since we're
			 * at the top of the dup tree, we can simply return the
			 * next node without first trying to escape from the
			 * tree.
			 */
			if (node->key >= x) {
				/* matching dup tree: walk down left to its
				 * first (leftmost) leaf and return it.
				 */
				troot = node->node.branches.b[EB_LEFT];
				while (eb_gettag(troot) != EB_LEAF)
					troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
				return container_of(eb_untag(troot, EB_LEAF),
						    struct eb32_node, node.branches);
			}
			/* return next */
			troot = node->node.node_p;
			break;
		}

		if (((x ^ node->key) >> node->node.bit) >= EB_NODE_BRANCHES) {
			/* No more common bits at all. Either this node is too
			 * large and we need to get its lowest value, or it is too
			 * small, and we need to get the next value.
			 */
			if ((node->key >> node->node.bit) > (x >> node->node.bit)) {
				/* subtree entirely above <x>: its leftmost
				 * leaf is the answer.
				 */
				troot = node->node.branches.b[EB_LEFT];
				return eb32_entry(eb_walk_down(troot, EB_LEFT), struct eb32_node, node);
			}

			/* Further values will be too low here, so return the next
			 * unique node (if it exists).
			 */
			troot = node->node.node_p;
			break;
		}
		/* still sharing a prefix: descend the branch selected by the
		 * relevant bit of <x>.
		 */
		troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
	}

	/* If we get here, it means we want to report next node after the
	 * current one which is not below. <troot> is already initialised
	 * to the parent's branches.
	 */
	while (eb_gettag(troot) != EB_LEFT)
		/* Walking up from right branch, so we cannot be below root */
		troot = (eb_root_to_node(eb_untag(troot, EB_RGHT)))->node_p;

	/* Note that <troot> cannot be NULL at this stage */
	troot = (eb_untag(troot, EB_LEFT))->b[EB_RGHT];
	if (eb_clrtag(troot) == NULL)
		return NULL;

	node = eb32_entry(eb_walk_down(troot, EB_LEFT), struct eb32_node, node);
	return node;
}
|
||||
|
Loading…
Reference in New Issue
Block a user