
ckdb - use a klist for the ktree nodes, have temporary klists, and klists without locks allocated for both

Branch: master
kanoi committed 9 years ago
commit 4c061b6f32
  1. src/ckdb.c (101 lines changed)
  2. src/ckdb.h (10 lines changed)
  3. src/ckdb_cmd.c (2 lines changed)
  4. src/ckdb_data.c (5 lines changed)
  5. src/ckdb_dbio.c (6 lines changed)
  6. src/klist.c (114 lines changed)
  7. src/klist.h (43 lines changed)
  8. src/ktree.c (115 lines changed)
  9. src/ktree.h (28 lines changed)

src/ckdb.c (101 lines changed)

@@ -1032,47 +1032,48 @@ static void alloc_storage()
users_free = k_new_list("Users", sizeof(USERS),
ALLOC_USERS, LIMIT_USERS, true);
users_store = k_new_store(users_free);
users_root = new_ktree(cmp_users, users_free);
userid_root = new_ktree(cmp_userid, users_free);
users_root = new_ktree(NULL, cmp_users, users_free);
userid_root = new_ktree("UsersId", cmp_userid, users_free);
useratts_free = k_new_list("Useratts", sizeof(USERATTS),
ALLOC_USERATTS, LIMIT_USERATTS, true);
useratts_store = k_new_store(useratts_free);
useratts_root = new_ktree(cmp_useratts, useratts_free);
useratts_root = new_ktree(NULL, cmp_useratts, useratts_free);
optioncontrol_free = k_new_list("OptionControl", sizeof(OPTIONCONTROL),
ALLOC_OPTIONCONTROL,
LIMIT_OPTIONCONTROL, true);
optioncontrol_store = k_new_store(optioncontrol_free);
optioncontrol_root = new_ktree(cmp_optioncontrol, optioncontrol_free);
optioncontrol_root = new_ktree(NULL, cmp_optioncontrol,
optioncontrol_free);
workers_free = k_new_list("Workers", sizeof(WORKERS),
ALLOC_WORKERS, LIMIT_WORKERS, true);
workers_store = k_new_store(workers_free);
workers_root = new_ktree(cmp_workers, workers_free);
workers_root = new_ktree(NULL, cmp_workers, workers_free);
paymentaddresses_free = k_new_list("PaymentAddresses",
sizeof(PAYMENTADDRESSES),
paymentaddresses_free = k_new_list("PayAddr", sizeof(PAYMENTADDRESSES),
ALLOC_PAYMENTADDRESSES,
LIMIT_PAYMENTADDRESSES, true);
paymentaddresses_store = k_new_store(paymentaddresses_free);
paymentaddresses_root = new_ktree(cmp_paymentaddresses,
paymentaddresses_root = new_ktree(NULL, cmp_paymentaddresses,
paymentaddresses_free);
paymentaddresses_create_root = new_ktree(cmp_payaddr_create,
paymentaddresses_create_root = new_ktree("PayAddrCreate",
cmp_payaddr_create,
paymentaddresses_free);
paymentaddresses_free->dsp_func = dsp_paymentaddresses;
payments_free = k_new_list("Payments", sizeof(PAYMENTS),
ALLOC_PAYMENTS, LIMIT_PAYMENTS, true);
payments_store = k_new_store(payments_free);
payments_root = new_ktree(cmp_payments, payments_free);
payments_root = new_ktree(NULL, cmp_payments, payments_free);
accountbalance_free = k_new_list("AccountBalance",
sizeof(ACCOUNTBALANCE),
ALLOC_ACCOUNTBALANCE,
LIMIT_ACCOUNTBALANCE, true);
accountbalance_store = k_new_store(accountbalance_free);
accountbalance_root = new_ktree(cmp_accountbalance,
accountbalance_root = new_ktree(NULL, cmp_accountbalance,
accountbalance_free);
idcontrol_free = k_new_list("IDControl", sizeof(IDCONTROL),
@@ -1082,9 +1083,10 @@ static void alloc_storage()
workinfo_free = k_new_list("WorkInfo", sizeof(WORKINFO),
ALLOC_WORKINFO, LIMIT_WORKINFO, true);
workinfo_store = k_new_store(workinfo_free);
workinfo_root = new_ktree(cmp_workinfo, workinfo_free);
workinfo_root = new_ktree(NULL, cmp_workinfo, workinfo_free);
if (!confirm_sharesummary) {
workinfo_height_root = new_ktree(cmp_workinfo_height,
workinfo_height_root = new_ktree("WorkInfoHeight",
cmp_workinfo_height,
workinfo_free);
}
@@ -1092,94 +1094,109 @@ static void alloc_storage()
ALLOC_SHARES, LIMIT_SHARES, true);
shares_store = k_new_store(shares_free);
shares_early_store = k_new_store(shares_free);
shares_root = new_ktree(cmp_shares, shares_free);
shares_early_root = new_ktree(cmp_shares, shares_free);
shares_root = new_ktree(NULL, cmp_shares, shares_free);
shares_early_root = new_ktree("SharesEarly", cmp_shares, shares_free);
shareerrors_free = k_new_list("ShareErrors", sizeof(SHAREERRORS),
ALLOC_SHAREERRORS, LIMIT_SHAREERRORS, true);
ALLOC_SHAREERRORS, LIMIT_SHAREERRORS,
true);
shareerrors_store = k_new_store(shareerrors_free);
shareerrors_early_store = k_new_store(shareerrors_free);
shareerrors_root = new_ktree(cmp_shareerrors, shareerrors_free);
shareerrors_early_root = new_ktree(cmp_shareerrors, shareerrors_free);
shareerrors_root = new_ktree(NULL, cmp_shareerrors, shareerrors_free);
shareerrors_early_root = new_ktree("ShareErrorsEarly", cmp_shareerrors,
shareerrors_free);
sharesummary_free = k_new_list("ShareSummary", sizeof(SHARESUMMARY),
ALLOC_SHARESUMMARY, LIMIT_SHARESUMMARY, true);
ALLOC_SHARESUMMARY, LIMIT_SHARESUMMARY,
true);
sharesummary_store = k_new_store(sharesummary_free);
sharesummary_root = new_ktree(cmp_sharesummary, sharesummary_free);
sharesummary_workinfoid_root = new_ktree(cmp_sharesummary_workinfoid,
sharesummary_root = new_ktree(NULL, cmp_sharesummary,
sharesummary_free);
sharesummary_workinfoid_root = new_ktree("ShareSummaryWId",
cmp_sharesummary_workinfoid,
sharesummary_free);
sharesummary_free->dsp_func = dsp_sharesummary;
sharesummary_pool_store = k_new_store(sharesummary_free);
sharesummary_pool_root = new_ktree(cmp_sharesummary, sharesummary_free);
sharesummary_pool_root = new_ktree("ShareSummaryPool",
cmp_sharesummary, sharesummary_free);
blocks_free = k_new_list("Blocks", sizeof(BLOCKS),
ALLOC_BLOCKS, LIMIT_BLOCKS, true);
blocks_store = k_new_store(blocks_free);
blocks_root = new_ktree(cmp_blocks, blocks_free);
blocks_root = new_ktree(NULL, cmp_blocks, blocks_free);
blocks_free->dsp_func = dsp_blocks;
miningpayouts_free = k_new_list("MiningPayouts", sizeof(MININGPAYOUTS),
ALLOC_MININGPAYOUTS, LIMIT_MININGPAYOUTS, true);
ALLOC_MININGPAYOUTS, LIMIT_MININGPAYOUTS,
true);
miningpayouts_store = k_new_store(miningpayouts_free);
miningpayouts_root = new_ktree(cmp_miningpayouts, miningpayouts_free);
miningpayouts_root = new_ktree(NULL, cmp_miningpayouts,
miningpayouts_free);
payouts_free = k_new_list("Payouts", sizeof(PAYOUTS),
ALLOC_PAYOUTS, LIMIT_PAYOUTS, true);
payouts_store = k_new_store(payouts_free);
payouts_root = new_ktree(cmp_payouts, payouts_free);
payouts_id_root = new_ktree(cmp_payouts_id, payouts_free);
payouts_wid_root = new_ktree(cmp_payouts_wid, payouts_free);
payouts_root = new_ktree(NULL, cmp_payouts, payouts_free);
payouts_id_root = new_ktree("PayoutsId", cmp_payouts_id, payouts_free);
payouts_wid_root = new_ktree("PayoutsWId", cmp_payouts_wid,
payouts_free);
auths_free = k_new_list("Auths", sizeof(AUTHS),
ALLOC_AUTHS, LIMIT_AUTHS, true);
auths_store = k_new_store(auths_free);
auths_root = new_ktree(cmp_auths, auths_free);
auths_root = new_ktree(NULL, cmp_auths, auths_free);
poolstats_free = k_new_list("PoolStats", sizeof(POOLSTATS),
ALLOC_POOLSTATS, LIMIT_POOLSTATS, true);
poolstats_store = k_new_store(poolstats_free);
poolstats_root = new_ktree(cmp_poolstats, poolstats_free);
poolstats_root = new_ktree(NULL, cmp_poolstats, poolstats_free);
userstats_free = k_new_list("UserStats", sizeof(USERSTATS),
ALLOC_USERSTATS, LIMIT_USERSTATS, true);
userstats_store = k_new_store(userstats_free);
userstats_eos_store = k_new_store(userstats_free);
userstats_root = new_ktree(cmp_userstats, userstats_free);
userstats_root = new_ktree(NULL, cmp_userstats, userstats_free);
userstats_free->dsp_func = dsp_userstats;
workerstatus_free = k_new_list("WorkerStatus", sizeof(WORKERSTATUS),
ALLOC_WORKERSTATUS, LIMIT_WORKERSTATUS, true);
ALLOC_WORKERSTATUS, LIMIT_WORKERSTATUS,
true);
workerstatus_store = k_new_store(workerstatus_free);
workerstatus_root = new_ktree(cmp_workerstatus, workerstatus_free);
workerstatus_root = new_ktree(NULL, cmp_workerstatus, workerstatus_free);
markersummary_free = k_new_list("MarkerSummary", sizeof(MARKERSUMMARY),
ALLOC_MARKERSUMMARY, LIMIT_MARKERSUMMARY, true);
ALLOC_MARKERSUMMARY, LIMIT_MARKERSUMMARY,
true);
markersummary_store = k_new_store(markersummary_free);
markersummary_root = new_ktree(cmp_markersummary, markersummary_free);
markersummary_userid_root = new_ktree(cmp_markersummary_userid,
markersummary_root = new_ktree(NULL, cmp_markersummary,
markersummary_free);
markersummary_userid_root = new_ktree("MarkerSummaryUserId",
cmp_markersummary_userid,
markersummary_free);
markersummary_free->dsp_func = dsp_markersummary;
markersummary_pool_store = k_new_store(markersummary_free);
markersummary_pool_root = new_ktree(cmp_markersummary,
markersummary_pool_root = new_ktree("MarkerSummaryPool",
cmp_markersummary,
markersummary_free);
workmarkers_free = k_new_list("WorkMarkers", sizeof(WORKMARKERS),
ALLOC_WORKMARKERS, LIMIT_WORKMARKERS, true);
workmarkers_store = k_new_store(workmarkers_free);
workmarkers_root = new_ktree(cmp_workmarkers, workmarkers_free);
workmarkers_workinfoid_root = new_ktree(cmp_workmarkers_workinfoid,
workmarkers_root = new_ktree(NULL, cmp_workmarkers, workmarkers_free);
workmarkers_workinfoid_root = new_ktree("WorkMarkersWId",
cmp_workmarkers_workinfoid,
workmarkers_free);
workmarkers_free->dsp_func = dsp_workmarkers;
marks_free = k_new_list("Marks", sizeof(MARKS),
ALLOC_MARKS, LIMIT_MARKS, true);
marks_store = k_new_store(marks_free);
marks_root = new_ktree(cmp_marks, marks_free);
marks_root = new_ktree(NULL, cmp_marks, marks_free);
userinfo_free = k_new_list("UserInfo", sizeof(USERINFO),
ALLOC_USERINFO, LIMIT_USERINFO, true);
userinfo_store = k_new_store(userinfo_free);
userinfo_root = new_ktree(cmp_userinfo, userinfo_free);
userinfo_root = new_ktree(NULL, cmp_userinfo, userinfo_free);
#if LOCK_CHECK
DLPRIO(seqset, 91);
@@ -2689,7 +2706,7 @@ static enum cmd_values breakdown(K_ITEM **ml_item, char *buf, tv_t *now,
}
// N.B. these aren't shared so they use _nolock, below
msgline->trf_root = new_ktree(cmp_transfer, transfer_free);
msgline->trf_root = new_ktree_auto("MsgTrf", cmp_transfer, transfer_free);
msgline->trf_store = k_new_store(transfer_free);
next = data;
if (next && strncmp(next, JSON_TRANSFER, JSON_TRANSFER_LEN) == 0) {
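For context, the ckdb.c side of the change is mechanical: every new_ktree() call gains a leading name argument (NULL means the tree reuses its master list's name), and the per-message transfer tree becomes a new_ktree_auto() so its node list starts small and skips lock tracking. Below is a hedged sketch of the resulting usage pattern, not code from this commit: u_item stands for a K_ITEM already unlinked from users_free, and the usual ckdb add_to_ktree()/k_add_head() wrappers are assumed. The master list's write lock still covers both the data items and the trees' node lists.

	users_root = new_ktree(NULL, cmp_users, users_free);        /* tree named "Users" */
	userid_root = new_ktree("UsersId", cmp_userid, users_free); /* second index, own name */

	K_WLOCK(users_free);               /* also protects both trees' node lists */
	add_to_ktree(users_root, u_item);
	add_to_ktree(userid_root, u_item);
	k_add_head(users_store, u_item);
	K_WUNLOCK(users_free);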

src/ckdb.h (10 lines changed)

@@ -46,16 +46,12 @@
#include "klist.h"
#include "ktree.h"
/* TODO: any tree/list accessed in new threads needs
* to ensure all code using those trees/lists use locks
* This code's lock implementation is equivalent to table level locking
* Consider adding row level locking (a per kitem usage count) if needed
* TODO: verify all tables with multithread access are locked
*/
/* This code's lock implementation is equivalent to table level locking
* Consider adding row level locking (a per kitem usage count) if needed */
#define DB_VLOCK "1"
#define DB_VERSION "1.0.4"
#define CKDB_VERSION DB_VERSION"-1.704"
#define CKDB_VERSION DB_VERSION"-1.800"
#define WHERE_FFL " - from %s %s() line %d"
#define WHERE_FFL_HERE __FILE__, __func__, __LINE__

src/ckdb_cmd.c (2 lines changed)

@@ -4189,7 +4189,7 @@ static char *cmd_pplns(__maybe_unused PGconn *conn, char *cmd, char *id,
ss_count = wm_count = ms_count = 0;
mu_store = k_new_store(miningpayouts_free);
mu_root = new_ktree(cmp_mu, miningpayouts_free);
mu_root = new_ktree_auto("OldMPU", cmp_mu, miningpayouts_free);
looksharesummary.workinfoid = block_workinfoid;
looksharesummary.userid = MAXID;

src/ckdb_data.c (5 lines changed)

@@ -3705,7 +3705,10 @@ bool process_pplns(int32_t height, char *blockhash, tv_t *addr_cd)
ss_count = wm_count = ms_count = 0;
mu_store = k_new_store(miningpayouts_free);
mu_root = new_ktree(cmp_mu, miningpayouts_free);
/* Use the master size for this local tree since
* it's large and doesn't get created often */
mu_root = new_ktree_local("PPLNSMPU", cmp_mu, miningpayouts_free);
looksharesummary.workinfoid = blocks->workinfoid;
looksharesummary.userid = MAXID;
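The in-source comment above explains the sizing choice: the two local-tree constructors differ only in how the node list is sized, and both skip the LOCK_CHECK registry. Roughly, per the macros added in src/ktree.h further down, the two calls used in this commit expand as follows (a sketch, not literal preprocessor output):

	/* rarely created but potentially large: inherit the master list's sizing */
	mu_root = new_ktree_local("PPLNSMPU", cmp_mu, miningpayouts_free);
	/* expands to _new_ktree("PPLNSMPU", cmp_mu, miningpayouts_free,
	 *                       miningpayouts_free->allocate,
	 *                       miningpayouts_free->limit, true, ...) */

	/* created per request and usually small: start at NODE_ALLOC (64) nodes */
	mu_root = new_ktree_auto("OldMPU", cmp_mu, miningpayouts_free);
	/* expands to _new_ktree("OldMPU", cmp_mu, miningpayouts_free,
	 *                       NODE_ALLOC, NODE_LIMIT, true, ...) */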

src/ckdb_dbio.c (6 lines changed)

@@ -3813,7 +3813,11 @@ bool sharesummaries_to_markersummaries(PGconn *conn, WORKMARKERS *workmarkers,
K_STORE *old_sharesummary_store = k_new_store(sharesummary_free);
K_STORE *new_markersummary_store = k_new_store(markersummary_free);
K_TREE *ms_root = new_ktree(cmp_markersummary, markersummary_free);
/* Use the master size for this local tree since
* it's large and doesn't get created often */
K_TREE *ms_root = new_ktree_local(shortname, cmp_markersummary,
markersummary_free);
if (!CURRENT(&(workmarkers->expirydate))) {
reason = "unexpired";

src/klist.c (114 lines changed)

@@ -10,6 +10,8 @@
#include "klist.h"
const char *tree_node_list_name = "TreeNodes";
#if LOCK_CHECK
bool check_locks = true;
const char *thread_noname = "UNSET";
@@ -145,7 +147,8 @@ K_STORE *_k_new_store(K_LIST *list, KLIST_FFL_ARGS)
}
K_LIST *_k_new_list(const char *name, size_t siz, int allocate, int limit,
bool do_tail, bool lock_only, KLIST_FFL_ARGS)
bool do_tail, bool lock_only, bool without_lock,
bool local_list, const char *name2, KLIST_FFL_ARGS)
{
K_LIST *list;
@@ -162,14 +165,20 @@ K_LIST *_k_new_list(const char *name, size_t siz, int allocate, int limit,
list->master = list;
list->is_store = false;
list->is_lock_only = lock_only;
list->local_list = local_list;
list->lock = calloc(1, sizeof(*(list->lock)));
if (!(list->lock))
quithere(1, "Failed to calloc lock for list %s", name);
if (without_lock)
list->lock = NULL;
else {
list->lock = calloc(1, sizeof(*(list->lock)));
if (!(list->lock))
quithere(1, "Failed to calloc lock for list %s", name);
cklock_init(list->lock);
cklock_init(list->lock);
}
list->name = name;
list->name2 = name2;
list->siz = siz;
list->allocate = allocate;
list->limit = limit;
@@ -179,24 +188,28 @@ K_LIST *_k_new_list(const char *name, size_t siz, int allocate, int limit,
k_alloc_items(list, KLIST_FFL_PASS);
#if LOCK_CHECK
K_LISTS *klists;
// not locked :P
if (!lock_check_init) {
quitfrom(1, file, func, line,
"in %s(), lock_check_lock has not been initialised!",
__func__);
}
/* Don't want to keep track of short-lived (tree) lists
* since they won't use locking anyway */
if (!list->local_list) {
K_LISTS *klists;
// not locked :P
if (!lock_check_init) {
quitfrom(1, file, func, line,
"in %s(), lock_check_lock has not been initialised!",
__func__);
}
klists = calloc(1, sizeof(*klists));
if (!klists)
quithere(1, "Failed to calloc klists %s", name);
klists = calloc(1, sizeof(*klists));
if (!klists)
quithere(1, "Failed to calloc klists %s", name);
klists->klist = list;
ck_wlock(&lock_check_lock);
klists->next = all_klists;
all_klists = klists;
ck_wunlock(&lock_check_lock);
klists->klist = list;
ck_wlock(&lock_check_lock);
klists->next = all_klists;
all_klists = klists;
ck_wunlock(&lock_check_lock);
}
#endif
return list;
@@ -522,38 +535,43 @@ K_LIST *_k_free_list(K_LIST *list, KLIST_FFL_ARGS)
free(list->data_memory[i]);
free(list->data_memory);
cklock_destroy(list->lock);
if (list->lock) {
cklock_destroy(list->lock);
free(list->lock);
free(list->lock);
}
#if LOCK_CHECK
K_LISTS *klists, *klists_prev = NULL;
// not locked :P
if (!lock_check_init) {
quitfrom(1, file, func, line,
"in %s(), lock_check_lock has not been initialised!",
__func__);
}
// local_list lists are not stored in all_klists
if (!list->local_list) {
K_LISTS *klists, *klists_prev = NULL;
// not locked :P
if (!lock_check_init) {
quitfrom(1, file, func, line,
"in %s(), lock_check_lock has not been initialised!",
__func__);
}
ck_wlock(&lock_check_lock);
klists = all_klists;
while (klists && klists->klist != list) {
klists_prev = klists;
klists = klists->next;
}
if (!klists) {
quitfrom(1, file, func, line,
"in %s(), list %s not in klists",
__func__, list->name);
} else {
if (klists_prev)
klists_prev->next = klists->next;
else
all_klists = klists->next;
free(klists);
ck_wlock(&lock_check_lock);
klists = all_klists;
while (klists && klists->klist != list) {
klists_prev = klists;
klists = klists->next;
}
if (!klists) {
quitfrom(1, file, func, line,
"in %s(), list %s not in klists",
__func__, list->name);
} else {
if (klists_prev)
klists_prev->next = klists->next;
else
all_klists = klists->next;
free(klists);
}
ck_wunlock(&lock_check_lock);
}
ck_wunlock(&lock_check_lock);
#endif
free(list);
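With the two extra parameters, a tree's node list is created through the k_new_tree_list() wrapper (added to src/klist.h below), which passes lock_only=false, without_lock=true and local_list taken from the tree. A sketch of the resulting list state, using names from this commit ("MsgTrf" here is just an example owning-tree name):

	K_LIST *node_free = k_new_tree_list(tree_node_list_name, sizeof(K_NODE),
					    NODE_ALLOC, NODE_LIMIT, true,
					    true, "MsgTrf");
	/* node_free->lock       == NULL        (without_lock: never locked directly)
	 * node_free->local_list == true        (not linked into all_klists)
	 * node_free->name       == "TreeNodes" (shared by every tree node list)
	 * node_free->name2      == "MsgTrf"    (identifies the owning tree) */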

src/klist.h (43 lines changed)

@@ -25,6 +25,8 @@
__maybe_unused const char *func, \
__maybe_unused const int line
extern const char *tree_node_list_name;
/* Code to check the state of locks being requested and also check
* the state of locks when accessing the klist or ktree
* You can disable it with ckpmsg 'locks.ID.locks' so you can compare
@@ -128,10 +130,12 @@ typedef struct k_lock {
typedef struct k_list {
const char *name;
const char *name2; // name of the tree if it's a tree node list
struct k_list *master;
bool is_store;
bool is_lock_only; // a lock emulating a list for lock checking
cklock_t *lock;
bool local_list; // local (tree) lists don't need lock checking at all
cklock_t *lock; // NULL for tree lists
struct k_item *head;
struct k_item *tail;
size_t siz; // item data size
@@ -540,10 +544,30 @@ static inline K_ITEM *list_rtail(K_LIST *list)
#define LIST_HEAD_NOLOCK(_list) (_list)->head
#define LIST_TAIL_NOLOCK(_list) (_list)->tail
#define K_WLOCK(_list) CHECK_WLOCK(_list)
#define K_WUNLOCK(_list) CHECK_WUNLOCK(_list)
#define K_RLOCK(_list) CHECK_RLOCK(_list)
#define K_RUNLOCK(_list) CHECK_RUNLOCK(_list)
#define CHECK_lock(_list) do { \
if ((_list)->lock == NULL) { \
quithere(1, "Attempt to lock list '%s' master '%s' " \
" that has no lock", \
(_list)->name, (_list)->master->name); \
} \
} while (0)
#define K_WLOCK(_list) do { \
CHECK_lock(_list); \
CHECK_WLOCK(_list); \
} while (0)
#define K_WUNLOCK(_list) do { \
CHECK_lock(_list); \
CHECK_WUNLOCK(_list); \
} while (0)
#define K_RLOCK(_list) do { \
CHECK_lock(_list); \
CHECK_RLOCK(_list); \
} while (0)
#define K_RUNLOCK(_list) do { \
CHECK_lock(_list); \
CHECK_RUNLOCK(_list); \
} while (0)
#define STORE_WHEAD(_s) LIST_WHEAD(_s)
#define STORE_RHEAD(_s) LIST_RHEAD(_s)
@@ -558,11 +582,14 @@ extern K_STORE *_k_new_store(K_LIST *list, KLIST_FFL_ARGS);
#define k_new_store(_list) _k_new_store(_list, KLIST_FFL_HERE)
extern K_LIST *_k_new_list(const char *name, size_t siz, int allocate,
int limit, bool do_tail, bool lock_only,
KLIST_FFL_ARGS);
bool without_lock, bool local_list,
const char *name2, KLIST_FFL_ARGS);
#define k_new_list(_name, _siz, _allocate, _limit, _do_tail) \
_k_new_list(_name, _siz, _allocate, _limit, _do_tail, false, KLIST_FFL_HERE)
_k_new_list(_name, _siz, _allocate, _limit, _do_tail, false, false, false, NULL, KLIST_FFL_HERE)
#define k_lock_only_list(_name) \
_k_new_list(_name, 1, 1, 1, true, true, KLIST_FFL_HERE)
_k_new_list(_name, 1, 1, 1, true, true, false, false, NULL, KLIST_FFL_HERE)
#define k_new_tree_list(_name, _siz, _allocate, _limit, _do_tail, _local_tree, _name2) \
_k_new_list(_name, _siz, _allocate, _limit, _do_tail, false, true, _local_tree, _name2, KLIST_FFL_HERE)
extern K_ITEM *_k_unlink_head(K_LIST *list, LOCK_MAYBE bool chklock, KLIST_FFL_ARGS);
#define k_unlink_head(_list) _k_unlink_head(_list, true, KLIST_FFL_HERE)
#define k_unlink_head_nolock(_list) _k_unlink_head(_list, false, KLIST_FFL_HERE)
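The reworked lock macros fail fast rather than dereferencing a NULL cklock_t: any attempt to lock a list created without a lock (which, after this commit, means a tree's node list) quits immediately, naming both the list and its master. A small hedged example, assuming the per-message tree from src/ckdb.c:

	K_WLOCK(transfer_free);            /* fine: transfer_free owns a lock */
	K_WUNLOCK(transfer_free);

	/* a tree node list has lock == NULL, so CHECK_lock() makes this quit
	 * right away instead of dereferencing a NULL lock */
	K_WLOCK(msgline->trf_root->node_free);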

src/ktree.c (115 lines changed)

@@ -15,7 +15,7 @@ static const int dbg = 0;
#define FAIL(fmt, ...) do \
{ \
quithere(1, fmt KTREE_FFL, ##__VA_ARGS__, KTREE_FFL_PASS); \
} while (0);
} while (0)
#define RED_RED true
#define RED_BLACK false
@@ -23,34 +23,59 @@ static const int dbg = 0;
#define Yo true
#define No false
static K_NODE nil[1] = { { Yo, RED_BLACK, NULL, NULL, NULL, NULL, 0 } };
static K_NODE nil[1] = { { NULL, Yo, RED_BLACK, NULL, NULL, NULL, NULL, 0 } };
static K_NODE *_new_knode(KTREE_FFL_ARGS)
static K_NODE *_new_knode(K_TREE *tree, LOCK_MAYBE bool chklock, KTREE_FFL_ARGS)
{
K_NODE *node = (K_NODE *)malloc(sizeof(*node));
if (node == NULL)
FAIL("%s", "node OOM");
K_ITEM *kitem;
K_NODE *knode;
node->isNil = Yo;
node->red = RED_BLACK;
node->parent = nil;
node->left = nil;
node->right = nil;
node->data = NULL;
node->test = 0;
// master protects the tree's node list
_TREE_WRITE(tree, chklock, file, func, line);
kitem = k_unlink_head_nolock(tree->node_free);
if (!kitem)
FAIL("%s", "node list OOM");
k_add_head_nolock(tree->node_store, kitem);
knode = (K_NODE *)(kitem->data);
knode->kitem = kitem;
knode->isNil = Yo;
knode->red = RED_BLACK;
knode->parent = nil;
knode->left = nil;
knode->right = nil;
knode->data = NULL;
knode->test = 0;
return node;
return knode;
}
K_TREE *_new_ktree(cmp_t (*cmp_funct)(K_ITEM *, K_ITEM *), K_LIST *master, KTREE_FFL_ARGS)
K_TREE *_new_ktree(const char *name, cmp_t (*cmp_funct)(K_ITEM *, K_ITEM *),
K_LIST *master, int alloc, int limit, bool local_tree,
KTREE_FFL_ARGS)
{
K_TREE *tree = (K_TREE *)malloc(sizeof(*tree));
if (tree == NULL)
FAIL("%s", "tree OOM");
tree->root = _new_knode(KTREE_FFL_PASS);
if (name == NULL)
tree->name = master->name;
else
tree->name = name;
/* A unique "name" isn't needed since it can't use the wrong list
* and thus we can also identify all tree node lists */
tree->node_free = k_new_tree_list(tree_node_list_name, sizeof(K_NODE),
alloc, limit, true, local_tree,
tree->name);
#if LOCK_CHECK
DLPRIO(tree->node, PRIO_TERMINAL);
#endif
tree->node_store = k_new_store(tree->node_free);
// A new tree's list doesn't need to be locked during creation
tree->root = _new_knode(tree, false, KTREE_FFL_PASS);
tree->cmp_funct = cmp_funct;
@@ -59,13 +84,20 @@ K_TREE *_new_ktree(cmp_t (*cmp_funct)(K_ITEM *, K_ITEM *), K_LIST *master, KTREE
return tree;
}
static K_NODE *new_data(K_ITEM *data, KTREE_FFL_ARGS)
static K_NODE *new_data(K_TREE *tree, K_ITEM *data, LOCK_MAYBE bool chklock, KTREE_FFL_ARGS)
{
K_NODE *knode = (K_NODE *)malloc(sizeof(*knode));
K_ITEM *kitem;
K_NODE *knode;
if (knode == NULL)
FAIL("%s", "OOM");
// master protects the tree's node list
_TREE_WRITE(tree, chklock, file, func, line);
kitem = k_unlink_head_nolock(tree->node_free);
if (!kitem)
FAIL("%s", "node list OOM");
k_add_head_nolock(tree->node_store, kitem);
knode = (K_NODE *)(kitem->data);
knode->kitem = kitem;
knode->isNil = No;
knode->red = RED_RED;
knode->parent = nil;
@@ -514,13 +546,17 @@ void _add_to_ktree(K_TREE *tree, K_ITEM *data, LOCK_MAYBE bool chklock, KTREE_FF
_TREE_WRITE(tree, chklock, file, func, line);
knode = new_data(data, KTREE_FFL_PASS);
// chklock is false since we've already tested it
knode = new_data(tree, data, false, KTREE_FFL_PASS);
if (tree->root->isNil == Yo)
{
if (tree->root != nil)
free(tree->root);
{
// _nolock since we've already tested it if necessary
k_unlink_item_nolock(tree->node_store, tree->root->kitem);
k_add_head_nolock(tree->node_free, tree->root->kitem);
}
tree->root = knode;
}
else
@@ -609,7 +645,8 @@ K_ITEM *_find_in_ktree(K_TREE *tree, K_ITEM *data, K_TREE_CTX *ctx, bool chklock
if (tree->root == NULL)
FAIL("%s", "FINDNULL find tree->root is NULL");
if (chklock) {
if (chklock)
{
_TREE_READ(tree, true, file, func, line);
}
@@ -825,7 +862,7 @@ static K_NODE *removeFixup(K_NODE *root, K_NODE *fix)
// Does this work OK when you remove the last element in the tree?
// It should return the root as 'nil'
void _remove_from_ktree(K_TREE *tree, K_ITEM *data, K_TREE_CTX *ctx, KTREE_FFL_ARGS)
void _remove_from_ktree(K_TREE *tree, K_ITEM *data, K_TREE_CTX *ctx, LOCK_MAYBE bool chklock, KTREE_FFL_ARGS)
{
K_TREE_CTX tmpctx[1];
K_NODE *found;
@@ -842,7 +879,7 @@ void _remove_from_ktree(K_TREE *tree, K_ITEM *data, K_TREE_CTX *ctx, KTREE_FFL_A
if (tree->root == NULL)
FAIL("%s", "REMNULL remove tree->root is NULL");
_TREE_WRITE(tree, true, file, func, line);
_TREE_WRITE(tree, chklock, file, func, line);
if (tree->root->isNil == Yo)
{
@@ -889,7 +926,8 @@ void _remove_from_ktree(K_TREE *tree, K_ITEM *data, K_TREE_CTX *ctx, KTREE_FFL_A
nil2 = NULL;
else
{
nil2 = _new_knode(KTREE_FFL_PASS);
// chklock is false since we've already tested it
nil2 = _new_knode(tree, false, KTREE_FFL_PASS);
x = nil2;
}
@@ -983,7 +1021,9 @@ DBG("@remove found nil2 in ktree(right) %d!!!\n", (int)cmp);
}
}
*/
free(nil2);
// _nolock since we've already tested it if necessary
k_unlink_item_nolock(tree->node_store, nil2->kitem);
k_add_head_nolock(tree->node_free, nil2->kitem);
}
/*
@@ -1013,14 +1053,11 @@ DBG("@remove after balance=%d :(\n", (int)cmp);
return;
}
void _remove_from_ktree_free(K_TREE *root, K_ITEM *data, KTREE_FFL_ARGS)
void _remove_from_ktree_free(K_TREE *root, K_ITEM *data, bool chklock, KTREE_FFL_ARGS)
{
K_TREE_CTX ctx[1];
_remove_from_ktree(root, data, ctx, KTREE_FFL_PASS);
if (*ctx)
free(*ctx);
_remove_from_ktree(root, data, ctx, chklock, KTREE_FFL_PASS);
}
static void free_ktree_sub(K_NODE *knode, void (*free_funct)(void *))
@@ -1032,11 +1069,11 @@ static void free_ktree_sub(K_NODE *knode, void (*free_funct)(void *))
free_ktree_sub(knode->left, free_funct);
free_ktree_sub(knode->right, free_funct);
free(knode);
}
}
/* TODO: remove free_funct, it's not the tree's job to free the item data
* that should be done when freeing the data list itself */
void _free_ktree(K_TREE *tree, void (*free_funct)(void *), KTREE_FFL_ARGS)
{
if (tree == NULL)
@@ -1045,5 +1082,9 @@ void _free_ktree(K_TREE *tree, void (*free_funct)(void *), KTREE_FFL_ARGS)
if (tree->root->parent != NULL && tree->root->parent != nil)
FAIL("%s", "FREENOTROOT free tree->root not root");
free_ktree_sub(tree->root, free_funct);
if (free_funct)
free_ktree_sub(tree->root, free_funct);
tree->node_store = k_free_store(tree->node_store);
tree->node_free = k_free_list(tree->node_free);
}
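The per-node malloc()/free() is gone: _new_knode() and new_data() pull a spare K_ITEM off the tree's node_free list and park it on node_store, and removal pushes it back, all via the _nolock list calls because the master lock has already been checked (or deliberately skipped). Condensed, the pattern used throughout the hunks above is:

	K_ITEM *kitem;
	K_NODE *knode;

	/* allocate a node for 'tree' */
	kitem = k_unlink_head_nolock(tree->node_free);
	k_add_head_nolock(tree->node_store, kitem);
	knode = (K_NODE *)(kitem->data);
	knode->kitem = kitem;               /* back-pointer used when releasing it */

	/* release a node from 'tree' */
	k_unlink_item_nolock(tree->node_store, knode->kitem);
	k_add_head_nolock(tree->node_free, knode->kitem);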

src/ktree.h (28 lines changed)

@@ -39,6 +39,7 @@
typedef struct knode
{
K_ITEM *kitem;
bool isNil;
bool red;
struct knode *parent;
@@ -50,15 +51,31 @@ typedef struct knode
typedef struct ktree
{
const char *name;
K_NODE *root;
cmp_t (*cmp_funct)(K_ITEM *, K_ITEM *);
K_LIST *master;
K_LIST *node_free;
K_STORE *node_store;
} K_TREE;
typedef void *K_TREE_CTX;
extern K_TREE *_new_ktree(cmp_t (*cmp_funct)(K_ITEM *, K_ITEM *), K_LIST *master, KTREE_FFL_ARGS);
#define new_ktree(_cmp_funct, _master) _new_ktree(_cmp_funct, _master, KLIST_FFL_HERE)
// Avoid allocating too much ram up front for temporary trees
#define NODE_ALLOC 64
#define NODE_LIMIT 0
extern K_TREE *_new_ktree(const char *name, cmp_t (*cmp_funct)(K_ITEM *, K_ITEM *),
K_LIST *master, int alloc, int limit, bool local_tree,
KTREE_FFL_ARGS);
#define new_ktree(_name, _cmp_funct, _master) \
_new_ktree(_name, _cmp_funct, _master, _master->allocate, _master->limit, false, KLIST_FFL_HERE)
#define new_ktree_local(_name, _cmp_funct, _master) \
_new_ktree(_name, _cmp_funct, _master, _master->allocate, _master->limit, true, KLIST_FFL_HERE)
#define new_ktree_auto(_name, _cmp_funct, _master) \
_new_ktree(_name, _cmp_funct, _master, NODE_ALLOC, NODE_LIMIT, true, KLIST_FFL_HERE)
#define new_ktree_size(_name, _cmp_funct, _master, _alloc, _limit) \
_new_ktree(_name, _cmp_funct, _master, _alloc, _limit, false, KLIST_FFL_HERE)
extern void _dump_ktree(K_TREE *tree, char *(*dsp_funct)(K_ITEM *), KTREE_FFL_ARGS);
#define dump_ktree(_tree, _dsp_funct) _dump_ktree(_tree, _dsp_funct, KLIST_FFL_HERE)
extern void _dsp_ktree(K_TREE *tree, char *filename, char *msg, KTREE_FFL_ARGS);
@@ -87,9 +104,10 @@ extern K_ITEM *_find_after_in_ktree(K_TREE *ktree, K_ITEM *data, K_TREE_CTX *ctx
//#define find_after_in_ktree_nolock(_ktree, _data, _ctx) _find_after_in_ktree(_ktree, _data, _ctx, false, KLIST_FFL_HERE)
extern K_ITEM *_find_before_in_ktree(K_TREE *ktree, K_ITEM *data, K_TREE_CTX *ctx, KTREE_FFL_ARGS);
#define find_before_in_ktree(_ktree, _data, _ctx) _find_before_in_ktree(_ktree, _data, _ctx, KLIST_FFL_HERE)
extern void _remove_from_ktree(K_TREE *tree, K_ITEM *data, K_TREE_CTX *ctx, KTREE_FFL_ARGS);
extern void _remove_from_ktree_free(K_TREE *tree, K_ITEM *data, KTREE_FFL_ARGS);
#define remove_from_ktree(_tree, _data) _remove_from_ktree_free(_tree, _data, KLIST_FFL_HERE)
extern void _remove_from_ktree(K_TREE *tree, K_ITEM *data, K_TREE_CTX *ctx, LOCK_MAYBE bool chklock, KTREE_FFL_ARGS);
extern void _remove_from_ktree_free(K_TREE *tree, K_ITEM *data, bool chklock, KTREE_FFL_ARGS);
#define remove_from_ktree(_tree, _data) _remove_from_ktree_free(_tree, _data, true, KLIST_FFL_HERE)
//#define remove_from_ktree_nolock(_tree, _data) _remove_from_ktree_free(_tree, _data, false, KLIST_FFL_HERE)
extern void _free_ktree(K_TREE *tree, void (*free_funct)(void *), KTREE_FFL_ARGS);
#define free_ktree(_tree, _free_funct) do { \
_free_ktree(_tree, _free_funct, KLIST_FFL_HERE); \
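Putting the pieces together, a hedged end-to-end sketch of a temporary tree under the new API, modelled on the per-message transfer tree in src/ckdb.c (filling of the store/tree and error handling omitted). Passing NULL as free_funct means free_ktree() only tears down the node store and node list, leaving the TRANSFER items to be returned to transfer_free separately:

	K_TREE *trf_root = new_ktree_auto("MsgTrf", cmp_transfer, transfer_free);
	K_STORE *trf_store = k_new_store(transfer_free);

	/* ... unlink TRANSFER items from transfer_free, add them to trf_store
	 * and trf_root while processing the message ... */

	free_ktree(trf_root, NULL);             /* frees node_store and node_free */
	trf_store = k_free_store(trf_store);    /* after its items go back to transfer_free */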
