
Use the terse hash helpers in the stratifier

master · Con Kolivas · 10 years ago · parent commit 950a20e76f

src/stratifier.c — 76 changed lines
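The HASH_ITERATE and HASH_FOREACH helpers themselves are not part of this diff; they are defined elsewhere in the ckpool tree. Judging from the call sites below, they are single-pointer wrappers over plain uthash list traversal, dropping the temporary successor pointer that uthash's stock HASH_ITER carries. A minimal sketch of plausible definitions (assumed for illustration, not copied from ckpool):

	/* Hypothetical reconstruction of the terse helpers, inferred from
	 * their call sites in this diff. */

	/* Walk a uthash table via the default 'hh' handle. No temporary
	 * successor is kept, so entries must not be deleted mid-walk. */
	#define HASH_ITERATE(head, el) \
		for ((el) = (head); (el); (el) = (el)->hh.next)

	/* The same walk via a named handle, e.g. 'sh' for the subproxies
	 * tables below. */
	#define HASH_FOREACH(handle, head, el) \
		for ((el) = (head); (el); (el) = (el)->handle.next)

None of the converted loops deletes entries from the table it is walking, which is what lets each *tmp local disappear.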

@@ -1178,14 +1178,14 @@ static proxy_t *existing_subproxy(sdata_t *sdata, const int id, const int subid)
 /* Set proxy to the current proxy and calculate how much headroom it has */
 static int64_t current_headroom(sdata_t *sdata, proxy_t **proxy)
 {
-	proxy_t *subproxy, *tmp;
 	int64_t headroom = 0;
+	proxy_t *subproxy;
 
 	mutex_lock(&sdata->proxy_lock);
 	*proxy = sdata->proxy;
 	if (!*proxy)
 		goto out_unlock;
-	HASH_ITER(sh, (*proxy)->subproxies, subproxy, tmp) {
+	HASH_FOREACH(sh, (*proxy)->subproxies, subproxy) {
 		if (subproxy->dead)
 			continue;
 		headroom += subproxy->max_clients - subproxy->clients;
@@ -1212,8 +1212,8 @@ static void generator_recruit(const ckpool_t *ckp, const int recruits)
  * switched lazily. */
 static void reconnect_clients(sdata_t *sdata)
 {
-	stratum_instance_t *client, *tmpclient;
 	int reconnects = 0, hard = 0;
+	stratum_instance_t *client;
 	int64_t headroom;
 	proxy_t *proxy;
 
@@ -1222,7 +1222,7 @@ static void reconnect_clients(sdata_t *sdata)
 		return;
 
 	ck_rlock(&sdata->instance_lock);
-	HASH_ITER(hh, sdata->stratum_instances, client, tmpclient) {
+	HASH_ITERATE(sdata->stratum_instances, client) {
 		if (client->proxyid == proxy->id)
			continue;
 		if (headroom-- < 1)
@@ -1248,10 +1248,10 @@ static void reconnect_clients(sdata_t *sdata)
 
 static bool __subproxies_alive(proxy_t *proxy)
 {
-	proxy_t *subproxy, *tmp;
 	bool alive = false;
+	proxy_t *subproxy;
 
-	HASH_ITER(sh, proxy->subproxies, subproxy, tmp) {
+	HASH_FOREACH(sh, proxy->subproxies, subproxy) {
 		if (!subproxy->dead) {
 			alive = true;
 			break;
@@ -1264,13 +1264,13 @@ static bool __subproxies_alive(proxy_t *proxy)
  * the highest priority alive one. Uses ckp sdata */
 static void check_bestproxy(sdata_t *sdata)
 {
-	proxy_t *proxy, *tmp;
 	int changed_id = -1;
+	proxy_t *proxy;
 
 	mutex_lock(&sdata->proxy_lock);
 	if (sdata->proxy && !__subproxies_alive(sdata->proxy))
 		sdata->proxy = NULL;
-	HASH_ITER(hh, sdata->proxies, proxy, tmp) {
+	HASH_ITERATE(sdata->proxies, proxy) {
 		if (!__subproxies_alive(proxy))
 			continue;
 		if (!sdata->proxy || sdata->proxy->id > proxy->id) {
@@ -1288,8 +1288,8 @@ static void check_bestproxy(sdata_t *sdata)
 
 static void dead_proxyid(sdata_t *sdata, const int id, const int subid)
 {
-	stratum_instance_t *client, *tmp;
 	int reconnects = 0, hard = 0;
+	stratum_instance_t *client;
 	int64_t headroom;
 	proxy_t *proxy;
 
@@ -1301,7 +1301,7 @@ static void dead_proxyid(sdata_t *sdata, const int id, const int subid)
 	headroom = current_headroom(sdata, &proxy);
 
 	ck_rlock(&sdata->instance_lock);
-	HASH_ITER(hh, sdata->stratum_instances, client, tmp) {
+	HASH_ITERATE(sdata->stratum_instances, client) {
 		if (client->proxyid != id || client->subproxyid != subid)
 			continue;
 		reconnects++;
@@ -1514,7 +1514,7 @@ static void stratum_send_diff(sdata_t *sdata, const stratum_instance_t *client);
 static void update_diff(ckpool_t *ckp, const char *cmd)
 {
 	sdata_t *sdata = ckp->data, *dsdata;
-	stratum_instance_t *client, *tmp;
+	stratum_instance_t *client;
 	double old_diff, diff;
 	int id = 0, subid = 0;
 	const char *buf;
@@ -1568,7 +1568,7 @@ static void update_diff(ckpool_t *ckp, const char *cmd)
 	/* If the diff has dropped, iterate over all the clients and check
 	 * they're at or below the new diff, and update it if not. */
 	ck_rlock(&sdata->instance_lock);
-	HASH_ITER(hh, sdata->stratum_instances, client, tmp) {
+	HASH_ITERATE(sdata->stratum_instances, client) {
 		if (client->proxyid != id)
 			continue;
 		if (client->subproxyid != subid)
@@ -1602,14 +1602,14 @@ static void free_proxy(proxy_t *proxy)
  * those. */
 static void reap_proxies(ckpool_t *ckp, sdata_t *sdata)
 {
-	proxy_t *proxy, *proxytmp, *subproxy, *subtmp;
+	proxy_t *proxy, *subproxy, *subtmp;
 	int dead = 0;
 
 	if (!ckp->proxy)
 		return;
 
 	mutex_lock(&sdata->proxy_lock);
-	HASH_ITER(hh, sdata->proxies, proxy, proxytmp) {
+	HASH_ITERATE(sdata->proxies, proxy) {
 		HASH_ITER(sh, proxy->subproxies, subproxy, subtmp) {
 			if (!subproxy->bound_clients && !subproxy->dead) {
 				/* Reset the counter to reuse this proxy */
@@ -1682,10 +1682,10 @@ static stratum_instance_t *ref_instance_by_id(sdata_t *sdata, const int64_t id)
 /* Has this client_id already been used and is now in one of the dropped lists */
 static bool __dropped_instance(sdata_t *sdata, const int64_t id)
 {
-	stratum_instance_t *client, *tmp;
+	stratum_instance_t *client;
 	bool ret = false;
 
-	HASH_ITER(hh, sdata->disconnected_instances, client, tmp) {
+	HASH_ITERATE(sdata->disconnected_instances, client) {
 		if (unlikely(client->id == id)) {
 			ret = true;
 			goto out;
@@ -1802,8 +1802,8 @@ static stratum_instance_t *__stratum_add_instance(ckpool_t *ckp, const int64_t i
 
 static uint64_t disconnected_sessionid_exists(sdata_t *sdata, const char *sessionid, const int64_t id)
 {
-	stratum_instance_t *client, *tmp;
 	uint64_t enonce1_64 = 0, ret = 0;
+	stratum_instance_t *client;
 	int64_t old_id = 0;
 	int slen;
 
@@ -1820,7 +1820,7 @@ static uint64_t disconnected_sessionid_exists(sdata_t *sdata, const char *sessio
 	hex2bin(&enonce1_64, sessionid, slen);
 
 	ck_wlock(&sdata->instance_lock);
-	HASH_ITER(hh, sdata->stratum_instances, client, tmp) {
+	HASH_ITERATE(sdata->stratum_instances, client) {
 		if (client->id == id)
 			continue;
 		if (client->enonce1_64 == enonce1_64) {
@@ -1869,7 +1869,7 @@ static void stratum_broadcast(sdata_t *sdata, json_t *val)
 {
 	ckpool_t *ckp = sdata->ckp;
 	sdata_t *ckp_sdata = ckp->data;
-	stratum_instance_t *client, *tmp;
+	stratum_instance_t *client;
 	ckmsg_t *bulk_send = NULL;
 	time_t now_t = time(NULL);
 	int hard_reconnect = 0;
@@ -1882,7 +1882,7 @@ static void stratum_broadcast(sdata_t *sdata, json_t *val)
 
 	/* Use this locking as an opportunity to test other clients. */
 	ck_rlock(&ckp_sdata->instance_lock);
-	HASH_ITER(hh, ckp_sdata->stratum_instances, client, tmp) {
+	HASH_ITERATE(ckp_sdata->stratum_instances, client) {
 		ckmsg_t *client_msg;
 		smsg_t *msg;
 
@@ -2003,7 +2003,7 @@ static void stratum_broadcast_message(sdata_t *sdata, const char *msg)
 static void request_reconnect(sdata_t *sdata, const char *cmd)
 {
 	char *port = strdupa(cmd), *url = NULL;
-	stratum_instance_t *client, *tmp;
+	stratum_instance_t *client;
 	json_t *json_msg;
 
 	strsep(&port, ":");
@@ -2023,7 +2023,7 @@ static void request_reconnect(sdata_t *sdata, const char *cmd)
 	/* Tag all existing clients as dropped now so they can be removed
 	 * lazily */
 	ck_wlock(&sdata->instance_lock);
-	HASH_ITER(hh, sdata->stratum_instances, client, tmp) {
+	HASH_ITERATE(sdata->stratum_instances, client) {
 		client->dropped = true;
 	}
 	ck_wunlock(&sdata->instance_lock);
@@ -2031,14 +2031,14 @@ static void request_reconnect(sdata_t *sdata, const char *cmd)
 
 static void reset_bestshares(sdata_t *sdata)
 {
-	user_instance_t *user, *tmpuser;
-	stratum_instance_t *client, *tmp;
+	stratum_instance_t *client;
+	user_instance_t *user;
 
 	ck_rlock(&sdata->instance_lock);
-	HASH_ITER(hh, sdata->stratum_instances, client, tmp) {
+	HASH_ITERATE(sdata->stratum_instances, client) {
 		client->best_diff = 0;
 	}
-	HASH_ITER(hh, sdata->user_instances, user, tmpuser) {
+	HASH_ITERATE(sdata->user_instances, user) {
 		worker_instance_t *worker;
 
 		user->best_diff = 0;
@@ -2561,7 +2561,7 @@ static void stratum_send_message(sdata_t *sdata, const stratum_instance_t *clien
 * running out of room. */
 static sdata_t *select_sdata(const ckpool_t *ckp, sdata_t *ckp_sdata)
 {
-	proxy_t *current, *proxy, *subproxy, *best = NULL, *tmp, *tmpsub;
+	proxy_t *current, *proxy, *subproxy, *best = NULL;
 	int best_subid = 0, best_id;
 
 	if (!ckp->proxy || ckp->passthrough)
@@ -2574,12 +2574,12 @@ static sdata_t *select_sdata(const ckpool_t *ckp, sdata_t *ckp_sdata)
 	best_id = maxint;
 
 	mutex_lock(&ckp_sdata->proxy_lock);
-	HASH_ITER(hh, ckp_sdata->proxies, proxy, tmp) {
+	HASH_ITERATE(ckp_sdata->proxies, proxy) {
 		int64_t max_headroom;
 
 		best = NULL;
 		proxy->headroom = max_headroom = 0;
-		HASH_ITER(sh, proxy->subproxies, subproxy, tmpsub) {
+		HASH_FOREACH(sh, proxy->subproxies, subproxy) {
 			int64_t subproxy_headroom;
 
 			if (subproxy->dead)
@@ -4610,7 +4610,7 @@ out:
 static void update_workerstats(ckpool_t *ckp, sdata_t *sdata)
 {
 	json_entry_t *json_list = NULL, *entry, *tmpentry;
-	user_instance_t *user, *tmp;
+	user_instance_t *user;
 	char cdfield[64];
 	time_t now_t;
 	ts_t ts_now;
@@ -4628,7 +4628,7 @@ static void update_workerstats(ckpool_t *ckp, sdata_t *sdata)
 	now_t = ts_now.tv_sec;
 
 	ck_rlock(&sdata->instance_lock);
-	HASH_ITER(hh, sdata->user_instances, user, tmp) {
+	HASH_ITERATE(sdata->user_instances, user) {
 		worker_instance_t *worker;
 		uint8_t cycle_mask;
 
@@ -4732,10 +4732,10 @@ static void *statsupdate(void *arg)
 	double ghs, ghs1, ghs5, ghs15, ghs60, ghs360, ghs1440, ghs10080, per_tdiff;
 	char suffix1[16], suffix5[16], suffix15[16], suffix60[16], cdfield[64];
 	char suffix360[16], suffix1440[16], suffix10080[16];
-	stratum_instance_t *client, *tmp;
 	log_entry_t *log_entries = NULL;
-	user_instance_t *user, *tmpuser;
 	char_entry_t *char_list = NULL;
+	stratum_instance_t *client;
+	user_instance_t *user;
 	int idle_workers = 0;
 	char *fname, *s, *sp;
 	tv_t now, diff;
@@ -4748,7 +4748,7 @@ static void *statsupdate(void *arg)
 		timersub(&now, &stats->start_time, &diff);
 
 		ck_rlock(&sdata->instance_lock);
-		HASH_ITER(hh, sdata->stratum_instances, client, tmp) {
+		HASH_ITERATE(sdata->stratum_instances, client) {
 			if (!client_active(client))
 				continue;
 
@@ -4768,7 +4768,7 @@ static void *statsupdate(void *arg)
 			}
 		}
 
-		HASH_ITER(hh, sdata->user_instances, user, tmpuser) {
+		HASH_ITERATE(sdata->user_instances, user) {
 			worker_instance_t *worker;
 			bool idle = false;
 
@@ -4934,7 +4934,7 @@ static void *statsupdate(void *arg)
 		fclose(fp);
 
 		if (ckp->proxy && sdata->proxy) {
-			proxy_t *proxy, *proxytmp, *subproxy, *subtmp;
+			proxy_t *proxy, *subproxy;
 
 			mutex_lock(&sdata->proxy_lock);
 			JSON_CPACK(val, "{sI,si,si}",
@@ -4949,7 +4949,7 @@ static void *statsupdate(void *arg)
 			dealloc(s);
 
 			mutex_lock(&sdata->proxy_lock);
-			HASH_ITER(hh, sdata->proxies, proxy, proxytmp) {
+			HASH_ITERATE(sdata->proxies, proxy) {
 				JSON_CPACK(val, "{sI,si,sI,sb}",
 						"id", proxy->id,
						"subproxies", proxy->subproxy_count,
@@ -4960,7 +4960,7 @@ static void *statsupdate(void *arg)
 				ASPRINTF(&sp, "Proxies:%s", s);
 				dealloc(s);
 				add_msg_entry(&char_list, &sp);
-				HASH_ITER(sh, proxy->subproxies, subproxy, subtmp) {
+				HASH_FOREACH(sh, proxy->subproxies, subproxy) {
 					JSON_CPACK(val, "{sI,si,si,sI,sI,sf,sb}",
 							"id", subproxy->id,
							"subid", subproxy->subid,
