Mercurial > sci
changeset 20:f98ea578b1ef
misc: revamp database
author | David Demelier <markand@malikania.fr> |
---|---|
date | Tue, 19 Jul 2022 21:52:42 +0200 |
parents | de4bf839b565 |
children | ec30e1b078a9 |
files | Makefile config.def.h lib/apic.c lib/apic.h lib/db.c lib/db.h lib/types.c lib/types.h lib/util.c lib/util.h scid/http.c scid/page-api-jobs.c scid/page-api-jobs.h scid/page-api-projects.c scid/page-api-todo.c scid/page-api-todo.h scid/page-api-workers.c sciworkerd/sciworkerd.c |
diffstat | 18 files changed, 716 insertions(+), 353 deletions(-) [+] |
line wrap: on
line diff
--- a/Makefile Fri Jul 15 11:11:48 2022 +0200 +++ b/Makefile Tue Jul 19 21:52:42 2022 +0200 @@ -45,8 +45,8 @@ SCID_SRCS= scid/http.c \ scid/main.c \ - scid/page-api-jobs.c \ scid/page-api-projects.c \ + scid/page-api-todo.c \ scid/page-api-workers.c \ scid/page.c SCID_OBJS= ${SCID_SRCS:.c=.o}
--- a/config.def.h Fri Jul 15 11:11:48 2022 +0200 +++ b/config.def.h Tue Jul 19 21:52:42 2022 +0200 @@ -22,6 +22,7 @@ /* Database limits. */ #define SCI_PROJECT_MAX 64 /* Projects allowed in database. */ #define SCI_WORKER_MAX 32 /* Workers allowed in database. */ +#define SCI_TAG_MAX 128 /* Usage limits. */ #define SCI_JOB_LIST_MAX 128 /* Jobs max list size. */
--- a/lib/apic.c Fri Jul 15 11:11:48 2022 +0200 +++ b/lib/apic.c Tue Jul 19 21:52:42 2022 +0200 @@ -7,6 +7,7 @@ #include <curl/curl.h> #include "apic.h" +#include "types.h" #include "util.h" struct curlpack { @@ -15,6 +16,17 @@ struct curl_slist *headers; }; +struct converter { + void *data; + size_t datasz; + int (*unpack)(void *, size_t, json_t *); + json_t *(*pack)(const void *, size_t); +}; + +struct apiconf apiconf = { + .baseurl = "http://127.0.0.1" +}; + static size_t writer(char *in, size_t n, size_t w, FILE *fp) { @@ -28,13 +40,16 @@ create_url(const char *fmt, va_list args) { static _Thread_local char ret[256]; + char page[128]; va_list ap; ret[0] = 0; va_copy(ap, args); - vsnprintf(ret, sizeof (ret), fmt, ap); + vsnprintf(page, sizeof (page), fmt, ap); va_end(ap); + snprintf(ret, sizeof (ret), "%s/%s", apiconf.baseurl, page); + return ret; } @@ -79,7 +94,7 @@ } static int -perform(struct apicreq *req, const char *body, const char *fmt, va_list ap) +perform(struct apic *req, const char *body, const char *fmt, va_list ap) { FILE *fp; char *response, *url; @@ -119,7 +134,7 @@ } int -apic_get(struct apicreq *req, const char *fmt, ...) +apic_get(struct apic *req, const char *fmt, ...) { assert(req); assert(fmt); @@ -135,7 +150,7 @@ } int -apic_post(struct apicreq *req, const json_t *doc, const char *fmt, ...) +apic_post(struct apic *req, const json_t *doc, const char *fmt, ...) { assert(req); assert(fmt); @@ -155,3 +170,287 @@ return ret; } + +static int +get(struct apic *req, const struct converter *cv, const char *fmt, ...) 
+{ + va_list ap; + int ret; + + memset(req, 0, sizeof (*req)); + + va_start(ap, fmt); + ret = perform(req, NULL, fmt, ap); + va_end(ap); + + if (ret < 0) + return -1; + if (!req->doc || !json_is_object(req->doc)) + return snprintf(req->error, sizeof (req->error), "invalid JSON object received"), -1; + if (cv->unpack(cv->data, cv->datasz, req->doc) < 0) + return snprintf(req->error, sizeof (req->error), "%s", strerror(errno)), -1; + + return 0; +} + +static int +create(struct apic *req, const struct converter *cv, const char *fmt, ...) +{ + va_list ap; + int ret; + json_t *doc; + char *body; + + memset(req, 0, sizeof (*req)); + + if (!(doc = cv->pack(cv->data, cv->datasz))) + return snprintf(req->error, sizeof (req->error), "%s", strerror(errno)), -1; + if (!(body = json_dumps(doc, JSON_COMPACT))) { + json_decref(doc); + return snprintf(req->error, sizeof (req->error), "%s", strerror(errno)), -1; + } + + va_start(ap, fmt); + ret = perform(req, body, fmt, ap); + va_end(ap); + json_decref(doc); + free(body); + + if (ret < 0) + return -1; + if (cv->unpack && cv->unpack(cv->data, cv->datasz, req->doc) < 0) + return snprintf(req->error, sizeof (req->error), "%s", strerror(errno)), -1; + + return 0; +} + +static json_t * +wrap_job_to(const void *data, size_t datasz) +{ + return job_to(data, datasz); +} + +static int +wrap_job_from(void *data, size_t datasz, json_t *doc) +{ + return job_from(data, datasz, doc); +} + +static json_t * +wrap_jobresult_to(const void *data, size_t datasz) +{ + return jobresult_to(data, datasz); +} + +static int +wrap_project_from(void *data, size_t datasz, json_t *doc) +{ + return project_from(data, datasz, doc); +} + +static json_t * +wrap_project_to(const void *data, size_t datasz) +{ + return project_to(data, datasz); +} + +static int +wrap_worker_from(void *data, size_t datasz, json_t *doc) +{ + return worker_from(data, datasz, doc); +} + +static json_t * +wrap_worker_to(const void *data, size_t datasz) +{ + return worker_to(data, datasz); +} + +static int +wrap_jobresult_from(void *data, size_t 
datasz, json_t *doc) +{ + return jobresult_from(data, datasz, doc); +} + +int +apic_job_add(struct apic *req, struct job *job) +{ + assert(req); + assert(job); + + const struct converter cv = { + .data = job, + .datasz = 1, + .pack = wrap_job_to, + .unpack = wrap_job_from + }; + + return create(req, &cv, "api/v1/jobs"); +} + +ssize_t +apic_job_todo(struct apic *req, struct job *jobs, size_t jobsz, int worker_id) +{ + assert(req); + assert(jobs); + + struct converter cv = { + .data = jobs, + .datasz = 1, + .unpack = wrap_job_from + }; + + return get(req, &cv, "api/v1/jobs/%d", worker_id); +} + +int +apic_jobresult_add(struct apic *req, struct jobresult *result) +{ + assert(req); + assert(result); + + struct converter cv = { + .data = result, + .datasz = 1, + .pack = wrap_jobresult_to, + .unpack = wrap_jobresult_from + }; + + return create(req, &cv, "api/v1/jobresults"); +} + +int +apic_project_add(struct apic *req, struct project *project) +{ + assert(req); + assert(project); + + struct converter cv = { + .data = project, + .datasz = 1, + .pack = wrap_project_to, + .unpack = wrap_project_from + }; + + return create(req, &cv, "api/v1/projects"); +} + +int +apic_project_update(struct apic *req, struct project *project) +{ + assert(req); + assert(project); + + struct converter cv = { + .data = project, + .datasz = 1, + .pack = wrap_project_to, + .unpack = wrap_project_from + }; + + return create(req, &cv, "api/v1/projects"); +} + +ssize_t +apic_project_list(struct apic *req, struct project *projects, size_t projectsz) +{ + assert(req); + assert(projects); + + struct converter cv = { + .data = projects, + .datasz = projectsz, + .unpack = wrap_project_from + }; + + return get(req, &cv, "api/v1/projects"); +} + +int +apic_project_find(struct apic *req, struct project *project) +{ + assert(req); + assert(project); + + struct converter cv = { + .data = project, + .datasz = 1, + .unpack = wrap_project_from + }; + + return get(req, &cv, "api/v1/projects/%s", project->name); 
+} + +int +apic_project_find_id(struct apic *req, struct project *project) +{ + assert(req); + assert(project); + + struct converter cv = { + .data = project, + .datasz = 1, + .unpack = wrap_project_from + }; + + return get(req, &cv, "api/v1/projects/%jd", project->id); +} + +int +apic_worker_add(struct apic *req, struct worker *wk) +{ + assert(req); + assert(wk); + + struct converter cv = { + .data = wk, + .datasz = 1, + .pack = wrap_worker_to + }; + + return create(req, &cv, "api/v1/workers"); +} + +ssize_t +apic_worker_list(struct apic *req, struct worker *wk, size_t wksz) +{ + assert(req); + assert(wk); + assert(wksz); + + struct converter cv = { + .data = wk, + .datasz = wksz, + .unpack = wrap_worker_from + }; + + return get(req, &cv, "api/v1/workers"); +} + +int +apic_worker_find(struct apic *req, struct worker *wk) +{ + assert(req); + assert(wk); + + struct converter cv = { + .data = wk, + .datasz = 1, + .unpack = wrap_worker_from + }; + + return get(req, &cv, "api/v1/workers/%s", wk->name); +} + +int +apic_worker_find_id(struct apic *req, struct worker *wk) +{ + assert(req); + assert(wk); + + struct converter cv = { + .data = wk, + .datasz = 1, + .unpack = wrap_worker_from + }; + + return get(req, &cv, "api/v1/workers/%jd", wk->id); +}
--- a/lib/apic.h Fri Jul 15 11:11:48 2022 +0200 +++ b/lib/apic.h Tue Jul 19 21:52:42 2022 +0200 @@ -3,18 +3,80 @@ #include <jansson.h> +#include "config.h" + #define APIC_ERR_MAX 128 -struct apicreq { +struct job; +struct jobresult; +struct project; +struct worker; + +struct apic { json_t *doc; char error[APIC_ERR_MAX]; long status; }; +extern struct apiconf { + char baseurl[SCI_URL_MAX]; +} apiconf; + +/* Generic HTTP commands using JSON. */ + +/* Perform GET request. */ int -apic_get(struct apicreq *req, const char *url, ...); +apic_get(struct apic *, const char *, ...); + +/* Perform POST request with JSON body. */ +int +apic_post(struct apic *, const json_t *, const char *, ...); + +/* + * Commands to fetch, create, delete or update data. + * + * Any of the following commands need to keep apic structure alive as long as + * data objects are being used because they reference JSON values directly from + * the HTTP response. + */ + +int +apic_job_add(struct apic *, struct job *); + +ssize_t +apic_job_todo(struct apic *, struct job *, size_t, int); int -apic_post(struct apicreq *req, const json_t *body, const char *url, ...); +apic_jobresult_add(struct apic *, struct jobresult *); + +int +apic_project_add(struct apic *, struct project *); + +int +apic_project_update(struct apic *, struct project *); + +ssize_t +apic_project_list(struct apic *, struct project *, size_t); + +int +apic_project_find(struct apic *, struct project *); + +int +apic_project_find_id(struct apic *, struct project *); + +int +apic_worker_add(struct apic *, struct worker *); + +ssize_t +apic_worker_list(struct apic *, struct worker *, size_t); + +int +apic_worker_find(struct apic *, struct worker *); + +int +apic_worker_find_id(struct apic *, struct worker *); + +void +apic_finish(struct apic *); #endif /* !SCI_APIC_H */
--- a/lib/db.c Fri Jul 15 11:11:48 2022 +0200 +++ b/lib/db.c Tue Jul 19 21:52:42 2022 +0200 @@ -47,69 +47,42 @@ static sqlite3 *db; -typedef void (*unpacker)(sqlite3_stmt *, struct db_ctx *, void *); - -struct str { - char *str; - struct str *next; -}; - struct list { - unpacker unpack; + void (*unpack)(sqlite3_stmt *, void *); void *data; size_t datasz; size_t elemwidth; - struct db_ctx *ctx; }; -static const char * -strlist_add(struct db_ctx *ctx, const char *text) +static void +project_unpacker(sqlite3_stmt *stmt, void *data) { - struct str *s, *list = ctx->handle; + struct project *project = data; - s = util_calloc(1, sizeof (*s)); - s->str = util_strdup(text); - LL_APPEND(list, s); - - return s->str; + project->id = sqlite3_column_int(stmt, 0); + project->name = util_strdup(CHAR(sqlite3_column_text(stmt, 1))); + project->desc = util_strdup(CHAR(sqlite3_column_text(stmt, 2))); + project->url = util_strdup(CHAR(sqlite3_column_text(stmt, 3))); + project->script = util_strdup(CHAR(sqlite3_column_text(stmt, 4))); } static void -strlist_free(struct db_ctx *ctx) +worker_unpacker(sqlite3_stmt *stmt, void *data) { - struct str *s, *tmp, *list = ctx->handle; + struct worker *w = data; - LL_FOREACH_SAFE(list, s, tmp) { - free(s->str); - free(s); - } - - ctx->handle = NULL; + w->id = sqlite3_column_int(stmt, 0); + w->name = util_strdup(CHAR(sqlite3_column_text(stmt, 1))); + w->desc = util_strdup(CHAR(sqlite3_column_text(stmt, 2))); } static void -project_unpacker(sqlite3_stmt *stmt, struct db_ctx *ctx, struct project *project) +job_unpacker(sqlite3_stmt *stmt, void *data) { - project->id = sqlite3_column_int(stmt, 0); - project->name = strlist_add(ctx, CHAR(sqlite3_column_text(stmt, 1))); - project->desc = strlist_add(ctx, CHAR(sqlite3_column_text(stmt, 2))); - project->url = strlist_add(ctx, CHAR(sqlite3_column_text(stmt, 3))); - project->script = strlist_add(ctx, CHAR(sqlite3_column_text(stmt, 4))); -} + struct job *job = data; -static void -worker_unpacker(sqlite3_stmt 
*stmt, struct db_ctx *ctx, struct worker *w) -{ - w->id = sqlite3_column_int(stmt, 0); - w->name = strlist_add(ctx, CHAR(sqlite3_column_text(stmt, 1))); - w->desc = strlist_add(ctx, CHAR(sqlite3_column_text(stmt, 2))); -} - -static void -job_unpacker(sqlite3_stmt *stmt, struct db_ctx *ctx, struct job *job) -{ job->id = sqlite3_column_int(stmt, 0); - job->tag = strlist_add(ctx, CHAR(sqlite3_column_text(stmt, 1))); + job->tag = util_strdup(CHAR(sqlite3_column_text(stmt, 1))); job->project_id = sqlite3_column_int(stmt, 2); } @@ -121,6 +94,9 @@ case 'i': sqlite3_bind_int(stmt, index++, va_arg(ap, int)); break; + case 'j': + sqlite3_bind_int64(stmt, index++, va_arg(ap, intmax_t)); + break; case 's': sqlite3_bind_text(stmt, index++, va_arg(ap, const char *), -1, SQLITE_STATIC); break; @@ -197,8 +173,6 @@ ssize_t ret = -1; size_t tot = 0; - sel->ctx->handle = NULL; - if (sqlite3_prepare(db, sql, -1, &stmt, NULL) != SQLITE_OK) return log_warn("db: %s", sqlite3_errmsg(db)), -1; @@ -207,15 +181,12 @@ va_end(ap); while (tot < sel->datasz && (step = sqlite3_step(stmt)) == SQLITE_ROW) - sel->unpack(stmt, sel->ctx, (unsigned char *)sel->data + (tot++ * sel->elemwidth)); + sel->unpack(stmt, (unsigned char *)sel->data + (tot++ * sel->elemwidth)); if (step == SQLITE_OK || step == SQLITE_DONE || step == SQLITE_ROW) ret = tot; - else { + else memset(sel->data, 0, sel->datasz * sel->elemwidth); - strlist_free(sel->ctx->handle); - sel->ctx->handle = NULL; - } sqlite3_finalize(stmt); @@ -256,45 +227,42 @@ } ssize_t -db_project_list(struct db_ctx *ctx, struct project *projects, size_t projectsz) +db_project_list(struct project *projects, size_t projectsz) { struct list sel = { - .unpack = (unpacker)project_unpacker, + .unpack = project_unpacker, .data = projects, .datasz = projectsz, .elemwidth = sizeof (*projects), - .ctx = ctx }; return list(&sel, CHAR(sql_project_list), "z", projectsz); } int -db_project_find(struct db_ctx *ctx, struct project *project) +db_project_find(struct project 
*project, const char *name) { struct list sel = { - .unpack = (unpacker)project_unpacker, + .unpack = project_unpacker, .data = project, .datasz = 1, .elemwidth = sizeof (*project), - .ctx = ctx }; - return list(&sel, CHAR(sql_project_find), "s", project->name) == 1 ? 0 : -1; + return list(&sel, CHAR(sql_project_find), "s", name) == 1 ? 0 : -1; } int -db_project_find_id(struct db_ctx *ctx, struct project *project) +db_project_find_id(struct project *project, intmax_t id) { struct list sel = { - .unpack = (unpacker)project_unpacker, + .unpack = project_unpacker, .data = project, .datasz = 1, .elemwidth = sizeof (*project), - .ctx = ctx }; - return list(&sel, CHAR(sql_project_find_id), "i", project->id) == 1 ? 0 : -1; + return list(&sel, CHAR(sql_project_find_id), "i", id) == 1 ? 0 : -1; } int @@ -306,48 +274,44 @@ } ssize_t -db_worker_list(struct db_ctx *ctx, struct worker *wk, size_t wksz) +db_worker_list(struct worker *wk, size_t wksz) { - assert(ctx); assert(wk); struct list sel = { - .unpack = (unpacker)worker_unpacker, + .unpack = worker_unpacker, .data = wk, .datasz = wksz, .elemwidth = sizeof (*wk), - .ctx = ctx }; return list(&sel, CHAR(sql_worker_list), "z", wksz); } int -db_worker_find(struct db_ctx *ctx, struct worker *wk) +db_worker_find(struct worker *wk, const char *name) { struct list sel = { - .unpack = (unpacker)worker_unpacker, + .unpack = worker_unpacker, .data = wk, .datasz = 1, .elemwidth = sizeof (*wk), - .ctx = ctx }; - return list(&sel, CHAR(sql_worker_find), "s", wk->name) == 1 ? 0 : -1; + return list(&sel, CHAR(sql_worker_find), "s", name) == 1 ? 0 : -1; } int -db_worker_find_id(struct db_ctx *ctx, struct worker *wk) +db_worker_find_id(struct worker *wk, intmax_t id) { struct list sel = { - .unpack = (unpacker)worker_unpacker, + .unpack = worker_unpacker, .data = wk, .datasz = 1, .elemwidth = sizeof (*wk), - .ctx = ctx }; - return list(&sel, CHAR(sql_worker_find_id), "i", wk->id) == 1 ? 
0 : -1; + return list(&sel, CHAR(sql_worker_find_id), "i", id) == 1 ? 0 : -1; } int @@ -360,17 +324,15 @@ } ssize_t -db_job_todo(struct db_ctx *ctx, struct job *jobs, size_t jobsz, int worker_id) +db_job_todo(struct job *jobs, size_t jobsz, int worker_id) { - assert(ctx); assert(jobs); struct list sel = { - .unpack = (unpacker)job_unpacker, + .unpack = job_unpacker, .data = jobs, .datasz = jobsz, .elemwidth = sizeof (*jobs), - .ctx = ctx }; return list(&sel, CHAR(sql_job_todo), "iiz", worker_id, worker_id, jobsz); @@ -393,12 +355,3 @@ db = NULL; } } - -void -db_ctx_finish(struct db_ctx *ctx) -{ - if (ctx->handle) { - strlist_free(ctx->handle); - ctx->handle = NULL; - } -}
--- a/lib/db.h Fri Jul 15 11:11:48 2022 +0200 +++ b/lib/db.h Tue Jul 19 21:52:42 2022 +0200 @@ -21,16 +21,13 @@ #include <sys/types.h> #include <stddef.h> +#include <stdint.h> struct project; struct worker; struct job; struct jobresult; -struct db_ctx { - void *handle; -}; - int db_open(const char *); @@ -38,7 +35,7 @@ db_job_add(struct job *); ssize_t -db_job_todo(struct db_ctx *, struct job *, size_t, int); +db_job_todo(struct job *, size_t, int); int db_jobresult_add(struct jobresult *); @@ -50,30 +47,27 @@ db_project_update(const struct project *); ssize_t -db_project_list(struct db_ctx *, struct project *, size_t); +db_project_list(struct project *, size_t); int -db_project_find(struct db_ctx *, struct project *); +db_project_find(struct project *, const char *); int -db_project_find_id(struct db_ctx *, struct project *); +db_project_find_id(struct project *, intmax_t); int db_worker_add(struct worker *); ssize_t -db_worker_list(struct db_ctx *, struct worker *, size_t); +db_worker_list(struct worker *, size_t); int -db_worker_find(struct db_ctx *, struct worker *); +db_worker_find(struct worker *, const char *); int -db_worker_find_id(struct db_ctx *, struct worker *); +db_worker_find_id(struct worker *, intmax_t); void db_finish(void); -void -db_ctx_finish(struct db_ctx *); - #endif /* !SCI_DB_H */
--- a/lib/types.c Fri Jul 15 11:11:48 2022 +0200 +++ b/lib/types.c Tue Jul 19 21:52:42 2022 +0200 @@ -20,6 +20,7 @@ #include <errno.h> #include "types.h" +#include "util.h" typedef json_t * (*packer)(const void *); typedef int (*unpacker)(void *, json_t *); @@ -37,11 +38,16 @@ static inline int job_unpacker(struct job *job, json_t *doc) { - return json_unpack(doc, "{si si ss}", + const int ret = json_unpack(doc, "{si si ss}", "id", &job->id, "project_id", &job->project_id, "tag", &job->tag ); + + if (ret == 0) + job->tag = util_strdup(job->tag); + + return ret; } static inline json_t * @@ -59,13 +65,18 @@ static inline int jobresult_unpacker(struct jobresult *res, json_t *doc) { - return json_unpack(doc, "{si si si si ss}", + const int ret = json_unpack(doc, "{si si si si ss}", "id", &res->id, "job_id", &res->job_id, "worker_id", &res->worker_id, "exitcode", &res->exitcode, "log", &res->log ); + + if (ret == 0) + res->log = util_strdup(res->log); + + return ret; } static inline json_t * @@ -81,11 +92,18 @@ static inline int worker_unpacker(struct worker *w, json_t *doc) { - return json_unpack(doc, "{si ss ss}", + const int ret = json_unpack(doc, "{si ss ss}", "id", &w->id, "name", &w->name, "desc", &w->desc ); + + if (ret == 0) { + w->name = util_strdup(w->name); + w->desc = util_strdup(w->desc); + } + + return ret; } static inline json_t * @@ -103,13 +121,22 @@ static inline int project_unpacker(struct project *p, json_t *doc) { - return json_unpack(doc, "{si ss ss ss ss}", + const int ret = json_unpack(doc, "{si ss ss ss ss}", "id", &p->id, "name", &p->name, "desc", &p->desc, "url", &p->url, "script", &p->script ); + + if (ret == 0) { + p->name = util_strdup(p->name); + p->desc = util_strdup(p->desc); + p->url = util_strdup(p->url); + p->script = util_strdup(p->script); + } + + return ret; } static json_t *
--- a/lib/types.h Fri Jul 15 11:11:48 2022 +0200 +++ b/lib/types.h Tue Jul 19 21:52:42 2022 +0200 @@ -28,7 +28,7 @@ struct job { intmax_t id; int project_id; - const char *tag; + char *tag; }; struct jobresult { @@ -36,45 +36,65 @@ int job_id; int worker_id; int exitcode; - const char *log; + char *log; }; struct worker { intmax_t id; - const char *name; - const char *desc; + char *name; + char *desc; }; struct project { intmax_t id; - const char *name; - const char *desc; - const char *url; - const char *script; + char *name; + char *desc; + char *url; + char *script; }; +/* job */ + json_t * job_to(const struct job *, size_t); ssize_t job_from(struct job *, size_t, json_t *); +void +job_finish(struct job *); + +/* jobresult */ + json_t * jobresult_to(const struct jobresult *, size_t); ssize_t jobresult_from(struct jobresult *, size_t, json_t *); +void +jobresult_finish(struct jobresult *); + +/* worker */ + json_t * worker_to(const struct worker *, size_t); ssize_t worker_from(struct worker *, size_t, json_t *); +void +worker_finish(struct worker *); + +/* project */ + json_t * project_to(const struct project *, size_t); ssize_t project_from(struct project *, size_t, json_t *); +void +project_finish(struct project *); + #endif /* !SCI_TYPES_H */
--- a/lib/util.c Fri Jul 15 11:11:48 2022 +0200 +++ b/lib/util.c Tue Jul 19 21:52:42 2022 +0200 @@ -104,8 +104,8 @@ static char ret[PATH_MAX]; char tmp[PATH_MAX]; - strlcpy(tmp, str, sizeof (tmp)); - strlcpy(ret, basename(tmp), sizeof (ret)); + util_strlcpy(tmp, str, sizeof (tmp)); + util_strlcpy(ret, basename(tmp), sizeof (ret)); return ret; } @@ -116,8 +116,8 @@ static char ret[PATH_MAX]; char tmp[PATH_MAX]; - strlcpy(tmp, str, sizeof (tmp)); - strlcpy(ret, dirname(tmp), sizeof (ret)); + util_strlcpy(tmp, str, sizeof (tmp)); + util_strlcpy(ret, dirname(tmp), sizeof (ret)); return ret; }
--- a/lib/util.h Fri Jul 15 11:11:48 2022 +0200 +++ b/lib/util.h Tue Jul 19 21:52:42 2022 +0200 @@ -45,6 +45,9 @@ char * util_strndup(const char *, size_t); +size_t +util_strlcpy(char *, const char *, size_t); + char * util_basename(const char *);
--- a/scid/http.c Fri Jul 15 11:11:48 2022 +0200 +++ b/scid/http.c Tue Jul 19 21:52:42 2022 +0200 @@ -28,8 +28,8 @@ #include "http.h" #include "log.h" #include "page.h" -#include "page-api-jobs.h" #include "page-api-projects.h" +#include "page-api-todo.h" #include "page-api-workers.h" enum page { @@ -44,7 +44,7 @@ const char *prefix; void (*handler)(struct kreq *); } apis[] = { - { "v1/jobs", page_api_v1_jobs }, + { "v1/todo", page_api_v1_todo }, { "v1/projects", page_api_v1_projects }, { "v1/workers", page_api_v1_workers }, { NULL, NULL }
--- a/scid/page-api-jobs.c Fri Jul 15 11:11:48 2022 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,133 +0,0 @@ -/* - * page-api-jobs.c -- /api/v?/jobs route - * - * Copyright (c) 2021 David Demelier <markand@malikania.fr> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include <sys/types.h> -#include <assert.h> -#include <stdarg.h> -#include <stdint.h> -#include <string.h> - -#include <kcgi.h> -#include <jansson.h> - -#include "config.h" -#include "db.h" -#include "log.h" -#include "page-api-jobs.h" -#include "page.h" -#include "types.h" -#include "util.h" - -static void -list(struct kreq *r, const struct job *jobs, size_t jobsz) -{ - json_t *doc; - char *dump; - - doc = job_to(jobs, jobsz); - dump = json_dumps(doc, JSON_COMPACT); - - khttp_puts(r, dump); - free(dump); - json_decref(doc); -} - -static int -save(const char *json) -{ - struct jobresult res = {0}; - int ret = -1; - - json_t *doc; - json_error_t err; - - if (!(doc = json_loads(json, 0, &err))) - log_warn("api/post: invalid JSON input: %s", err.text); - else if (jobresult_from(&res, 1, doc) < 0) - log_warn("api/post: failed to decode parameters"); - else if (db_jobresult_add(&res) < 0) - log_warn("api/post: database save error"); - else - ret = 0; - - json_decref(doc); - - return ret; -} - -static void 
-get(struct kreq *r) -{ - struct db_ctx ctx; - struct job jobs[SCI_JOB_LIST_MAX]; - ssize_t jobsz; - struct worker wk = { - .name = util_basename(r->path) - }; - - if (db_worker_find(&ctx, &wk) < 0) { - page(r, NULL, KHTTP_404, KMIME_APP_JSON, NULL); - return; - } - - db_ctx_finish(&ctx); - - if ((jobsz = db_job_todo(&ctx, jobs, UTIL_SIZE(jobs), wk.id)) < 0) - page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL); - else { - khttp_head(r, kresps[KRESP_CONTENT_TYPE], "%s", kmimetypes[KMIME_APP_JSON]); - khttp_head(r, kresps[KRESP_STATUS], "%s", khttps[KHTTP_200]); - khttp_body(r); - list(r, jobs, jobsz); - db_ctx_finish(&ctx); - khttp_free(r); - } -} - -static void -post(struct kreq *r) -{ - if (r->fieldsz < 1) - page(r, NULL, KHTTP_400, KMIME_APP_JSON, NULL); - else if (save(r->fields[0].val) < 0) - page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL); - else { - khttp_head(r, kresps[KRESP_CONTENT_TYPE], "%s", kmimetypes[KMIME_APP_JSON]); - khttp_head(r, kresps[KRESP_STATUS], "%s", khttps[KHTTP_200]); - khttp_body(r); - khttp_free(r); - } -} - -void -page_api_v1_jobs(struct kreq *r) -{ - assert(r); - - switch (r->method) { - case KMETHOD_GET: - get(r); - break; - case KMETHOD_POST: - post(r); - break; - default: - page(r, NULL, KHTTP_400, KMIME_APP_JSON, NULL); - break; - } -}
--- a/scid/page-api-jobs.h Fri Jul 15 11:11:48 2022 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,27 +0,0 @@ -/* - * page-api-jobs.h -- /api/v?/jobs route - * - * Copyright (c) 2021 David Demelier <markand@malikania.fr> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef SCI_PAGE_API_JOBS_H -#define SCI_PAGE_API_JOBS_H - -struct kreq; - -void -page_api_v1_jobs(struct kreq *); - -#endif /* !SCI_PAGE_API_JOBS_H */
--- a/scid/page-api-projects.c Fri Jul 15 11:11:48 2022 +0200 +++ b/scid/page-api-projects.c Tue Jul 19 21:52:42 2022 +0200 @@ -85,61 +85,63 @@ static void get_one(struct kreq *r, const char *name) { - struct db_ctx ctx; - struct project project = { - .name = name - }; + struct project project = {0}; - if (db_project_find(&ctx, &project) < 0) + if (db_project_find(&project, name) < 0) page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL); else { push(r, &project); - db_ctx_finish(&ctx); + project_finish(&project); } } static void -get_one_id(struct kreq *r, int id) +get_one_id(struct kreq *r, intmax_t id) { - struct db_ctx ctx; - struct project project = { - .id = id - }; + struct project project = {0}; - if (db_project_find_id(&ctx, &project) < 0) + if (db_project_find_id(&project, id) < 0) page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL); else { push(r, &project); - db_ctx_finish(&ctx); + project_finish(&project); } } static void get_all(struct kreq *r) { - struct db_ctx ctx; - struct project projects[SCI_PROJECT_MAX]; + struct project projects[SCI_PROJECT_MAX] = {0}; ssize_t projectsz; - if ((projectsz = db_project_list(&ctx, projects, UTIL_SIZE(projects))) < 0) + if ((projectsz = db_project_list(projects, UTIL_SIZE(projects))) < 0) page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL); else { khttp_head(r, kresps[KRESP_CONTENT_TYPE], "%s", kmimetypes[KMIME_APP_JSON]); khttp_head(r, kresps[KRESP_STATUS], "%s", khttps[KHTTP_200]); khttp_body(r); list(r, projects, projectsz); - db_ctx_finish(&ctx); khttp_free(r); } + + for (ssize_t i = 0; i < projectsz; ++i) + project_finish(&projects[i]); } +/* + * GET /api/v1/projects[/<name|id>] + * ------------------------------ + * + * Retrieve one project or a list of projects depending on the presence of + * <name> or <id> parameter. 
+ */ static void get(struct kreq *r) { char name[128]; - int id; + intmax_t id; - if (sscanf(r->path, "v1/projects/%d", &id) == 1) + if (sscanf(r->path, "v1/projects/%jd", &id) == 1) get_one_id(r, id); else if (sscanf(r->path, "v1/projects/%127s", name) == 1) get_one(r, name); @@ -147,6 +149,12 @@ get_all(r); } +/* + * POST /api/v1/projects + * --------------------- + * + * Create a new project. + */ static void post(struct kreq *r) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/scid/page-api-todo.c Tue Jul 19 21:52:42 2022 +0200 @@ -0,0 +1,122 @@ +/* + * page-api-todo.c -- /api/v?/todo route + * + * Copyright (c) 2021 David Demelier <markand@malikania.fr> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <sys/types.h> +#include <assert.h> +#include <stdarg.h> +#include <stdint.h> +#include <string.h> + +#include <kcgi.h> +#include <jansson.h> + +#include "config.h" +#include "db.h" +#include "log.h" +#include "page-api-todo.h" +#include "page.h" +#include "types.h" +#include "util.h" + +static void +list(struct kreq *r, const struct job *jobs, size_t jobsz) +{ + json_t *doc; + char *dump; + + doc = job_to(jobs, jobsz); + dump = json_dumps(doc, JSON_COMPACT); + + khttp_puts(r, dump); + free(dump); + json_decref(doc); +} + +#if 0 + +static int +save(const char *json) +{ + struct jobresult res = {0}; + int ret = -1; + + json_t *doc; + json_error_t err; + + if (!(doc = json_loads(json, 0, &err))) + log_warn("api/post: invalid JSON input: %s", err.text); + else if (jobresult_from(&res, 1, doc) < 0) + log_warn("api/post: failed to decode parameters"); + else if (db_jobresult_add(&res) < 0) + log_warn("api/post: database save error"); + else + ret = 0; + + json_decref(doc); + + return ret; +} + 
+#endif + +/* + * GET /api/v1/todo/<worker-name> + * ---------------- + * + * Retrieve a list of jobs to perform for this worker name. + */ +static void +get(struct kreq *r) +{ + struct job jobs[SCI_JOB_LIST_MAX]; + ssize_t jobsz; + struct worker wk = {0}; + + if (db_worker_find(&wk, util_basename(r->path)) < 0) { + page(r, NULL, KHTTP_404, KMIME_APP_JSON, NULL); + return; + } + + if ((jobsz = db_job_todo(jobs, UTIL_SIZE(jobs), wk.id)) < 0) + page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL); + else { + khttp_head(r, kresps[KRESP_CONTENT_TYPE], "%s", kmimetypes[KMIME_APP_JSON]); + khttp_head(r, kresps[KRESP_STATUS], "%s", khttps[KHTTP_200]); + khttp_body(r); + list(r, jobs, jobsz); + khttp_free(r); + } + + for (ssize_t i = 0; i < jobsz; ++i) + job_finish(&jobs[i]); +} + +void +page_api_v1_todo(struct kreq *r) +{ + assert(r); + + switch (r->method) { + case KMETHOD_GET: + get(r); + break; + default: + page(r, NULL, KHTTP_400, KMIME_APP_JSON, NULL); + break; + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/scid/page-api-todo.h Tue Jul 19 21:52:42 2022 +0200 @@ -0,0 +1,27 @@ +/* + * page-api-todo.h -- /api/v?/todo route + * + * Copyright (c) 2021 David Demelier <markand@malikania.fr> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef SCI_PAGE_API_TODO_H +#define SCI_PAGE_API_TODO_H + +struct kreq; + +void +page_api_v1_todo(struct kreq *); + +#endif /* !SCI_PAGE_API_TODO_H */
--- a/scid/page-api-workers.c	Fri Jul 15 11:11:48 2022 +0200
+++ b/scid/page-api-workers.c	Tue Jul 19 21:52:42 2022 +0200
@@ -11,7 +11,7 @@
 static void
 list(struct kreq *r, const struct worker *workers, size_t workersz)
 {
-	struct json_t *doc;
+	json_t *doc;
 	char *dump;
 
 	doc = worker_to(workers, workersz);
@@ -67,54 +67,56 @@
 static void
 get_one(struct kreq *r, const char *name)
 {
-	struct db_ctx ctx;
-	struct worker worker = {
-		.name = name
-	};
+	struct worker worker = {0};
 
-	if (db_worker_find(&ctx, &worker) < 0)
+	if (db_worker_find(&worker, name) < 0)
 		page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL);
 	else {
 		push(r, &worker);
-		db_ctx_finish(&ctx);
+		worker_finish(&worker);
 	}
 }
 
 static void
-get_one_id(struct kreq *r, int id)
+get_one_id(struct kreq *r, intmax_t id)
 {
-	struct db_ctx ctx;
-	struct worker worker = {
-		.id = id
-	};
+	struct worker worker = {0};
 
-	if (db_worker_find_id(&ctx, &worker) < 0)
+	if (db_worker_find_id(&worker, id) < 0)
 		page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL);
 	else {
 		push(r, &worker);
-		db_ctx_finish(&ctx);
+		worker_finish(&worker);
 	}
 }
 
 static void
 get_all(struct kreq *r)
 {
-	struct db_ctx ctx;
-	struct worker workers[SCI_PROJECT_MAX];
+	struct worker workers[SCI_WORKER_MAX] = {0};
 	ssize_t workersz;
 
-	if ((workersz = db_worker_list(&ctx, workers, UTIL_SIZE(workers))) < 0)
+	if ((workersz = db_worker_list(workers, UTIL_SIZE(workers))) < 0)
 		page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL);
 	else {
 		khttp_head(r, kresps[KRESP_CONTENT_TYPE], "%s", kmimetypes[KMIME_APP_JSON]);
 		khttp_head(r, kresps[KRESP_STATUS], "%s", khttps[KHTTP_200]);
 		khttp_body(r);
 		list(r, workers, workersz);
-		db_ctx_finish(&ctx);
 		khttp_free(r);
 	}
+
+	for (ssize_t i = 0; i < workersz; ++i)
+		worker_finish(&workers[i]);
 }
 
+/*
+ * GET /api/v1/workers[/<name|id>]
+ * ------------------------------
+ *
+ * Retrieve one worker or a list of workers depending on the presence of
+ * <name> or <id> parameter.
+ */
 static void
 get(struct kreq *r)
 {
@@ -129,6 +131,12 @@
 	get_all(r);
 }
 
+/*
+ * POST /api/v1/workers
+ * --------------------
+ *
+ * Create a worker.
+ */
 static void
 post(struct kreq *r)
 {
--- a/sciworkerd/sciworkerd.c	Fri Jul 15 11:11:48 2022 +0200
+++ b/sciworkerd/sciworkerd.c	Tue Jul 19 21:52:42 2022 +0200
@@ -78,24 +78,18 @@
 }
 
 static void
-merge(json_t *doc)
+merge(const struct job *jobs, size_t jobsz)
 {
-	struct job jobs[SCI_JOB_LIST_MAX];
-	ssize_t jobsz;
 	size_t total = 0;
 
-	if ((jobsz = job_from(jobs, UTIL_SIZE(jobs), doc)) < 0)
-		log_warn(TAG "error while parsing jobs: %s", strerror(errno));
-	else {
-		for (ssize_t i = 0; i < jobsz; ++i) {
-			if (!pending(jobs[i].id)) {
-				queue(&jobs[i]);
-				total++;
-			}
+	for (size_t i = 0; i < jobsz; ++i) {
+		if (!pending(jobs[i].id)) {
+			queue(&jobs[i]);
+			total++;
 		}
+	}
 
-		log_info(TAG "added %zu new pending tasks", total);
-	}
+	log_info(TAG "added %zu new pending tasks", total);
 }
 
 /*
@@ -106,7 +100,9 @@
 {
 	static time_t startup;
 	time_t now;
-	struct apicreq req;
+	struct apic req;
+	struct job todo[SCI_JOB_LIST_MAX];
+	ssize_t todosz;
 
 	if (!startup)
 		startup = time(NULL);
@@ -114,12 +110,12 @@
 	if (difftime((now = time(NULL)), startup) >= sciworkerd.fetchinterval) {
 		startup = now;
 
-		if (apic_get(&req, "%s/api/v1/%s", sciworkerd.url, sciworkerd.name) < 0)
+		if ((todosz = apic_job_todo(&req, todo, UTIL_SIZE(todo), worker.id)) < 0)
 			log_warn(TAG "unable to fetch jobs: %s", req.error);
-		if (req.doc) {
-			merge(req.doc);
-			json_decref(req.doc);
-		}
+		else
+			merge(todo, todosz);
+
+		apic_finish(&req);
 	}
 }
 
@@ -129,20 +125,18 @@
 static void
 fetch_worker(void)
 {
-	struct apicreq req;
+	struct apic req;
 
-	if (apic_get(&req, "%s/api/v1/workers/%s", sciworkerd.url, sciworkerd.name) < 0)
-		log_warn(TAG "unable to fetch worker info: %s", req.error);
-	if (!req.doc)
-		log_die(TAG "empty worker response");
-	if (worker_from(&worker, 1, req.doc) < 0)
-		log_die(TAG "unable to parse worker", strerror(errno));
+	util_strlcpy(worker.name, sciworkerd.name);
+
+	if (apic_worker_find(&req, &worker) < 0)
+		log_die(TAG "unable to fetch worker info: %s", req.error);
 
 	log_info("worker id: %d", worker.id);
 	log_info("worker name: %s", worker.name);
 	log_info("worker description: %s", worker.desc);
-	json_decref(req.doc);
+	apic_finish(&req);
 }
 
 /*
@@ -151,14 +145,10 @@
 static int
 fetch_project(struct project *project, int id)
 {
-	struct apicreq req;
+	struct apic req;
 
-	if (apic_get(&req, "%s/api/v1/projects/%d", id) < 0)
-		return log_warn(TAG "unable to fetch project info: %s", req.error), -1;
-	if (!req.doc)
-		return log_warn(TAG "empty project response"), -1;
-	if (project_from(project, 1, req.doc) < 0)
-		return log_warn(TAG "unable to parse project: %s", strerror(errno)), -1;
+	if (apic_project_find_id(&req, project, id) < 0)
+		return log_warn(TAG "unable to fetch project info: %s", req.error), -1;
 
 	return 0;
 }
@@ -182,10 +176,10 @@
 static int
 start(struct taskentry *entry)
 {
 	struct project project;
 	pid_t pid;
 
 	if (fetch_project(&project, entry->job.project_id) < 0)
 		return log_warn(TAG "unable to fetch project, dropping task"), -1;
 	if (task_setup(entry->task, project.script) < 0)
 		return log_warn(TAG "unable to setup script code: %s, dropping task", strerror(errno)), -1;