# HG changeset patch # User David Demelier # Date 1623314361 -7200 # Node ID 5fa3d2f479b2ada4a152920e978589eda5c381d3 # Parent 5afdb14df924a4f82a3a8741f9d9989b980486a5 sci: initial upload support diff -r 5afdb14df924 -r 5fa3d2f479b2 Makefile --- a/Makefile Tue Jun 08 08:40:01 2021 +0200 +++ b/Makefile Thu Jun 10 10:39:21 2021 +0200 @@ -29,7 +29,7 @@ extern/libsqlite/sqlite3.c SCID_DATA= sql/init.h \ sql/job-queue.h \ - sql/job-queue-list.h \ + sql/job-result-todo.h \ sql/job-save.h \ sql/project-find.h \ sql/project-get.h \ @@ -48,7 +48,15 @@ SCICTL_OBJS= ${SCICTL_SRCS:.c=.o} SCICTL_DEPS= ${SCICTL_SRCS:.c=.d} -SCIWEBD_SRCS= base64.c sciwebd.c util.c +SCIWEBD_SRCS= base64.c \ + http.c \ + log.c \ + page-api-jobs.c \ + page-api-script.c \ + page.c \ + req.c \ + sciwebd.c \ + util.c SCIWEBD_OBJS= ${SCIWEBD_SRCS:.c=.o} SCIWEBD_DEPS= ${SCIWEBD_SRCS:.c=.d} @@ -63,12 +71,16 @@ LIBCURL_INCS= `pkg-config --cflags libcurl` LIBCURL_LIBS= `pkg-config --libs libcurl` +JANSSON_INCS= `pkg-config --cflags jansson` +JANSSON_LIBS= `pkg-config --libs jansson` + KCGI_INCS= `pkg-config --cflags kcgi` KCGI_LIBS= `pkg-config --libs kcgi` ZSTD_INCS= `pkg-config --cflags libzstd` ZSTD_LIBS= `pkg-config --libs libzstd` +INCS= -Iextern/libsqlite DEFS= -DVARDIR=\"${VARDIR}\" \ -DTMPDIR=\"${TMPDIR}\" @@ -78,7 +90,7 @@ all: scid scictl sciworkerd sciwebd .c.o: - ${CC} ${DEFS} ${LIBBSD_INCS} ${KCGI_INCS} ${ZSTD_INCS} ${CFLAGS} -c $< -o $@ + ${CC} ${INCS} ${DEFS} ${LIBBSD_INCS} ${KCGI_INCS} ${JANSSON_INCS} ${ZSTD_INCS} ${CFLAGS} -MMD -c $< -o $@ .sql.h: ./bcc -sc0 $< $< > $@ @@ -101,7 +113,7 @@ ${SCIWORKERD_OBJS}: config.h sciworkerd: ${SCIWORKERD_OBJS} - ${CC} ${CFLAGS} -o $@ ${SCIWORKERD_OBJS} ${LIBBSD_LIBS} ${LIBCURL_LIBS} ${ZSTD_LIBS} ${LDFLAGS} + ${CC} ${CFLAGS} -o $@ ${SCIWORKERD_OBJS} ${LIBBSD_LIBS} ${LIBCURL_LIBS} ${JANSSON_LIBS} ${ZSTD_LIBS} ${LDFLAGS} ${SCICTL_OBJS}: config.h @@ -111,7 +123,7 @@ ${SCIWEBD_OBJS}: config.h sciwebd: ${SCIWEBD_OBJS} - ${CC} ${CFLAGS} -o $@ ${SCIWEBD_OBJS} ${LIBBSD_LIBS} ${KCGI_LIBS} ${ZSTD_LIBS} ${LDFLAGS} + ${CC} ${CFLAGS} -o $@ ${SCIWEBD_OBJS} ${LIBBSD_LIBS} ${KCGI_LIBS} ${JANSSON_LIBS} ${ZSTD_LIBS} ${LDFLAGS} clean: rm -f bcc config.h diff -r 5afdb14df924 -r 5fa3d2f479b2 db.c --- a/db.c Tue Jun 08 08:40:01 2021 +0200 +++ b/db.c Thu Jun 10 10:39:21 2021 +0200 @@ -12,7 +12,7 @@ #include "sql/init.h" #include "sql/job-queue.h" -#include "sql/job-queue-list.h" +#include "sql/job-result-todo.h" #include "sql/job-save.h" #include "sql/project-insert.h" #include "sql/project-get.h" @@ -195,10 +195,9 @@ } int -db_worker_find(struct worker *w, const char *name) +db_worker_find(struct worker *w) { assert(w); - assert(name); sqlite3_stmt *stmt = NULL; int ret = -1; @@ -206,7 +205,7 @@ if (sqlite3_prepare(db, CHAR(sql_worker_find), -1, &stmt, NULL) != SQLITE_OK) goto sqlite3_err; - sqlite3_bind_text(stmt, 1, name, -1, SQLITE_STATIC); + sqlite3_bind_text(stmt, 1, w->name, -1, SQLITE_STATIC); if (sqlite3_step(stmt) != SQLITE_ROW) goto sqlite3_err; @@ -255,36 +254,28 @@ } ssize_t -db_job_result_todo(struct job_result *re, size_t resz, int64_t project_id) +db_job_result_todo(struct job_result *re, size_t resz, int64_t worker_id) { assert(re); sqlite3_stmt *stmt = NULL; ssize_t ret = 0; - if (sqlite3_prepare(db, CHAR(sql_job_queue_list), -1, &stmt, NULL) != SQLITE_OK) { + if (sqlite3_prepare(db, CHAR(sql_job_result_todo), -1, &stmt, NULL) != SQLITE_OK) { log_warn("db: %s", sqlite3_errmsg(db)); return -1; } - sqlite3_bind_int64(stmt, 1, project_id); + sqlite3_bind_int64(stmt, 1, worker_id); 
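/*
 * The job-result-todo query takes two placeholders: the worker whose pending
 * results are wanted (jobs with no job_result row for that worker id), then
 * the LIMIT on how many rows may be returned.
 */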
sqlite3_bind_int64(stmt, 2, resz); while (sqlite3_step(stmt) == SQLITE_ROW && (size_t)ret++ < resz) { memset(re, 0, sizeof (*re)); - re->job.id = sqlite3_column_int64(stmt, 0); - strlcpy(re->job.tag, CHAR(sqlite3_column_text(stmt, 1)), sizeof (re->job.tag)); - - re->worker.id = sqlite3_column_int64(stmt, 2); - strlcpy(re->worker.name, CHAR(sqlite3_column_text(stmt, 3)), sizeof (re->worker.name)); - strlcpy(re->worker.desc, CHAR(sqlite3_column_text(stmt, 4)), sizeof (re->worker.desc)); - - re->job.project.id = sqlite3_column_int64(stmt, 5); - strlcpy(re->job.project.name, CHAR(sqlite3_column_text(stmt, 6)), sizeof (re->job.project.name)); - strlcpy(re->job.project.desc, CHAR(sqlite3_column_text(stmt, 7)), sizeof (re->job.project.desc)); - strlcpy(re->job.project.url, CHAR(sqlite3_column_text(stmt, 8)), sizeof (re->job.project.url)); - strlcpy(re->job.project.script, CHAR(sqlite3_column_text(stmt, 9)), sizeof (re->job.project.script)); + strlcpy(re->job.tag, CHAR(sqlite3_column_text(stmt, 1)), + sizeof (re->job.tag)); + strlcpy(re->job.project.name, CHAR(sqlite3_column_text(stmt, 2)), + sizeof (re->job.project.name)); ++re; }; diff -r 5afdb14df924 -r 5fa3d2f479b2 db.h --- a/db.h Tue Jun 08 08:40:01 2021 +0200 +++ b/db.h Thu Jun 10 10:39:21 2021 +0200 @@ -29,7 +29,7 @@ db_worker_get(struct worker *, size_t); int -db_worker_find(struct worker *, const char *); +db_worker_find(struct worker *); int db_job_queue(struct job *); diff -r 5afdb14df924 -r 5fa3d2f479b2 doc/api.md --- a/doc/api.md Tue Jun 08 08:40:01 2021 +0200 +++ b/doc/api.md Thu Jun 10 10:39:21 2021 +0200 @@ -42,3 +42,19 @@ "output": "stdout/stderr combined" } ``` + +### (GET) /script/

+ +Get script code for project `p`. + +Request: + +No data. + +Reponse: + +``` +{ + "code": "#!/bin/sh exit 0" +} +``` diff -r 5afdb14df924 -r 5fa3d2f479b2 http.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/http.c Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,95 @@ +#include +#include +#include +#include +#include +#include + +#include + +#include "http.h" +#include "log.h" +#include "page.h" +#include "page-api-jobs.h" +#include "page-api-script.h" +#include "req.h" + +enum page { + PAGE_API, + PAGE_LAST /* Not used. */ +}; + +static void +dispatch_api(struct kreq *req) +{ + static const struct { + const char *prefix; + void (*handler)(struct kreq *); + } apis[] = { + { "v1/jobs", page_api_v1_jobs }, + { "v1/script", page_api_v1_script }, + { NULL, NULL } + }; + + if (req_connect(VARDIR "/run/sci.sock") < 0) { + page(req, NULL, KHTTP_500, KMIME_TEXT_HTML, "pages/500.html"); + return; + } + + for (size_t i = 0; apis[i].prefix; ++i) { + if (strncmp(req->path, apis[i].prefix, strlen(apis[i].prefix)) == 0) { + apis[i].handler(req); + goto finish; + } + } + + page(req, NULL, KHTTP_404, KMIME_TEXT_HTML, "pages/404.html"); + +finish: + req_finish(); +} + +static const char *pages[] = { + [PAGE_API] = "api" +}; + +static void (*handlers[])(struct kreq *req) = { + [PAGE_API] = dispatch_api +}; + +static void +process(struct kreq *req) +{ + assert(req); + + log_debug("http: accessing page '%s'", req->path); + + if (req->page == PAGE_LAST) + page(req, NULL, KHTTP_404, KMIME_TEXT_HTML, "pages/404.html"); + else + handlers[req->page](req); +} + +void +http_fcgi_run(void) +{ + struct kreq req; + struct kfcgi *fcgi; + + if (khttp_fcgi_init(&fcgi, NULL, 0, pages, PAGE_LAST, 0) != KCGI_OK) + return; + + while (khttp_fcgi_parse(fcgi, &req) == KCGI_OK) + process(&req); + + khttp_fcgi_free(fcgi); +} + +void +http_cgi_run(void) +{ + struct kreq req; + + if (khttp_parse(&req, NULL, 0, pages, PAGE_LAST, 0) == KCGI_OK) + process(&req); +} diff -r 5afdb14df924 -r 5fa3d2f479b2 http.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/http.h Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,28 @@ +/* + * http.h -- HTTP parsing and rendering + * + * Copyright (c) 2021 David Demelier + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef SCI_HTTP_H +#define SCI_HTTP_H + +void +http_fcgi_run(void); + +void +http_cgi_run(void); + +#endif /* !SCI_HTTP_H */ diff -r 5afdb14df924 -r 5fa3d2f479b2 log.h --- a/log.h Tue Jun 08 08:40:01 2021 +0200 +++ b/log.h Thu Jun 10 10:39:21 2021 +0200 @@ -2,11 +2,11 @@ * log.h -- logging routines * * Copyright (c) 2020-2021 David Demelier - * + * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
- * + * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR @@ -16,8 +16,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef IMGUP_LOG_H -#define IMGUP_LOG_H +#ifndef SCI_LOG_H +#define SCI_LOG_H #include @@ -43,4 +43,4 @@ void log_finish(void); -#endif /* !IMGUP_LOG_H */ +#endif /* !SCI_LOG_H */ diff -r 5afdb14df924 -r 5fa3d2f479b2 output --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/output Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,1 @@ +Success! diff -r 5afdb14df924 -r 5fa3d2f479b2 page-api-jobs.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/page-api-jobs.c Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,129 @@ +#include +#include +#include +#include +#include + +#include +#include + +#include "config.h" +#include "job.h" +#include "log.h" +#include "page.h" +#include "req.h" +#include "util.h" + +static void +list(struct kreq *r, const struct job_result *jobs, size_t jobsz) +{ + json_t *array, *obj; + + array = json_array(); + + for (size_t i = 0; i < jobsz; ++i) { + obj = json_object(); + json_object_set(obj, "id", json_integer(jobs[i].job.id)); + json_object_set(obj, "tag", json_string(jobs[i].job.tag)); + json_object_set(obj, "project", json_string(jobs[i].job.project.name)); + json_array_append(array, obj); + } + + khttp_puts(r, json_dumps(array, JSON_COMPACT)); +} + +static void +get(struct kreq *r) +{ + struct req req; + struct job_result jobs[SCI_JOB_LIST_MAX]; + size_t jobsz = UTIL_SIZE(jobs); + const char *worker = util_basename(r->path); + + if ((req = req_job_list(jobs, &jobsz, worker)).status) + page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL); + else { + khttp_head(r, kresps[KRESP_CONTENT_TYPE], "%s", kmimetypes[KMIME_APP_JSON]); + khttp_head(r, kresps[KRESP_STATUS], "%s", khttps[KHTTP_200]); + khttp_body(r); + list(r, jobs, jobsz); + khttp_free(r); + } +} + +static int +parse(struct job_result *res, const char *json) +{ + json_t *doc, *code, *id, *retcode; + json_error_t err; + + if (!(doc = json_loads(json, 0, &err))) + return log_warn("api/post: invalid JSON input: %s", err.text), -1; + if (!json_is_object(doc) || + !json_is_string((code = json_object_get(doc, "code"))) || + !json_is_integer((id = json_object_get(doc, "id"))) || + !json_is_integer((retcode = json_object_get(doc, "retcode")))) { + log_warn("api/post: invalid JSON input"); + json_decref(doc); + return -1; + } + + res->job.id = json_integer_value(id); + res->retcode = json_integer_value(retcode); + res->console = util_strdup(json_string_value(code)); + json_decref(doc); + + return 0; +} + +static int +save(struct job_result *res) +{ + struct req req; + + if ((req = req_job_save(res)).status) { + log_warn("api/post: save error: %s", strerror(req.status)); + return -1; + } + + return 0; +} + +static void +post(struct kreq *r) +{ + struct job_result res = {0}; + const char *worker = util_basename(r->path); + + strlcpy(res.worker.name, worker, sizeof (res.worker.name)); + log_info("data=%s", r->fields[0].key); + + if (r->fieldsz < 1 || parse(&res, r->fields[0].key) || save(&res) < 0) + page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL); + else { + khttp_head(r, kresps[KRESP_CONTENT_TYPE], "%s", kmimetypes[KMIME_APP_JSON]); + khttp_head(r, kresps[KRESP_STATUS], "%s", khttps[KHTTP_200]); + khttp_body(r); + khttp_free(r); + } + + free(res.console); +} + +void +page_api_v1_jobs(struct kreq *r) +{ + assert(r); + + switch 
(r->method) { + case KMETHOD_GET: + get(r); + break; + case KMETHOD_POST: + post(r); + break; + default: + page(r, NULL, KHTTP_400, KMIME_APP_JSON, NULL); + break; + } +} diff -r 5afdb14df924 -r 5fa3d2f479b2 page-api-jobs.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/page-api-jobs.h Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,9 @@ +#ifndef SCI_PAGE_API_JOBS_H +#define SCI_PAGE_API_JOBS_H + +struct kreq; + +void +page_api_v1_jobs(struct kreq *); + +#endif /* !SCI_PAGE_API_JOBS_H */ diff -r 5afdb14df924 -r 5fa3d2f479b2 page-api-script.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/page-api-script.c Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,56 @@ +#include +#include +#include +#include + +#include +#include + +#include "config.h" +#include "page.h" +#include "req.h" +#include "util.h" + +static void +content(struct kreq *r, const char *code) +{ + json_t *doc; + + doc = json_object(); + json_object_set(doc, "code", json_string(code)); + khttp_puts(r, json_dumps(doc, JSON_COMPACT)); + json_decref(doc); +} + +static void +get(struct kreq *r) +{ + struct req req; + char script[SCI_MSG_MAX]; + const char *project = util_basename(r->path); + + if ((req = req_script_get(project, script, sizeof (script))).status) + page(r, NULL, KHTTP_500, KMIME_APP_JSON, NULL); + else { + khttp_head(r, kresps[KRESP_CONTENT_TYPE], "%s", kmimetypes[KMIME_APP_JSON]); + khttp_head(r, kresps[KRESP_STATUS], "%s", khttps[KHTTP_200]); + khttp_body(r); + content(r, script); + khttp_free(r); + } +} + +void +page_api_v1_script(struct kreq *r) +{ + assert(r); + + switch (r->method) { + case KMETHOD_GET: + get(r); + break; + default: + page(r, NULL, KHTTP_400, KMIME_APP_JSON, NULL); + break; + } +} diff -r 5afdb14df924 -r 5fa3d2f479b2 page-api-script.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/page-api-script.h Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,9 @@ +#ifndef SCI_PAGE_API_SCRIPT_H +#define SCI_PAGE_API_SCRIPT_H + +struct kreq; + +void +page_api_v1_script(struct kreq *); + +#endif /* !SCI_PAGE_API_SCRIPT_H */ diff -r 5afdb14df924 -r 5fa3d2f479b2 page-index.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/page-index.c Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,105 @@ +/* + * page-index.c -- page / + * + * Copyright (c) 2020-2021 David Demelier + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include + +#include + +#include "database.h" +#include "fragment-paste.h" +#include "page-index.h" +#include "page.h" +#include "paste.h" +#include "util.h" + +struct template { + struct kreq *req; + const struct paste *pastes; + size_t pastesz; +}; + +static const char *keywords[] = { + "pastes" +}; + +static int +template(size_t keyword, void *arg) +{ + struct template *tp = arg; + + switch (keyword) { + case 0: + for (size_t i = 0; i < tp->pastesz; ++i) + fragment_paste(tp->req, &tp->pastes[i]); + break; + default: + break; + } + + return 1; +} + +static void +get(struct kreq *r) +{ + struct paste pastes[10] = {0}; + size_t pastesz = NELEM(pastes); + + if (!database_recents(pastes, &pastesz)) + page(r, NULL, KHTTP_500, "pages/500.html"); + else + page_index_render(r, pastes, pastesz); + + for (size_t i = 0; i < pastesz; ++i) + paste_finish(&pastes[i]); +} + +void +page_index_render(struct kreq *r, const struct paste *pastes, size_t pastesz) +{ + struct template data = { + .req = r, + .pastes = pastes, + .pastesz = pastesz + }; + + struct ktemplate kt = { + .key = keywords, + .keysz = NELEM(keywords), + .arg = &data, + .cb = template + }; + + page(r, &kt, KHTTP_200, "pages/index.html"); +} + +void +page_index(struct kreq *r) +{ + switch (r->method) { + case KMETHOD_GET: + get(r); + break; + default: + page(r, NULL, KHTTP_400, "400.html"); + break; + } +} diff -r 5afdb14df924 -r 5fa3d2f479b2 page.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/page.c Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,40 @@ +/* + * page.c -- page renderer + * + * Copyright (c) 2021 David Demelier + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "page.h" +#include "util.h" + +void +page(struct kreq *req, + const struct ktemplate *tmpl, + enum khttp status, + enum kmime mime, + const char *file) +{ + khttp_head(req, kresps[KRESP_CONTENT_TYPE], "%s", kmimetypes[mime]); + khttp_head(req, kresps[KRESP_STATUS], "%s", khttps[status]); + khttp_body(req); + + if (file) { + khttp_template(req, NULL, util_path("fragments/header.html")); + khttp_template(req, tmpl, util_path(file)); + khttp_template(req, NULL, util_path("fragments/footer.html")); + } + + khttp_free(req); +} diff -r 5afdb14df924 -r 5fa3d2f479b2 page.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/page.h Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,30 @@ +/* + * page.h -- page renderer + * + * Copyright (c) 2021 David Demelier + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef SCI_PAGE_H +#define SCI_PAGE_H + +#include +#include +#include +#include + +void +page(struct kreq *, const struct ktemplate *, enum khttp, enum kmime, const char *); + +#endif /* !SCI_PAGE_H */ diff -r 5afdb14df924 -r 5fa3d2f479b2 req.c --- a/req.c Tue Jun 08 08:40:01 2021 +0200 +++ b/req.c Thu Jun 10 10:39:21 2021 +0200 @@ -1,4 +1,3 @@ -#define _BSD_SOURCE #include #include #include @@ -72,38 +71,6 @@ return res; } -static char * -readfile(const char *path) -{ - FILE *fp, *str; - char *console, *b64, buf[BUFSIZ]; - size_t consolesz, nr; - - if (strcmp(path, "-") == 0) - fp = stdin; - else if (!(fp = fopen(path, "r"))) - return NULL; - - if (!(str = open_memstream(&console, &consolesz))) - err(1, NULL); - - while ((nr = fread(buf, 1, sizeof (buf), fp))) - fwrite(buf, 1, nr, str); - - if ((ferror(fp) && !feof(fp)) || (ferror(str) && !feof(fp))) { - free(console); - console = NULL; - } - - fclose(str); - fclose(fp); - - b64 = util_zbase64_enc(console); - free(console); - - return b64; -} - int req_connect(const char *path) { @@ -128,12 +95,11 @@ } struct req -req_job_queue(const char *project, const char *tag) +req_job_queue(const struct job *job) { - assert(project); - assert(tag); + assert(job); - return ask("job-queue %s|%s", project, tag); + return ask("job-queue %s|%s", job->project.name, job->tag); } struct req @@ -149,13 +115,11 @@ if ((req = ask("job-list %s", project)).status) return req; - snprintf(fmt, sizeof (fmt), "%%zd|%%%zu[^|]|%%%zu[^|]|%%%zu[^\n]\n", - sizeof (jobs->job.tag), sizeof (jobs->job.project.name), - sizeof (jobs->worker.name)); + snprintf(fmt, sizeof (fmt), "%%zd|%%%zu[^|]|%%%zu[^|]\n", + sizeof (jobs->job.tag), sizeof (jobs->job.project.name)); while ((token = strtok_r(p, "\n", &p)) && tot < *jobsz) { - if (sscanf(token, fmt, &jobs->job.id, jobs->job.tag, - jobs->job.project.name, jobs->worker.name) == 4) { + if (sscanf(token, fmt, &jobs->job.id, jobs->job.tag, jobs->job.project.name) == 3) { ++jobs; ++tot; } @@ -167,25 +131,18 @@ } struct req -req_job_save(const char *id, - const char *worker, - const char *status, - const char *retcode, - const char *console) +req_job_save(const struct job_result *res) { - assert(id); - assert(worker); - assert(status); - assert(retcode); - assert(console); + assert(res); char *b64; struct req req = {0}; - if (!(b64 = readfile(console))) + if (!(b64 = util_zbase64_enc(res->console))) req.status = errno; else { - req = ask("job-save %s|%s|%s|%s|%s", id, worker, status, retcode, b64); + req = ask("job-save %lld|%s|%s|%s|%s", (long long int)res->job.id, + res->worker.name, res->status, res->retcode, b64); free(b64); } @@ -264,6 +221,29 @@ return req; } +struct req +req_script_get(const char *project, char *out, size_t outsz) +{ + assert(out); + + struct req req; + char *script; + + if ((req = ask("script-get %s", project)).status) + return req; + if (!(script = util_zbase64_dec(req.msg))) { + req.status = EINVAL; + return req; + } + + if (strlcpy(out, script, outsz) >= outsz) + req.status = errno; + + 
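/*
 * script holds the decoded reply: cmd_script_get() in scid.c sends the file
 * content through util_zbase64_enc(), so it is expanded with
 * util_zbase64_dec() and copied into the caller's buffer above before the
 * temporary copy is released.
 */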
free(script); + + return req; +} + void req_finish(void) { diff -r 5afdb14df924 -r 5fa3d2f479b2 req.h --- a/req.h Tue Jun 08 08:40:01 2021 +0200 +++ b/req.h Thu Jun 10 10:39:21 2021 +0200 @@ -1,8 +1,6 @@ #ifndef SCI_REQ_H #define SCI_REQ_H -/* TODO: remove reference to db structures. */ - #include struct req { @@ -12,23 +10,20 @@ struct worker; struct project; +struct job; struct job_result; int req_connect(const char *); struct req -req_job_queue(const char *, const char *); +req_job_queue(const struct job *); struct req req_job_list(struct job_result *, size_t *, const char *); struct req -req_job_save(const char *, - const char *, - const char *, - const char *, - const char *); +req_job_save(const struct job_result *); struct req req_project_add(const struct project *); @@ -37,6 +32,9 @@ req_project_list(struct project *, size_t *); struct req +req_script_get(const char *, char *, size_t); + +struct req req_worker_add(const struct worker *); struct req diff -r 5afdb14df924 -r 5fa3d2f479b2 scictl.c --- a/scictl.c Tue Jun 08 08:40:01 2021 +0200 +++ b/scictl.c Thu Jun 10 10:39:21 2021 +0200 @@ -24,22 +24,56 @@ help(void) { fprintf(stderr, "usage: %s job-queue project tag\n", getprogname()); - fprintf(stderr, " %s job-list project\n", getprogname()); + fprintf(stderr, " %s job-list worker\n", getprogname()); fprintf(stderr, " %s job-save id worker status retcode console\n", getprogname()); fprintf(stderr, " %s project-add name desc url script\n", getprogname()); fprintf(stderr, " %s project-list\n", getprogname()); + fprintf(stderr, " %s script-get project\n", getprogname()); fprintf(stderr, " %s worker-add name desc\n", getprogname()); fprintf(stderr, " %s worker-list\n", getprogname()); exit(0); } +static char * +readfile(const char *path) +{ + FILE *fp, *str; + static char console[SCI_MSG_MAX]; + char buf[BUFSIZ], *ret = console; + size_t nr; + + if (strcmp(path, "-") == 0) + fp = stdin; + else if (!(fp = fopen(path, "r"))) + err(1, "%s", path); + + if (!(str = fmemopen(console, sizeof (console), "w"))) + err(1, "fmemopen"); + + while ((nr = fread(buf, 1, sizeof (buf), fp)) > 0) + fwrite(buf, 1, nr, str); + + if ((ferror(fp) && !feof(fp)) || (ferror(str) && !feof(str))) + ret = NULL; + + fclose(str); + fclose(fp); + + return ret; +} + static struct req cmd_job_queue(int argc, char **argv) { + struct job job = {0}; + if (argc < 2) usage(); - return req_job_queue(argv[0], argv[1]); + strlcpy(job.project.name, argv[0], sizeof (job.project.name)); + strlcpy(job.tag, argv[1], sizeof (job.tag)); + + return req_job_queue(&job); } static struct req @@ -55,11 +89,11 @@ if ((req = req_job_list(jobs, &jobsz, argv[0])).status) return req; - printf("%-16s%-16s%-16s%s\n", "ID", "TAG", "PROJECT", "WORKER"); + printf("%-16s%-16s%s\n", "ID", "TAG", "PROJECT"); for (size_t i = 0; i < jobsz; ++i) { - printf("%-16lld%-16s%-16s%s\n", (long long int)jobs[i].job.id, - jobs[i].job.tag, jobs[i].job.project.name, jobs[i].worker.name); + printf("%-16lld%-16s%s\n", (long long int)jobs[i].job.id, + jobs[i].job.tag, jobs[i].job.project.name); } return req; @@ -68,10 +102,18 @@ static struct req cmd_job_save(int argc, char **argv) { + struct job_result res = {0}; + if (argc < 5) usage(); - return req_job_save(argv[0], argv[1], argv[2], argv[3], argv[4]); + res.job.id = strtoll(argv[0], NULL, 10); + res.status = strtoll(argv[2], NULL, 10); + res.retcode = strtoll(argv[3], NULL, 10); + res.console = readfile(argv[4]); + strlcpy(res.worker.name, argv[1], sizeof (res.worker.name)); + + return req_job_save(&res); } static 
struct req @@ -116,6 +158,29 @@ } static struct req +cmd_script_get(int argc, char **argv) +{ + char script[SCI_MSG_MAX]; + struct req req; + + if (argc < 1) + usage(); + if ((req = req_script_get(argv[0], script, sizeof (script))).status) + return req; + + printf("%s", script); + + /* + * Don't break up the terminal output if the script does not contain a + * final new line. + */ + if (script[strlen(script) - 1] != '\n') + printf("\n"); + + return req; +} + +static struct req cmd_worker_add(int argc, char **argv) { struct worker wk; @@ -160,6 +225,7 @@ { "job-save", cmd_job_save }, { "project-add", cmd_project_add }, { "project-list", cmd_project_list }, + { "script-get", cmd_script_get }, { "worker-add", cmd_worker_add }, { "worker-list", cmd_worker_list }, { NULL, NULL } diff -r 5afdb14df924 -r 5fa3d2f479b2 scid.c --- a/scid.c Tue Jun 08 08:40:01 2021 +0200 +++ b/scid.c Thu Jun 10 10:39:21 2021 +0200 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -159,7 +160,7 @@ { char *args[1] = {0}, buf[SCI_MSG_MAX]; struct job_result jobs[SCI_JOB_LIST_MAX]; - struct project project; + struct worker worker; ssize_t n; FILE *fp; @@ -170,21 +171,21 @@ if (!(fp = fmemopen(buf, sizeof (buf), "w"))) return ENOMEM; - strlcpy(project.name, args[0], sizeof (project.name)); + strlcpy(worker.name, args[0], sizeof (worker.name)); - if (db_project_find(&project) < 0) { - log_warn("project %s not found", args[0]); + if (db_worker_find(&worker) < 0) { + log_warn("worker %s not found", args[0]); return ENOENT; } - if ((n = db_job_result_todo(jobs, UTIL_SIZE(jobs), project.id)) < 0) + if ((n = db_job_result_todo(jobs, UTIL_SIZE(jobs), worker.id)) < 0) return answer(fd, "ERR unable to retrieve jobs list"); fprintf(fp, "OK\n"); for (ssize_t i = 0; i < n; ++i) - fprintf(fp, "%lld|%s|%s|%s\n", (long long int)jobs[i].job.id, - jobs[i].job.tag, jobs[i].job.project.name, jobs[i].worker.name); + fprintf(fp, "%lld|%s|%s\n", (long long int)jobs[i].job.id, + jobs[i].job.tag, jobs[i].job.project.name); fclose(fp); @@ -207,12 +208,15 @@ log_warn("invalid job-save invocation"); return EINVAL; } - if (db_worker_find(&res.worker, args[1]) < 0) { + + strlcpy(res.worker.name, args[1], sizeof (res.worker.name)); + + if (db_worker_find(&res.worker) < 0) { log_warn("worker %s not found", args[1]); return ENOENT; } - res.id = strtoll(args[0], NULL, 10); + res.job.id = strtoll(args[0], NULL, 10); res.status = strtoll(args[2], NULL, 10); res.retcode = strtoll(args[3], NULL, 10); res.console = util_zbase64_dec(args[4]); @@ -386,6 +390,37 @@ return answer(fd, "%s", buf); } +static int +cmd_script_get(int fd, char *cmd) +{ + char buf[SCI_MSG_MAX], *b64; + struct project project = {0}; + int filed, ret; + ssize_t nr; + + strlcpy(project.name, cmd, sizeof (project.name)); + + if (db_project_find(&project) < 0) + return ENOENT; + if ((filed = open(project.script, O_RDONLY)) < 0) + return errno; + if ((nr = read(filed, buf, sizeof (buf) - 1)) <= 0) { + close(filed); + return errno; + } + + buf[nr] = 0; + close(filed); + + if (!(b64 = util_zbase64_enc(buf))) + return errno; + + ret = answer(fd, "OK\n%s", b64); + free(b64); + + return ret; +} + static void dispatch(int fd, char *cmd) { @@ -402,6 +437,7 @@ { "project-list", cmd_project_list }, { "worker-add", cmd_worker_add }, { "worker-list", cmd_worker_list }, + { "script-get", cmd_script_get }, { NULL, NULL } }; diff -r 5afdb14df924 -r 5fa3d2f479b2 sciwebd.c --- a/sciwebd.c Tue Jun 08 08:40:01 2021 +0200 +++ b/sciwebd.c Thu Jun 10 10:39:21 2021 +0200 @@ -1,4 
+1,55 @@ +#include +#include +#include +#include + +#include "http.h" +#include "log.h" + +const char *sock = VARDIR "/run/sci.sock"; + +noreturn static void +usage(void) +{ + fprintf(stderr, "usage: %s [-f] [-s sock]\n", getprogname()); + exit(1); +} + +static void +init(void) +{ + log_open(getprogname()); +} + +static void +finish(void) +{ + log_finish(); +} + int -main(void) +main(int argc, char **argv) { + int ch; + void (*run)(void) = &(http_cgi_run); + + setprogname("sciwebd"); + + while ((ch = getopt(argc, argv, "fs:")) != -1) { + switch (ch) { + case 'f': + run = &(http_fcgi_run); + break; + case 's': + sock = optarg; + break; + default: + usage(); + break; + } + } + + init(); + run(); + finish(); } diff -r 5afdb14df924 -r 5fa3d2f479b2 sciworkerd.c --- a/sciworkerd.c Tue Jun 08 08:40:01 2021 +0200 +++ b/sciworkerd.c Thu Jun 10 10:39:21 2021 +0200 @@ -5,27 +5,45 @@ #include #include #include +#include #include #include #include #include +#include #include #include -#include "project.h" +#include +#include + #include "config.h" +#include "job.h" #include "log.h" +#include "project.h" #include "util.h" -struct job { +enum taskst { + TASKST_PENDING, /* not started yet. */ + TASKST_RUNNING, /* currently running. */ + TASKST_COMPLETED, /* completed but not synced yet. */ + TASKST_SYNCING /* was unable to send result to host. */ +}; + +struct task { + enum taskst status; pid_t child; - int running; int pipe[2]; - char project[PROJECT_NAME_MAX]; + int retcode; + struct job job; char out[SCI_CONSOLE_MAX]; - TAILQ_ENTRY(job) link; + char script[PATH_MAX]; + int scriptfd; + TAILQ_ENTRY(task) link; }; +TAILQ_HEAD(tasks, task); + struct fds { struct pollfd *list; size_t listsz; @@ -36,115 +54,170 @@ int status; }; -TAILQ_HEAD(jobs, job); - -static struct jobs running = TAILQ_HEAD_INITIALIZER(running); -static struct jobs queue = TAILQ_HEAD_INITIALIZER(queue); -static int sigpipe[2]; +struct fetch { + char buf[SCI_MSG_MAX]; + FILE *bufp; +}; -static struct job * -find(const char *project) -{ - struct job *j; +static struct { + char *url; + char *worker; + int maxbuilds; +} config = { + .url = "http://localhost", + .worker = "default", + .maxbuilds = 4 +}; - TAILQ_FOREACH(j, &running, link) - if (strcmp(j->project, project) == 0) - return j; +static struct tasks tasks = TAILQ_HEAD_INITIALIZER(tasks); + +#if 0 +static int sigpipe[2]; +#endif - return NULL; +noreturn static void +usage(void) +{ + fprintf(stderr, "usage: %s [-m maxbuild] [-u url] [-w worker]\n", getprogname()); + exit(1); } -static struct job * +static inline struct task * find_by_fd(int fd) { - struct job *j; + struct task *tk; - TAILQ_FOREACH(j, &running, link) - if (j->pipe[0] == fd) - return j; + TAILQ_FOREACH(tk, &tasks, link) + if (tk->pipe[0] == fd) + return tk; return NULL; } -static struct job * +static inline struct task * find_by_pid(pid_t pid) { - struct job *j; + struct task *t; - TAILQ_FOREACH(j, &running, link) - if (j->child == pid) - return j; + TAILQ_FOREACH(t, &tasks, link) + if (t->child == pid) + return t; return NULL; } -static int -spawn(const char *project, const char *script) +static void +destroy(struct task *tk) { - struct job *j; + log_debug("destroying task %lld", tk->job.id); - if (find(project)) - return -1; + if (tk->pipe[0]) + close(tk->pipe[0]); + if (tk->pipe[1]) + close(tk->pipe[1]); + if (tk->scriptfd) { + unlink(tk->script); + close(tk->scriptfd); + } - j = util_calloc(1, sizeof (*j)); - j->pipe[0] = j->pipe[1] = -1; - j->running = 1; - strlcpy(j->project, project, sizeof (j->project)); + 
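/*
 * Descriptors are treated as unset when zero here: complete() and extract()
 * reset pipe[1] and scriptfd to 0 after closing them, hence the plain
 * truthiness checks above rather than the old -1 sentinel.
 */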
TAILQ_REMOVE(&tasks, tk, link); + memset(tk, 0, sizeof (*tk)); + free(tk); +} - if (pipe(j->pipe) < 0) +static int +spawn(struct task *tk) +{ + if (pipe(tk->pipe) < 0) goto cleanup; - switch ((j->child = fork())) { + switch ((tk->child = fork())) { case -1: + log_warn("spawn: %s", strerror(errno)); goto cleanup; case 0: /* Child. */ - dup2(j->pipe[1], STDOUT_FILENO); - dup2(j->pipe[1], STDERR_FILENO); - close(j->pipe[0]); - close(j->pipe[1]); + dup2(tk->pipe[1], STDOUT_FILENO); + dup2(tk->pipe[1], STDERR_FILENO); + close(tk->pipe[0]); + close(tk->pipe[1]); + log_debug("spawn: running process (%lld) %s", tk->child, tk->script); - if (execl(script, script, NULL) < 0) + tk->status = TASKST_RUNNING; + + if (execl(tk->script, tk->script, tk->job.tag, NULL) < 0) { + tk->status = TASKST_PENDING; + log_warn("exec %s: %s", tk->script, strerror(errno)); exit(0); + } break; default: /* Parent */ - TAILQ_INSERT_TAIL(&running, j, link); break; } return 0; cleanup: - if (j->pipe[0] != -1) - close(j->pipe[0]); - if (j->pipe[1] != -1) - close(j->pipe[1]); - - free(j); + destroy(tk); return -1; } +static const char * +makeurl(const char *fmt, ...) +{ + assert(fmt); + + static char url[256]; + char page[128] = {0}; + va_list ap; + + va_start(ap, fmt); + vsnprintf(page, sizeof (page), fmt, ap); + va_end(ap); + + snprintf(url, sizeof (url), "%s/%s", config.url, page); + + return url; +} + static void complete(int signum, siginfo_t *sinfo, void *ctx) { (void)ctx; (void)signum; +#if 0 struct result r; + struct task *tk; +#endif + struct task *tk; + int status = 0; if (sinfo->si_code != CLD_EXITED) return; +#if 0 r.pid = sinfo->si_pid; r.status = 0; +#endif - if (waitpid(sinfo->si_pid, &r.status, 0) < 0) { + if (waitpid(sinfo->si_pid, &status, 0) < 0) { log_warn("waitpid: %s", strerror(errno)); return; } + + if ((tk = find_by_pid(sinfo->si_pid))) { + log_debug("process %lld completed", (long long int)sinfo->si_pid); + close(tk->pipe[1]); + tk->status = TASKST_COMPLETED; + tk->retcode = status; + tk->pipe[1] = 0; + } + +#if 0 /* * Signal may happen at any time from any thread so we can't use * mutexes so use the good old self-pipe trick. 
Yes, signals are @@ -152,13 +225,13 @@ */ if (write(sigpipe[1], &r, sizeof (r)) < 0) err(1, "write"); +#endif } static void init(void) { struct sigaction sa; - int flags; sa.sa_flags = SA_SIGINFO; sa.sa_sigaction = complete; @@ -169,93 +242,418 @@ log_open("sciworkerd"); +#if 0 if (pipe(sigpipe) < 0) err(1, "pipe"); if ((flags = fcntl(sigpipe[1], F_GETFL, 0)) < 0 || fcntl(sigpipe[1], F_SETFL, flags | O_NONBLOCK) < 0) err(1, "fcntl"); +#endif } static struct fds prepare(void) { struct fds fds = {0}; - struct job *j; - size_t i = 1; + struct task *tk; + size_t i = 0; - TAILQ_FOREACH(j, &running, link) - fds.listsz++; + TAILQ_FOREACH(tk, &tasks, link) + if (tk->status == TASKST_RUNNING || tk->status == TASKST_COMPLETED) + fds.listsz++; - fds.list = util_calloc(++fds.listsz, sizeof (*fds.list)); + fds.list = util_calloc(fds.listsz, sizeof (*fds.list)); + +#if 0 fds.list[0].fd = sigpipe[0]; fds.list[0].events = POLLIN; +#endif + printf("fd => %zu\n", fds.listsz); - TAILQ_FOREACH(j, &running, link) { - fds.list[i].fd = j->pipe[0]; - fds.list[i++].events = POLLIN | POLLPRI; + TAILQ_FOREACH(tk, &tasks, link) { + if (tk->status == TASKST_RUNNING || tk->status == TASKST_COMPLETED) { + printf("adding %d to pollin\n", tk->pipe[0]); + fds.list[i].fd = tk->pipe[0]; + fds.list[i++].events = POLLIN | POLLPRI; + } } return fds; } +static const char * +uploadenc(const struct task *tk) +{ + static char json[SCI_MSG_MAX]; + json_t *object; + + object = json_object(); + json_object_set(object, "code", json_string(tk->out)); + json_object_set(object, "id", json_integer(tk->job.id)); + json_object_set(object, "retcode", json_integer(tk->retcode)); + strlcpy(json, json_dumps(object, JSON_COMPACT), sizeof (json)); + json_decref(object); + + return json; +} + +static size_t +getcb(char *in, size_t n, size_t w, FILE *fp) +{ + if (fwrite(in, n, w, fp) != w) + return log_warn("get: %s", strerror(errno)), 0; + + return w; +} + +static const char * +get(const char *topic, const char *url) +{ + CURL *curl; + CURLcode code; + static char buf[SCI_MSG_MAX]; + long status; + FILE *fp; + + curl = curl_easy_init(); + + if (!(fp = fmemopen(buf, sizeof (buf), "w"))) + err(1, "fmemopen"); + +#if 0 + curl_easy_setopt(curl, CURLOPT_URL, makeurl("api/v1/script/%s", tk->job.project.name)); +#endif + curl_easy_setopt(curl, CURLOPT_URL, url); + curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); + curl_easy_setopt(curl, CURLOPT_TIMEOUT, 3L); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, getcb); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, fp); + + if ((code = curl_easy_perform(curl)) != CURLE_OK) + log_warn("%s: %s", topic, curl_easy_strerror(code)); + + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status); + curl_easy_cleanup(curl); + + fclose(fp); + + if (code != CURLE_OK) + return log_warn("%s: %s", topic, curl_easy_strerror(code)), NULL; + if (status != 200) + return log_warn("%s: unexpected status code %ld", topic, status), NULL; + + return buf; +} + +static size_t +silent(char *in, size_t n, size_t w, void *data) +{ + (void)in; + (void)n; + (void)data; + + return w; +} + static void -finished(pid_t pid) +upload(struct task *tk) +{ + CURL *curl; + CURLcode code; + long status; + + curl = curl_easy_init(); + curl_easy_setopt(curl, CURLOPT_URL, makeurl("api/v1/jobs/%s", config.worker)); + curl_easy_setopt(curl, CURLOPT_TIMEOUT, 3L); + curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, silent); + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, uploadenc(tk)); + code = 
curl_easy_perform(curl); + + /* + * If we fail to upload data, we put the result into syncing mode so + * that we retry later without redoing the job over and over + */ + tk->status = TASKST_SYNCING; + + if (code != CURLE_OK) + log_warn("upload: %s", curl_easy_strerror(code)); + else { + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status); + + if (status != 200) + log_warn("upload: unexpected return code: %ld", status); + else + destroy(tk); + } + + curl_easy_cleanup(curl); +} + +static inline void +finished(struct task *tk) +{ + log_info("task %d: completed with exit code %d", tk->child, tk->retcode); + printf("== OUTPUT ==\n"); + puts(tk->out); + upload(tk); +} + +static inline int +pending(int64_t id) +{ + struct task *t; + + TAILQ_FOREACH(t, &tasks, link) + if (t->job.id == id) + return 1; + + return 0; +} + +static void +push(int64_t id, const char *tag, const char *project) { - struct job *job; + struct task *tk; + + log_info("queued job build (%lld) for project %s, tag %s\n", id, project, tag); + + tk = util_calloc(1, sizeof (*tk)); + tk->job.id = id; + strlcpy(tk->job.tag, tag, sizeof (tk->job.tag)); + strlcpy(tk->job.project.name, project, sizeof (tk->job.project.name)); + + TAILQ_INSERT_TAIL(&tasks, tk, link); +} + +static void +merge(const char *str) +{ + json_t *array, *obj, *id, *tag, *project; + json_error_t err; + size_t i; + + if (!(array = json_loads(str, 0, &err))) { + log_warn("fetch: failed to decode JSON: %s", err.text); + return; + } + if (!json_is_array(array)) + goto invalid; + + json_array_foreach(array, i, obj) { + if (!json_is_object(obj) || + !json_is_integer((id = json_object_get(obj, "id"))) || + !json_is_string((tag = json_object_get(obj, "tag"))) || + !json_is_string((project = json_object_get(obj, "project")))) + goto invalid; + + if (!pending(json_integer_value(id))) + push(json_integer_value(id), json_string_value(tag), + json_string_value(project)); + } + + json_decref(array); + + return; + +invalid: + log_warn("fetch: invalid JSON input"); + json_decref(array); +} + +static void +fetchjobs(void) +{ + const char *json; + + if (!(json = get("fetch", makeurl("api/v1/jobs/%s", config.worker)))) + log_warn("unable to retrieve jobs"); + else + merge(json); +} + +/* + * This function reads stdout/stderr pipe from child and optionally remove them + * if they have completed. + */ +static void +readall(struct fds *fds) +{ + struct task *tk; + char buf[BUFSIZ]; + ssize_t nr; + + for (size_t i = 0; i < fds->listsz; ++i) { + if (fds->list[i].revents == 0) + continue; + if (!(tk = find_by_fd(fds->list[i].fd))) + continue; - if (!(job = find_by_pid(pid))) + /* Read stdout/stderr from children pipe. */ + if ((nr = read(fds->list[i].fd, buf, sizeof (buf) - 1)) <= 0) + tk->status = TASKST_SYNCING; + else { + buf[nr] = 0; + strlcat(tk->out, buf, sizeof (tk->out)); + } + } +} + +/* + * Retrieve status code from spawned process complete or upload again if they + * failed to sync. 
+ */ +static void +flushall(void) +{ + struct task *tk, *tmp; + + TAILQ_FOREACH_SAFE(tk, &tasks, link, tmp) + if (tk->status == TASKST_SYNCING) + upload(tk); +} + +static int +extract(struct task *tk, const char *json) +{ + json_t *doc, *code; + json_error_t err; + size_t len; + + if (!(doc = json_loads(json, 0, &err))) { + log_warn("fetchscript: failed to decode JSON: %s", err.text); + return -1; + } + if (!json_is_object(doc) || + !json_is_string((code = json_object_get(doc, "code")))) + goto invalid; + + len = strlen(json_string_value(code)); + + if ((size_t)write(tk->scriptfd, json_string_value(code), len) != len) { + log_warn("fetchscript: %s", strerror(errno)); + json_decref(doc); + + return -1; + } + + /* Close so we can finally spawn it. */ + close(tk->scriptfd); + tk->scriptfd = 0; + + return 0; + +invalid: + log_warn("fetchscript: invalid JSON"); + json_decref(doc); + + return -1; +} + +static int +fetchscript(struct task *tk) +{ + const char *json; + + if (!(json = get("fetchscript", makeurl("api/v1/script/%s", tk->job.project.name)))) + return -1; + + return extract(tk, json); +} + +static void +createtask(struct task *tk) +{ + if (tk->status != TASKST_PENDING) return; - /* TODO: send response. */ + log_debug("creating task (id=%lld, project=%s, tag=%s)", + tk->job.id, tk->job.project.name, tk->job.tag); + + snprintf(tk->script, sizeof (tk->script), "/tmp/sciworkerd-%s-XXXXXX", + tk->job.project.name); + + if ((tk->scriptfd = mkstemp(tk->script)) < 0 || + fchmod(tk->scriptfd, S_IRUSR | S_IWUSR | S_IXUSR) < 0) { + unlink(tk->script); + log_warn("%s", strerror(errno)); + return; + } + + if (fetchscript(tk) < 0) { + unlink(tk->script); + close(tk->scriptfd); + tk->scriptfd = 0; + } else + spawn(tk); +} - TAILQ_REMOVE(&running, job, link); - free(job); +static void +startall(void) +{ + size_t nrunning = 0; + struct task *tk; + + TAILQ_FOREACH(tk, &tasks, link) + if (tk->status == TASKST_RUNNING) + ++nrunning; + + if (nrunning >= (size_t)config.maxbuilds) { + log_debug("not spawning new process because limit is reached"); + } else { + tk = TAILQ_FIRST(&tasks); + + while (tk && nrunning++ < (size_t)config.maxbuilds) { + createtask(tk); + tk = TAILQ_NEXT(tk, link); + } + } } static void run(void) { struct fds fds; - struct result r; - struct job *job; - char buf[BUFSIZ]; - ssize_t nr; fds = prepare(); - if (poll(fds.list, fds.listsz, -1) < 0 && errno != EINTR) + if (poll(fds.list, fds.listsz, 5000) < 0 && errno != EINTR) err(1, "poll"); - for (size_t i = 1; i < fds.listsz; ++i) { - if (fds.list[i].revents == 0) - continue; - if (!(job = find_by_fd(fds.list[i].fd))) - continue; - - if ((nr = read(fds.list[i].fd, buf, sizeof (buf) - 1)) <= 0) { - finished(job->child); - } else { - buf[nr] = 0; - strlcat(job->out, buf, sizeof (job->out)); - } - } - - if (fds.list->revents) { - r.pid = 0; - r.status = 0; - - if (read(sigpipe[0], &r, sizeof (r)) <= 0 && errno != EINTR) - err(1, "read"); - - finished(r.pid); - } + fetchjobs(); + readall(&fds); + startall(); + flushall(); } int main(int argc, char **argv) { - (void)argc; - (void)argv; + int ch; + const char *errstr; + + setprogname("sciworkerd"); + + while ((ch = getopt(argc, argv, "m:u:w:")) != -1) { + switch (ch) { + case 'm': + config.maxbuilds = strtonum(optarg, 0, INT_MAX, &errstr); + + if (errstr) + errx(1, "%s: %s", optarg, errstr); + + break; + case 'u': + config.url = optarg; + break; + case 'w': + config.worker = optarg; + break; + default: + usage(); + break; + } + } init(); diff -r 5afdb14df924 -r 5fa3d2f479b2 
sql/job-queue-list.sql --- a/sql/job-queue-list.sql Tue Jun 08 08:40:01 2021 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,20 +0,0 @@ - SELECT job.id - , job.tag - , worker.id - , worker.name - , worker.desc - , project.id - , project.name - , project.desc - , project.url - , project.script - FROM job, worker, project - WHERE job.project_id = ? - AND job.project_id = project.id - AND job.id - NOT IN ( - SELECT job_result.job_id - FROM job_result - WHERE job_result.worker_id = worker.id - ) - LIMIT ? diff -r 5afdb14df924 -r 5fa3d2f479b2 sql/job-result-todo.sql --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/sql/job-result-todo.sql Thu Jun 10 10:39:21 2021 +0200 @@ -0,0 +1,12 @@ + SELECT job.id + , job.tag + , project.name + FROM job, project + WHERE job.project_id = project.id + AND job.id + NOT IN ( + SELECT job_result.job_id + FROM job_result + WHERE job_result.worker_id = ? + ) + LIMIT ? diff -r 5afdb14df924 -r 5fa3d2f479b2 util.c --- a/util.c Tue Jun 08 08:40:01 2021 +0200 +++ b/util.c Thu Jun 10 10:39:21 2021 +0200 @@ -225,3 +225,16 @@ return text; } + +const char * +util_path(const char *filename) +{ + assert(filename); + + /* Build path to the template file. */ + static char path[PATH_MAX]; + + //snprintf(path, sizeof (path), "%s/%s", config.themedir, filename); + + return path; +} diff -r 5afdb14df924 -r 5fa3d2f479b2 util.h --- a/util.h Tue Jun 08 08:40:01 2021 +0200 +++ b/util.h Thu Jun 10 10:39:21 2021 +0200 @@ -62,4 +62,7 @@ char * util_zbase64_dec(const char *); +const char * +util_path(const char *); + #endif /* !SCI_UTIL_H */
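The diff above wires the whole loop together: sciworkerd polls the web frontend over HTTP, runs the fetched scripts and uploads the results as JSON. As a stand-alone illustration (not part of the changeset), the sketch below performs the same poll sciworkerd's fetchjobs() does against the new GET api/v1/jobs route for a given worker and decodes the [{id, tag, project}, ...] array produced by page-api-jobs.c. The URL hard-codes the sciworkerd.c defaults (base URL http://localhost, worker name "default"); it relies only on libcurl and jansson calls already used elsewhere in this patch.

#include <stdio.h>
#include <string.h>

#include <curl/curl.h>
#include <jansson.h>

struct buf {
	char data[65536];
	size_t len;
};

/* Accumulate the HTTP response body into a fixed size buffer. */
static size_t
append(char *in, size_t size, size_t nmemb, void *arg)
{
	struct buf *b = arg;
	size_t n = size * nmemb;

	if (b->len + n >= sizeof (b->data))
		return 0;	/* Abort the transfer if the reply does not fit. */

	memcpy(b->data + b->len, in, n);
	b->len += n;
	b->data[b->len] = '\0';

	return n;
}

int
main(void)
{
	struct buf b = {0};
	CURL *curl;
	CURLcode code;
	json_t *array, *obj, *id, *tag, *project;
	json_error_t err;
	size_t i;

	curl_global_init(CURL_GLOBAL_DEFAULT);

	if (!(curl = curl_easy_init()))
		return 1;

	/* Defaults taken from sciworkerd.c, adjust for a real deployment. */
	curl_easy_setopt(curl, CURLOPT_URL, "http://localhost/api/v1/jobs/default");
	curl_easy_setopt(curl, CURLOPT_TIMEOUT, 3L);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, append);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, &b);
	code = curl_easy_perform(curl);
	curl_easy_cleanup(curl);

	if (code != CURLE_OK) {
		fprintf(stderr, "fetch: %s\n", curl_easy_strerror(code));
		return 1;
	}

	/* page-api-jobs.c answers with an array of {id, tag, project} objects. */
	if (!(array = json_loads(b.data, 0, &err))) {
		fprintf(stderr, "fetch: invalid JSON: %s\n", err.text);
		return 1;
	}

	json_array_foreach(array, i, obj) {
		id = json_object_get(obj, "id");
		tag = json_object_get(obj, "tag");
		project = json_object_get(obj, "project");

		if (json_is_integer(id) && json_is_string(tag) && json_is_string(project))
			printf("job %lld: project %s, tag %s\n",
			    (long long int)json_integer_value(id),
			    json_string_value(project),
			    json_string_value(tag));
	}

	json_decref(array);
	curl_global_cleanup();

	return 0;
}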
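In the other direction, uploadenc() in sciworkerd.c serializes a finished task and POSTs it back to the same jobs route, and parse() in page-api-jobs.c accepts the upload only if the integer fields id and retcode and the string field code (the captured console output) are all present. A minimal sketch of building that payload with jansson, using the same field names and a made-up job id:

#include <stdio.h>
#include <stdlib.h>

#include <jansson.h>

int
main(void)
{
	/* Field names are the ones parse() in page-api-jobs.c checks for. */
	json_t *object = json_pack("{s:i, s:i, s:s}",
	    "id", 123,				/* hypothetical job id */
	    "retcode", 0,
	    "code", "stdout/stderr of the job");
	char *payload = json_dumps(object, JSON_COMPACT);

	puts(payload);

	free(payload);
	json_decref(object);

	return 0;
}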