Index: Makefile ================================================================== --- Makefile +++ Makefile @@ -15,31 +15,58 @@ # You should have received a copy of the GNU General Public License # along with Megatest. If not, see . # make install CSCOPTS='-accumulate-profile -profile-name $(PWD)/profile-ww$(shell date +%V.%u)' # rm .o ; make install CSCOPTS='-profile' ; ... ; chicken-profile | less + +all : $(PREFIX)/bin/.$(ARCHSTR) mtest dboard mtut + +recent-commits.csv : .fslckout + fossil timeline -n 350 -t ci -F "%h,%a,%b,%t,\"%c\"" > recent-commits.csv + + SHELL=/bin/bash PREFIX=$(PWD) CSCOPTS= INSTALL=install SRCFILES = common.scm items.scm launch.scm ods.scm runconfig.scm \ - server.scm configf.scm db.scm keys.scm margs.scm \ + server.scm configf.scm db.scm keys.scm \ process.scm runs.scm tasks.scm tests.scm genexample.scm \ - http-transport.scm tdb.scm client.scm mt.scm \ - ezsteps.scm lock-queue.scm rmt.scm api.scm \ - subrun.scm portlogger.scm archive.scm env.scm \ + tdb.scm mt.scm \ + ezsteps.scm rmt.scm api.scm \ + subrun.scm archive.scm env.scm \ diff-report.scm cgisetup/models/pgdb.scm # module source files -MSRCFILES = dbfile.scm debugprint.scm mtargs.scm commonmod.scm dbmod.scm +MSRCFILES = dbfile.scm debugprint.scm mtargs.scm commonmod.scm dbmod.scm \ + tcp-transportmod.scm rmtmod.scm portlogger.scm -all : $(PREFIX)/bin/.$(ARCHSTR) mtest dboard mtut tcmt +transport-mode.scm : transport-mode.scm.template + @if [[ -e transport-mode.scm ]];then \ + echo "WARNING: transport-mode.scm.template is newer than transport-mode.scm"; else \ + cp transport-mode.scm.template transport-mode.scm; fi + +dashboard-transport-mode.scm : dashboard-transport-mode.scm.template + @if [[ -e dashboard-transport-mode.scm ]];then \ + echo "WARNING: dashboard-transport-mode.scm.template is newer than dashboard-transport-mode.scm"; else \ + cp dashboard-transport-mode.scm.template dashboard-transport-mode.scm; fi + +megatest.scm : transport-mode.scm +dashboard.scm : dashboard-transport-mode.scm # dbmod.import.o is just a hack here -mofiles/dbfile.o : mofiles/debugprint.o mofiles/commonmod.o dbmod.import.o -db.o : dbmod.import.o +mofiles/portlogger.o : mofiles/dbmod.o + +mofiles/dbfile.o : \ + mofiles/debugprint.o mofiles/commonmod.o + +configf.o : commonmod.import.o +mofiles/dbfile.o : mofiles/debugprint.o +mofiles/rmtmod.o mofiles/dbmod.o : mofiles/dbfile.o mofiles/commonmod.o mofiles/debugprint.o +db.o : mofiles/dbmod.o mofiles/dbfile.o mofiles/debugprint.o : mofiles/mtargs.o +mofiles/tcp-transportmod.o : mofiles/portlogger.o # ftail.scm rmtmod.scm commonmod.scm removed # MSRCFILES = ducttape-lib.scm pkts.scm stml2.scm cookie.scm mutils.scm \ # mtargs.scm commonmod.scm dbmod.scm adjutant.scm ulex.scm \ # rmtmod.scm apimod.scm @@ -63,13 +90,14 @@ # @[ -e mofiles ] || mkdir -p mofiles # csc $(CSCOPTS) -I mofiles -I $* -J -c $< -o $*.o # cp $*.o mofiles/$*.o # @touch $*.import.scm # ensure it is touched after the .o is made -mofiles/%.o : %.scm - mkdir -p mofiles +%.import.scm mofiles/%.o : %.scm + @mkdir -p mofiles csc $(CSCOPTS) -J -c $< -o mofiles/$*.o + @if [[ -e $*.import.scm ]];then touch $*.import.scm;fi # ensure it is touched after the .o is made ADTLSCR=mt_laststep mt_runstep mt_ezstep HELPERS=$(addprefix $(PREFIX)/bin/,$(ADTLSCR)) DEPLOYHELPERS=$(addprefix deploytarg/,$(ADTLSCR)) MTESTHASH=$(shell fossil info|grep checkout:| awk '{print $$2}') @@ -96,49 +124,45 @@ @echo $(MTESTHASH) dboard : $(OFILES) $(GOFILES) dashboard.scm $(MOFILES) $(MOIMPFILES) megatest-version.scm 
megatest-fossil-hash.scm csc $(CSCOPTS) $(OFILES) dashboard.scm $(GOFILES) $(MOFILES) $(MOIMPFILES) -o dboard -mtut: $(OFILES) $(MOFILES) megatest-fossil-hash.scm mtut.scm megatest-version.scm - csc $(CSCOPTS) $(OFILES) $(MOFILES) mtut.scm -o mtut +mtut: $(OFILES) $(MOFILES) $(MOIMPFILES) megatest-fossil-hash.scm mtut.scm megatest-version.scm + csc $(CSCOPTS) $(OFILES) $(MOFILES) $(MOIMPFILES) mtut.scm -o mtut # include makefile.inc -TCMTOBJS = \ - api.o \ - archive.o \ - cgisetup/models/pgdb.o \ - client.o \ - common.o \ - configf.o \ - db.o \ - env.o \ - http-transport.o \ - items.o \ - keys.o \ - launch.o \ - lock-queue.o \ - margs.o \ - mt.o \ - ods.o \ - portlogger.o \ - process.o \ - rmt.o \ - runconfig.o \ - runs.o \ - server.o \ - tasks.o \ - tdb.o \ - tests.o \ - subrun.o \ - ezsteps.o - -# mofiles/rmtmod.o \ -# mofiles/commonmod.o \ - -tcmt : $(TCMTOBJS) tcmt.scm megatest-version.scm $(MOFILES) $(MOIMPFILES) - csc $(CSCOPTS) $(TCMTOBJS) $(MOFILES) $(MOIMPFILES) tcmt.scm -o tcmt +# TCMTOBJS = \ +# api.o \ +# archive.o \ +# cgisetup/models/pgdb.o \ +# common.o \ +# configf.o \ +# db.o \ +# env.o \ +# items.o \ +# keys.o \ +# launch.o \ +# margs.o \ +# mt.o \ +# ods.o \ +# process.o \ +# rmt.o \ +# runconfig.o \ +# runs.o \ +# server.o \ +# tasks.o \ +# tdb.o \ +# tests.o \ +# subrun.o \ +# ezsteps.o +# +# # mofiles/rmtmod.o \ +# # mofiles/commonmod.o \ +# +# tcmt : $(TCMTOBJS) tcmt.scm megatest-version.scm $(MOFILES) $(MOIMPFILES) +# csc $(CSCOPTS) $(TCMTOBJS) $(MOFILES) $(MOIMPFILES) tcmt.scm -o tcmt # install documentation to $(PREFIX)/docs # DOES NOT REBUILD DOCS # $(PREFIX)/share/docs/megatest_manual.html : docs/manual/megatest_manual.html @@ -156,11 +180,11 @@ $(INSTALL) mt-pg.sql $(PREFIX)/share/db/mt-pg.sql # Special dependencies for the includes $(MOFILE) $(MOIMPFILES) : megatest-fossil-hash.scm -mofiles/commonmod.o : megatest-fossil-hash.scm +mofiles/commonmod.o : mofiles/debugprint.o megatest-fossil-hash.scm common.o : mofiles/commonmod.o # mofiles/dbmod.o : mofiles/configfmod.o # commonmod.o dashboard.o megatest.o tcmt.o apimod.o : megatest-fossil-hash.scm @@ -176,16 +200,17 @@ tests.o tasks.o dashboard-tasks.o : task_records.scm runs.o : test_records.scm -mofiles-made : $(MOFILES) - make $(MOIMPFILES) +# mofiles-made : $(MOFILES) +# make $(MOIMPFILES) +# touch mofiles-made -megatest.o : mofiles-made megatest-fossil-hash.scm megatest-version.scm $(MOFILES) $(MOIMPFILES) +megatest.o : megatest-fossil-hash.scm megatest-version.scm $(MOFILES) $(MOIMPFILES) -rmt.scm client.scm common.scm configf.scm dashboard-guimonitor.scm dashboard-tests.scm dashboard.scm db.scm dcommon.scm ezsteps.scm http-transport.scm index-tree.scm items.scm keys.scm launch.scm megatest.scm monitor.scm mt.scm newdashboard.scm runconfig.scm runs.scm server.scm tdb.scm tests.scm tree.scm : common_records.scm megatest-version.scm +rmt.scm common.scm configf.scm dashboard-guimonitor.scm dashboard-tests.scm dashboard.scm db.scm dcommon.scm ezsteps.scm index-tree.scm items.scm keys.scm launch.scm megatest.scm monitor.scm mt.scm newdashboard.scm runconfig.scm runs.scm server.scm tdb.scm tests.scm tree.scm : common_records.scm megatest-version.scm common_records.scm : altdb.scm mofiles/dbfile.o : mofiles/commonmod.o @@ -260,16 +285,16 @@ utils/mk_wrapper $(PREFIX) mtexec $(PREFIX)/bin/mtexec chmod a+x $(PREFIX)/bin/mtexec # tcmt -$(PREFIX)/bin/.$(ARCHSTR)/tcmt : tcmt - $(INSTALL) tcmt $(PREFIX)/bin/.$(ARCHSTR)/tcmt - -$(PREFIX)/bin/tcmt : $(PREFIX)/bin/.$(ARCHSTR)/tcmt utils/mk_wrapper - utils/mk_wrapper 
$(PREFIX) tcmt $(PREFIX)/bin/tcmt - chmod a+x $(PREFIX)/bin/tcmt +# $(PREFIX)/bin/.$(ARCHSTR)/tcmt : tcmt +# $(INSTALL) tcmt $(PREFIX)/bin/.$(ARCHSTR)/tcmt +# +# $(PREFIX)/bin/tcmt : $(PREFIX)/bin/.$(ARCHSTR)/tcmt utils/mk_wrapper +# utils/mk_wrapper $(PREFIX) tcmt $(PREFIX)/bin/tcmt +# chmod a+x $(PREFIX)/bin/tcmt $(PREFIX)/bin/mt_laststep : utils/mt_laststep $(INSTALL) $< $@ chmod a+x $@ @@ -359,22 +384,22 @@ $(INSTALL) lib/libxcb-xlib.so.0 $(PREFIX)/bin/.$(ARCHSTR)/lib/libxcb-xlib.so.0; \ fi install : $(PREFIX)/bin/.$(ARCHSTR) $(PREFIX)/bin/.$(ARCHSTR)/mtest $(PREFIX)/bin/megatest \ $(PREFIX)/bin/.$(ARCHSTR)/dboard $(PREFIX)/bin/dashboard $(HELPERS) $(PREFIX)/bin/nbfake \ - $(PREFIX)/bin/.$(ARCHSTR)/mtexec $(PREFIX)/bin/mtexec $(PREFIX)/bin/serialize-env \ $(PREFIX)/bin/nbfind $(PREFIX)/bin/mtrunner $(PREFIX)/bin/viewscreen $(PREFIX)/bin/mt_xterm \ $(PREFIX)/bin/mt-old-to-new.sh $(PREFIX)/bin/mt-new-to-old.sh \ $(PREFIX)/share/docs/megatest_manual.html $(PREFIX)/bin/remrun \ $(PREFIX)/share/docs/megatest_manual.html $(PREFIX)/bin/remrun $(PREFIX)/bin/mtutil \ - $(PREFIX)/bin/tcmt $(PREFIX)/share/db/mt-pg.sql \ + $(PREFIX)/share/db/mt-pg.sql \ $(PREFIX)/share/js/jquery-3.1.0.slim.min.js \ $(PREFIX)/bin/.$(ARCHSTR)/lib/libpangox-1.0.so \ $(PREFIX)/bin/.$(ARCHSTR)/lib/libpangox-1.0.so.0 \ - $(PREFIX)/bin/.$(ARCHSTR)/lib/libxcb-xlib.so.0 -# $(PREFIX)/bin/.$(ARCHSTR)/ndboard + $(PREFIX)/bin/.$(ARCHSTR)/lib/libxcb-xlib.so.0 $(PREFIX)/bin/serialize-env +# $(PREFIX)/bin/.$(ARCHSTR)/ndboard $(PREFIX)/bin/tcmt +# $(PREFIX)/bin/.$(ARCHSTR)/mtexec $(PREFIX)/bin/mtexec $(PREFIX)/bin/serialize-env \ # $(PREFIX)/bin/newdashboard $(PREFIX)/bin/.$(ARCHSTR) : mkdir -p $(PREFIX)/bin/.$(ARCHSTR) mkdir -p $(PREFIX)/bin/.$(ARCHSTR)/lib @@ -471,35 +496,42 @@ fi altdb.scm : echo ";; optional alternate db setup" > altdb.scm echo "(define *available-db* (make-hash-table))" >> altdb.scm - if csi -ne '(use mysql-client)';then \ + if csi -ne '(use mysql-client)' &> /dev/null;then \ echo "(use mysql-client)(hash-table-set! *available-db* 'mysql #t)" >> altdb.scm; \ fi - if csi -ne '(use postgresql)';then \ + if csi -ne '(use postgresql)'&> /dev/null;then \ echo "(use postgresql)(hash-table-set! *available-db* 'postgresql #t)" >> altdb.scm;\ fi + if csi -ne '(import mysql-client)'&> /dev/null;then \ + echo "(import mysql-client)(hash-table-set! *available-db* 'mysql #t)" >> altdb.scm; \ + fi + if csi -ne '(import postgresql)'&> /dev/null;then \ + echo "(import postgresql)(hash-table-set! 
*available-db* 'postgresql #t)" >> altdb.scm;\ + fi + +# portlogger-example : portlogger-example.scm api.o archive.o common.o configf.o dashboard-tests.o dashboard-context-menu.o db.o dcommon.o ezsteps.o genexample.o gutils.o items.o keys.o launch.o lock-queue.o margs.o mt.o ods.o portlogger.o process.o rmt.o runconfig.o runs.o server.o synchash.o tasks.o tdb.o tests.o tree.o +# csc $(CSCOPTS) portlogger-example.scm api.o archive.o common.o configf.o dashboard-tests.o dashboard-context-menu.o db.o dcommon.o ezsteps.o genexample.o gutils.o items.o keys.o launch.o lock-queue.o margs.o mt.o ods.o portlogger.o process.o rmt.o runconfig.o runs.o server.o sync-hash.o tasks.o tdb.o tests.o tree.o + +unitdeps.dot : *scm ./utils/plot-uses Makefile + ./utils/plot-uses todot commonmod.import,mtargs.import,mtargs,debugprint *.scm > unitdeps.dot + +unitdeps.pdf : unitdeps.dot + dot unitdeps.dot -Tpdf -o unitdeps.pdf -portlogger-example : portlogger-example.scm api.o archive.o client.o common.o configf.o dashboard-tests.o dashboard-context-menu.o db.o dcommon.o ezsteps.o genexample.o gutils.o http-transport.o items.o keys.o launch.o lock-queue.o margs.o mt.o ods.o portlogger.o process.o rmt.o runconfig.o runs.o server.o synchash.o tasks.o tdb.o tests.o tree.o - csc $(CSCOPTS) portlogger-example.scm api.o archive.o client.o common.o configf.o dashboard-tests.o dashboard-context-menu.o db.o dcommon.o ezsteps.o genexample.o gutils.o http-transport.o items.o keys.o launch.o lock-queue.o margs.o mt.o ods.o portlogger.o process.o rmt.o runconfig.o runs.o server.o synchash.o tasks.o tdb.o tests.o tree.o +./utils/plot-uses : utils/plot-uses.scm + csc utils/plot-uses.scm # create a pdf dot graphviz diagram from notations in rmt.scm rmt.pdf : rmt.scm grep ';;DOT' rmt.scm | sed -e 's/.*;;DOT //' > rmt.dot;dot -Tpdf rmt.dot -o rmt.pdf buildmanual: cd docs/manual && make -wikipage=plan -editwiki: - cd docs/manual && ../../utils/editwiki $(wikipage) - -viewmanual: - arora docs/manual/megatest_manual.html - targets: @grep : Makefile | perl -ne '/^([A-Za-z0-9_-]+):/ && print "$$1\n"' - unit : cd tests;make unit Index: TODO ================================================================== --- TODO +++ TODO @@ -16,10 +16,16 @@ # along with Megatest. If not, see . TODO ==== +23WW21 +. Dashboard needs its own cache db in /tmp + +23WW07 +. Remove use of *dbstruct-dbs* + WW15 . fill newview matrix with data, filter pipeline gui elements . improve [script], especially indent handling WW16 Index: api.scm ================================================================== --- api.scm +++ api.scm @@ -1,7 +1,5 @@ - - ;;====================================================================== ;; Copyright 2006-2013, Matthew Welland. ;; ;; This file is part of Megatest. ;; @@ -18,21 +16,30 @@ ;; You should have received a copy of the GNU General Public License ;; along with Megatest. If not, see . 
;; ;;====================================================================== -(use srfi-69 posix) - (declare (unit api)) -(declare (uses rmt)) (declare (uses db)) +(declare (uses debugprint)) +(declare (uses commonmod)) (declare (uses dbmod)) (declare (uses dbfile)) (declare (uses tasks)) +(declare (uses tcp-transportmod)) +(import commonmod) (import dbmod) (import dbfile) +(import debugprint) +(import tcp-transportmod) + +(use srfi-69 + srfi-18 + posix + matchable + s11n) ;; allow these queries through without starting a server ;; (define api:read-only-queries '(get-key-val-pairs @@ -39,10 +46,11 @@ get-var get-keys get-key-vals test-toplevel-num-items get-test-info-by-id + get-test-state-status-by-id get-steps-info-by-id get-data-info-by-id test-get-rundir-from-test-id get-count-tests-running-for-testname get-count-tests-running @@ -59,12 +67,12 @@ get-run-info get-run-status get-run-state get-run-stats get-run-times - get-targets get-target + get-targets ;; register-run get-tests-tags get-test-times get-tests-for-run get-tests-for-run-state-status @@ -86,11 +94,10 @@ read-test-data-varpatt login tasks-get-last testmeta-get-record have-incompletes? - ;; synchash-get get-changed-record-ids get-run-record-ids get-not-completed-cnt)) (define api:write-queries @@ -126,11 +133,11 @@ ;; TEST DATA test-data-rollup csv->test-data ;; MISC - sync-inmem->db + sync-cachedb->db drop-all-triggers create-all-triggers update-tesdata-on-repilcate-db ;; TESTMETA @@ -141,262 +148,362 @@ tasks-add tasks-set-state-given-param-key )) (define *db-write-mutexes* (make-hash-table)) - -;; These are called by the server on recipt of /api calls -;; - keep it simple, only return the actual result of the call, i.e. no meta info here -;; -;; - returns #( flag result ) -;; -(define (api:execute-requests dbstruct dat) - (db:open-no-sync-db) ;; sets *no-sync-db* -;; (handle-exceptions -;; exn -;; (let ((call-chain (get-call-chain))) -;; (debug:print 0 *default-log-port* "WARNING: api:execute-requests received an exception from peer, dat=" dat ", exn=" exn) -;; (print-call-chain (current-error-port)) -;; (debug:print 0 *default-log-port* " message: " ((condition-property-accessor 'exn 'message) exn)) - ;; (vector #f (vector exn call-chain dat))) ;; return some stuff for debug if an exception happens - (if (> *api-process-request-count* 200) - (begin - (if (common:low-noise-print 30 "too many threads") - (debug:print 0 *default-log-port* "WARNING: "*api-process-request-count*" threads, potential overload, adding 0.5 sec delay.")) - (thread-sleep! 0.5) ;; take a nap - )) - (cond - ((not (vector? dat)) ;; it is an error to not receive a vector - (vector #f (vector #f "remote must be called with a vector"))) - #;((> *api-process-request-count* 200) ;; 20) - (debug:print 0 *default-log-port* "WARNING: api:execute-requests received an overloaded message.") - (set! *server-overloaded* #t) - (vector #f (vector #f 'overloaded))) ;; the inner vector is what gets returned. nope, don't know why. please refactor! - (else - (let* ((cmd-in (vector-ref dat 0)) - (cmd (if (symbol? cmd-in) - cmd-in - (string->symbol cmd-in))) - (params (vector-ref dat 1)) - (run-id (if (null? params) - 0 - (car params))) - (write-mutex (if (hash-table-exists? *db-write-mutexes* run-id) - (hash-table-ref *db-write-mutexes* run-id) - (let* ((newmutex (make-mutex))) - (hash-table-set! 
*db-write-mutexes* run-id newmutex) - newmutex))) - (start-t (current-milliseconds)) - (readonly-mode (dbr:dbstruct-read-only dbstruct)) - (readonly-command (member cmd api:read-only-queries)) - (writecmd-in-readonly-mode (and readonly-mode (not readonly-command)))) - (if (not readonly-command) - (mutex-lock! write-mutex)) - (let* ((res - (if writecmd-in-readonly-mode - (conc "attempt to run write command "cmd" on a read-only database") - (case cmd - ;;=============================================== - ;; READ/WRITE QUERIES - ;;=============================================== - - ((get-keys-write) (db:get-keys dbstruct)) ;; force a dummy "write" query to force server; for debug in -repl - - ;; SERVERS - ((start-server) (apply server:kind-run params)) - ((kill-server) (set! *server-run* #f)) - - ;; TESTS - - ;;((test-set-state-status-by-id) (apply mt:test-set-state-status-by-id dbstruct params)) - ;;BB - commented out above because it was calling below, eventually, incorrectly (dbstruct passed to mt:test-set-state-status-by-id, which previosly did more, but now only passes thru to db:set-state-status-and-roll-up-items. - ((test-set-state-status-by-id) - - ;; (define (db:set-state-status-and-roll-up-items dbstruct run-id test-name item-path state status comment) - (db:set-state-status-and-roll-up-items - dbstruct - (list-ref params 0) ; run-id - (list-ref params 1) ; test-name - #f ; item-path - (list-ref params 2) ; state - (list-ref params 3) ; status - (list-ref params 4) ; comment - )) - - ((delete-test-records) (apply db:delete-test-records dbstruct params)) - ((delete-old-deleted-test-records) (apply db:delete-old-deleted-test-records dbstruct params)) - ((test-set-state-status) (apply db:test-set-state-status dbstruct params)) - ((test-set-top-process-pid) (apply db:test-set-top-process-pid dbstruct params)) - ((set-state-status-and-roll-up-items) (apply db:set-state-status-and-roll-up-items dbstruct params)) - ((set-state-status-and-roll-up-run) (apply db:set-state-status-and-roll-up-run dbstruct params)) - ((top-test-set-per-pf-counts) (apply db:top-test-set-per-pf-counts dbstruct params)) - ((test-set-archive-block-id) (apply db:test-set-archive-block-id dbstruct params)) - - ;; RUNS - ((register-run) (apply db:register-run dbstruct params)) - ((set-tests-state-status) (apply db:set-tests-state-status dbstruct params)) - ((delete-run) (apply db:delete-run dbstruct params)) - ((lock/unlock-run) (apply db:lock/unlock-run dbstruct params)) - ((update-run-event_time) (apply db:update-run-event_time dbstruct params)) - ((update-run-stats) (apply db:update-run-stats dbstruct params)) - ((set-var) (apply db:set-var dbstruct params)) - ((inc-var) (apply db:inc-var dbstruct params)) - ((dec-var) (apply db:dec-var dbstruct params)) - ((del-var) (apply db:del-var dbstruct params)) - ((add-var) (apply db:add-var dbstruct params)) - - ;; STEPS - ((teststep-set-status!) (apply db:teststep-set-status! dbstruct params)) - ((delete-steps-for-test!) (apply db:delete-steps-for-test! 
dbstruct params)) - - ;; TEST DATA - ((test-data-rollup) (apply db:test-data-rollup dbstruct params)) - ((csv->test-data) (apply db:csv->test-data dbstruct params)) - - ;; MISC - ((sync-inmem->db) (let ((run-id (car params))) - (db:sync-touched dbstruct run-id db:initialize-main-db force-sync: #t))) - ((mark-incomplete) (apply db:find-and-mark-incomplete dbstruct params)) - ((create-all-triggers) (db:create-all-triggers dbstruct)) - ((drop-all-triggers) (db:drop-all-triggers dbstruct)) - - ;; TESTMETA - ((testmeta-add-record) (apply db:testmeta-add-record dbstruct params)) - ((testmeta-update-field) (apply db:testmeta-update-field dbstruct params)) - ((get-tests-tags) (db:get-tests-tags dbstruct)) - - ;; TASKS - ((tasks-add) (apply tasks:add dbstruct params)) - ((tasks-set-state-given-param-key) (apply tasks:set-state-given-param-key dbstruct params)) - ((tasks-get-last) (apply tasks:get-last dbstruct params)) - - ;; NO SYNC DB - ((no-sync-set) (apply db:no-sync-set *no-sync-db* params)) - ((no-sync-get/default) (apply db:no-sync-get/default *no-sync-db* params)) - ((no-sync-del!) (apply db:no-sync-del! *no-sync-db* params)) - ((no-sync-get-lock) (apply db:no-sync-get-lock *no-sync-db* params)) - - ;; ARCHIVES - ;; ((archive-get-allocations) - ((archive-register-disk) (apply db:archive-register-disk dbstruct params)) - ((archive-register-block-name)(apply db:archive-register-block-name dbstruct params)) - ;; ((archive-allocate-testsuite/area-to-block)(apply db:archive-allocate-testsuite/area-to-block dbstruct block-id testsuite-name areakey)) - - ;;====================================================================== - ;; READ ONLY QUERIES - ;;====================================================================== - - ;; KEYS - ((get-key-val-pairs) (apply db:get-key-val-pairs dbstruct params)) - ((get-keys) (db:get-keys dbstruct)) - ((get-key-vals) (apply db:get-key-vals dbstruct params)) - ((get-target) (apply db:get-target dbstruct params)) - ((get-targets) (db:get-targets dbstruct)) - - ;; ARCHIVES - ((test-get-archive-block-info) (apply db:test-get-archive-block-info dbstruct params)) - - ;; TESTS - ((test-toplevel-num-items) (apply db:test-toplevel-num-items dbstruct params)) - ((get-test-info-by-id) (apply db:get-test-info-by-id dbstruct params)) - ((test-get-rundir-from-test-id) (apply db:test-get-rundir-from-test-id dbstruct params)) - ((get-count-tests-running-for-testname) (apply db:get-count-tests-running-for-testname dbstruct params)) - ((get-count-tests-running) (apply db:get-count-tests-running dbstruct params)) - ((get-count-tests-running-in-jobgroup) (apply db:get-count-tests-running-in-jobgroup dbstruct params)) - ;; ((delete-test-step-records) (apply db:delete-test-step-records dbstruct params)) - ;; ((get-previous-test-run-record) (apply db:get-previous-test-run-record dbstruct params)) - ((get-matching-previous-test-run-records)(apply db:get-matching-previous-test-run-records dbstruct params)) - ((test-get-logfile-info) (apply db:test-get-logfile-info dbstruct params)) - ((test-get-records-for-index-file) (apply db:test-get-records-for-index-file dbstruct params)) - ((get-testinfo-state-status) (apply db:get-testinfo-state-status dbstruct params)) - ((test-get-top-process-pid) (apply db:test-get-top-process-pid dbstruct params)) - ((test-get-paths-matching-keynames-target-new) (apply db:test-get-paths-matching-keynames-target-new dbstruct params)) - ((get-prereqs-not-met) (apply db:get-prereqs-not-met dbstruct params)) - ((get-count-tests-running-for-run-id) (apply 
db:get-count-tests-running-for-run-id dbstruct params)) - ((get-not-completed-cnt) (apply db:get-not-completed-cnt dbstruct params)) - ;; ((synchash-get) (apply synchash:server-get dbstruct params)) - ((get-raw-run-stats) (apply db:get-raw-run-stats dbstruct params)) - ((get-test-times) (apply db:get-test-times dbstruct params)) - - ;; RUNS - ((get-run-info) (apply db:get-run-info dbstruct params)) - ((get-run-status) (apply db:get-run-status dbstruct params)) - ((get-run-state) (apply db:get-run-state dbstruct params)) - ((set-run-status) (apply db:set-run-status dbstruct params)) - ((set-run-state-status) (apply db:set-run-state-status dbstruct params)) - ((update-tesdata-on-repilcate-db) (apply db:update-tesdata-on-repilcate-db dbstruct params)) - ((get-tests-for-run) (apply db:get-tests-for-run dbstruct params)) - ((get-tests-for-run-state-status) (apply db:get-tests-for-run-state-status dbstruct params)) - ((get-test-id) (apply db:get-test-id dbstruct params)) - ((get-tests-for-run-mindata) (apply db:get-tests-for-run-mindata dbstruct params)) - ;; ((get-tests-for-runs-mindata) (apply db:get-tests-for-runs-mindata dbstruct params)) - ((get-runs) (apply db:get-runs dbstruct params)) - ((simple-get-runs) (apply db:simple-get-runs dbstruct params)) - ((get-num-runs) (apply db:get-num-runs dbstruct params)) - ((get-runs-cnt-by-patt) (apply db:get-runs-cnt-by-patt dbstruct params)) - ((get-all-run-ids) (db:get-all-run-ids dbstruct)) - ((get-prev-run-ids) (apply db:get-prev-run-ids dbstruct params)) - ((get-run-ids-matching-target) (apply db:get-run-ids-matching-target dbstruct params)) - ((get-runs-by-patt) (apply db:get-runs-by-patt dbstruct params)) - ((get-run-name-from-id) (apply db:get-run-name-from-id dbstruct params)) - ((get-main-run-stats) (apply db:get-main-run-stats dbstruct params)) - ((get-var) (apply db:get-var dbstruct params)) - ((get-run-stats) (apply db:get-run-stats dbstruct params)) - ((get-run-times) (apply db:get-run-times dbstruct params)) - - ;; STEPS - ((get-steps-data) (apply db:get-steps-data dbstruct params)) - ((get-steps-for-test) (apply db:get-steps-for-test dbstruct params)) - ((get-steps-info-by-id) (apply db:get-steps-info-by-id dbstruct params)) - - ;; TEST DATA - ((read-test-data) (apply db:read-test-data dbstruct params)) - ((read-test-data-varpatt) (apply db:read-test-data-varpatt dbstruct params)) - ((get-data-info-by-id) (apply db:get-data-info-by-id dbstruct params)) - - ;; MISC - ((get-latest-host-load) (apply db:get-latest-host-load dbstruct params)) - ((have-incompletes?) (apply db:have-incompletes? dbstruct params)) - ((login) (apply db:login dbstruct params)) - ((general-call) (let ((stmtname (car params)) - (run-id (cadr params)) - (realparams (cddr params))) - (db:general-call dbstruct run-id stmtname realparams))) - ((sdb-qry) (apply sdb:qry params)) - ((ping) (current-process-id)) - ((get-changed-record-ids) (apply db:get-changed-record-ids dbstruct params)) - ((get-run-record-ids) (apply db:get-run-record-ids dbstruct params)) - ;; TESTMETA - ((testmeta-get-record) (apply db:testmeta-get-record dbstruct params)) - - ;; TASKS - ((find-task-queue-records) (apply tasks:find-task-queue-records dbstruct params)) - (else - (debug:print 0 *default-log-port* "ERROR: bad api call " cmd) - (conc "ERROR: BAD api call " cmd)))))) - (if (not readonly-command) - (mutex-unlock! write-mutex)) - - ;; save all stats - (let ((delta-t (- (current-milliseconds) - start-t)) - (modified-cmd (if (eq? 
cmd 'general-call) - (string->symbol (conc "general-call-" (car params))) - cmd))) - (hash-table-set! *db-api-call-time* modified-cmd - (cons delta-t (hash-table-ref/default *db-api-call-time* modified-cmd '())))) - (if writecmd-in-readonly-mode - (begin - #;(common:telemetry-log (conc "api-out:"(->string cmd)) - payload: `((params . ,params) - (ok-res . #t))) - (vector #f res)) - (begin - #;(common:telemetry-log (conc "api-out:"(->string cmd)) - payload: `((params . ,params) - (ok-res . #f))) - (vector #t res)))))))) +(define *server-signature* #f) +;; ;; These are called by the server on recipt of /api calls +;; ;; - keep it simple, only return the actual result of the call, i.e. no meta info here +;; ;; +;; ;; - returns #( flag result ) +;; ;; +;; (define (api:execute-requests dbstruct dat) +;; (if (> *api-process-request-count* 50) +;; (begin +;; (if (common:low-noise-print 30 "too many threads") +;; (debug:print 0 *default-log-port* "WARNING: "*api-process-request-count*" threads, potential overload, adding 0.5 sec delay.")) +;; ;; (thread-sleep! 0.5) ;; take a nap - no, the napping is moved to the clients via tt:backoff-incr +;; )) +;; (cond +;; ((not (vector? dat)) ;; it is an error to not receive a vector +;; (vector #f (vector #f "remote must be called with a vector"))) +;; (else +;; (let* ((cmd-in (vector-ref dat 0)) +;; (cmd (if (symbol? cmd-in) +;; cmd-in +;; (string->symbol cmd-in))) +;; (params (vector-ref dat 1)) +;; (run-id (if (null? params) +;; 0 +;; (car params))) +;; (write-mutex (if (hash-table-exists? *db-write-mutexes* run-id) +;; (hash-table-ref *db-write-mutexes* run-id) +;; (let* ((newmutex (make-mutex))) +;; (hash-table-set! *db-write-mutexes* run-id newmutex) +;; newmutex))) +;; (start-t (current-milliseconds)) +;; (readonly-mode (dbr:dbstruct-read-only dbstruct)) +;; (readonly-command (member cmd api:read-only-queries)) +;; (writecmd-in-readonly-mode (and readonly-mode (not readonly-command)))) +;; (if (not readonly-command) +;; (mutex-lock! write-mutex)) +;; (let* ((tmppath (dbr:dbstruct-tmppath dbstruct)) +;; (clean-run-id (cond +;; ((number? run-id) run-id) +;; ((equal? run-id #f) "main") +;; (else "other"))) +;; (crumbfile (dbfile:wait-for-qif tmppath clean-run-id (cons cmd params))) +;; (res +;; (if writecmd-in-readonly-mode +;; (conc "attempt to run write command "cmd" on a read-only database") +;; (api:dispatch-request dbstruct cmd run-id params)))) +;; (delete-file* crumbfile) +;; (if (not readonly-command) +;; (mutex-unlock! write-mutex)) +;; +;; ;; save all stats +;; (let ((delta-t (- (current-milliseconds) +;; start-t)) +;; (modified-cmd (if (eq? cmd 'general-call) +;; (string->symbol (conc "general-call-" (car params))) +;; cmd))) +;; (hash-table-set! *db-api-call-time* modified-cmd +;; (cons delta-t (hash-table-ref/default *db-api-call-time* modified-cmd '())))) +;; (if writecmd-in-readonly-mode +;; (begin +;; #;(common:telemetry-log (conc "api-out:"(->string cmd)) +;; payload: `((params . ,params) +;; (ok-res . #t))) +;; (vector #f res)) +;; (begin +;; #;(common:telemetry-log (conc "api-out:"(->string cmd)) +;; payload: `((params . ,params) +;; (ok-res . #f))) +;; (vector #t res)))))))) + +(define *api-threads* '()) +(define (api:register-thread th-in) + (set! *api-threads* (cons (cons th-in (current-seconds)) *api-threads*))) + +(define (api:unregister-thread th-in) + (set! *api-threads* (filter (lambda (thdat) + (not (eq? th-in (car thdat)))) + *api-threads*))) + +(define (api:remove-dead-or-terminated) + (set! 
*api-threads* (filter (lambda (thdat) + (not (member (thread-state (car thdat)) '(terminated dead)))) + *api-threads*))) + +(define (api:get-count-threads-alive) + (length *api-threads*)) + + +;; indat is (cmd run-id params meta) +;; +;; WARNING: Do not print anything in the lambda of this function as it +;; reads/writes to current in/out port +;; +(define (api:tcp-dispatch-request-make-handler dbstruct) ;; cmd run-id params) + (assert *toppath* "FATAL: api:tcp-dispatch-request-make-handler called but *toppath* not set.") + (if (not *server-signature*) + (set! *server-signature* (tt:mk-signature *toppath*))) + (lambda (indat) + (api:register-thread (current-thread)) + (let* (;; (indat (deserialize)) + (newcount (+ *api-process-request-count* 1)) + (numthreads (api:get-count-threads-alive)) + (delay-wait (if (> newcount 10) + (- newcount 10) + 0)) + (normal-proc (lambda (cmd run-id params) + (case cmd + ((ping) *server-signature*) + (else + (api:dispatch-request dbstruct cmd run-id params)))))) + (set! *api-process-request-count* newcount) + (set! *db-last-access* (current-seconds)) + (if (not (eq? newcount numthreads)) + (begin + (api:remove-dead-or-terminated) + (let ((threads-now (api:get-count-threads-alive))) + (debug:print 0 *default-log-port* "WARNING: newcount="newcount", numthreads="numthreads", remaining="threads-now) + (set! newcount threads-now)))) + (match indat + ((cmd run-id params meta) + (let* ((db-ok (let* ((dbfname (dbmod:run-id->dbfname run-id)) + (ok (equal? dbfname (dbr:dbstruct-dbfname dbstruct)))) + (case cmd + ((ping) #t) ;; we are fine + (else + (if (not ok)(debug:print 0 *default-log-port* "ERROR: "cmd", run-id "run-id", not correct for dbfname "(dbr:dbstruct-dbfname dbstruct))) + (assert ok "FATAL: database file and run-id not aligned."))))) + (ttdat *server-info*) + (server-state (tt-state ttdat)) + (status (cond + ((> newcount 3) 'busy) + ;; ((> newcount 5) 'loaded) ;; this gets transmitted to the client which calls tt:backoff-incr to slow stuff down. + (else 'ok))) + (errmsg (case status + ((busy) (conc "Server overloaded, "newcount" threads in flight")) + ((loaded) (conc "Server loaded, "newcount" threads in flight")) + (else #f))) + (result (case status + ((busy) + (if (eq? cmd 'ping) + (normal-proc cmd run-id params) + ;; newcount must be greater than 5 for busy + (* 1 (- newcount 3)) ;; was 15 + )) ;; (- newcount 29)) ;; call back in as many seconds + ((loaded) +;; (if (eq? (rmt:transport-mode) 'tcp) +;; (thread-sleep! 0.5)) + (normal-proc cmd run-id params)) + (else + (normal-proc cmd run-id params)))) + (meta (case cmd + ((ping) `((sstate . ,server-state))) + (else `((wait . ,delay-wait))))) + (payload (list status errmsg result meta))) + (set! *api-process-request-count* (- *api-process-request-count* 1)) + ;; (serialize payload) + (api:unregister-thread (current-thread)) + payload)) + (else + (assert #f "FATAL: failed to deserialize indat "indat)))))) + + +(define (api:dispatch-request dbstruct cmd run-id params) + (if (not *no-sync-db*) + (db:open-no-sync-db)) + (case cmd + ;;=============================================== + ;; READ/WRITE QUERIES + ;;=============================================== + + ((get-keys-write) (db:get-keys dbstruct)) ;; force a dummy "write" query to force server; for debug in -repl + + ;; SERVERS + ((start-server) (apply server:kind-run params)) + ((kill-server) (set! 
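;; A rough sketch of the request/response shape handled by
;; api:tcp-dispatch-request-make-handler above (the command, ids and
;; counts are illustrative, not taken from a real session): a client
;; sends indat as a four element list (cmd run-id params meta), e.g.
;;   (get-run-info 1 (1) ())
;; and, with fewer than four requests in flight, gets back
;;   (ok #f <result of (db:get-run-info dbstruct 1)> ((wait . 0)))
;; whereas an overloaded server short-circuits non-ping commands with
;;   (busy "Server overloaded, 7 threads in flight" 4 ((wait . 0)))
;; where the third element is the suggested number of seconds before
;; the client retries.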
*server-run* #f)) + + ;; TESTS + + ;;((test-set-state-status-by-id) (apply mt:test-set-state-status-by-id dbstruct params)) + ;;BB - commented out above because it was calling below, eventually, incorrectly (dbstruct passed to mt:test-set-state-status-by-id, which previosly did more, but now only passes thru to db:set-state-status-and-roll-up-items. + ((test-set-state-status-by-id) + + ;; (define (db:set-state-status-and-roll-up-items dbstruct run-id test-name item-path state status comment) + (db:set-state-status-and-roll-up-items + dbstruct + (list-ref params 0) ; run-id + (list-ref params 1) ; test-name + #f ; item-path + (list-ref params 2) ; state + (list-ref params 3) ; status + (list-ref params 4) ; comment + )) + + ((delete-test-records) (apply db:delete-test-records dbstruct params)) + ((delete-old-deleted-test-records) (apply db:delete-old-deleted-test-records dbstruct params)) + ((test-set-state-status) (apply db:test-set-state-status dbstruct params)) + ((test-set-top-process-pid) (apply db:test-set-top-process-pid dbstruct params)) + ((set-state-status-and-roll-up-items) (apply db:set-state-status-and-roll-up-items dbstruct params)) + ((set-state-status-and-roll-up-run) (apply db:set-state-status-and-roll-up-run dbstruct params)) + ((top-test-set-per-pf-counts) (apply db:top-test-set-per-pf-counts dbstruct params)) + ((test-set-archive-block-id) (apply db:test-set-archive-block-id dbstruct params)) + + ((insert-test) (db:insert-test dbstruct run-id params)) + + ;; RUNS + ((register-run) (apply db:register-run dbstruct params)) + ((set-tests-state-status) (apply db:set-tests-state-status dbstruct params)) + ((delete-run) (apply db:delete-run dbstruct params)) + ((lock/unlock-run) (apply db:lock/unlock-run dbstruct params)) + ((update-run-event_time) (apply db:update-run-event_time dbstruct params)) + ((update-run-stats) (apply db:update-run-stats dbstruct params)) + ((set-var) (apply db:set-var dbstruct params)) + ((inc-var) (apply db:inc-var dbstruct params)) + ((dec-var) (apply db:dec-var dbstruct params)) + ((del-var) (apply db:del-var dbstruct params)) + ((add-var) (apply db:add-var dbstruct params)) + + ((insert-run) (apply db:insert-run dbstruct params)) + + ;; STEPS + ((teststep-set-status!) (apply db:teststep-set-status! dbstruct params)) + ((delete-steps-for-test!) (apply db:delete-steps-for-test! dbstruct params)) + + ;; TEST DATA + ((test-data-rollup) (apply db:test-data-rollup dbstruct params)) + ((csv->test-data) (apply db:csv->test-data dbstruct params)) + + ;; MISC + ((sync-cachedb->db) (let ((run-id (car params))) + (db:sync-touched dbstruct run-id db:initialize-main-db force-sync: #t))) + ((get-toplevels-and-incompletes) (apply db:get-toplevels-and-incompletes dbstruct params)) + ((mark-incomplete) #f);;(thread-start! 
(make-thread (lambda () ;; no need to block on this one + ;; (apply db:find-and-mark-incomplete dbstruct params) + ;; #t)))) + ((create-all-triggers) (db:create-all-triggers dbstruct)) + ((drop-all-triggers) (db:drop-all-triggers dbstruct)) + + ;; TESTMETA + ((testmeta-add-record) (apply db:testmeta-add-record dbstruct params)) + ((testmeta-update-field) (apply db:testmeta-update-field dbstruct params)) + ((get-tests-tags) (db:get-tests-tags dbstruct)) + + ;; TASKS + ((tasks-add) (apply tasks:add dbstruct params)) + ((tasks-set-state-given-param-key) (apply tasks:set-state-given-param-key dbstruct params)) + ((tasks-get-last) (apply tasks:get-last dbstruct params)) + + ;; NO SYNC DB + ((no-sync-set) (apply db:no-sync-set *no-sync-db* params)) + ((no-sync-get/default) (apply db:no-sync-get/default *no-sync-db* params)) + ((no-sync-del!) (apply db:no-sync-del! *no-sync-db* params)) + ((no-sync-get-lock) (apply db:no-sync-get-lock *no-sync-db* params)) + + ;; ARCHIVES + ;; ((archive-get-allocations) + ((archive-register-disk) (apply db:archive-register-disk dbstruct params)) + ((archive-register-block-name)(apply db:archive-register-block-name dbstruct params)) + ;; ((archive-allocate-testsuite/area-to-block)(apply db:archive-allocate-testsuite/area-to-block dbstruct block-id testsuite-name areakey)) + + ;;====================================================================== + ;; READ ONLY QUERIES + ;;====================================================================== + + ;; KEYS + ((get-key-val-pairs) (apply db:get-key-val-pairs dbstruct params)) + ((get-keys) (db:get-keys dbstruct)) + ((get-key-vals) (apply db:get-key-vals dbstruct params)) + ((get-target) (apply db:get-target dbstruct params)) + ((get-targets) (db:get-targets dbstruct)) + + ;; ARCHIVES + ((test-get-archive-block-info) (apply db:test-get-archive-block-info dbstruct params)) + + ;; TESTS + ((test-toplevel-num-items) (apply db:test-toplevel-num-items dbstruct params)) + ((get-test-info-by-id) (apply db:get-test-info-by-id dbstruct params)) + ((get-test-state-status-by-id) (apply db:get-test-state-status-by-id dbstruct params)) + ((test-get-rundir-from-test-id) (apply db:test-get-rundir-from-test-id dbstruct params)) + ((get-count-tests-running-for-testname) (apply db:get-count-tests-running-for-testname dbstruct params)) + ((get-count-tests-running) (apply db:get-count-tests-running dbstruct params)) + ((get-count-tests-running-in-jobgroup) (apply db:get-count-tests-running-in-jobgroup dbstruct params)) + ;; ((delete-test-step-records) (apply db:delete-test-step-records dbstruct params)) + ;; ((get-previous-test-run-record) (apply db:get-previous-test-run-record dbstruct params)) + ((get-matching-previous-test-run-records)(apply db:get-matching-previous-test-run-records dbstruct params)) + ((test-get-logfile-info) (apply db:test-get-logfile-info dbstruct params)) + ((test-get-records-for-index-file) (apply db:test-get-records-for-index-file dbstruct params)) + ((get-testinfo-state-status) (apply db:get-testinfo-state-status dbstruct params)) + ((test-get-top-process-pid) (apply db:test-get-top-process-pid dbstruct params)) + ((test-get-paths-matching-keynames-target-new) (apply db:test-get-paths-matching-keynames-target-new dbstruct params)) + ((get-prereqs-not-met) (apply db:get-prereqs-not-met dbstruct params)) + ((get-count-tests-running-for-run-id) (apply db:get-count-tests-running-for-run-id dbstruct params)) + ((get-not-completed-cnt) (apply db:get-not-completed-cnt dbstruct params)) + ((get-raw-run-stats) (apply 
db:get-raw-run-stats dbstruct params)) + ((get-test-times) (apply db:get-test-times dbstruct params)) + + ;; RUNS + ((get-run-info) (apply db:get-run-info dbstruct params)) + ((get-run-status) (apply db:get-run-status dbstruct params)) + ((get-run-state) (apply db:get-run-state dbstruct params)) + ((get-run-state-status) (apply db:get-run-state-status dbstruct params)) + ((set-run-status) (apply db:set-run-status dbstruct params)) + ((set-run-state-status) (apply db:set-run-state-status dbstruct params)) + ((update-tesdata-on-repilcate-db) (apply db:update-tesdata-on-repilcate-db dbstruct params)) + ((get-tests-for-run) (apply db:get-tests-for-run dbstruct params)) + ((get-tests-for-run-state-status) (apply db:get-tests-for-run-state-status dbstruct params)) + ((get-test-id) (apply db:get-test-id dbstruct params)) + ((get-tests-for-run-mindata) (apply db:get-tests-for-run-mindata dbstruct params)) + ;; ((get-tests-for-runs-mindata) (apply db:get-tests-for-runs-mindata dbstruct params)) + ((get-runs) (apply db:get-runs dbstruct params)) + ((simple-get-runs) (apply db:simple-get-runs dbstruct params)) + ((get-num-runs) (apply db:get-num-runs dbstruct params)) + ((get-runs-cnt-by-patt) (apply db:get-runs-cnt-by-patt dbstruct params)) + ((get-all-run-ids) (db:get-all-run-ids dbstruct)) + ((get-prev-run-ids) (apply db:get-prev-run-ids dbstruct params)) + ((get-run-ids-matching-target) (apply db:get-run-ids-matching-target dbstruct params)) + ((get-runs-by-patt) (apply db:get-runs-by-patt dbstruct params)) + ((get-run-name-from-id) (apply db:get-run-name-from-id dbstruct params)) + ((get-main-run-stats) (apply db:get-main-run-stats dbstruct params)) + ((get-var) (apply db:get-var dbstruct params)) + ((get-run-stats) (apply db:get-run-stats dbstruct params)) + ((get-run-times) (apply db:get-run-times dbstruct params)) + + ;; STEPS + ((get-steps-data) (apply db:get-steps-data dbstruct params)) + ((get-steps-for-test) (apply db:get-steps-for-test dbstruct params)) + ((get-steps-info-by-id) (apply db:get-steps-info-by-id dbstruct params)) + + ;; TEST DATA + ((read-test-data) (apply db:read-test-data dbstruct params)) + ((read-test-data-varpatt) (apply db:read-test-data-varpatt dbstruct params)) + ((get-data-info-by-id) (apply db:get-data-info-by-id dbstruct params)) + + ;; MISC + ((get-latest-host-load) (apply db:get-latest-host-load dbstruct params)) + ((have-incompletes?) (apply db:have-incompletes? 
dbstruct params)) + ((login) (apply db:login dbstruct params)) + ((general-call) (let ((stmtname (car params)) + (run-id (cadr params)) + (realparams (cddr params))) + (db:general-call dbstruct run-id stmtname realparams))) + ((sdb-qry) (apply sdb:qry params)) + ((ping) (current-process-id)) + ((get-changed-record-ids) (apply db:get-changed-record-ids dbstruct params)) + ((get-run-record-ids) (apply db:get-run-record-ids dbstruct params)) + ;; TESTMETA + ((testmeta-get-record) (apply db:testmeta-get-record dbstruct params)) + + ;; TASKS + ((find-task-queue-records) (apply tasks:find-task-queue-records dbstruct params)) + (else + (debug:print 0 *default-log-port* "ERROR: bad api call " cmd) + (conc "ERROR: BAD api call " cmd)))) ;; http-server send-response ;; api:process-request ;; db:* ;; Index: apimod.scm ================================================================== --- apimod.scm +++ apimod.scm @@ -18,17 +18,15 @@ ;;====================================================================== (declare (unit apimod)) (declare (uses commonmod)) -(declare (uses ulex)) (module apimod * (import scheme chicken data-structures extras) (import (prefix sqlite3 sqlite3:) posix typed-records srfi-18) (import commonmod) -(import (prefix ulex ulex:)) ) Index: archive.scm ================================================================== --- archive.scm +++ archive.scm @@ -16,15 +16,25 @@ ;; along with Megatest. If not, see . ;; ;; strftime('%m/%d/%Y %H:%M:%S','now','localtime') -(use (prefix sqlite3 sqlite3:) srfi-1 posix regex regex-case srfi-69 format md5 message-digest srfi-18) - (declare (unit archive)) (declare (uses db)) +(declare (uses debugprint)) +(declare (uses mtargs)) (declare (uses common)) +(declare (uses commonmod)) +(declare (uses rmtmod)) + +(use (prefix sqlite3 sqlite3:) srfi-1 posix regex regex-case srfi-69 + format md5 message-digest srfi-18) + +(import commonmod + debugprint + rmtmod + (prefix mtargs args:)) (include "common_records.scm") (include "db_records.scm") ;;====================================================================== ADDED artifacts.scm Index: artifacts.scm ================================================================== --- /dev/null +++ artifacts.scm @@ -0,0 +1,24 @@ +;;====================================================================== +;; Copyright 2019, Matthew Welland. +;; +;; This file is part of Megatest. +;; +;; Megatest is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. +;; +;; Megatest is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with Megatest. If not, see . 
+ +;;====================================================================== + +(declare (unit artifacts)) + +(include "artifacts/artifacts.scm") + ADDED artifacts/README Index: artifacts/README ================================================================== --- /dev/null +++ artifacts/README @@ -0,0 +1,1 @@ +NOTE: keep megatest/artifacts/ in sync with datastore/artifacts ADDED artifacts/artifacts.meta Index: artifacts/artifacts.meta ================================================================== --- /dev/null +++ artifacts/artifacts.meta @@ -0,0 +1,21 @@ +;; -*- scheme -*- +( +; Your egg's license: +(license "BSD") + +; Pick one from the list of categories (see below) for your egg and enter it +; here. +(category db) + +; A list of eggs pkts depends on. If none, you can omit this declaration +; altogether. If you are making an egg for chicken 3 and you need to use +; procedures from the `files' unit, be sure to include the `files' egg in the +; `needs' section (chicken versions < 3.4.0 don't provide the `files' unit). +; `depends' is an alias to `needs'. +;; (needs (autoload "3.0")) + +; A list of eggs required for TESTING ONLY. See the `Tests' section. +(test-depends test) + +(author "Matt Welland") +(synopsis "A sha1-chain based datastore similar to the data format in fossil scm, consisting of artifacts of single line cards.")) ADDED artifacts/artifacts.release-info Index: artifacts/artifacts.release-info ================================================================== --- /dev/null +++ artifacts/artifacts.release-info @@ -0,0 +1,3 @@ +(repo fossil "http://www.kiatoa.com/cgi-bin/fossils/{egg-name}") +(uri zip "http://www.kiatoa.com/cgi-bin/fossils/{egg-name}/zip/{egg-name}.zip?uuid={egg-release}") +(release "1.0") ADDED artifacts/artifacts.scm Index: artifacts/artifacts.scm ================================================================== --- /dev/null +++ artifacts/artifacts.scm @@ -0,0 +1,1624 @@ +;; Copyright 2006-2017, Matthew Welland. +;; +;; This file is part of artifacts +;; +;; Pkts is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. +;; +;; Pkts is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with Pkts. If not, see . +;; + +;; CARDS: +;; +;; A card is a line of text, the first two characters are a letter followed by a +;; space. The letter is the card type. +;; +;; artifact: +;; +;; An artifact is a sorted list of cards with a final card Z that contains the shar1 hash +;; of all of the preceding cards. +;; +;; AARTIFACT: +;; +;; An alist mapping card types to card data +;; '((T . "artifacttype") +;; (a . "some content")) +;; +;; EARTIFACT: +;; +;; Extended packet using friendly keys. Must use a artifactspec to convert to/from eartifacts +;; '((ptype . "artifacttype") +;; (adata . "some content)) +;; +;; DARTIFACT: +;; +;; artifacts pulled from the database have this format: +;; +;;((aartifact (Z . "7de89c09ac024b3832c93e16cd78d11e2e28733b") <= this is a the alist +;; (t . "v1.63/tip/dev") +;; (c . "QUICKPATT") +;; (T . "runstart") +;; (P . "354eeb67120a921e3e33155ecab1b522ac10b6bd") +;; (D . "1488995096.0")) +;; (id . 
8) +;; (group-id . 0) +;; (uuid . "7de89c09ac024b3832c93e16cd78d11e2e28733b") +;; (parent . "") +;; (artifact-type . "runstart") +;; (artifact . "D 1488995096.0\nP 354eeb67120a921e3e33155ecab1b522ac10b6bd\nT runstart\nc QUICKPATT\nt v1.63/tip/dev\nZ 7de89c09ac024b3832c93e16cd78d11e2e28733b")) +;; +;; artifactspec is alist of alists mapping types and nicekeys to keys +;; +;; '((posting . ((title . t) +;; (url . u) +;; (blurb . b))) +;; (comment . ((comment . c) +;; (score . s)))) + +;; Reserved cards: +;; P : artifact parent +;; R : reference artifact containing mapping of short string -> sha1sum strings +;; T : artifact type +;; D : current time from (current-time), unless provided +;; Z : shar1 hash of the packet + +;; Example usage: +;; +;; Create a artifact: +;; +;; (use artifacts) +;; (define-values (uuid artifact) +;; (alist->artifact +;; '((fruit . "apple") (meat . "beef")) ;; this is the data to convert +;; '((foods (fruit . f) (meat . m))) ;; this is the artifact spec +;; ptype: +;; 'foods)) +;; +;; Add to artifact queue: +;; +;; (define db (open-queue-db "/tmp/artifacts" "artifacts.db")) +;; (add-to-queue db artifact uuid 'foods #f 0) ;; no parent and use group_id of 0 +;; +;; Retrieve the packet from the db and extract a value: +;; +;; (alist-ref +;; 'meat +;; (dartifact->alist +;; (car (get-dartifacts db #f 0 #f)) +;; '((foods (fruit . f) +;; (meat . m))))) +;; => "beef" +;; + +(module artifacts +( +;; cards, util and misc +;; sort-cards +;; calc-sha1 +;; +;; low-level constructor procs, exposed only for development/testing, will be removed +construct-sdat +construct-artifact +card->type/value +add-z-card + +;; queue database procs +open-queue-db +add-to-queue +create-and-queue +;; lookup-by-uuid +lookup-by-id +get-dartifacts +get-not-processed-artifacts +get-related +find-artifacts +process-artifacts +get-descendents +get-ancestors +get-artifacts +;; get-last-descendent +;; with-queue-db +;; load-artifacts-to-db + +;; procs that operate directly on artifacts, sdat, aartifacts, dartifacts etc. +artifact->alist ;; artifact -> aartifact (i.e. alist) +artifact->sdat ;; artifact -> '("a aval" "b bval" ...) +sdat->alist ;; '("a aval" "b bval"...) -> ((a . "aval")(b . "bval") ...) +dblst->dartifacts ;; convert list of tuples from queue db into dartifacts +dartifact->alist ;; flatten a dartifact into an alist containing all db fields and the artifact alist +dartifacts->alists ;; apply dartifact->alist to a list of alists using a artifact-spec +alist->artifact ;; returns two values uuid, artifact +get-value ;; looks up a value given a key in a dartifact +flatten-all ;; merge the list of values from a query which includes a artifact into a flat alist <== really useful! +check-artifact + +;; artifact alists +write-alist->artifact +read-artifact->alist + +;; archive database +;; archive-open-db +;; write-archive-artifacts +;; archive-artifacts +;; mark-processed + +;; artifactsdb +artifactdb-conn ;; useful +artifactdb-fname +artifactsdb-open +artifactsdb-close +artifactsdb-add-record +;; temporary +artifactdb-artifactspec + +;; utility procs +increment-string ;; used to get indexes for strings in ref artifacts +make-report ;; make a .dot file +calc-sha1 +uuid-first-two-letters +uuid-remaining-letters + +;; file and directory utils +multi-glob +capture-dir +file-get-sha1 +check-same +link-or-copy +same-partition? 
+link-if-same-partition +archive-copy +write-to-archive +artifact-rollup +read-artifacts-into-hash +hash-of-artifacts->bundle +archive-dest + +;; pathname-full-filename + +;; minimal artifact functions +minimal-artifact-read +minimal-artifact->alist +afact-get-D +afact-get-Z +afact-get-T +afact-get +afact-get-number/default + + +;; bundles +write-bundle +read-bundle + +;; new artifacts db +with-todays-adb +get-all-artifacts +refresh-artifacts-db + +) + +(import (chicken base) scheme (chicken process) (chicken time posix) + (chicken io) (chicken file) (chicken pathname) + chicken.process-context.posix (chicken string) + (chicken time) (chicken sort) (chicken file posix) (chicken condition) srfi-1 + regex srfi-13 srfi-69 (chicken port) (chicken process-context) + crypt sha1 matchable message-digest sqlite3 typed-records + directory-utils + scsh-process) + +;;====================================================================== +;; DATA MANIPULATION UTILS +;;====================================================================== + +(define-inline (unescape-data data) + (string-translate* data '(("\\n" . "\n") ("\\\\" . "\\")))) + +(define-inline (escape-data data) + (string-translate* data '(("\n" . "\\n") ("\\" . "\\\\")))) + +(define-inline (make-card type data) + (conc type " " (escape-data (->string data)))) + +;; reverse an alist for doing artifactkey -> external key conversions +;; +(define-inline (reverse-aspec aspec) + (map (lambda (dat) + (cons (cdr dat)(car dat))) + aspec)) + +;; add a card to the list of cards, sdat +;; if type is #f return only sdat +;; if data is #f return only sdat +;; +(define-inline (add-card sdat type data) + (if (and type data) + (cons (make-card type data) sdat) + sdat)) + +;;====================================================================== +;; STRING AS FUNKY NUMBER +;;====================================================================== + +;; NOTE: PTDZ are removed as they are reserved. NB// the R card is not used in a +;; ref, instead the P parent card is used. +;; Question: Why does it matter to remove PTDZ? +;; To make the ref easier to use the ref strings will be the keys +;; so we cannot have overlap with any actual keys. But this is a +;; bit silly. What we need to do instead is reject keys of length +;; one where the char is in PTDZ +;; +;; This is basically base92 +;; +(define string-num-chars (string->list "!#$%&'()*+,-./0123456789:;<=>?@ABCEFGHIJKLMNOQRSUVWXY[\\]^_abcdefghijklmnopqrstuvwxyz{|}~")) +;; "0123456789abcdefghijklmnopqrstuvwxyzABCEFGHIJKLMNOQSUVWXY!#$%&'()*+,-./[]:;<=>?\\^_{}|")) + +(define (char-incr inchar) + (let* ((carry #f) + (next-char (let ((rem (member inchar string-num-chars))) + (if (eq? (length rem) 1) ;; we are at the last character in our string-num-chars list + (begin + (set! carry #t) + (car string-num-chars)) + (cadr rem))))) + (values next-char carry))) + +(define (increment-string str) + (if (string-null? str) + "0" + (let ((strlst (reverse (string->list str)))) ;; need to process the string from the lsd + (list->string + (let loop ((hed (car strlst)) + (tal (cdr strlst)) + (res '())) + (let-values (((newhed carry)(char-incr hed))) + ;; (print "newhed: " newhed " carry: " carry " tal: " tal) + (let ((newres (cons newhed res))) + (if carry ;; we'll have to propagate the carry + (if (null? 
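;; A few worked values for increment-string, assuming the
;; string-num-chars table above (sketch only, derived by reading
;; char-incr/increment-string):
;;   (increment-string "")  => "0"  ;; empty string starts the count at "0"
;;   (increment-string "0") => "1"  ;; next character in the table
;;   (increment-string "9") => ":"  ;; ":" follows "9" in the table
;;   (increment-string "~") => "!!" ;; "~" is last, so wrap to "!" and carry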
tal) ;; at the end, tack on "0" (which is really a "1") + (cons (car string-num-chars) newres) + (loop (car tal)(cdr tal) newres)) + (append (reverse tal) newres))))))))) + +;;====================================================================== +;; P K T S D B I N T E R F A C E +;; +;; INTEGER, REAL, TEXT +;;====================================================================== +;; +;; spec +;; ( (tablename1 . (field1name L1 TYPE) +;; (field2name L2 TYPE) ... ) +;; (tablename2 ... )) +;; +;; Example: (tests (testname n TEXT) +;; (rundir r TEXT) +;; ... ) +;; +;; artifact keys are taken from the first letter, if that is not unique +;; then look at the next letter and so on +;; + +;; simplify frequent need to get one result with default +;; +(define (get-one db default qry . params) + (apply fold-row + car + default + db + qry + params)) + +(define (get-rows db qry . params) + (apply fold-row + cons + db + qry + params)) + +;; use this struct to hold the artifactspec and the db handle +;; +(defstruct artifactdb + (fname #f) + (artifactsdb-spec #f) + (artifactspec #f) ;; cache the artifactspec + (field-keys #f) ;; cache the field->key mapping (field1 . k1) ... + (key-fields #f) ;; cache the key->field mapping + (conn #f) + ) + +;; WARNING: There is a simplification in the artifactsdb spec w.r.t. artifactspec. +;; The field specs are the cdr of the table list - not a full +;; list. The extra list level in artifactspec is gratuitous and should +;; be removed. +;; +(define (artifactsdb-spec->artifactspec tables-spec) + (map (lambda (tablespec) + (list (car tablespec) + (map (lambda (field-spec) + (cons (car field-spec)(cadr field-spec))) + (cdr tablespec)))) + tables-spec)) + +(define (artifactsdb-open dbfname artifactsdb-spec) + (let* ((pdb (make-artifactdb)) + (dbexists (file-exists? dbfname)) + (db (open-database dbfname))) + (artifactdb-artifactsdb-spec-set! pdb artifactsdb-spec) + (artifactdb-artifactspec-set! pdb (artifactsdb-spec->artifactspec artifactsdb-spec)) + (artifactdb-fname-set! pdb dbfname) + (artifactdb-conn-set! pdb db) + (if (not dbexists) + (artifactsdb-init pdb)) + pdb)) + +(define (artifactsdb-init artifactsdb) + (let* ((db (artifactdb-conn artifactsdb)) + (artifactsdb-spec (artifactdb-artifactsdb-spec artifactsdb))) + ;; create a table for the artifacts themselves + (execute db "CREATE TABLE IF NOT EXISTS artifacts (id INTEGER PRIMARY KEY, zkey TEXT, record_id INTEGER, artifact TEXT);") + (for-each + (lambda (table) + (let* ((table-name (car table)) + (fields (cdr table)) + (stmt (conc "CREATE TABLE IF NOT EXISTS " + table-name + " (id INTEGER PRIMARY KEY," + (string-intersperse + (map (lambda (fieldspec) + (conc (car fieldspec) " " + (caddr fieldspec))) + fields) + ",") + ");"))) + (execute db stmt))) + artifactsdb-spec))) + +;; create artifact from the data and insert into artifacts table +;; +;; data is assoc list of (field . value) ... 
+;; tablename is a symbol matching the table name +;; +(define (artifactsdb-add-record artifactsdb tablename data #!optional (parent #f)) + (let*-values (((zkey artifact) (alist->artifact data (artifactdb-artifactspec artifactsdb) ptype: tablename))) + ;; have the data as alist so insert it into appropriate table also + (let* ((db (artifactdb-conn artifactsdb))) + ;; TODO: Address collisions + (execute db "INSERT INTO artifacts (zkey,artifact,record_id) VALUES (?,?,?);" + zkey artifact -1) + (let* (;; (artifactid (artifactsdb-artifactkey->artifactid artifactsdb artifactkey)) + (record-id (artifactsdb-insert artifactsdb tablename data))) + (execute db "UPDATE artifacts SET record_id=? WHERE zkey=?;" + record-id zkey) + )))) + +;; +(define (artifactsdb-insert artifactsdb tablename data) + (let* ((db (artifactdb-conn artifactsdb)) + (stmt (conc "INSERT INTO " tablename + " (" (string-intersperse (map conc (map car data)) ",") + ") VALUES ('" + ;; TODO: Add lookup of data type and do not + ;; wrap integers with quotes + (string-intersperse (map conc (map cdr data)) "','") + "');"))) + (print "stmt: " stmt) + (execute db stmt) + ;; lookup the record-id and return it + + )) + +(define (artifactsdb-close artifactsdb) + (finalize! (artifactdb-conn artifactsdb))) + +;; (let loop ((s "0")(n 0))(print s)(if (< n 5000)(loop (increment-string s)(+ n 1)))) + +;;====================================================================== +;; CARDS, MISC and UTIL +;;====================================================================== + +;; given string (likely multi-line) "dat" return shar1 hash +;; +(define (calc-sha1 instr) + (message-digest-string + (sha1-primitive) + instr)) + +;; given a single card return its type and value +;; +(define (card->type/value card) + (let ((ctype (substring card 0 1)) + (cval (substring card 2 (string-length card)))) + (values (string->symbol ctype) cval))) + +;;====================================================================== +;; SDAT procs +;; sdat is legacy/internal usage. Intention is to remove sdat calls from +;; the exposed calls. +;;====================================================================== + +;; sort list of cards +;; +(define-inline (sort-cards sdat) + (sort sdat string<=?)) + +;; artifact rules +;; 1. one card per line +;; 2. at least one card +;; 3. no blank lines + +;; given sdat, a list of cards return uuid, packet (as sdat) +;; +(define (add-z-card sdat) + (let* ((sorted-sdat (sort-cards sdat)) + (dat (string-intersperse sorted-sdat "\n")) + (uuid (calc-sha1 dat))) + (values + uuid + (conc + dat + "\nZ " + uuid)))) + +(define (check-artifact artifact) + (handle-exceptions + exn + #f ;; anything goes wrong - call it a crappy artifact + (let* ((sdat (string-split artifact "\n")) + (rdat (reverse sdat)) ;; reversed + (zdat (car rdat)) + (Z (cadr (string-split zdat))) + (cdat (string-intersperse (reverse (cdr rdat)) "\n"))) + (equal? Z (calc-sha1 cdat))))) + +;;====================================================================== +;; AARTIFACTs +;;====================================================================== + +;; convert a sdat (list of cards) to an alist +;; +(define (sdat->alist sdat) + (let loop ((hed (car sdat)) + (tal (cdr sdat)) + (res '())) + (let-values (( (ctype cval)(card->type/value hed) )) + ;; if this card is not one of the common ones tack it on to rem + (let* ((oldval (alist-ref ctype res)) + (newres (cons (cons ctype + (if oldval ;; list or string + (if (list? 
oldval) + (cons cval oldval) + (cons cval (list oldval))) + cval)) + res))) + (if (null? tal) + newres + (loop (car tal)(cdr tal) newres)))))) + +;;((aartifact (Z . "7de89c09ac024b3832c93e16cd78d11e2e28733b") <= this is a the alist +;; (t . "v1.63/tip/dev") +;; (c . "QUICKPATT") +;; (T . "runstart") +;; (P . "354eeb67120a921e3e33155ecab1b522ac10b6bd") +;; (D . "1488995096.0")) +;; (id . 8) +;; (group-id . 0) +;; (uuid . "7de89c09ac024b3832c93e16cd78d11e2e28733b") +;; (parent . "") +;; (artifact-type . "runstart") +;; (artifact . "D 1488995096.0\nP 354eeb67120a921e3e33155ecab1b522ac10b6bd\nT runstart\nc QUICKPATT\nt v1.63/tip/dev\nZ 7de89c09ac024b3832c93e16cd78d11e2e28733b")) +;; +;; artifactspec is alist of alists mapping types and nicekeys to keys +;; +;; '((posting . ((title . t) +;; (url . u) +;; (blurb . b))) +;; (comment . ((comment . c) +;; (score . s)))) + +;; DON'T USE? +;; +(define (get-value field dartifact . spec-in) + (if (null? spec-in) + (alist-ref field dartifact) + (let* ((spec (car spec-in)) + (aartifact (alist-ref 'aartifact dartifact))) ;; get the artifact alist + (if (and aartifact spec) + (let* ((ptype (alist-ref 'artifact-type dartifact)) + (pspec (alist-ref (string->symbol ptype) spec))) ;; do we have a spec for this type of artifact + (and pspec + (let* ((key (alist-ref field pspec))) + (and key (alist-ref key aartifact))))) + #f)))) + +;; convert a dartifact to a pure alist given a artifactspec +;; this flattens out the alist to include the data from +;; the queue database record +;; +(define (dartifact->alist dartifact artifactspec) + (let* ((aartifact (alist-ref 'aartifact dartifact)) + (artifact-type (or (alist-ref 'artifact-type dartifact) ;; artifact-type is from the database field artifact_type + (alist-ref 'T aartifact))) + (artifact-fields (alist-ref (string->symbol artifact-type) artifactspec)) + (rev-fields (if artifact-fields + (reverse-aspec artifact-fields) + '()))) + (append (map (lambda (entry) + (let* ((artifact-key (car entry)) + (new-key (or (alist-ref artifact-key rev-fields) artifact-key))) + `(,new-key . ,(cdr entry)))) + aartifact) + dartifact))) + +;; convert a list of dartifacts into a list of alists using artifact-spec +;; +(define (dartifacts->alists dartifacts artifact-spec) + (map (lambda (x) + (dartifact->alist x artifact-spec)) + dartifacts)) + +;; Generic flattener, make the tuple and artifact into a single flat alist +;; +;; qry-result-spec is a list of symbols corresponding to each field +;; +(define (flatten-all inlst artifactspec . qry-result-spec) + (map + (lambda (tuple) + (dartifact->alist + (apply dblst->dartifacts tuple qry-result-spec) + artifactspec)) + inlst)) + +;; call like this: +;; (construct-sdat 'a "a data" 'S "S data" ...) +;; returns list of cards +;; ( "A a value" "D 12345678900" ...) +;; +(define (construct-sdat . alldat) + (let ((have-D-card #f)) ;; flag + (if (even? (length alldat)) + (let loop ((type (car alldat)) + (data (cadr alldat)) + (tail (cddr alldat)) + (res '())) + (if (eq? type 'D)(set! have-D-card #t)) + (if (null? tail) + (if have-D-card ;; return the constructed artifact, add a D card if none found + (add-card res type data) + (add-card + (add-card res 'D (current-seconds)) + type data)) + (loop (car tail) + (cadr tail) + (cddr tail) + (add-card res type data)))) + #f))) ;; #f means it failed to create the sdat + +(define (construct-artifact . 
alldat) + (add-z-card + (apply construct-sdat alldat))) + +;;====================================================================== +;; CONVERTERS +;;====================================================================== + +(define (artifact->sdat artifact) + (map unescape-data (string-split artifact "\n"))) + +;; given a pure artifact return an alist +;; +(define (artifact->alist artifact #!key (artifactspec #f)) + (let ((sdat (cond + ((string? artifact) (artifact->sdat artifact)) + ((list? artifact) artifact) + (else #f)))) + (if artifact + (if artifactspec + (dartifact->alist (list (cons 'aartifact (sdat->alist sdat))) artifactspec) + (sdat->alist sdat)) + #f))) + +;; convert an alist to an sdat +;; in: '((a . "blah")(b . "foo")) +;; out: '("a blah" "b foo") +;; +(define (alist->sdat adat) + (map (lambda (dat) + (conc (car dat) " " (cdr dat))) + adat)) + +;; adat is the incoming alist, aspec is the mapping +;; from incoming key to the artifact key (usually one +;; letter to keep data tight) see the artifactspec at the +;; top of this file +;; +;; NOTE: alists can contain multiple instances of the same key (supported fine by artifacts) +;; but you (obviously I suppose) cannot use alist-ref to access those entries. +;; +(define (alist->artifact adat aspec #!key (ptype #f)(no-d #f)) + (let* ((artifact-type (or ptype + (alist-ref 'T adat) ;; can provide in the incoming alist + #f)) + (artifact-spec (if artifact-type ;; alist of external-key -> key + (or (alist-ref artifact-type aspec) '()) + (if (null? aspec) + '() + (cdar aspec)))) ;; default to first one if nothing specified + (new-alist (map (lambda (dat) + (let* ((key (car dat)) + (val (cdr dat)) + (newkey (or (alist-ref key artifact-spec) + key))) + (cons newkey (escape-data (conc val))))) ;; convert all incoming data (symbols, numbers etc.) to a string and then escape newlines. + adat)) + (new-with-type (if (alist-ref 'T new-alist) + new-alist + (cons `(T . ,artifact-type) new-alist))) + (with-d-card (if (or no-d ;; no timestamp wanted + (alist-ref 'D new-with-type)) + new-with-type + (cons `(D . ,(current-seconds)) + new-with-type)))) + (add-z-card + (alist->sdat with-d-card)))) + +;;====================================================================== +;; D B Q U E U E I N T E R F A C E +;;====================================================================== + +;; artifacts ( +;; id SERIAL PRIMARY KEY, +;; uuid TEXT NOT NULL, +;; parent_uuid TEXT default '', +;; artifact_type INTEGER DEFAULT 0, +;; group_id INTEGER NOT NULL, +;; artifact TEXT NOT NULL + +;; schema is list of SQL statements - can be used to extend db with more tables +;; +(define (open-queue-db dbpath dbfile #!key (schema '())) + (let* ((dbfname (conc dbpath "/" dbfile)) + (dbexists (if (file-exists? dbfname) #t (begin (create-directory dbpath #t) #f))) + (db (open-database dbfname))) + ;; (set-busy-handler! (dbi:db-conn db) (busy-timeout 10000)) + (if (not dbexists) ;; NOTE: In the archive we allow duplicates and other messiness. + (for-each + (lambda (stmt) + (execute db stmt)) + (cons "CREATE TABLE IF NOT EXISTS artifacts + (id INTEGER PRIMARY KEY, + group_id INTEGER NOT NULL, + uuid TEXT NOT NULL, + parent_uuid TEXT TEXT DEFAULT '', + artifact_type TEXT NOT NULL, + artifact TEXT NOT NULL, + processed INTEGER DEFAULT 0)" + schema))) ;; 0=not processed, 1=processed, 2... 
for expansion + db)) + +(define (add-to-queue db artifact uuid artifact-type parent-uuid group-id) + (execute db "INSERT INTO artifacts (uuid,parent_uuid,artifact_type,artifact,group_id) + VALUES(?,?,?,?,?);" ;; $1,$2,$3,$4,$5);" + uuid + (if parent-uuid parent-uuid "");; use null string as placeholder for no parent uuid. + (if artifact-type (conc artifact-type) "") + artifact + group-id)) + +;; given all needed parameters create a artifact and store it in the queue +;; procs is an alist that maps artifact-type to a function that takes a list of artifact params +;; in data and returns the uuid and artifact +;; +(define (create-and-queue conn procs artifact-type parent-uuid group-id data) + (let ((proc (alist-ref artifact-type procs))) + (if proc + (let-values (( (uuid artifact) (proc data) )) + (add-to-queue conn artifact uuid artifact-type parent-uuid group-id) + uuid) + #f))) + +;; given uuid get artifact, if group-id is specified use it (reduces probablity of +;; being messed up by a uuid collision) +;; +(define (lookup-by-uuid db artifact-uuid group-id) + (if group-id + (get-one db "SELECT artifact FROM artifacts WHERE group_id=? AND uuid=?;" group-id artifact-uuid) + (get-one db "SELECT artifact FROM artifacts WHERE uuid=?;" artifact-uuid))) + +;; find a packet by its id +;; +(define (lookup-by-id db id) + (get-one db "SELECT artifact FROM artifacts WHERE id=?;" id)) + + +;;====================================================================== +;; P R O C E S S P K T S +;;====================================================================== + +;; given a list of field values pulled from the queue db generate a list +;; of dartifact's +;; +(define (dblst->dartifacts lst . altmap) + (let* ((maplst (if (null? altmap) + '(id group-id uuid parent artifact-type artifact processed) + altmap)) + (res (map cons maplst lst))) ;; produces list of pairs, i.e an alist + (cons `(aartifact . ,(artifact->alist (alist-ref 'artifact res))) + res))) + +;; NB// ptypes is a list of symbols, '() or #f find all types +;; +(define (get-dartifacts db ptypes group-id parent-uuid #!key (uuid #f)) + (let* ((ptype-qry (if (and ptypes + (not (null? ptypes))) + (conc " IN ('" (string-intersperse (map conc ptypes) "','") "')") + (conc " LIKE '%' "))) + (rows (get-rows + db + (conc + "SELECT id,group_id,uuid,parent_uuid,artifact_type,artifact,processed FROM artifacts + WHERE artifact_type " ptype-qry " AND group_id=? + AND processed=0 " + (if parent-uuid (conc "AND parent_uuid='" parent-uuid "' ") "") + (if uuid (conc "AND uuid='" uuid "' ") "") + "ORDER BY id DESC;") + group-id))) + (map dblst->dartifacts (map vector->list rows)))) + +;; get N artifacts not yet processed for group-id +;; +(define (get-not-processed-artifacts db group-id artifact-type limit offset) + (map dblst->dartifacts + (map vector->list + (get-rows + db + "SELECT id,group_id,uuid,parent_uuid,artifact_type,artifact,processed FROM artifacts + WHERE artifact_type = ? AND group_id = ? AND processed=0 + LIMIT ? OFFSET ?;" + (conc artifact-type) ;; convert symbols to string + group-id + limit + offset + )))) + +;; given a uuid, get not processed child artifacts +;; +(define (get-related db group-id uuid) + (map dblst->dartifacts + (get-rows + db + "SELECT id,group_id,uuid,parent_uuid,artifact_type,artifact,processed FROM artifacts + WHERE parent_uuid=? AND group_id=? 
AND processed=0;" + uuid group-id))) + +;; generic artifact processor +;; +;; find all packets in group-id of type in ptypes and apply proc to artifactdat +;; +(define (process-artifacts conn group-id ptypes parent-uuid proc) + (let* ((artifacts (get-dartifacts conn ptypes group-id parent-uuid))) + (map proc artifacts))) + +;; criteria is an alist ((k . valpatt) ...) +;; - valpatt is a regex +;; - ptypes is a list of types (symbols expected) +;; match-type: 'any or 'all +;; +(define (find-artifacts db ptypes criteria #!key (processed #f)(match-type 'any)(artifact-spec #f)) ;; processed=#f, don't use, else use + (let* ((artifacts (get-dartifacts db ptypes 0 #f)) + (match-rules (lambda (artifactdat) ;; returns a list of matching rules + (filter (lambda (c) + ;; (print "c: " c) + (let* ((ctype (car c)) ;; card type + (rx (cdr c)) ;; card pattern + ;; (t (alist-ref 'artifact-type artifactdat)) + (artifact (alist-ref 'artifact artifactdat)) + (aartifact (artifact->alist artifact)) + (cdat (alist-ref ctype aartifact))) + ;; (print "cdat: " cdat) ;; " aartifact: " aartifact) + (if cdat + (string-match rx cdat) + #f))) + criteria))) + (res (filter (lambda (artifactdat) + (if (null? criteria) ;; looking for all artifacts + #t + (case match-type + ((any)(not (null? (match-rules artifactdat)))) + ((all)(eq? (length (match-rules artifactdat))(length criteria))) + (else + (print "ERROR: bad match type " match-type ", expecting any or all."))))) + artifacts))) + (if artifact-spec + (dartifacts->alists res artifact-spec) + res))) + +;; get descendents of parent-uuid +;; +;; NOTE: Should be doing something like the following: +;; +;; given a uuid, get not processed child artifacts +;; processed: +;; #f => get all +;; 0 => get not processed +;; 1 => get processed +;; +(define (get-ancestors db group-id uuid #!key (processed #f)) + (map dblst->dartifacts + (map vector->list + (get-rows + db + (conc + "SELECT id,group_id,uuid,parent_uuid,artifact_type,artifact,processed + FROM artifacts + WHERE uuid IN + (WITH RECURSIVE + tree(uuid,parent_uuid) + AS + ( + SELECT uuid, parent_uuid + FROM artifacts + WHERE uuid = ? + UNION ALL + SELECT t.uuid, t.parent_uuid + FROM artifacts t + JOIN tree ON t.uuid = tree.parent_uuid + ) + SELECT uuid FROM tree) + AND group_id=?" (if processed (conc " AND processed=" processed) "") ";") + uuid group-id)))) + +;; Untested +;; +(define (get-descendents db group-id uuid #!key (processed #f)) + (map dblst->dartifacts + (map vector->list + (get-rows + db + (conc + "SELECT id,group_id,uuid,parent_uuid,artifact_type,artifact,processed + FROM artifacts + WHERE uuid IN + (WITH RECURSIVE + tree(uuid,parent_uuid) + AS + ( + SELECT uuid, parent_uuid + FROM artifacts + WHERE uuid = ? + UNION ALL + SELECT t.uuid, t.parent_uuid + FROM artifacts t + JOIN tree ON t.parent_uuid = tree.uuid + ) + SELECT uuid FROM tree) + AND group_id=?" (if processed (conc " AND processed=" processed) "") ";") + uuid group-id)))) + +;; look up descendents based on given info unless passed in a list via inlst +;; +;; (define (get-last-descendent db group-id uuid #!key (processed #f)(inlst #f)) +;; (let ((descendents (or inlst (get-descendents db group-id uuid processed: processed)))) +;; (if (null? 
descendents) +;; #f +;; (last descendents)))) + +;;====================================================================== +;; A R C H I V E S - always to a sqlite3 db +;;====================================================================== + +;; open an archive db +;; path: archive-dir//month.db +;; +#;(define (archive-open-db archive-dir) + (let* ((curr-time (seconds->local-time (current-seconds))) + (dbpath (conc archive-dir "/" (time->string curr-time "%Y"))) + (dbfile (conc dbpath "/" (time->string curr-time "%m") ".db")) + (dbexists (if (file-exists? dbfile) #t (begin (create-directory dbpath #t) #f)))) + (let ((db (open-database dbfile))) + ;; (set-busy-handler! db (busy-timeout 10000)) + (if (not dbexists) ;; NOTE: In the archive we allow duplicates and other messiness. + (execute db "CREATE TABLE IF NOT EXISTS artifacts + (id INTEGER, + group_id INTEGER, + uuid TEXT, + parent_uuid TEXT, + artifact_type TEXT, + artifact TEXT, + processed INTEGER DEFAULT 0)")) + db))) + +;; turn on transactions! otherwise this will be painfully slow +;; +#;(define (write-archive-artifacts src-db db artifact-ids) + (let ((artifacts (get-rows + src-db + (conc "SELECT id,group_id,uuid,parent_uuid,artifact_type,artifact FROM artifacts WHERE id IN (" + (string-intersperse (map conc artifact-ids) ",") ")")))) + ;; (dbi:with-transaction + ;; db + (lambda () + (for-each + (lambda (artifact) + (apply execute db "INSERT INTO artifacts (id,group_id,uuid,parent_uuid,artifact_type,artifact) + VALUES (?,?,?,?,?,?)" + artifact)) + artifacts)))) ;; ) + +;; given a list of uuids and lists of uuids move all to +;; the sqlite3 db for the current archive period +;; +#;(define (archive-artifacts conn artifact-ids archive-dir) + (let ((db (archive-open-db archive-dir))) + (write-archive-artifacts conn db artifact-ids) + (finalize! db)) + ;; (pg:with-transaction + ;; conn + ;; (lambda () + (for-each + (lambda (id) + (get-one + conn + "DELETE FROM artifacts WHERE id=?" id)) + artifact-ids)) ;; )) + +;; given a list of ids mark all as processed +;; +(define (mark-processed conn artifact-ids) + ;; (pg:with-transaction + ;; conn + ;; (lambda () + (for-each + (lambda (id) + (get-one + conn + "UPDATE artifacts SET processed=1 WHERE id=?;" id)) + artifact-ids)) ;; x)) + +;; a generic artifact getter, gets from the artifacts db +;; +(define (get-artifacts conn ptypes) + (let* ((ptypes-str (if (null? ptypes) + "" + (conc " WHERE artifact_type IN ('" (string-intersperse ptypes ",") "') "))) + (qry-str (conc "SELECT id,group_id,uuid,parent_uuid,artifact_type,artifact,processed FROM artifacts" ptypes-str))) + (map vector->list (get-rows conn qry-str)))) + +;; make a report of the artifacts in the db +;; ptypes of '() gets all artifacts +;; display-fields +;; +(define (make-report dest conn artifactspec display-fields . ptypes) + (let* (;; (conn (dbi:db-conn (s:db))) + (all-rows (get-artifacts conn ptypes)) + (all-artifacts (flatten-all + all-rows + artifactspec + 'id 'group-id 'uuid 'parent 'artifact-type 'artifact 'processed)) + (by-uuid (let ((ht (make-hash-table))) + (for-each + (lambda (artifact) + (let ((uuid (alist-ref 'uuid artifact))) + (hash-table-set! ht uuid artifact))) + all-artifacts) + ht)) + (by-parent (let ((ht (make-hash-table))) + (for-each + (lambda (artifact) + (let ((parent (alist-ref 'parent artifact))) + (hash-table-set! 
ht parent (cons artifact (hash-table-ref/default ht parent '()))))) + all-artifacts) + ht)) + (oup (if dest (open-output-file dest) (current-output-port)))) + + (with-output-to-port + oup + (lambda () + (print "digraph megatest_state_status { + // ranksep=0.05 + rankdir=LR; + node [shape=\"box\"]; +") + ;; first all the names + (for-each + (lambda (artifact) + (let* ((uuid (alist-ref 'uuid artifact)) + (shortuuid (substring uuid 0 4)) + (type (alist-ref 'artifact-type artifact)) + (processed (alist-ref 'processed artifact))) + + (print "\"" uuid "\" [label=\"" shortuuid ", (" + type ", " + (if processed "processed" "not processed") ")") + (for-each + (lambda (key-field) + (let ((val (alist-ref key-field artifact))) + (if val + (print key-field "=" val)))) + display-fields) + (print "\" ];"))) + all-artifacts) + ;; now for parent-child relationships + (for-each + (lambda (artifact) + (let ((uuid (alist-ref 'uuid artifact)) + (parent (alist-ref 'parent artifact))) + (if (not (equal? parent "")) + (print "\"" parent "\" -> \"" uuid"\";")))) + all-artifacts) + + (print "}") + )) + (if dest + (begin + (close-output-port oup) + (system "dot -Tpdf out.dot -o out.pdf"))) + + )) + +;;====================================================================== +;; Read ref artifacts into a vector < laststr hash table > +;;====================================================================== + + + +;;====================================================================== +;; Read/write packets to files (convience functions) +;;====================================================================== + +;; write alist to a artifact file +;; +(define (write-alist->artifact targdir dat #!key (artifactspec '())(ptype #f)) + (let-values (((uuid artifact)(alist->artifact dat artifactspec ptype: ptype))) + (with-output-to-file (conc targdir "/" uuid ".artifact") + (lambda () + (print artifact))) + uuid)) ;; return the uuid + +;; read artifact into alist +;; +(define (read-artifact->alist artifact-file #!key (artifactspec #f)) + (artifact->alist (with-input-from-file + artifact-file + read-string) + artifactspec: artifactspec)) + +;;====================================================================== +;; File utils, stuff useful for file management +;;====================================================================== + +(define (file-get-sha1 fname) + (let* ((sha1-res (run/strings (sha1sum ,fname)))) + (car (string-split (car sha1-res))))) + +(define (link-or-copy srcf destf) + (or (handle-exceptions + exn + #f + (file-link srcf destf)) + (if (file-exists? destf) + (print "NOTE: destination already exists, skipping copy.") + (copy-file srcf destf)))) + +;; (define (files-diff file1 file2) +;; (let* ((diff-res (with-input-from-port +;; (run/port (diff "-q" ,file1 ,file2)) +;; (lambda () +;; (let* ((res (read-line))) +;; (read-lines) +;; res))))) +;; (car (string-split sha1-res)))) +;; + + +(define (check-same file1 file2) + (cond + ((not (and (file-exists? file1)(file-exists? file2))) #f) + ((not (equal? (file-size file1)(file-size file2))) #f) + (else + (let-values (((status run-ok process-id) + (run (diff "-q" ,file1 ,file2)))) + status)))) + +(define *pcache* (make-hash-table)) +(define (get-device dir) + (let ((indat (or (hash-table-ref/default *pcache* dir #f) + (let* ((inp (open-input-pipe (conc "df --output=source \""dir"\""))) + (res (read-lines inp))) + (close-input-port inp) + (hash-table-set! *pcache* dir res) + res)))) + (cadr indat))) + +(define (same-partition? dir1 dir2) + (equal? 
(get-device dir1)(get-device dir2))) + +(define (link-if-same-partition file1 file2) + (let* ((dir1 (pathname-directory file1)) + (dir2 (pathname-directory file2)) + (f1 (pathname-file file1)) + (f2 (pathname-file file2))) + (if (same-partition? dir1 dir2) + (let* ((tmpname (conc "."f2"-"(current-seconds)))) + ;; this steps needs to be executed as actual user + (move-file file2 (conc dir1 "/" tmpname)) + (file-link file1 file2) + (delete-file (conc dir1 "/" tmpname)))))) + +(define (uuid-first-two-letters sha1sum) + (substring sha1sum 0 2)) + +(define (uuid-remaining-letters sha1sum) + (let ((slen (string-length sha1sum))) + (substring sha1sum 2 slen))) + +(define (archive-dest destd sha1sum) + (let* ((subdir (uuid-first-two-letters sha1sum)) ;; (substring sha1sum 0 2)) + ;; (slen (string-length sha1sum)) + (rem sha1sum #;(uuid-remaining-letters sha1sum)) ;; (substring sha1sum 3 slen)) + (full-dest-dir (conc destd"/"subdir)) + (full-dest-file (conc full-dest-dir"/"rem))) + (if (not (directory-exists? full-dest-dir)) + (create-directory full-dest-dir #t)) + full-dest-file)) + +(define (write-to-archive data destd #!optional (nextnum #f)) + (let* ((sha1sum (calc-sha1 data)) + (full-dest (conc (archive-dest destd sha1sum) + (if nextnum (conc "."nextnum) "")))) + (if (file-exists? full-dest) + (if (equal? (string-intersperse (with-input-from-file full-dest read-lines) "\n") + data) + (begin + ;; (print "INFO: data already exists in "full-dest" and is identical") + sha1sum) + (let ((nextnum (if nextnum (+ nextnum 1) 0))) + (print "WARN: data already exists in "full-dest" but is different! Trying again...") + (write-to-archive data destd nextnum))) + (begin + (with-output-to-file + full-dest + (lambda () + (print data))) + sha1sum)))) ;; BUG? Does print munge data? + +;; copy srcf with sha1sum aabc... to aa/bc... +;; +(define (archive-copy srcf destd sha1sum) + (let* ((full-dest-file (archive-dest destd sha1sum))) + (let loop ((trynum 0)) + (let ((dest-name (if (> trynum 0) + (conc full-dest-file"-"trynum) + full-dest-file))) + (cond + ((not (file-exists? srcf)) #f) ;; this should be an error? + ((and (file-exists? srcf) + (file-exists? dest-name)) + (if (check-same srcf dest-name) + (link-if-same-partition dest-name srcf) + (loop (+ trynum 1)))) ;; collisions are rare, this protects against them + ((not (file-exists? dest-name)) + (link-or-copy srcf dest-name)) + (else #f)))))) + +;; multi-glob +(define (multi-glob globstrs inpath) + ;; (print "multi-glob: "globstrs", "inpath) + (if (equal? inpath "") + globstrs + (let* ((parts (string-split inpath "/" #t)) + (nextpart (car parts)) + (remaining (string-intersperse (cdr parts) "/"))) + (if (and (equal? nextpart "") ;; this must be a leading / meaning root directory + (null? globstrs)) + (multi-glob '("/") remaining) + (begin + ;; (print "nextpart="nextpart", remaining="remaining) + (apply append + (map (lambda (gstr) + (let* ((pathstr (conc gstr"/"nextpart)) + (pathstrs (glob pathstr))) + ;; (print "pathstr="pathstr) + (multi-glob pathstrs remaining))) + globstrs))))))) + + +;; perm[/user:group]: +;; DDD - octal perm (future expansion) +;; - - use umask/defacto perms (i.e. don't actively do anything) +;; x - mark as executable +;; +;; Cards: +;; file: f perm fname +;; directory: d perm fname artifactid +;; link: l perm lname destpath +;; +;; NOTE: cards are kept as (C . "value") +;; +;; given a directory path, ignore list and artifact store (hash-table): +;; 1. create sha1 tree at dest (e.g. aa/b3a7 ...) +;; 2. 
create artifact for each dir +;; - cards for all files +;; - cards for files that are symlinks or executables +;; 3. return (artifactid . artifact) +;; +;; NOTES: +;; Use destdir of #f to not create sha1 tree +;; Hard links will be used if srcdir and destdir appear to be same partion +;; +;; (alist->artifact adat aspec #!key (ptype #f)) +;; +;; +;; (load "../../artifacts/artifacts.scm")(import big-chicken srfi-69 artifacts)(define dirdat (make-hash-table)) +;; (capture-dir ".." ".." "/tmp/junk" '() dirdat) +;; +;; [procedure] (file-type FILE [LINK [ERROR]]) +;; Returns the file-type for FILE, which should be a filename, a file-descriptor or a port object. If LINK is given and true, symbolic-links are not followed: +;; +;; regular-file +;; directory +;; fifo +;; socket +;; symbolic-link +;; character-device +;; block-device +;; Note that not all types are supported on every platform. If ERROR is given and false, then file-type returns #f if the file does not exist; otherwise, it signals an error. +;; +;; +(define (capture-dir curr-dir src-dir dest-dir ignore-list artifacts all-seen) + (let* ((dir-dat (directory-fold + (lambda (fname res) ;; res is a list of artifact cards + (let* ((fullname (conc curr-dir"/"fname))) + ;; (print "INFO: processing "fullname) + (if (hash-table-ref/default all-seen fullname #f) ;; something circular going on + (begin + (print "WARNING: possible circular link(s) "fullname) + res) + (let* ((ftype (file-type fullname #t #f))) + (hash-table-set! all-seen fullname ftype) + (cons + (case ftype ;; get the card + ((directory) ;; (directory? fullname) + (let* ((new-curr-dir (conc curr-dir"/"fname)) + (new-src-dir (conc src-dir"/"fname))) + (let* ((dir-dat (capture-dir new-curr-dir new-src-dir + dest-dir ignore-list artifacts all-seen)) + (a-id (car dir-dat)) + (artf (cdr dir-dat))) + (hash-table-set! artifacts a-id artf) + (cons 'd (conc "- "a-id" "fname))))) ;; the card + ((symbolic-link) ;; (symbolic-link? fullname) + (let ((ldest (read-symbolic-link fullname))) + (cons 'l (conc "- "fname"/"ldest)))) ;; delimit link name from dest with / + ((regular-file) ;; must be a file + (let* ((start (current-seconds)) + (sha1sum (file-get-sha1 fullname)) + (perms (if (file-executable? fullname) "x" "-"))) + (let ((runtime (- (current-seconds) start))) + (if (> runtime 1) + (print "INFO: file "fullname" took "runtime" seconds to calculate sha1."))) + (if dest-dir + (archive-copy fullname dest-dir sha1sum)) + (cons 'f (conc perms " "sha1sum" "fname)))) + (else + (print "WARNING: file "fullname" of type "ftype" is NOT supported and will converted to empty file.") + (let* ((sha1sum (write-to-archive "" dest-dir))) + (cons 'f (conc "- "sha1sum" "fname))))) + res))))) + '() src-dir #:dotfiles? #t))) ;; => (values srcdir_artifact sub_artifacts_list) + ;; (print "dir-dat: " dir-dat) + (let-values (((a-id artf) + (alist->artifact dir-dat '() ptype: 'd no-d: #t))) + (hash-table-set! artifacts a-id artf) + (cons a-id artf)))) + +;; maybe move this into artifacts? 
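+;; rough sketch of the content-addressed store used by capture-dir above (store
+;; path and file names are made up for illustration): archive-dest files a blob
+;; whose sha1 is "aab3a7..." under <destd>/aa/aab3a7..., so
+;;
+;;   (write-to-archive "hello world\n" "/tmp/store")                     ;; stores raw data, returns its sha1
+;;   (archive-copy "notes.txt" "/tmp/store" (file-get-sha1 "notes.txt")) ;; links or copies an existing file
+;;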
+;; +;; currently moves *.artifact into a bundle and moves the artifacts into attic +;; future: move artifacts under 1 meg in size into bundle up to 10 meg in size +;; +(define (artifact-rollup bundle-dir) ;; cfg storepath) + ;; (let* ((bundle-dir (calc-bundle-dir cfg storepath))) + (let* ((bundles (glob (conc bundle-dir"/*.bundle"))) + (artifacts (glob (conc bundle-dir"/*.artifact")))) + (if (> (length artifacts) 30) ;; rollup only if > 30 artifacts + ;; if we have unbundled artifacts, bundle them + (let* ((ht (read-artifacts-into-hash #f artifacts: artifacts)) + (bundle (hash-of-artifacts->bundle ht))) + (write-bundle bundle bundle-dir) + (create-directory (conc bundle-dir"/attic") #t) + (for-each + (lambda (full-fname) + (let* ((fname (pathname-strip-directory full-fname)) + (newname (conc bundle-dir"/attic/"fname))) + (move-file full-fname newname #t))) + artifacts) + (conc "bundled "(length artifacts))) + "not enough artifacts to bundle"))) + +;; if destfile is a directory then calculate the sha1sum of the bundle and store it +;; by .bundle +;; +;; incoming dat is pure text (bundle already sorted and appended: +;; +(define (write-bundle bdl-data destdir) + (let* ((bdl-uuid (calc-sha1 bdl-data))) + (with-output-to-file + (conc destdir"/"bdl-uuid".bundle") + (lambda () + (print bdl-data))))) + +;; minimal (and hopefully fast) artifact reader +;; TODO: Add check of shar sum. +;; +(define (minimal-artifact-read fname) + (let* ((indat (with-input-from-file fname read-lines))) + (if (null? indat) + (values #f (conc "did not find an artifact in "fname)) + (let* ((zcard (last indat)) + (cardk (substring zcard 0 1)) + (cardv (substring zcard 2 (string-length zcard)))) + (if (equal? cardk "Z") + (values cardv (string-intersperse indat "\n")) + (values #f (conc fname" is not a valid artifact"))))))) + +;; read artifacts from directory into hash +;; NOTE: support for max-count not implemented yet +;; +(define (read-artifacts-into-hash dir #!key (artifacts #f) (max-count #f)(ht #f)) + (let* ((artifacts (or artifacts + (glob (conc dir"/*.artifact")))) + (ht (or ht (make-hash-table)))) + (for-each + (lambda (fname) + (let-values (((uuid afct) + (minimal-artifact-read fname))) + (hash-table-set! 
ht uuid afct))) + artifacts) + ht)) + +;; ht is: +;; uuid => artifact text +;; use write-bundle to put result into a bundle file +;; +(define (hash-of-artifacts->bundle ht) + (fold (lambda (k res) + (let* ((v (hash-table-ref ht k))) + (if res + (conc res"\n"v) + v))) + #f + (sort (hash-table-keys ht) string<=?))) + +;; minimal artifact to alist +;; +(define (minimal-artifact->alist afact) + (let* ((lines (string-split afact "\n"))) + (map (lambda (a) + (let* ((key (string->symbol (substring a 0 1))) + (sl (string-length a)) + (val (if (> sl 2) + (substring a 2 sl) + ""))) + (cons key val))) + lines))) + +;; some accessors for common cards +(define (afact-get-D afact) + (let ((dval (alist-ref 'D afact))) + (if dval + (string->number dval) + #f))) + +(define (afact-get-T afact) ;; get the artifact type as a symbol + (let ((val (alist-ref 'T afact))) + (if val + (string->symbol val) + val))) + +(define (afact-get-Z afact) + (alist-ref 'Z afact)) + +(define (afact-get afact key default) + (or (alist-ref key afact) + default)) + +(define (afact-get-number/default afact key default) + (let ((val (alist-ref key afact))) + (if val + (or (string->number val) default) ;; seems wrong + default))) + +;; bundles are never big and reading into memory for processing is fine +;; +(define (read-bundle srcfile #!optional (mode 'uuid-raw)) + (let* ((indat (with-input-from-file srcfile read-lines))) + (let loop ((tail indat) + (dat '()) ;; artifact being extracted + (res '())) ;; list of artifacts + (if (null? tail) + (reverse res) ;; last dat should be empty list + (let* ((curr-line (car tail))) + (let-values (((ctype cdata) + (card->type/value curr-line))) + (let* ((is-z-card (eq? 'Z ctype)) + (new-dat (cons (case mode + ((uuid-raw) curr-line) + (else (cons ctype cdata))) + dat))) + (if is-z-card + (loop (cdr tail) ;; done with this artifact + '() + (cons (case mode + ((uuid-raw) (cons cdata (string-intersperse (reverse new-dat) "\n"))) + (else (reverse new-dat))) + res)) + (loop (cdr tail) + new-dat + res))))))))) + + +;; find all .bundle and .artifacts files in bundle-dir +;; and inport them into sqlite handle adb +;; +(define (refresh-artifacts-db adb bundle-dir) + (let* ((bundles (glob (conc bundle-dir"/*.bundle"))) + (artifacts (glob (conc bundle-dir"/*.artifact"))) + (uuids (get-all-uuids adb 'hash))) + (with-transaction + adb + (lambda () + (for-each + (lambda (bundle-file) + ;; (print "Importing artifacts from "bundle-file) + (let* ((bdat (read-bundle bundle-file 'uuid-raw)) + (count 0) + (inc (lambda ()(set! count (+ count 1))))) + (for-each + (lambda (adat) + (match + adat + ((zval . artifact) + (if (not (hash-table-exists? uuids zval)) + (begin + ;; (print "INFO: importing new artifact "zval" from bundle "bundle-file) + (inc) + (execute adb "INSERT INTO artifacts (uuid, artifact) VALUES (?,?);" + zval artifact) + (hash-table-set! uuids zval #t)))) + (else + (print "ERROR: Bad artifact data "adat)))) + bdat) + (print "INFO: imported "count" artifacts from "bundle-file))) + bundles) + (for-each + (lambda (artifact-file) + ;; (print "Importing artifact from "artifact-file) + (let-values (((uuid artifact) (minimal-artifact-read artifact-file))) + (if uuid + (if (not (hash-table-exists? uuids uuid)) + (begin + ;; (print "INFO: importing new artifact "uuid" from "artifact-file) + (execute adb "INSERT INTO artifacts (uuid, artifact) VALUES (?,?);" + uuid artifact) + (hash-table-set! 
uuids uuid #t))) + (print "Bad artifact in "artifact-file)))) + artifacts))))) + +;;====================================================================== +;; Artifacts db cache +;;====================================================================== + +;; artifacts +;; id SERIAL PRIMARY KEY, +;; uuid TEXT NOT NULL, +;; artifact TEXT NOT NULL +;; +;; parents +;; id INTEGER REFERENCES artids.id, -- +;; parent_id REFERENCES artids.id +;; +;; schema is list of SQL statements - can be used to extend db with more tables +;; +(define (open-artifacts-db dbpath dbfile #!key (schema '())) + (let* ((dbfname (conc dbpath "/" dbfile)) + (dbexists (if (file-exists? dbfname) #t (begin (create-directory dbpath #t) #f))) + (adb (open-database dbfname))) + (set-busy-handler! adb (make-busy-timeout 10000)) + (execute adb "PRAGMA synchronous = 0;") + (if (not dbexists) + (with-transaction + adb + (lambda () + (for-each + (lambda (stmt) + (execute adb stmt)) + (append `("CREATE TABLE IF NOT EXISTS artifacts + (id INTEGER PRIMARY KEY, + uuid TEXT NOT NULL, + artifact TEXT NOT NULL)" + + "CREATE TABLE IF NOT EXISTS parents + (id INTEGER REFERENCES artifacts(id) NOT NULL, + parent_id INTEGER REFERENCES artifacts(id) NOT NULL)") + schema))))) + adb)) + +(define (generate-year-month-name #!optional (seconds #f)) + (let* ((curr-time (seconds->local-time (or seconds (current-seconds))))) + (time->string curr-time "%Y%m"))) + +;; I don't like this function. TODO: remove the +;; mode and option to return ht. Use instead the +;; get-all-artifacts below +;; +(define (get-all-uuids adb #!optional (mode #f)) + (let* ((res (fold-row + (lambda (res uuid) + (cons uuid res)) + '() + adb + "SELECT uuid FROM artifacts;"))) + (case mode + ((hash) + (let* ((ht (make-hash-table))) + (for-each + (lambda (uuid) + (hash-table-set! ht uuid #t)) + res) + ht)) + (else res)))) + +;; returns raw artifacts (i.e. NOT alists but instead plain text) +(define (get-all-artifacts adb) + (let* ((ht (make-hash-table))) + (for-each-row + (lambda (id uuid artifact) + (hash-table-set! ht uuid `(,id ,uuid ,artifact))) + adb + "SELECT id,uuid,artifact FROM artifacts;") + ht)) + +;; given a bundle-dir copy or create to /tmp and open +;; the YYMM.db file and hand the handle to the given proc +;; NOTE: we operate in /tmp/ to accomodate users on NFS +;; where slamming Unix locks at an NFS filer can cause +;; locking fails. Eventually this /tmp behavior will be +;; configurable. +;; +(define (with-todays-adb bundle-dir proc) + (let* ((dbname (conc (generate-year-month-name) ".db")) + (destname (conc bundle-dir"/"dbname)) + (tmparea (conc "/tmp/"(current-user-name)"-"(calc-sha1 bundle-dir))) + (tmpname (conc tmparea"/"dbname)) + (lockfile (conc destname".update-in-progress"))) + ;; (print "with-todays-adb, bundle-dir: "bundle-dir", dbname: "dbname", destname: "destname",\n tmparea: " tmparea", lockfile: "lockfile) + (if (not (file-exists? tmparea))(create-directory tmparea #t)) + (let loop ((count 0)) + (if (file-exists? lockfile) + (if (< count 30) ;; aproximately 30 seconds + (begin + (sleep 1) + (loop (+ 1 count))) + (print "ERROR: "lockfile" exists, proceeding anyway")) + (if (file-exists? destname) + (begin + (copy-file destname tmpname #t) + (copy-file destname lockfile #t))))) + (let* ((adb (open-artifacts-db tmparea dbname)) + (res (proc adb))) + (finalize! 
adb) + (copy-file tmpname destname #t) + (delete-file* lockfile) + res))) + +) ;; module artifacts + +;; ATTIC + ADDED artifacts/artifacts.setup Index: artifacts/artifacts.setup ================================================================== --- /dev/null +++ artifacts/artifacts.setup @@ -0,0 +1,11 @@ +;; Copyright 2007-2017, Matthew Welland. +;; +;; This program is made available under the GNU GPL version 2.0 or +;; greater. See the accompanying file COPYING for details. +;; +;; This program is distributed WITHOUT ANY WARRANTY; without even the +;; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +;; PURPOSE. + +;;;; pkts.setup +(standard-extension 'pkts "1.0") ADDED artifacts/artifactsrec.scm Index: artifacts/artifactsrec.scm ================================================================== --- /dev/null +++ artifacts/artifactsrec.scm @@ -0,0 +1,196 @@ +(define-syntax define-record-type + (syntax-rules () + ((define-record-type type + (constructor constructor-tag ...) + predicate + (field-tag accessor . more) ...) + (begin + (define type + (make-record-type 'type '(field-tag ...))) + (define constructor + (record-constructor type '(constructor-tag ...))) + (define predicate + (record-predicate type)) + (define-record-field type field-tag accessor . more) + ...)))) + +; An auxilliary macro for define field accessors and modifiers. +; This is needed only because modifiers are optional. + +(define-syntax define-record-field + (syntax-rules () + ((define-record-field type field-tag accessor) + (define accessor (record-accessor type 'field-tag))) + ((define-record-field type field-tag accessor modifier) + (begin + (define accessor (record-accessor type 'field-tag)) + (define modifier (record-modifier type 'field-tag)))))) + +; Record types + +; We define the following procedures: +; +; (make-record-type ) -> +; (record-constructor ) -> +; (record-predicate ) -> +; (record-accessor ) -> +; (record-modifier ) -> +; where +; ( ...) -> +; ( ) -> +; ( ) -> +; ( ) -> + +; Record types are implemented using vector-like records. The first +; slot of each record contains the record's type, which is itself a +; record. + +(define (record-type record) + (record-ref record 0)) + +;---------------- +; Record types are themselves records, so we first define the type for +; them. Except for problems with circularities, this could be defined as: +; (define-record-type :record-type +; (make-record-type name field-tags) +; record-type? +; (name record-type-name) +; (field-tags record-type-field-tags)) +; As it is, we need to define everything by hand. + +(define :record-type (make-record 3)) +(record-set! :record-type 0 :record-type) ; Its type is itself. +(record-set! :record-type 1 ':record-type) +(record-set! :record-type 2 '(name field-tags)) + +; Now that :record-type exists we can define a procedure for making more +; record types. + +(define (make-record-type name field-tags) + (let ((new (make-record 3))) + (record-set! new 0 :record-type) + (record-set! new 1 name) + (record-set! new 2 field-tags) + new)) + +; Accessors for record types. + +(define (record-type-name record-type) + (record-ref record-type 1)) + +(define (record-type-field-tags record-type) + (record-ref record-type 2)) + +;---------------- +; A utility for getting the offset of a field within a record. + +(define (field-index type tag) + (let loop ((i 1) (tags (record-type-field-tags type))) + (cond ((null? tags) + (error "record type has no such field" type tag)) + ((eq? 
tag (car tags)) + i) + (else + (loop (+ i 1) (cdr tags)))))) + +;---------------- +; Now we are ready to define RECORD-CONSTRUCTOR and the rest of the +; procedures used by the macro expansion of DEFINE-RECORD-TYPE. + +(define (record-constructor type tags) + (let ((size (length (record-type-field-tags type))) + (arg-count (length tags)) + (indexes (map (lambda (tag) + (field-index type tag)) + tags))) + (lambda args + (if (= (length args) + arg-count) + (let ((new (make-record (+ size 1)))) + (record-set! new 0 type) + (for-each (lambda (arg i) + (record-set! new i arg)) + args + indexes) + new) + (error "wrong number of arguments to constructor" type args))))) + +(define (record-predicate type) + (lambda (thing) + (and (record? thing) + (eq? (record-type thing) + type)))) + +(define (record-accessor type tag) + (let ((index (field-index type tag))) + (lambda (thing) + (if (and (record? thing) + (eq? (record-type thing) + type)) + (record-ref thing index) + (error "accessor applied to bad value" type tag thing))))) + +(define (record-modifier type tag) + (let ((index (field-index type tag))) + (lambda (thing value) + (if (and (record? thing) + (eq? (record-type thing) + type)) + (record-set! thing index value) + (error "modifier applied to bad value" type tag thing))))) + +Records + +; This implements a record abstraction that is identical to vectors, +; except that they are not vectors (VECTOR? returns false when given a +; record and RECORD? returns false when given a vector). The following +; procedures are provided: +; (record? ) -> +; (make-record ) -> +; (record-ref ) -> +; (record-set! ) -> +; +; These can implemented in R5RS Scheme as vectors with a distinguishing +; value at index zero, providing VECTOR? is redefined to be a procedure +; that returns false if its argument contains the distinguishing record +; value. EVAL is also redefined to use the new value of VECTOR?. + +; Define the marker and redefine VECTOR? and EVAL. + +(define record-marker (list 'record-marker)) + +(define real-vector? vector?) + +(define (vector? x) + (and (real-vector? x) + (or (= 0 (vector-length x)) + (not (eq? (vector-ref x 0) + record-marker))))) + +; This won't work if ENV is the interaction environment and someone has +; redefined LAMBDA there. + +(define eval + (let ((real-eval eval)) + (lambda (exp env) + ((real-eval `(lambda (vector?) ,exp)) + vector?)))) + +; Definitions of the record procedures. + +(define (record? x) + (and (real-vector? x) + (< 0 (vector-length x)) + (eq? (vector-ref x 0) + record-marker))) + +(define (make-record size) + (let ((new (make-vector (+ size 1)))) + (vector-set! new 0 record-marker) + new)) + +(define (record-ref record index) + (vector-ref record (+ index 1))) + +(define (record-set! record index value) + (vector-set! record (+ index 1) value)) ADDED artifacts/tests/run.scm Index: artifacts/tests/run.scm ================================================================== --- /dev/null +++ artifacts/tests/run.scm @@ -0,0 +1,139 @@ +(use test) + +;; (use (prefix pkts pkts:)) +(use pkts (prefix dbi dbi:)) +;; (use trace)(trace sdat->alist pkt->alist) + +(if (file-exists? 
"queue.db")(delete-file "queue.db")) + +(test-begin "pkts and pkt archives") + +;;====================================================================== +;; Basic pkt creation, parsing and conversion routines +;;====================================================================== + +(test-begin "basic packets") +(test #f '(A "This is a packet") (let-values (((t v) + (card->type/value "A This is a packet"))) + (list t v))) +(test #f "A A\nZ 664449e7299e0065a3e25c138ccef2df13ba291e" + (let-values (((uuid res) + (add-z-card '("A A")))) + res)) +(test #f '("CC C++" "D 1486332719.0" "a A" "b C")(sort (construct-sdat 'b "C" 'a "A" 'CC "C++" 'D 1486332719.0) + string<=?)) +(define pkt-example #f) +(test #f "CC C++\nD 1486332719.0\na A\nb C\nZ 263eb3b6193de7fe65b1ded5bcda513e8b4d6b84" + (let-values (((uuid res) + (construct-pkt 'b "C" 'a "A" 'CC "C++" 'D 1486332719.0))) + (set! pkt-example (cons uuid res)) + res)) +(test-end "basic packets") + +;;====================================================================== +;; Sqlite and postgresql based queue of pkts +;;====================================================================== + +(test-begin "pkt queue") +(define db #f) +(test #f 'sqlite3 (let ((dbh (open-queue-db "." "queue.db"))) + (set! db dbh) + (dbi:db-dbtype dbh))) +(test #f (cdr pkt-example) + (begin + (add-to-queue db (cdr pkt-example)(car pkt-example) 'basic #f 0) + (lookup-by-uuid db (car pkt-example) 0))) +(test #f (cdr pkt-example) + (lookup-by-id db 1)) +(test #f 1 (length (find-pkts db '(basic) '()))) + +(test-end "pkt queue") + + +;;====================================================================== +;; Process groups of pkts +;;====================================================================== + +(test-begin "lists of packets") +(test #f '((apkt . #f) (id . 1) (group-id . 2) (uuid . 3) (parent . 4) (pkt-type . 5)) ;; ((id . 1) (group-id . 2) (uuid . 3) (parent . 4) (pkt-type . 5)) + (dblst->dpkts '(1 2 3 4 5))) +(test #f '(((apkt (Z . "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (b . "C") (a . "A") (D . "1486332719.0") (C . " C++")) (id . 1) (group-id . 0) (uuid . "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (parent . "") (pkt-type . "basic") (pkt . "CC C++\nD 1486332719.0\na A\nb C\nZ 263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (processed . 0))) + ;; '(((apkt (Z . "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (b . "C") (a . "A") (D . "1486332719.0") (C . " C++")) (id . 1) (group-id . 0) (uuid . "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (parent . "") (pkt-type . "basic") (pkt . "CC C++\nD 1486332719.0\na A\nb C\nZ 263eb3b6193de7fe65b1ded5bcda513e8b4d6b84"))) + ;; '(((id . 1) (group-id . 0) (uuid . "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (parent . "") (pkt-type . "basic") (pkt . "CC C++\nD 1486332719.0\na A\nb C\nZ 263eb3b6193de7fe65b1ded5bcda513e8b4d6b84"))) + (get-dpkts db '(basic) 0 #f)) +(test #f '(((apkt (Z . "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (b . "C") (a . "A") (D . "1486332719.0") (C . " C++")) (id . 1) (group-id . 0) (uuid . "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (parent . "") (pkt-type . "basic") (pkt . "CC C++\nD 1486332719.0\na A\nb C\nZ 263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (processed . 0))) + ;; '(((apkt (Z . "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (b . "C") (a . "A") (D . "1486332719.0") (C . " C++")) (id . 1) (group-id . 0) (uuid . "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (parent . "") (pkt-type . "basic") (pkt . "CC C++\nD 1486332719.0\na A\nb C\nZ 263eb3b6193de7fe65b1ded5bcda513e8b4d6b84"))) + ;; '(((id . 1) (group-id . 
0) (uuid . "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84") (parent . "") (pkt-type . "basic") (pkt . "CC C++\nD 1486332719.0\na A\nb C\nZ 263eb3b6193de7fe65b1ded5bcda513e8b4d6b84"))) + (get-not-processed-pkts db 0 'basic 1000 0)) +(test-end "lists of packets") + +(test-begin "pkts as alists") +(define pktspec '((posting . ((title . t) ;; NOTE: same as ((posting (title . t)(url . u)(blub . b)) ... + (url . u) + (blurb . b))) + (comment . ((comment . c) + (score . s))) + (basic . ((b-field . b) + (a-field . a))))) +(define pktlst (find-pkts db '(basic) '())) +(define dpkt (car pktlst)) +(test #f "A" (get-value 'a-field dpkt pktspec)) + +(test #f "C" (alist-ref 'b-field (dpkt->alist dpkt pktspec))) + +(define basic-spec '((nada (foo . b)(bar . f))(basic (foo . f)(bar . b)))) +(define test-pkt '((foo . "fooval")(bar . "barval"))) +(let*-values (((u p) (alist->pkt test-pkt basic-spec ptype: 'basic)) + ((apkt) (pkt->alist p)) + ((bpkt) (pkt->alist p pktspec: basic-spec))) + (test #f "fooval" (alist-ref 'f apkt)) + (test #f "fooval" (alist-ref 'foo bpkt)) + (test #f #f (alist-ref 'f bpkt))) + +(test-end "pkts as alists") + +(test-begin "descendents and ancestors") + +(define (get-uuid pkt)(alist-ref 'uuid pkt)) + +;; add a child to 263e +(let-values (((uuid pkt) + (construct-pkt 'x "X" 'y "Y" 'P "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84" + 'D "1486332719.0"))) + (add-to-queue db pkt uuid 'basic "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84" 0)) + +(test #f '("263eb3b6193de7fe65b1ded5bcda513e8b4d6b84" "818fe30988c9673441b8f203972a8bda6af682f8") + (map (lambda (x)(alist-ref 'uuid x)) + (get-descendents + db 0 + "263eb3b6193de7fe65b1ded5bcda513e8b4d6b84"))) + +(test #f '("263eb3b6193de7fe65b1ded5bcda513e8b4d6b84" "818fe30988c9673441b8f203972a8bda6af682f8") + (map (lambda (x)(alist-ref 'uuid x)) + (get-ancestors + db 0 + "818fe30988c9673441b8f203972a8bda6af682f8"))) + +(test-end "descendents and ancestors") + +(test-end "pkts and pkt archives") + +(test-begin "pktsdb") + +(define spec '((tests (testname n TEXT) + (testpath p TEXT) + (duration d INTEGER)))) +;; (define pktsdb (make-pktdb)) +;; (pktdb-pktsdb-spec-set! pktsdb spec) + +(define pktsdb #f) + +(test #f #t (dbi:database? (let ((pdb (pktsdb-open "test.db" spec))) + (set! pktsdb pdb) + (pktdb-conn pdb)))) +;; (pp (pktdb-pktspec pktsdb)) +(test #f #t (pktsdb-add-record pktsdb 'tests '((testname . "test1")))) + +(pktsdb-close pktsdb) + +(test-end "pktsdb") ADDED attic/client.scm Index: attic/client.scm ================================================================== --- /dev/null +++ attic/client.scm @@ -0,0 +1,46 @@ + +;; Copyright 2006-2012, Matthew Welland. +;; +;; This file is part of Megatest. +;; +;; Megatest is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. +;; +;; Megatest is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with Megatest. If not, see . 
+ +;;====================================================================== +;; C L I E N T S +;;====================================================================== + +(declare (unit client)) + +(declare (uses common)) +(declare (uses db)) +(declare (uses tasks)) ;; tasks are where stuff is maintained about what is running. +(declare (uses commonmod)) + +(use srfi-18 extras tcp s11n srfi-1 posix regex srfi-69 hostinfo md5 + message-digest matchable spiffy uri-common intarweb http-client + spiffy-request-vars uri-common intarweb directory-utils) + +(import commonmod + debugprint) + +(module client +* + +) + +(import client) + +(include "common_records.scm") +(include "db_records.scm") + ADDED attic/http-transport.scm Index: attic/http-transport.scm ================================================================== --- /dev/null +++ attic/http-transport.scm @@ -0,0 +1,708 @@ + +;; Copyright 2006-2012, Matthew Welland. +;; +;; This file is part of Megatest. +;; +;; Megatest is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. +;; +;; Megatest is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with Megatest. If not, see . + +(declare (unit http-transport)) + +(declare (uses common)) +(declare (uses debugprint)) +(declare (uses db)) +(declare (uses tests)) +(declare (uses tasks)) ;; tasks are where stuff is maintained about what is running. +(declare (uses server)) +;; (declare (uses daemon)) +(declare (uses portlogger)) +(declare (uses rmt)) +(declare (uses dbfile)) +(declare (uses commonmod)) +(declare (uses mtargs)) + +(module http-transport +* + + +(import srfi-1 posix regex regex-case srfi-69 hostinfo md5 + message-digest posix-extras spiffy uri-common intarweb http-client + spiffy-request-vars intarweb spiffy-directory-listing + (srfi 18) extras tcp s11n) + +(import scheme + chicken + + (prefix mtargs args:) + debugprint) + +;; Configurations for server +(tcp-buffer-size 2048) +(max-connections 2048) + +(include "common_records.scm") +(include "db_records.scm") +(include "js-path.scm") + +(import dbfile commonmod) + +(require-library stml) +(define (http-transport:make-server-url hostport) + (if (not hostport) + #f + (conc "http://" (car hostport) ":" (cadr hostport)))) + +(define *server-loop-heart-beat* (current-seconds)) + +;;====================================================================== +;; S E R V E R +;; ====================================================================== + +;; Call this to start the actual server +;; + +(define *db:process-queue-mutex* (make-mutex)) + +(define (http-transport:run hostn) + ;; Configurations for server + (tcp-buffer-size 2048) + (max-connections 2048) + (debug:print 2 *default-log-port* "Attempting to start the server ...") + (let* ((db #f) ;; (open-db)) ;; we don't want the server to be opening and closing the db unnecesarily + (hostname (get-host-name)) + (ipaddrstr (let ((ipstr (if (string=? 
"-" hostn) + ;; (string-intersperse (map number->string (u8vector->list (hostname->ip hostname))) ".") + (server:get-best-guess-address hostname) + #f))) + (if ipstr ipstr hostn))) ;; hostname))) + (start-port (portlogger:open-run-close portlogger:find-port)) + (link-tree-path (common:get-linktree)) + (tmp-area (common:get-db-tmp-area)) + (start-file (conc tmp-area "/.server-start"))) + (debug:print-info 0 *default-log-port* "portlogger recommended port: " start-port) + ;; set some parameters for the server + (root-path (if link-tree-path + link-tree-path + (current-directory))) ;; WARNING: SECURITY HOLE. FIX ASAP! + (handle-directory spiffy-directory-listing) + (handle-exception (lambda (exn chain) + (signal (make-composite-condition + (make-property-condition + 'server + 'message "server error"))))) + + ;; http-transport:handle-directory) ;; simple-directory-handler) + ;; Setup the web server and a /ctrl interface + ;; + (vhost-map `(((* any) . ,(lambda (continue) + ;; open the db on the first call + ;; This is were we set up the database connections + (let* (($ (request-vars source: 'both)) + (dat ($ 'dat)) + (res #f)) + (cond + ((equal? (uri-path (request-uri (current-request))) + '(/ "api")) + (send-response body: (api:process-request *dbstruct-dbs* $) ;; the $ is the request vars proc + headers: '((content-type text/plain))) + (mutex-lock! *heartbeat-mutex*) + (set! *db-last-access* (current-seconds)) + (mutex-unlock! *heartbeat-mutex*)) + ((equal? (uri-path (request-uri (current-request))) + '(/ "")) + (send-response body: (http-transport:main-page))) + ((equal? (uri-path (request-uri (current-request))) + '(/ "json_api")) + (send-response body: (http-transport:main-page))) + ((equal? (uri-path (request-uri (current-request))) + '(/ "runs")) + (send-response body: (http-transport:main-page))) + ((equal? (uri-path (request-uri (current-request))) + '(/ any)) + (send-response body: "hey there!\n" + headers: '((content-type text/plain)))) + ((equal? (uri-path (request-uri (current-request))) + '(/ "hey")) + (send-response body: "hey there!\n" + headers: '((content-type text/plain)))) + ((equal? (uri-path (request-uri (current-request))) + '(/ "jquery3.1.0.js")) + (send-response body: (http-transport:show-jquery) + headers: '((content-type application/javascript)))) + ((equal? (uri-path (request-uri (current-request))) + '(/ "test_log")) + (send-response body: (http-transport:html-test-log $) + headers: '((content-type text/HTML)))) + ((equal? (uri-path (request-uri (current-request))) + '(/ "dashboard")) + (send-response body: (http-transport:html-dboard $) + headers: '((content-type text/HTML)))) + (else (continue)))))))) + (handle-exceptions + exn + (debug:print 0 *default-log-port* "Failed to create file " start-file ", exn=" exn) + (with-output-to-file start-file (lambda ()(print (current-process-id))))) + (http-transport:try-start-server ipaddrstr start-port))) + +;; This is recursively run by http-transport:run until sucessful +;; +(define (http-transport:try-start-server ipaddrstr portnum) + (let ((config-hostname (configf:lookup *configdat* "server" "hostname")) + (config-use-proxy (equal? 
(configf:lookup *configdat* "client" "use-http_proxy") "yes"))) + (if (not config-use-proxy) + (determine-proxy (constantly #f))) + (debug:print-info 0 *default-log-port* "http-transport:try-start-server time=" (seconds->time-string (current-seconds)) " ipaddrsstr=" ipaddrstr " portnum=" portnum " config-hostname=" config-hostname) + (handle-exceptions + exn + (begin + ;; (print-error-message exn) + (if (< portnum 64000) + (begin + (debug:print 0 *default-log-port* "WARNING: attempt to start server failed. Trying again ...") + (debug:print 0 *default-log-port* " message: " ((condition-property-accessor 'exn 'message) exn)) + (debug:print 5 *default-log-port* "exn=" (condition->list exn)) + (portlogger:open-run-close portlogger:set-failed portnum) + (debug:print 0 *default-log-port* "WARNING: failed to start on portnum: " portnum ", trying next port") + (thread-sleep! 0.1) + + ;; get_next_port goes here + (http-transport:try-start-server ipaddrstr + (portlogger:open-run-close portlogger:find-port))) + (begin + (debug:print 0 *default-log-port* "ERROR: Tried and tried but could not start the server")))) + ;; any error in following steps will result in a retry + (set! *server-info* (list ipaddrstr portnum)) + (debug:print 0 *default-log-port* "INFO: Trying to start server on " ipaddrstr ":" portnum) + ;; This starts the spiffy server + ;; NEED WAY TO SET IP TO #f TO BIND ALL + ;; (start-server bind-address: ipaddrstr port: portnum) + (if config-hostname ;; this is a hint to bind directly + (start-server port: portnum) ;; bind-address: (if (equal? config-hostname "-") + ;; ipaddrstr + ;; config-hostname)) + (start-server port: portnum)) + (portlogger:open-run-close portlogger:set-port portnum "released") + (debug:print 1 *default-log-port* "INFO: server has been stopped")))) + +;;====================================================================== +;; S E R V E R U T I L I T I E S +;;====================================================================== + +;;====================================================================== +;; C L I E N T S +;;====================================================================== + +(define *http-mutex* (make-mutex)) + +;; NOTE: Large block of code from 32436b426188080f72fceb6894af541fbad9921e removed here +;; I'm pretty sure it is defunct. + +;; This next block all imported en-mass from the api branch +(define *http-requests-in-progress* 0) +(define *http-connections-next-cleanup* (current-seconds)) + +(define (http-transport:get-time-to-cleanup) + (let ((res #f)) + (mutex-lock! *http-mutex*) + (set! res (> (current-seconds) *http-connections-next-cleanup*)) + (mutex-unlock! *http-mutex*) + res)) + +(define (http-transport:inc-requests-count) + (mutex-lock! *http-mutex*) + (set! *http-requests-in-progress* (+ 1 *http-requests-in-progress*)) + ;; Use this opportunity to slow things down iff there are too many requests in flight + (if (> *http-requests-in-progress* 5) + (begin + (debug:print-info 0 *default-log-port* "Whoa there buddy, ease up...") + (thread-sleep! 1))) + (mutex-unlock! *http-mutex*)) + +(define (http-transport:dec-requests-count proc) + (mutex-lock! *http-mutex*) + (proc) + (set! *http-requests-in-progress* (- *http-requests-in-progress* 1)) + (mutex-unlock! *http-mutex*)) + +(define (http-transport:dec-requests-count-and-close-all-connections) + (set! 
*http-requests-in-progress* (- *http-requests-in-progress* 1)) + (let loop ((etime (+ (current-seconds) 5))) ;; give up in five seconds + (if (> *http-requests-in-progress* 0) + (if (> etime (current-seconds)) + (begin + (thread-sleep! 0.05) + (loop etime)) + (debug:print-error 0 *default-log-port* "requests still in progress after 5 seconds of waiting. I'm going to pass on cleaning up http connections")) + (close-all-connections!))) + (set! *http-connections-next-cleanup* (+ (current-seconds) 10)) + (mutex-unlock! *http-mutex*)) + +(define (http-transport:inc-requests-and-prep-to-close-all-connections) + (mutex-lock! *http-mutex*) + (set! *http-requests-in-progress* (+ 1 *http-requests-in-progress*))) + +;; Send "cmd" with json payload "params" to serverdat and receive result +;; +(define (http-transport:client-api-send-receive run-id runremote cmd params #!key (numretries 3)) + (assert (remote? runremote) "FATAL: http-transport:client-api-send-receive called with serverdat="serverdat) + (let* ((fullurl (remote-api-req runremote)) + (res (vector #f "uninitialized")) + (success #t) + (sparams (db:obj->string params transport: 'http)) + (server-id (remote-server-id runremote))) + (debug:print-info 11 *default-log-port* "cmd=" cmd " fullurl=" fullurl " server-id=" server-id " current time:" (current-seconds)) + + ;; set up the http-client here + (max-retry-attempts 1) + ;; consider all requests indempotent + (retry-request? (lambda (request) + #f)) + ;; send the data and get the response + ;; extract the needed info from the http data and + ;; process and return it. + (let* ((send-recieve (lambda () + (mutex-lock! *http-mutex*) + ;; (condition-case (with-input-from-request "http://localhost"; #f read-lines) + ;; ((exn http client-error) e (print e))) + (set! res (vector ;;; DON'T FORGET - THIS IS THE CLIENT SIDE! NOTE: consider moving this to client.scm since we are only supporting http transport at this time. + success + (db:string->obj + (handle-exceptions + exn + (let ((call-chain (get-call-chain)) + (msg ((condition-property-accessor 'exn 'message) exn))) + (set! success #f) + (if (debug:debug-mode 1) + (debug:print-info 0 *default-log-port* "couldn't talk to server, trying again ...") + (begin + (debug:print 0 *default-log-port* "WARNING: failure in with-input-from-request to " fullurl ".") + (debug:print 0 *default-log-port* " message: " msg ", exn=" exn) + (debug:print 0 *default-log-port* " cmd: " cmd " params: " params " key:" (or server-id "thekey")) + (debug:print 0 *default-log-port* " call-chain: " call-chain))) + ;; what if another thread is communicating ok? Can't happen due to mutex + (http-transport:close-connections runremote) + (mutex-unlock! *http-mutex*) + ;; (close-connection! fullurl) + (db:obj->string #f)) + (with-input-from-request ;; was dat + fullurl + (list (cons 'key (or server-id "thekey")) + (cons 'cmd cmd) + (cons 'params sparams)) + read-string)) + transport: 'http) + 0)) ;; added this speculatively + ;; Shouldn't this be a call to the managed call-all-connections stuff above? + ;; (close-all-connections!) ;; BUG? WHY IS THIS HERE? Are we failing to reuse connections? + (mutex-unlock! *http-mutex*) + )) + (time-out (lambda () + (thread-sleep! 45) + (debug:print 0 *default-log-port* "WARNING: send-receive took more than 45 seconds!!") + #f)) + (th1 (make-thread send-recieve "with-input-from-request")) + (th2 (make-thread time-out "time out"))) + (thread-start! th1) + (thread-start! th2) + (thread-join! th1) + (vector-set! res 0 success) + (thread-terminate! 
th2) + (if (vector? res) + (if (vector-ref res 0) ;; this is the first flag or the second flag? + (let* ((res-dat (vector-ref res 1))) + (if (and (string? res-dat) (string-contains res-dat "server-id mismatch")) + (signal (make-composite-condition + (make-property-condition + 'servermismatch + 'message (vector-ref res 1)))) + res)) ;; this is the *inner* vector? seriously? why? + (if (debug:debug-mode 11) + (let ((call-chain (get-call-chain))) ;; note: this code also called in nmsg-transport - consider consolidating it + (print-call-chain (current-error-port)) + (debug:print-error 11 *default-log-port* "error above occured at server, res=" res) ;; " message: " ((condition-property-accessor 'exn 'message) exn)) + (debug:print 11 *default-log-port* " server call chain:") + (pp (vector-ref res 1) (current-error-port)) + (signal (vector-ref res 0))) + res)) + (signal (make-composite-condition + (make-property-condition + 'timeout + 'message "nmsg-transport:client-api-send-receive-raw timed out talking to server"))))))) + +;; careful closing of connections stored in *runremote* +;; +(define (http-transport:close-connections runremote) + (if (remote? runremote) + (let ((api-dat (remote-api-uri runremote))) + (handle-exceptions + exn + (begin + (print-call-chain *default-log-port*) + (debug:print-error 0 *default-log-port* " closing connection failed with error: " ((condition-property-accessor 'exn 'message) exn) ", exn=" exn)) + (if (args:any-defined? "-server" "-execute" "-run") + (debug:print-info 0 *default-log-port* "Closing connections to "api-dat)) + (if api-dat (close-connection! api-dat)) + + ;; Would it be better to set *runremote* to #f? I don't think so. But we may + ;; need to clear more of the runremote fields + (remote-api-url-set! runremote #f) ;; used as a flag for connection up and running + + #t)) + #f)) + +;; run http-transport:keep-running in a parallel thread to monitor that the db is being +;; used and to shutdown after sometime if it is not. +;; +(define (http-transport:keep-running) + ;; if none running or if > 20 seconds since + ;; server last used then start shutdown + ;; This thread waits for the server to come alive + (debug:print-info 0 *default-log-port* "Starting the sync-back, keep alive thread in server") + (let* ((servinfofile #f) + (sdat #f) + (no-sync-db (db:open-no-sync-db)) + (tmp-area (common:get-db-tmp-area)) + (started-file (conc tmp-area "/.server-started")) + (server-start-time (current-seconds)) + (server-info (let loop ((start-time (current-seconds)) + (changed #t) + (last-sdat "not this")) + (begin ;; let ((sdat #f)) + (thread-sleep! 0.01) + (debug:print-info 0 *default-log-port* "Waiting for server alive signature") + (mutex-lock! *heartbeat-mutex*) + (set! sdat *server-info*) + (mutex-unlock! *heartbeat-mutex*) + (if (and sdat + (not changed) + (> (- (current-seconds) start-time) 2)) + (let* ((servinfodir (server:get-servinfo-dir *toppath*)) ;; (conc *toppath*"/.servinfo")) + (ipaddr (car sdat)) + (port (cadr sdat)) + (servinf (conc servinfodir"/"ipaddr":"port))) + (set! servinfofile servinf) + (if (not (file-exists? servinfodir)) + (create-directory servinfodir #t)) + (with-output-to-file servinf + (lambda () + (let* ((serv-id (server:mk-signature))) + (set! *server-id* serv-id) + (print "SERVER STARTED: "ipaddr":"port" AT "(current-seconds)" server-id: "serv-id" pid: "(current-process-id)) + (print "started: "(seconds->year-week/day-time (current-seconds)))))) + (set! 
*on-exit-procs* (cons + (lambda () + (delete-file* servinf)) + *on-exit-procs*)) + ;; put data about this server into a simple flat file host.port + (debug:print-info 0 *default-log-port* "Received server alive signature") + sdat) + (begin + (debug:print-info 0 *default-log-port* "Still waiting, last-sdat=" last-sdat) + (sleep 4) + (if (> (- (current-seconds) start-time) 120) ;; been waiting for two minutes + (if sdat + (let* ((ipaddr (car sdat)) + (port (cadr sdat)) + (servinf (conc (server:get-servinfo-dir *toppath*)"/"ipaddr":"port))) + (debug:print-error 0 *default-log-port* "transport appears to have died, exiting server") + (exit)) + (exit) + ) + (loop start-time + (equal? sdat last-sdat) + sdat))))))) + (iface (car server-info)) + (port (cadr server-info)) + (last-access 0) + (server-timeout (server:expiration-timeout)) + (server-going #f) + (server-log-file (args:get-arg "-log"))) ;; always set when we are a server + + (handle-exceptions + exn + (debug:print 0 *default-log-port* "Failed to create " started-file ", exn=" exn) + (with-output-to-file started-file (lambda ()(print (current-process-id))))) + + (let loop ((count 0) + (server-state 'available) + (bad-sync-count 0) + (start-time (current-milliseconds))) + + ;; Use this opportunity to sync the tmp db to megatest.db + (if (not server-going) ;; *dbstruct-dbs* + (begin + (debug:print 0 *default-log-port* "SERVER: dbprep") + (set! *dbstruct-dbs* (db:setup #t)) ;; run-id)) FIXME!!! + (set! server-going #t) + (debug:print 0 *default-log-port* "SERVER: running, megatest version: " (common:get-full-version))) ;; NOTE: the server is NOT yet marked as running in the log. We do that in the keep-running routine. + (if (and no-sync-db + (common:low-noise-print 10 "sync-all")) ;; cheesy way to reduce frequency of running sync :) + (begin + (if (common:low-noise-print 120 "sync-all-print") + (debug:print 0 *default-log-port* "keep-running calling db:all-db-sync at " (time->string (seconds->local-time) "%H:%M:%S"))) + (db:all-db-sync *dbstruct-dbs*) + ))) + + ;; when things go wrong we don't want to be doing the various queries too often + ;; so we strive to run this stuff only every four seconds or so. + (let* ((sync-time (- (current-milliseconds) start-time)) + (rem-time (quotient (- 4000 sync-time) 1000))) + (if (and (<= rem-time 4) + (> rem-time 0)) + (thread-sleep! rem-time))) + + (if (< count 1) ;; 3x3 = 9 secs aprox + (loop (+ count 1) 'running bad-sync-count (current-milliseconds))) + + ;; Check that iface and port have not changed (can happen if server port collides) + (mutex-lock! *heartbeat-mutex*) + (set! sdat *server-info*) + (mutex-unlock! *heartbeat-mutex*) + + (if (not (equal? sdat (list iface port))) + (let ((new-iface (car sdat)) + (new-port (cadr sdat))) + (debug:print-info 0 *default-log-port* "WARNING: interface changed, refreshing iface and port info") + (set! iface new-iface) + (set! port new-port) + (if (not *server-id*) + (set! *server-id* (server:mk-signature))) + (debug:print 0 *default-log-port* "SERVER STARTED: " iface ":" port " AT " (current-seconds) " server-id: " *server-id*) + (flush-output *default-log-port*))) + + ;; Transfer *db-last-access* to last-access to use in checking that we are still alive + (mutex-lock! *heartbeat-mutex*) + (set! last-access *db-last-access*) + (mutex-unlock! *heartbeat-mutex*) + + (if (common:low-noise-print 120 (conc "server running on " iface ":" port)) + (begin + (if (not *server-id*) + (set! 
*server-id* (server:mk-signature))) + (debug:print 0 *default-log-port* (current-seconds) (current-directory) (current-process-id) (argv)) + (debug:print 0 *default-log-port* "SERVER STARTED: " iface ":" port " AT " (current-seconds) " server-id: " *server-id*) + (flush-output *default-log-port*))) + (if (common:low-noise-print 60 "dbstats") + (begin + (debug:print 0 *default-log-port* "Server stats:") + (db:print-current-query-stats))) + (let* ((hrs-since-start (/ (- (current-seconds) server-start-time) 3600))) + (cond + ((and *server-run* + (> (+ last-access server-timeout) + (current-seconds))) + (if (common:low-noise-print 120 "server continuing") + (debug:print-info 0 *default-log-port* "Server continuing, seconds since last db access: " (- (current-seconds) last-access)) + (let ((curr-time (current-seconds))) + (handle-exceptions + exn + (debug:print 0 *default-log-port* "ERROR: Failed to change timestamp on info file " servinfofile ". Are you out of space on that disk? exn=" exn) + (if (and ;; (< (- (current-seconds) server-start-time) 600) ;; run for ten minutes for experiment, 3600 thereafter + (not *server-overloaded*) + (file-exists? servinfofile)) + (change-file-times servinfofile curr-time curr-time))) + (if (and (common:low-noise-print 120 "start new server") + (> *api-process-request-count* 50)) ;; if this server is kind of busy start up another + (begin + (debug:print-info 0 *default-log-port* "Server is busy, api-count "*api-process-request-count*", start another if possible...") + (server:kind-run *toppath*) + (if (> *api-process-request-count* 100) + (begin + (debug:print-info 0 *default-log-port* "Server is overloaded at api-count=" *api-process-request-count*", removing "servinfofile) + (delete-file* servinfofile))))))) + (loop 0 server-state bad-sync-count (current-milliseconds))) + (else + (debug:print-info 0 *default-log-port* "Server timed out. seconds since last db access: " (- (current-seconds) last-access)) + (http-transport:server-shutdown port))))))) + +(define (http-transport:server-shutdown port) + (begin + ;;(BB> "http-transport:server-shutdown called") + (debug:print-info 0 *default-log-port* "Starting to shutdown the server. pid="(current-process-id)) + ;; + ;; start_shutdown + ;; + (set! *time-to-exit* #t) ;; tell on-exit to be fast as we've already cleaned up + (portlogger:open-run-close portlogger:set-port port "released") + (thread-sleep! 1) + + ;; (debug:print-info 0 *default-log-port* "Max cached queries was " *max-cache-size*) + ;; (debug:print-info 0 *default-log-port* "Number of cached writes " *number-of-writes*) + ;; (debug:print-info 0 *default-log-port* "Average cached write time " + ;; (if (eq? *number-of-writes* 0) + ;; "n/a (no writes)" + ;; (/ *writes-total-delay* + ;; *number-of-writes*)) + ;; " ms") + ;; (debug:print-info 0 *default-log-port* "Number non-cached queries " *number-non-write-queries*) + ;; (debug:print-info 0 *default-log-port* "Average non-cached time " + ;; (if (eq? *number-non-write-queries* 0) + ;; "n/a (no queries)" + ;; (/ *total-non-write-delay* + ;; *number-non-write-queries*)) + ;; " ms") + + (db:print-current-query-stats) + #;(common:save-pkt `((action . exit) + (T . server) + (pid . ,(current-process-id))) + *configdat* #t) + + ;; remove .servinfo file(s) here + + (debug:print-info 0 *default-log-port* "Server shutdown complete. Exiting") + (exit))) + +;; all routes though here end in exit ... +;; +;; start_server? 
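+;; Sketch (illustration only, hypothetical names): the launch routine below runs the
+;; server and its monitor as two srfi-18 threads; `serve-proc` and `monitor-proc`
+;; stand in for http-transport:run and http-transport:keep-running. The monitor is
+;; started slightly after the server so the server can settle, and the process
+;; exits when the run thread returns.
+;;
+;;   (use srfi-18)
+;;   (define (launch-with-monitor serve-proc monitor-proc)
+;;     (let ((th-run (make-thread serve-proc   "server run"))
+;;           (th-mon (make-thread monitor-proc "server monitor")))
+;;       (thread-start! th-run)
+;;       (thread-sleep! 0.25)   ;; give the server time to settle before monitoring
+;;       (thread-start! th-mon)
+;;       (thread-join! th-run)  ;; all routes end in exit, as noted above
+;;       (exit)))
+;;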
+;; +(define (http-transport:launch) + ;; check the .servinfo directory, are there other servers running on this + ;; or another host? + (let* ((server-start-is-ok (server:minimal-check *toppath*))) + (if (not server-start-is-ok) + (begin + (debug:print 0 *default-log-port* "ERROR: server start not ok, exiting now.") + (exit 1)))) + + ;; check that a server start is in progress, pause or exit if so + (let* ((th2 (make-thread (lambda () + (debug:print-info 0 *default-log-port* "Server run thread started") + (http-transport:run + (if (args:get-arg "-server") + (args:get-arg "-server") + "-") + )) "Server run")) + (th3 (make-thread (lambda () + (debug:print-info 0 *default-log-port* "Server monitor thread started") + (http-transport:keep-running) + "Keep running")))) + (thread-start! th2) + (thread-sleep! 0.25) ;; give the server time to settle before starting the keep-running monitor. + (thread-start! th3) + (set! *didsomething* #t) + (thread-join! th2) + (exit))) + +;; (define (http-transport:server-signal-handler signum) +;; (signal-mask! signum) +;; (handle-exceptions +;; exn +;; (debug:print 0 *default-log-port* " ... exiting ...") +;; (let ((th1 (make-thread (lambda () +;; (thread-sleep! 1)) +;; "eat response")) +;; (th2 (make-thread (lambda () +;; (debug:print-error 0 *default-log-port* "Received ^C, attempting clean exit. Please be patient and wait a few seconds before hitting ^C again.") +;; (thread-sleep! 3) ;; give the flush three seconds to do it's stuff +;; (debug:print 0 *default-log-port* " Done.") +;; (exit 4)) +;; "exit on ^C timer"))) +;; (thread-start! th2) +;; (thread-start! th1) +;; (thread-join! th2)))) + +;;=============================================== +;; Java script +;;=============================================== +(define (http-transport:show-jquery) + (let* ((data (tests:readlines *java-script-lib*))) +(string-join data "\n"))) + + + +;;====================================================================== +;; web pages +;;====================================================================== + +(define (http-transport:html-test-log $) + (let* ((run-id ($ 'runid)) + (test-item ($ 'testname)) + (parts (string-split test-item ":")) + (test-name (car parts)) + + (item-name (if (equal? (length parts) 1) + "" + (cadr parts)))) + ;(print $) +(tests:get-test-log run-id test-name item-name))) + + +(define (http-transport:html-dboard $) + (let* ((page ($ 'page)) + (oup (open-output-string)) + (bdy "--------------------------") + + (ret (tests:dynamic-dboard page))) + (s:output-new oup ret) + (close-output-port oup) + + (set! bdy (get-output-string oup)) + (conc "

<html><title>Dashboard</title><body>" bdy "</body></html>")))
+
+(define (http-transport:main-page)
+  (let ((linkpath (root-path)))
+    (conc "<html><title>" (pathname-strip-directory *toppath*) "</title><body>"
+          "<a href=\"./runs\">Runs</a><br>"
+          "Run area: " *toppath*
+          "<h2>Server Stats</h2>"
+          (http-transport:stats-table)
+          "<br>"
+          (http-transport:runs linkpath)
+          "<br>"
+          ;; (http-transport:run-stats)
+          "</body></html>"
+          )))
+
+(define (http-transport:stats-table)
+  (mutex-lock! *heartbeat-mutex*)
+  (let ((res
+         (conc "<table>"
+               "<tr><td>Max cached queries</td><td>"      *max-cache-size* "</td></tr>"
+               "<tr><td>Number of cached writes</td><td>" *number-of-writes* "</td></tr>"
+               "<tr><td>Average cached write time</td><td>" (if (eq? *number-of-writes* 0)
+                                                                "n/a (no writes)"
+                                                                (/ *writes-total-delay*
+                                                                   *number-of-writes*))
+               " ms</td></tr>"
+               "<tr><td>Number non-cached queries</td><td>" *number-non-write-queries* "</td></tr>"
+               ;; "<tr><td>Average non-cached time</td><td>" (if (eq? *number-non-write-queries* 0)
+               ;;                                                "n/a (no queries)"
+               ;;                                                (/ *total-non-write-delay*
+               ;;                                                   *number-non-write-queries*))
+               ;; " ms</td></tr>"
+               "<tr><td>Last access</td><td>" (seconds->time-string *db-last-access*) "</td></tr>"
+               "</table>")))
+    (mutex-unlock! *heartbeat-mutex*)
+    res))
+
+(define (http-transport:runs linkpath)
+  (conc "<h3>Runs</h3>"
+        (string-intersperse
+         (let ((files (map pathname-strip-directory (glob (conc linkpath "/*")))))
+           (map (lambda (p)
+                  (conc "<a href=\"./runs/" p "\">" p "</a><br>"))
+                files))
+         " ")))
+
+#;(define (http-transport:run-stats)
+  (let ((stats (open-run-close db:get-running-stats #f)))
+    (conc "<table>"
+          (string-intersperse
+           (map (lambda (stat)
+                  (conc "<tr><td>" (car stat) "</td><td>" (cadr stat) "</td></tr>"))
+                stats)
+           " ")
+          "</table>
"))) +) ADDED attic/index-tree.scm Index: attic/index-tree.scm ================================================================== --- /dev/null +++ attic/index-tree.scm @@ -0,0 +1,61 @@ +;;====================================================================== +;; Copyright 2006-2013, Matthew Welland. +;; +;; This file is part of Megatest. +;; +;; Megatest is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. +;; +;; Megatest is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with Megatest. If not, see . +;; +;;====================================================================== + +;;====================================================================== +;; Tests +;;====================================================================== + +(use sqlite3 srfi-1 posix regex regex-case srfi-69 dot-locking tcp directory-utils) +(import (prefix sqlite3 sqlite3:)) + +(declare (unit tests)) +(declare (uses lock-queue)) +(declare (uses db)) +(declare (uses common)) +(declare (uses items)) +(declare (uses runconfig)) +(declare (uses commonmod)) +(import commonmod) + +(include "common_records.scm") +(include "key_records.scm") +(include "db_records.scm") +(include "run_records.scm") +(include "test_records.scm") + +;; Populate the links tree with index.html files +;; +;; - start from most recent tests and work towards oldest -OR- +;; start from deepest hierarchy and work way up +;; - look up tests in megatest.db +;; - cross-reference the tests to stats.db +;; - if newer than event_time in stats.db or not registered in stats.db regenerate +;; - run du and store in stats.db +;; - when all tests at that level done generate next level up index.html +;; +;; include in rollup html index.html: +;; sum of du +;; counts of PASS, FAIL, RUNNING, REMOTEHOSTSTART, LAUNCHED, CHECK etc. +;; overall status +;; +;; include in test specific index.html: +;; host, uname, cpu graph, disk avail graph, steps, data +;; meta data, state, status, du +;; ADDED attic/lock-queue.scm Index: attic/lock-queue.scm ================================================================== --- /dev/null +++ attic/lock-queue.scm @@ -0,0 +1,258 @@ +;; Copyright 2006-2013, Matthew Welland. +;; +;; This file is part of Megatest. +;; +;; Megatest is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. +;; +;; Megatest is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with Megatest. If not, see . 
+;; + +(use (prefix sqlite3 sqlite3:) srfi-18) + +(declare (unit lock-queue)) +(declare (uses common)) +(declare (uses debugprint)) +(declare (uses tasks)) +(declare (uses commonmod)) + +(import commonmod + debugprint) + +;;====================================================================== +;; attempt to prevent overlapping updates of rollup files by queueing +;; update requests in an sqlite db +;;====================================================================== + +;;====================================================================== +;; db record, +;;====================================================================== + +(define (make-lock-queue:db-dat)(make-vector 3)) +(define-inline (lock-queue:db-dat-get-db vec) (vector-ref vec 0)) +(define-inline (lock-queue:db-dat-get-path vec) (vector-ref vec 1)) +(define-inline (lock-queue:db-dat-set-db! vec val)(vector-set! vec 0 val)) +(define-inline (lock-queue:db-dat-set-path! vec val)(vector-set! vec 1 val)) + +(define (lock-queue:delete-lock-db dbdat) + (let ((fname (lock-queue:db-dat-get-path dbdat))) + (system (conc "rm -f " fname "*")))) + +(define (lock-queue:open-db fname #!key (count 10)) + (let* ((actualfname (conc fname ".lockdb")) + (dbexists (common:file-exists? actualfname)) + (db (sqlite3:open-database actualfname)) + (handler (make-busy-timeout 136000))) + (if dbexists + (vector db actualfname) + (begin + (handle-exceptions + exn + (begin + (thread-sleep! 10) + (if (> count 0) + (lock-queue:open-db fname count: (- count 1)) + (vector db actualfname))) + (sqlite3:with-transaction + db + (lambda () + (sqlite3:execute + db + "CREATE TABLE IF NOT EXISTS queue ( + id INTEGER PRIMARY KEY, + test_id INTEGER, + start_time INTEGER, + state TEXT, + CONSTRAINT queue_constraint UNIQUE (test_id));") + (sqlite3:execute + db + "CREATE TABLE IF NOT EXISTS runlocks ( + id INTEGER PRIMARY KEY, + test_id INTEGER, + run_lock TEXT, + CONSTRAINT runlock_constraint UNIQUE (run_lock));")))))) + (sqlite3:set-busy-handler! db handler) + (vector db actualfname))) + +(define (lock-queue:set-state dbdat test-id newstate #!key (remtries 10)) + (tasks:wait-on-journal (lock-queue:db-dat-get-path dbdat) 1200) + (handle-exceptions + exn + (if (> remtries 0) + (begin + (debug:print 0 *default-log-port* "WARNING: exception on lock-queue:set-state. Trying again in 30 seconds.") + (debug:print 0 *default-log-port* " message: " ((condition-property-accessor 'exn 'message) exn)) + (thread-sleep! 30) + (lock-queue:set-state dbdat test-id newstate remtries: (- remtries 1))) + (begin + (debug:print-error 0 *default-log-port* " Failed to set lock state for test with id " test-id ", error: " ((condition-property-accessor 'exn 'message) exn) ", giving up.") + #f)) + (sqlite3:execute (lock-queue:db-dat-get-db dbdat) "UPDATE queue SET state=? WHERE test_id=?;" + newstate + test-id))) + +(define (lock-queue:any-younger? dbdat mystart test-id #!key (remtries 10)) + ;; no need to wait on journal on read only queries + ;; (tasks:wait-on-journal (lock-queue:db-dat-get-path dbdat) 1200) + (handle-exceptions + exn + (if (> remtries 0) + (begin + (debug:print 0 *default-log-port* "WARNING: exception on lock-queue:any-younger. Removing lockdb and trying again in 5 seconds.") + (debug:print 0 *default-log-port* " message: " ((condition-property-accessor 'exn 'message) exn)) + (thread-sleep! 5) + (lock-queue:delete-lock-db dbdat) + (lock-queue:any-younger? 
dbdat mystart test-id remtries: (- remtries 1))) + (begin + (debug:print-error 0 *default-log-port* " Failed to find younger locks for test with id " test-id ", error: " ((condition-property-accessor 'exn 'message) exn) ", giving up.") + #f)) + (let ((res #f)) + (sqlite3:for-each-row + (lambda (tid) + ;; Actually this should not be needed as mystart cannot be simultaneously less than and test-id same as + (if (not (equal? tid test-id)) + (set! res tid))) + (lock-queue:db-dat-get-db dbdat) + "SELECT test_id FROM queue WHERE start_time > ?;" mystart) + res))) + +(define (lock-queue:get-lock dbdat test-id #!key (count 10)(waiting-msg #f)) + (tasks:wait-on-journal (lock-queue:db-dat-get-path dbdat) 1200 remove: #t waiting-msg: "lock-queue:get-lock, waiting on journal") + (let* ((res #f) + (db (lock-queue:db-dat-get-db dbdat)) + (lckqry (sqlite3:prepare db "SELECT test_id,run_lock FROM runlocks WHERE run_lock='locked';")) + (mklckqry (sqlite3:prepare db "INSERT INTO runlocks (test_id,run_lock) VALUES (?,'locked');"))) + (let ((result + (handle-exceptions + exn + (begin + (debug:print 0 *default-log-port* "WARNING: failed to get queue lock. Removing lock db and returning fail") ;; Will try again in a few seconds") + (debug:print 0 *default-log-port* " message: " ((condition-property-accessor 'exn 'message) exn)) + (thread-sleep! 10) + ;; (if (> count 0) + ;; #f ;; (lock-queue:get-lock dbdat test-id count: (- count 1)) - give up on retries + ;; (begin ;; never recovered, remote the lock file and return #f, no lock obtained + (lock-queue:delete-lock-db dbdat) + #f) + (sqlite3:with-transaction + db + (lambda () + (sqlite3:for-each-row (lambda (tid lockstate) + (set! res (list tid lockstate))) + lckqry) + (if res + (if (equal? (car res) test-id) + #t ;; already have the lock + #f) + (begin + (sqlite3:execute mklckqry test-id) + ;; if no error handled then return #t for got the lock + #t))))))) + (sqlite3:finalize! lckqry) + (sqlite3:finalize! mklckqry) + result))) + +(define (lock-queue:release-lock fname test-id #!key (count 10)) + (let* ((dbdat (lock-queue:open-db fname))) + (tasks:wait-on-journal (lock-queue:db-dat-get-path dbdat) 1200 "lock-queue:release-lock; waiting on journal") + (handle-exceptions + exn + (begin + (debug:print 0 *default-log-port* "WARNING: Failed to release queue lock. Will try again in few seconds") + (debug:print 0 *default-log-port* " message: " ((condition-property-accessor 'exn 'message) exn)) + (thread-sleep! (/ count 10)) + (if (> count 0) + (begin + (sqlite3:finalize! (lock-queue:db-dat-get-db dbdat)) + (lock-queue:release-lock fname test-id count: (- count 1))) + (let ((journal (conc fname "-journal"))) + ;; If we've tried ten times and failed there is a serious problem + ;; try to remove the lock db and allow it to be recreated + (handle-exceptions + exn + #f + (if (common:file-exists? journal)(delete-file journal)) + (if (common:file-exists? fname) (delete-file fname)) + #f)))) + (sqlite3:execute (lock-queue:db-dat-get-db dbdat) "DELETE FROM runlocks WHERE test_id=?;" test-id) + (sqlite3:finalize! (lock-queue:db-dat-get-db dbdat))))) + +(define (lock-queue:steal-lock dbdat test-id #!key (count 10)) + (debug:print-info 0 *default-log-port* "Attempting to steal lock at " (lock-queue:db-dat-get-path dbdat)) + (tasks:wait-on-journal (lock-queue:db-dat-get-path dbdat) 1200 "lock-queue:steal-lock; waiting on journal") + (handle-exceptions + exn + (begin + (debug:print 0 *default-log-port* "WARNING: Failed to steal queue lock. 
Will try again in few seconds") + (debug:print 0 *default-log-port* " message: " ((condition-property-accessor 'exn 'message) exn)) + (thread-sleep! 10) + (if (> count 0) + (lock-queue:steal-lock dbdat test-id count: (- count 1)) + #f)) + (sqlite3:execute (lock-queue:db-dat-get-db dbdat) "DELETE FROM runlocks WHERE run_lock='locked';")) + (lock-queue:get-lock dbdat test-it)) + +;; returns #f if ok to skip the task +;; returns #t if ok to proceed with task +;; otherwise waits +;; +(define (lock-queue:wait-turn fname test-id #!key (count 10)(waiting-msg #f)) + (let* ((dbdat (lock-queue:open-db fname)) + (mystart (current-seconds)) + (db (lock-queue:db-dat-get-db dbdat))) + ;; (tasks:wait-on-journal (lock-queue:db-dat-get-path dbdat) 1200 waiting-msg: "lock-queue:wait-turn; waiting on journal file") + (handle-exceptions + exn + (begin + (debug:print 0 *default-log-port* "WARNING: Failed to find out if it is ok to skip the wait queue. Will try again in few seconds") + (debug:print 0 *default-log-port* " message: " ((condition-property-accessor 'exn 'message) exn)) + (print-call-chain (current-error-port)) + (thread-sleep! 10) + (if (> count 0) + (begin + (sqlite3:finalize! db) + (lock-queue:wait-turn fname test-id count: (- count 1))) + (begin + (debug:print 0 *default-log-port* "Giving up calls to lock-queue:wait-turn for test-id " test-id " at path " fname ", printing call chain") + (print-call-chain (current-error-port)) + #f))) + ;; wait 10 seconds and then check to see if someone is already updating the html + (thread-sleep! 10) + (if (not (lock-queue:any-younger? dbdat mystart test-id)) ;; no processing in flight, must try to start processing + (begin + (tasks:wait-on-journal (lock-queue:db-dat-get-path dbdat) 1200 waiting-msg: "lock-queue:wait-turn; waiting on journal file") + (sqlite3:execute + db + "INSERT OR REPLACE INTO queue (test_id,start_time,state) VALUES (?,?,'waiting');" + test-id mystart) + ;; (thread-sleep! 1) ;; give other tests a chance to register + (let ((result + (let loop ((younger-waiting (lock-queue:any-younger? dbdat mystart test-id))) + (if younger-waiting + (begin + ;; no need for us to wait. mark in the lock queue db as skipping + ;; no point in marking anything in the queue - simply never register this + ;; test as it is *covered* by a previously started update to the html file + ;; (lock-queue:set-state dbdat test-id "skipping") + #f) ;; let the calling process know that nothing needs to be done + (if (lock-queue:get-lock dbdat test-id) + #t + (if (> (- (current-seconds) mystart) 36000) ;; waited too long, steal the lock + (lock-queue:steal-lock dbdat test-id) + (begin + (thread-sleep! 1) + (loop (lock-queue:any-younger? dbdat mystart test-id))))))))) + (sqlite3:finalize! db) + result)))))) + + +;; (use trace) +;; (trace lock-queue:get-lock lock-queue:release-lock lock-queue:wait-turn lock-queue:any-younger? lock-queue:set-state) ADDED attic/margs.scm Index: attic/margs.scm ================================================================== --- /dev/null +++ attic/margs.scm @@ -0,0 +1,103 @@ +;; Copyright 2007-2010, Matthew Welland. +;; +;; This file is part of Megatest. +;; +;; Megatest is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. 
+;; +;; Megatest is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with Megatest. If not, see . + + +(declare (unit margs)) +;; (declare (uses common)) + +(define args:arg-hash (make-hash-table)) + +(define (args:get-arg arg . default) + (if (null? default) + (hash-table-ref/default args:arg-hash arg #f) + (hash-table-ref/default args:arg-hash arg (car default)))) + +;; get an arg as a number +(define (args:get-arg-number arg . default) + (let* ((val-str (args:get-arg arg)) + (val (if val-str (string->number val-str) #f))) + (if val + val + (if (null? default) + #f + default)))) + +(define (args:get-arg-from ht arg . default) + (if (null? default) + (hash-table-ref/default ht arg #f) + (hash-table-ref/default ht arg (car default)))) + + +(define (args:remove-arg-from-ht arg) + (hash-table-delete! args:arg-hash arg) +) + +(define (args:usage . args) + (if (> (length args) 0) + (apply print "ERROR: " args)) + (if (string? help) + (print help) + (print "Usage: " (car (argv)) " ... ")) + (exit 0)) + +(define (args:any-defined? . args) + (not (null? (filter (lambda (x) x) + (map args:get-arg args))))) + +;; ;; one-of args defined +;; (define (args:any-defined? . param) +;; (let ((res #f)) +;; (for-each +;; (lambda (arg) +;; (if (args:get-arg arg)(set! res #t))) +;; param) +;; res)) + +;; args: +(define (args:get-args args params switches arg-hash num-needed) + (let* ((numargs (length args)) + (adj-num-needed (if num-needed (+ num-needed 2) #f))) + (if (< numargs (if adj-num-needed adj-num-needed 2)) + (if (>= num-needed 1) + (args:usage "No arguments provided") + '()) + (let loop ((arg (cadr args)) + (tail (cddr args)) + (remargs '())) + (cond + ((member arg params) ;; args with params + (if (< (length tail) 1) + (args:usage "param given without argument " arg) + (let ((val (car tail)) + (newtail (cdr tail))) + (hash-table-set! arg-hash arg val) + (if (null? newtail) remargs + (loop (car newtail)(cdr newtail) remargs))))) + ((member arg switches) ;; args with no params (i.e. switches) + (hash-table-set! arg-hash arg #t) + (if (null? tail) remargs + (loop (car tail)(cdr tail) remargs))) + (else + (if (null? tail)(append remargs (list arg)) ;; return the non-used args + (loop (car tail)(cdr tail)(append remargs (list arg)))))))) + )) + +(define (args:print-args remargs arg-hash) + (print "ARGS: " remargs) + (for-each (lambda (arg) + (print " " arg " " (hash-table-ref/default arg-hash arg #f))) + (hash-table-keys arg-hash))) ADDED attic/mlaunch.scm Index: attic/mlaunch.scm ================================================================== --- /dev/null +++ attic/mlaunch.scm @@ -0,0 +1,35 @@ +;; Copyright 2006-2014, Matthew Welland. +;; +;; This file is part of Megatest. +;; +;; Megatest is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. +;; +;; Megatest is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. 
+;; +;; You should have received a copy of the GNU General Public License +;; along with Megatest. If not, see . + +;; strftime('%m/%d/%Y %H:%M:%S','now','localtime') + +;;====================================================================== +;; MLAUNCH +;; +;; take jobs from the given queue and keep launching them keeping +;; the cpu load at the targeted level +;; +;;====================================================================== + +(use sqlite3 srfi-1 posix regex regex-case srfi-69 format) + +(declare (unit mlaunch)) +(declare (uses db)) +(declare (uses common)) +(declare (uses commonmod)) +(import commonmod) + ADDED attic/portlogger-example.scm Index: attic/portlogger-example.scm ================================================================== --- /dev/null +++ attic/portlogger-example.scm @@ -0,0 +1,21 @@ +;; Copyright 2006-2017, Matthew Welland. +;; +;; This file is part of Megatest. +;; +;; Megatest is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. +;; +;; Megatest is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with Megatest. If not, see . + + +(declare (uses portlogger)) + +(print (apply portlogger:main (cdr (argv)))) ADDED attic/synchash.scm Index: attic/synchash.scm ================================================================== --- /dev/null +++ attic/synchash.scm @@ -0,0 +1,137 @@ +;;====================================================================== +;; Copyright 2006-2012, Matthew Welland. +;; +;; This file is part of Megatest. +;; +;; Megatest is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. +;; +;; Megatest is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with Megatest. If not, see . +;; +;;====================================================================== + +;;====================================================================== +;; A hash of hashes that can be kept in sync by sending minial deltas +;;====================================================================== + +(use format) +(use srfi-1 srfi-69 sqlite3) +(import (prefix sqlite3 sqlite3:)) + +(declare (unit synchash)) +(declare (uses db)) +(declare (uses server)) +(declare (uses rmtmod)) + +(include "db_records.scm") + +(import rmtmod) + +(define (synchash:make) + (make-hash-table)) + +;; given an alist of objects '((id obj) ...) +;; 1. remove unchanged objects from the list +;; 2. create a list of removed objects by id +;; 3. remove removed objects from synchash +;; 4. 
replace or add new or changed objects to synchash +;; +(define (synchash:get-delta indat synchash) + (let ((deleted '()) + (changed '()) + (found '()) + (orig-keys (hash-table-keys synchash))) + (for-each + (lambda (item) + (let* ((id (car item)) + (dat (cadr item)) + (ref (hash-table-ref/default synchash id #f))) + (if (not (equal? dat ref)) ;; item changed or new + (begin + (set! changed (cons item changed)) + (hash-table-set! synchash id dat))) + (set! found (cons id found)))) + indat) + (for-each + (lambda (id) + (if (not (member id found)) + (begin + (set! deleted (cons id deleted)) + (hash-table-delete! synchash id)))) + orig-keys) + (list changed deleted) + ;; (list indat '()) ;; just for debugging + )) + +;; keynum => the field to use as the unique key (usually 0 but can be other field) +;; +(define (synchash:client-get proc synckey keynum synchash run-id . params) + (let* ((data (rmt:synchash-get run-id proc synckey keynum params)) + (newdat (car data)) + (removs (cadr data)) + (myhash (hash-table-ref/default synchash synckey #f))) + (if (not myhash) + (begin + (set! myhash (make-hash-table)) + (hash-table-set! synchash synckey myhash))) + (for-each + (lambda (item) + (let ((id (car item)) + (dat (cadr item))) + ;; (debug:print-info 2 *default-log-port* "Processing item: " item) + (hash-table-set! myhash id dat))) + newdat) + (for-each + (lambda (id) + (hash-table-delete! myhash id)) + removs) + ;; WHICH ONE!? + ;; data)) ;; return the changed and deleted list + (list newdat removs))) ;; synchash)) + +(define *synchashes* (make-hash-table)) + +(define (synchash:server-get dbstruct run-id proc synckey keynum params) + ;; (debug:print-info 2 *default-log-port* "synckey: " synckey ", keynum: " keynum ", params: " params) + (let* ((dbdat (db:get-db dbstruct run-id)) + (db (db:dbdat-get-db dbdat)) + (synchash (hash-table-ref/default *synchashes* synckey #f)) + (newdat (apply (case proc + ((db:get-runs) db:get-runs) + ((db:get-tests-for-run-mindata) db:get-tests-for-run-mindata) + ((db:get-test-info-by-ids) db:get-test-info-by-ids) + (else + (print "ERROR: sync for hash " proc " not setup! Edits needed in synchash.scm") + print)) + db params)) + (postdat #f) + (make-indexed (lambda (x) + (list (vector-ref x keynum) x)))) + ;; Now process newdat based on the query type + (set! postdat (case proc + ((db:get-runs) + ;; (debug:print-info 2 *default-log-port* "Get runs call") + (let ((header (vector-ref newdat 0)) + (data (vector-ref newdat 1))) + ;; (debug:print-info 2 *default-log-port* "header: " header ", data: " data) + (cons (list "header" header) ;; add the header keyed by the word "header" + (map make-indexed data)))) ;; add each element keyed by the keynum'th val + (else + ;; (debug:print-info 2 *default-log-port* "Non-get runs call") + (map make-indexed newdat)))) + ;; (debug:print-info 2 *default-log-port* "postdat: " postdat) + ;; (if (not indb)(sqlite3:finalize! db)) + (if (not synchash) + (begin + (set! synchash (make-hash-table)) + (hash-table-set! *synchashes* synckey synchash))) + (synchash:get-delta postdat synchash))) + DELETED client.scm Index: client.scm ================================================================== --- client.scm +++ /dev/null @@ -1,128 +0,0 @@ - -;; Copyright 2006-2012, Matthew Welland. -;; -;; This file is part of Megatest. 
-;; -;; Megatest is free software: you can redistribute it and/or modify -;; it under the terms of the GNU General Public License as published by -;; the Free Software Foundation, either version 3 of the License, or -;; (at your option) any later version. -;; -;; Megatest is distributed in the hope that it will be useful, -;; but WITHOUT ANY WARRANTY; without even the implied warranty of -;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -;; GNU General Public License for more details. -;; -;; You should have received a copy of the GNU General Public License -;; along with Megatest. If not, see . - -;;====================================================================== -;; C L I E N T S -;;====================================================================== - -(use srfi-18 extras tcp s11n srfi-1 posix regex srfi-69 hostinfo md5 - message-digest matchable spiffy uri-common intarweb http-client - spiffy-request-vars uri-common intarweb directory-utils) - -(declare (unit client)) - -(declare (uses common)) -(declare (uses db)) -(declare (uses tasks)) ;; tasks are where stuff is maintained about what is running. - -(include "common_records.scm") -(include "db_records.scm") - -;; client:get-signature -(define (client:get-signature) - (if *my-client-signature* *my-client-signature* - (let ((sig (conc (get-host-name) " " (current-process-id)))) - (set! *my-client-signature* sig) - *my-client-signature*))) - -;; Not currently used! But, I think it *should* be used!!! -#;(define (client:logout serverdat) - (let ((ok (and (socket? serverdat) - (cdb:logout serverdat *toppath* (client:get-signature))))) - ok)) - -(define (client:setup areapath #!key (remaining-tries 100) (failed-connects 0)) - (client:setup-http areapath remaining-tries: remaining-tries failed-connects: failed-connects)) - -;; Do all the connection work, look up the transport type and set up the -;; connection if required. -;; -;; There are two scenarios. -;; 1. We are a test manager and we received *transport-type* and *runremote* via cmdline -;; 2. We are a run tests, list runs or other interactive process and we must figure out -;; *transport-type* and *runremote* from the monitor.db -;; -;; client:setup -;; -;; lookup_server, need to remove *runremote* stuff -;; - -(define (client:setup-http areapath #!key (remaining-tries 100) (failed-connects 0)(area-dat #f)) - (mutex-lock! *rmt-mutex*) - (let ((res (client:setup-http-baby areapath remaining-tries: remaining-tries failed-connects: failed-connects area-dat: area-dat))) - (mutex-unlock! *rmt-mutex*) - res)) - -(define (client:setup-http-baby areapath #!key (remaining-tries 100) (failed-connects 0)(area-dat #f)) - (debug:print-info 2 *default-log-port* "client:setup remaining-tries=" remaining-tries) - (server:start-and-wait areapath) - (if (<= remaining-tries 0) - (begin - (debug:print-error 0 *default-log-port* "failed to start or connect to server") - (exit 1)) - ;; - ;; Alternatively here, we can get the list of candidate servers and work our way - ;; through them searching for a good one. - ;; - (let* ((server-dat (server:choose-server areapath 'best)) - (runremote (or area-dat *runremote*))) - (if (not server-dat) ;; no server found - (client:setup-http-baby areapath remaining-tries: (- remaining-tries 1)) - (match server-dat - ((host port start-time server-id pid) - (debug:print-info 4 *default-log-port* "client:setup server-dat=" server-dat ", remaining-tries=" remaining-tries) - (if (and (not area-dat) - (not *runremote*)) - (begin - (set! 
*runremote* (make-remote)) - (let* ((server-info (remote-server-info *runremote*))) - (if server-info - (begin - (remote-server-url-set! *runremote* (server:record->url server-info)) - (remote-server-id-set! *runremote* (server:record->id server-info))))))) - (if (and host port server-id) - (let* ((start-res (http-transport:client-connect host port server-id)) - (ping-res (rmt:login-no-auto-client-setup start-res))) - (if (and start-res - ping-res) - (let ((runremote (or area-dat *runremote*))) ;; it might have been generated only a few statements ago - (if runremote - (begin - (remote-conndat-set! runremote start-res) ;; (hash-table-set! runremote run-id start-res) - (debug:print-info 2 *default-log-port* "connected to " (http-transport:server-dat-make-url start-res)) - start-res) - (client:setup-http-baby areapath remaining-tries: (- remaining-tries 1)))) - (begin ;; login failed but have a server record, clean out the record and try again - (debug:print-info 0 *default-log-port* "client:setup, login unsuccessful, will attempt to start server ... start-res=" start-res ", server-dat=" server-dat) ;; had runid. Fixes part of Randy;s ticket 1405717332 - (case *transport-type* - ((http)(http-transport:close-connections))) - (if *runremote* - (remote-conndat-set! runremote #f) ;; (hash-table-delete! runremote run-id) - ) - (thread-sleep! 1) - (client:setup-http-baby areapath remaining-tries: (- remaining-tries 1)) - ))) - (begin ;; no server registered - ;; (server:kind-run areapath) - (server:start-and-wait areapath) - (debug:print-info 0 *default-log-port* "client:setup, no server registered, remaining-tries=" remaining-tries) - (thread-sleep! 1) ;; (+ 5 (random (- 20 remaining-tries)))) ;; give server a little time to start up, randomize a little to avoid start storms. - (client:setup-http-baby areapath remaining-tries: (- remaining-tries 1))))) - (else - (debug:print 0 *default-log-port* "ERROR: malformed server-dat="server-dat))))))) - Index: common.scm ================================================================== --- common.scm +++ common.scm @@ -15,23 +15,32 @@ ;; ;; You should have received a copy of the GNU General Public License ;; along with Megatest. If not, see . ;;====================================================================== + +(declare (unit common)) +(declare (uses commonmod)) +(declare (uses rmtmod)) +(declare (uses debugprint)) +(declare (uses mtargs)) (use srfi-1 data-structures posix regex-case (prefix base64 base64:) format dot-locking csv-xml z3 udp ;; sql-de-lite hostinfo md5 message-digest typed-records directory-utils stack matchable regex posix (srfi 18) extras ;; tcp (prefix nanomsg nmsg:) (prefix sqlite3 sqlite3:) pkts (prefix dbi dbi:) ) +(use posix-extras pathname-expand files) -(declare (unit common)) -(declare (uses commonmod)) -(import commonmod) + +(import commonmod + debugprint + rmtmod + (prefix mtargs args:)) (include "common_records.scm") ;; (require-library margs) @@ -47,12 +56,14 @@ (define (stop-the-train) (thread-start! (make-thread (lambda () (let loop () (if (and *toppath* (file-exists? (conc *toppath*"/stop-the-train"))) - (begin - (debug:print 0 *default-log-port* "ERROR: found file "*toppath*"/stop-the-train, exiting immediately") + (let* ((msg (conc "ERROR: found file "*toppath*"/stop-the-train, exiting immediately"))) + ;; yes, print to current-output-port AND *default-log-port*, annoying but necessary I think + (print msg) + (debug:print 0 *default-log-port* msg) (exit 1))) (thread-sleep! 
5) (loop)))))) ;; execute thunk, return value. If exception thrown, trap exception, return #f, and emit nonfatal condition note to *default-log-port* . @@ -161,19 +172,16 @@ ;; (define *db-multi-sync-mutex* (make-mutex)) ;; protect access to *db-sync-in-progress*, *db-last-sync* ;; task db (define *task-db* #f) ;; (vector db path-to-db) (define *db-access-allowed* #t) ;; flag to allow access ;; (define *db-access-mutex* (make-mutex)) ;; moved to dbfile -(define *db-transaction-mutex* (make-mutex)) +;; (define *db-transaction-mutex* (make-mutex)) (define *db-cache-path* #f) ;; (define *db-with-db-mutex* (make-mutex)) (define *db-api-call-time* (make-hash-table)) ;; hash of command => (list of times) -;; no sync db -;; (define *no-sync-db* #f) ;; moved to dbfile ;; SERVER -(define *my-client-signature* #f) (define *transport-type* 'http) ;; override with [server] transport http|rpc|nmsg (define *runremote* #f) ;; if set up for server communication this will hold ;; (define *max-cache-size* 0) (define *logged-in-clients* (make-hash-table)) (define *server-id* #f) @@ -210,12 +218,10 @@ ;; Miscellaneous (define *triggers-mutex* (make-mutex)) ;; block overlapping processing of triggers (define *numcpus-cache* (make-hash-table)) -(use posix-extras pathname-expand files) - ;; this plugs a hole in posix-extras in recent chicken versions > 4.9) (let-values (( (chicken-release-number chicken-major-version) (apply values (map string->number (take @@ -246,36 +252,13 @@ (define *common:this-exe-dir* (pathname-directory *common:this-exe-fullpath*)) (define *common:this-exe-name* (pathname-strip-directory *common:this-exe-fullpath*)) (define (common:get-sync-lock-filepath) (let* ((tmp-area (common:get-db-tmp-area)) - (lockfile (conc tmp-area "/megatest.db.sync-lock"))) + (lockfile (conc tmp-area "/megatest.db.lock"))) lockfile)) -;;====================================================================== -;; when called from a wrapper I need sometimes to find the calling -;; wrapper, this is for dashboard to find the correct megatest. -;; -(define (common:find-local-megatest #!optional (progname "megatest")) - (let ((res (filter file-exists? - (map (lambda (updir) - (let* ((lm (car (argv))) - (dir (pathname-directory lm)) - (exe (pathname-strip-directory lm))) - (conc (if dir (conc dir "/") "") - (case (string->symbol exe) - ((dboard) (conc updir progname)) - ((mtest) (conc updir progname)) - ((dashboard) progname) - (else exe))))) - '("../../" "../"))))) - (if (null? res) - (begin - (debug:print 0 *default-log-port* "Failed to find this executable! Using what can be found on the path") - progname) - (car res)))) - (define *common:logpro-exit-code->status-sym-alist* '( ( 0 . pass ) ( 1 . fail ) ( 2 . warn ) ( 3 . check ) @@ -315,26 +298,39 @@ (else "FAIL"))) (define (common:logpro-exit-code->test-status exit-code) (status-sym->string (common:logpro-exit-code->status-sym exit-code))) +;; (defstruct remote + + ;; transport to be used + ;; http - use http-transport + ;; http-read-cached - use http-transport for writes but in-mem cached for reads + (rmode 'http) (hh-dat (let ((res (or (server:choose-server *toppath* 'homehost) (cons #f #f)))) (assert (pair? 
res)(conc "FATAL: hh-dat should be a pair, got "res)) res)) (server-url #f) ;; (server:check-if-running *toppath*) #f)) (server-id #f) - (server-info (if *toppath* (server:check-if-running *toppath*) #f)) + (server-info #f) ;; (if *toppath* (server:check-if-running *toppath*) #f)) (last-server-check 0) ;; last time we checked to see if the server was alive - (connect-time (current-seconds)) - (conndat #f) - (transport *transport-type*) + (connect-time (current-seconds)) ;; when we first connected + (last-access (current-seconds)) ;; last time we talked to server + ;; (conndat #f) ;; iface port api-uri api-url api-req seconds server-id (server-timeout (server:expiration-timeout)) (force-server #f) (ro-mode #f) - (ro-mode-checked #f)) ;; flag that indicates we have checked for ro-mode + (ro-mode-checked #f) ;; flag that indicates we have checked for ro-mode + + ;; conndat stuff + (iface #f) ;; TODO: Consolidate this data with server-url and server-info above + (port #f) + (api-url #f) + (api-uri #f) + (api-req #f)) ;; launching and hosts (defstruct host (reachable #f) (last-update 0) @@ -408,27 +404,48 @@ (define (common:version-changed?) (not (equal? (common:get-last-run-version) (common:version-signature)))) + +;; From 1.70 to 1.80, db's are compatible. + (define (common:api-changed?) - (not (equal? (substring (->string megatest-version) 0 4) - (substring (conc (common:get-last-run-version)) 0 4)))) + (let* ( + (megatest-major-version (substring (->string megatest-version) 0 4)) + (run-major-version (substring (conc (common:get-last-run-version)) 0 4)) + ) + (and (not (equal? megatest-major-version "1.80")) + (not (equal? megatest-major-version megatest-run-version))) + ) +) ;;====================================================================== ;; Move me elsewhere ... ;; RADT => Why do we meed the version check here, this is called only if version misma ;; (define (common:cleanup-db dbstruct #!key (full #f)) - (apply db:multi-db-sync - dbstruct - 'schema - 'killservers - 'adj-target - 'new2old - '(dejunk) - ) + (case (rmt:transport-mode) + ((http) + (apply db:multi-db-sync + dbstruct + 'schema + 'killservers + 'adj-target + 'new2old + '(dejunk) + )) + ((tcp nfs) + (debug:print 0 *default-log-port* "WARNING: cleanup-db NOT implemented yet for tcp and nfs.") + #;(apply db:multi-db-sync + dbstruct + 'schema + 'killservers + 'adj-target + 'new2old + '(dejunk) + ))) (if (common:api-changed?) (common:set-last-run-version))) (define (common:snapshot-file filepath #!key (subdir ".") ) (if (file-exists? filepath) @@ -520,11 +537,11 @@ (define (common:rotate-logs) (let* ((all-files (make-hash-table)) (stats (make-hash-table)) (inc-stat (lambda (key) (hash-table-set! stats key (+ (hash-table-ref/default stats key 0) 1)))) - (max-allowed (string->number (or (configf:lookup *configdat* "setup" "max-logfiles") "300")))) ;; name -> age + (max-allowed (string->number (or (configf:lookup *configdat* "setup" "max-logfiles") "600")))) ;; name -> age (if (not (directory-exists? "logs"))(create-directory "logs")) (directory-fold (lambda (file rem) (handle-exceptions exn @@ -599,14 +616,15 @@ ;;====================================================================== ;; Force a megatest cleanup-db if version is changed and skip-version-check not specified ;; Do NOT check if not on homehost! ;; (define (common:exit-on-version-changed) - (if (common:on-homehost?) + (if (and *toppath* ;; do nothing if *toppath* not yet provided + (common:on-homehost?)) (if (common:api-changed?) 
(let* ((mtconf (conc (get-environment-variable "MT_RUN_AREA_HOME") "/megatest.config")) - (dbfile (conc (get-environment-variable "MT_RUN_AREA_HOME") "/megatest.db")) + (dbfile (conc (get-environment-variable "MT_RUN_AREA_HOME") ".mtdb/main.db")) (read-only (not (file-write-access? dbfile))) (dbstruct (db:setup #t))) ;; (db:setup-db *dbstruct-dbs* *toppath* #f))) ;; #t))) (debug:print 0 *default-log-port* "WARNING: Version mismatch!\n" " expected: " (common:version-signature) "\n" @@ -626,14 +644,14 @@ (common:cleanup-db dbstruct))) ((not (common:file-exists? mtconf)) (debug:print 0 *default-log-port* " megatest.config does not exist in this area. Cannot proceed with megatest version migration.") (exit 1)) ((not (common:file-exists? dbfile)) - (debug:print 0 *default-log-port* " megatest.db does not exist in this area. Cannot proceed with megatest version migration.") + (debug:print 0 *default-log-port* " .mtdb/main.db does not exist in this area. Cannot proceed with megatest version migration.") (exit 1)) ((not (eq? (current-user-id)(file-owner mtconf))) - (debug:print 0 *default-log-port* " You do not own megatest.db in this area. Cannot proceed with megatest version migration.") + (debug:print 0 *default-log-port* " You do not own .mtdb/main.db in this area. Cannot proceed with megatest version migration.") (exit 1)) (read-only (debug:print 0 *default-log-port* " You have read-only access to this area. Cannot proceed with megatest version migration.") (exit 1)) (else @@ -710,22 +728,21 @@ "")))) (define (common:alist-ref/default key alist default) (or (alist-ref key alist) default)) -(define (common:low-noise-print waitval . keys) - (let* ((key (string-intersperse (map conc keys) "-" )) - (lasttime (hash-table-ref/default *common:denoise* key 0)) - (currtime (current-seconds))) - (if (> (- currtime lasttime) waitval) - (begin - (hash-table-set! *common:denoise* key currtime) - #t) - #f))) - -(define (common:get-megatest-exe) - (or (getenv "MT_MEGATEST") "megatest")) +;; moved into commonmod +;; +;; (define (common:low-noise-print waitval . keys) +;; (let* ((key (string-intersperse (map conc keys) "-" )) +;; (lasttime (hash-table-ref/default *common:denoise* key 0)) +;; (currtime (current-seconds))) +;; (if (> (- currtime lasttime) waitval) +;; (begin +;; (hash-table-set! *common:denoise* key currtime) +;; #t) +;; #f))) (define (common:read-encoded-string instr) (handle-exceptions exn (handle-exceptions @@ -946,49 +963,45 @@ (handle-exceptions exn (begin (debug:print-error 0 *default-log-port* "Couldn't create path to " *db-cache-path* ", exn=" exn) (exit 1)) - (let* ((tsname (common:get-testsuite-name)) + (let* ((toppath (common:real-path *toppath*)) + (tsname (common:get-testsuite-name)) (dbpath (common:get-create-writeable-dir (list (conc "/tmp/" (current-user-name) "/megatest_localdb/" tsname "/" - (string-translate *toppath* "/" ".")) + (string-translate toppath "/" ".")) (conc "/tmp/" (current-process-id) ;; just in case we have an issue with the dir by own user name "/megatest_localdb/" tsname - (string-translate *toppath* "/" ".")) + (string-translate toppath "/" ".")) )))) (set! *db-cache-path* dbpath) - ;; ensure megatest area has .megatest - (let ((dbarea (conc *toppath* "/.megatest"))) + ;; ensure megatest area has .mtdb + (let ((dbarea (conc *toppath* "/.mtdb"))) (if (not (file-exists? 
dbarea)) (create-directory dbarea))) - ;; ensure tmp area has .megatest - (let ((dbarea (conc dbpath "/.megatest"))) + ;; ensure tmp area has .mtdb + (let ((dbarea (conc dbpath "/.mtdb"))) (if (not (file-exists? dbarea)) (create-directory dbarea))) dbpath)) #f))) (define (common:get-area-path-signature) (message-digest-string (md5-primitive) *toppath*)) -(define (common:get-signature str) - (message-digest-string (md5-primitive) str)) - ;;====================================================================== ;; E X I T H A N D L I N G ;;====================================================================== (define (common:run-sync?) - (and (common:on-homehost?) - (args:get-arg "-server"))) - -(define (common:human-time) - (time->string (seconds->local-time (current-seconds)) "%Y-%m-%d %H:%M:%S")) + (and *toppath* ;; gate if called before *toppath* is set + (common:on-homehost?) + (args:get-arg "-server"))) (define (std-signal-handler signum) ;; (signal-mask! signum) (set! *time-to-exit* #t) @@ -1048,26 +1061,10 @@ (define (common:get-disks #!key (configf #f)) (hash-table-ref/default (or configf (read-config "megatest.config" #f #t)) "disks" '("none" ""))) -;;====================================================================== -;; return first command that exists, else #f -;; -(define (common:which cmds) - (if (null? cmds) - #f - (let loop ((hed (car cmds)) - (tal (cdr cmds))) - (let ((res (with-input-from-pipe (conc "which " hed) read-line))) - (if (and (string? res) - (common:file-exists? res)) - res - (if (null? tal) - #f - (loop (car tal)(cdr tal)))))))) - (define (common:get-install-area) (let ((exe-path (car (argv)))) (if (common:file-exists? exe-path) (handle-exceptions exn @@ -1345,11 +1342,11 @@ (else (debug:print 0 *default-log-port* "ERROR: Bad server force setting " force-setting ", forcing server.") #t)))) ;; default to requiring server (if force-result (begin - (debug:print-info 0 *default-log-port* "forcing use of server, force setting is \"" force-setting "\".") + (debug:print-info 0 *default-log-port* "ATTENTION! Forcing use of server, force setting is \"" force-setting "\".") #t) #f))) ;;====================================================================== ;; M I S C L I S T S @@ -1598,10 +1595,30 @@ path) ;; just give up (with-input-from-pipe (conc "/bin/readlink -f " path) (lambda () (read-line))))) + +;; for reasons I don't understand multiple calls to real-path in parallel threads +;; must be protected by mutexes +;; +(define (common:real-path inpath) + ;; (process:cmd-run-with-stderr->list "readlink" "-f" inpath)) ;; cmd . params) + ;; (let-values + ;; (((inp oup pid) (process "readlink" (list "-f" inpath)))) + ;; (with-input-from-port inp + ;; (let loop ((inl (read-line)) + ;; (res #f)) + ;; (print "inl=" inl) + ;; (if (eof-object? inl) + ;; (begin + ;; (close-input-port inp) + ;; (close-output-port oup) + ;; ;; (process-wait pid) + ;; res) + ;; (loop (read-line) inl)))))) + (with-input-from-pipe (conc "readlink -f " inpath) read-line)) ;;====================================================================== ;; returns *effective load* (not normalized) ;; (define (common:get-intercept onemin fivemin) @@ -1797,25 +1814,22 @@ (> (length res) 2)) res) ((eq? res #f) default) ;; add messages? ((eq? 
res #f) default) ;; this would be the #eof (else default)))) + +(define (common:ssh-get-loadavg remote-host) + (let ((inp (open-input-pipe (conc "ssh " remote-host " \"cat /proc/loadavg;cat /proc/cpuinfo;echo end\"")))) + (let* ((res (read-lines inp))) + (close-input-pipe inp) + res))) (define (common:get-normalized-cpu-load-raw remote-host) (let* ((actual-host (or remote-host (get-host-name)))) ;; #f is localhost (or (common:get-cached-info actual-host "normalized-load") (let ((data (if remote-host - (let ((inp #f)) - (handle-exceptions - exn - (begin - (close-input-port inp) - '()) - (set! inp (open-input-port (conc "ssh " remote-host " \"cat /proc/loadavg;cat /proc/cpuinfo;echo end\""))) - (let* ((res (read-lines inp))) - (close-input-port inp) - res))) + (common:ssh-get-loadavg remote-host) (append (with-input-from-file "/proc/loadavg" read-lines) (with-input-from-file "/proc/cpuinfo" read-lines) @@ -1989,16 +2003,27 @@ (host-last-used-set! rec curr-time) new-best) (if (null? tal) #f (loop (car tal)(cdr tal) best-host))))))))) (define (common:wait-for-homehost-load maxnormload msg) - (let* ((hh-dat (if (common:on-homehost?) ;; if we are on the homehost then pass in #f so the calls are local. - #f - (server:choose-server *toppath* 'homehost))) - (hh (if hh-dat (car hh-dat) #f))) - (common:wait-for-normalized-load maxnormload msg hh))) - + (let loop ((start-time (current-seconds))) ;; we saw some instances of this being called before *toppath* was set. This might be an early setup race. This delay should help but it is impossible to test... + (if (not *toppath*) + (begin + (debug:print 0 *default-log-port* "ERROR: common:wait-for-homehost-load called before *toppath* set.") + (thread-sleep! 30) + (if (< (- (current-seconds) start-time) 300) + (loop start-time))))) + (case (rmt:transport-mode) + ((http) + (let* ((hh-dat (if (common:on-homehost?) ;; if we are on the homehost then pass in #f so the calls are local. + #f + (server:choose-server *toppath* 'homehost))) + (hh (if hh-dat (car hh-dat) #f))) + (common:wait-for-normalized-load maxnormload msg hh))) + (else + (common:wait-for-normalized-load maxnormload msg (get-host-name))))) + (define (common:get-num-cpus remote-host) (let* ((actual-host (or remote-host (get-host-name)))) ;; hosts had better not be changing the number of cpus too often! (or (hash-table-ref/default *numcpus-cache* actual-host #f) (let* ((numcpus (or (common:get-cached-info actual-host "num-cpus" age: (+ 2592000 (random 3600))) @@ -2023,27 +2048,10 @@ (common:write-cached-info actual-host "num-cpus" result)) result)))) (hash-table-set! *numcpus-cache* actual-host numcpus) numcpus)))) -(define (common:generic-ssh ssh-command proc default #!optional (msg-proc #f)) - (let ((inp #f)) - (handle-exceptions - exn - (begin - (close-input-port inp) - (if msg-proc - (msg-proc) - (debug:print 0 *default-log-port* "Command: \""ssh-command"\" failed. exn="exn)) - default) - (set! inp (open-input-pipe ssh-command)) - (with-input-from-port inp - (lambda () - (let ((res (proc))) - (close-input-port inp) - res)))))) - ;;====================================================================== ;; wait for normalized cpu load to drop below maxload ;; (define (common:wait-for-normalized-load maxnormload msg remote-host #!optional (rem-tries 5)) (let ((num-cpus (common:get-num-cpus remote-host))) @@ -2218,30 +2226,10 @@ (uname #f)) (if (null? 
(car uname-res)) "unknown" (caar uname-res)))) -;; for reasons I don't understand multiple calls to real-path in parallel threads -;; must be protected by mutexes -;; -(define (common:real-path inpath) - ;; (process:cmd-run-with-stderr->list "readlink" "-f" inpath)) ;; cmd . params) - ;; (let-values - ;; (((inp oup pid) (process "readlink" (list "-f" inpath)))) - ;; (with-input-from-port inp - ;; (let loop ((inl (read-line)) - ;; (res #f)) - ;; (print "inl=" inl) - ;; (if (eof-object? inl) - ;; (begin - ;; (close-input-port inp) - ;; (close-output-port oup) - ;; ;; (process-wait pid) - ;; res) - ;; (loop (read-line) inl)))))) - (with-input-from-pipe (conc "readlink -f " inpath) read-line)) - ;;====================================================================== ;; D I S K S P A C E ;;====================================================================== (define (common:get-disk-space-used fpath) @@ -2619,291 +2607,10 @@ (cond (with-vars (common:without-vars fullcmd)) (with-orig-env (common:with-orig-env fullcmd)) (else (common:without-vars fullcmd "MT_.*"))))) -;;====================================================================== -;; T I M E A N D D A T E -;;====================================================================== - -;;====================================================================== -;; Convert strings like "5s 2h 3m" => 60x60x2 + 3x60 + 5 -(define (common:hms-string->seconds tstr) - (let ((parts (string-split-fields "\\w+" tstr)) - (time-secs 0) - ;; s=seconds, m=minutes, h=hours, d=days, M=months, y=years, w=weeks - (trx (regexp "(\\d+)([smhdMyw])"))) - (for-each (lambda (part) - (let ((match (string-match trx part))) - (if match - (let ((val (string->number (cadr match))) - (unt (caddr match))) - (if val - (set! time-secs (+ time-secs (* val - (case (string->symbol unt) - ((s) 1) - ((m) 60) ;; minutes - ((h) 3600) - ((d) 86400) - ((w) 604800) - ((M) 2628000) ;; aproximately one month - ((y) 31536000) - (else #f)))))))))) - parts) - time-secs)) - -(define (seconds->hr-min-sec secs) - (let* ((hrs (quotient secs 3600)) - (min (quotient (- secs (* hrs 3600)) 60)) - (sec (- secs (* hrs 3600)(* min 60)))) - (conc (if (> hrs 0)(conc hrs "hr ") "") - (if (> min 0)(conc min "m ") "") - sec "s"))) - -(define (seconds->time-string sec) - (time->string - (seconds->local-time sec) "%H:%M:%S")) - -(define (seconds->work-week/day-time sec) - (time->string - (seconds->local-time sec) "ww%V.%u %H:%M")) - -(define (seconds->work-week/day sec) - (time->string - (seconds->local-time sec) "ww%V.%u")) - -(define (seconds->year-work-week/day sec) - (time->string - (seconds->local-time sec) "%yww%V.%w")) - -(define (seconds->year-work-week/day-time sec) - (time->string - (seconds->local-time sec) "%Yww%V.%w %H:%M")) - -(define (seconds->year-week/day-time sec) - (time->string - (seconds->local-time sec) "%Yw%V.%w %H:%M")) - -(define (seconds->quarter sec) - (case (string->number - (time->string - (seconds->local-time sec) - "%m")) - ((1 2 3) 1) - ((4 5 6) 2) - ((7 8 9) 3) - ((10 11 12) 4) - (else #f))) - -;;====================================================================== -;; basic ISO8601 format (e.g. 
"2017-02-28 06:02:54") date time => Unix epoch -;; -(define (common:date-time->seconds datetime) - (local-time->seconds (string->time datetime "%Y-%m-%d %H:%M:%S"))) - -;;====================================================================== -;; given span of seconds tstart to tend -;; find start time to mark and mark delta -;; -(define (common:find-start-mark-and-mark-delta tstart tend) - (let* ((deltat (- (max tend (+ tend 10)) tstart)) ;; can't handle runs of less than 4 seconds. Pad it to 10 seconds ... - (result #f) - (min 60) - (hr (* 60 60)) - (day (* 24 hr)) - (yr (* 365 day)) ;; year - (mo (/ yr 12)) - (wk (* day 7))) - (for-each - (lambda (max-blks) - (for-each - (lambda (span) ;; 5 2 1 - (if (not result) - (for-each - (lambda (timeunit timesym) ;; year month day hr min sec - (if (not result) - (let* ((time-blk (* span timeunit)) - (num-blks (quotient deltat time-blk))) - (if (and (> num-blks 4)(< num-blks max-blks)) - (let ((first (* (quotient tstart time-blk) time-blk))) - (set! result (list span timeunit time-blk first timesym)) - ))))) - (list yr mo wk day hr min 1) - '( y mo w d h m s)))) - (list 8 6 5 2 1))) - '(5 10 15 20 30 40 50 500)) - (if values - (apply values result) - (values 0 day 1 0 'd)))) - -;;====================================================================== -;; given x y lim return the cron expansion -;; -(define (common:expand-cron-slash x y lim) - (let loop ((curr x) - (res `())) - (if (< curr lim) - (loop (+ curr y) (cons curr res)) - (reverse res)))) - -;;====================================================================== -;; expand a complex cron string to a list of cron strings -;; -;; x/y => x, x+y, x+2y, x+3y while x+Ny a, b ,c -;; -;; NOTE: with flatten a lot of the crud below can be factored down. -;; -(define (common:cron-expand cron-str) - (if (list? cron-str) - (flatten - (fold (lambda (x res) - (if (list? x) - (let ((newres (map common:cron-expand x))) - (append x newres)) - (cons x res))) - '() - cron-str)) ;; (map common:cron-expand cron-str)) - (let ((cron-items (string-split cron-str)) - (slash-rx (regexp "(\\d+)/(\\d+)")) - (comma-rx (regexp ".*,.*")) - (max-vals '((min . 60) - (hour . 24) - (dayofmonth . 28) ;;; BUG!!!! This will be a bug for some combinations - (month . 12) - (dayofweek . 7)))) - (if (< (length cron-items) 5) ;; bad spec - cron-str ;; `(,cron-str) ;; just return the string, something downstream will fix it - (let loop ((hed (car cron-items)) - (tal (cdr cron-items)) - (type 'min) - (type-tal '(hour dayofmonth month dayofweek)) - (res '())) - (regex-case - hed - (slash-rx ( _ base incr ) (let* ((basen (string->number base)) - (incrn (string->number incr)) - (expanded-vals (common:expand-cron-slash basen incrn (alist-ref type max-vals))) - (new-list-crons (fold (lambda (x myres) - (cons (conc (if (null? res) - "" - (conc (string-intersperse res " ") " ")) - x " " (string-intersperse tal " ")) - myres)) - '() expanded-vals))) - ;; (print "new-list-crons: " new-list-crons) - ;; (fold (lambda (x res) - ;; (if (list? x) - ;; (let ((newres (map common:cron-expand x))) - ;; (append x newres)) - ;; (cons x res))) - ;; '() - (flatten (map common:cron-expand new-list-crons)))) - ;; (map common:cron-expand (map common:cron-expand new-list-crons)))) - (else (if (null? 
tal) - cron-str - (loop (car tal)(cdr tal)(car type-tal)(cdr type-tal)(append res (list hed))))))))))) - -;;====================================================================== -;; given a cron string and the last time event was processed return #t to run or #f to not run -;; -;; min hour dayofmonth month dayofweek -;; 0-59 0-23 1-31 1-12 0-6 ### NOTE: dayofweek does not include 7 -;; -;; #t => yes, run the job -;; #f => no, do not run the job -;; -(define (common:cron-event cron-str now-seconds-in last-done) ;; ref-seconds = #f is NOW. - (let* ((cron-items (map string->number (string-split cron-str))) - (now-seconds (or now-seconds-in (current-seconds))) - (now-time (seconds->local-time now-seconds)) - (last-done-time (seconds->local-time last-done)) - (all-times (make-hash-table))) - ;; (print "cron-items: " cron-items "(length cron-items): " (length cron-items)) - (if (not (eq? (length cron-items) 5)) ;; don't even try to figure out junk strings - #f - (match-let ((( cmin chour cdayofmonth cmonth cdayofweek) - cron-items) - ;; 0 1 2 3 4 5 6 - ((nsec nmin nhour ndayofmonth nmonth nyr ndayofweek n7 n8 n9) - (vector->list now-time)) - ((lsec lmin lhour ldayofmonth lmonth lyr ldayofweek l7 l8 l9) - (vector->list last-done-time))) - ;; create all possible time slots - ;; remove invalid slots due to (for example) day of week - ;; get the start and end entries for the ref-seconds (current) time - ;; if last-done > ref-seconds => this is an ERROR! - ;; does the last-done time fall in the legit region? - ;; yes => #f do not run again this command - ;; no => #t ok to run the command - (for-each ;; month - (lambda (month) - (for-each ;; dayofmonth - (lambda (dom) - (for-each - (lambda (hr) ;; hour - (for-each - (lambda (minute) ;; minute - (let ((copy-now (apply vector (vector->list now-time)))) - (vector-set! copy-now 0 0) ;; force seconds to zero - (vector-set! copy-now 1 minute) - (vector-set! copy-now 2 hr) - (vector-set! copy-now 3 dom) ;; dom is already corrected for zero referenced - (vector-set! copy-now 4 month) - (let* ((copy-now-secs (local-time->seconds copy-now)) - (new-copy (seconds->local-time copy-now-secs))) ;; remake the time vector - (if (or (not cdayofweek) - (equal? (vector-ref new-copy 6) - cdayofweek)) ;; if the day is specified and a match OR if the day is NOT specified - (if (or (not cdayofmonth) - (equal? (vector-ref new-copy 3) - (+ 1 cdayofmonth))) ;; if the month is specified and a match OR if the month is NOT specified - (hash-table-set! all-times copy-now-secs new-copy)))))) - (if cmin - `(,cmin) ;; if given cmin, have to use it - (list (- nmin 1) nmin (+ nmin 1))))) ;; minute - (if chour - `(,chour) - (list (- nhour 1) nhour (+ nhour 1))))) ;; hour - (if cdayofmonth - `(,cdayofmonth) - (list (- ndayofmonth 1) ndayofmonth (+ ndayofmonth 1))))) - (if cmonth - `(,cmonth) - (list (- nmonth 1) nmonth (+ nmonth 1)))) - (let ((before #f) - (is-in #f)) - (for-each - (lambda (moment) - (if (and before - (<= before now-seconds) - (>= moment now-seconds)) - (begin - ;; (print) - ;; (print "Before: " (time->string (seconds->local-time before))) - ;; (print "Now: " (time->string (seconds->local-time now-seconds))) - ;; (print "After: " (time->string (seconds->local-time moment))) - ;; (print "Last: " (time->string (seconds->local-time last-done))) - (if (< last-done before) - (set! is-in before)) - )) - (set! 
before moment)) - (sort (hash-table-keys all-times) <)) - is-in))))) - -(define (common:extended-cron cron-str now-seconds-in last-done) - (let ((expanded-cron (common:cron-expand cron-str))) - (if (string? expanded-cron) - (common:cron-event expanded-cron now-seconds-in last-done) - (let loop ((hed (car expanded-cron)) - (tal (cdr expanded-cron))) - (if (common:cron-event hed now-seconds-in last-done) - #t - (if (null? tal) - #f - (loop (car tal)(cdr tal)))))))) - ;;====================================================================== ;; C O L O R S ;;====================================================================== (define (common:name->iup-color name) @@ -3139,49 +2846,10 @@ (if (null? tal) fallback-launcher (loop (car tal)(cdr tal)))))))) fallback-launcher))) -;;====================================================================== -;; NMSG AND NEW API -;;====================================================================== -;; -;; ;;====================================================================== -;; ;; nm based server experiment, keep around for now. -;; ;; -;; (define (nm:start-server dbconn #!key (given-host-name #f)) -;; (let* ((srvdat (start-raw-server given-host-name: given-host-name)) -;; (host-name (srvdat-host srvdat)) -;; (soc (srvdat-soc srvdat))) -;; -;; ;; start the queue processor (save for second round of development) -;; ;; -;; (thread-start! (make-thread! (lambda ()(queue-processor dbconn) "Queue processor"))) -;; ;; msg is an alist -;; ;; 'r host:port <== where to return the data -;; ;; 'p params <== data to apply the command to -;; ;; 'e j|s|l <== encoding of the params. default is s (sexp), if not specified is assumed to be default -;; ;; 'c command <== look up the function to call using this key -;; ;; -;; (let loop ((msg-in (nn-recv soc))) -;; (if (not (equal? msg-in "quit")) -;; (let* ((dat (decode msg-in)) -;; (host-port (alist-ref 'r dat)) ;; this is for the reverse req rep where the server is a client of the original client -;; (params (alist-ref 'p dat)) -;; (command (let ((c (alist-ref 'c dat)))(if c (string->symbol c) #f))) -;; (all-good (and host-port params command (hash-table-exists? *commands* command)))) -;; (if all-good -;; (let ((cmddat (make-qitem -;; command: command -;; host-port: host-port -;; params: params))) -;; (queue-push cmddat) ;; put request into the queue -;; (nn-send soc "queued")) ;; reply with "queued" -;; (print "ERROR: ["(common:human-time)"] BAD request " dat)) -;; (loop (nn-recv soc))))) -;; (nn-close soc))) - ;;====================================================================== ;; D A S H B O A R D U S E R V I E W S ;;====================================================================== ;;====================================================================== @@ -3347,11 +3015,17 @@ (debug:print 0 *default-log-port* "ERROR: packets directory path " pktsdir " is not a directory.")) ((not (file-read-access? 
pktsdir)) (debug:print 0 *default-log-port* "ERROR: packets directory path " pktsdir " is not readable.")) (else (debug:print-info 0 *default-log-port* "Loading packets found in " pktsdir) - (let ((pkts (glob (conc pktsdir "/*.pkt")))) + (let ((pkts (glob (conc pktsdir "/*.pkt"))) + (sqdb (dbi:db-conn pdb)) + ) + ;; Put this in a transaction to avoid issues overloading the db + (sqlite3:with-transaction + sqdb + (lambda () (for-each (lambda (pkt) (let* ((uuid (cadr (string-match ".*/([0-9a-f]+).pkt" pkt))) (exists (lookup-by-uuid pdb uuid #f))) (if (not exists) @@ -3362,11 +3036,11 @@ (ptype (alist-ref 'T apkt))) (add-to-queue pdb pktdat uuid (or ptype 'cmd) #f 0) (debug:print 4 *default-log-port* "Added " uuid " of type " ptype " to queue")) (debug:print 4 *default-log-port* "pkt: " uuid " exists, skipping...") ))) - pkts))))) + pkts))))))) pktsdirs)) use-lt: use-lt)) (define (common:get-pkt-alists pkts) (map (lambda (x) Index: common_records.scm ================================================================== --- common_records.scm +++ common_records.scm @@ -81,133 +81,133 @@ ;; this was cached based on results from profiling but it turned out the profiling ;; somehow went wrong - perhaps too many processes writing to it. Leaving the caching ;; in for now but can probably take it out later. ;; -(define (debug:calc-verbosity vstr) - (or (hash-table-ref/default *verbosity-cache* vstr #f) - (let ((res (cond - ((number? vstr) vstr) - ((not (string? vstr)) 1) - ;; ((string-match "^\\s*$" vstr) 1) - (vstr (let ((debugvals (filter number? (map string->number (string-split vstr ","))))) - (cond - ((> (length debugvals) 1) debugvals) - ((> (length debugvals) 0)(car debugvals)) - (else 1)))) - ((args:get-arg "-v") 2) - ((args:get-arg "-q") 0) - (else 1)))) - (hash-table-set! *verbosity-cache* vstr res) - res))) - -;; check verbosity, #t is ok -(define (debug:check-verbosity verbosity vstr) - (if (not (or (number? verbosity) - (list? verbosity))) - (begin - (print "ERROR: Invalid debug value \"" vstr "\"") - #f) - #t)) - -(define (debug:debug-mode n) - (cond - ((and (number? *verbosity*) ;; number number - (number? n)) - (<= n *verbosity*)) - ((and (list? *verbosity*) ;; list number - (number? n)) - (member n *verbosity*)) - ((and (list? *verbosity*) ;; list list - (list? n)) - (not (null? (lset-intersection! eq? *verbosity* n)))) - ((and (number? *verbosity*) - (list? n)) - (member *verbosity* n)))) - -(define (debug:setup) - (let ((debugstr (or (args:get-arg "-debug") - (args:get-arg "-debug-noprop") - (getenv "MT_DEBUG_MODE")))) - (set! *verbosity* (debug:calc-verbosity debugstr)) - (debug:check-verbosity *verbosity* debugstr) - ;; if we were handed a bad verbosity rule then we will override it with 1 and continue - (if (not *verbosity*)(set! *verbosity* 1)) - (if (and (not (args:get-arg "-debug-noprop")) - (or (args:get-arg "-debug") - (not (getenv "MT_DEBUG_MODE")))) - (setenv "MT_DEBUG_MODE" (if (list? *verbosity*) - (string-intersperse (map conc *verbosity*) ",") - (conc *verbosity*)))))) - -(define (debug:print n e . params) - (if (debug:debug-mode n) - (with-output-to-port (or e (current-error-port)) - (lambda () - (if *logging* - (db:log-event (apply conc params)) - (apply print params) - ))))) +;; (define (debug:calc-verbosity vstr) +;; (or (hash-table-ref/default *verbosity-cache* vstr #f) +;; (let ((res (cond +;; ((number? vstr) vstr) +;; ((not (string? vstr)) 1) +;; ;; ((string-match "^\\s*$" vstr) 1) +;; (vstr (let ((debugvals (filter number? 
(map string->number (string-split vstr ","))))) +;; (cond +;; ((> (length debugvals) 1) debugvals) +;; ((> (length debugvals) 0)(car debugvals)) +;; (else 1)))) +;; ((args:get-arg "-v") 2) +;; ((args:get-arg "-q") 0) +;; (else 1)))) +;; (hash-table-set! *verbosity-cache* vstr res) +;; res))) + +;; ;; check verbosity, #t is ok +;; (define (debug:check-verbosity verbosity vstr) +;; (if (not (or (number? verbosity) +;; (list? verbosity))) +;; (begin +;; (print "ERROR: Invalid debug value \"" vstr "\"") +;; #f) +;; #t)) +;; +;; (define (debug:debug-mode n) +;; (cond +;; ((and (number? *verbosity*) ;; number number +;; (number? n)) +;; (<= n *verbosity*)) +;; ((and (list? *verbosity*) ;; list number +;; (number? n)) +;; (member n *verbosity*)) +;; ((and (list? *verbosity*) ;; list list +;; (list? n)) +;; (not (null? (lset-intersection! eq? *verbosity* n)))) +;; ((and (number? *verbosity*) +;; (list? n)) +;; (member *verbosity* n)))) +;; +;; (define (debug:setup) +;; (let ((debugstr (or (args:get-arg "-debug") +;; (args:get-arg "-debug-noprop") +;; (getenv "MT_DEBUG_MODE")))) +;; (set! *verbosity* (debug:calc-verbosity debugstr)) +;; (debug:check-verbosity *verbosity* debugstr) +;; ;; if we were handed a bad verbosity rule then we will override it with 1 and continue +;; (if (not *verbosity*)(set! *verbosity* 1)) +;; (if (and (not (args:get-arg "-debug-noprop")) +;; (or (args:get-arg "-debug") +;; (not (getenv "MT_DEBUG_MODE")))) +;; (setenv "MT_DEBUG_MODE" (if (list? *verbosity*) +;; (string-intersperse (map conc *verbosity*) ",") +;; (conc *verbosity*)))))) +;; +;; (define (debug:print n e . params) +;; (if (debug:debug-mode n) +;; (with-output-to-port (or e (current-error-port)) +;; (lambda () +;; (if *logging* +;; (db:log-event (apply conc params)) +;; (apply print params) +;; ))))) ;; Brandon's debug printer shortcut (indulge me :) -(define *BB-process-starttime* (current-milliseconds)) -(define (BB> . in-args) - (let* ((stack (get-call-chain)) - (location "??")) - (for-each - (lambda (frame) - (let* ((this-loc (vector-ref frame 0)) - (temp (string-split (->string this-loc) " ")) - (this-func (if (and (list? temp) (> (length temp) 1)) (cadr temp) "???"))) - (if (equal? this-func "BB>") - (set! location this-loc)))) - stack) - (let* ((color-on "\x1b[1m") - (color-off "\x1b[0m") - (dp-args - (append - (list 0 *default-log-port* - (conc color-on location "@"(/ (- (current-milliseconds) *BB-process-starttime*) 1000) color-off " ") ) - in-args))) - (apply debug:print dp-args)))) - -(define *BBpp_custom_expanders_list* (make-hash-table)) - - - -;; register hash tables with BBpp. -(hash-table-set! *BBpp_custom_expanders_list* HASH_TABLE: - (cons hash-table? hash-table->alist)) - -;; test name converter -(define (BBpp_custom_converter arg) - (let ((res #f)) - (for-each - (lambda (custom-type-name) - (let* ((custom-type-info (hash-table-ref *BBpp_custom_expanders_list* custom-type-name)) - (custom-type-test (car custom-type-info)) - (custom-type-converter (cdr custom-type-info))) - (when (and (not res) (custom-type-test arg)) - (set! res (custom-type-converter arg))))) - (hash-table-keys *BBpp_custom_expanders_list*)) - (if res (BBpp_ res) arg))) - -(define (BBpp_ arg) - (cond - ;;((SOMESTRUCT? arg) (cons SOMESTRUCT: (SOMESTRUCT->alist arg))) - ;;((dboard:tabdat? arg) (cons dboard:tabdat: (dboard:tabdat->alist arg))) - ((hash-table? arg) - (let ((al (hash-table->alist arg))) - (BBpp_ (cons HASH_TABLE: al)))) - ((null? arg) '()) - ;;((list? arg) (cons (BBpp_ (car arg)) (BBpp_ (cdr arg)))) - ((pair? 
arg) (cons (BBpp_ (car arg)) (BBpp_ (cdr arg)))) - (else (BBpp_custom_converter arg)))) - -;; Brandon's pretty printer. It expands hashes and custom types in addition to regular pp -(define (BBpp arg) - (pp (BBpp_ arg))) +;; (define *BB-process-starttime* (current-milliseconds)) +;; (define (BB> . in-args) +;; (let* ((stack (get-call-chain)) +;; (location "??")) +;; (for-each +;; (lambda (frame) +;; (let* ((this-loc (vector-ref frame 0)) +;; (temp (string-split (->string this-loc) " ")) +;; (this-func (if (and (list? temp) (> (length temp) 1)) (cadr temp) "???"))) +;; (if (equal? this-func "BB>") +;; (set! location this-loc)))) +;; stack) +;; (let* ((color-on "\x1b[1m") +;; (color-off "\x1b[0m") +;; (dp-args +;; (append +;; (list 0 *default-log-port* +;; (conc color-on location "@"(/ (- (current-milliseconds) *BB-process-starttime*) 1000) color-off " ") ) +;; in-args))) +;; (apply debug:print dp-args)))) +;; +;; (define *BBpp_custom_expanders_list* (make-hash-table)) +;; +;; +;; +;; ;; register hash tables with BBpp. +;; (hash-table-set! *BBpp_custom_expanders_list* HASH_TABLE: +;; (cons hash-table? hash-table->alist)) +;; +;; ;; test name converter +;; (define (BBpp_custom_converter arg) +;; (let ((res #f)) +;; (for-each +;; (lambda (custom-type-name) +;; (let* ((custom-type-info (hash-table-ref *BBpp_custom_expanders_list* custom-type-name)) +;; (custom-type-test (car custom-type-info)) +;; (custom-type-converter (cdr custom-type-info))) +;; (when (and (not res) (custom-type-test arg)) +;; (set! res (custom-type-converter arg))))) +;; (hash-table-keys *BBpp_custom_expanders_list*)) +;; (if res (BBpp_ res) arg))) +;; +;; (define (BBpp_ arg) +;; (cond +;; ;;((SOMESTRUCT? arg) (cons SOMESTRUCT: (SOMESTRUCT->alist arg))) +;; ;;((dboard:tabdat? arg) (cons dboard:tabdat: (dboard:tabdat->alist arg))) +;; ((hash-table? arg) +;; (let ((al (hash-table->alist arg))) +;; (BBpp_ (cons HASH_TABLE: al)))) +;; ((null? arg) '()) +;; ;;((list? arg) (cons (BBpp_ (car arg)) (BBpp_ (cdr arg)))) +;; ((pair? arg) (cons (BBpp_ (car arg)) (BBpp_ (cdr arg)))) +;; (else (BBpp_custom_converter arg)))) +;; +;; ;; Brandon's pretty printer. It expands hashes and custom types in addition to regular pp +;; (define (BBpp arg) +;; (pp (BBpp_ arg))) ;(use define-macro) (define-syntax inspect (syntax-rules () [(_ x) @@ -215,40 +215,40 @@ (printf "~a is: ~a\n" 'x (with-output-to-string (lambda () (BBpp x)))) ;; ) ] [(_ x y ...) (begin (inspect x) (inspect y ...))])) -(define (debug:print-error n e . params) - ;; normal print - (if (debug:debug-mode n) - (with-output-to-port (if (port? e) e (current-error-port)) - (lambda () - (if *logging* - (db:log-event (apply conc params)) - ;; (apply print "pid:" (current-process-id) " " params) - (apply print "ERROR: " params) - )))) - ;; pass important messages to stderr - (if (and (eq? n 0)(not (eq? e (current-error-port)))) - (with-output-to-port (current-error-port) - (lambda () - (apply print "ERROR: " params) - )))) - -(define (debug:print-info n e . params) - (if (debug:debug-mode n) - (with-output-to-port (if (port? e) e (current-error-port)) - (lambda () - (if *logging* - (let ((res (format#format #f "INFO: (~a) ~a" n (apply conc params)))) - (db:log-event res)) - ;; (apply print "pid:" (current-process-id) " " "INFO: (" n ") " params) ;; res) - (apply print "INFO: (" n ") " params) ;; res) - ))))) - +;; (define (debug:print-error n e . params) +;; ;; normal print +;; (if (debug:debug-mode n) +;; (with-output-to-port (if (port? 
e) e (current-error-port)) +;; (lambda () +;; (if *logging* +;; (db:log-event (apply conc params)) +;; ;; (apply print "pid:" (current-process-id) " " params) +;; (apply print "ERROR: " params) +;; )))) +;; ;; pass important messages to stderr +;; (if (and (eq? n 0)(not (eq? e (current-error-port)))) +;; (with-output-to-port (current-error-port) +;; (lambda () +;; (apply print "ERROR: " params) +;; )))) +;; +;; (define (debug:print-info n e . params) +;; (if (debug:debug-mode n) +;; (with-output-to-port (if (port? e) e (current-error-port)) +;; (lambda () +;; (if *logging* +;; (let ((res (format#format #f "INFO: (~a) ~a" n (apply conc params)))) +;; (db:log-event res)) +;; ;; (apply print "pid:" (current-process-id) " " "INFO: (" n ") " params) ;; res) +;; (apply print "INFO: (" n ") " params) ;; res) +;; ))))) +;; ;; if a value is printable (i.e. string or number) return the value ;; else return an empty string (define-inline (printable val) (if (or (number? val)(string? val)) val "")) Index: commonmod.scm ================================================================== --- commonmod.scm +++ commonmod.scm @@ -17,21 +17,77 @@ ;; along with Megatest. If not, see . ;;====================================================================== (declare (unit commonmod)) +(declare (uses debugprint)) (use srfi-69) (module commonmod * -(import scheme chicken data-structures extras files) -(import (prefix sqlite3 sqlite3:) - posix typed-records srfi-18 srfi-69 - md5 message-digest - regex srfi-1) +(import scheme) +(cond-expand + (chicken-4 + + (import chicken + ports + + (prefix sqlite3 sqlite3:) + data-structures + extras + files + matchable + md5 + message-digest + pathname-expand + posix + posix-extras + regex + regex-case + srfi-1 + srfi-18 + srfi-69 + typed-records + + debugprint + ) + (use srfi-69)) + (chicken-5 + (import (prefix sqlite3 sqlite3:) + ;; data-structures + ;; extras + ;; files + ;; posix + ;; posix-extras + chicken.base + chicken.condition + chicken.file + chicken.file.posix + chicken.io + chicken.pathname + chicken.process + chicken.process-context + chicken.process-context.posix + chicken.sort + chicken.string + chicken.time + chicken.time.posix + + matchable + md5 + message-digest + pathname-expand + regex + regex-case + srfi-1 + srfi-18 + srfi-69 + typed-records + system-information + ))) ;;====================================================================== ;; CONTENTS ;; ;; config file utils @@ -41,10 +97,16 @@ ;;====================================================================== (include "megatest-version.scm") (include "megatest-fossil-hash.scm") +;; http - use the old http + in /tmp db +;; tcp - use tcp transport with cachedb db +;; nfs - use direct to disk access (read-only) +;; +(define rmt:transport-mode (make-parameter 'tcp)) + (define (get-full-version) (conc megatest-version "-" megatest-fossil-hash)) (define (version-signature) (conc megatest-version "-" (substring megatest-fossil-hash 0 4))) @@ -59,10 +121,22 @@ (begin (hash-table-set! *common:denoise* key currtime) #t) #f))) +;; KEEP THIS ONE +;; +;; client:get-signature + +(define *my-client-signature* #f) + +(define (client:get-signature) + (if *my-client-signature* *my-client-signature* + (let ((sig (conc (get-host-name) " " (current-process-id)))) + (set! 
*my-client-signature* sig) + *my-client-signature*))) + ;;====================================================================== ;; config file utils ;;====================================================================== (define (lookup cfgdat section var) @@ -136,10 +210,58 @@ ;;====================================================================== ;; misc conversion, data manipulation functions ;;====================================================================== +;;====================================================================== +;; return first command that exists, else #f +;; +(define (common:which cmds) + (if (null? cmds) + #f + (let loop ((hed (car cmds)) + (tal (cdr cmds))) + (let ((res (with-input-from-pipe (conc "which " hed) read-line))) + (if (and (string? res) + (file-exists? res)) + res + (if (null? tal) + #f + (loop (car tal)(cdr tal)))))))) + +(define (common:get-megatest-exe) + (let* ((mtexe (or (get-environment-variable "MT_MEGATEST") + (common:which '("megatest")) + "megatest"))) + (if (file-exists? mtexe) + (realpath mtexe) + mtexe))) + +(define (common:get-megatest-exe-dir) + (let* ((mtexe (common:get-megatest-exe))) + (pathname-directory mtexe))) + +;; more generic and comprehensive version of get-megatest-exe +;; +(define (common:get-mtexe) + (let* ((mtpathdir (common:get-megatest-exe-dir))) + (or (common:get-megatest-exe) + (if mtpathdir + (conc mtpathdir"/megatest") + #f) + "megatest"))) + +(define (common:get-megatest-exe-path) + (let* ((mtpathdir (common:get-megatest-exe-dir))) + (conc mtpathdir":"(get-environment-variable "PATH") ":."))) + +(cond-expand + (chicken-4 + (define (realpath x) (resolve-pathname (pathname-expand (or x "/dev/null")) ))) + (chicken-5 + (define (realpath x) (normalize-pathname (pathname-expand (or x "/dev/null")))))) + ;; if it looks like a number -> convert it to a number, else return it ;; (define (lazy-convert inval) (let* ((as-num (if (string? inval)(string->number inval) #f))) (or as-num inval))) @@ -161,10 +283,40 @@ (filter (lambda (x) (not (string-match "^\\s*" x))) val-list)) '()))) +(define (get-cpu-load) + (let* ((load-info (with-input-from-file "/proc/loadavg" read-lines))) + (map string->number (string-split load-info)))) + +(define *current-host-cores* #f) + +(define (get-current-host-cores) + (or *current-host-cores* + (let ((cpu-info (with-input-from-file "/proc/cpuinfo" read-lines))) + (let loop ((lines cpu-info)) + (if (null? lines) + 1 ;; gotta be at least one! + (let* ((inl (car lines)) + (tail (cdr lines)) + (parts (string-split inl))) + (match parts + (("cpu" "cores" ":" num) (string->number num)) + (else (loop tail))))))))) + +(define (number-of-processes-running processname) + (with-input-from-pipe + (conc "ps -def | egrep \""processname"\" |wc -l") + (lambda () + (string->number (read-line))))) + +;; get the normalized (i.e. load / numcpus) for *this* host +;; +(define (get-normalized-cpu-load) + (/ (get-cpu-load)(get-current-host-cores))) + ;;====================================================================== ;; testsuite and area utilites ;;====================================================================== (define (get-testsuite-name toppath configdat) @@ -208,18 +360,387 @@ (let ((adat (get-section cfgdat "areas"))) (map (lambda (entry) `(,(car entry) . 
,(val->alist (cadr entry)))) adat))) + +;;====================================================================== +;; time utils +;;====================================================================== + +(define (common:human-time) + (time->string (seconds->local-time (current-seconds)) "%Y-%m-%d %H:%M:%S")) + +;;====================================================================== +;; T I M E A N D D A T E +;;====================================================================== + +;;====================================================================== +;; Convert strings like "5s 2h 3m" => 60x60x2 + 3x60 + 5 +(define (common:hms-string->seconds tstr) + (let ((parts (string-split-fields "\\w+" tstr)) + (time-secs 0) + ;; s=seconds, m=minutes, h=hours, d=days, M=months, y=years, w=weeks + (trx (regexp "^(\\d+)([smhdMyw])$"))) + (for-each (lambda (part) + (let ((match (string-match trx part))) + (if match + (let ((val (string->number (cadr match))) + (unt (caddr match))) + (if val + (set! time-secs (+ time-secs (* val + (case (string->symbol unt) + ((s) 1) + ((m) 60) ;; minutes + ((h) 3600) + ((d) 86400) + ((w) 604800) + ((M) 2628000) ;; aproximately one month + ((y) 31536000) + (else + 0))))))) + ;; (print "ERROR: can't parse timestring "tstr", component "part) + ;; can't (yet) use debugprint. rely on -show-config for user to find errors + ))) + parts) + time-secs)) + +(define (seconds->hr-min-sec secs) + (let* ((hrs (quotient secs 3600)) + (min (quotient (- secs (* hrs 3600)) 60)) + (sec (- secs (* hrs 3600)(* min 60)))) + (conc (if (> hrs 0)(conc hrs "hr ") "") + (if (> min 0)(conc min "m ") "") + sec "s"))) + +(define (seconds->time-string sec) + (time->string + (seconds->local-time sec) "%H:%M:%S")) + +(define (seconds->work-week/day-time sec) + (time->string + (seconds->local-time sec) "ww%V.%u %H:%M")) + +(define (seconds->work-week/day sec) + (time->string + (seconds->local-time sec) "ww%V.%u")) + +(define (seconds->year-work-week/day sec) + (time->string + (seconds->local-time sec) "%yww%V.%w")) + +(define (seconds->year-work-week/day-time sec) + (time->string + (seconds->local-time sec) "%Yww%V.%w %H:%M")) + +(define (seconds->year-week/day-time sec) + (time->string + (seconds->local-time sec) "%Yw%V.%w %H:%M")) + +(define (seconds->quarter sec) + (case (string->number + (time->string + (seconds->local-time sec) + "%m")) + ((1 2 3) 1) + ((4 5 6) 2) + ((7 8 9) 3) + ((10 11 12) 4) + (else #f))) + +;;====================================================================== +;; basic ISO8601 format (e.g. "2017-02-28 06:02:54") date time => Unix epoch +;; +(define (common:date-time->seconds datetime) + (local-time->seconds (string->time datetime "%Y-%m-%d %H:%M:%S"))) + +;;====================================================================== +;; given span of seconds tstart to tend +;; find start time to mark and mark delta +;; +(define (common:find-start-mark-and-mark-delta tstart tend) + (let* ((deltat (- (max tend (+ tend 10)) tstart)) ;; can't handle runs of less than 4 seconds. Pad it to 10 seconds ... 
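+	 ;; Worked example for common:hms-string->seconds / seconds->hr-min-sec defined
+	 ;; above (placed here for reference only; expected values, a sketch rather than
+	 ;; captured output):
+	 ;;   (common:hms-string->seconds "5s 2h 3m")  ;; => 7385, i.e. 2*3600 + 3*60 + 5
+	 ;;   (seconds->hr-min-sec 7385)               ;; => "2hr 3m 5s"
+	 ;; (unrecognised unit letters contribute 0 seconds rather than raising an error)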
+ (result #f) + (min 60) + (hr (* 60 60)) + (day (* 24 hr)) + (yr (* 365 day)) ;; year + (mo (/ yr 12)) + (wk (* day 7))) + (for-each + (lambda (max-blks) + (for-each + (lambda (span) ;; 5 2 1 + (if (not result) + (for-each + (lambda (timeunit timesym) ;; year month day hr min sec + (if (not result) + (let* ((time-blk (* span timeunit)) + (num-blks (quotient deltat time-blk))) + (if (and (> num-blks 4)(< num-blks max-blks)) + (let ((first (* (quotient tstart time-blk) time-blk))) + (set! result (list span timeunit time-blk first timesym)) + ))))) + (list yr mo wk day hr min 1) + '( y mo w d h m s)))) + (list 8 6 5 2 1))) + '(5 10 15 20 30 40 50 500)) + (if values + (apply values result) + (values 0 day 1 0 'd)))) + +;;====================================================================== +;; given x y lim return the cron expansion +;; +(define (common:expand-cron-slash x y lim) + (let loop ((curr x) + (res `())) + (if (< curr lim) + (loop (+ curr y) (cons curr res)) + (reverse res)))) + +;;====================================================================== +;; expand a complex cron string to a list of cron strings +;; +;; x/y => x, x+y, x+2y, x+3y while x+Ny a, b ,c +;; +;; NOTE: with flatten a lot of the crud below can be factored down. +;; +(define (common:cron-expand cron-str) + (if (list? cron-str) + (flatten + (fold (lambda (x res) + (if (list? x) + (let ((newres (map common:cron-expand x))) + (append x newres)) + (cons x res))) + '() + cron-str)) ;; (map common:cron-expand cron-str)) + (let ((cron-items (string-split cron-str)) + (slash-rx (regexp "(\\d+)/(\\d+)")) + (comma-rx (regexp ".*,.*")) + (max-vals '((min . 60) + (hour . 24) + (dayofmonth . 28) ;;; BUG!!!! This will be a bug for some combinations + (month . 12) + (dayofweek . 7)))) + (if (< (length cron-items) 5) ;; bad spec + cron-str ;; `(,cron-str) ;; just return the string, something downstream will fix it + (let loop ((hed (car cron-items)) + (tal (cdr cron-items)) + (type 'min) + (type-tal '(hour dayofmonth month dayofweek)) + (res '())) + (regex-case + hed + (slash-rx ( _ base incr ) (let* ((basen (string->number base)) + (incrn (string->number incr)) + (expanded-vals (common:expand-cron-slash basen incrn (alist-ref type max-vals))) + (new-list-crons (fold (lambda (x myres) + (cons (conc (if (null? res) + "" + (conc (string-intersperse res " ") " ")) + x " " (string-intersperse tal " ")) + myres)) + '() expanded-vals))) + ;; (print "new-list-crons: " new-list-crons) + ;; (fold (lambda (x res) + ;; (if (list? x) + ;; (let ((newres (map common:cron-expand x))) + ;; (append x newres)) + ;; (cons x res))) + ;; '() + (flatten (map common:cron-expand new-list-crons)))) + ;; (map common:cron-expand (map common:cron-expand new-list-crons)))) + (else (if (null? tal) + cron-str + (loop (car tal)(cdr tal)(car type-tal)(cdr type-tal)(append res (list hed))))))))))) + +;;====================================================================== +;; given a cron string and the last time event was processed return #t to run or #f to not run +;; +;; min hour dayofmonth month dayofweek +;; 0-59 0-23 1-31 1-12 0-6 ### NOTE: dayofweek does not include 7 +;; +;; #t => yes, run the job +;; #f => no, do not run the job +;; +(define (common:cron-event cron-str now-seconds-in last-done) ;; ref-seconds = #f is NOW. 
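+  ;; Illustrative example of the cron helpers above (assumed result; exact list
+  ;; order is not guaranteed):
+  ;;   (common:cron-expand "0/15 * * * *")
+  ;;     => ("0 * * * *" "15 * * * *" "30 * * * *" "45 * * * *")
+  ;; common:extended-cron expands the spec this way and returns #t if any of the
+  ;; expanded entries fires between last-done and now.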
+ (let* ((cron-items (map string->number (string-split cron-str))) + (now-seconds (or now-seconds-in (current-seconds))) + (now-time (seconds->local-time now-seconds)) + (last-done-time (seconds->local-time last-done)) + (all-times (make-hash-table))) + ;; (print "cron-items: " cron-items "(length cron-items): " (length cron-items)) + (if (not (eq? (length cron-items) 5)) ;; don't even try to figure out junk strings + #f + (match-let ((( cmin chour cdayofmonth cmonth cdayofweek) + cron-items) + ;; 0 1 2 3 4 5 6 + ((nsec nmin nhour ndayofmonth nmonth nyr ndayofweek n7 n8 n9) + (vector->list now-time)) + ((lsec lmin lhour ldayofmonth lmonth lyr ldayofweek l7 l8 l9) + (vector->list last-done-time))) + ;; create all possible time slots + ;; remove invalid slots due to (for example) day of week + ;; get the start and end entries for the ref-seconds (current) time + ;; if last-done > ref-seconds => this is an ERROR! + ;; does the last-done time fall in the legit region? + ;; yes => #f do not run again this command + ;; no => #t ok to run the command + (for-each ;; month + (lambda (month) + (for-each ;; dayofmonth + (lambda (dom) + (for-each + (lambda (hr) ;; hour + (for-each + (lambda (minute) ;; minute + (let ((copy-now (apply vector (vector->list now-time)))) + (vector-set! copy-now 0 0) ;; force seconds to zero + (vector-set! copy-now 1 minute) + (vector-set! copy-now 2 hr) + (vector-set! copy-now 3 dom) ;; dom is already corrected for zero referenced + (vector-set! copy-now 4 month) + (let* ((copy-now-secs (local-time->seconds copy-now)) + (new-copy (seconds->local-time copy-now-secs))) ;; remake the time vector + (if (or (not cdayofweek) + (equal? (vector-ref new-copy 6) + cdayofweek)) ;; if the day is specified and a match OR if the day is NOT specified + (if (or (not cdayofmonth) + (equal? (vector-ref new-copy 3) + (+ 1 cdayofmonth))) ;; if the month is specified and a match OR if the month is NOT specified + (hash-table-set! all-times copy-now-secs new-copy)))))) + (if cmin + `(,cmin) ;; if given cmin, have to use it + (list (- nmin 1) nmin (+ nmin 1))))) ;; minute + (if chour + `(,chour) + (list (- nhour 1) nhour (+ nhour 1))))) ;; hour + (if cdayofmonth + `(,cdayofmonth) + (list (- ndayofmonth 1) ndayofmonth (+ ndayofmonth 1))))) + (if cmonth + `(,cmonth) + (list (- nmonth 1) nmonth (+ nmonth 1)))) + (let ((before #f) + (is-in #f)) + (for-each + (lambda (moment) + (if (and before + (<= before now-seconds) + (>= moment now-seconds)) + (begin + ;; (print) + ;; (print "Before: " (time->string (seconds->local-time before))) + ;; (print "Now: " (time->string (seconds->local-time now-seconds))) + ;; (print "After: " (time->string (seconds->local-time moment))) + ;; (print "Last: " (time->string (seconds->local-time last-done))) + (if (< last-done before) + (set! is-in before)) + )) + (set! before moment)) + (sort (hash-table-keys all-times) <)) + is-in))))) + +(define (common:extended-cron cron-str now-seconds-in last-done) + (let ((expanded-cron (common:cron-expand cron-str))) + (if (string? expanded-cron) + (common:cron-event expanded-cron now-seconds-in last-done) + (let loop ((hed (car expanded-cron)) + (tal (cdr expanded-cron))) + (if (common:cron-event hed now-seconds-in last-done) + #t + (if (null? tal) + #f + (loop (car tal)(cdr tal)))))))) + + ;;====================================================================== ;; misc stuff ;;====================================================================== -;; (define (debug:print . params) #f) -;; (define (debug:print-info . 
params) #f) -;; -;; (define (set-functions dbgp dbgpinfo) -;; (set! debug:print dbgp) -;; (set! debug:print-info dbgpinfo)) +(define (common:get-signature str) + (message-digest-string (md5-primitive) str)) + +;;====================================================================== +;; hash of hashs +;;====================================================================== + +(define (db:hoh-set! dat key1 key2 val) + (let* ((subhash (hash-table-ref/default dat key1 #f))) + (if subhash + (hash-table-set! subhash key2 val) + (begin + (hash-table-set! dat key1 (make-hash-table)) + (db:hoh-set! dat key1 key2 val))))) + +(define (db:hoh-get dat key1 key2) + (let* ((subhash (hash-table-ref/default dat key1 #f))) + (and subhash + (hash-table-ref/default subhash key2 #f)))) + +;;====================================================================== +;; when called from a wrapper I need sometimes to find the calling +;; wrapper, this is for dashboard to find the correct megatest. +;; +(define (common:find-local-megatest #!optional (progname "megatest")) + (let ((res (filter file-exists? + (map (lambda (updir) + (let* ((lm (car (argv))) + (dir (pathname-directory lm)) + (exe (pathname-strip-directory lm))) + (conc (if dir (conc dir "/") "") + (case (string->symbol exe) + ((dboard) (conc updir progname)) + ((mtest) (conc updir progname)) + ((dashboard) progname) + (else exe))))) + '("../../" "../"))))) + (if (null? res) + (begin + ;; (debug:print 0 *default-log-port* "Failed to find this executable! Using what can be found on the path") + progname) + (car res)))) + +(define (common:generic-ssh ssh-command proc default #!optional (msg-proc #f)) + (let ((inp #f)) + (handle-exceptions + exn + (begin + (close-input-port inp) + (if msg-proc + (msg-proc) + (debug:print 0 *default-log-port* "Command: \""ssh-command"\" failed. exn="exn)) + default) + (set! inp (open-input-pipe ssh-command)) + (with-input-from-port inp + (lambda () + (let ((res (proc))) + (close-input-port inp) + res)))))) + +;; this is a close duplicate of: +;; process:alist-on-host? +;; process:alive +;; +(define (commonmod:is-test-alive host pid) + (let* ((same-host (equal? host (get-host-name))) + (cmd (conc + (if same-host "" (conc "ssh "host" ")) + "pstree -A "pid))) + (if (and host pid + (not (equal? host "n/a"))) + + (let* ((output (if same-host + (with-input-from-pipe cmd read-lines) + (common:generic-ssh cmd read-lines '())))) ;; (with-input-from-pipe cmd read-lines))) + (debug:print 2 *default-log-port* "Running " cmd " received " output) + (if (eq? (length output) 0) + #f + #t)) + #t))) ;; assuming bad query is about a live test is likely not the right thing to do? + ) Index: configf.scm ================================================================== --- configf.scm +++ configf.scm @@ -25,10 +25,19 @@ (use regex regex-case matchable) ;; directory-utils) (declare (unit configf)) (declare (uses process)) (declare (uses env)) (declare (uses keys)) +(declare (uses debugprint)) +(declare (uses mtargs)) +(declare (uses mtargs.import)) +(declare (uses common)) +(declare (uses commonmod)) +(declare (uses commonmod.import)) +(import commonmod + (prefix mtargs args:) + debugprint) (include "common_records.scm") ;; return list (path fullpath configname) (define (find-config configname #!key (toppath #f)) @@ -98,10 +107,12 @@ (define (configf:system ht cmd) (system cmd) ) +(define configf:imports "(import commonmod (prefix mtargs args:))") + (define (configf:process-line l ht allow-system #!key (linenum #f)) (let loop ((res l)) (if (string? 
res) (let ((matchdat (string-search configf:var-expand-regex res))) (if matchdat @@ -111,11 +122,11 @@ (poststr (list-ref matchdat 4)) (result #f) (start-time (current-seconds)) (cmdsym (string->symbol cmdtype)) (fullcmd (case cmdsym - ((scheme scm) (conc "(lambda (ht)" cmd ")")) + ((scheme scm) (conc "(lambda (ht)(begin " configf:imports cmd "))")) ((system) (conc "(lambda (ht)(configf:system ht \"" cmd "\"))")) ((shell sh) (conc "(lambda (ht)(string-translate (shell \"" cmd "\") \"\n\" \" \"))")) ((realpath rp)(conc "(lambda (ht)(common:nice-path \"" cmd "\"))")) ((getenv gv) (conc "(lambda (ht)(get-environment-variable \"" cmd "\"))")) ((mtrah) (conc "(lambda (ht)" Index: dashboard-context-menu.scm ================================================================== --- dashboard-context-menu.scm +++ dashboard-context-menu.scm @@ -21,32 +21,38 @@ ;;====================================================================== ;; implementation of context menu that pops up on ;; right click on test cell in Runs & Runs Summary Tabs ;;====================================================================== +(declare (unit dashboard-context-menu)) +(declare (uses common)) +(declare (uses commonmod)) +(declare (uses db)) +(declare (uses gutils)) +(declare (uses rmt)) +(declare (uses rmtmod)) +(declare (uses ezsteps)) +;; (declare (uses sdb)) +;; (declare (uses filedb)) +(declare (uses subrun)) + (use format fmt) (require-library iup) (import (prefix iup iup:)) (use canvas-draw) (use srfi-1 posix regex regex-case srfi-69) (use (prefix sqlite3 sqlite3:)) -(declare (unit dashboard-context-menu)) -(declare (uses common)) -(declare (uses db)) -(declare (uses gutils)) -(declare (uses rmt)) -(declare (uses ezsteps)) -;; (declare (uses sdb)) -;; (declare (uses filedb)) -(declare (uses subrun)) - (include "common_records.scm") (include "db_records.scm") (include "run_records.scm") + +(import commonmod + rmtmod + debugprint) (define (dboard:launch-testpanel run-id test-id) (let* ((dboardexe (common:find-local-megatest "dashboard")) (cmd (conc dboardexe " -test " run-id "," test-id Index: dashboard-guimonitor.scm ================================================================== --- dashboard-guimonitor.scm +++ dashboard-guimonitor.scm @@ -34,10 +34,12 @@ (declare (unit dashboard-guimonitor)) (declare (uses common)) (declare (uses keys)) (declare (uses db)) (declare (uses tasks)) +(declare (uses commonmod)) +(import commonmod) (include "common_records.scm") (include "db_records.scm") (include "run_records.scm") (include "task_records.scm") Index: dashboard-tests.scm ================================================================== --- dashboard-tests.scm +++ dashboard-tests.scm @@ -20,28 +20,33 @@ ;;====================================================================== ;; Test info panel ;;====================================================================== +(declare (unit dashboard-tests)) +(declare (uses common)) +(declare (uses commonmod)) +(declare (uses db)) +(declare (uses gutils)) +(declare (uses rmt)) +(declare (uses ezsteps)) +(declare (uses subrun)) +(declare (uses debugprint)) +(declare (uses rmtmod)) + (use format fmt) (require-library iup) (import (prefix iup iup:)) (use canvas-draw) (use srfi-1 posix regex regex-case srfi-69) (use (prefix sqlite3 sqlite3:)) -(declare (unit dashboard-tests)) -(declare (uses common)) -(declare (uses db)) -(declare (uses gutils)) -(declare (uses rmt)) -(declare (uses ezsteps)) -;; (declare (uses sdb)) -;; (declare (uses filedb)) -(declare (uses subrun)) +(import 
commonmod + rmtmod + debugprint) (include "common_records.scm") (include "db_records.scm") (include "run_records.scm") @@ -459,12 +464,11 @@ ;;====================================================================== ;; ;;====================================================================== (define (dashboard-tests:examine-test run-id test-id) ;; run-id run-key origtest) (let* ((db-path (db:dbfile-path)) ;; (conc (configf:lookup *configdat* "setup" "linktree") "/db/" run-id ".db")) - (dbstruct #f) ;; NOT ACTUALLY USED (db:setup)) ;; (make-dbr:dbstruct path: (db:dbfile-path #f) ;; (configf:lookup *configdat* "setup" "linktree") - ;; local: #t)) + (dbstruct #f) ;; NOT USED (testdat (rmt:get-test-info-by-id run-id test-id)) ;; (db:get-test-info-by-id dbstruct run-id test-id)) (db-mod-time 0) ;; (file-modification-time db-path)) (last-update 0) ;; (current-seconds)) (request-update #t)) (if (not testdat) ADDED dashboard-transport-mode.scm.template Index: dashboard-transport-mode.scm.template ================================================================== --- /dev/null +++ dashboard-transport-mode.scm.template @@ -0,0 +1,22 @@ +;;====================================================================== +;; set up transport, db cache and sync methods +;; +;; sync-method: 'original, 'attach or 'none +;; cache-method: 'tmp or 'none +;; rmt:transport-mode: 'http, 'tcp, 'nfs +;; +;; NOTE: NOT ALL COMBINATIONS WORK +;; +;;====================================================================== + +;; uncomment this block to test without tcp or cachedb +;; (dbfile:sync-method 'none) +;; (dbfile:cache-method 'none) +;; (rmt:transport-mode 'nfs) + +;; uncomment this block to test with tcp and cachedb +(dbfile:sync-method 'attach) ;; original was causing crash on start. +(dbfile:cache-method 'none) +(rmt:transport-mode 'nfs) + + Index: dashboard.scm ================================================================== --- dashboard.scm +++ dashboard.scm @@ -16,26 +16,17 @@ ;; You should have received a copy of the GNU General Public License ;; along with Megatest. If not, see . 
;; ;;====================================================================== -(use format) - -(require-library iup) -(import (prefix iup iup:)) - -(use canvas-draw) -(import canvas-draw-iup) -(use ducttape-lib) -(use sqlite3 srfi-1 posix regex regex-case srfi-69 typed-records sparse-vectors) ;; defstruct -(import (prefix sqlite3 sqlite3:)) -(import dbfile) - (declare (uses common)) -(declare (uses margs)) +(declare (uses mtargs)) +(declare (uses mtargs.import)) (declare (uses keys)) (declare (uses items)) +(declare (uses debugprint)) +(declare (uses debugprint.import)) (declare (uses db)) (declare (uses configf)) (declare (uses process)) (declare (uses launch)) (declare (uses runs)) @@ -44,21 +35,49 @@ (declare (uses dcommon)) (declare (uses dashboard-context-menu)) (declare (uses vg)) (declare (uses subrun)) (declare (uses mt)) -(declare (uses dbfile)) +(declare (uses dbmod)) +(declare (uses rmtmod)) +(declare (uses dbfile)) +(declare (uses commonmod)) +(declare (uses commonmod.import)) + +(use format) + +(require-library iup) +(import (prefix iup iup:)) + +(use canvas-draw) +(import canvas-draw-iup + (prefix sqlite3 sqlite3:)) + +(use ducttape-lib) +(use sqlite3 srfi-1 posix regex regex-case srfi-69 typed-records sparse-vectors) ;; defstruct + +(import commonmod + (prefix mtargs args:) + dbmod + dbfile + rmtmod + debugprint) (include "common_records.scm") (include "db_records.scm") (include "run_records.scm") (include "task_records.scm") (include "megatest-version.scm") (include "megatest-fossil-hash.scm") (include "vg_records.scm") +;; set some parameters here - these need to be put in something that can be loaded from other +;; executables such as dashboard and mtutil +;; +(include "dashboard-transport-mode.scm") (dbfile:db-init-proc db:initialize-main-db) +(set! rmtmod:send-receive rmt:send-receive) (define help (conc "Megatest Dashboard, documentation at http://www.kiatoa.com/fossils/megatest version " megatest-version " license GPL, Copyright (C) Matt Welland 2012-2017 @@ -70,10 +89,11 @@ -cols C : set number of columns -start-dir dir : start dashboard in the given directory -target target : filter runs tab to given target. -debug n[,n] : set debug level(s) e.g. -debug 4 or -debug 0,9 -repl : Start a chicken scheme interpreter + -mode MODE : tcp or nfs " )) ;; process args @@ -84,10 +104,11 @@ "-cols" "-test" ;; given a run id and test id, open only a test control panel on that test.. "-debug" "-start-dir" "-target" + "-mode" ;; tcp or nfs ) ;; switches (don't take arguments) (list "-h" "-skip-version-check" "-repl" @@ -94,11 +115,17 @@ "-:p" ;; ignore the built in chicken profiling switch ) args:arg-hash 0)) - +(if (args:get-arg "-mode") + (let* ((mode (string->symbol (args:get-arg "-mode")))) + (rmt:transport-mode mode))) + +(if (args:get-arg "-test") ;; need to use tcp for test control panel + (rmt:transport-mode 'tcp)) + ;; RA => Might require revert for filters ;; create a watch dog to move changes from lt/.db/*.db to megatest.db ;; ;;;(if (file-write-access? (conc *toppath* "/megatest.db")) ;;(debug:print-info 13 *default-log-port* "Before common:watchdog spawn") @@ -204,11 +231,15 @@ tabdat)) ;; gets and calls updater list based on curr-tab-num ;; (define (dboard:common-run-curr-updaters commondat #!key (tab-num #f)) - (sync-db-to-tmp (dboard:common-get-tabdat commondat tab-num: tab-num)) + ;; (sync-db-to-tmp (dboard:common-get-tabdat commondat tab-num: tab-num)) ;; no longer applies + + ;; maybe need sleep here? 
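  ;; (editorial note, not from the original changeset) the sync-db-to-tmp call
  ;; above was dropped because the dashboard now reads .mtdb/main.db in the run
  ;; area directly (see the db-path change further down), so there is no tmp
  ;; copy left to refresh here; the proc now just looks up the tabdat for the
  ;; requested tab and, if one exists, calls each updater registered for it.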
+ + (if (dboard:common-get-tabdat commondat tab-num: tab-num) ;; only update if there is a tabdat (let* ((tnum (or tab-num (dboard:commondat-curr-tab-num commondat))) (updaters (hash-table-ref/default (dboard:commondat-updaters commondat) tnum '()))) @@ -341,18 +372,18 @@ tests-tree ;; used in newdashboard ) ;; register tabdat with BBpp ;; this is used by BBpp (Brandon's pretty printer) to convert dboard:tabdat into a composition of lists that pp will handle -(hash-table-set! *BBpp_custom_expanders_list* TABDAT: - (cons dboard:tabdat? - (lambda (tabdat-item) - (filter - (lambda (alist-entry) - (member (car alist-entry) - '(allruns-by-id allruns))) ;; FIELDS OF INTEREST - (dboard:tabdat->alist tabdat-item))))) +;; (hash-table-set! *BBpp_custom_expanders_list* TABDAT: +;; (cons dboard:tabdat? +;; (lambda (tabdat-item) +;; (filter +;; (lambda (alist-entry) +;; (member (car alist-entry) +;; '(allruns-by-id allruns))) ;; FIELDS OF INTEREST +;; (dboard:tabdat->alist tabdat-item))))) (define (dboard:tabdat-target-string vec) (let ((targ (dboard:tabdat-target vec))) @@ -405,27 +436,29 @@ (make-dboard:runsdat runs-index: (make-hash-table) tests-index: (make-hash-table) matrix-dat: (make-sparse-array))) -;; used to keep the rundata from rmt:get-tests-for-run -;; in sync. +;; duplicated in dcommon.scm ;; -(defstruct dboard:rundat - run - tests-drawn ;; list of id's already drawn on screen - tests-notdrawn ;; list of id's NOT already drawn - rowsused ;; hash of lists covering what areas used - replace with quadtree - hierdat ;; put hierarchial sorted list here - tests ;; hash of id => testdat - ((tests-by-name (make-hash-table)) : hash-table) ;; hash of testfullname => testdat - key-vals - ((last-update 0) : number) ;; last query to db got records from before last-update - ((last-db-time 0) : number) ;; last timestamp on main.db - ((data-changed #f) : boolean) - ((run-data-offset 0) : number) ;; get only 100 items per call, set back to zero when received less than 100 items - (db-path #f)) +;; ;; used to keep the rundata from rmt:get-tests-for-run +;; ;; in sync. +;; ;; +;; (defstruct dboard:rundat +;; run +;; tests-drawn ;; list of id's already drawn on screen +;; tests-notdrawn ;; list of id's NOT already drawn +;; rowsused ;; hash of lists covering what areas used - replace with quadtree +;; hierdat ;; put hierarchial sorted list here +;; tests ;; hash of id => testdat +;; ((tests-by-name (make-hash-table)) : hash-table) ;; hash of testfullname => testdat +;; key-vals +;; ((last-update 0) : number) ;; last query to db got records from before last-update +;; ((last-db-time 0) : number) ;; last timestamp on main.db +;; ((data-changed #f) : boolean) +;; ((run-data-offset 0) : number) ;; get only 100 items per call, set back to zero when received less than 100 items +;; (db-path #f)) ;; for the new runs view lets build up a few new record types and then consolidate later ;; ;; this is a two level deep pipeline for the incoming data: ;; sql query data ==> filters ==> data for display @@ -491,18 +524,18 @@ duration ) ;; register dboard:rundat with BBpp ;; this is used by BBpp (Brandon's pretty printer) to convert dboard:rundat into a composition of lists that pp will handle -(hash-table-set! *BBpp_custom_expanders_list* RUNDAT: - (cons dboard:rundat? - (lambda (tabdat-item) - (filter - (lambda (alist-entry) - (member (car alist-entry) - '(run run-data-offset ))) ;; FIELDS OF INTEREST - (dboard:rundat->alist tabdat-item))))) +;; (hash-table-set! 
*BBpp_custom_expanders_list* RUNDAT: +;; (cons dboard:rundat? +;; (lambda (tabdat-item) +;; (filter +;; (lambda (alist-entry) +;; (member (car alist-entry) +;; '(run run-data-offset ))) ;; FIELDS OF INTEREST +;; (dboard:rundat->alist tabdat-item))))) (define (dboard:rundat-make-init #!key (run #f)(key-vals #f)(tests #f));; -100 is before time began @@ -640,11 +673,11 @@ (access-mode (dboard:tabdat-access-mode tabdat)) (num-to-get (string->number (or (configf:lookup *configdat* "setup" "num-tests-to-get") "200"))) (states (hash-table-keys (dboard:tabdat-state-ignore-hash tabdat))) (statuses (hash-table-keys (dboard:tabdat-status-ignore-hash tabdat))) - (do-not-use-db-file-timestamps #t) ;; (configf:lookup *configdat* "setup" "do-not-use-db-file-timestamps")) ;; this still hosts runs-summary-tab + (do-not-use-db-file-timestamps #f) ;; (configf:lookup *configdat* "setup" "do-not-use-db-file-timestamps")) ;; this still hosts runs-summary-tab (do-not-use-query-timestamps #t) ;; (configf:lookup *configdat* "setup" "do-not-use-query-timestamps")) ;; this no longer troubles runs-summary-tab (sort-info (get-curr-sort)) (sort-by (vector-ref sort-info 1)) (sort-order (vector-ref sort-info 2)) (bubble-type (if (member sort-order '(testname)) @@ -663,12 +696,12 @@ (dboard:rundat-last-update run-dat))) (last-db-time (if do-not-use-db-file-timestamps 0 (dboard:rundat-last-db-time run-dat))) (db-path (or (dboard:rundat-db-path run-dat) - (let* ((db-dir (common:get-db-tmp-area)) - (db-pth (conc db-dir "/.megatest/main.db"))) + (let* ((db-dir (get-environment-variable "MT_RUN_AREA_HOME")) ;; (common:get-db-tmp-area)) + (db-pth (conc db-dir "/.mtdb/main.db"))) (dboard:rundat-db-path-set! run-dat db-pth) db-pth))) (db-mod-time (common:lazy-sqlite-db-modification-time db-path)) (db-modified (>= db-mod-time last-db-time)) (multi-get (> (dboard:rundat-run-data-offset run-dat) 0)) ;; multi-get in progress @@ -1076,11 +1109,11 @@ ;; - not appropriate for where all the runs are needed ;; (define (update-buttons tabdat uidat numruns numtests) (let* ((runs (if (> (length (dboard:tabdat-allruns tabdat)) numruns) (take-right (dboard:tabdat-allruns tabdat) numruns) - (pad-list (dboard:tabdat-allruns tabdat) numruns))) + (pad-list (dboard:tabdat-allruns tabdat) numruns))) (lftcol (dboard:uidat-get-lftcol uidat)) (tableheader (dboard:uidat-get-header uidat)) (table (dboard:uidat-get-runsvec uidat)) (coln 0) (all-test-names (make-hash-table)) @@ -3100,21 +3133,18 @@ (hash-table-ref/default (dboard:tabdat-last-db-update tabdat) context 0)) (define (dboard:set-last-db-update! tabdat context newtime) (hash-table-set! (dboard:tabdat-last-db-update tabdat) context newtime)) -;; DOES NOT WORK RELIABLY WITH /tmp WAL mode files. Timestamps only change when the db -;; is closed (I think). If db dir starts with /tmp always return true ;; (define (dashboard:database-changed? commondat tabdat #!key (context-key 'default)) (let* ((run-update-time (current-seconds)) - (dbdir (dboard:tabdat-dbdir tabdat)) + (dbdir *toppath*) (modtime (dashboard:get-youngest-run-db-mod-time dbdir)) (recalc (dashboard:recalc modtime (dboard:commondat-please-update commondat) (dboard:get-last-db-update tabdat context-key)))) - ;; (dboard:tabdat-last-db-update tabdat)))) (if recalc (dboard:set-last-db-update! tabdat context-key run-update-time)) (dboard:commondat-please-update-set! 
commondat #f) recalc)) @@ -3792,11 +3822,11 @@ (stop-the-train) (define (main) ;; (print "Starting dashboard main") - (let* ((mtdb-path (conc *toppath* "/.megatest/main.db")) + (let* ((mtdb-path (conc *toppath* "/.mtdb/main.db")) (target (args:get-arg "-target")) (commondat (dboard:commondat-make))) (if target (begin (args:remove-arg-from-ht "-target") @@ -3889,11 +3919,11 @@ ;; Sync to tmp only if in read-only mode. (define (sync-db-to-tmp tabdat) - (let* ((db-file "./.megatest/main.db")) + (let* ((db-file "./.mtdb/main.db")) (if (and (not (file-write-access? db-file)) ( > (current-seconds) (+ last-copy-time 5))) (begin (db:multi-db-sync (db:setup #f) 'old2new) (set! last-copy-time (current-seconds)) ) Index: db.scm ================================================================== --- db.scm +++ db.scm @@ -22,13 +22,28 @@ ;; Database access ;;====================================================================== ;; dbstruct vector containing all the relevant dbs like main.db, megatest.db, run.db etc +(declare (unit db)) +(declare (uses common)) +(declare (uses debugprint)) +(declare (uses dbmod)) +(declare (uses dbfile)) +(declare (uses keys)) +(declare (uses ods)) +(declare (uses mt)) +(declare (uses commonmod)) +(declare (uses mtargs)) +(declare (uses rmtmod)) + +(import commonmod + (prefix mtargs args:)) + (use (srfi 18) extras - tcp + ;; tcp stack (prefix sqlite3 sqlite3:) srfi-1 posix regex @@ -44,39 +59,37 @@ z3 typed-records matchable files) -(declare (unit db)) -(declare (uses common)) -(declare (uses dbmod)) -;; (declare (uses debugprint)) -(declare (uses dbfile)) -(declare (uses keys)) -(declare (uses ods)) -(declare (uses client)) -(declare (uses mt)) - (include "common_records.scm") (include "db_records.scm") (include "key_records.scm") (include "run_records.scm") (define *number-of-writes* 0) (define *number-non-write-queries* 0) -(import dbmod) +(import debugprint) (import dbfile) +(import dbmod) +(import rmtmod) ;; record for keeping state,status and count for doing roll-ups in ;; iterated tests ;; (defstruct dbr:counts (state #f) (status #f) (count 0)) +;; (define (db:with-db dbstruct run-id r/w proc . params) +;; (case (rmt:transport-mode) +;; ((http)(dbfile:with-db dbstruct run-id r/w proc params)) +;; ((tcp) (dbmod:with-db dbstruct run-id r/w proc params)) +;; ((nfs) (dbmod:with-db dbstruct run-id r/w proc params)) +;; (else (assert #f "FATAL: db:with-db called with non-existant transport mode")))) ;;====================================================================== ;; hash of hashs ;;====================================================================== @@ -92,19 +105,10 @@ (define (db:hoh-get dat key1 key2) (let* ((subhash (hash-table-ref/default dat key1 #f))) (and subhash (hash-table-ref/default subhash key2 #f)))) -(define (db:get-cache-stmth dbdat run-id db stmt) - (let* (;; (dbdat (dbfile:get-dbdat dbstruct run-id)) - (stmt-cache (dbr:dbdat-stmt-cache dbdat)) - (stmth (db:hoh-get stmt-cache db stmt))) - (or stmth - (let* ((newstmth (sqlite3:prepare db stmt))) - (db:hoh-set! stmt-cache db stmt newstmth) - newstmth)))) - ;;====================================================================== ;; SQLITE3 HELPERS ;;====================================================================== (define (db:general-sqlite-error-dump exn stmt . 
params) @@ -133,10 +137,83 @@ (assert *toppath* "FATAL: db:setup called before launch:setup has been run.") (let* ((tmpdir (common:get-db-tmp-area))) (if (not *dbstruct-dbs*) (dbfile:setup do-sync *toppath* tmpdir) *dbstruct-dbs*))) + +;; moved from dbfile +;; +;; ADD run-id SUPPORT +;; +(define (db:create-all-triggers dbstruct) + (db:with-db + dbstruct #f #t + (lambda (dbdat db) + (db:create-triggers db)))) + +(define (db:create-triggers db) + (for-each (lambda (key) + (sqlite3:execute db (cadr key))) + db:trigger-list)) + +(define (db:drop-all-triggers dbstruct) + (db:with-db + dbstruct #f #t + (lambda (dbdat db) + (db:drop-triggers db)))) + +(define (db:have-incompletes? dbstruct run-id ovr-deadtime) + (let* ((incompleted '()) + (oldlaunched '()) + (toplevels '()) + ;; (deadtime-str (configf:lookup *configdat* "setup" "deadtime")) + (deadtime (or ovr-deadtime 72000))) ;; twenty hours + (db:with-db + dbstruct run-id #f + (lambda (dbdat db) + + ;; in RUNNING or REMOTEHOSTSTART for more than 10 minutes + ;; + ;; HOWEVER: this code in run:test seems to work fine + ;; (> (- (current-seconds)(+ (db:test-get-event_time testdat) + ;; (db:test-get-run_duration testdat))) + ;; 600) + ;; (db:delay-if-busy dbdat) + (sqlite3:for-each-row + (lambda (test-id run-dir uname testname item-path) + (if (and (equal? uname "n/a") + (equal? item-path "")) ;; this is a toplevel test + ;; what to do with toplevel? call rollup? + (begin + (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels))) + ;; (print-info "Found old toplevel test in RUNNING state, test-id=" test-id)) + (set! incompleted (cons (list test-id run-dir uname testname item-path run-id) incompleted)))) + (db:get-cache-stmth dbdat db + "SELECT id,rundir,uname,testname,item_path FROM tests WHERE run_id=? AND (strftime('%s','now') - event_time) > (run_duration + ?) AND state IN ('RUNNING','REMOTEHOSTSTART');") + run-id deadtime) + + ;; in LAUNCHED for more than one day. Could be long due to job queues TODO/BUG: Need override for this in config + ;; + ;; (db:delay-if-busy dbdat) + (sqlite3:for-each-row + (lambda (test-id run-dir uname testname item-path) + (if (and (equal? uname "n/a") + (equal? item-path "")) ;; this is a toplevel test + ;; what to do with toplevel? call rollup? + (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) + (set! oldlaunched (cons (list test-id run-dir uname testname item-path run-id) oldlaunched)))) + (db:get-cache-stmth dbdat db + "SELECT id,rundir,uname,testname,item_path FROM tests WHERE run_id=? AND (strftime('%s','now') - event_time) > 86400 AND state IN ('LAUNCHED');") + run-id) + + ;; (print-info "Found " (length oldlaunched) " old LAUNCHED items, " (length toplevels) " old LAUNCHED toplevel tests and " (length incompleted) " tests marked RUNNING but apparently dead.") + (if (and (null? incompleted) + (null? oldlaunched) + (null? toplevels)) + #f + #t))))) + ;; looks up subdb and returns it, if not found then set up ;; and then return it. 
;; #;(define (db:get-db dbstruct run-id) @@ -150,12 +227,12 @@ ;; Get/open a database ;; if run-id => get run specific db ;; if #f => get main db ;; if run-id is a string treat it as a filename -;; if db already open - return inmem -;; if db not open, open inmem, rundb and sync then return inmem +;; if db already open - return cachedb +;; if db not open, open cachedb, rundb and sync then return cachedb ;; inuse gets set automatically for rundb's ;; ;; (define db:get-db db:get-subdb) ;; (define (db:get-db subdb #;dbstruct run-id) ;; RENAME TO db:get-dbh @@ -359,13 +436,15 @@ (define (db:cache-for-read-only source target #!key (use-last-update #f)) (if (and (hash-table-ref/default *global-db-store* target #f) (>= (file-modification-time target)(file-modification-time source))) (hash-table-ref *global-db-store* target) (let* ((toppath (launch:setup)) - (targ-db-last-mod (if (common:file-exists? target) - (file-modification-time target) - 0)) + (targ-db-last-mod (db:get-sqlite3-mod-time target)) +;; (if (common:file-exists? target) +;; BUG: This needs to include wal mode stuff .shm etc. +;; (file-modification-time target) +;; 0)) (cache-db (or (hash-table-ref/default *global-db-store* target #f) (db:open-megatest-db path: target))) (source-db (db:open-megatest-db path: source)) (curr-time (current-seconds)) (res '()) @@ -373,95 +452,99 @@ (db:sync-tables (db:sync-main-list source-db) last-update source-db cache-db) (db:sync-tables db:sync-tests-only last-update source-db cache-db) (hash-table-set! *global-db-store* target cache-db) cache-db))) -;; ;; call a proc with a cached db -;; ;; -;; (define (db:call-with-cached-db proc . params) -;; ;; first cache the db in /tmp -;; (let* ((cname-part (conc "megatest_cache/" (common:get-testsuite-name))) -;; (fname (conc (common:get-area-path-signature) ".db")) -;; (cache-dir (common:get-create-writeable-dir -;; (list (conc "/tmp/" (current-user-name) "/" cname-part) -;; (conc "/tmp/" (current-user-name) "-" cname-part) -;; (conc "/tmp/" (current-user-name) "_" cname-part)))) -;; (megatest-db (conc *toppath* "/megatest.db"))) -;; ;; (debug:print-info 0 *default-log-port* "Using cache dir " cache-dir) -;; (if (not cache-dir) -;; (begin -;; (debug:print 0 *default-log-port* "ERROR: Failed to find an area to write the cache db") -;; (exit 1)) -;; (let* ((th1 (make-thread -;; (lambda () -;; (if (and (common:file-exists? megatest-db) -;; (file-write-access? megatest-db)) -;; (begin -;; (db:sync-to-megatest.db dbstruct 'timestamps) ;; internally mutexes on *db-local-sync* -;; (debug:print-info 2 *default-log-port* "Done syncing to megatest.db")))) -;; "call-with-cached-db sync-to-megatest.db")) -;; (cache-db (db:cache-for-read-only -;; megatest-db -;; (conc cache-dir "/" fname) -;; use-last-update: #t))) -;; (thread-start! th1) -;; (apply proc cache-db params) -;; )))) - - - - -(define (db:all-db-sync dbstruct) - (let* ((dbdat (db:open-db dbstruct #f db:initialize-main-db)) - (data-synced 0) ;; count of changed records - (tmp-area (common:get-db-tmp-area)) - (dbfiles (glob (conc tmp-area"/.megatest/*.db"))) - (sync-durations (make-hash-table)) - (no-sync-db (db:open-no-sync-db))) +(define (db:get-sqlite3-mod-time fname) + (let* ((wal-file (conc fname "-wal")) + (shm-file (conc fname "-shm")) + (get-mtime (lambda (f) + (if (and (file-exists? f) + (file-read-access? 
f)) + (file-modification-time f) + 0)))) + (max (get-mtime fname) + (get-mtime wal-file) + (get-mtime shm-file)))) + +;; (define (db:all-db-sync dbstruct) +;; (let* ((dbdat (db:open-db dbstruct #f db:initialize-main-db)) +;; (data-synced 0) ;; count of changed records +;; (tmp-area (common:get-db-tmp-area)) +;; (dbfiles (glob (conc tmp-area"/.mtdb/*.db"))) +;; (sync-durations (make-hash-table)) +;; (no-sync-db (db:open-no-sync-db))) +;; (for-each +;; (lambda (file) ;; tmp db file +;; (debug:print-info 3 *default-log-port* "file: " file) +;; (let* ((fname (conc (pathname-file file) ".db")) ;; fname is tmp db file +;; (wal-file (conc fname "-wal")) +;; (shm-file (conc fname "-shm")) +;; (fulln (conc *toppath*"/,mtdb/"fname)) ;; fulln is nfs db name +;; (wal-time (if (file-exists? wal-file) +;; (file-modification-time wal-file) +;; 0)) +;; (shm-time (if (file-exists? shm-file) +;; (file-modification-time shm-file) +;; 0)) +;; (time1 (db:get-sqlite3-mod-time file)) +;; ;; (if (file-exists? file) ;; time1 is the max itime of the tmp db, -wal and -shm files. +;; ;; (max (file-modification-time file) wal-time shm-time) +;; ;; (begin +;; ;; (debug:print-info 2 *default-log-port* "Sync - I do not see file "file) +;; ;; 1))) +;; (time2 (db:get-sqlite3-mod-time fulln)) +;; ;; (if (file-exists? fulln) ;; time2 is nfs file time +;; ;; (file-modification-time fulln) +;; ;; (begin +;; ;; (debug:print-info 2 *default-log-port* "Sync - I do not see file "fulln) +;; ;; 0))) +;; (changed (> (- time1 time2) (+ (random 5) 1))) ;; it has been at some few seconds since last synced +;; (changed10 (> (- time1 time2) 10)) ;; it has been at least ten seconds since sync'd +;; (jfile-exists (file-exists? (conc file"-journal"))) ;; i.e. are we busy? +;; (do-cp (cond +;; ((not (file-exists? fulln)) ;; shouldn't happen, but this might recover +;; (cons #t (conc "File "fulln" not found! Copying "fname" to "fulln))) +;; ((and (not jfile-exists) changed) +;; (cons #t "not busy, changed")) ;; not busy and changed +;; ((and jfile-exists changed10) +;; (cons #t "busy but not synced in a while")) ;; busy but not sync'd in over 10 seconds +;; ((and changed *time-to-exit*) +;; (cons #t "Time to exit, forced final sync")) ;; last sync +;; (else +;; (cons #f "No sync needed"))))) +;; (if (car do-cp) +;; (let* ((start-time (current-milliseconds)) +;; (fname (pathname-file file)) +;; (runid (if (string= fname "main") #f (string->number fname)))) +;; (debug:print-info 3 *default-log-port* "db:all-db-sync: fname: " +;; fname", delta: " (- time1 time2) " seconds, reason: "(cdr do-cp)) +;; (db:lock-and-delta-sync no-sync-db dbstruct fname runid (db:get-keys dbstruct) db:initialize-main-db) +;; (hash-table-set! sync-durations (conc fname".db") +;; (- (current-milliseconds) start-time))) +;; (debug:print-info 3 *default-log-port* "skipping sync. " file " is up to date") +;; ))) +;; dbfiles) +;; ;; WHY does the dbdat need to be added back? +;; (if dbdat (dbfile:add-dbdat dbstruct #f dbdat)) +;; ) +;; #t) + +(define (db:kill-servers) + (let* ((servers (server:choose-server *toppath* 'all-valid))) ;; (server:get-list *toppath*)) (for-each - (lambda (file) - (debug:print-info 3 *default-log-port* "file: " file) - (let* ((fname (conc (pathname-file file) ".db")) - (fulln (conc *toppath*"/.megatest/"fname)) - (time1 (if (file-exists? file) - (file-modification-time file) - (begin - (debug:print-info 2 *default-log-port* "Sync - I do not see file "file) - 1))) - (time2 (if (file-exists? 
fulln) - (file-modification-time fulln) - (begin - (debug:print-info 2 *default-log-port* "Sync - I do not see file "fulln) - 0))) - (changed (> (- time1 time2) (+ (random 5) 1))) ;; it has been at some few seconds since last synced - (changed10 (> (- time1 time2) 10)) ;; it has been at least ten seconds since sync'd - (jfile-exists (file-exists? (conc file"-journal"))) ;; i.e. are we busy? - (do-cp (cond - ((not (file-exists? fulln)) ;; shouldn't happen, but this might recover - (cons #t (conc "File "fulln" not found! Copying "fname" to "fulln))) - ((and (not jfile-exists) changed) - (cons #t "not busy, changed")) ;; not busy and changed - ((and jfile-exists changed10) - (cons #t "busy but not synced in a while")) ;; busy but not sync'd in over 10 seconds - ((and changed *time-to-exit*) - (cons #t "Time to exit, forced final sync")) ;; last sync - (else - (cons #f "No sync needed"))))) - (if (car do-cp) - (let* ((start-time (current-milliseconds)) - (fname (pathname-file file)) - (runid (if (string= fname "main") #f (string->number fname)))) - (debug:print-info 3 *default-log-port* "db:all-db-sync: fname: " - fname", delta: " (- time1 time2) " seconds, reason: "(cdr do-cp)) - (db:lock-and-delta-sync no-sync-db dbstruct fname runid (db:get-keys dbstruct) db:initialize-main-db) - (hash-table-set! sync-durations (conc fname".db") - (- (current-milliseconds) start-time))) - (debug:print-info 3 *default-log-port* "skipping sync. " file " is up to date") - ))) - dbfiles) - (if dbdat (dbfile:add-dbdat dbstruct #f dbdat))) - #t) + (lambda (server) + (handle-exceptions + exn + (begin + (debug:print-info 0 *default-log-port* "Unable to get host and/or port from " server ", exn=" exn) + #f) + (match-let (((mod-time host port start-time server-id pid) server)) + (if (and host pid) + (tasks:kill-server host pid))))) + servers) + (delete-file* (common:get-sync-lock-filepath)))) ;; options: ;; ;; 'killservers - kills all servers ;; 'dejunk - removes junk records @@ -473,117 +556,97 @@ ;; run-ids: '(1 2 3 ...) or #f (for all) ;; (define (db:multi-db-sync dbstruct . 
options) (let* (;; (dbdat (db:open-db dbstruct #f dbfile:db-init-proc)) (data-synced 0) ;; count of changed records - (tmp-area (common:get-db-tmp-area)) - (old2new (member 'old2new options)) - (dejunk (member 'dejunk options)) - (killservers (member 'killservers options)) - (servers (server:choose-server *toppath* 'all-valid)) ;; (server:get-list *toppath*)) - (src-area (if old2new *toppath* tmp-area)) - (dest-area (if old2new tmp-area *toppath*)) - (dbfiles (if old2new (glob (conc *toppath* "/.megatest/*.db")) (glob (conc tmp-area "/.megatest/*.db")))) - (keys (db:get-keys dbstruct)) - (sync-durations (make-hash-table))) - - - (if killservers - (begin - (for-each - (lambda (server) - (handle-exceptions - exn - (begin - (debug:print-info 0 *default-log-port* "Unable to get host and/or port from " server ", exn=" exn) - #f) - (match-let (((mod-time host port start-time server-id pid) server)) - (if (and host pid) - (tasks:kill-server host pid))))) - servers) - (delete-file* (common:get-sync-lock-filepath)) - ) - ) - (for-each - (lambda (srcfile) - (debug:print-info 3 *default-log-port* "file: " srcfile) - (let* ((fname (conc (pathname-file srcfile) ".db")) - (basename (pathname-file srcfile)) - (run-id (if (string= basename "main") #f (string->number basename))) - (destfile (conc dest-area "/.megatest/" fname)) - (dest-directory (conc dest-area "/.megatest/")) - (dummy (debug:print-info 2 *default-log-port* "destfile = " destfile)) - (dummy2 (debug:print-info 2 *default-log-port* "dejunk = " dejunk)) - (time1 (file-modification-time srcfile)) - (time2 (if (file-exists? destfile) - (begin - (debug:print-info 2 *default-log-port* "destfile " destfile " exists") - (file-modification-time destfile) - ) - (begin - (debug:print-info 0 *default-log-port* "Sync - I do not see file " destfile) - 0))) - (changed ( < (- time2 time1) 6.0)) ;; dest db not updated within last 6 seconds - - (do-cp (cond - ((not (file-exists? destfile)) ;; shouldn't happen, but this might recover - (debug:print-info 2 *default-log-port* "File " destfile " not found. Copying "srcfile" to "destfile) - (system (conc "/bin/mkdir -p " dest-directory)) - (system (conc "/bin/cp " srcfile " " destfile)) - #t) - (changed ;; (and changed - ;; (> (- (current-seconds) time1) 3)) ;; if file is changed and three seconds have passed. - #t) - ((and changed *time-to-exit*) ;; last sync - #t) - (else - #f)))) - (if (or dejunk do-cp) - (let* ( - (start-time (current-milliseconds)) - - (subdb (or (dbfile:get-subdb dbstruct run-id) (dbfile:init-subdb dbstruct run-id dbfile:db-init-proc))) - (mtdb (dbr:subdb-mtdbdat subdb)) - (tmpdb (dbfile:open-db dbstruct run-id dbfile:db-init-proc)) - - ) - (debug:print-info 2 *default-log-port* "delta syncing file: " srcfile ", time diff: " (- time1 time2) " seconds") - - (if old2new - (begin - (if dejunk (db:clean-up run-id mtdb)) - (db:sync-tables (db:sync-all-tables-list dbstruct (db:get-keys dbstruct)) #f mtdb tmpdb) - ) - (begin - (if dejunk (db:clean-up run-id tmpdb)) - (db:sync-tables (db:sync-all-tables-list dbstruct (db:get-keys dbstruct)) #f tmpdb mtdb) - ) - ) - (hash-table-set! sync-durations (conc srcfile ".db") (- (current-milliseconds) start-time))) - (debug:print-info 2 *default-log-port* "skipping delta sync. 
" srcfile " is up to date") - ) - ) - ) - dbfiles - ) - data-synced - ) -) + (tmp-area (common:get-db-tmp-area)) + (old2new (member 'old2new options)) + (dejunk (member 'dejunk options)) + (killservers (member 'killservers options)) + (src-area (if old2new *toppath* tmp-area)) + (dest-area (if old2new tmp-area *toppath*)) + (dbfiles (if old2new (glob (conc *toppath* "/.mtdb/*.db")) (glob (conc tmp-area "/.mtdt/*.db")))) + (keys (db:get-keys dbstruct)) + (sync-durations (make-hash-table))) + + ;; kill servers + (if killservers (db:kill-servers)) + + (if (not dbfiles) + (debug:print-error 0 *default-log-port* "no dbfiles found in " (conc *toppath* "/.mtdb")) + (for-each + (lambda (srcfile) + (debug:print-info 3 *default-log-port* "file: " srcfile) + (let* ((fname (conc (pathname-file srcfile) ".db")) + (basename (pathname-file srcfile)) + (run-id (if (string= basename "main") #f (string->number basename))) + (destfile (conc dest-area "/.mtdb/" fname)) + (dest-directory (conc dest-area "/.mtdb/")) + (time1 (file-modification-time srcfile)) + (time2 (if (file-exists? destfile) + (begin + (debug:print-info 2 *default-log-port* "destfile " destfile " exists") + (file-modification-time destfile)) + (begin + (debug:print-info 0 *default-log-port* "Sync - I do not see file " destfile) + 0))) + (changed ( < (- time2 time1) 6.0)) ;; dest db not updated within last 6 seconds + + (do-cp (cond + ((not (file-exists? destfile)) ;; shouldn't happen, but this might recover + (debug:print-info 2 *default-log-port* "File " destfile " not found. Copying "srcfile" to "destfile) + ;; TODO: Need to fix this for WAL mod. Can't just copy. + (system (conc "/bin/mkdir -p " dest-directory)) + (system (conc "/bin/cp " srcfile " " destfile)) + #t) + (changed ;; (and changed + #t) + ((and changed *time-to-exit*) ;; last sync + #t) + (else + #f)))) + (if (or dejunk do-cp) + (let* ((start-time (current-milliseconds)) + ;; subdb is misnamed - should be dbdat (I think...) + (subdb (dbfile:open-db dbstruct run-id dbfile:db-init-proc)) + ;; (or (dbfile:get-subdb dbstruct run-id) + ;; (dbfile:init-subdb dbstruct run-id dbfile:db-init-proc))) + (mtdb (dbr:subdb-mtdbdat subdb)) + ;; + ;; BUG: -mrw- I think this next line is wrong. run-id should be the path to .mtdb/.db + ;; + (tmpdb (dbfile:open-db dbstruct run-id dbfile:db-init-proc))) + + (debug:print-info 2 *default-log-port* "delta syncing file: " srcfile ", time diff: " (- time1 time2) " seconds") + (if old2new + (begin + (if dejunk (db:clean-up run-id mtdb)) + (db:sync-tables (db:sync-all-tables-list + dbstruct + (db:get-keys dbstruct)) + #f mtdb tmpdb)) + (begin + (if dejunk (db:clean-up run-id tmpdb)) + (db:sync-tables (db:sync-all-tables-list dbstruct (db:get-keys dbstruct)) #f tmpdb mtdb))) + (hash-table-set! sync-durations (conc srcfile ".db") (- (current-milliseconds) start-time))) + (debug:print-info 2 *default-log-port* "skipping delta sync. " srcfile " is up to date")))) + dbfiles)) + data-synced)) ;; Sync all changed db's ;; (define (db:tmp->megatest.db-sync dbstruct run-id last-update) (let* ((subdbs (hash-table-values (dbr:dbstruct-subdbs dbstruct))) (res '())) (for-each (lambda (subdb) - (let* ((dbname (db:run-id->dbname run-id)) - (mtdb (dbr:subdb-mtdb subdb)) + (let* ((mtdb (dbr:subdb-mtdb subdb)) (tmpdb (db:get-subdb dbstruct run-id)) (refndb (dbr:subdb-refndb subdb)) (newres (db:sync-tables (db:sync-all-tables-list dbstruct (db:get-keys dbstruct)) last-update tmpdb refndb mtdb))) ;; (stack-push! 
(dbr:subdb-dbstack subdb) tmpdb) + ;; BUG: verify this is really needed (dbfile:add-dbdat dbstruct run-id tmpdb) (set! res (cons newres res)))) subdbs) res)) @@ -648,13 +711,23 @@ ;; exn ;; (begin ;; (debug:print 0 "ERROR: Failed to create tables. Look at your [fields] section, should be: fieldname TEXT DEFAULT 'yourdefault'") ;; (exit)) (sqlite3:execute db "CREATE TABLE IF NOT EXISTS keys (id INTEGER PRIMARY KEY, fieldname TEXT, fieldtype TEXT, CONSTRAINT keyconstraint UNIQUE (fieldname));") - (for-each (lambda (key) - (sqlite3:execute db "INSERT OR REPLACE INTO keys (fieldname,fieldtype) VALUES (?,?);" key "TEXT")) - keys) + (for-each + (lambda (key) + (let* ((fieldname #f) + (fieldtype #f)) + (sqlite3:for-each-row + (lambda (fn ft) + (set! fieldname fn) + (set! fieldtype ft)) + db + "SELECT fieldname,fieldtype FROM keys WHERE fieldname=?" key) + (if (not fieldname) + (sqlite3:execute db "INSERT INTO keys (fieldname,fieldtype) VALUES (?,?);" key "TEXT")))) + keys) (sqlite3:execute db (conc "CREATE TABLE IF NOT EXISTS runs (id INTEGER PRIMARY KEY, \n " fieldstr (if havekeys "," "") " runname TEXT DEFAULT 'norun', contour TEXT DEFAULT '', @@ -745,11 +818,21 @@ (sqlite3:execute db "CREATE TABLE IF NOT EXISTS metadat (id INTEGER PRIMARY KEY, var TEXT, val TEXT, CONSTRAINT metadat_constraint UNIQUE (var));") (sqlite3:execute db "CREATE TABLE IF NOT EXISTS access_log (id INTEGER PRIMARY KEY, user TEXT, accessed TIMESTAMP, args TEXT);") ;; Must do this *after* running patch db !! No more. ;; cannot use db:set-var since it will deadlock, hardwire the code here - (sqlite3:execute db "INSERT OR REPLACE INTO metadat (var,val) VALUES (?,?);" "MEGATEST_VERSION" (common:version-signature)) + (let* ((prev-version #f) + (curr-version (common:version-signature))) + (sqlite3:for-each-row + (lambda (ver) + (set! prev-version ver)) + db + "SELECT val FROM metadat WHERE var='MEGATEST_VERSION';") + (if prev-version + (if (not (equal? prev-version curr-version)) + (sqlite3:execute db "UPDATE metadat SET val=? 
WHERE var=?;" curr-version "MEGATEST_VERSION")) + (sqlite3:execute db "INSERT OR REPLACE INTO metadat (var,val) VALUES (?,?);" "MEGATEST_VERSION" curr-version) )) (debug:print-info 11 *default-log-port* "db:initialize END") ;; )))) ;;====================================================================== ;; R U N S P E C I F I C D B ;;====================================================================== @@ -840,19 +923,21 @@ test_id INTEGER, update_time TIMESTAMP, cpuload INTEGER DEFAULT -1, diskfree INTEGER DEFAULT -1, diskusage INTGER DEFAULT -1, - run_duration INTEGER DEFAULT 0);") + run_duration INTEGER DEFAULT 0, + last_update INTEGER DEFAULT (strftime('%s','now')));") (sqlite3:execute db "CREATE TABLE IF NOT EXISTS archives ( id INTEGER PRIMARY KEY, test_id INTEGER, state TEXT DEFAULT 'new', status TEXT DEFAULT 'n/a', archive_type TEXT DEFAULT 'bup', du INTEGER, - archive_path TEXT);"))) + archive_path TEXT, + last_update INTEGER DEFAULT (strftime('%s','now')));"))) (db:create-triggers db) db)) ;; ) ;;====================================================================== ;; A R C H I V E S @@ -886,10 +971,11 @@ "SELECT d.id,d.archive_area_name,disk_path,last_df,last_df_time FROM archive_disks AS d INNER JOIN archive_blocks AS b ON d.id=b.archive_disk_id WHERE b.id IN (" (string-intersperse (map conc res) ",") ") AND last_df > ?;") dneeded)) + ;; BUG: Verfify this is really needed (dbfile:add-dbdat dbstruct #f dbdat) blocks)) ;; returns id of the record, register a disk allocated to archiving and record it's last known ;; available space @@ -952,11 +1038,11 @@ ;; (define (db:test-set-archive-block-id dbstruct run-id test-id archive-block-id) (db:with-db dbstruct run-id - #f + #t (lambda (dbdat db) (sqlite3:execute db "UPDATE tests SET archived=? WHERE id=?;" archive-block-id test-id)))) ;; Look up the archive block info given a block-id @@ -990,220 +1076,64 @@ ;;====================================================================== ;; M A I N T E N A N C E ;;====================================================================== -(define (db:have-incompletes? dbstruct run-id ovr-deadtime) - (let* ((incompleted '()) - (oldlaunched '()) - (toplevels '()) - (deadtime-str (configf:lookup *configdat* "setup" "deadtime")) - (deadtime (if (and deadtime-str - (string->number deadtime-str)) - (string->number deadtime-str) - 72000))) ;; twenty hours - (db:with-db - dbstruct run-id #f - (lambda (dbdat db) - (if (number? ovr-deadtime)(set! deadtime ovr-deadtime)) - - ;; in RUNNING or REMOTEHOSTSTART for more than 10 minutes - ;; - ;; HOWEVER: this code in run:test seems to work fine - ;; (> (- (current-seconds)(+ (db:test-get-event_time testdat) - ;; (db:test-get-run_duration testdat))) - ;; 600) - ;; (db:delay-if-busy dbdat) - (sqlite3:for-each-row - (lambda (test-id run-dir uname testname item-path) - (if (and (equal? uname "n/a") - (equal? item-path "")) ;; this is a toplevel test - ;; what to do with toplevel? call rollup? - (begin - (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) - (debug:print-info 0 *default-log-port* "Found old toplevel test in RUNNING state, test-id=" test-id)) - (set! incompleted (cons (list test-id run-dir uname testname item-path run-id) incompleted)))) - db - "SELECT id,rundir,uname,testname,item_path FROM tests WHERE run_id=? AND (strftime('%s','now') - event_time) > (run_duration + ?) AND state IN ('RUNNING','REMOTEHOSTSTART');" - run-id deadtime) - - ;; in LAUNCHED for more than one day. 
Could be long due to job queues TODO/BUG: Need override for this in config - ;; - ;; (db:delay-if-busy dbdat) - (sqlite3:for-each-row - (lambda (test-id run-dir uname testname item-path) - (if (and (equal? uname "n/a") - (equal? item-path "")) ;; this is a toplevel test - ;; what to do with toplevel? call rollup? - (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) - (set! oldlaunched (cons (list test-id run-dir uname testname item-path run-id) oldlaunched)))) - db - "SELECT id,rundir,uname,testname,item_path FROM tests WHERE run_id=? AND (strftime('%s','now') - event_time) > 86400 AND state IN ('LAUNCHED');" - run-id) - - (debug:print-info 18 *default-log-port* "Found " (length oldlaunched) " old LAUNCHED items, " (length toplevels) " old LAUNCHED toplevel tests and " (length incompleted) " tests marked RUNNING but apparently dead.") - (if (and (null? incompleted) - (null? oldlaunched) - (null? toplevels)) - #f - #t))))) - -(define (db:get-status-from-final-status-file run-dir) - (let ((infile (conc run-dir "/.final-status"))) - ;; first verify we are able to write the output file - (if (not (file-read-access? infile)) - (begin - (debug:print 2 *default-log-port* "ERROR: cannot read " infile) - (debug:print 2 *default-log-port* "ERROR: run-dir is " run-dir) - #f - ) - (with-input-from-file infile read-lines) - ))) - -;; select end_time-now from -;; (select testname,item_path,event_time+run_duration as -;; end_time,strftime('%s','now') as now from tests where state in -;; ('RUNNING','REMOTEHOSTSTART','LAUNCHED')); - -(define (db:find-and-mark-incomplete dbstruct run-id ovr-deadtime) - (let* ((incompleted '()) - (oldlaunched '()) - (toplevels '()) - ;; The default running-deadtime is 720 seconds = 12 minutes. - ;; "(running-deadtime-default (+ server-start-allowance (* 2 launch-monitor-period)))" = 200 + (2 * (200 + 30 + 30)) - (deadtime-trim (or ovr-deadtime (configf:lookup-number *configdat* "setup" "deadtime"))) - (server-start-allowance 200) - (server-overloaded-budget 200) - (launch-monitor-off-time (or (configf:lookup-number *configdat* "setup" "test-stats-update-period") 30)) - (launch-monitor-on-time-budget 30) - (launch-monitor-period (+ launch-monitor-off-time launch-monitor-on-time-budget server-overloaded-budget)) - (remotehoststart-deadtime-default (+ server-start-allowance server-overloaded-budget 30)) - (remotehoststart-deadtime (or deadtime-trim remotehoststart-deadtime-default)) - (running-deadtime-default (+ server-start-allowance (* 2 launch-monitor-period))) - (running-deadtime (or deadtime-trim running-deadtime-default)) ;; two minutes (30 seconds between updates, this leaves 3x grace period) - ) - (debug:print-info 4 *default-log-port* "running-deadtime = " running-deadtime) - (debug:print-info 4 *default-log-port* "deadtime-trim = " deadtime-trim) - - (db:with-db - dbstruct run-id #f - (lambda (dbdat db) - (let* ((stmth1 (db:get-cache-stmth - dbdat run-id db - "SELECT id,rundir,uname,testname,item_path,event_time,run_duration FROM tests - WHERE run_id=? AND (strftime('%s','now') - event_time) > (run_duration + ?) - AND state IN ('RUNNING');")) - (stmth2 (db:get-cache-stmth - dbdat run-id db - "SELECT id,rundir,uname,testname,item_path,event_time,run_duration FROM tests - WHERE run_id=? AND (strftime('%s','now') - event_time) > (run_duration + ?) - AND state IN ('REMOTEHOSTSTART');")) - (stmth3 (db:get-cache-stmth - dbdat run-id db - "SELECT id,rundir,uname,testname,item_path FROM tests - WHERE run_id=? 
AND (strftime('%s','now') - event_time) > 86400 - AND state IN ('LAUNCHED');"))) - ;; in RUNNING or REMOTEHOSTSTART for more than 10 minutes - ;; - ;; HOWEVER: this code in run:test seems to work fine - ;; (> (- (current-seconds)(+ (db:test-get-event_time testdat) - ;; (db:test-get-run_duration testdat))) - ;; 600) - ;; (db:delay-if-busy dbdat) - (sqlite3:for-each-row - (lambda (test-id run-dir uname testname item-path event-time run-duration) - (if (and (equal? uname "n/a") - (equal? item-path "")) ;; this is a toplevel test - ;; what to do with toplevel? call rollup? - (begin - (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) - (debug:print-info 0 *default-log-port* "Found old toplevel test in RUNNING state, test-id=" test-id)) - (begin - (set! incompleted (cons (list test-id run-dir uname testname item-path run-id) incompleted)) - (debug:print-info 0 *default-log-port* "Found old test in RUNNING state, test-id=" - test-id" exceeded running-deadtime "running-deadtime" now="(current-seconds) - " event-time="event-time" run-duration="run-duration)))) - stmth1 - run-id running-deadtime) ;; default time 720 seconds - - (sqlite3:for-each-row - (lambda (test-id run-dir uname testname item-path event-time run-duration) - (if (and (equal? uname "n/a") - (equal? item-path "")) ;; this is a toplevel test - ;; what to do with toplevel? call rollup? - (begin - (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) - (debug:print-info 0 *default-log-port* "Found old toplevel test in RUNNING state, test-id=" test-id)) - (begin - (debug:print-info 0 *default-log-port* "Found old test in REMOTEHOSTSTART state, test-id=" test-id - " exceeded running-deadtime "running-deadtime" now="(current-seconds)" event-time="event-time - " run-duration="run-duration) - (set! incompleted (cons (list test-id run-dir uname testname item-path run-id) incompleted))))) - stmth2 - run-id remotehoststart-deadtime) ;; default time 230 seconds - - ;; in LAUNCHED for more than one day. Could be long due to job queues TODO/BUG: Need override for this in config - ;; - ;; (db:delay-if-busy dbdat) - (sqlite3:for-each-row - (lambda (test-id run-dir uname testname item-path) - (if (and (equal? uname "n/a") - (equal? item-path "")) ;; this is a toplevel test - ;; what to do with toplevel? call rollup? - (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) - (begin - (debug:print-info 0 *default-log-port* "Found old test in LAUNCHED state, test-id=" test-id - " 1 day since event_time marked") - (set! oldlaunched (cons (list test-id run-dir uname testname item-path run-id) oldlaunched))))) - stmth3 - run-id) - - (debug:print-info 18 *default-log-port* "Found " (length oldlaunched) " old LAUNCHED items, " - (length toplevels) " old LAUNCHED toplevel tests and " - (length incompleted) " tests marked RUNNING but apparently dead.")) - - ;; These are defunct tests, do not do all the overhead of set-state-status. Force them to INCOMPLETE. 
- ;; - ;; (db:delay-if-busy dbdat) - (let* ((min-incompleted-ids (map car incompleted)) ;; do 'em all - (all-ids (append min-incompleted-ids (map car oldlaunched)))) - (if (> (length all-ids) 0) - (begin - ;; (launch:is-test-alive "localhost" 435) - (debug:print 0 *default-log-port* "WARNING: Marking test(s); " (string-intersperse (map conc all-ids) ", ") - " as DEAD") - (for-each - (lambda (test-id) - (let* (;; (run-dir (db:test-get-rundir-from-test-id dbstruct run-id test-id)) - (tinfo (db:get-test-info-by-id dbstruct run-id test-id)) - (run-dir (db:test-get-rundir tinfo)) - (host (db:test-get-host tinfo)) - (pid (db:test-get-process_id tinfo)) - (result (db:get-status-from-final-status-file run-dir))) - (if (and (list? result) (> (length result) 1) (equal? "PASS" (cadr result)) (equal? "COMPLETED" (car result))) - (begin - (debug:print 0 *default-log-port* "INFO: test " test-id " actually passed, so marking PASS not DEAD") - (db:set-state-status-and-roll-up-items - dbstruct run-id test-id 'foo "COMPLETED" "PASS" - "Test stopped responding but it has PASSED; marking it PASS in the DB.")) - (let ((is-alive (and (not (eq? pid 0)) ;; 0 is default in re-used field "attemptnum" where pid stored. - (launch:is-test-alive host pid)))) - (if is-alive - (debug:print 0 *default-log-port* "INFO: test " test-id " on host " host - " has a process on pid " pid ", NOT setting to DEAD.") - (begin - (debug:print 0 *default-log-port* "INFO: test " test-id - " final state/status is not COMPLETED/PASS. It is " result) - (db:set-state-status-and-roll-up-items - dbstruct run-id test-id 'foo "COMPLETED" "DEAD" - "Test stopped responding while in RUNNING or REMOTEHOSTSTART; presumed dead."))))))) - ;; call end of eud of run detection for posthook - from merge, is it needed? - ;; (launch:end-of-run-check run-id) - all-ids) - ;;call end of eud of run detection for posthook - (launch:end-of-run-check run-id) - ))))))) +;; (define (db:have-incompletes? dbstruct run-id ovr-deadtime) +;; (let* ((incompleted '()) +;; (oldlaunched '()) +;; (toplevels '()) +;; (deadtime-str (configf:lookup *configdat* "setup" "deadtime")) +;; (deadtime (if (and deadtime-str +;; (string->number deadtime-str)) +;; (string->number deadtime-str) +;; 72000))) ;; twenty hours +;; (db:with-db +;; dbstruct run-id #f +;; (lambda (dbdat db) +;; (if (number? ovr-deadtime)(set! deadtime ovr-deadtime)) +;; +;; ;; in RUNNING or REMOTEHOSTSTART for more than 10 minutes +;; ;; +;; ;; HOWEVER: this code in run:test seems to work fine +;; ;; (> (- (current-seconds)(+ (db:test-get-event_time testdat) +;; ;; (db:test-get-run_duration testdat))) +;; ;; 600) +;; ;; (db:delay-if-busy dbdat) +;; (sqlite3:for-each-row +;; (lambda (test-id run-dir uname testname item-path) +;; (if (and (equal? uname "n/a") +;; (equal? item-path "")) ;; this is a toplevel test +;; ;; what to do with toplevel? call rollup? +;; (begin +;; (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) +;; (debug:print-info 0 *default-log-port* "Found old toplevel test in RUNNING state, test-id=" test-id)) +;; (set! incompleted (cons (list test-id run-dir uname testname item-path run-id) incompleted)))) +;; (db:get-cache-stmth dbdat db +;; "SELECT id,rundir,uname,testname,item_path FROM tests WHERE run_id=? AND (strftime('%s','now') - event_time) > (run_duration + ?) AND state IN ('RUNNING','REMOTEHOSTSTART');") +;; run-id deadtime) +;; +;; ;; in LAUNCHED for more than one day. 
Could be long due to job queues TODO/BUG: Need override for this in config +;; ;; +;; ;; (db:delay-if-busy dbdat) +;; (sqlite3:for-each-row +;; (lambda (test-id run-dir uname testname item-path) +;; (if (and (equal? uname "n/a") +;; (equal? item-path "")) ;; this is a toplevel test +;; ;; what to do with toplevel? call rollup? +;; (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) +;; (set! oldlaunched (cons (list test-id run-dir uname testname item-path run-id) oldlaunched)))) +;; (db:get-cache-stmth dbdat db +;; "SELECT id,rundir,uname,testname,item_path FROM tests WHERE run_id=? AND (strftime('%s','now') - event_time) > 86400 AND state IN ('LAUNCHED');") +;; run-id) +;; +;; (debug:print-info 18 *default-log-port* "Found " (length oldlaunched) " old LAUNCHED items, " (length toplevels) " old LAUNCHED toplevel tests and " (length incompleted) " tests marked RUNNING but apparently dead.") +;; (if (and (null? incompleted) +;; (null? oldlaunched) +;; (null? toplevels)) +;; #f +;; #t))))) ;; BUG: Probably broken - does not explicitly use run-id in the query ;; (define (db:top-test-set-per-pf-counts dbstruct run-id test-name) (db:general-call dbstruct run-id 'top-test-set-per-pf-counts (list test-name test-name test-name test-name test-name test-name test-name test-name test-name test-name test-name test-name test-name test-name test-name test-name test-name))) @@ -1317,141 +1247,56 @@ ;; (db:find-and-mark-incomplete db) ;; (db:delay-if-busy dbdat) (sqlite3:execute db "VACUUM;") dead-runs)) -;;====================================================================== -;; M E T A G E T A N D S E T V A R S -;;====================================================================== - -;; returns number if string->number is successful, string otherwise -;; also updates *global-delta* -;; -(define (db:get-var dbstruct var) - (let* ((res #f)) - (db:with-db - dbstruct #f #f ;; for the moment vars are only stored in main.db - (lambda (dbdat db) - (sqlite3:for-each-row - (lambda (val) - (set! res val)) - db - "SELECT val FROM metadat WHERE var=?;" var) - ;; convert to number if can - (if (string? res) - (let ((valnum (string->number res))) - (if valnum (set! res valnum)))) - res)))) - -(define (db:inc-var dbstruct var) - (db:with-db dbstruct #f #t - (lambda (dbdat db) - (sqlite3:execute db "UPDATE metadat SET val=val+1 WHERE var=?;" var)))) - -(define (db:dec-var dbstruct var) - (db:with-db dbstruct #f #t - (lambda (dbdat db) - (sqlite3:execute db "UPDATE metadat SET val=val-1 WHERE var=?;" var)))) - -;; This was part of db:get-var. It was used to estimate the load on -;; the database files. -;; -;; scale by 10, average with current value. -;; (set! *global-delta* (/ (+ *global-delta* (* (- (current-milliseconds) start-ms) -;; (if throttle throttle 0.01))) -;; 2)) -;; (if (> (abs (- *last-global-delta-printed* *global-delta*)) 0.08) ;; don't print all the time, only if it changes a bit -;; (begin -;; (debug:print-info 4 *default-log-port* "launch throttle factor=" *global-delta*) -;; (set! *last-global-delta-printed* *global-delta*))) - -(define (db:set-var dbstruct var val) - (db:with-db dbstruct #f #t - (lambda (dbdat db) - (sqlite3:execute db "INSERT OR REPLACE INTO metadat (var,val) VALUES (?,?);" var val)))) - -(define (db:add-var dbstruct var val) - (db:with-db dbstruct #f #t - (lambda (dbdat db) - (sqlite3:execute db "UPDATE metadat SET val=val+? 
WHERE var=?;" val var)))) - -(define (db:del-var dbstruct var) - (db:with-db dbstruct #f #t - (lambda (dbdat db) - (sqlite3:execute db "DELETE FROM metadat WHERE var=?;" var)))) - ;;====================================================================== ;; no-sync.db - small bits of data to be shared between servers ;;====================================================================== -(define (db:no-sync-db db-in) - (if db-in - db-in - (if *no-sync-db* - *no-sync-db* - (begin - (mutex-lock! *db-access-mutex*) - (let ((dbpath (common:get-db-tmp-area)) - (db (dbfile:open-no-sync-db dbpath))) - (set! *no-sync-db* db) - (mutex-unlock! *db-access-mutex*) - db))))) - -(define (with-no-sync-db proc) - (let* ((db (db:no-sync-db *no-sync-db*))) - (proc db))) - +(define (db:get-dbsync-path) + (case (rmt:transport-mode) + ((http)(common:get-db-tmp-area)) + ((tcp) (conc *toppath*"/.mtdb")) + ((nfs) (conc *toppath*"/.mtdb")) + (else "/tmp/dunno-this-gonna-exist"))) + +;; This is needed for api.scm (define (db:open-no-sync-db) - (dbfile:open-no-sync-db (db:dbfile-path))) - -(define (db:no-sync-close-db db stmt-cache) - (db:safely-close-sqlite3-db db stmt-cache)) - - -;; use a global for some primitive caching, it is just silly to -;; re-read the db over and over again for the keys since they never -;; change - + (dbfile:open-no-sync-db (db:get-dbsync-path))) + ;; why get the keys from the db? why not get from the *configdat* ;; using keys:config-get-fields? (define (db:get-keys dbstruct) - (keys:config-get-fields *configdat*) -) - -;; (if *db-keys* *db-keys* -;; (let ((res '())) -;; (db:with-db dbstruct #f #f -;; (lambda (dbdat db) -;; (sqlite3:for-each-row -;; (lambda (key) -;; (set! res (cons key res))) -;; db -;; "SELECT fieldname FROM keys ORDER BY id DESC;"))) -;; (set! *db-keys* res) -;; res))) + (keys:config-get-fields *configdat*)) ;; extract index number given a header/data structure (define (db:get-index-by-header header field) (list-index (lambda (x)(equal? x field)) header)) ;; look up values in a header/data structure (define (db:get-value-by-header row header field) - (if (or (null? header) (not row)) - #f - (let loop ((hed (car header)) - (tal (cdr header)) - (n 0)) - (if (equal? hed field) - (handle-exceptions - exn - (begin - (debug:print 0 *default-log-port* "WARNING: attempt to read non-existant field, row=" - row " header=" header " field=" field ", exn=" exn) - #f) - (vector-ref row n)) - (if (null? tal) #f (loop (car tal)(cdr tal)(+ n 1))))))) + (let ((len (if (vector? row) + (vector-length row) + 0))) + (if (or (null? header) (not row)) + #f + (let loop ((hed (car header)) + (tal (cdr header)) + (n 0)) + (if (equal? hed field) + (handle-exceptions + exn + (begin + (debug:print 4 *default-log-port* "WARNING: attempt to read non-existant field, row=" + row " header=" header " field=" field ", exn=" exn) + #f) + (if (>= n len) + #f + (vector-ref row n))) + (if (null? 
tal) #f (loop (car tal)(cdr tal)(+ n 1)))))))) ;; Accessors for the header/data structure ;; get rows and header from (define (db:get-header vec)(vector-ref vec 0)) (define (db:get-rows vec)(vector-ref vec 1)) @@ -1458,33 +1303,26 @@ ;;====================================================================== ;; R U N S ;;====================================================================== - - - - (define (db:get-run-times dbstruct run-patt target-patt) (let ((res `()) - (qry (conc "select runname, (max(end_time)-min(event_time))/60 as runtime, target from (select runname, run_id,tests.event_time,tests.event_time+run_duration AS end_time, " (string-join (db:get-keys dbstruct) " || '/' || ") " as target from tests inner join runs on tests.run_id = runs.id where runs.runname like ? and target like ?) group by run_id ;"))) -;(print qry) -(db:with-db + (qry (conc "select runname, (max(end_time)-min(event_time))/60 as runtime, target from (select runname, run_id,tests.event_time,tests.event_time+run_duration AS end_time, " (string-join (db:get-keys dbstruct) " || '/' || ") " as target from tests inner join runs on tests.run_id = runs.id where runs.runname like ? and target like ?) group by run_id ;"))) + ;(print qry) + (db:with-db dbstruct #f ;; this is for the main runs db #f ;; does not modify db (lambda (dbdat db) - (sqlite3:for-each-row - (lambda (runname runtime target ) - (set! res (cons (vector runname runtime target) res))) - db - qry - run-patt target-patt) - - res)))) - - + (sqlite3:for-each-row + (lambda (runname runtime target ) + (set! res (cons (vector runname runtime target) res))) + db + qry + run-patt target-patt) + res)))) (define (db:get-run-name-from-id dbstruct run-id) (db:with-db dbstruct #f ;; this is for the main runs db @@ -1551,11 +1389,11 @@ ;; (debug:print 0 *default-log-port* "Got here 0.") (debug:print 3 *default-log-port* "keys: " keys " allvals: " allvals " keyvals: " keyvals " key=?str is " key=?str) (debug:print 2 *default-log-port* "NOTE: using target " (string-intersperse (map cadr keyvals) "/") " for this run") (if (and runname (null? (filter (lambda (x)(not x)) keyvals))) ;; there must be a better way to "apply and" (db:with-db - dbstruct #f #f + dbstruct #f #t (lambda (dbdat db) ;; (debug:print 0 *default-log-port* "Got here 1.") (let ((res #f)) (apply sqlite3:execute db (conc "INSERT OR IGNORE INTO runs (runname,state,status,owner,event_time,contour" @@ -1572,10 +1410,92 @@ res))) (begin (debug:print-error 0 *default-log-port* "Called without all necessary keys") #f)))) +(define (db:get-run-id dbstruct runname target) + (let ((runs (db:simple-get-runs dbstruct runname #f #f target #f))) ;; runpatt count offset target last-update + (if (null? runs) + #f + (simple-run-id (car runs))))) + +;; called with run-id=#f so will operate on main.db +;; +(define (db:insert-run dbstruct target runname run-meta) + (let* ((keys (db:get-keys dbstruct)) + (runs (db:simple-get-runs dbstruct runname #f #f target #f))) ;; runpatt count offset target last-update + ;; need to insert run based on target and runname + (let* ((targvals (string-split target "/")) + (keystr (string-intersperse keys ",")) + (key?str (string-intersperse (make-list (length targvals) "?") ",")) + (qrystr (conc "INSERT INTO runs (runname,"keystr") VALUES (?,"key?str")")) + (get-var (lambda (db qrystr) + (let* ((res #f)) + (sqlite3:for-each-row + (lambda row + (set res (car row))) + db qrystr runname) + res)))) + (if (null? 
runs) + (db:create-initial-run-record dbstruct runname target)) + (let* ((run-id (db:get-run-id dbstruct runname target))) + (db:with-db + dbstruct + #f #t + (lambda (dbdat db) + (for-each + (lambda (keyval) + (let* ((fieldname (car keyval)) + (getqry (conc "SELECT "fieldname" FROM runs WHERE id=?;")) + (setqry (conc "UPDATE runs SET "fieldname"=? WHERE id=?;")) + (val (cdr keyval)) + (valnum (if (number? val) + val + (if (string? val) + (string->number val) + #f)))) + (if (not (member fieldname (cons "runname" keys))) ;; don't attempt to tweak these + (let* ((curr-val (get-var db getqry)) + (have-it (or (equal? curr-val val) + (equal? curr-val valnum)))) + (if (not have-it) + (sqlite3:execute db setqry (or valnum val) run-id)))))) + run-meta))) + run-id)))) + +(define (db:create-initial-run-record dbstruct runname target) + (let* ((keys (db:get-keys dbstruct)) + (targvals (string-split target "/")) + (keystr (string-intersperse keys ",")) + (key?str (string-intersperse (make-list (length targvals) "?") ",")) + (qrystr (conc "INSERT INTO runs (runname,"keystr") VALUES (?,"key?str")"))) + (db:with-db + dbstruct #f #t + (lambda (dbdat db) + (apply sqlite3:execute db qrystr runname targvals))))) + +(define (db:insert-test dbstruct run-id test-rec) + (let* ((testname (alist-ref "testname" test-rec equal?)) + (item-path (alist-ref "item_path" test-rec equal?)) + (id (db:get-test-id dbstruct run-id testname item-path)) + (fieldvals (filter (lambda (x)(not (member (car x) '("id" "last_update")))) test-rec)) + (setqry (conc "UPDATE tests SET "(string-intersperse + (map (lambda (dat) + (conc (car dat)"=?")) + fieldvals) + ",")" WHERE id=?;")) + (insqry (conc "INSERT INTO tests ("(string-intersperse (map (lambda (x) (car x)) fieldvals) ",") + ") VALUES ("(string-intersperse (make-list (length fieldvals) "?") ",")");"))) + (debug:print 0 *default-log-port* "id: "id"\nset: "setqry"\ninsqry: "insqry) + (db:with-db + dbstruct + run-id #t + (lambda (dbdat db) + (if id + (apply sqlite3:execute db setqry (append (map cdr fieldvals) (list id))) + (apply sqlite3:execute db insqry (map cdr fieldvals))))))) + ;; replace header and keystr with a call to runs:get-std-run-fields ;; ;; keypatts: ( (KEY1 "abc%def")(KEY2 "%") ) ;; runpatts: patt1,patt2 ... ;; @@ -1615,17 +1535,13 @@ qrystr ))) (debug:print-info 11 *default-log-port* "db:get-runs END qrystr: " qrystr " keypatts: " keypatts " offset: " offset " limit: " count) (vector header res))) - -(define-record simple-run target id runname state status owner event_time) -(define-record-printer (simple-run x out) - (fprintf out "#,(simple-run ~S ~S ~S ~S)" - (simple-run-target x) (simple-run-id x) (simple-run-runname x) (time->string (seconds->local-time (simple-run-event_time x) )))) - ;; simple get-runs +;; +;; records used defined in dbfile ;; (define (db:simple-get-runs dbstruct runpatt count offset target last-update) (let* ((res '()) (keys (db:get-keys dbstruct)) (runpattstr (db:patt->like "runname" runpatt)) @@ -1658,23 +1574,23 @@ qrystr ))) (debug:print-info 11 *default-log-port* "db:get-runs END qrystr: " qrystr " target: " target " offset: " offset " limit: " count) res)) -;; TODO: Switch this to use max(update_time) from each run db? Then if using a server there is no disk traffic (using inmem db) +;; TODO: Switch this to use max(update_time) from each run db? Then if using a server there is no disk traffic (using cachedb db) ??? ;; ;; NOTE: This DOESN'T (necessarily) get the real run ids, but the number of the .db!! 
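;; A sketch (not part of this change) of what the note above means in
;; practice: the values coming back from db:get-changed-run-ids below are
;; *db file* numbers, so a caller wanting real run ids has to expand each
;; number back to the run ids that hash to it.  db:dbnum->run-ids is a
;; hypothetical helper; the divisor must match however the db files are
;; numbered (100 for the two-digit .db names matched below, (num-run-dbs)
;; in the newer dbfile.scm scheme).
;;
;; (define (db:dbnum->run-ids dbnum all-run-ids)
;;   ;; keep only the run ids stored in <dbnum>.db
;;   (filter (lambda (run-id)
;;             (= (modulo run-id 100) dbnum))
;;           all-run-ids))
;;
;; (db:dbnum->run-ids 7 '(7 23 107 207 310)) ;; => (7 107 207)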
(define (db:get-changed-run-ids since-time) (let* ((dbdir (db:dbfile-path)) ;; (configf:lookup *configdat* "setup" "dbdir")) - (alldbs (glob (conc dbdir "/.megatest/[0-9]*.db"))) + (alldbs (glob (conc dbdir "/.mtdb/[0-9]*.db*"))) (changed (filter (lambda (dbfile) (> (file-modification-time dbfile) since-time)) alldbs))) (delete-duplicates (map (lambda (dbfile) - (let* ((res (string-match ".*\\/(\\d\\d)\\.db" dbfile))) + (let* ((res (string-match ".*\\/(\\d\\d)\\.db*" dbfile))) (if res (string->number (cadr res)) (begin (debug:print 2 *default-log-port* "WARNING: Failed to process " dbfile " for run-id") 0)))) @@ -1778,16 +1694,15 @@ ;; Update run_stats for given run_id ;; input data is a list (state status count) ;; (define (db:update-run-stats dbstruct run-id stats) - ;; (mutex-lock! *db-transaction-mutex*) + (mutex-lock! *db-transaction-mutex*) (db:with-db dbstruct #f - #f - + #t (lambda (dbdat db) ;; remove previous data (let* ((stmt1 (sqlite3:prepare db "DELETE FROM run_stats WHERE run_id=? AND state=? AND status=?;")) (stmt2 (sqlite3:prepare db "INSERT INTO run_stats (run_id,state,status,count) VALUES (?,?,?,?);")) @@ -1800,11 +1715,11 @@ (sqlite3:execute stmt1 run-id (car dat)(cadr dat)) (apply sqlite3:execute stmt2 run-id dat)) stats))))) (sqlite3:finalize! stmt1) (sqlite3:finalize! stmt2) - ;; (mutex-unlock! *db-transaction-mutex*) + (mutex-unlock! *db-transaction-mutex*) res)))) (define (db:get-main-run-stats dbstruct run-id) (db:with-db dbstruct @@ -1942,19 +1857,20 @@ (debug:print-info 4 *default-log-port* "runs:get-runs-by-patt qry=" qry-str " " runnamepatt) ;(print "runs:get-runs-by-patt qry=" qry-str " " runnamepatt) (vector header (reverse - (db:with-db dbstruct #f #f ;; reads db, does not write to it. - (lambda (dbdat db) - (sqlite3:fold-row - (lambda (res . r) - (cons (list->vector r) res)) - '() - db - qry-str - runnamepatt))))))) + (db:with-db + dbstruct #f #f ;; reads db, does not write to it. + (lambda (dbdat db) + (sqlite3:fold-row + (lambda (res . r) + (cons (list->vector r) res)) + '() + db + qry-str + runnamepatt))))))) ;; use (get-value-by-header (db:get-header runinfo)(db:get-rows runinfo)) ;; NOTE: Does NOT return a list of rows (or one row) for the first slot of the vector ;; this is inconsistent with get-runs but it makes some sense. ;; @@ -1991,11 +1907,11 @@ run-id)))) ;; does not (obviously!) removed dependent data. But why not!!? (define (db:delete-run dbstruct run-id) (db:with-db - dbstruct #f #f + dbstruct #f #t (lambda (dbdat db) (sqlite3:with-transaction db (lambda () (sqlite3:execute db "DELETE FROM test_steps WHERE test_id IN (SELECT id FROM tests WHERE run_id=?);" run-id) @@ -2023,34 +1939,38 @@ user (conc newlockval " " run-id)) (debug:print-info 1 *default-log-port* "" newlockval " run number " run-id))))) (define (db:set-run-status dbstruct run-id status msg) (db:with-db - dbstruct #f #f + dbstruct #f #t (lambda (dbdat db) (if msg (sqlite3:execute db "UPDATE runs SET status=?,comment=? WHERE id=?;" status msg run-id) (sqlite3:execute db "UPDATE runs SET status=? WHERE id=?;" status run-id))))) +(define (db:set-run-state-status-db dbdat db run-id state status ) + (sqlite3:execute + (db:get-cache-stmth + dbdat db "UPDATE runs SET status=?,state=? WHERE id=?;") status state run-id)) + (define (db:set-run-state-status dbstruct run-id state status ) (db:with-db - dbstruct #f #f + dbstruct #f #t (lambda (dbdat db) - (sqlite3:execute db "UPDATE runs SET status=?,state=? 
WHERE id=?;" status state run-id)))) - - - + (db:set-run-state-status-db dbdat db run-id state status)))) + (define (db:get-run-status dbstruct run-id) (let ((res "n/a")) (db:with-db dbstruct #f #f (lambda (dbdat db) (sqlite3:for-each-row (lambda (status) (set! res status)) - db - "SELECT status FROM runs WHERE id=?;" + (db:get-cache-stmth + dbdat db + "SELECT status FROM runs WHERE id=?;" ) run-id) res)))) (define (db:get-run-state dbstruct run-id) (let ((res "n/a")) @@ -2058,12 +1978,27 @@ dbstruct #f #f (lambda (dbdat db) (sqlite3:for-each-row (lambda (status) (set! res status)) - db - "SELECT state FROM runs WHERE id=?;" + (db:get-cache-stmth + dbdat db + "SELECT state FROM runs WHERE id=?;" ) + run-id) + res)))) + +(define (db:get-run-state-status dbstruct run-id) + (let ((res (cons "n/a" "n/a"))) + (db:with-db + dbstruct #f #f + (lambda (dbdat db) + (sqlite3:for-each-row + (lambda (state status) + (set! res (cons state status))) + (db:get-cache-stmth + dbdat db + "SELECT state,status FROM runs WHERE id=?;" ) run-id) res)))) ;;====================================================================== @@ -2306,12 +2241,12 @@ (sqlite3:for-each-row (lambda (run-id testname item-path state status) ;; id,run_id,testname,state,status,event_time,host,cpuload,diskfree,uname,rundir,item_path,run_duration,final_logf,comment (set! res (vector test-id run-id testname state status -1 "" -1 -1 "" "-" item-path -1 "-" "-"))) db - "SELECT run_id,testname,item_path,state,status FROM tests WHERE id=?;" - test-id))) + "SELECT run_id,testname,item_path,state,status FROM tests WHERE id=? and run_id=?;" + test-id run-id))) res)) ;; get a useful subset of the tests data (used in dashboard ;; use db:mintest-get-{id ,run_id,testname ...} ;; @@ -2338,11 +2273,11 @@ (define (db:delete-test-records dbstruct run-id test-id) (db:general-call dbstruct run-id 'delete-test-step-records (list test-id)) (db:general-call dbstruct run-id 'delete-test-data-records (list test-id)) (db:with-db - dbstruct run-id #f + dbstruct run-id #t (lambda (dbdat db) (sqlite3:execute db "UPDATE tests SET state='DELETED',status='n/a',comment='' WHERE id=?;" test-id)))) ;; (define (db:delete-old-deleted-test-records dbstruct) @@ -2402,39 +2337,40 @@ ;; NOTE: run-id is not used ;; ;; (define (db:test-set-state-status dbstruct run-id test-id newstate newstatus newcomment) (db:with-db dbstruct - run-id - #t + run-id #t (lambda (dbdat db) - (cond - ((and newstate newstatus newcomment) - (sqlite3:execute db "UPDATE tests SET state=?,status=?,comment=? WHERE id=?;" newstate newstatus newcomment ;; (sdb:qry 'getid newcomment) - test-id)) - ((and newstate newstatus) - (sqlite3:execute db "UPDATE tests SET state=?,status=? WHERE id=?;" newstate newstatus test-id)) - (else - (if newstate (sqlite3:execute db "UPDATE tests SET state=? WHERE id=?;" newstate test-id)) - (if newstatus (sqlite3:execute db "UPDATE tests SET status=? WHERE id=?;" newstatus test-id)) - (if newcomment (sqlite3:execute db "UPDATE tests SET comment=? WHERE id=?;" newcomment ;; (sdb:qry 'getid newcomment) - test-id)))))) - (mt:process-triggers dbstruct run-id test-id newstate newstatus)) + (db:test-set-state-status-db db run-id test-id newstate newstatus newcomment)))) + +(define (db:test-set-state-status-db db run-id test-id newstate newstatus newcomment) + (cond + ((and newstate newstatus newcomment) + (sqlite3:execute db "UPDATE tests SET state=?,status=?,comment=? 
WHERE id=?;" newstate newstatus newcomment ;; (sdb:qry 'getid newcomment) + test-id)) + ((and newstate newstatus) + (sqlite3:execute db "UPDATE tests SET state=?,status=? WHERE id=?;" newstate newstatus test-id)) + (else + (if newstate (sqlite3:execute db "UPDATE tests SET state=? WHERE id=?;" newstate test-id)) + (if newstatus (sqlite3:execute db "UPDATE tests SET status=? WHERE id=?;" newstatus test-id)) + (if newcomment (sqlite3:execute db "UPDATE tests SET comment=? WHERE id=?;" newcomment ;; (sdb:qry 'getid newcomment) + test-id)))) + ;; (mt:process-triggers dbstruct run-id test-id newstate newstatus)) ;; NOTE: Moved into calling function + ) ;; NEW BEHAVIOR: Count tests running in all runs! ;; (define (db:get-count-tests-running dbstruct run-id) ;; fastmode) - (let* ((qry ;; (if fastmode - ;; "SELECT count(id) FROM tests WHERE state in ('RUNNING','LAUNCHED','REMOTEHOSTSTART') AND NOT (uname = 'n/a' AND item_path = '') LIMIT 1;" - "SELECT count(id) FROM tests WHERE state in ('RUNNING','LAUNCHED','REMOTEHOSTSTART') AND NOT (uname = 'n/a' AND item_path = '');")) ;; ) - (db:with-db - dbstruct - run-id - #f - (lambda (dbdat db) - (let* ((stmth (db:get-cache-stmth dbdat run-id db qry))) - (sqlite3:first-result stmth)))))) + (let* ((qry "SELECT count(id) FROM tests WHERE state in ('RUNNING','LAUNCHED','REMOTEHOSTSTART') AND NOT (uname = 'n/a' AND item_path = '');")) + (db:with-db + dbstruct + run-id + #f + (lambda (dbdat db) + (let* ((stmth (db:get-cache-stmth dbdat db qry))) + (sqlite3:first-result stmth)))))) ;; NEW BEHAVIOR: Count tests running in only one run! ;; (define (db:get-count-tests-actually-running dbstruct run-id) (db:with-db @@ -2459,11 +2395,11 @@ (db:with-db dbstruct run-id #f (lambda (dbdat db) - (let* ((stmth (db:get-cache-stmth dbdat run-id db qry))) + (let* ((stmth (db:get-cache-stmth dbdat db qry))) (sqlite3:first-result stmth run-id)))))) ;; For a given testname how many items are running? Used to determine ;; probability for regenerating html ;; @@ -2472,11 +2408,11 @@ dbstruct run-id #f (lambda (dbdat db) (let* ((stmt "SELECT count(id) FROM tests WHERE state in ('RUNNING','LAUNCHED','REMOTEHOSTSTART') AND run_id=? AND NOT (uname = 'n/a' AND item_path = '') AND testname=?;") - (stmth (db:get-cache-stmth dbdat run-id db stmt))) + (stmth (db:get-cache-stmth dbdat db stmt))) (sqlite3:first-result stmth run-id testname))))) (define (db:get-not-completed-cnt dbstruct run-id) (db:with-db @@ -2552,11 +2488,11 @@ ;; (define (db:test-set-top-process-pid dbstruct run-id test-id pid) (db:with-db dbstruct run-id - #f + #t (lambda (dbdat db) (sqlite3:execute db "UPDATE tests SET attemptnum=? WHERE id=?;" pid test-id)))) (define (db:test-get-top-process-pid dbstruct run-id test-id) @@ -2565,13 +2501,13 @@ run-id #f (lambda (dbdat db) (db:first-result-default db - "SELECT attemptnum FROM tests WHERE id=?;" + "SELECT attemptnum FROM tests WHERE id=? 
AND run_id=?;" #f - test-id)))) + test-id run-id)))) (define db:test-record-fields '("id" "run_id" "testname" "state" "status" "event_time" "host" "cpuload" "diskfree" "uname" "rundir" "item_path" "run_duration" "final_logf" "comment" "shortdir" "attemptnum" "archived" "last_update")) @@ -2591,11 +2527,11 @@ (define db:test-record-qry-selector (string-intersperse db:test-record-fields ",")) (define (db:update-tesdata-on-repilcate-db dbstruct old-lt new-lt) (db:with-db - dbstruct #f #f + dbstruct #f #t (lambda (dbdat db) (sqlite3:execute db "UPDATE tests SET rundir= replace(rundir,?,?), shortdir=replace(shortdir,?,?);" old-lt new-lt old-lt new-lt)))) ;; NOTE: Use db:test-get* to access records @@ -2672,11 +2608,11 @@ (lambda (run-id) (let ((testrecs (db:get-all-tests-info-by-run-id mtdb run-id))) (db:prep-megatest.db-adj-test-ids (dbr:dbdat-dbh mtdb) run-id testrecs))) run-ids))) -;; Get test data using test_id, run-id is not used +;; Get test data using test_id ;; (define (db:get-test-info-by-id dbstruct run-id test-id) (db:with-db dbstruct run-id @@ -2686,12 +2622,32 @@ (sqlite3:for-each-row ;; attemptnum added to hold pid of top process (not Megatest) controlling a test (lambda (id run-id testname state status event-time host cpuload diskfree uname rundir-id item-path run_duration final-logf-id comment short-dir-id attemptnum archived last-update) ;; 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 (set! res (vector id run-id testname state status event-time host cpuload diskfree uname rundir-id item-path run_duration final-logf-id comment short-dir-id attemptnum archived last-update))) db - (conc "SELECT " db:test-record-qry-selector " FROM tests WHERE id=?;") - test-id) + ;; (db:get-cache-stmth dbdat db + ;; (conc "SELECT " db:test-record-qry-selector " FROM tests WHERE id=? AND run_id=?;")) + (conc "SELECT " db:test-record-qry-selector " FROM tests WHERE id=? AND run_id=?;") + test-id run-id) + res)))) + +;; Get test state, status using test_id +;; +(define (db:get-test-state-status-by-id dbstruct run-id test-id) + (db:with-db + dbstruct + run-id + #f + (lambda (dbdat db) + (let ((res (cons #f #f))) +;; (stmth (db:get-cache-stmth dbdat db "SELECT state,status FROM tests WHERE id=?;"))) + (sqlite3:for-each-row ;; attemptnum added to hold pid of top process (not Megatest) controlling a test + (lambda (state status) + (cons state status)) + db + "SELECT state,status FROM tests WHERE id=? AND run_id=?;" ;; stmth try not compiling this one - yes, this fixed the bind issue + test-id run-id) res)))) ;; Use db:test-get* to access ;; Get test data using test_ids. NB// Only works within a single run!! ;; @@ -2709,36 +2665,54 @@ db (conc "SELECT " db:test-record-qry-selector " FROM tests WHERE id in (" (string-intersperse (map conc test-ids) ",") ");")) res)))) +;; try every second until tries times proc +;; +(define (db:keep-trying-until-true proc params tries) + (let* ((res (apply proc params))) + (if res + res + (if (> tries 0) + (begin + (thread-sleep! 1) + (db:keep-trying-until-true proc params (- tries 1))) + (begin + ;; (debug:print-info 0 *default-log-port* "proc never returned true, params="params) + (print"db:keep-trying-until-true proc never returned true, proc = " proc " params =" params " tries = " tries) + #f))))) + (define (db:get-test-info dbstruct run-id test-name item-path) (db:with-db dbstruct run-id #f (lambda (dbdat db) - (let ((res #f)) - (sqlite3:for-each-row - (lambda (a . b) - (set! 
res (apply vector a b))) - db - (conc "SELECT " db:test-record-qry-selector " FROM tests WHERE testname=? AND item_path=? AND run_id=?;") - test-name item-path run-id) - res)))) + (db:get-test-info-db db run-id test-name item-path)))) + +(define (db:get-test-info-db db run-id test-name item-path) + (let ((res #f)) + (sqlite3:for-each-row + (lambda (a . b) + (set! res (apply vector a b))) + db + (conc "SELECT " db:test-record-qry-selector " FROM tests WHERE testname=? AND item_path=? AND run_id=?;") + test-name item-path run-id) + res)) (define (db:test-get-rundir-from-test-id dbstruct run-id test-id) (db:with-db dbstruct run-id #f (lambda (dbdat db) (db:first-result-default db - "SELECT rundir FROM tests WHERE id=?;" + "SELECT rundir FROM tests WHERE id=? AND run_id=?;" #f ;; default result - test-id)))) + test-id run-id)))) (define (db:get-test-times dbstruct run-name target) (let ((res `()) (qry (conc "select testname, item_path, run_duration, " (string-join (db:get-keys dbstruct) " || '/' || ") @@ -2843,11 +2817,11 @@ (db:with-db dbstruct run-id #f (lambda (dbdat db) - (let* ((stmth (db:get-cache-stmth dbdat #f db stmt)) + (let* ((stmth (db:get-cache-stmth dbdat db stmt)) (res (sqlite3:fold-row (lambda (res id test-id category variable value expected tol units comment status type last-update) (vector id test-id category variable value expected tol units comment status type last-update)) (vector #f #f #f #f #f #f #f #f #f #f #f #f) stmth @@ -2861,11 +2835,11 @@ ;; if one or more are fail (any case) then set test status to PASS, non "pass" or "fail" are ignored (define (db:test-data-rollup dbstruct run-id test-id status) (let* ((fail-count 0) (pass-count 0)) (db:with-db - dbstruct run-id #f + dbstruct run-id #t (lambda (dbdat db) (sqlite3:for-each-row (lambda (fcount pcount) (set! fail-count fcount) (set! pass-count pcount)) @@ -2960,11 +2934,11 @@ ;; EOF (define (db:csv->test-data dbstruct run-id test-id csvdata) (debug:print 4 *default-log-port* "test-id " test-id ", csvdata: " csvdata) (db:with-db - dbstruct #f #f + dbstruct #f #t (lambda (dbdat db) (let* ((csvlist (csv->list (make-csv-reader (open-input-string csvdata) '((strip-leading-whitespace? #t) (strip-trailing-whitespace? #t)))))) ;; (csv->list csvdata))) @@ -3168,163 +3142,171 @@ (define (db:set-state-status-and-roll-up-items dbstruct run-id test-name item-path state status comment) ;; establish info on incoming test followed by info on top level test ;; BBnote - for mode itemwait, linkage between upstream test & matching item status is propagated to run queue in db:prereqs-not-met (let* ((testdat (if (number? test-name) (db:get-test-info-by-id dbstruct run-id test-name) ;; test-name is actually a test-id - (db:get-test-info dbstruct run-id test-name item-path))) + (db:keep-trying-until-true ;; in our threaded stuff this call could happen before the test is registered (maybe?) + db:get-test-info + (list dbstruct run-id test-name item-path) + 10))) (test-id (db:test-get-id testdat)) (test-name (if (number? test-name) (db:test-get-testname testdat) test-name)) (item-path (db:test-get-item-path testdat)) (tl-testdat (db:get-test-info dbstruct run-id test-name "")) (tl-test-id (if tl-testdat (db:test-get-id tl-testdat) - #f))) + #f)) + (new-state-eh #f) + (new-status-eh #f)) (if (member state '("LAUNCHED" "REMOTEHOSTSTART")) (db:general-call dbstruct run-id 'set-test-start-time (list test-id))) (mutex-lock! 
*db-transaction-mutex*) (db:with-db - dbstruct run-id #f + dbstruct run-id #t (lambda (dbdat db) (let ((tr-res (sqlite3:with-transaction db (lambda () ;; NB// Pass the db so it is part fo the transaction - (db:test-set-state-status db run-id test-id state status comment) ;; this call sets the item state/status + (db:test-set-state-status-db db run-id test-id state status comment) ;; this call sets the item state/status (if (not (equal? item-path "")) ;; only roll up IF incoming test is an item - (let* ((state-status-counts (db:get-all-state-status-counts-for-test db run-id test-name item-path state status)) ;; item-path is used to exclude current state/status of THIS test - (state-statuses (db:roll-up-rules state-status-counts state status)) - (newstate (car state-statuses)) - (newstatus (cadr state-statuses))) + (let* ((state-status-counts (db:get-all-state-status-counts-for-test db run-id test-name item-path state status)) ;; item-path is used to exclude current state/status of THIS test + (state-statuses (db:roll-up-rules state-status-counts state status)) + (newstate (car state-statuses)) + (newstatus (cadr state-statuses))) + (set! new-state-eh newstate) + (set! new-status-eh newstatus) (debug:print 4 *default-log-port* "BB> tl-test-id="tl-test-id" ; "test-name":"item-path" newstate="newstate" newstatus="newstatus" len(sscs)="(length state-status-counts) " state-status-counts: " - (apply conc - (map (lambda (x) - (conc - (with-output-to-string (lambda () (pp (dbr:counts->alist x)))) " | ")) - state-status-counts))); end debug:print - - (if tl-test-id - (db:test-set-state-status db run-id tl-test-id newstate newstatus #f)) ;; we are still in the transaction - must access the db and not the dbstruct + (apply conc + (map (lambda (x) + (conc + (with-output-to-string (lambda () (pp (dbr:counts->alist x)))) " | ")) + state-status-counts))); end debug:print + (if tl-test-id + (db:test-set-state-status-db db run-id tl-test-id newstate newstatus #f)) ;; we are still in the transaction - must access the db and not the dbstruct )))))) (mutex-unlock! *db-transaction-mutex*) (if (and test-id state status (equal? status "AUTO")) (db:test-data-rollup dbstruct run-id test-id status)) + (if new-state-eh ;; moved from db:test-set-state-status + (mt:process-triggers dbstruct run-id test-id new-state-eh new-status-eh)) tr-res))))) (define (db:roll-up-rules state-status-counts state status) - (let* ((running (length (filter (lambda (x) - (member (dbr:counts-state x) *common:running-states*)) - state-status-counts))) - (bad-not-started (length (filter (lambda (x) - (and (equal? (dbr:counts-state x) "NOT_STARTED") - (not (member (dbr:counts-status x) *common:not-started-ok-statuses*)))) - state-status-counts))) - (all-curr-states (common:special-sort ;; worst -> best (sort of) - (delete-duplicates - (if (and state (not (member state *common:dont-roll-up-states*))) - (cons state (map dbr:counts-state state-status-counts)) - (map dbr:counts-state state-status-counts))) - *common:std-states* >)) - (all-curr-statuses (common:special-sort ;; worst -> best - (delete-duplicates - (if (and state status (not (member state *common:dont-roll-up-states*))) - (cons status (map dbr:counts-status state-status-counts)) - (map dbr:counts-status state-status-counts))) - *common:std-statuses* >)) - (non-completes (filter (lambda (x) - (not (member x (cons "COMPLETED" *common:dont-roll-up-states*)))) - all-curr-states)) - (preq-fails (filter (lambda (x) - (equal? 
x "PREQ_FAIL")) - all-curr-statuses)) - (num-non-completes (length non-completes)) - (newstate (cond - ((> running 0) "RUNNING") ;; anything running, call the situation running - ((> (length preq-fails) 0) "NOT_STARTED") - ((> bad-not-started 0) "COMPLETED") ;; we have an ugly situation, it is completed in the sense we cannot do more. - ((> num-non-completes 0) (car non-completes)) ;; (remove (lambda (x)(equal? "COMPLETED" x)) all-curr-states))) ;; only rollup DELETED if all DELETED - (else (car all-curr-states)))) - (newstatus (cond - ((> (length preq-fails) 0) "PREQ_FAIL") - ((or (> bad-not-started 0) - (and (equal? newstate "NOT_STARTED") - (> num-non-completes 0))) - "STARTED") - (else (car all-curr-statuses))))) - (debug:print-info 2 *default-log-port* - "\n--> probe db:set-state-status-and-roll-up-items: " - "\n--> state-status-counts: "(map dbr:counts->alist state-status-counts) - "\n--> running: "running - "\n--> bad-not-started: "bad-not-started - "\n--> non-non-completes: "num-non-completes - "\n--> non-completes: "non-completes - "\n--> all-curr-states: "all-curr-states - "\n--> all-curr-statuses: "all-curr-statuses - "\n--> newstate "newstate - "\n--> newstatus "newstatus - "\n\n") - - ;; NB// Pass the db so it is part of the transaction - (list newstate newstatus))) + (if (null? state-status-counts) + '(#f #f) + (let* ((running (length (filter (lambda (x) + (member (dbr:counts-state x) *common:running-states*)) + state-status-counts))) + (bad-not-started (length (filter (lambda (x) + (and (equal? (dbr:counts-state x) "NOT_STARTED") + (not (member (dbr:counts-status x) *common:not-started-ok-statuses*)))) + state-status-counts))) + (all-curr-states (common:special-sort ;; worst -> best (sort of) + (delete-duplicates + (if (and state (not (member state *common:dont-roll-up-states*))) + (cons state (map dbr:counts-state state-status-counts)) + (map dbr:counts-state state-status-counts))) + *common:std-states* >)) + (all-curr-statuses (common:special-sort ;; worst -> best + (delete-duplicates + (if (and state status (not (member state *common:dont-roll-up-states*))) + (cons status (map dbr:counts-status state-status-counts)) + (map dbr:counts-status state-status-counts))) + *common:std-statuses* >)) + (non-completes (filter (lambda (x) + (not (member x (cons "COMPLETED" *common:dont-roll-up-states*)))) + all-curr-states)) + (preq-fails (filter (lambda (x) + (equal? x "PREQ_FAIL")) + all-curr-statuses)) + (num-non-completes (length non-completes)) + (newstate (cond + ((> running 0) "RUNNING") ;; anything running, call the situation running + ((> (length preq-fails) 0) "NOT_STARTED") + ((> bad-not-started 0) "COMPLETED") ;; we have an ugly situation, it is completed in the sense we cannot do more. + ((> num-non-completes 0) (car non-completes)) ;; (remove (lambda (x)(equal? "COMPLETED" x)) all-curr-states))) ;; only rollup DELETED if all DELETED + (else (car all-curr-states)))) + (newstatus (cond + ((> (length preq-fails) 0) "PREQ_FAIL") + ((or (> bad-not-started 0) + (and (equal? 
newstate "NOT_STARTED") + (> num-non-completes 0))) + "STARTED") + (else (car all-curr-statuses))))) + (debug:print-info 2 *default-log-port* + "\n--> probe db:set-state-status-and-roll-up-items: " + "\n--> state-status-counts: "(map dbr:counts->alist state-status-counts) + "\n--> running: "running + "\n--> bad-not-started: "bad-not-started + "\n--> non-non-completes: "num-non-completes + "\n--> non-completes: "non-completes + "\n--> all-curr-states: "all-curr-states + "\n--> all-curr-statuses: "all-curr-statuses + "\n--> newstate "newstate + "\n--> newstatus "newstatus + "\n\n") + + ;; NB// Pass the db so it is part of the transaction + (list newstate newstatus)))) (define (db:set-state-status-and-roll-up-run dbstruct run-id curr-state curr-status) (mutex-lock! *db-transaction-mutex*) (db:with-db - dbstruct run-id #f + dbstruct run-id #t (lambda (dbdat db) (let ((tr-res (sqlite3:with-transaction db (lambda () - (let* ((state-status-counts (db:get-all-state-status-counts-for-run db run-id)) + (let* ((state-status-counts (db:get-all-state-status-counts-for-run-db dbdat db run-id)) (state-statuses (db:roll-up-rules state-status-counts #f #f )) (newstate (car state-statuses)) (newstatus (cadr state-statuses))) (if (or (not (eq? newstate curr-state)) (not (eq? newstatus curr-status))) - (db:set-run-state-status db run-id newstate newstatus ))))))) + (db:set-run-state-status-db dbdat db run-id newstate newstatus ))))))) (mutex-unlock! *db-transaction-mutex*) tr-res)))) + +(define (db:get-all-state-status-counts-for-run-db dbdat db run-id) + (sqlite3:map-row + (lambda (state status count) + (make-dbr:counts state: state status: status count: count)) + (db:get-cache-stmth + dbdat db + "SELECT state,status,count(id) FROM tests WHERE run_id=? GROUP BY state,status;") + run-id )) (define (db:get-all-state-status-counts-for-run dbstruct run-id) - (let* ((test-count-recs (db:with-db - dbstruct #f #f - (lambda (dbdat db) - (sqlite3:map-row - (lambda (state status count) - (make-dbr:counts state: state status: status count: count)) - db - "SELECT state,status,count(id) FROM tests WHERE run_id=? GROUP BY state,status;" - run-id ))))) - test-count-recs)) - + (db:with-db + dbstruct #f #f + (lambda (dbdat db) + (db:get-all-state-status-counts-for-run-db dbdat db run-id)))) ;; BBnote: db:get-all-state-status-counts-for-test returns dbr:counts object aggregating state and status of items of a given test, *not including rollup state/status* ;; ;; NOTE: This is called within a transaction ;; -(define (db:get-all-state-status-counts-for-test dbstruct run-id test-name item-path item-state-in item-status-in) - (let* ((test-info (db:get-test-info dbstruct run-id test-name item-path)) +(define (db:get-all-state-status-counts-for-test db run-id test-name item-path item-state-in item-status-in) + (let* ((test-info (db:get-test-info-db db run-id test-name item-path)) (item-state (or item-state-in (db:test-get-state test-info))) (item-status (or item-status-in (db:test-get-status test-info))) - (other-items-count-recs (db:with-db - dbstruct run-id #f - (lambda (dbdat db) - (sqlite3:map-row - (lambda (state status count) - (make-dbr:counts state: state status: status count: count)) - db - ;; ignore current item because we have changed its value in the current transation so this select will see the old value. - "SELECT state,status,count(id) FROM tests WHERE run_id=? AND testname=? AND item_path != '' AND item_path !=? 
GROUP BY state,status;" - run-id test-name item-path)))) - + (other-items-count-recs (sqlite3:map-row + (lambda (state status count) + (make-dbr:counts state: state status: status count: count)) + db + ;; ignore current item because we have changed its value in the current transation so this select will see the old value. + "SELECT state,status,count(id) FROM tests WHERE run_id=? AND testname=? AND item_path != '' AND item_path !=? GROUP BY state,status;" + run-id test-name item-path)) ;; add current item to tally outside of sql query - (match-countrec-lambda (lambda (countrec) - (and (equal? (dbr:counts-state countrec) item-state) + (match-countrec-lambda (lambda (countrec) + (and (equal? (dbr:counts-state countrec) item-state) (equal? (dbr:counts-status countrec) item-status)))) - + (already-have-count-rec-list (filter match-countrec-lambda other-items-count-recs)) ;; will have either 0 or 1 count recs depending if another item shares this item's state/status (updated-count-rec (if (null? already-have-count-rec-list) (make-dbr:counts state: item-state status: item-status count: 1) @@ -3334,11 +3316,10 @@ (nonmatch-countrec-lambda (lambda (countrec) (not (match-countrec-lambda countrec)))) (unrelated-rec-list (filter nonmatch-countrec-lambda other-items-count-recs))) - (cons updated-count-rec unrelated-rec-list))) ;; (define (db:get-all-item-states db run-id test-name) ;; (sqlite3:map-row ;; (lambda (a) a) @@ -3542,19 +3523,21 @@ (else (hash-table-set! *logged-in-clients* client-signature (current-seconds)) '(#t "successful login")))) +;; NO WAY TO KNOW IF IT MODIFIES THE DB BUT NEARLY ALL ARE UPDATES/INSERTS +;; (define (db:general-call dbstruct run-id stmtname params) ;; Why is db:lookup-query above not used here to get the query? (let ((query (let ((q (alist-ref (if (string? stmtname) (string->symbol stmtname) stmtname) db:queries))) (if q (car q) #f)))) (db:with-db - dbstruct run-id #f + dbstruct run-id #t (lambda (dbdat db) (apply sqlite3:execute db query params) #t)))) ;; get a summary of state and status counts to calculate a rollup @@ -3762,19 +3745,19 @@ testname) res)))) ;; create a new record for a given testname (define (db:testmeta-add-record dbstruct testname) - (db:with-db dbstruct #f #f + (db:with-db dbstruct #f #t (lambda (dbdat db) (sqlite3:execute db "INSERT OR IGNORE INTO test_meta (testname,author,owner,description,reviewed,iterated,avg_runtime,avg_disk,tags) VALUES (?,'','','','','','','','');" testname)))) ;; update one of the testmeta fields (define (db:testmeta-update-field dbstruct testname field value) - (db:with-db dbstruct #f #f + (db:with-db dbstruct #f #t (lambda (dbdat db) (sqlite3:execute db (conc "UPDATE test_meta SET " field "=? WHERE testname=?;") value testname)))) @@ -4032,12 +4015,10 @@ ;; To sync individual run ;;====================================================================== (define (db:get-run-record-ids dbstruct target run keynames test-patt) (let* ((backcons (lambda (lst item)(cons item lst))) (all_tests '()) - (all_test_steps '()) - (all_test_data '()) (keystr (string-intersperse (map (lambda (key val) (conc key " like '" val "'")) keynames (string-split target "/")) @@ -4063,39 +4044,15 @@ ) ) ) all_tests ) ) - (set! all_test_steps - (append - (map (lambda (x) (cons x run_id)) - (db:with-db dbstruct run_id #f - (lambda (dbdat db) - (sqlite3:fold-row backcons '() db (conc "SELECT id FROM test_steps WHERE test_id in (" test-qry ")")) - ) - ) - ) all_test_steps - ) - ) - (set! 
all_test_data - (append - (map (lambda (x) (cons x run_id)) - (db:with-db dbstruct run_id #f - (lambda (dbdat db) - (sqlite3:fold-row backcons '() db (conc "SELECT id FROM test_data WHERE test_id in (" test-qry ")")) - ) - ) - ) all_test_data - ) - ) ) run_ids ) `((runs . ,run_ids) (tests . ,all_tests) - (test_steps . ,all_test_steps) - (test_data . ,all_test_data) ) ) ) @@ -4103,17 +4060,27 @@ ;; Just for sync, procedures to make sync easy ;;====================================================================== ;; get an alist of run ids and test/run, test_step/run pairs changed since time since-time ;; '((runs . (1 2 3 ...))(tests . ((5 . 1) (6 . 3) (6 . 2) (7 . 1) ... + +;; Retrieves record IDs from the database based on the timestamp of their last update. + +;; The function takes two arguments: dbstruct, which represents the database structure, and since-time, which is a timestamp indicating the time of the last update. +;; The function first defines a few helper functions, including backcons, which takes a list and an item and adds the item to the front of the list. +;; It then initializes several variables to empty lists: all_tests, all_test_steps, all_test_data, all_run_ids, and all_test_ids. +;; The function then retrieves a list of IDs for runs that have been changed since since-time using the db:get-changed-run-ids function. +;; It then filters the full list of run IDs to only include those that match the changed run IDs based on their modulo 100. +;; For each changed run ID, the function retrieves a list of test IDs, test step IDs, and test data IDs that have been updated since since-time. +;; It appends these IDs to the appropriate lists (all_tests, all_test_steps, and all_test_data) using the append and map functions. +;; The function then retrieves a list of run stat IDs that have been updated since since-time. +;; Finally, the function returns a list of associations between record types and their corresponding IDs: runs, tests, test_steps, test_data, and run_stats. ;; (define (db:get-changed-record-ids dbstruct since-time) ;; no transaction, allow the db to be accessed between the big queries (let* ((backcons (lambda (lst item)(cons item lst))) (all_tests '()) - (all_test_steps '()) - (all_test_data '()) (changed_run_dbs (db:get-changed-run-ids since-time)) ;; gets the rundb numbers (all_run_ids (db:with-db dbstruct #f #f (lambda (dbdat db) (sqlite3:fold-row backcons '() db "SELECT id FROM runs")) @@ -4124,16 +4091,10 @@ (db:with-db dbstruct #f #f (lambda (dbdat db) (sqlite3:fold-row backcons '() db "SELECT id FROM runs WHERE last_update>=?" since-time)) ) ) - (run_stat_ids - (db:with-db dbstruct #f #f - (lambda (dbdat db) - (sqlite3:fold-row backcons '() db "SELECT id FROM run_stats WHERE last_update>=?" since-time)) - ) - ) ) (for-each (lambda (run_id) (set! all_tests (append @@ -4144,43 +4105,18 @@ ) ) ) all_tests ) ) - (set! all_test_steps - (append - (map (lambda (x) (cons x run_id)) - (db:with-db dbstruct run_id #f - (lambda (dbdat db) - (sqlite3:fold-row backcons '() db "SELECT id FROM test_steps WHERE last_update>=?" since-time) - ) - ) - ) all_test_steps - ) - ) - (set! all_test_data - (append - (map (lambda (x) (cons x run_id)) - (db:with-db dbstruct run_id #f - (lambda (dbdat db) - (sqlite3:fold-row backcons '() db "SELECT id FROM test_data WHERE last_update>=?" since-time) - ) - ) - ) all_test_data - ) - ) ) changed_run_ids ) (debug:print 2 *default-log-port* "run_ids = " run_ids) (debug:print 2 *default-log-port* "all_tests = " all_tests) `((runs . 
,run_ids) (tests . ,all_tests) - (test_steps . ,all_test_steps) - (test_data . ,all_test_data) - (run_stats . ,run_stat_ids) ) ) ) ;;====================================================================== @@ -4370,17 +4306,17 @@ ;; sync for filesystem local db writes ;; (define (db:run-lock-and-sync no-sync-db) (let* ((tmp-area (common:get-db-tmp-area)) - (dbfiles (glob (conc tmp-area"/.megatest/*.db"))) + (dbfiles (glob (conc tmp-area"/.mtdb/*.db"))) (sync-durations (make-hash-table))) ;; (debug:print-info 0 *default-log-port* "lock-and-sync, dbfiles: "dbfiles) (for-each (lambda (file) (let* ((fname (conc (pathname-file file) ".db")) - (fulln (conc *toppath*"/.megatest/"fname)) + (fulln (conc *toppath*"/.mtdb/"fname)) (time1 (if (file-exists? file) (file-modification-time file) (begin (debug:print-info 0 *default-log-port* "Sync - I do not see file "file) 1))) @@ -4427,11 +4363,10 @@ (last-time (current-seconds)) ;; last time through the sync loop (no-sync-db (db:open-no-sync-db)) (sync-duration 0) ;; run time of the sync in milliseconds (tmp-area (common:get-db-tmp-area))) ;; Sync moved to http-transport keep-running loop - (set! *no-sync-db* no-sync-db) ;; make the no sync db available to api calls (debug:print-info 2 *default-log-port* "Periodic copy-based sync thread started. syncer is copy-sync, tmp-area is " tmp-area) (debug:print-info 3 *default-log-port* "watchdog starting. syncer is copy-sync pid="(current-process-id));; " this-wd-num="this-wd-num) (if (and legacy-sync (not *time-to-exit*)) (begin @@ -4470,11 +4405,10 @@ (last-time (current-seconds)) (no-sync-db (db:open-no-sync-db)) (stmt-cache #f) ;; (dbr:dbstruct-stmt-cache dbstruct)) (sync-duration 0) ;; run time of the sync in milliseconds (subdbs (hash-table-values (dbr:dbstruct-subdbs dbstruct)))) - (set! *no-sync-db* no-sync-db) ;; make the no sync db available to api calls (debug:print-info 2 *default-log-port* "Periodic sync thread started.") (debug:print-info 3 *default-log-port* "watchdog starting. legacy-sync is " legacy-sync" pid="(current-process-id) );; " this-wd-num="this-wd-num) (if (and legacy-sync (not *time-to-exit*)) (begin @@ -4612,15 +4546,20 @@ (begin (sqlite3:interrupt! db) (sqlite3:finalize! db #t) ;; (vector-set! *task-db* 0 #f) (set! *task-db* #f))))) + (if (and *no-sync-db* + (sqlite3:database? *no-sync-db*)) + (sqlite3:finalize! *no-sync-db* #t)) (if (and (not (args:get-arg "-server")) - *runremote*) + *runremote* + (eq? (rmt:transport-mode) 'http)) (begin (debug:print-info 0 *default-log-port* "Closing all client connections...") - (http-client#close-all-connections!))) + (http-transport:close-connections *runremote*) + #;(http-client#close-all-connections!))) ;; (if (and *runremote* ;; (remote-conndat *runremote*)) ;; (begin ;; (http-client#close-all-connections!))) ;; for http-client (if (not (eq? *default-log-port* (current-error-port))) Index: db_records.scm ================================================================== --- db_records.scm +++ db_records.scm @@ -16,76 +16,10 @@ ;; along with Megatest. If not, see . 
;;====================================================================== ;; dbstruct ;;====================================================================== - -;; -;; -path-|-megatest.db -;; |-db-|-main.db -;; |-monitor.db -;; |-sdb.db -;; |-fdb.db -;; |-1.db -;; |-.db -;; -;; -;; Accessors for a dbstruct -;; - -;; (define-inline (dbr:dbstruct-main vec) (vector-ref vec 0)) ;; ( db path ) -;; (define-inline (dbr:dbstruct-strdb vec) (vector-ref vec 1)) ;; ( db path ) -;; (define-inline (dbr:dbstruct-path vec) (vector-ref vec 2)) -;; (define-inline (dbr:dbstruct-local vec) (vector-ref vec 3)) -;; (define-inline (dbr:dbstruct-rundb vec) (vector-ref vec 4)) ;; ( db path ) -;; (define-inline (dbr:dbstruct-inmem vec) (vector-ref vec 5)) ;; ( db #f ) -;; (define-inline (dbr:dbstruct-mtime vec) (vector-ref vec 6)) -;; (define-inline (dbr:dbstruct-rtime vec) (vector-ref vec 7)) -;; (define-inline (dbr:dbstruct-stime vec) (vector-ref vec 8)) -;; (define-inline (dbr:dbstruct-inuse vec) (vector-ref vec 9)) -;; (define-inline (dbr:dbstruct-refdb vec) (vector-ref vec 10)) ;; ( db path ) -;; (define-inline (dbr:dbstruct-locdbs vec) (vector-ref vec 11)) -;; (define-inline (dbr:dbstruct-olddb vec) (vector-ref vec 12)) ;; ( db path ) -;; ;; (define-inline (dbr:dbstruct-main-path vec) (vector-ref vec 13)) -;; ;; (define-inline (dbr:dbstruct-rundb-path vec) (vector-ref vec 14)) -;; ;; (define-inline (dbr:dbstruct-run-id vec) (vector-ref vec 13)) -;; -;; (define-inline (dbr:dbstruct-main-set! vec val)(vector-set! vec 0 val)) -;; (define-inline (dbr:dbstruct-strdb-set! vec val)(vector-set! vec 1 val)) -;; (define-inline (dbr:dbstruct-path-set! vec val)(vector-set! vec 2 val)) -;; (define-inline (dbr:dbstruct-local-set! vec val)(vector-set! vec 3 val)) -;; (define-inline (dbr:dbstruct-rundb-set! vec val)(vector-set! vec 4 val)) -;; (define-inline (dbr:dbstruct-inmem-set! vec val)(vector-set! vec 5 val)) -;; (define-inline (dbr:dbstruct-mtime-set! vec val)(vector-set! vec 6 val)) -;; (define-inline (dbr:dbstruct-rtime-set! vec val)(vector-set! vec 7 val)) -;; (define-inline (dbr:dbstruct-stime-set! vec val)(vector-set! vec 8 val)) -;; (define-inline (dbr:dbstruct-inuse-set! vec val)(vector-set! vec 9 val)) -;; (define-inline (dbr:dbstruct-refdb-set! vec val)(vector-set! vec 10 val)) -;; (define-inline (dbr:dbstruct-locdbs-set! vec val)(vector-set! vec 11 val)) -;; (define-inline (dbr:dbstruct-olddb-set! vec val)(vector-set! vec 12 val)) -;; (define-inline (dbr:dbstruct-main-path-set! vec val)(vector-set! vec 13 val)) -;; (define-inline (dbr:dbstruct-rundb-path-set! vec val)(vector-set! vec 14 val)) -;; -; (define-inline (dbr:dbstruct-run-id-set! vec val)(vector-set! vec 13 val)) - -;; constructor for dbstruct -;; -;; (define (make-dbr:dbstruct #!key (path #f)(local #f)) -;; (let ((v (make-vector 15 #f))) -;; (dbr:dbstruct-path-set! v path) -;; (dbr:dbstruct-local-set! v local) -;; (dbr:dbstruct-locdbs-set! v (make-hash-table)) -;; v)) - -;; Returns the database for a particular run-id fron the dbstruct:localdbs -;; -(define (dbr:dbstruct-localdb v run-id) - (hash-table-ref/default (dbr:dbstruct-locdbs v) run-id #f)) - -(define (dbr:dbstruct-localdb-set! v run-id db) - (hash-table-set! 
(dbr:dbstruct-locdbs v) run-id db)) - (define (make-db:test)(make-vector 20)) (define-inline (db:test-get-id vec) (vector-ref vec 0)) (define-inline (db:test-get-run_id vec) (vector-ref vec 1)) (define-inline (db:test-get-testname vec) (vector-ref vec 2)) Index: dbfile.scm ================================================================== --- dbfile.scm +++ dbfile.scm @@ -15,35 +15,58 @@ ;; ;; You should have received a copy of the GNU General Public License ;; along with Megatest. If not, see . ;;====================================================================== + +(use srfi-18) (declare (unit dbfile)) -;; (declare (uses debugprint)) +(declare (uses debugprint)) (declare (uses commonmod)) (module dbfile * (import scheme chicken data-structures extras - matchable) - -(import (prefix sqlite3 sqlite3:) - posix typed-records srfi-18 srfi-1 - srfi-69 - stack - files - ports - - commonmod - ) - -;; (import debugprint) + matchable + + (prefix sqlite3 sqlite3:) + posix typed-records + + srfi-18 + srfi-1 + srfi-69 + stack + files + ports + + commonmod + debugprint + ) + +;; parameters +;; +(define dbfile:testsuite-name (make-parameter #f)) + +(define keep-age-param (make-parameter 10)) ;; qif file age, if over move to attic +(define num-run-dbs (make-parameter 10)) ;; number of db's in .mtdb +(define dbfile:sync-method (make-parameter 'attach)) ;; 'attach or 'original +(define dbfile:cache-method (make-parameter 'cachedb)) ;; 'direct +(define dbcache-mode (make-parameter 'tmp)) ;; 'cachedb, 'tmp (changes what open cachedb routine does) + +;; moved from tcp-transportmod so that it can be used in launch.scm +(define tt-server-profile-string (make-parameter "")) + +;; 'original - use old condition code +;; 'suicide-mode - create mtrah/stop-the-train with info on what went wrong +;; else use no condition code (should be production mode) +;; +(define no-condition-db-with-db (make-parameter 'suicide-mode)) ;;====================================================================== ;; R E C O R D S ;;====================================================================== @@ -54,21 +77,32 @@ (areapath #f) (homehost #f) (tmppath #f) (read-only #f) (subdbs (make-hash-table)) + ;; + ;; for the cachedb approach (see dbmod.scm) + ;; this is one db per server + (cachedb #f) ;; handle for the in memory copy + (dbfile #f) ;; path to the db file on disk + (dbfname #f) ;; short name of db file on disk (used to validate accessing correct db) + (ondiskdb #f) ;; handle for the on-disk file + (dbtmpname #f) ;; path to db file in /tmp (non-imem method) + (dbdat #f) ;; create a dbdat for the downstream calls such as db:with-db + (last-update 0) + (sync-proc #f) ) ;; NOTE: Need one dbr:subdb per main.db, 1.db ... ;; (defstruct dbr:subdb - (dbname #f) ;; .megatest/1.db - (mtdbfile #f) ;; mtrah/.megatest/1.db + (dbname #f) ;; .mtdb/1.db + (mtdbfile #f) ;; mtrah/.mtdb/1.db (mtdbdat #f) ;; only need one of these for syncing ;; (dbdats (make-hash-table)) ;; id => dbdat - (tmpdbfile #f) ;; /tmp/.../.megatest/1.db - ;; (refndbfile #f) ;; /tmp/.../.megatest/1.db_ref + (tmpdbfile #f) ;; /tmp/.../.mtdb/1.db + ;; (refndbfile #f) ;; /tmp/.../.mtdb/1.db_ref (dbstack (make-stack)) ;; stack for tmp dbr:dbdat, (homehost #f) ;; not used yet (on-homehost #f) ;; not used yet (read-only #f) (last-sync 0) @@ -81,10 +115,16 @@ (dbh #f) (stmt-cache (make-hash-table)) (read-only #f) (birth-sec (current-seconds))) +;; used in simple-get-runs (thanks Brandon!) 
+(define-record simple-run target id runname state status owner event_time) +(define-record-printer (simple-run x out) + (fprintf out "#,(simple-run ~S ~S ~S ~S)" + (simple-run-target x) (simple-run-id x) (simple-run-runname x) (time->string (seconds->local-time (simple-run-event_time x) )))) + (define *dbstruct-dbs* #f) (define *db-open-mutex* (make-mutex)) (define *db-access-mutex* (make-mutex)) ;; used in common.scm (define *no-sync-db* #f) (define *db-sync-in-progress* #f) @@ -92,10 +132,13 @@ (define *max-api-process-requests* 0) (define *api-process-request-count* 0) (define *db-write-access* #t) (define *db-last-sync* 0) ;; last time the sync to megatest.db happened (define *db-multi-sync-mutex* (make-mutex)) ;; protect access to *db-sync-in-progress*, *db-last-sync* +(define *db-last-access* (current-seconds)) + +(define *db-transaction-mutex* (make-mutex)) (define (db:generic-error-printout exn . message) (print-call-chain (current-error-port)) (apply dbfile:print-err message) (dbfile:print-err @@ -157,51 +200,38 @@ ) #f ) ) -;; ;; set up a single db (e.g. main.db, 1.db ... etc.) -;; ;; -;; (define (db:setup-db dbstruct areapath run-id) -;; (let* ((dbname (db:run-id->dbname run-id)) -;; (dbstruct (hash-table-ref/default dbstructs dbname #f))) -;; (if dbstruct -;; dbstruct -;; (let* ((dbstruct-new (make-dbr:dbstruct))) -;; (db:open-db dbstruct-new run-id areapath: areapath do-sync: #t) -;; (hash-table-set! dbstructs dbname dbstruct-new) -;; dbstruct-new)))) - -;; ; Returns the dbdat for a particular dbfile inside the area -;; ;; -;; (define (dbr:dbstruct-get-dbdat dbstruct dbfile) -;; (hash-table-ref/default (dbr:dbstruct-dbdats dbstruct) dbfile #f)) -;; -;; (define (dbr:dbstruct-dbdat-put! dbstruct dbfile db) -;; (hash-table-set! (dbr:dbstruct-dbdats dbstruct) dbfile db)) -;; -;; (define (db:run-id->first-num run-id) -;; (let* ((s (number->string run-id)) -;; (l (string-length s))) -;; (substring s (- l 1) l))) - -;; 1234 => 4/1234.db -;; #f => 0/main.db -;; (abandoned the idea of num/db) -;; +(define (dbfile:make-tmpdir-name areapath tmpadj) + (let* ((dname (conc "/tmp/"(current-user-name)"/" (string-translate areapath "/" ".") tmpadj))) + (unless (directory-exists? dname) + (create-directory dname #t)) + dname)) + (define (dbfile:run-id->path apath run-id) (conc apath"/"(dbfile:run-id->dbname run-id))) (define (db:dbname->path apath dbname) (conc apath"/"dbname)) -;; POTENTIAL BUG: this implementation could produce a db file if run-id is neither #f or a number -(define (dbfile:run-id->dbname run-id) +(define (dbfile:run-id->dbnum run-id) (cond - ((number? run-id) (conc ".megatest/" (modulo run-id 100) ".db")) - ((not run-id) (conc ".megatest/main.db")) - (else run-id))) + ((number? run-id) + (modulo run-id (num-run-dbs))) + ((not run-id) "main") ;; 0 or main? No, not 0. + (else + (assert #f "FATAL: run-id is required to be a number or #f")))) + +;; just the filename +(define (dbfile:run-id->dbfname run-id) + (conc (dbfile:run-id->dbnum run-id)".db")) + +;; the path in MTRAH with the filename +(define (dbfile:run-id->dbname run-id) + (conc ".mtdb/"(dbfile:run-id->dbfname run-id))) + ;; Make the dbstruct, setup up auxillary db's and call for main db at least once ;; ;; called in http-transport and replicated in rmt.scm for *local* access. 
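;; Worked example (a sketch, assuming the default (num-run-dbs) of 10 set
;; above) of how run ids map onto the .mtdb db files via the helpers above:
;;
;;   (dbfile:run-id->dbnum   1234) ;; => 4             (1234 modulo 10)
;;   (dbfile:run-id->dbfname 1234) ;; => "4.db"
;;   (dbfile:run-id->dbname  1234) ;; => ".mtdb/4.db"
;;   (dbfile:run-id->dbname  #f)   ;; => ".mtdb/main.db"
;;
;; Anything other than a number or #f now hits the assert instead of
;; quietly generating a bogus db file name.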
;; @@ -209,14 +239,12 @@ (cond (*dbstruct-dbs* (dbfile:print-err "WARNING: dbfile:setup called when *dbstruct-dbs* is already initialized") *dbstruct-dbs*) ;; TODO: when multiple areas are supported, this optimization will be a hazard (else - (let* ((dbstruct (make-dbr:dbstruct))) + (let* ((dbstruct (make-dbr:dbstruct areapath: areapath tmppath: tmppath))) (set! *dbstruct-dbs* dbstruct) - (dbr:dbstruct-areapath-set! dbstruct areapath) - (dbr:dbstruct-tmppath-set! dbstruct tmppath) dbstruct)))) (define (dbfile:get-subdb dbstruct run-id) (let* ((dbfname (dbfile:run-id->dbname run-id))) (hash-table-ref/default (dbr:dbstruct-subdbs dbstruct) dbfname #f))) @@ -224,16 +252,19 @@ (define (dbfile:set-subdb dbstruct run-id subdb) (hash-table-set! (dbr:dbstruct-subdbs dbstruct) (dbfile:run-id->dbname run-id) subdb)) ;; (define *dbfile:num-handles-in-use* 0) -;; Get/open a database +;; Get/open a database. +;; +;; NOTE: most usage should call dbfile:open-db to get a dbdat +;; ;; if run-id => get run specific db ;; if #f => get main db -;; if run-id is a string treat it as a filename -;; if db already open - return inmem -;; if db not open, open inmem, rundb and sync then return inmem +;; if run-id is a string treat it as a filename - DON'T use this - we'll get rid of it. +;; if db already open - return cachedb +;; if db not open, open cachedb, rundb and sync then return cachedb ;; inuse gets set automatically for rundb's ;; (define (dbfile:get-dbdat dbstruct run-id) (let* ((subdb (dbfile:get-subdb dbstruct run-id))) (if (stack-empty? (dbr:subdb-dbstack subdb)) @@ -241,12 +272,16 @@ (begin (stack-pop! (dbr:subdb-dbstack subdb)))))) ;; return a previously opened db handle to the stack of available handles (define (dbfile:add-dbdat dbstruct run-id dbdat) - (let* ((subdb (dbfile:get-subdb dbstruct run-id))) - (stack-push! (dbr:subdb-dbstack subdb) dbdat) + (let* ((subdb (dbfile:get-subdb dbstruct run-id)) + (dbstk (dbr:subdb-dbstack subdb)) + (count (stack-count dbstk))) + (if (> count 15) + (dbfile:print-err "WARNING: stack for "run-id".db is "count".")) + (stack-push! dbstk dbdat) dbdat)) ;; set up a subdb ;; (define (dbfile:init-subdb dbstruct run-id init-proc) @@ -326,21 +361,21 @@ (define (dbfile:print-err . params) (with-output-to-port (current-error-port) (lambda () (apply print params)))) - + (define (dbfile:cautious-open-database fname init-proc sync-mode journal-mode #!optional (tries-left 500)) - (let* ((busy-file (conc fname"-journal")) + (let* ((busy-file (conc fname "-journal")) (delay-time (* (- 51 tries-left) 1.1)) (write-access (file-write-access? fname)) (dir-access (file-write-access? (pathname-directory fname))) (retry (lambda () (thread-sleep! delay-time) (if (> tries-left 0) (dbfile:cautious-open-database fname init-proc - sync-mode: sync-mode journal-mode: journal-mode + sync-mode journal-mode (- tries-left 1)))))) (assert (>= tries-left 0) (conc "FATAL: too many attempts in dbfile:cautious-open-database of "fname", giving up.")) (if (and (file-write-access? fname) (file-exists? busy-file)) @@ -351,11 +386,11 @@ (thread-sleep! 1) (if (eq? 
tries-left 2) (begin (dbfile:print-err "INFO: forcing journal rollup "busy-file) (dbfile:brute-force-salvage-db fname))) - (dbfile:cautious-open-database fname init-proc sync-mode: sync-mode journal-mode: journal-mode (- tries-left 1))) + (dbfile:cautious-open-database fname init-proc sync-mode journal-mode (- tries-left 1))) (let* ((result (condition-case (if dir-access (dbfile:with-simple-file-lock (conc fname ".lock") @@ -402,37 +437,81 @@ "cp "backupfname" "fname))) (dbfile:print-err "WARNING: attempting recovery of file "fname" by running commands:\n" " "cmd) (system cmd))) +;; opens and returns handle and nothing else +;; +;; NOTE: this is already protected by mutex *no-sync-db-mutex* +;; +(define (dbfile:raw-open-no-sync-db dbpath) + (if (not (file-exists? dbpath)) + (create-directory dbpath #t)) + (debug:print-info 0 *default-log-port* "Opening "dbpath"/no-sync.db") + (let* ((dbname (conc dbpath "/no-sync.db")) + (db-exists (file-exists? dbname)) + (init-proc (lambda (db) + (sqlite3:with-transaction + db + (lambda () + ;; I have been having trouble with init of no-sync.db so + ;; doing the init in a transaction every time (no gating + ;; on file existance. + (for-each + (lambda (stmt) + (sqlite3:execute db stmt)) + (list + "CREATE TABLE IF NOT EXISTS no_sync_metadat + (var TEXT, + val TEXT, + CONSTRAINT no_sync_metadat_constraint UNIQUE (var));" + "CREATE TABLE IF NOT EXISTS no_sync_locks + (key TEXT, + val TEXT, + CONSTRAINT no_sync_metadat_constraint UNIQUE (key));")))))) + (on-tmp (equal? (car (string-split dbpath "/")) "tmp")) + (db (if on-tmp + (dbfile:cautious-open-database dbname init-proc 0 "WAL") + (dbfile:cautious-open-database dbname init-proc 0 #f) + ;; (sqlite3:open-database dbname) + ))) + (if on-tmp ;; done in cautious-open-database + (begin + (sqlite3:execute db "PRAGMA synchronous = 0;") + (sqlite3:set-busy-handler! db (sqlite3:make-busy-timeout 136000)))) + db)) + +(define (dbfile:with-no-sync-db dbpath proc) + (mutex-lock! *no-sync-db-mutex*) + (let* ((already-open *no-sync-db*) + (db (or already-open (dbfile:raw-open-no-sync-db dbpath))) + (res (proc db))) + (if (not already-open) + (sqlite3:finalize! db)) + (mutex-unlock! *no-sync-db-mutex*) + res)) + +(define *no-sync-db-mutex* (make-mutex)) (define (dbfile:open-no-sync-db dbpath) - (if *no-sync-db* - *no-sync-db* - (begin - (if (not (file-exists? dbpath)) - (create-directory dbpath #t)) - (let* ((dbname (conc dbpath "/no-sync.db")) - (db-exists (file-exists? dbname)) - (init-proc (lambda (db) - (if (not db-exists) - (begin - (sqlite3:execute db "CREATE TABLE IF NOT EXISTS no_sync_metadat (var TEXT,val TEXT, CONSTRAINT no_sync_metadat_constraint UNIQUE (var));")) - ))) - (db (dbfile:cautious-open-database dbname init-proc 0 "WAL"))) ;; (sqlite3:open-database dbname))) - ;; (sqlite3:execute db "PRAGMA synchronous = 0;") - ;; (sqlite3:set-busy-handler! db (sqlite3:make-busy-timeout 136000)) ;; done in cautious-open-database - (set! *no-sync-db* db) - db)))) + (mutex-lock! *no-sync-db-mutex*) + (let* ((res (if *no-sync-db* + *no-sync-db* + (let* ((db (dbfile:raw-open-no-sync-db dbpath))) + (set! *no-sync-db* db) + db)))) + (mutex-unlock! *no-sync-db-mutex*) + res)) (define (db:no-sync-set db var val) (sqlite3:execute db "INSERT OR REPLACE INTO no_sync_metadat (var,val) VALUES (?,?);" var val)) (define (db:no-sync-del! db var) (sqlite3:execute db "DELETE FROM no_sync_metadat WHERE var=?;" var)) (define (db:no-sync-get/default db var default) + (assert (sqlite3:database? 
db) "FATAL: db:no-sync-get/default called with a bad db handle:" db) (let ((res default)) (sqlite3:for-each-row (lambda (val) (set! res val)) db @@ -445,10 +524,72 @@ (if newres newres res)) res))) +;; timestring+identifier+payload +;; locks are unique on identifier, payload is informational +(define (db:extract-time-identifier instr) + (let ((tokens (string-split instr "+"))) + (match tokens + ((t i)(cons (string->number t) i)) + ((t) (cons (string->number t) #f)) + (else + (assert #f "FATAL: db:extract-time-identifier handed bad data "instr))))) + +;; transaction protected lock aquisition +;; either: +;; fails returns (#f lock-creation-time identifier) +;; succeeds (returns (#t lock-creation-time identifier) +;; use (db:no-sync-del! db keyname) to release the lock +;; +(define (db:no-sync-get-lock-with-id db keyname identifier) + (sqlite3:with-transaction + db + (lambda () + (condition-case + (let* ((curr-val (db:no-sync-get/default db keyname #f))) + (if curr-val + (match (db:extract-time-identifier curr-val) ;; result->timestamp, identifier + ((timestamp . ident) + (cons (equal? ident identifier) timestamp)) + (else (cons #f 'malformed-lock))) ;; lock malformed + (let ((curr-sec (current-seconds)) + (lock-value (if identifier + (conc (current-seconds)"+"identifier) + (current-seconds)))) + (sqlite3:execute db "INSERT OR REPLACE INTO no_sync_metadat (var,val) VALUES(?,?);" keyname lock-value) + (cons #t curr-sec)))) + (exn (io-error) (dbfile:print-err "ERROR: i/o error with no-sync db. Check permissions, disk space etc. and try again.")) + (exn (corrupt) (dbfile:print-err "ERROR: database no-sync db is corrupt. Repair it to proceed.")) + (exn (busy) (dbfile:print-err "ERROR: database no-sync db is locked. Try copying to another location, remove original and copy back.")) + (exn (permission)(dbfile:print-err "ERROR: database no-sync db has some permissions problem.")) + (exn () ;; (status done) ;; I don't know how to detect status done but no data! + (dbfile:print-err "ERROR: Unknown error with database no-sync db message: exn="(condition->list exn)", \n" + ((condition-property-accessor 'exn 'message) exn)) + (cons #f #f)))))) + +(define (db:no-sync-check-lock db keyname identifier) + (let* ((curr-val (db:no-sync-get/default db keyname #f))) + (match (db:extract-time-identifier curr-val) ;; result->timestamp, identifier + ((timestamp . ident) + (cons (equal? ident identifier) ident)) + (else (cons #f 'no-lock))))) + +;; get the lock, wait 0.25 seconds and verify still have it. +;; this should not be necessary given the use of transaction in +;; db:no-sync-get-lock-with-id but it does seem to be needed +;; +(define (db:no-sync-lock-and-check db keyname identifier) + (let* ((result (db:no-sync-get-lock-with-id db keyname identifier)) + (gotlock (car result))) + (if gotlock + (begin + (thread-sleep! 0.25) + (db:no-sync-check-lock db keyname identifier)) + result))) + ;; transaction protected lock aquisition ;; either: ;; fails returns (#f . lock-creation-time) ;; succeeds (returns (#t . lock-creation-time) ;; use (db:no-sync-del! db keyname) to release the lock @@ -513,11 +654,12 @@ (db:sync-touched dbstruct runid keys dbinit) (set! 
*db-sync-in-progress* #f) (delete-file* lock-file) #t) (begin - (dbfile:print-err "INFO: could not get lock for " from-db-file ", sync likely in progress.") + (if (common:low-noise-print 120 (conc "no lock "from-db-file)) + (dbfile:print-err "INFO: could not get lock for " from-db-file ", sync likely in progress.")) #f )))) ;; ;; Get a lock from the no-sync-db for the from-db, then delta sync the from-db to the to-db, otherwise return #f ;; ;; @@ -556,11 +698,11 @@ (tmpdb (db:open-db dbstruct run-id dbinit)) ;; sqlite3-db tmpdbfile #f)) (start-t (current-seconds))) (mutex-lock! *db-multi-sync-mutex*) (let ((update_info (cons "last_update" (if force-sync 0 *db-last-sync*) ))) (mutex-unlock! *db-multi-sync-mutex*) - (db:sync-tables (db:sync-all-tables-list dbstruct keys) update_info tmpdb mtdb)) + (db:sync-tables (db:sync-all-tables-list keys) update_info tmpdb mtdb)) (mutex-lock! *db-multi-sync-mutex*) (set! *db-last-sync* start-t) (set! *db-last-access* start-t) (mutex-unlock! *db-multi-sync-mutex*) (dbfile:add-dbdat dbstruct run-id tmpdb) @@ -619,12 +761,12 @@ '("type" #f) '("last_update" #f)))) ;; needs db to get keys, this is for syncing all tables ;; -(define (db:sync-main-list dbstruct keys) - (let ((keys keys)) ;; (db:get-keys dbstruct))) +(define (db:sync-main-list keys) + (let ((keys keys)) (list (list "keys" '("id" #f) '("fieldname" #f) '("fieldtype" #f)) @@ -658,14 +800,29 @@ '("reviewed" #f) '("iterated" #f) '("avg_runtime" #f) '("avg_disk" #f) '("tags" #f) - '("jobgroup" #f))))) + '("jobgroup" #f)) + + + (list "tasks_queue" + '("id" #f) + '("action" #f) + '("owner" #f) + '("state" #f) + '("target" #f) + '("name" #f) + '("testpatt" #f) + '("keylock" #f) + '("params" #f) + '("creation_time" #f) + '("execution_time" #f)) + ))) -(define (db:sync-all-tables-list dbstruct keys) - (append (db:sync-main-list dbstruct keys) +(define (db:sync-all-tables-list keys) + (append (db:sync-main-list keys) db:sync-tests-only)) ;; tbls is ( ("tablename" ( "field1" [#f|proc1] ) ( "field2" [#f|proc2] ) .... ) ) ;; db's are dbdat's ;; @@ -785,22 +942,27 @@ fields) ;; read the source table ;; store a list of all rows in the table in fromdat, up to batch-len. ;; Then add fromdat to the fromdats list, clear fromdat and repeat. - (sqlite3:for-each-row - (lambda (a . b) - (set! fromdat (cons (apply vector a b) fromdat)) - (if (> (length fromdat) batch-len) - (begin - (set! fromdats (cons fromdat fromdats)) - (set! fromdat '()) - (set! totrecords (+ totrecords 1))) - ) - ) - (dbr:dbdat-dbh fromdb) - full-sel) + (sqlite3:with-transaction + (dbr:dbdat-dbh fromdb) + (lambda () + (sqlite3:for-each-row + (lambda (a . b) + (set! fromdat (cons (apply vector a b) fromdat)) + (if (> (length fromdat) batch-len) + (begin + (set! fromdats (cons fromdat fromdats)) + (set! fromdat '()) + (set! totrecords (+ totrecords 1))) + ) + ) + (dbr:dbdat-dbh fromdb) + full-sel) + ) + ) ;; Count less than batch-len as a record (if (> (length fromdat) 0) (set! totrecords (+ totrecords 1))) @@ -870,11 +1032,12 @@ ) ) tbls) (let* ((runtime (- (current-milliseconds) start-time)) (should-print (or ;; (debug:debug-mode 12) - (common:low-noise-print 120 "db sync" (> runtime 500))))) ;; low and high sync times treated as separate. + (common:low-noise-print 120 "db sync") + (> runtime 500)))) ;; low and high sync times treated as separate. (for-each (lambda (dat) (let ((tblname (car dat)) (count (cdr dat))) (set! 
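;;
;; For illustration, the update_info pair built above limits the sync to rows
;; whose last_update is at or after the given time; passing 0 effectively
;; copies everything. A sketch of the same call with an explicit timestamp
;; (the value is an example only):
;;
;; (db:sync-tables (db:sync-all-tables-list keys)   ;; table specs, incl. tasks_queue
;;                 (cons "last_update" 1700000000)  ;; only rows updated since this time
;;                 tmpdb mtdb)                      ;; from the tmp db to the .mtdb copy
;;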
tot-count (+ tot-count count)) @@ -915,30 +1078,10 @@ FOR EACH ROW BEGIN UPDATE test_data SET last_update=(strftime('%s','now')) WHERE id=old.id; END;" ))) -;; -;; ADD run-id SUPPORT -;; -(define (db:create-all-triggers dbstruct) - (db:with-db - dbstruct #f #f - (lambda (dbdat db) - (db:create-triggers db)))) - -(define (db:create-triggers db) - (for-each (lambda (key) - (sqlite3:execute db (cadr key))) - db:trigger-list)) - -(define (db:drop-all-triggers dbstruct) - (db:with-db - dbstruct #f #f - (lambda (dbdat db) - (db:drop-triggers db)))) - (define (db:is-trigger-dropped db tbl-name) (let* ((trigger-name (if (equal? tbl-name "test_steps") "update_teststeps_trigger" (conc "update_" tbl-name "_trigger"))) (res #f)) @@ -988,58 +1131,144 @@ ;; (mutex-unlock! *db-open-mutex*) dbdat)) (define dbfile:db-init-proc (make-parameter #f)) -;; (db:with-db dbstruct run-id sqlite3:exec "select blah fgrom blaz;") -;; r/w is a flag to indicate if the db is modified by this query #t = yes, #f = no +;; in xmaxima this gives a curve close to what I want: +;; plot2d ((exp(x/1.2)-1)/300, [x, 0, 10])$ +;; plot2d ((exp(x/1.5)-1)/40, [x, 0, 10])$ +;; plot2d ((exp(x/5)-1)/40, [x, 0, 20])$ +(define (dbfile:droop x) + (/ (- (exp (/ x 5)) 1) 40)) + ;; (* numqrys (/ 1 (qif-slope)))) + +;; create a dropping near the db file in a qif dir +;; use count of such files to gate queries (queries in flight) ;; -(define (db:with-db dbstruct run-id r/w proc . params) - (let* ((use-mutex (> *api-process-request-count* 25)) ;; risk of db corruption - (have-struct (dbr:dbstruct? dbstruct)) - (dbdat (if have-struct ;; this stuff just allows us to call with a db handle directly - (db:open-db dbstruct run-id (dbfile:db-init-proc)) ;; (dbfile:get-subdb dbstruct run-id) - #f)) - (db (if have-struct ;; this stuff just allows us to call with a db handle directly - (dbr:dbdat-dbh dbdat) - dbstruct)) - (fname (if dbdat - (dbr:dbdat-dbfile dbdat) - "nofilenameavailable")) - (jfile (conc fname"-journal")) - #;(subdb (if have-struct - (dbfile:get-subdb dbstruct run-id) - #f)) - ) ;; was 25 - (if (file-exists? jfile) - (begin - (dbfile:print-err "INFO: "jfile" exists, delaying to reduce database load") - (thread-sleep! 0.2))) - (if (and use-mutex - (common:low-noise-print 120 "over-50-parallel-api-requests")) - (dbfile:print-err *api-process-request-count* " parallel api requests being processed in process " - (current-process-id))) ;; ", throttling access")) - (condition-case - (begin - (if use-mutex (mutex-lock! *db-with-db-mutex*)) - (let ((res (apply proc dbdat db params))) ;; the actual call is here. - (if use-mutex (mutex-unlock! *db-with-db-mutex*)) - ;; (if (vector? dbstruct)(db:done-with dbstruct run-id r/w)) - (if dbdat - (dbfile:add-dbdat dbstruct run-id dbdat)) - res)) - (exn (io-error) - (db:generic-error-printout exn "ERROR: i/o error with " fname ". Check permissions, disk space etc. and try again.")) - (exn (corrupt) - (db:generic-error-printout exn "ERROR: database " fname " is corrupt. Repair it to proceed.")) - (exn (busy) - (db:generic-error-printout exn "ERROR: database " fname - " is locked. 
Try copying to another location, remove original and copy back.")) - (exn (permission)(db:generic-error-printout exn "ERROR: database " fname " has some permissions problem.")) - (exn () - (db:generic-error-printout exn "ERROR: Unknown error with database " fname " message: " - ((condition-property-accessor 'exn 'message) exn)))))) +(define (dbfile:wait-for-qif fname run-id params) + (let* ((thedir (pathname-directory fname)) + (dbnum (dbfile:run-id->dbnum run-id)) + (destdir (conc thedir"/qif-"dbnum)) + (uniqn (get-area-path-signature (conc dbnum params))) + (crumbn (conc destdir"/"(current-seconds)"-"uniqn"."(current-process-id)))) + (if (not (file-exists? destdir))(create-directory (conc destdir"/attic") #t)) + (let loop ((count 0)) + (let* ((currlks (glob (conc destdir"/*"))) + (numqrys (length currlks)) + (delayval (cond ;; do a droopish curve + ((> numqrys 25) + (for-each + (lambda (f) + (if (> (- (current-seconds) + (handle-exceptions + exn + (current-seconds) ;; file is likely gone, just fake out + (file-modification-time f))) + (keep-age-param)) + (let* ((basedir (pathname-directory f)) + (filen (pathname-file f)) + (destf (conc basedir"/attic/"filen))) + (dbfile:print-err "Moving qif file "f" older than 10 seconds to "destf) + ;; (delete-file* f) + (handle-exceptions + exn + #t + (file-move f destf #t))))) + currlks) + 4) + ((> numqrys 0) (dbfile:droop numqrys)) ;; slope of 1/100 + (else #f)))) + (if (and delayval + (< count 5)) + (begin + (thread-sleep! delayval) + (loop (+ count 1)))))) + (with-output-to-file crumbn + (lambda () + (print fname" run-id="run-id" params="params) + )) + crumbn)) + +;; ;; (db:with-db dbstruct run-id sqlite3:exec "select blah fgrom blaz;") +;; ;; r/w is a flag to indicate if the db is modified by this query #t = yes, #f = no +;; ;; +;; ;; Used only with http - to be removed +;; ;; +;; (define (dbfile:with-db dbstruct run-id r/w proc params) +;; (assert dbstruct "FATAL: db:with-db called with dbstruct "#f) +;; (assert (dbr:dbstruct? dbstruct) "FATAL: dbstruct is "dbstruct) +;; ;; Testing 2023, March 14th. I went from full time use of the mutext to no use at all and +;; ;; didn't see much change in the frequency of the messages: +;; ;; Warning (#): in thread: (bind!) bad parameter or other API misuse +;; ;; allowing request count to go up to 1000 and other crashes showed up: +;; ;; Warning (#): in thread: (deserialize) unexpected end of input: # +;; ;; +;; ;; leave it fully on for now, test later if there is a performance issue +;; ;; +;; (let* ((use-mutex #t) ;;(> *api-process-request-count* 50)) ;; risk of db corruption +;; (have-struct (dbr:dbstruct? dbstruct)) +;; (dbdat (if have-struct ;; this stuff just allows us to call with a db handle directly +;; (db:open-db dbstruct run-id (dbfile:db-init-proc)) ;; (dbfile:get-subdb dbstruct run-id) +;; #f)) +;; (db (if have-struct ;; this stuff just allows us to call with a db handle directly +;; (dbr:dbdat-dbh dbdat) +;; dbstruct)) +;; (fname (if dbdat +;; (dbr:dbdat-dbfile dbdat) +;; "nofilenameavailable")) +;; (jfile (conc fname"-journal")) +;; (qryproc (lambda () +;; (if use-mutex (mutex-lock! *db-with-db-mutex*)) +;; (let ((res (apply proc dbdat db params))) ;; the actual call is here. +;; (if use-mutex (mutex-unlock! *db-with-db-mutex*)) +;; ;; (if (vector? 
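;;
;; Rough values of dbfile:droop above (delay in seconds versus number of
;; queries in flight), showing how it approaches the hard 4s cap applied once
;; more than 25 crumb files are present:
;;   (dbfile:droop 1)  => ~0.006    (dbfile:droop 5)  => ~0.043
;;   (dbfile:droop 10) => ~0.16     (dbfile:droop 20) => ~1.34
;;   (dbfile:droop 25) => ~3.69
;;
;; Illustrative use of the queries-in-flight gate: drop a crumb before issuing
;; a gated query, then remove it when done. That the caller owns the cleanup
;; is an assumption here (the commented-out with-db below hints at
;; (delete-file* crumbfile)); dbfullname and the params are placeholders.
;;
;; (let ((crumb (dbfile:wait-for-qif dbfullname run-id '(get-test-info 42))))
;;   ;; ... issue the gated query ...
;;   (delete-file* crumb))
;;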
dbstruct)(db:done-with dbstruct run-id r/w)) +;; (if dbdat +;; (dbfile:add-dbdat dbstruct run-id dbdat)) +;; ;; (delete-file* crumbfile) +;; res))) +;; (stop-train (conc (dbr:dbstruct-areapath dbstruct)"/stop-the-train"))) +;; +;; (assert (sqlite3:database? db) "FATAL: db:with-db, db is not a database, db="db +;; ", fname="fname) +;; (if (file-exists? jfile) +;; (begin +;; (dbfile:print-err "INFO: "jfile" exists, delaying to reduce database load") +;; (thread-sleep! 0.2))) +;; (if (and use-mutex +;; (common:low-noise-print 120 "over-50-parallel-api-requests")) +;; (dbfile:print-err *api-process-request-count* +;; " parallel api requests being processed in process " +;; (current-process-id))) ;; ", throttling access")) +;; (case (no-condition-db-with-db) +;; ((production)(qryproc)) +;; ((suicide-mode) +;; (handle-exceptions +;; exn +;; (with-output-to-file stop-train +;; (lambda () +;; (db:generic-error-printout exn "Stop train mode, run-id: "run-id +;; " params: "params" proc: "proc))) +;; (qryproc))) +;; (else +;; (condition-case +;; (qryproc) +;; (exn (io-error) +;; (db:generic-error-printout exn "ERROR: i/o error with "fname +;; ". Check permissions, disk space etc. and try again.")) +;; (exn (corrupt) +;; (db:generic-error-printout exn "ERROR: database "fname +;; " is corrupt. Repair it to proceed.")) +;; (exn (busy) +;; (db:generic-error-printout exn "ERROR: database "fname +;; " is locked. Try copying to another location," +;; " remove original and copy back.")) +;; (exn (permission)(db:generic-error-printout exn "ERROR: database "fname +;; " has some permissions problem.")) +;; (exn () +;; (db:generic-error-printout exn "ERROR: Unknown error with database "fname +;; " message: " +;; ((condition-property-accessor 'exn 'message) exn)))))))) ;;====================================================================== ;; another attempt at a transactionized queue ;;====================================================================== @@ -1112,34 +1341,45 @@ (define (dbfile:simple-file-lock fname #!key (expire-time 300)) (let ((fmod-time (handle-exceptions ext (current-seconds) (file-modification-time fname)))) + + ;; if the file exists, if it has expired, delete it and call this function recursively. (if (file-exists? fname) (if (> (- (current-seconds) fmod-time) expire-time) (begin + (dbfile:print-err "simple-file-lock: removing expired file: " fname) (handle-exceptions exn #f (delete-file* fname)) (dbfile:simple-file-lock fname expire-time: expire-time)) - #f) - (let ((key-string (conc (get-host-name) "-" (current-process-id))) + #f + ) + + ;; If it doesn't exist, write the host name and process id to the file + (let ((key-string (conc (get-host-name) "-" (current-process-id) ": " (argv))) (oup (open-output-file fname))) (with-output-to-port oup (lambda () (print key-string))) (close-output-port oup) - #;(with-output-to-file fname ;; bizarre. with-output-to-file does not seem to be cleaning up after itself. - (lambda () - (print key-string))) + + + ;; sleep 3 seconds and make sure it still exists and contains the same host/process id. + ;; if not, return #f (thread-sleep! 0.25) (if (file-exists? fname) (handle-exceptions exn #f (with-input-from-file fname (lambda () (equal? 
key-string (read-line))))) - #f) + (begin + (dbfile:print-err "dbfile:simple-file-lock created " fname " but it was gone 3 seconds later") + #f + ) + ) ) ) ) ) @@ -1158,14 +1398,47 @@ (handle-exceptions exn #f ;; I don't really care why this failed (at least for now) (delete-file* fname))) -(define (dbfile:with-simple-file-lock fname proc #!key (expire-time 300)) - (let ((gotlock (dbfile:simple-file-lock-and-wait fname expire-time: expire-time))) +(define (dbfile:with-simple-file-lock fname proc #!key (expire-time 300)(run-anyway #f)) + (let ((start-time (current-seconds)) + (gotlock (dbfile:simple-file-lock-and-wait fname expire-time: expire-time)) + (end-time (current-seconds)) + ) (if gotlock (let ((res (proc))) (dbfile:simple-file-release-lock fname) res) - (assert #t "FATAL: simple file lock never got a lock.")))) - + (begin + (dbfile:print-err "dbfile:with-simple-file-lock: " fname " is locked by " + (handle-exceptions + exn + "unreadable" + (with-input-from-file fname + (lambda () + (read-line))))) + (dbfile:print-err "wait time = " (- end-time start-time)) + (dbfile:print-err "ERROR: simple file lock could not get a lock for " fname " in " expire-time " seconds") + (if run-anyway + (let ((res (proc))) + (dbfile:simple-file-release-lock fname) + res) + #f))))) + +(define *get-cache-stmth-mutex* (make-mutex)) + +(define (db:get-cache-stmth dbdat db stmt) + (mutex-lock! *get-cache-stmth-mutex*) + (let* (;; (dbdat (dbfile:get-dbdat dbstruct run-id)) + (stmt-cache (dbr:dbdat-stmt-cache dbdat)) + ;; (stmth (db:hoh-get stmt-cache db stmt)) + (stmth (hash-table-ref/default stmt-cache stmt #f)) + (result (or stmth + (let* ((newstmth (sqlite3:prepare db stmt))) + ;; (db:hoh-set! stmt-cache db stmt newstmth) + (hash-table-set! stmt-cache stmt newstmth) + newstmth)))) + (mutex-unlock! *get-cache-stmth-mutex*) + result)) + ) Index: dbmod.scm ================================================================== --- dbmod.scm +++ dbmod.scm @@ -1,5 +1,6 @@ + ;;====================================================================== ;; Copyright 2017, Matthew Welland. ;; ;; This file is part of Megatest. ;; @@ -17,40 +18,837 @@ ;; along with Megatest. If not, see . ;;====================================================================== (declare (unit dbmod)) +(declare (uses dbfile)) +(declare (uses commonmod)) +(declare (uses debugprint)) (module dbmod * -(import scheme chicken data-structures extras) -(import (prefix sqlite3 sqlite3:) - posix typed-records srfi-18 - srfi-69) - -(define (db:run-id->dbname run-id) - (cond - ((number? run-id)(conc run-id ".db")) - ((not run-id) "main.db") - (else run-id))) - - -;;====================================================================== -;; hash of hashs -;;====================================================================== - - -(define (db:hoh-set! dat key1 key2 val) - (let* ((subhash (hash-table-ref/default dat key1 #f))) - (if subhash - (hash-table-set! subhash key2 val) - (begin - (hash-table-set! dat key1 (make-hash-table)) - (db:hoh-set! 
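;;
;; Illustrative call of dbfile:with-simple-file-lock as extended above; the
;; run-anyway: #t behaviour (proceed even when the lock could not be obtained)
;; mirrors its use in dbmod:safely-open-db later in this diff. The lock-file
;; name is an example.
;;
;; (dbfile:with-simple-file-lock (conc dbfullname ".lock")
;;   (lambda ()
;;     ;; ... open/initialise the db while holding the lock ...
;;     #t)
;;   run-anyway: #t)
;;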
dat key1 key2 val))))) - -(define (db:hoh-get dat key1 key2) - (let* ((subhash (hash-table-ref/default dat key1 #f))) - (and subhash - (hash-table-ref/default subhash key2 #f)))) - +(import scheme + chicken + data-structures + extras + files + + (prefix sqlite3 sqlite3:) + matchable + posix + typed-records + srfi-1 + srfi-18 + srfi-69 + + commonmod + dbfile + debugprint + ) + +;; NOTE: This returns only the name "1.db", "main.db", not the path +;; +(define (dbmod:run-id->dbfname run-id) + (conc (dbfile:run-id->dbnum run-id)".db")) + +(define (dbmod:get-dbdir dbstruct) + (let* ((areapath (dbr:dbstruct-areapath dbstruct)) + (dbdir (conc areapath"/.mtdb"))) + (if (and (file-write-access? areapath) + (not (file-exists? dbdir))) + (create-directory dbdir)) + dbdir)) + +(define (dbmod:run-id->full-dbfname dbstruct run-id) + (conc (dbmod:get-dbdir dbstruct + + run-id + + )"/"(dbmod:run-id->dbfname run-id))) + +;;====================================================================== +;; Read-only cachedb cached direct from disk method +;;====================================================================== + +(define *dbmod:nfs-db-handles* (make-hash-table)) ;; dbfname -> dbstruct + +;; called in rmt.scm nfs-transport-handler +(define (dbmod:nfs-get-dbstruct run-id keys init-proc areapath #!key (tmpadj "")) + (assert areapath "FATAL: dbmod:nfs-get-dbstruct called without areapath set.") + (let* ((dbfname (dbmod:run-id->dbfname run-id)) + (dbstruct (hash-table-ref/default *dbmod:nfs-db-handles* dbfname #f))) + (if dbstruct + dbstruct + (let* ((newdbstruct (dbmod:open-dbmoddb areapath run-id dbfname init-proc keys syncdir: 'fromdisk tmpadj: tmpadj))) + (hash-table-set! *dbmod:nfs-db-handles* dbfname newdbstruct) + newdbstruct)))) + +;;====================================================================== +;; The cachedb one-db file per server method goes in here +;;====================================================================== + +;; NOTE: the r/w is now w/r, #t=db modified by query, #f=db NOT modified by query +(define (dbmod:with-db dbstruct run-id w/r proc params) + (let* ((use-mutex (or (and w/r ;; use the mutex on queries that modify the db and for sync to disk + (> *api-process-request-count* 5)) ;; when writes are happening throttle more + (> *api-process-request-count* 50))) + (dbdat (dbmod:open-db dbstruct run-id (dbfile:db-init-proc))) + (dbh (dbr:dbdat-dbh dbdat)) ;; this will be the cachedb handle + (dbfile (dbr:dbdat-dbfile dbdat))) + ;; if nfs mode do a sync if delta > 2 + (let* ((last-update (dbr:dbstruct-last-update dbstruct)) + (sync-proc (dbr:dbstruct-sync-proc dbstruct)) + (curr-secs (current-seconds))) + (if (> (- curr-secs last-update) 5) + (begin + (sync-proc last-update) + + ;; MOVE THIS CALL TO INSIDE THE sync-proc CALL + (dbr:dbstruct-last-update-set! dbstruct curr-secs) + ))) + (assert (sqlite3:database? dbh) "FATAL: bad db handle in dbmod:with-db") + (if use-mutex (mutex-lock! *db-with-db-mutex*)) + (let* ((res (let loop ((count 3)) + (condition-case + (apply proc dbdat dbh params) + (exn (busy) + (if (> count 0) + (begin + (debug:print-info 0 *default-log-port* "dbmod:with-db, database is busy, will try "count" more times.") + (thread-sleep! 
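;;
;; Illustrative call of dbmod:nfs-get-dbstruct above, as it would be used from
;; the rmt.scm nfs handler: repeated calls for the same run-id reuse the
;; dbstruct cached in *dbmod:nfs-db-handles*, keyed by the .db file name.
;; The arguments shown (run-id 1, keys, *toppath*) are placeholders.
;;
;; (dbmod:nfs-get-dbstruct 1 keys (dbfile:db-init-proc) *toppath*)
;;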
1) + (loop (- count 1))) + (begin + (debug:print-info 0 *default-log-port* "dbmod:with-db, database is busy, giving up.") + (exit 1)))) + (exn () + (dbfile:print-err exn "ERROR: Unknown error with database for run-id "run-id", message: " + ((condition-property-accessor 'exn 'message) exn)) + (exit 2)))))) + (if use-mutex (mutex-unlock! *db-with-db-mutex*)) + res))) + +(define (db:with-db dbstruct run-id w/r proc . params) + (dbmod:with-db dbstruct run-id w/r proc params)) + +;; +(define (dbmod:open-cachedb-db init-proc dbfullname) + (let* ((db (if dbfullname + (dbmod:safely-open-db dbfullname init-proc #t) + (sqlite3:open-database ":memory:"))) + (handler (sqlite3:make-busy-timeout 136000))) + (sqlite3:set-busy-handler! db handler) + (init-proc db) + db)) + +(define (dbmod:open-db dbstruct run-id dbinit) + (or (dbr:dbstruct-dbdat dbstruct) + (let* ((dbdat (make-dbr:dbdat + dbfile: (dbr:dbstruct-dbfile dbstruct) + dbh: (dbr:dbstruct-cachedb dbstruct) + ))) + (dbr:dbstruct-dbdat-set! dbstruct dbdat) + dbdat))) + +;; NOT USED? +(define (dbmod:need-on-disk-db-handle) + (case (dbfile:cache-method) + ((none tmp) #t) + ((cachedb) + (case (dbfile:sync-method) + ((original) #t) + ((attach) #t) ;; we need it to force creation of the on-disk file - FIXME + (else + (debug:print 0 *default-log-port* "Unknown dbfile:sync-method setting: " + (dbfile:sync-method))))) + (else + (debug:print 0 *default-log-port* "Unknown dbfile:cache-method setting: " + (dbfile:cache-method)) + #f))) + +(define (dbmod:safely-open-db dbfullname init-proc write-access) + (dbfile:with-simple-file-lock + (conc dbfullname".lock") + (lambda () + (let* ((dbexists (file-exists? dbfullname)) + (db (sqlite3:open-database dbfullname)) + (handler (sqlite3:make-busy-timeout 136000))) + (sqlite3:set-busy-handler! db handler) + (if (and (not dbexists) + write-access) + (init-proc db)) + db)) + run-anyway: #t)) + +(define *sync-in-progress* #f) + +;; Open the cachedb db and the on-disk db +;; populate the cachedb db with data +;; +;; Updates fields in dbstruct +;; Returns dbstruct +;; +;; * This routine creates the db if not found +;; * Probably can get rid of the dbstruct-in +;; +(define (dbmod:open-dbmoddb areapath run-id dbfname-in init-proc keys + #!key (dbstruct-in #f) + ;; (dbcontext 'megatest) ;; use dashboard to do the dashboard + (tmpadj "") ;; add to tmp path + (syncdir 'todisk)) ;; todisk is used when caching in /tmp and writing data back to MTRAH + (let* ((dbstruct (or dbstruct-in (make-dbr:dbstruct areapath: areapath))) + (dbfname (or dbfname-in (dbmod:run-id->dbfname run-id))) + (dbpath (dbmod:get-dbdir dbstruct)) ;; directory where all the .db files are kept + (dbfullname (conc dbpath"/"dbfname)) ;; (dbmod:run-id->full-dbfname dbstruct run-id)) + (dbexists (file-exists? dbfullname)) + (tmpdir (dbfile:make-tmpdir-name areapath tmpadj)) + (tmpdb (let* ((fname (conc tmpdir"/"dbfname))) + fname)) + (cachedb (dbmod:open-cachedb-db init-proc + ;; (if (eq? (dbfile:cache-method) 'cachedb) + ;; #f + tmpdb + ;; ) + )) + (write-access (file-write-access? dbpath)) + (db (dbmod:safely-open-db dbfullname init-proc write-access)) + (tables (db:sync-all-tables-list keys))) + (if (not (and (sqlite3:database? cachedb) + (sqlite3:database? db))) + (begin + (debug:print 0 *default-log-port* "ERROR: Failed to properly open "dbfname-in", exiting immediately.") + (exit))) ;; (assert (sqlite3:database? cachedb) "FATAL: open-dbmoddb: cachedb is not a db") + ;; (assert (sqlite3:database? 
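;;
;; Illustrative caller of the db:with-db wrapper defined above. Note the flag
;; is now w/r: #t means the query modifies the db, #f means read-only. The SQL
;; and values are examples only.
;;
;; (db:with-db dbstruct run-id #t        ;; #t => this query writes to the db
;;   (lambda (dbdat db)
;;     (sqlite3:execute db "UPDATE tests SET state=? WHERE id=?;" "COMPLETED" 42)))
;;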
db) "FATAL: open-dbmoddb: db is not a db") + (dbr:dbstruct-cachedb-set! dbstruct cachedb) + (dbr:dbstruct-ondiskdb-set! dbstruct db) + (dbr:dbstruct-dbfile-set! dbstruct dbfullname) + (dbr:dbstruct-dbtmpname-set! dbstruct tmpdb) + (dbr:dbstruct-dbfname-set! dbstruct dbfname) + (dbr:dbstruct-sync-proc-set! dbstruct + (lambda (last-update) + (if *sync-in-progress* + (debug:print 3 *default-log-port* "WARNING: overlapping calls to sync to disk") + (let* ((syncer-logfile (conc areapath"/logs/"dbfname"-syncer.log")) + (sync-cmd (if (eq? syncdir 'todisk) + (conc "(NBFAKE_LOG="syncer-logfile" nbfake megatest -db2db -from "tmpdb" -to "dbfullname" -period 5 -timeout 10 > /dev/null 2&>1)&") + (conc "(NBFAKE_LOG="syncer-logfile" nbfake megatest -db2db -from "dbfullname" -to "tmpdb" -period 5 -timeout 10 > /dev/null 2&>1)&"))) + (synclock-file (conc dbfullname".lock")) + (syncer-running-file (conc dbfullname"-sync-running")) + (synclock-mod-time (if (file-exists? synclock-file) + (handle-exceptions + exn + #f + (file-modification-time synclock-file)) + #f)) + (thethread (lambda () + (thread-start! + (make-thread + (lambda () + (set! *sync-in-progress* #t) + (debug:print-info "Running "sync-cmd) + (if (file-exists? syncer-running-file) + (debug:print-info 0 *default-log-port* "Syncer still running, skipping syncer start.") + (system sync-cmd)) + (set! *sync-in-progress* #f))))))) + (if ((if (eq? syncdir 'todisk) < >) ;; use less than for todisk, greater than for from disk + (file-modification-time tmpdb) + (file-modification-time dbfullname)) + (debug:print 4 *default-log-port* "Skipping sync, "tmpdb" older than "dbfullname) + (if synclock-mod-time + (if (< (- (current-seconds) synclock-mod-time) 20) ;; something wrong with sync, remove file + (begin + (handle-exceptions + exn + #f + (delete-file synclock-file)) + (thethread)) + (debug:print 0 *default-log-port* "Skipping sync, lockfile "synclock-file" found.")) + (thethread))))))) + ;; (dbmod:sync-tables tables #f db cachedb) + ;; + (thread-sleep! 1) ;; let things settle before syncing in needed data + (dbmod:sync-gasket tables #f cachedb db dbfullname 'fromdest keys) ;; ) ;; load into cachedb + (dbr:dbstruct-last-update-set! dbstruct (+ (current-seconds) -10)) ;; should this be offset back in time by one second? + dbstruct)) + +;; (if (eq? syncdir 'todisk) ;; sync to disk normally, sync from in dashboard +;; (dbmod:sync-tables tables last-update cachedb db) +;; (dbmod:sync-tables tables last-update db cachedb)))) +;; +;; direction: 'fromdest 'todest +;; +(define (dbmod:sync-gasket tables last-update cachedb dbh dbfname direction keys) + (assert (sqlite3:database? cachedb) "FATAL: sync-gasket: cachedb is not a db") + (assert (sqlite3:database? cachedb) "FATAL: sync-gasket: dbh is not a db") + (debug:print-info 2 *default-log-port* "dbmod:sync-gasket called with sync-method="(dbfile:sync-method)) + (case (dbfile:sync-method) + ((none) #f) + ((attach) + (dbmod:attach-sync tables cachedb dbfname direction)) + ((newsync) + (dbmod:new-sync tables cachedb dbh dbfname direction)) + (else ;; original + (case direction + ((todisk) ;; i.e. from the cache db to the mtrah db + (dbmod:sync-tables tables last-update keys cachedb dbh)) + (else + (dbmod:sync-tables tables last-update keys dbh cachedb)))))) + +(define (dbmod:close-db dbstruct) + ;; do final sync to disk file + ;; (do-sync ...) + (sqlite3:finalize! 
(dbr:dbstruct-ondiskdb dbstruct))) + +;;====================================================================== +;; Sync db +;;====================================================================== + +(define (dbmod:calc-use-last-update has-last-update fields last-update) + (cond + ((and has-last-update + (member "last_update" fields)) + #t) ;; if given a number, just use it for all fields + ((number? last-update) #f) ;; if not matched first entry then ignore last-update for this table + ((and (pair? last-update) + (member (car last-update) ;; last-update field name + (map car fields))) + #t) + ((and last-update (not (pair? last-update)) (not (number? last-update))) + (debug:print 0 *default-log-port* "ERROR: parameter last-update for db:sync-tables must be a pair or a number, received: " last-update);; found in fields + #f) + (else + #f))) + +;; tbls is ( ("tablename" ( "field1" [#f|proc1] ) ( "field2" [#f|proc2] ) .... ) ) +;; dbs are sqlite3 db handles +;; +;; if last-update specified ("field-name" . time-in-seconds) +;; then sync only records where field-name >= time-in-seconds +;; IFF field-name exists +;; +;; Use (db:sync-all-tables-list keys) to get the tbls input +;; +(define (dbmod:sync-tables tbls last-update keys fromdb todb) + (debug:print-info 2 *default-log-port* "dbmod:sync-tables called, from: "fromdb", to: "todb) + (assert (sqlite3:database? fromdb) "FATAL: dbmod:sync-tables called with fromdb not a database" fromdb) + (assert (sqlite3:database? todb) "FATAL: dbmod:sync-tables called with fromdb not a database" todb) + (let ((stmts (make-hash-table)) ;; table-field => stmt + (all-stmts '()) ;; ( ( stmt1 value1 ) ( stml2 value2 )) + (numrecs (make-hash-table)) + (start-time (current-milliseconds)) + (tot-count 0)) + (for-each ;; table + (lambda (tabledat) + (let* ((tablename (car tabledat)) + (fields (cdr tabledat)) + (has-last-update (member "last_update" fields)) + (use-last-update (dbmod:calc-use-last-update has-last-update fields last-update)) + (last-update-value (if use-last-update ;; no need to check for has-last-update - it is already accounted for + (if (number? last-update) + last-update + (cdr last-update)) + #f)) + (last-update-field (if use-last-update + (if (number? last-update) + "last_update" + (car last-update)) + #f)) + (num-fields (length fields)) + (field->num (make-hash-table)) + (num->field (apply vector (map car fields))) ;; BBHERE + (full-sel (conc "SELECT " (string-intersperse (map car fields) ",") + " FROM " tablename (if use-last-update ;; apply last-update criteria + (conc " WHERE " last-update-field " >= " last-update-value) + "") + ";")) + (full-ins (conc "INSERT OR REPLACE INTO " tablename " ( " (string-intersperse (map car fields) ",") " ) " + " VALUES ( " (string-intersperse (make-list num-fields "?") ",") " );")) + (fromdat '()) + (fromdats '()) + (totrecords 0) + (batch-len 100) ;; (string->number (or (configf:lookup *configdat* "sync" "batchsize") "100"))) + (todat (make-hash-table)) + (count 0) + (field-names (map car fields))) + + ;; set up the field->num table + (for-each + (lambda (field) + (hash-table-set! field->num field count) + (set! count (+ count 1))) + fields) + + ;; read the source table + ;; store a list of all rows in the table in fromdat, up to batch-len. + ;; Then add fromdat to the fromdats list, clear fromdat and repeat. + (sqlite3:for-each-row + (lambda (a . b) + (set! fromdat (cons (apply vector a b) fromdat)) + (if (> (length fromdat) batch-len) + (begin + (set! fromdats (cons fromdat fromdats)) + (set! 
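;;
;; For illustration, with last-update = (cons "last_update" 1700000000) the
;; full-sel and full-ins strings built above come out roughly as follows for
;; the tests table (field list abridged):
;;
;;   SELECT id,run_id,testname,...,last_update FROM tests
;;     WHERE last_update >= 1700000000;
;;   INSERT OR REPLACE INTO tests ( id,run_id,testname,...,last_update )
;;     VALUES ( ?,?,?,...,? );
;;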
fromdat '()) + (set! totrecords (+ totrecords 1))))) + fromdb + full-sel) + + ;; Count less than batch-len as a record + (if (> (length fromdat) 0) + (set! totrecords (+ totrecords 1))) + + ;; tack on remaining records in fromdat + (if (not (null? fromdat)) + (set! fromdats (cons fromdat fromdats))) + + (sqlite3:for-each-row + (lambda (a . b) + (hash-table-set! todat a (apply vector a b))) + todb + full-sel) + + ;; first pass implementation, just insert all changed rows + + (let* ((db todb) + (drp-trigger (if (member "last_update" field-names) + (db:drop-trigger db tablename) + #f)) + (has-last-update (member "last_update" field-names)) + (is-trigger-dropped (if has-last-update + (db:is-trigger-dropped db tablename) + #f)) + (stmth (sqlite3:prepare db full-ins)) + (changed-rows 0)) + (for-each + (lambda (fromdat-lst) + (mutex-lock! *db-transaction-mutex*) + (sqlite3:with-transaction + db + (lambda () + (for-each ;; + (lambda (fromrow) + (let* ((a (vector-ref fromrow 0)) + (curr (hash-table-ref/default todat a #f)) + (same #t)) + (let loop ((i 0)) + (if (or (not curr) + (not (equal? (vector-ref fromrow i)(vector-ref curr i)))) + (set! same #f)) + (if (and same + (< i (- num-fields 1))) + (loop (+ i 1)))) + (if (not same) + (begin + (apply sqlite3:execute stmth (vector->list fromrow)) + (hash-table-set! numrecs tablename (+ 1 (hash-table-ref/default numrecs tablename 0))) + (set! changed-rows (+ changed-rows 1)))))) + fromdat-lst))) + (mutex-unlock! *db-transaction-mutex*)) + fromdats) + ;; (debug:print 0 *default-log-port* "row="row) + + (sqlite3:finalize! stmth) + (if (member "last_update" field-names) + (db:create-trigger db tablename))) + )) + tbls) + (let* ((runtime (- (current-milliseconds) start-time)) + (should-print (or ;; (debug:debug-mode 12) + (common:low-noise-print 120 "db sync") + (> runtime 500)))) ;; low and high sync times treated as separate. + (for-each + (lambda (dat) + (let ((tblname (car dat)) + (count (cdr dat))) + (set! tot-count (+ tot-count count)))) + (sort (hash-table->alist numrecs)(lambda (a b)(> (cdr a)(cdr b)))))) + tot-count)) + +(define (has-last-update dbh tablename) + (let* ((has-last #f)) + (sqlite3:for-each-row + (lambda (name) + (if (equal? name "last_update") + (set! has-last #t))) + dbh + (conc "SELECT name FROM pragma_table_info('"tablename"') as tblInfo;")) + has-last)) + +;; tbls is ( ("tablename" ( "field1" [#f|proc1] ) ( "field2" [#f|proc2] ) .... ) ) +;; +;; direction = fromdest, todisk +;; mode = 'full, 'incr +;; +;; Idea: youngest in dest is last_update time +;; +(define (dbmod:attach-sync tables dbh destdbfile direction #!key + (mode 'full) + (no-update '("keys")) ;; do + ) + (let* ((num-changes 0) + (update-changed (lambda (num-changed table qryname) + (if (> num-changed 0) + (begin + (debug:print-info 0 *default-log-port* "Changed "num-changed" rows for table "table", qry "qryname) + (set! num-changes (+ num-changes num-changed))))))) + (debug:print 0 *default-log-port* "Doing sync "direction" "destdbfile) + (if (not (sqlite3:auto-committing? dbh)) + (debug:print 0 *default-log-port* "Skipping sync due to transaction in flight.") + (let* ((table-names (map car tables)) + (dest-exists (file-exists? destdbfile))) + (assert dest-exists "FATAL: sync called with non-existant file, "destdbfile) + ;; attach the destdbfile + ;; for each table + ;; insert into dest. select * from src.
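;;
;; Illustrative call of the has-last-update helper above, which inspects the
;; column list via pragma_table_info; the table name is an example:
;;
;; (has-last-update dbh "tests")   ;; => #t when tests has a last_update column
;;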
where last_update>last_update + ;; done + (debug:print 0 *default-log-port* "Attaching "destdbfile" as auxdb") + (handle-exceptions + exn + (begin + (debug:print 0 "ATTACH failed, exiting. exn="(condition->list exn)) + (exit 1)) + (sqlite3:execute dbh (conc "ATTACH '"destdbfile"' AS auxdb;"))) + (for-each + (lambda (table) + (let* ((tbldat (alist-ref table tables equal?)) + (fields (map car tbldat)) + (no-id-fields (filter (lambda (x)(not (equal? x "id"))) fields)) + (fields-str (string-intersperse fields ",")) + (no-id-fields-str (string-intersperse no-id-fields ",")) + (dir (eq? direction 'todisk)) + (fromdb (if dir "main." "auxdb.")) + (todb (if dir "auxdb." "main.")) + (set-str (string-intersperse + (map (lambda (field) + (conc fromdb field"="todb field)) + fields) + ",")) + (stmt1 (conc "INSERT OR IGNORE INTO "todb table + " SELECT * FROM "fromdb table";")) + (stmt2 (conc "INSERT OR IGNORE INTO "todb table + " SELECT * FROM "fromdb table" WHERE "fromdb table".id=?;")) + (stmt8 (conc "UPDATE "todb table" SET ("no-id-fields-str") = (SELECT "no-id-fields-str" FROM "fromdb table" WHERE "todb table".id="fromdb table".id" + (conc " AND "fromdb table".last_update > "todb table".last_update);") + ");")) + (stmt9 (conc "UPDATE "todb table" SET ("no-id-fields-str") = " + "(SELECT "no-id-fields-str" FROM "fromdb table" WHERE "fromdb table".id=?)" + " WHERE "todb table".id=?")) + (newrec (conc "SELECT id FROM "fromdb table" WHERE id NOT IN (SELECT id FROM "todb table");")) + #;(changedrec (conc "SELECT id FROM "fromdb table" WHERE "fromdb table".last_update > "todb table".last_update AND " + fromdb table".id="todb table".id;")) ;; main = fromdb + (changedrec (conc "SELECT "fromdb table".id FROM "fromdb table" join "todb table" on "fromdb table".id="todb table".id WHERE "fromdb table".last_update > "todb table".last_update;")) + ;; SELECT main.tests.id FROM main.tests join auxdb.tests on main.tests.id=auxdb.tests.id WHERE main.tests.last_update > auxdb.tests.last_update;" + (start-ms (current-milliseconds)) + (new-ids (sqlite3:fold-row (lambda (res id)(cons id res)) '() dbh newrec))) + ;; (debug:print 0 *default-log-port* "Got "(length aux-ids)" in aux-ids and "(length main-ids)" in main-ids") + (update-changed (length new-ids) table "new records") + (mutex-lock! *db-transaction-mutex*) + (handle-exceptions + exn + (debug:print 0 *default-log-port* "Transaction update of "table" failed.") + (sqlite3:with-transaction + dbh + (lambda () + (for-each (lambda (id) + (sqlite3:execute dbh stmt2 id)) + new-ids)))) + + (if (member "last_update" fields) + (handle-exceptions + exn + (debug:print 0 *default-log-port* "Transaction update of "table" failed.") + (sqlite3:with-transaction + dbh + (lambda () + (let* ((changed-ids (sqlite3:fold-row (lambda (res id)(cons id res)) '() dbh changedrec))) + (update-changed (length changed-ids) table "changed records") + (for-each (lambda (id) + (sqlite3:execute dbh stmt9 id id)) + changed-ids)))))) + + (mutex-unlock! *db-transaction-mutex*) + + (debug:print 0 *default-log-port* "Synced table "table + " in "(- (current-milliseconds) start-ms)"ms") + + )) + table-names) + (sqlite3:execute dbh "DETACH auxdb;"))) + num-changes)) + +;; prefix is "" or "auxdb." +;; +;; (define (dbmod:last-update-patch dbh prefix) +;; (let (( + +;; tbls is ( ("tablename" ( "field1" [#f|proc1] ) ( "field2" [#f|proc2] ) .... 
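;;
;; For illustration, as called from dbmod:sync-gasket with direction 'todisk
;; (main. = the cachedb handle, auxdb. = the attached on-disk file) the
;; generated statements above look like this for the tests table:
;;
;;   newrec:     SELECT id FROM main.tests WHERE id NOT IN (SELECT id FROM auxdb.tests);
;;   stmt2:      INSERT OR IGNORE INTO auxdb.tests SELECT * FROM main.tests
;;                 WHERE main.tests.id=?;
;;   changedrec: SELECT main.tests.id FROM main.tests join auxdb.tests
;;                 on main.tests.id=auxdb.tests.id
;;                 WHERE main.tests.last_update > auxdb.tests.last_update;
;;   stmt9:      UPDATE auxdb.tests SET (run_id,testname,...) =
;;                 (SELECT run_id,testname,... FROM main.tests WHERE main.tests.id=?)
;;                 WHERE auxdb.tests.id=?;
;;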
) ) +;; +;; direction = fromdest, todest +;; mode = 'full, 'incr +;; +;; Idea: youngest in dest is last_update time +;; +(define (dbmod:new-sync tables dbh1 dbh2 destdbfile direction #!key + (mode 'full)) + (debug:print 0 *default-log-port* "Doing new-sync "direction" "destdbfile) + (if (not (sqlite3:auto-committing? dbh1)) + (debug:print 0 *default-log-port* "Skipping sync due to transaction in flight.") + (let* ((table-names (map car tables)) + (dest-exists (file-exists? destdbfile))) + (assert dest-exists "FATAL: sync called with non-existant file, "destdbfile) + (for-each + (lambda (table) + (let* ((tbldat (alist-ref table tables equal?)) + (fields (map car tbldat)) + (no-id-fields (filter (lambda (x)(not (equal? x "id"))) fields)) + (questionmarks (string-intersperse (make-list (length no-id-fields) "?") ",")) + (fields-str (string-intersperse fields ",")) + (no-id-fields-str (string-intersperse no-id-fields ",")) + (dir (eq? direction 'todest)) + (fromdb (if dir dbh1 dbh2)) + (todb (if dir dbh2 dbh1)) + (set-str (string-intersperse + (map (lambda (field) + (conc fromdb field"="todb field)) + fields) + ",")) + ;; (stmt1 (conc "INSERT OR IGNORE INTO "todb table + ;; " SELECT * FROM "fromdb table";")) + ;; (stmt8 (conc "UPDATE "todb table" SET ("no-id-fields-str") = (SELECT "no-id-fields-str" FROM "fromdb table " WHERE "todb table".id="fromdb table".id" + ;; (if (member "last_update" fields) + ;; (conc " AND "fromdb table".last_update > "todb table".last_update);") + ;; ");"))) + (stmt1 (conc "SELECT MAX(last_update) FROM "table";")) ;; use the highest last_update as your time reference + (stmt2 (conc "SELECT no-id-fields-str FROM "table" WHERE last_update>?;")) + (stmt3 (conc "UPDATE "table" SET ("no-id-fields-str") = ("questionmarks") WHERE id=?;")) + (start-ms (current-milliseconds))) + (debug:print 0 *default-log-port* "stmt3="stmt3) + (if (sqlite3:auto-committing? dbh1) + (begin + (sqlite3:with-transaction + dbh1 + (lambda () + (sqlite3:execute dbh1 stmt1) ;; get all new rows + + #;(if (member "last_update" fields) + (sqlite3:execute dbh1 stmt8)) ;; get all updated rows + ;; (sqlite3:execute dbh stmt5) + ;; (sqlite3:execute dbh stmt4) ;; if it worked this would be better for incremental up + ;; (sqlite3:execute dbh stmt6) + )) + (debug:print 0 *default-log-port* "Synced table "table + " in "(- (current-milliseconds) start-ms)"ms")) + (debug:print 0 *default-log-port* "Skipping sync of table "table" due to transaction in flight.")))) + table-names) + (sqlite3:execute dbh1 "DETACH auxdb;")))) + + + + +;;====================================================================== +;; Moved from dbfile +;;====================================================================== + +;; wait up to aprox n seconds for a journal to go away +;; +(define (tasks:wait-on-journal path n #!key (remove #f)(waiting-msg #f)) + (if (not (string? path)) + (debug:print-error 0 *default-log-port* "Called tasks:wait-on-journal with path=" path " (not a string)") + (let ((fullpath (conc path "-journal"))) + (handle-exceptions + exn + (begin + (print-call-chain (current-error-port)) + (debug:print 0 *default-log-port* " message: " ((condition-property-accessor 'exn 'message) exn)) + (debug:print 5 *default-log-port* " exn=" (condition->list exn)) + (debug:print 0 *default-log-port* "tasks:wait-on-journal failed. Continuing on, you can ignore this call-chain") + #t) ;; if stuff goes wrong just allow it to move on + (let loop ((journal-exists (file-exists? fullpath)) + (count n)) ;; wait ten times ... 
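;;
;; Illustrative call of tasks:wait-on-journal above: wait up to roughly 30
;; seconds for 1.db-journal to disappear, printing a reminder while waiting.
;; The path and message are examples only.
;;
;; (tasks:wait-on-journal "/area/.mtdb/1.db" 30
;;                        waiting-msg: "INFO: waiting for 1.db-journal to clear")
;;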
+ (if journal-exists + (begin + (if (and waiting-msg + (eq? (modulo n 30) 0)) + (debug:print 0 *default-log-port* waiting-msg)) + (if (> count 0) + (begin + (thread-sleep! 1) + (loop (file-exists? fullpath) + (- count 1))) + (begin + (debug:print 0 *default-log-port* "ERROR: removing the journal file " fullpath ", this is not good. Look for disk full, write access and other issues.") + (if remove (system (conc "rm -rf " fullpath))) + #f))) + #t)))))) + + +;;====================================================================== +;; M E T A G E T A N D S E T V A R S +;;====================================================================== + +;; returns number if string->number is successful, string otherwise +;; also updates *global-delta* +;; +(define (db:get-var dbstruct var) + (let* ((res #f)) + (db:with-db + dbstruct #f #f ;; for the moment vars are only stored in main.db + (lambda (dbdat db) + (sqlite3:for-each-row + (lambda (val) + (set! res val)) + db + "SELECT val FROM metadat WHERE var=?;" var) + ;; convert to number if can + (if (string? res) + (let ((valnum (string->number res))) + (if valnum (set! res valnum)))) + res)))) + +(define (db:inc-var dbstruct var) + (db:with-db dbstruct #f #t + (lambda (dbdat db) + (sqlite3:execute db "UPDATE metadat SET val=val+1 WHERE var=?;" var)))) + +(define (db:dec-var dbstruct var) + (db:with-db dbstruct #f #t + (lambda (dbdat db) + (sqlite3:execute db "UPDATE metadat SET val=val-1 WHERE var=?;" var)))) + +;; This was part of db:get-var. It was used to estimate the load on +;; the database files. +;; +;; scale by 10, average with current value. +;; (set! *global-delta* (/ (+ *global-delta* (* (- (current-milliseconds) start-ms) +;; (if throttle throttle 0.01))) +;; 2)) +;; (if (> (abs (- *last-global-delta-printed* *global-delta*)) 0.08) ;; don't print all the time, only if it changes a bit +;; (begin +;; (debug:print-info 4 *default-log-port* "launch throttle factor=" *global-delta*) +;; (set! *last-global-delta-printed* *global-delta*))) + +(define (db:set-var dbstruct var val) + (db:with-db dbstruct #f #t + (lambda (dbdat db) + (sqlite3:execute (db:get-cache-stmth dbdat db "INSERT OR REPLACE INTO metadat (var,val) VALUES (?,?);") + var val)))) + +(define (db:add-var dbstruct var val) + (db:with-db dbstruct #f #t + (lambda (dbdat db) + (sqlite3:execute (db:get-cache-stmth dbdat db "UPDATE metadat SET val=val+? WHERE var=?;") val var)))) + +(define (db:del-var dbstruct var) + (db:with-db dbstruct #f #t + (lambda (dbdat db) + (sqlite3:execute (db:get-cache-stmth dbdat db "DELETE FROM metadat WHERE var=?;") var)))) + +(define (db:get-toplevels-and-incompletes dbstruct run-id running-deadtime remotehoststart-deadtime) + (let* ((toplevels '()) + (oldlaunched '()) + (incompleted '())) + (db:with-db + dbstruct run-id #t ;; not a write but problemtic + (lambda (dbdat db) + (let* ((stmth1 (db:get-cache-stmth + dbdat db + "SELECT id,rundir,uname,testname,item_path,event_time,run_duration FROM tests + WHERE run_id=? AND (strftime('%s','now') - event_time) > (run_duration + ?) + AND state IN ('RUNNING');")) + (stmth2 (db:get-cache-stmth + dbdat db + "SELECT id,rundir,uname,testname,item_path,event_time,run_duration FROM tests + WHERE run_id=? AND (strftime('%s','now') - event_time) > (run_duration + ?) + AND state IN ('REMOTEHOSTSTART');")) + (stmth3 (db:get-cache-stmth + dbdat db + "SELECT id,rundir,uname,testname,item_path FROM tests + WHERE run_id=? 
AND (strftime('%s','now') - event_time) > 86400 + AND state IN ('LAUNCHED');"))) + ;; in RUNNING or REMOTEHOSTSTART for more than 10 minutes + ;; + ;; HOWEVER: this code in run:test seems to work fine + ;; (> (- (current-seconds)(+ (db:test-get-event_time testdat) + ;; (db:test-get-run_duration testdat))) + ;; 600) + (sqlite3:for-each-row + (lambda (test-id run-dir uname testname item-path event-time run-duration) + (if (and (equal? uname "n/a") + (equal? item-path "")) ;; this is a toplevel test + ;; what to do with toplevel? call rollup? + (begin + (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) + (debug:print-info 0 *default-log-port* "Found old toplevel test in RUNNING state, test-id=" test-id)) + (begin + (set! incompleted (cons (list test-id run-dir uname testname item-path run-id) incompleted)) + (debug:print-info 0 *default-log-port* "Found old test in RUNNING state, test-id=" + test-id" exceeded running-deadtime "running-deadtime" now="(current-seconds) + " event-time="event-time" run-duration="run-duration)))) + stmth1 + run-id running-deadtime) ;; default time 720 seconds + + (sqlite3:for-each-row + (lambda (test-id run-dir uname testname item-path event-time run-duration) + (if (and (equal? uname "n/a") + (equal? item-path "")) ;; this is a toplevel test + ;; what to do with toplevel? call rollup? + (begin + (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) + (debug:print-info 0 *default-log-port* "Found old toplevel test in RUNNING state, test-id=" test-id)) + (begin + (debug:print-info 0 *default-log-port* "Found old test in REMOTEHOSTSTART state, test-id=" test-id + " exceeded running-deadtime "running-deadtime" now="(current-seconds)" event-time="event-time + " run-duration="run-duration) + (set! incompleted (cons (list test-id run-dir uname testname item-path run-id) incompleted))))) + stmth2 + run-id remotehoststart-deadtime) ;; default time 230 seconds + + ;; in LAUNCHED for more than one day. Could be long due to job queues TODO/BUG: Need override for this in config + (sqlite3:for-each-row + (lambda (test-id run-dir uname testname item-path) + (if (and (equal? uname "n/a") + (equal? item-path "")) ;; this is a toplevel test + ;; what to do with toplevel? call rollup? + (set! toplevels (cons (list test-id run-dir uname testname item-path run-id) toplevels)) + (begin + (debug:print-info 0 *default-log-port* "Found old test in LAUNCHED state, test-id=" test-id + " 1 day since event_time marked") + (set! oldlaunched (cons (list test-id run-dir uname testname item-path run-id) oldlaunched))))) + stmth3 + run-id)))) + (list incompleted oldlaunched toplevels))) + +;;====================================================================== +;; db to db sync +;;====================================================================== + +(define (dbmod:db-to-db-sync src-db dest-db last-update init-proc keys) + (if (and (file-exists? src-db) ;; can't proceed without a source + (file-read-access? src-db)) + (let* ((have-dest (file-exists? dest-db)) + (dest-file-wr (and have-dest + (file-write-access? dest-db))) ;; exists and writable + (dest-dir (or (pathname-directory dest-db) + ".")) + (dest-dir-wr (and (file-exists? dest-dir) + (file-write-access? 
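;;
;; Illustrative use of the metadat var helpers defined above; the var names and
;; values are examples only. db:get-var converts the stored text back to a
;; number when possible.
;;
;; (db:set-var dbstruct "EXAMPLE_VAR" 42)   ;; INSERT OR REPLACE INTO metadat
;; (db:get-var dbstruct "EXAMPLE_VAR")      ;; => 42
;; (db:inc-var dbstruct "EXAMPLE_VAR")      ;; val=val+1
;;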
dest-dir))) + (d-wr (or (and have-dest + dest-file-wr) + dest-dir-wr)) + (copied (if (and (not have-dest) + dest-dir-wr) + (begin + (file-copy src-db dest-db) + #t) + #f))) + (if copied + (begin + (debug:print-info 0 *default-log-port* "db-to-db-sync done with file-copy") + #t) + (let* ((tables (db:sync-all-tables-list keys)) + (sdb (dbmod:safely-open-db src-db init-proc #t)) + (ddb (dbmod:safely-open-db dest-db init-proc d-wr)) + (res (dbmod:sync-gasket tables last-update sdb ddb dest-db 'todisk keys))) + (sqlite3:finalize! sdb) + (sqlite3:finalize! ddb) + res))) + #f)) ) Index: dcommon.scm ================================================================== --- dcommon.scm +++ dcommon.scm @@ -16,22 +16,27 @@ ;; You should have received a copy of the GNU General Public License ;; along with Megatest. If not, see . ;; ;;====================================================================== +(declare (unit dcommon)) + +(declare (uses gutils)) +(declare (uses db)) +(declare (uses commonmod)) +(declare (uses rmtmod)) + (use format) (require-library iup) (import (prefix iup iup:)) (use canvas-draw) (import canvas-draw-iup) (use regex typed-records matchable) -(declare (unit dcommon)) - -(declare (uses gutils)) -(declare (uses db)) -;; (declare (uses synchash)) +(import commonmod + rmtmod + debugprint) (include "megatest-version.scm") (include "common_records.scm") (include "db_records.scm") (include "key_records.scm") @@ -635,11 +640,12 @@ (common:max (map cadr col-indices)))) (max-visible (max (- (dboard:tabdat-num-tests tabdat) 15) 3)) (max-col-vis (if (> max-col 10) 10 max-col)) (numrows 1) (numcols 1)) - (iup:attribute-set! stats-matrix "CLEARVALUE" "CONTENTS") + (if (common:low-noise-print 60 "runs-stats-update-clear") + (iup:attribute-set! stats-matrix "CLEARVALUE" "CONTENTS")) (iup:attribute-set! stats-matrix "NUMCOL" max-col ) (iup:attribute-set! stats-matrix "NUMLIN" (if (< max-row max-visible) max-visible max-row)) ;; min of 20 (iup:attribute-set! stats-matrix "NUMCOL_VISIBLE" max-col-vis) (iup:attribute-set! stats-matrix "NUMLIN_VISIBLE" (if (> max-row max-visible) max-visible max-row)) ;;(print "row-indices: " row-indices " col-indices: " col-indices) @@ -704,11 +710,13 @@ #:numlin-visible 5 )) (colnames (list "Id" "MTver" "Pid" "Host" "Interface:OutPort" "RunTime" "State" "RunId")) (updater (lambda () (if (dashboard:monitor-changed? commondat tabdat) - (let ((servers (server:choose-server *toppath* 'all-valid))) ;; (server:get-list *toppath* limit: 10))) + (let ((servers (case (rmt:transport-mode) + ((http)(server:choose-server *toppath* 'all-valid)) ;; (server:get-list *toppath* limit: 10))) + (else '())))) (iup:attribute-set! servers-matrix "NUMLIN" (length servers)) ;; (set! colnum 0) ;; (for-each (lambda (colname) ;; ;; (print "colnum: " colnum " colname: " colname) ;; (iup:attribute-set! servers-matrix (conc "0:" colnum) colname) @@ -1418,36 +1426,10 @@ (begin (set! *last-monitor-update-time* run-update-time) ;; monitor-modtime) #t) #f))) -;; DOES NOT WORK RELIABLY WITH /tmp WAL mode files. Timestamps only change when the db -;; is closed (I think). If db dir starts with /tmp always return true -;; -(define (dashboard:database-changed? 
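;;
;; Illustrative direct call of dbmod:db-to-db-sync above; this is the routine
;; the "megatest -db2db -from ... -to ..." command built earlier revolves
;; around, though the exact CLI wiring is assumed rather than shown in this
;; diff. Paths are examples only.
;;
;; (dbmod:db-to-db-sync "/tmp/.../1.db" "/area/.mtdb/1.db"
;;                      0                       ;; last-update: 0 => sync everything
;;                      (dbfile:db-init-proc)   ;; init proc used if dest must be created
;;                      keys)
;;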
commondat tabdat #!key (context-key 'default)) - (let* ((run-update-time (current-seconds)) - (dbdir (dboard:tabdat-dbdir tabdat)) - (modtime (dashboard:get-youngest-run-db-mod-time dbdir)) - (recalc (dashboard:recalc modtime - (dboard:commondat-please-update commondat) - (dboard:get-last-db-update tabdat context-key)))) - ;; (dboard:tabdat-last-db-update tabdat)))) - (if recalc - (dboard:set-last-db-update! tabdat context-key run-update-time)) - (dboard:commondat-please-update-set! commondat #f) - recalc)) - -(define (dashboard:get-youngest-run-db-mod-time dbdir) - (handle-exceptions - exn - (begin - (debug:print 2 *default-log-port* "WARNING: error in accessing databases in get-youngest-run-db-mod-time: " ((condition-property-accessor 'exn 'message) exn) - " db-dir="dbdir ", exn=" exn) - (current-seconds)) ;; something went wrong - just print an error and return current-seconds - (common:max (map (lambda (filen) - (file-modification-time filen)) - (glob (conc dbdir "/*.db*")))))) (define (dboard:get-last-db-update tabdat context) (hash-table-ref/default (dboard:tabdat-last-db-update tabdat) context 0)) (define (dboard:set-last-db-update! tabdat context newtime) Index: debugprint.scm ================================================================== --- debugprint.scm +++ debugprint.scm @@ -3,41 +3,49 @@ (declare (uses mtargs)) (module debugprint * -;;(import scheme chicken data-structures extras files ports) +(import scheme) +(cond-expand + (chicken-4 (import scheme chicken data-structures posix ports extras - - ;; scheme - ;; chicken.base - ;; chicken.string - ;; chicken.time - ;; chicken.time.posix - ;; chicken.port - ;; chicken.process-context - ;; chicken.process-context.posix - (prefix mtargs args:) srfi-1 ;; system-information - ) + )) + (chicken-5 + (import + scheme + chicken.base + chicken.string + chicken.time + chicken.time.posix + chicken.port + chicken.process-context + chicken.process-context.posix + + srfi-1 + (prefix mtargs args:)) + + (define setenv set-environment-variable!) + )) ;;====================================================================== ;; debug stuff ;;====================================================================== (define verbosity (make-parameter '())) (define *default-log-port* (current-error-port)) (define debug:print-logger (make-parameter #f)) ;; set to a proc to call on every logging print - + (define (debug:setup) (let ((debugstr (or (args:get-arg "-debug") (args:get-arg "-debug-noprop") (get-environment-variable "MT_DEBUG_MODE")))) (verbosity (debug:calc-verbosity debugstr 'q)) @@ -45,11 +53,11 @@ ;; if we were handed a bad verbosity rule then we will override it with 1 and continue (if (not (verbosity))(verbosity 1)) (if (and (not (args:get-arg "-debug-noprop")) (or (args:get-arg "-debug") (not (get-environment-variable "MT_DEBUG_MODE")))) - (setenv #;set-environment-variable! "MT_DEBUG_MODE" (if (list? (verbosity)) + (setenv "MT_DEBUG_MODE" (if (list? (verbosity)) (string-intersperse (map conc (verbosity)) ",") (conc (verbosity))))))) ;; check verbosity, #t is ok (define (debug:check-verbosity verbosity vstr) @@ -114,15 +122,15 @@ ((and (number? vb) (list? 
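;;
;; Note, for illustration: with the propagation logic above, invoking megatest
;; with e.g. "-debug 1,4" (and without -debug-noprop) exports
;; MT_DEBUG_MODE=1,4 so that child processes inherit the same verbosity list.
;; The flag values here are examples only.
;;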
n)) (member vb n)) (else #f)))) -(define (debug:handle-remote-logging params) - (if (debug:print-logger) ;; NOTE: turn params into string to keep things simple for now - ((debug:print-logger)(conc "REMOTE ("(get-host-name)", pid="(current-process-id)") " - (string-intersperse (map conc params) " ") "; " - (string-intersperse (command-line-arguments) " "))))) +;; (define (debug:handle-remote-logging params) +;; (if (debug:print-logger) ;; NOTE: turn params into string to keep things simple for now +;; ((debug:print-logger)(conc "REMOTE ("(get-host-name)", pid="(current-process-id)") " +;; (string-intersperse (map conc params) " ") "; " +;; (string-intersperse (command-line-arguments) " "))))) (define debug:enable-timestamp (make-parameter #t)) (define (debug:timestamp) (if (debug:enable-timestamp) Index: diff-report.scm ================================================================== --- diff-report.scm +++ diff-report.scm @@ -16,11 +16,17 @@ ;; along with Megatest. If not, see . ;; (declare (unit diff-report)) (declare (uses common)) +(declare (uses debugprint)) (declare (uses rmt)) +(declare (uses rmtmod)) +(declare (uses commonmod)) +(import commonmod + rmtmod + debugprint) (include "common_records.scm") (use matchable) (use fmt) (use ducttape-lib) Index: docs/manual/bisecting.png ================================================================== --- docs/manual/bisecting.png +++ docs/manual/bisecting.png cannot compute difference between binary files Index: docs/manual/megatest-test-stages.png ================================================================== --- docs/manual/megatest-test-stages.png +++ docs/manual/megatest-test-stages.png cannot compute difference between binary files Index: docs/manual/megatest_manual.html ================================================================== --- docs/manual/megatest_manual.html +++ docs/manual/megatest_manual.html @@ -1,10 +1,10 @@ - + The Megatest Users Manual