Changes In Branch v1.63-configdbsync Through [ad8b9df640] Excluding Merge-Ins
This is equivalent to a diff from e58c0e3fb7 to ad8b9df640
2017-02-03
14:25  added sync of megatest.db to config dbs (check-in: ae352678af, user: srehman, tags: v1.63-configdbsync)

2017-02-02
17:41  Merged server-log-handshaking to v1.63 (check-in: 9d4d98bcd5, user: mrwellan, tags: v1.63)
14:52  added basic html generation (check-in: ae20a8a286, user: pjhatwal, tags: html-gen)

2017-01-31
17:47  completed diff report feature (check-in: 118224962b, user: bjbarcla, tags: v1.63-xor-report)

2017-01-30
12:23  added update on conflict insert for pg, synced with latest v1.63 (check-in: ad8b9df640, user: srehman, tags: v1.63-configdbsync)

2017-01-26
13:46  preparations to add update on conflict insert for pg (check-in: f2e2f9ce97, user: srehman, tags: v1.63-configdbsync)

2017-01-25
22:02  Stripped out nearly all server management stuff related to monitor.db server tables; partially replaced with log-file-based handshaking (check-in: 6d32c79c1e, user: matt, tags: server-log-handshaking)
17:37  Automated merge of v1.63/e58c0e3fb7/integ into integ-home (check-in: 6e5448c9cf, user: matt, tags: integ-home)
17:11  made youngest-db error message more useful (check-in: e58c0e3fb7, user: bjbarcla, tags: v1.63)

2017-01-23
13:28  Changed num tests to draw per cycle to 200, bumped version to v1.6305 (check-in: a03bdb5581, user: mrwellan, tags: v1.63, v1.6305)
Modified db.scm from [a6e4ada759] to [3917c44888]. The excerpts below show the affected regions as they read in the new revision.
︙
;; dbstruct vector containing all the relevant dbs like main.db, megatest.db, run.db etc

(use (srfi 18) extras tcp stack)  ;; RADT => use of require-extension?
(use sqlite3 srfi-1 posix regex regex-case srfi-69 csv-xml s11n md5 message-digest base64 format dot-locking z3 typed-records)
(import (prefix sqlite3 sqlite3:))
(import (prefix base64 base64:)) ;; RADT => prefix??
(include "/nfs/site/disks/icf_fdk_cw_gwa002/srehman/fossil/dbi/dbi.scm")
(import (prefix dbi dbi:))

(declare (unit db))
(declare (uses common))
(declare (uses keys))
(declare (uses ods))
(declare (uses client))
(declare (uses mt))
︙
;; I propose this record evolves into the area record
;;
(defstruct dbr:dbstruct
  ;; (tmpdb       #f)
  (dbstack     #f) ;; stack for tmp db handles, do not initialize with a stack
  (mtdb        #f)
  (refndb      #f)
  (slave-dbs   '())
  (homehost    #f) ;; not used yet
  (on-homehost #f) ;; not used yet
  )                ;; goal is to converge on one struct for an area but for now it is too confusing

;; record for keeping state,status and count for doing roll-ups in
;; iterated tests
︙
  (let* ((dbpath       (db:dbfile-path)) ;; 0))
         (dbexists     (file-exists? dbpath))
         (dbfexists    (file-exists? (conc dbpath "/megatest.db")))
         (tmpdb        (db:open-megatest-db path: dbpath)) ;; lock-create-open dbpath db:initialize-main-db))
         (mtdb         (db:open-megatest-db))
         (refndb       (db:open-megatest-db path: dbpath name: "megatest_ref.db"))
         (write-access (file-write-access? dbpath)))
    (if (args:get-arg "-server")
        (if (configf:get-section *configdat* "ext-sync")
            (let* ((dblist (configf:get-section *configdat* "ext-sync"))
                   (res    '())
                   (cfgdb  #f))
              (for-each
               (lambda (dbitem)
                 (let* ((stringsplit (string-split (cadr dbitem)))
                        (dbtype      (string->symbol (car stringsplit)))
                        (dbpath      (cadr stringsplit)))
                   (case dbtype
                     ((sqlite3)
                      (set! cfgdb (dbi:open dbtype (cons (cons 'dbname dbpath) '())))
                      (db:initialize-main-db (dbi:db-conn cfgdb))
                      (db:initialize-run-id-db (dbi:db-conn cfgdb))
                      (set! res (cons (cons cfgdb dbpath) res)))
                     ((pg)
                      (let* ((dbinfo '()))
                        (for-each
                         (lambda (x)
                           (if (not (eqv? (string->symbol x) dbtype))
                               (let* ((pair (string-split x ":")))
                                 (if (not (eqv? pair '()))
                                     (set! dbinfo (cons (cons (string->symbol (car pair)) (cadr pair)) dbinfo))))))
                         stringsplit)
                        (set! cfgdb (dbi:open dbtype dbinfo))
                        (set! res (cons (cons cfgdb (alist-ref 'host dbinfo)) res)))))))
               dblist)
              (dbr:dbstruct-slave-dbs-set! dbstruct res))))
    (if (and dbexists (not write-access))
        (set! *db-write-access* #f))
    (dbr:dbstruct-mtdb-set!    dbstruct mtdb)
    (dbr:dbstruct-dbstack-set! dbstruct (make-stack))
    (stack-push! (dbr:dbstruct-dbstack dbstruct) tmpdb) ;; olddb is already a (cons db path)
    (dbr:dbstruct-refndb-set!  dbstruct refndb)
    ;; (mutex-unlock! *rundb-mutex*)
    (if (and (not dbfexists) write-access) ;; *db-write-access*) ;; did not have a prior db and do have write access
        (begin
          (debug:print 0 *default-log-port* "filling db " (db:dbdat-get-path tmpdb) " with data from " (db:dbdat-get-path mtdb))
          (db:sync-tables (db:sync-all-tables-list dbstruct) #f mtdb refndb tmpdb))
        (debug:print 0 *default-log-port* " db, " (db:dbdat-get-path tmpdb) " already exists, not propogating data from " (db:dbdat-get-path mtdb)))
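Based on the parsing above, each line of an [ext-sync] config section names a slave database as a type token (sqlite3 or pg) followed by either a file path or colon-separated connection fields; for pg targets the host value is what gets paired with the handle in slave-dbs. A minimal sketch of such a section, with purely hypothetical paths, host, and database names:

  [ext-sync]
  cfg1 sqlite3 /tmp/external_sync.db
  cfg2 pg host:pghost.example.com dbname:megatest user:mtuser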
︙
;; just tests, test_steps and test_data tables
(define db:sync-tests-only
  (list
   ;; (list "strs"
   ;;       '("id"  #f)
   ;;       '("str" #f))
   (list "tests"
         '("id"           "INTEGER" 'key)
         '("run_id"       "INTEGER")
         '("testname"     "TEXT")
         '("host"         "TEXT")
         '("cpuload"      "REAL")
         '("diskfree"     "INTEGER")
         '("uname"        "TEXT")
         '("rundir"       "TEXT")
         '("shortdir"     "TEXT")
         '("item_path"    "TEXT")
         '("state"        "TEXT")
         '("status"       "TEXT")
         '("attemptnum"   "INTEGER")
         '("final_logf"   "TEXT")
         '("logdat"       "TEXT")
         '("run_duration" "INTEGER")
         '("comment"      "TEXT")
         '("event_time"   "INTEGER")
         '("fail_count"   "INTEGER")
         '("pass_count"   "INTEGER")
         '("archived"     "INTEGER"))
   (list "test_steps"
         '("id"         "INTEGER" 'key)
         '("test_id"    "INTEGER")
         '("stepname"   "TEXT")
         '("state"      "TEXT")
         '("status"     "TEXT")
         '("event_time" "INTEGER")
         '("comment"    "TEXT")
         '("logfile"    "TEXT"))
   (list "test_data"
         '("id"       "INTEGER" 'key)
         '("test_id"  "INTEGER")
         '("category" "TEXT")
         '("variable" "TEXT")
         '("value"    "REAL")
         '("expected" "REAL")
         '("tol"      "REAL")
         '("units"    "TEXT")
         '("comment"  "TEXT")
         '("status"   "TEXT")
         '("type"     "TEXT"))))

;; needs db to get keys, this is for syncing all tables
;;
(define (db:sync-main-list dbstruct)
  (let ((keys (db:get-keys dbstruct)))
    (list
     (list "keys"
           '("id"        "INTEGER" 'key)
           '("fieldname" "TEXT")
           '("fieldtype" "TEXT"))
     (list "metadat"
           '("id"  "INTEGER" 'key)
           '("var" "TEXT")
           '("val" "TEXT"))
     (list "runs"
           '("id"             "INTEGER" 'key)
           '("release"        "TEXT")
           '("iteration"      "TEXT")
           '("testsuite_mode" "TEXT")
           '("runname"        "TEXT")
           '("state"          "TEXT")
           '("status"         "TEXT")
           '("owner"          "TEXT")
           '("event_time"     "INTEGER")
           '("comment"        "TEXT")
           '("fail_count"     "INTEGER")
           '("pass_count"     "INTEGER"))
     (list "test_meta"
           '("id"          "INTEGER" 'key)
           '("testname"    "TEXT")
           '("owner"       "TEXT")
           '("description" "TEXT")
           '("reviewed"    "INTEGER")
           '("iterated"    "TEXT")
           '("avg_runtime" "REAL")
           '("avg_disk"    "REAL")
           '("tags"        "TEXT")
           '("jobgroup"    "TEXT")))))

(define (db:sync-all-tables-list dbstruct)
  (append (db:sync-main-list dbstruct)
          db:sync-tests-only))

;; use bunch of Unix commands to try to break the lock and recreate the db
;;
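The (fieldname type ['key]) triples above drive the SQL strings that db:sync-tables assembles further down. For the keys table, for instance, the generated full-ins statement works out to roughly the following (shown for illustration only; the sync code builds it at runtime):

  INSERT OR REPLACE INTO keys ( id,fieldname,fieldtype )  VALUES ( ?,?,? );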
︙
              (if (member fname '("megatest.db" "monitor.db"))
                  "megatest -cleanup-db"
                  "megatest -import-megatest.db;megatest -cleanup-db")
              "\"\n")
     (exit) ;; we can not safely continue when a db was corrupted - even if fixed.
     )
   ;; test read/write access to the database
   (let ((db (dbi:open 'sqlite3 (cons (cons 'dbname dbpath) '()))))
     (cond
      ((equal? fname "megatest.db")
       (sqlite3:execute db "DELETE FROM tests WHERE state='DELETED';"))
      ((equal? fname "main.db")
       (sqlite3:execute db "DELETE FROM runs WHERE state='deleted';"))
      ((string-match "\\d.db" fname)
       (sqlite3:execute db "UPDATE tests SET state='DELETED' WHERE state='DELETED';"))
      ((equal? fname "monitor.db")
       (sqlite3:execute db "DELETE FROM servers WHERE state LIKE 'defunct%';"))
      (else
       (sqlite3:execute db "vacuum;")))
     (dbi:close db)
     #t))))))

;; tbls is ( ("tablename" ( "field1" [#f|proc1] ) ( "field2" [#f|proc2] ) .... ) )
;; db's are dbdat's
;;
;; if last-update specified ("field-name" . time-in-seconds)
;;    then sync only records where field-name >= time-in-seconds
;;    IFF field-name exists
;;
(define (db:sync-tables tbls last-update fromdb todb . slave-dbs)
  (set! todb   (cons (dbi:convert (db:dbdat-get-db todb))   (db:dbdat-get-path todb)))
  (set! fromdb (cons (dbi:convert (db:dbdat-get-db fromdb)) (db:dbdat-get-path fromdb)))
  (handle-exceptions
   exn
   (begin
     (debug:print 0 *default-log-port* "EXCEPTION: database probably overloaded or unreadable in db:sync-tables.")
     (print-call-chain (current-error-port))
     (debug:print 0 *default-log-port* " message: " ((condition-property-accessor 'exn 'message) exn))
     (print "exn=" (condition->list exn))
︙
                  (cons todb slave-dbs))
        0)
  ;; this is the work to be done
  (cond
   ((not fromdb)
    (debug:print 3 *default-log-port* "WARNING: db:sync-tables called with fromdb missing")
    -1)
   ((not todb)
    (debug:print 3 *default-log-port* "WARNING: db:sync-tables called with todb missing")
    -2)
   ((not (dbi:database? (db:dbdat-get-db fromdb)))
    (debug:print-error 0 *default-log-port* "db:sync-tables called with fromdb not a database " fromdb)
    -3)
   ((not (dbi:database? (db:dbdat-get-db todb)))
    (debug:print-error 0 *default-log-port* "db:sync-tables called with todb not a database " todb)
    -4)
   (else
    (let ((stmts      (make-hash-table)) ;; table-field => stmt
          (all-stmts  '())               ;; ( ( stmt1 value1 ) ( stml2 value2 ))
          (numrecs    (make-hash-table))
          (start-time (current-milliseconds))
          (tot-count  0))
︙
                                 (conc " " (car last-update) ">=" (cdr last-update))
                                 "")
                             ";"))
              (full-ins (conc "INSERT OR REPLACE INTO " tablename " ( " (string-intersperse (map car fields) ",") " ) "
                              " VALUES ( " (string-intersperse (make-list num-fields "?") ",") " );"))
              (fromdat    '())
              (fromdats   '())
              (tabletypes '())
              (totrecords 0)
              (batch-len  (string->number (or (configf:lookup *configdat* "sync" "batchsize") "10")))
              (todat      (make-hash-table))
              (count      0))
         ;; set up the field->num table
         (for-each
          (lambda (field)
            (hash-table-set! field->num field count)
            (set! count (+ count 1)))
          fields)

         ;; read the source table
         (dbi:for-each-row
          (lambda (a)
            (set! fromdat (cons a fromdat))
            (if (> (length fromdat) batch-len)
                (begin
                  (set! fromdats (cons fromdat fromdats))
                  (set! fromdat '())
                  (set! totrecords (+ totrecords 1)))))
          (db:dbdat-get-db fromdb)
          full-sel)

         ;; tack on remaining records in fromdat
         (if (not (null? fromdat))
             (set! fromdats (cons fromdat fromdats)))

         (if (common:low-noise-print 120 "sync-records")
             (debug:print-info 4 *default-log-port* "found " totrecords " records to sync"))

         ;; read the target table
         (dbi:for-each-row
          (lambda (a)
            (hash-table-set! todat (vector-ref a 0) a))
          (db:dbdat-get-db todb)
          full-sel)

         ;; first pass implementation, just insert all changed rows
         (for-each
          (lambda (targdb)
            (set! targdb (dbi:convert (db:dbdat-get-db targdb)))
            (if (eqv? (dbi:db-dbtype targdb) 'pg)
                (let* ((prep        "")
                       (set-stmt    "")
                       (key         (car (map car fields)))
                       (list-fields (map car fields)))
                  (set! prep (string-intersperse (map cadr fields) ","))
                  (set! prep (conc "PREPARE fullupdate (" prep ") AS UPDATE " tablename " SET ")) ;; maybe add lookup in the future depending on where types are needed
                  (let loop ((i 1))
                    (set! set-stmt (conc set-stmt (list-ref list-fields i) " = $" (+ i 1) ", "))
                    (if (< i (- (length list-fields) 2))
                        (loop (+ i 1))
                        (set! set-stmt (conc set-stmt (list-ref list-fields (+ i 1)) " = $" (+ i 2) " WHERE " key " = $1;"))))
                  (set! full-ins (conc prep set-stmt))))
            (let* ((db    (dbi:convert (db:dbdat-get-db targdb)))
                   (stmth (dbi:prepare db full-ins)))
              ;; (db:delay-if-busy targdb) ;; NO WAITING
              (for-each
               (lambda (fromdat-lst)
                 (dbi:with-transaction
                  db
                  (lambda ()
                    (for-each ;;
                     (lambda (fromrow)
                       (let* ((a    (vector-ref fromrow 0))
                              (curr (hash-table-ref/default todat a #f))
                              (same #t)
                              (res  #f)
                              (len  (length (vector->list fromrow))))
                         (let loop ((i 0))
                           (if (or (not curr)
                                   (not (equal? (vector-ref fromrow i)(vector-ref curr i))))
                               (set! same #f))
                           (if (and same (< i (- num-fields 1)))
                               (loop (+ i 1))))
                         (if (not same)
                             (begin
                               (set! res (apply dbi:prepare-exec stmth (vector->list fromrow)))
                               (hash-table-set! numrecs tablename (+ 1 (hash-table-ref/default numrecs tablename 0)))))
                         (if (and (not same)
                                  (eqv? (dbi:get-res res 'affected-rows) 0))
                             (let* ((prep ""))
                               (set! prep (string-intersperse (map cadr fields) ","))
                               (set! prep (conc "INSERT INTO " tablename " ( " (string-intersperse (map car fields) ",") " ) VALUES ( " (string-intersperse (make-list len "?") ",") " );")) ;; maybe add lookup in the future depending on where types are needed
                               (begin
                                 (hash-table-set! numrecs tablename (- 1 (hash-table-ref/default numrecs tablename 0)))
                                 (apply dbi:exec db prep (vector->list fromrow))
                                 (hash-table-set! numrecs tablename (+ 1 (hash-table-ref/default numrecs tablename 0))))))))
                     ;; (begin
                     ;;   (dbi:prepare-exec stmth (vector->list fromrow))
                     ;;   (hash-table-set! numrecs tablename (+ 1 (hash-table-ref/default numrecs tablename 0)))
                     fromdat-lst))))
               fromdats)
              (dbi:close stmth)))
          (append (list todb) slave-dbs))))
      tbls)
     (let* ((runtime      (- (current-milliseconds) start-time))
            (should-print (or (debug:debug-mode 12)
                              (common:low-noise-print 120 "db sync" (> runtime 500))))) ;; low and high sync times treated as separate.
       (if should-print (debug:print 3 *default-log-port* "INFO: db sync, total run time " runtime " ms"))
       (for-each
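For a PostgreSQL target the prep/set-stmt assembly above swaps the INSERT OR REPLACE form for a prepared UPDATE keyed on the first field. Applied to the test_steps field list defined earlier, the constructed statement comes out roughly as follows (a sketch derived from the code, not captured output):

  PREPARE fullupdate (INTEGER,INTEGER,TEXT,TEXT,TEXT,INTEGER,TEXT,TEXT) AS
    UPDATE test_steps SET test_id = $2, stepname = $3, state = $4, status = $5,
           event_time = $6, comment = $7, logfile = $8 WHERE id = $1;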
︙
;;
;; run-ids: '(1 2 3 ...) or #f (for all)
;;
(define (db:multi-db-sync dbstruct . options)
  (if (not (launch:setup))
      (debug:print 0 *default-log-port* "ERROR: not able to setup up for megatest.")
      (let* ((mtdb          (dbr:dbstruct-mtdb dbstruct))
             (tmpdb         (db:get-db dbstruct))
             (refndb        (dbr:dbstruct-refndb dbstruct))
             (slave-dbs     (dbr:dbstruct-slave-dbs dbstruct))
             (allow-cleanup #t) ;; (if run-ids #f #t))
             (tdbdat        (tasks:open-db))
             (servers       (tasks:get-all-servers (db:delay-if-busy tdbdat)))
             (data-synced   0)) ;; count of changed records (I hope)
        ;; kill servers
        (if (member 'killservers options)
︙
        ;;          run-ids)))
        ;; now ensure all newdb data are synced to megatest.db
        ;; do not use the run-ids list passed in to the function
        ;;
        (if (member 'new2old options)
            (set! data-synced
                  (+ (apply db:sync-tables (db:sync-all-tables-list dbstruct) #f tmpdb refndb mtdb slave-dbs)
                     data-synced)))

        (if (member 'fixschema options)
            (begin
              (db:patch-schema-maindb (db:dbdat-get-db mtdb))
              (db:patch-schema-maindb (db:dbdat-get-db tmpdb))
︙
                     comment     TEXT DEFAULT '',
                     event_time  TIMESTAMP DEFAULT (strftime('%s','now')),
                     fail_count  INTEGER DEFAULT 0,
                     pass_count  INTEGER DEFAULT 0,
                     archived    INTEGER DEFAULT 0, -- 0=no, > 1=archive block id where test data can be found
                     last_update INTEGER DEFAULT (strftime('%s','now')),
                     CONSTRAINT testsconstraint UNIQUE (run_id, testname, item_path));")
  (sqlite3:execute db "CREATE INDEX IF NOT EXISTS tests_index ON tests (run_id, testname, item_path);")
  (sqlite3:execute db "CREATE TRIGGER IF NOT EXISTS update_tests_trigger AFTER UPDATE ON tests
                         FOR EACH ROW
                           BEGIN
                             UPDATE tests SET last_update=(strftime('%s','now'))
                               WHERE id=old.id;
                           END;")
  (sqlite3:execute db "CREATE TABLE IF NOT EXISTS test_steps
︙