Changes In Branch v1.64 Through [8a616355a0] Excluding Merge-Ins
This is equivalent to a diff from 45b3d258d9 to 8a616355a0
2017-02-22
  21:01  merge1 of v1.63 into v1.64  check-in: 8104470e86  user: matt  tags: v1.64, v1.6401

2017-02-21
  21:31  Missing param  check-in: 11f9127253  user: mrwellan  tags: v1.64, v1.64-defunct

2017-02-20
  23:59  Typo  check-in: 8a616355a0  user: matt  tags: v1.64, v1.64-defunct
  23:58  Cleaned up help on -env* calls  check-in: b763e5c0bc  user: matt  tags: v1.64, v1.64-defunct
  14:23  Merged incomplete db sync work and created v1.6401  check-in: 5f438a82b6  user: matt  tags: v1.64, v1.64-defunct
  07:03  Merged first version of mtutil into v1.64  Closed-Leaf check-in: 45b3d258d9  user: matt  tags: v1.64-defunct

2017-02-19
  23:47  Replaced cron logic with crude but robust approach.  check-in: 358e040c6c  user: matt  tags: run-mgr

2017-02-17
  20:48  Pulled in fix for dashboard launching  check-in: 56bd54e48e  user: matt  tags: v1.63
Modified Makefile from [e2c6c4a906] to [7d122a8cb8].
Two changes: a rule to install the new PostgreSQL schema file, and the install target now depends on mtutil and on that schema file.

    $(PREFIX)/share/db/mt-pg.sql : mt-pg.sql
            mkdir -p $(PREFIX)/share/db
            $(INSTALL) mt-pg.sql $(PREFIX)/share/db/mt-pg.sql

    install : $(PREFIX)/bin/.$(ARCHSTR) $(PREFIX)/bin/.$(ARCHSTR)/mtest $(PREFIX)/bin/megatest \
              $(PREFIX)/bin/.$(ARCHSTR)/dboard $(PREFIX)/bin/dashboard $(HELPERS) $(PREFIX)/bin/nbfake \
              $(PREFIX)/bin/nbfind $(PREFIX)/bin/loadrunner $(PREFIX)/bin/viewscreen $(PREFIX)/bin/mt_xterm \
              $(PREFIX)/share/docs/megatest_manual.html $(PREFIX)/bin/remrun $(PREFIX)/bin/mtutil \
              $(PREFIX)/share/db/mt-pg.sql
Modified db.scm from [b010476b5f] to [727d18ac53].
The module imports are extended and the dbi egg is now loaded:

    (use sqlite3 srfi-1 posix regex regex-case srfi-69 csv-xml s11n md5 message-digest
         base64 format dot-locking z3 typed-records sql-null matchable)
    (import (prefix sqlite3 sqlite3:))
    (import (prefix base64 base64:))

    ;; (include "/nfs/site/disks/icf_fdk_cw_gwa002/srehman/fossil/dbi/dbi.scm")
    ;; (import (prefix dbi dbi:))
    (use (prefix dbi dbi:))
The dbr:dbstruct record gains a slave-dbs field:

    (defstruct dbr:dbstruct
      ;; (tmpdb       #f)
      (dbstack     #f) ;; stack for tmp db handles, do not initialize with a stack
      (mtdb        #f)
      (refndb      #f)
      (slave-dbs   '())
      (homehost    #f) ;; not used yet
      (on-homehost #f) ;; not used yet
      )
In the code that opens the tmp/megatest/reference db handles, a block is added: when running as a server and the config has an [ext-sync] section, each listed external database is opened through dbi (sqlite3 or PostgreSQL), initialized, and recorded in the new slave-dbs field. The rest of the hunk (write-access handling, pushing tmpdb on the stack, the initial mtdb-to-tmpdb sync) is unchanged context.

    (if (args:get-arg "-server")
        (if (configf:get-section *configdat* "ext-sync")
            (let* ((dblist (configf:get-section *configdat* "ext-sync"))
                   (res '())
                   (cfgdb #f))
              (for-each
               (lambda (dbitem)
                 (let* ((stringsplit (string-split (cadr dbitem)))
                        (dbtype (string->symbol (car stringsplit)))
                        (dbpath (cadr stringsplit)))
                   (case dbtype
                     ((sqlite3)
                      (set! cfgdb (dbi:open dbtype (cons (cons 'dbname dbpath) '())))
                      (db:initialize-main-db (dbi:db-conn cfgdb))
                      (db:initialize-run-id-db (dbi:db-conn cfgdb))
                      (set! res (cons (cons cfgdb dbpath) res)))
                     ((pg)
                      (let* ((dbinfo '()))
                        (for-each
                         (lambda (x)
                           (if (not (eqv? (string->symbol x) dbtype))
                               (let* ((pair (string-split x ":")))
                                 (if (not (eqv? pair '()))
                                     (set! dbinfo (cons (cons (string->symbol (car pair)) (cadr pair)) dbinfo))))))
                         stringsplit)
                        (set! cfgdb (dbi:open dbtype dbinfo))
                        (set! res (cons (cons cfgdb (alist-ref 'host dbinfo)) res)))))))
               dblist)
              (dbr:dbstruct-slave-dbs-set! dbstruct res))))
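Going by how that block tokenizes each [ext-sync] entry (first token is the db type; for sqlite3 the second token is a file path, for pg the remaining tokens are colon-separated key:value pairs), a config section might look like the sketch below. The entry names and the pg keys other than host are assumptions, not taken from this change set.

    [ext-sync]
    # hypothetical entries -- only the "sqlite3 <path>" and "pg key:value ..." shapes
    # come from the parsing code; dbname/user are guesses at dbi:open's pg parameters
    ext1 sqlite3 /path/to/external/megatest.db
    ext2 pg host:pghost.example.com dbname:megatest user:mtuser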
The column lists used by the sync code now carry SQL types, with the id columns tagged 'key, so the same table descriptions can drive both sqlite3 and PostgreSQL statements. db:sync-tests-only and db:sync-main-list are updated throughout; the start of the tests entry shows the new shape:

    (list "tests"
          '("id"           "INTEGER" 'key)
          '("run_id"       "INTEGER")
          '("testname"     "TEXT")
          '("host"         "TEXT")
          '("cpuload"      "REAL")
          '("diskfree"     "INTEGER")
          ;; ... remaining tests, test_steps, test_data, keys, metadat, runs and
          ;; test_meta columns are annotated the same way ...
          )

db:sync-all-tables-list still appends db:sync-main-list and db:sync-tests-only.
db:repair-db gets an early cond clause so PostgreSQL handles are never "repaired":

    (cond
     ((eqv? (dbi:db-dbtype (db:dbdat-get-db dbdat)) 'pg)
      #t)
     ((not (file-write-access? dbdir))
      (debug:print 0 *default-log-port* "WARNING: can't write to " dbdir ", can't fix " fname)
      #f)
     ;; handle special cases, megatest.db and monitor.db
     ...)
Later in db:repair-db the read/write test now opens the file through dbi (reproduced verbatim from the commit), and db:sync-tables is reworked to accept slave databases, normalizing its handles through dbi:convert; its sanity checks use dbi:database? and, on an exception, every target (todb plus the slave dbs) is run through db:repair-db.

    ;; test read/write access to the database
    (let ((db (dbi:open 'sqlite3 (cons (cons ('dbname dbpath) '())))))
      (cond
       ((equal? fname "megatest.db")
        (sqlite3:execute db "DELETE FROM tests WHERE state='DELETED';"))
       ((equal? fname "main.db")
        (sqlite3:execute db "DELETE FROM runs WHERE state='deleted';"))
       ((string-match "\\d.db" fname)
        (sqlite3:execute db "UPDATE tests SET state='DELETED' WHERE state='DELETED';"))
       ((equal? fname "monitor.db")
        (sqlite3:execute "DELETE FROM servers WHERE state LIKE 'defunct%';"))
       (else
        (sqlite3:execute db "vacuum;")))
      (sqlite3:finalize! db)
      #t)

    (define (db:sync-tables tbls last-update fromdb todb . slave-dbs)
      (set! todb   (cons (dbi:convert (db:dbdat-get-db todb))   (db:dbdat-get-path todb)))
      (set! fromdb (cons (dbi:convert (db:dbdat-get-db fromdb)) (db:dbdat-get-path fromdb)))
      ...)
The per-table copy loop inside db:sync-tables is reworked around the dbi layer: source rows are read with dbi:for-each-row and gathered into batches (size from [sync] batchsize, default 10), the target rows are read the same way, and changed rows are then written to the primary target plus every slave db inside dbi:with-transaction. For a PostgreSQL target a "PREPARE fullupdate (...) AS UPDATE ..." statement is built from the typed field list, empty strings become sql-null, and a row whose update affects nothing falls back to an INSERT. Abridged from the new code:

    ;; read the source table in batches
    (dbi:for-each-row
     (lambda (a)
       (set! fromdat (cons a fromdat))
       (if (> (length fromdat) batch-len)
           (begin
             (set! fromdats (cons fromdat fromdats))
             (set! fromdat  '())
             (set! totrecords (+ totrecords 1)))))
     (db:dbdat-get-db fromdb)
     full-sel)
    ;; ...
    ;; write changed rows to the primary target and every slave db
    (for-each
     (lambda (targdb)
       (set! targdb (dbi:convert (db:dbdat-get-db targdb)))
       (if (eqv? (dbi:db-dbtype targdb) 'pg)
           ;; ... build "PREPARE fullupdate (...) AS UPDATE <table> SET ..." into full-ins ...
           )
       (let* ((db    (dbi:convert (db:dbdat-get-db targdb)))
              (stmth (dbi:prepare db full-ins)))
         (for-each
          (lambda (fromdat-lst)
            (dbi:with-transaction db
             (lambda ()
               ;; ... compare each fromrow against todat, dbi:prepare-exec the changed
               ;; rows, fall back to an INSERT when no row was affected ...
               )))
          fromdats)
         (dbi:close stmth)))
     (append (list todb) slave-dbs))
db:multi-db-sync now picks up the slave databases from the dbstruct:

    (let* ((mtdb      (dbr:dbstruct-mtdb dbstruct))
           (tmpdb     (db:get-db dbstruct))
           (refndb    (dbr:dbstruct-refndb dbstruct))
           (slave-dbs (dbr:dbstruct-slave-dbs dbstruct))
           (allow-cleanup #t) ;; (if run-ids #f #t))
           (servers (server:get-list *toppath*))
           (data-synced 0)) ;; count of changed records (I hope)
      ...)
A new 'synctoconfig option is handled: when present and [ext-sync] is configured, each external db is opened through dbi (the same sqlite3/pg parsing as the server-side block shown earlier) and megatest.db is synced into each of them; the existing 'new2old sync also gains the slave dbs.

    (if (member 'synctoconfig options)
        (if (configf:get-section *configdat* "ext-sync")
            (let* ((dblist (configf:get-section *configdat* "ext-sync"))
                   (res '())
                   (cfgdb #f))
              (for-each
               (lambda (dbitem)
                 ;; same sqlite3/pg parsing and dbi:open calls as in the [ext-sync]
                 ;; block added to the db-open code above
                 ...)
               dblist)
              (for-each
               (lambda (todb)
                 (db:sync-tables (db:sync-all-tables-list dbstruct) #f mtdb todb))
               res))))

    (if (member 'new2old options)
        (set! data-synced
          (+ (apply db:sync-tables (db:sync-all-tables-list dbstruct) #f tmpdb refndb mtdb slave-dbs)
             data-synced)))
A final hunk in the sqlite3 schema-creation code touches only the tests_index creation statement; the visible text is the same on both sides (formatting-only change):

    (sqlite3:execute db "CREATE INDEX IF NOT EXISTS tests_index ON tests (run_id, testname, item_path);")
Modified launch.scm from [b091c1ed1c] to [cd4ca46ef8].
Just before returning *toppath*, the setup code now honours a new -append-config flag: if the named file is readable it is read into the already-loaded config.

    ;; if have -append-config then read and append here
    (let ((cfname (args:get-arg "-append-config")))
      (if (and cfname (file-read-access? cfname))
          (read-config cfname *configdat* #t))) ;; values are added to the hash, no need to do anything special.
    *toppath*
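A minimal illustration of the new flag (the file name and the -list-runs invocation are only an example): any settings in the extra file are layered on top of megatest.config.

    megatest -append-config extra-settings.config -list-runs %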
Modified megatest-version.scm from [813a77d970] to [00b9725f9d].
The version number is bumped:

    ;; Always use two or four digit decimal
    ;; 1.01, 1.02...1.10,1.11,1.1101 ... 1.99,2.00..
    (declare (unit megatest-version))
    (define megatest-version 1.6401)
Modified megatest.scm from [c18877ce07] to [f1cbfd9a61].
The help text gains the -config/-append-config switches and the -envcap/-envdelta entries are reworked (matching the "Cleaned up help on -env* calls" check-in):

    -config fname           : override the megatest.config file with fname
    -append-config fname    : append fname to the megatest.config file

    Utilities
    -env2file fname         : write the environment to fname.csh and fname.sh
    -envcap a               : save current variables labeled as context 'a' in file envdat.db
    -envdelta a-b           : output enviroment delta from context a to context b to -o fname
                              set the output mode with -dumpmode csh, bash or ini
                              note: ini format will use calls to use curr and minimize path
The argument-processing list gains the two new switches:

    ;; process args
    (define remargs (args:get-args
                     (argv)
                     (list  "-runtests"       ;; run a specific test
                            "-config"         ;; override the config file name
                            "-append-config"
                            "-execute"        ;; run the command encoded in the base64 parameter
                            "-step"
                            ...)
                     ...))
The list of boolean switches gains -sync-to-configdb:

    "-import-megatest.db"
    "-sync-to-megatest.db"
    "-sync-to-configdb"
    "-logging"
A handler for the new switch is added after the -sync-to-megatest.db block:

    (if (args:get-arg "-sync-to-configdb")
        (begin
          (db:multi-db-sync
           (db:setup)
           'synctoconfig
           )
          (set! *didsomething* #t)))
Added mt-pg.sql version [5d168e54a7].
The new file is the PostgreSQL version of the megatest schema: tables keys, areas, ttype, runs, run_stats, test_meta, tasks_queue, archive_disks, archive_blocks, archive_allocations, extradat, metadat, access_log, tests, test_steps, test_data, test_rundat and archives, with timestamp defaults using extract(epoch from now()) instead of sqlite3's strftime('%s','now'), and a closing TRUNCATE of every table. Representative excerpt:

    CREATE TABLE IF NOT EXISTS keys (
           id        INTEGER PRIMARY KEY,
           fieldname TEXT,
           fieldtype TEXT,
           CONSTRAINT keyconstraint UNIQUE (fieldname));

    CREATE TABLE IF NOT EXISTS areas (
           id        INTEGER PRIMARY KEY,
           areaname  TEXT DEFAULT 'local',
           areapath  TEXT DEFAULT '.',
           last_sync INTEGER DEFAULT 0,
           CONSTRAINT areaconstraint UNIQUE (areaname));

    INSERT INTO areas (id,areaname,areapath) VALUES (0,'local','.');

    CREATE TABLE IF NOT EXISTS runs (
           id          INTEGER PRIMARY KEY,
           target      TEXT DEFAULT '',
           ttype_id    INTEGER DEFAULT 0,
           runname     TEXT DEFAULT 'norun',
           state       TEXT DEFAULT '',
           status      TEXT DEFAULT '',
           owner       TEXT DEFAULT '',
           event_time  INTEGER DEFAULT extract(epoch from now()),
           comment     TEXT DEFAULT '',
           fail_count  INTEGER DEFAULT 0,
           pass_count  INTEGER DEFAULT 0,
           last_update INTEGER DEFAULT extract(epoch from now()),
           area_id     INTEGER DEFAULT 0,
           CONSTRAINT runsconstraint UNIQUE (runname));

    -- ... remaining tables (ttype, run_stats, test_meta, tasks_queue, archive_*,
    --     extradat, metadat, access_log, tests, test_steps, test_data, test_rundat,
    --     archives) mirror the sqlite3 schema with the same default substitutions ...

    TRUNCATE archive_blocks, archive_allocations, extradat, metadat,
             access_log, tests, test_steps, test_data, test_rundat, archives,
             keys, runs, run_stats, test_meta, tasks_queue, archive_disks;
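The mtutil help added in this change set shows the intended way to load the schema; a sketch of that usage (the database name mydb is just a placeholder):

    # emit the installed PostgreSQL schema and pipe it into a database
    mtutil db pgschema | psql -d mydb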
Modified mtut.scm from [e6a134e6d3] to [50e439c840].
mtutil now declares a dependency on the rmt module:

    (declare (uses common))
    (declare (uses megatest-version))
    (declare (uses margs))
    (declare (uses configf))
    (declare (uses rmt))
The Actions help gains a db entry:

    archive : compress and move test data to archive disk
    kill    : stop tests or entire runs
    db      : database utilities
A Utility section is added to the help string:

    Utility
     pgschema : emit postgresql schema; do \"mtutil db pgschema | psql -d mydb\"
The leftover-argument check now tolerates the db action:

    (if (and (not (null? remargs))
             (not (or (args:get-arg "-runstep")
                      (args:get-arg "-envcap")
                      (args:get-arg "-envdelta")
                      (member *action* '("db")) ;; very loose checks on db.
                      )))
        (debug:print-error 0 *default-log-port* "Unrecognised arguments: "
                           (string-intersperse (if (list? remargs) remargs (argv)) " ")))
The action dispatcher gains a db case with a pgschema subcommand (plus a junk test hook that calls rmt:get-keys):

    ((db)
     (if (null? remargs)
         (print "ERROR: missing sub command for db command")
         (let ((subcmd (car remargs)))
           (case (string->symbol subcmd)
             ((pgschema)
              (let* ((install-home (common:get-install-area))
                     (schema-file  (conc install-home "/share/db/mt-pg.sql")))
                (if (file-exists? schema-file)
                    (system (conc "/bin/cat " schema-file)))))
             ((junk)
              (rmt:get-keys))))))