|
1 | 1 | #!/usr/bin/env python3 |
2 | 2 |
|
3 | 3 | import argparse |
| 4 | +import json |
4 | 5 | import os |
5 | 6 | import yaml |
6 | 7 | import subprocess |
@@ -354,215 +355,112 @@ def parse_suite(suite_path, parent_suite_path, options, settings, name=None): |
354 | 355 | sys.exit(1) |
355 | 356 | return suite |
356 | 357 |
|
357 | | -def write_report_result_tree(file, includes, data, depth): |
358 | | - for test in data['suite']: |
359 | | - indent = ' ' * depth |
360 | | - stars = '*' + '*' * depth |
361 | | - |
362 | | - string = f"{indent}" |
363 | | - string += f"{stars}" |
364 | | - string += f" {resultfmt(test)}" |
365 | | - if 'outfile' in test: |
366 | | - string += f" <<output-{test['unix_name']},{test['uniq_id']} {test['name']}>>" |
367 | | - else: |
368 | | - string += f" {test['uniq_id']} {test['name']}" |
369 | | - |
370 | | - file.write(f"{string}\n") |
371 | | - |
372 | | - if 'suite' in test: |
373 | | - write_report_result_tree(file, includes, test, depth + 1) |
374 | | - |
375 | | -def resultfmt(test): |
376 | | - result = test.get('result', 'unknown') |
377 | | - if result == 'masked-fail': |
378 | | - return "[.fail line-through]#FAIL#" |
379 | | - elif result == 'masked-skip': |
380 | | - return "[.skip line-through]#SKIP#" |
381 | | - else: |
382 | | - return f"[.{result}]#{result.upper()}#" |
def collect_test_logs(test_data, depth=0):
    """Recursively collect all test logs and embed them in the data structure.

    Walks every entry under test_data['suite']; for each test that carries an
    'outfile', the corresponding file under LOGDIR is read and its text stored
    in test['logs'].  Read failures are captured as an error message in
    test['logs'] instead of raising, so one missing log never aborts the run.
    Nested suites are visited via recursion.
    """
    if not test_data.get('suite'):
        return

    for test in test_data['suite']:
        if 'outfile' in test:
            log_path = os.path.join(LOGDIR, test['outfile'])
            try:
                with open(log_path, 'r') as handle:
                    contents = handle.read()
            except FileNotFoundError:
                contents = f"Log file not found: {log_path}"
            except Exception as err:
                # Best-effort: record the problem inline rather than failing.
                contents = f"Error reading log file {log_path}: {err}"
            test['logs'] = contents

        if 'suite' in test:
            collect_test_logs(test, depth + 1)
def calculate_test_summary(test_data):
    """Calculate summary statistics for all tests.

    Returns a dict with per-result counters ('pass', 'fail', 'skip',
    'masked_fail', 'masked_skip') plus 'total'.  Only leaf test cases (entries
    with a 'result' key) are counted; sub-suite nodes are traversed but not
    counted themselves.  A leaf with an unrecognized result still bumps
    'total', matching the original tallying behavior.
    """
    counts = {
        'pass': 0,
        'fail': 0,
        'skip': 0,
        'masked_fail': 0,
        'masked_skip': 0,
        'total': 0,
    }
    # Map the 'result' strings used in suite data onto counter keys.
    key_for = {
        'pass': 'pass',
        'fail': 'fail',
        'skip': 'skip',
        'masked-fail': 'masked_fail',
        'masked-skip': 'masked_skip',
    }

    def count_tests(data):
        for test in data.get('suite') or []:
            if 'suite' in test:
                # Sub-suite node: recurse, but do not count the node itself.
                count_tests(test)
            elif 'result' in test:
                key = key_for.get(test['result'])
                if key is not None:
                    counts[key] += 1
                counts['total'] += 1

    count_tests(test_data)
    return counts
| 413 | + |
def write_json_result(data, config):
    """Write comprehensive JSON result file with embedded logs.

    Embeds every test's log output into the suite tree (collect_test_logs),
    computes summary counters, and writes a single self-contained JSON
    document to LOGDIR/result.json containing metadata, summary, and the
    full suite.  Returns the path of the written file.
    """
    # Pull all log files into the suite tree so the JSON is self-contained.
    collect_test_logs(data)

    summary = calculate_test_summary(data)

    now = datetime.now()

    # Optional project metadata, taken from the project config when present.
    project_info = {}
    if config:
        if 'PROJECT-NAME' in config:
            project_info['name'] = config['PROJECT-NAME']
        if 'PROJECT-ROOT' in config:
            root = config['PROJECT-ROOT']
            project_info['root'] = root
            # Git-derived version info for the project under test.
            project_info['version'] = run_git_cmd(root, ["describe", "--tags", "--always"])
            project_info['sha'] = run_git_cmd(root, ['rev-parse', 'HEAD'])
        if 'PROJECT-TOPDOC' in config:
            project_info['topdoc'] = config['PROJECT-TOPDOC']

    # Version of the 9pm framework itself.
    ninepm_sha = run_git_cmd(ROOT_PATH, ['rev-parse', 'HEAD'])

    json_data = {
        'metadata': {
            'timestamp': now.isoformat(),
            'date': now.strftime("%Y-%m-%d"),
            'project': project_info,
            'environment': {
                '9pm_version': ninepm_sha[:10] if ninepm_sha else 'unknown',
                'log_dir': LOGDIR,
                'scratch_dir': SCRATCHDIR,
                'root_path': ROOT_PATH,
            },
        },
        'summary': summary,
        'suite': data,
    }

    json_path = os.path.join(LOGDIR, 'result.json')
    with open(json_path, 'w') as handle:
        json.dump(json_data, handle, indent=2, ensure_ascii=False)

    return json_path
566 | 464 |
|
567 | 465 | def print_result_tree(data, base): |
568 | 466 | i = 1 |
@@ -917,9 +815,10 @@ def main(): |
917 | 815 | cprint(pcolor.green, "\no Execution") |
918 | 816 |
|
919 | 817 | print_result_tree(suite, "") |
920 | | - write_md_result(suite) |
921 | | - write_github_result(suite) |
922 | | - write_report(suite, proj) |
| 818 | + |
| 819 | + # Export comprehensive JSON result |
| 820 | + json_path = write_json_result(suite, proj) |
| 821 | + vcprint(pcolor.faint, f"JSON results written to: {json_path}") |
923 | 822 |
|
924 | 823 | db.close() |
925 | 824 | sys.exit(err) |
|
0 commit comments