Diffstat (limited to 'scripts/lib/testresultlog/view.py')
-rw-r--r--  scripts/lib/testresultlog/view.py  198
1 file changed, 198 insertions(+), 0 deletions(-)
diff --git a/scripts/lib/testresultlog/view.py b/scripts/lib/testresultlog/view.py
new file mode 100644
index 00000000000..9edd3ec2ae7
--- /dev/null
+++ b/scripts/lib/testresultlog/view.py
@@ -0,0 +1,198 @@
+import glob
+import os
+import json
+from jinja2 import Environment, FileSystemLoader
+from testresultlog.gitstore import GitStore
+
+class TestResultView(object):
+
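+    # Return True if any directory already collected is a parent of new_dir.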
+    def _check_if_existing_dir_list_contain_parent_for_new_dir(self, dir_list, new_dir):
+        for existing_dir in dir_list:
+            # Match on a path-component boundary so e.g. "a/foo" is not
+            # mistaken for the parent of "a/foobar".
+            if new_dir.startswith(existing_dir + os.sep):
+                return True
+        return False
+
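+    # Replace any collected parent directory with the deeper new_dir, so the
+    # list only ever holds leaf directories.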
+    def _replace_existing_parent_dir_with_new_dir(self, dir_list, new_dir):
+        return [new_dir if new_dir.startswith(existing_dir + os.sep) else existing_dir
+                for existing_dir in dir_list]
+
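+    # Walk the git work tree (skipping .git) and collect the deepest
+    # directories; each leaf directory holds the result files for one report.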
+    def _get_test_report_directory_list(self, git_dir):
+        exclude = ['.git']
+        report_dir_list = []
+        for root, dirs, files in os.walk(git_dir, topdown=True):
+            # Prune excluded directories in place so os.walk never descends into them.
+            dirs[:] = [d for d in dirs if d not in exclude]
+            for dir in dirs:
+                dirname = os.path.join(root, dir)
+                if self._check_if_existing_dir_list_contain_parent_for_new_dir(report_dir_list, dirname):
+                    report_dir_list = self._replace_existing_parent_dir_with_new_dir(report_dir_list, dirname)
+                else:
+                    report_dir_list.append(dirname)
+        return report_dir_list
+
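+    # Each report directory holds one JSON result file per test module.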
+    def _get_list_of_test_result_files(self, report_dir):
+        path_pattern = os.path.join(report_dir, '*.json')
+        return glob.glob(path_pattern)
+
+    def _load_test_module_file_with_json_into_dictionary(self, file):
+        with open(file, "r") as f:
+            return json.load(f)
+
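+    # Tally PASSED / FAILED / ERROR / SKIPPED / idle ("") statuses across all
+    # suites and collect the names of failed or errored test cases. Expected
+    # JSON layout, as implied by the lookups below:
+    #   {"testsuite": {"<suite>": {"testcase": {"<case>": {"testresult": "PASSED"}}}}}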
+    def _get_test_result_and_failed_error_testcase(self, test_result_dict, show_idle):
+        count_idle = 0
+        count_passed = 0
+        count_failed = 0
+        count_skipped = 0
+        # Accumulated across every test suite in the module.
+        failed_error_test_case_list = []
+        test_suites_dict = test_result_dict['testsuite']
+        for suite in test_suites_dict.keys():
+            test_cases_dict = test_suites_dict[suite]['testcase']
+            for test_case in test_cases_dict.keys():
+                test_status = test_cases_dict[test_case]['testresult']
+                if test_status == 'FAILED' or test_status == 'ERROR':
+                    failed_error_test_case_list.append(test_case)
+                    count_failed += 1
+                elif test_status == 'PASSED':
+                    count_passed += 1
+                elif test_status == 'SKIPPED':
+                    count_skipped += 1
+                elif test_status == "":
+                    count_idle += 1
+        if show_idle:
+            return count_idle, count_passed, count_failed, count_skipped, failed_error_test_case_list
+        return count_passed, count_failed, count_skipped, failed_error_test_case_list
+
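+    # Express passed/failed/skipped counts as percentages of all executed cases.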
+    def _compute_test_result_percent_indicator(self, test_result):
+        total_tested = test_result['passed'] + test_result['failed'] + test_result['skipped']
+        test_result['passed_percent'] = 0
+        test_result['failed_percent'] = 0
+        test_result['skipped_percent'] = 0
+        if total_tested > 0:
+            test_result['passed_percent'] = format(test_result['passed'] / total_tested * 100, '.2f')
+            test_result['failed_percent'] = format(test_result['failed'] / total_tested * 100, '.2f')
+            test_result['skipped_percent'] = format(test_result['skipped'] / total_tested * 100, '.2f')
+
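+    # As above, but idle (not yet executed) cases count toward the total and
+    # completion/idle percentages are reported as well.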
+    def _compute_test_result_include_idle_percent_indicator(self, test_result):
+        total_tested = test_result['idle'] + test_result['passed'] + test_result['failed'] + test_result['skipped']
+        test_result['complete_percent'] = 0
+        test_result['idle_percent'] = 0
+        test_result['passed_percent'] = 0
+        test_result['failed_percent'] = 0
+        test_result['skipped_percent'] = 0
+        if total_tested > 0:
+            test_result['complete_percent'] = format(test_result['complete'] / total_tested * 100, '.2f')
+            test_result['idle_percent'] = format(test_result['idle'] / total_tested * 100, '.2f')
+            test_result['passed_percent'] = format(test_result['passed'] / total_tested * 100, '.2f')
+            test_result['failed_percent'] = format(test_result['failed'] / total_tested * 100, '.2f')
+            test_result['skipped_percent'] = format(test_result['skipped'] / total_tested * 100, '.2f')
+
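+    # Stringify every counter so the Jinja2 template can treat all fields uniformly.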
+    def _convert_test_result_value_to_string(self, test_result):
+        test_result['passed_percent'] = str(test_result['passed_percent'])
+        test_result['failed_percent'] = str(test_result['failed_percent'])
+        test_result['skipped_percent'] = str(test_result['skipped_percent'])
+        test_result['passed'] = str(test_result['passed'])
+        test_result['failed'] = str(test_result['failed'])
+        test_result['skipped'] = str(test_result['skipped'])
+        if 'idle' in test_result:
+            test_result['idle'] = str(test_result['idle'])
+        if 'idle_percent' in test_result:
+            test_result['idle_percent'] = str(test_result['idle_percent'])
+        if 'complete' in test_result:
+            test_result['complete'] = str(test_result['complete'])
+        if 'complete_percent' in test_result:
+            test_result['complete_percent'] = str(test_result['complete_percent'])
+
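+    # Longest value stored under 'key', used to size the report columns.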
+    def _get_max_string_len_from_test_result_list(self, test_result_list, key, default_max_len):
+        max_len = default_max_len
+        for test_result in test_result_list:
+            value_len = len(test_result[key])
+            if value_len > max_len:
+                max_len = value_len
+        return max_len
+
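+    # Aggregate pass/fail/skip totals across every result file in a report
+    # directory, then attach percentage and string representations.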
+    def _compile_test_result_for_test_report_directory(self, report_dir):
+        test_result_files = self._get_list_of_test_result_files(report_dir)
+        test_result = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
+        for file in test_result_files:
+            test_result_dict = self._load_test_module_file_with_json_into_dictionary(file)
+            count_passed, count_failed, count_skipped, failed_error_test_case_list = self._get_test_result_and_failed_error_testcase(test_result_dict, False)
+            test_result['passed'] += count_passed
+            test_result['failed'] += count_failed
+            test_result['skipped'] += count_skipped
+            test_result['failed_testcases'] += failed_error_test_case_list
+        self._compute_test_result_percent_indicator(test_result)
+        self._convert_test_result_value_to_string(test_result)
+        return test_result
+
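+    # As above, but also tracks idle and completed counts for the completion view.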
+    def _compile_test_result_include_idle_for_test_report_directory(self, report_dir):
+        test_result_files = self._get_list_of_test_result_files(report_dir)
+        test_result = {'complete': 0, 'idle': 0, 'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
+        for file in test_result_files:
+            test_result_dict = self._load_test_module_file_with_json_into_dictionary(file)
+            count_idle, count_passed, count_failed, count_skipped, failed_error_test_case_list = self._get_test_result_and_failed_error_testcase(test_result_dict, True)
+            count_complete = count_passed + count_failed + count_skipped
+            test_result['complete'] += count_complete
+            test_result['idle'] += count_idle
+            test_result['passed'] += count_passed
+            test_result['failed'] += count_failed
+            test_result['skipped'] += count_skipped
+            test_result['failed_testcases'] += failed_error_test_case_list
+        self._compute_test_result_include_idle_percent_indicator(test_result)
+        self._convert_test_result_value_to_string(test_result)
+        return test_result
+
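+    # A report directory is laid out as <git_repo>/<component>/<environment>;
+    # split the path back into those two parts.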
+    def _get_test_component_environment_from_test_report_dir(self, git_repo, report_dir):
+        test_component_environment = report_dir.replace(git_repo + '/', '')
+        test_component = test_component_environment[:test_component_environment.find("/")]
+        test_environment = test_component_environment.replace(test_component + '/', '')
+        return test_component, test_environment, test_component_environment
+
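+    # Render the selected Jinja2 template (looked up in the template/ directory
+    # next to this file) and print the text report to stdout.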
+    def _rendering_text_based_test_report(self, template_file_name, test_result_list, max_len_component, max_len_environment):
+        script_path = os.path.dirname(os.path.realpath(__file__))
+        file_loader = FileSystemLoader(os.path.join(script_path, 'template'))
+        env = Environment(loader=file_loader, trim_blocks=True)
+        template = env.get_template(template_file_name)
+        output = template.render(test_reports=test_result_list, max_len_component=max_len_component, max_len_environment=max_len_environment)
+        print('Printing text-based test report:')
+        print(output)
+
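+    # Entry point: compile the results for every report directory found in the
+    # repository, then render them all through the chosen template.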
+    def create_text_based_test_report(self, git_repo, show_completion):
+        report_dir_list = self._get_test_report_directory_list(git_repo)
+        # The template depends only on the completion flag, so pick it once.
+        if show_completion:
+            template_file_name = 'test_report_include_idle_full_text.txt'
+        else:
+            template_file_name = 'test_report_full_text.txt'
+        test_result_list = []
+        for report_dir in report_dir_list:
+            print('Compiling test result for %s:' % report_dir)
+            if show_completion:
+                test_result = self._compile_test_result_include_idle_for_test_report_directory(report_dir)
+            else:
+                test_result = self._compile_test_result_for_test_report_directory(report_dir)
+            test_component, test_environment, test_component_environment = self._get_test_component_environment_from_test_report_dir(git_repo, report_dir)
+            test_result['test_component'] = test_component
+            test_result['test_environment'] = test_environment
+            test_result['test_component_environment'] = test_component_environment
+            test_result_list.append(test_result)
+        max_len_component = self._get_max_string_len_from_test_result_list(test_result_list, 'test_component', len('test_component'))
+        max_len_environment = self._get_max_string_len_from_test_result_list(test_result_list, 'test_environment', len('test_environment'))
+        self._rendering_text_based_test_report(template_file_name, test_result_list, max_len_component, max_len_environment)
+
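+# Handler for the 'view' subcommand: check out the requested branch, then
+# generate and print the report.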
+def main(args):
+    gitstore = GitStore()
+    if gitstore.checkout_git_branch(args.git_repo, args.git_branch):
+        testresultview = TestResultView()
+        testresultview.create_text_based_test_report(args.git_repo, args.show_completion)
+    return 0
+
+def register_commands(subparsers):
+    """Register subcommands from this plugin"""
+    parser_build = subparsers.add_parser('view', help='View text-based summary test report',
+                                         description='View text-based summary test report',
+                                         group='view')
+    parser_build.set_defaults(func=main)
+    parser_build.add_argument('git_branch', help='Git branch to be used to compute the test summary report')
+    parser_build.add_argument('-g', '--git_repo', default='default', help='(Optional) Full path to the git repository to be used to compute the test summary report; defaults to <top_dir>/test-result-log.git')
+    parser_build.add_argument('-i', '--show_completion', action='store_true', help='(Optional) Also show idle (not yet executed) test cases and completion statistics')