update to phab report
- process recursive builds - fail on any non-pass state. This should not be run if something is still running. We consider skipped and other non-passed states as failures, on the assumption that if a build is skipped because a newer build is running we will get a new update soon enough. - report links for all failed tests and all failed jobs. Not sure if that would be too verbose. For #438, #445
This commit is contained in:
parent
349ec53a57
commit
749193c8be
3 changed files with 139 additions and 57 deletions
24
scripts/command_utils.py
Normal file
24
scripts/command_utils.py
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
# Copyright 2023 Google LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License v2.0 with LLVM Exceptions (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# https://llvm.org/LICENSE.txt
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
def get_env_or_die(name: str) -> str:
    """Return the value of environment variable *name*, terminating on failure.

    Exits the process with status 2 (after printing an error to stderr) when
    the variable is unset or empty, so callers can rely on a non-empty result.
    """
    v = os.environ.get(name)
    if not v:
        sys.stderr.write(f"Error: '{name}' environment variable is not set.\n")
        # sys.exit over the site-provided exit() builtin: exit() is only
        # guaranteed in interactive sessions and is absent under -S / frozen
        # interpreters; sys.exit raises the same SystemExit(2).
        sys.exit(2)
    return v
|
|
@ -16,11 +16,14 @@
|
||||||
import argparse
|
import argparse
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
from typing import Any, Tuple
|
||||||
|
|
||||||
from phabtalk.phabtalk import PhabTalk
|
from phabtalk.phabtalk import PhabTalk
|
||||||
from buildkite_utils import format_url, BuildkiteApi, strip_emojis
|
from buildkite_utils import format_url, BuildkiteApi, strip_emojis
|
||||||
import xunit_utils
|
import xunit_utils
|
||||||
|
from command_utils import get_env_or_die
|
||||||
from benedict import benedict
|
from benedict import benedict
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
|
||||||
def get_failed_jobs(build: benedict) -> []:
|
def get_failed_jobs(build: benedict) -> []:
|
||||||
|
@ -32,75 +35,126 @@ def get_failed_jobs(build: benedict) -> []:
|
||||||
return failed_jobs
|
return failed_jobs
|
||||||
|
|
||||||
|
|
||||||
def process_unit_test_reports(bk: BuildkiteApi, build: benedict, prefix: str) -> []:
|
# One processed Buildkite job, possibly carrying a nested triggered build.
@dataclass
class jobResult:
    # Human-readable job name (emojis stripped; falls back to "(<type>)").
    name: str
    # Web URL of the job, or of the triggered sub-build for trigger jobs.
    url: str
    # Nested jobResult entries from a triggered sub-build (empty otherwise).
    sub: list
    # True iff this job passed AND (for trigger jobs) its whole sub-build did.
    success: bool
    # Failed unit tests parsed from this job's test-results.xml artifacts.
    tests: list
||||||
|
|
||||||
|
# Returns list of jobs in the build and success flag.
def process_build(bk: BuildkiteApi, build: benedict) -> Tuple[list[jobResult], bool]:
    """Walk a Buildkite build (recursively through trigger jobs) and collect results.

    Returns a 2-element list [jobs, success] where jobs is a list of jobResult
    (one per non-waiter job) and success is True only if every job passed.
    NOTE(review): annotated as Tuple but actually returns a list — callers
    unpack it, so behavior is the same; confirm before tightening.
    """
    logging.info(f"Processing build {build.get('id')} {build.get('pipeline.name')}. All jobs:")
    # First pass: log every job for debugging before any filtering happens.
    for job in build.get('jobs', []):
        logging.info(f'job ID={job.get("id")} NAME={job.get("name")} type={job.get("type")} state={job.get("state")}')
    success = True
    result = []
    for job in build.get('jobs', []):
        # benedict gives keypath access like job.get('triggered_build.url').
        job = benedict(job)
        job_type = job.get('type')
        logging.info(f'Processing job ID={job.get("id")}')
        job_state = job.get('state')
        if job_type == 'waiter':
            # "wait" steps carry no results of their own.
            logging.info('job type is "waiter", ignoring')
            continue
        name = job.get('name')
        if (name is None) or (strip_emojis(name).strip() == ''):
            # Unnamed jobs (or emoji-only names) get a placeholder from type.
            name = f"({job.get('type')})"
        logging.info(f"name {name}")
        name = strip_emojis(name)
        # Any state other than 'passed' (failed, skipped, canceled, ...) counts
        # as a failure — see commit message rationale.
        j = jobResult(
            name=name,
            sub=[],
            success=job_state=='passed',
            tests=fetch_job_unit_tests(job),
            url=job.get('web_url',''))
        if job.get('type') == 'trigger':
            # Recurse into the triggered pipeline's build and merge its result.
            triggered_url = job.get('triggered_build.url')
            logging.info(f'processing a trigger build from {triggered_url}')
            if triggered_url != '':
                sub_build = benedict(bk.get(triggered_url).json())
                # Prefer the sub-pipeline's own name/URL for reporting.
                j.name = sub_build.get('pipeline.name')
                j.url = sub_build.get('web_url')
                j.sub, s = process_build(bk, sub_build)
                j.success = j.success and s
        result.append(j)
        success = success and j.success
    return [result, success]
|
||||||
|
|
||||||
|
# Returns a list of failed tests from a failed script job.
def fetch_job_unit_tests(job: benedict) -> list[Any]:
    """Download and parse the first test-results.xml artifact of a failed script job.

    Returns a list of failed-test records (as produced by
    xunit_utils.parse_failures), or [] when the job is not a failed script job,
    has no artifacts, or has no test-results.xml artifact.
    NOTE(review): reads module-level globals 'bk' and 'build' defined in the
    __main__ block instead of taking them as parameters — confirm intentional.
    """
    if job.get('state') != 'failed' or job.get('type') != 'script':
        logging.info(f"skipping job with state {job.get('state')} and type {job.get('type')}, only failed scripts are considered")
        return []
    artifacts_url = job.get('artifacts_url')
    if artifacts_url is None:
        # NOTE(review): message typo — should read "job has no artifacts".
        logging.warning('job has not artifacts')
        return []
    artifacts = bk.get(artifacts_url).json()
    for a in artifacts:
        a = benedict(a)
        if not a.get('filename').endswith('test-results.xml') or not a.get('download_url'):
            continue
        content = bk.get(a.get('download_url')).content
        # Context string for each failure: job name, falling back to the
        # (global) build's pipeline name.
        ctx = strip_emojis(job.get('name', build.get('pipeline.name')))
        # Only the first matching artifact is parsed; the rest are ignored.
        return xunit_utils.parse_failures(content, ctx)
    logging.info('file test-results.xml not found')
    return []
|
||||||
|
|
||||||
|
def print_jobs(jobs: list[jobResult], pad: str):
    """Debug-print the job tree to stdout, one line per job, indenting sub-builds."""
    for j in jobs:
        print(f"{pad} {j.name} {j.success}")
        # Recurse into triggered sub-builds with a deeper indent.
        print_jobs(j.sub, pad + ' ')
|
||||||
|
|
||||||
|
# Returns a flat list of job results. Sub jobs get a prefix of a parent one.
def flatten_jobs(jobs: list[jobResult], prefix: str) -> Tuple[list[jobResult], list[Any]]:
    """Flatten a nested jobResult tree into (flat job list, flat failed-test list).

    Job names are rewritten in place to carry the full parent chain
    ("parent - child"), and each job's failed tests get the same prefix via
    xunit_utils.add_context_prefix.
    NOTE(review): annotated as Tuple but returns a 2-element list; callers
    unpack it, so the difference is benign.
    """
    r = []
    t = []
    for j in jobs:
        # Mutates j.name so the flat list keeps the hierarchy readable.
        j.name = prefix + j.name
        t.extend(xunit_utils.add_context_prefix(j.tests, prefix))
        r.append(j)
        sr, st = flatten_jobs(j.sub, f"{j.name} - ")
        r.extend(sr)
        t.extend(st)
    return [r, t]
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
parser = argparse.ArgumentParser()
|
parser = argparse.ArgumentParser()
|
||||||
parser.add_argument('--log-level', type=str, default='INFO')
|
parser.add_argument('--log-level', type=str, default='INFO')
|
||||||
|
parser.add_argument('--debug', action='store_true')
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
|
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
|
||||||
|
bk_api_token = get_env_or_die('BUILDKITE_API_TOKEN')
|
||||||
|
bk_pipeline_slug = get_env_or_die('BUILDKITE_PIPELINE_SLUG')
|
||||||
|
bk_organization_slug = get_env_or_die("BUILDKITE_ORGANIZATION_SLUG")
|
||||||
|
bk_build_number = get_env_or_die("BUILDKITE_BUILD_NUMBER")
|
||||||
|
dry_run=os.getenv('ph_dry_run_report') is not None
|
||||||
|
if dry_run:
|
||||||
|
logging.info('running in dry-run mode, not exchanging with phabricator')
|
||||||
ph_buildable_diff = os.getenv('ph_buildable_diff')
|
ph_buildable_diff = os.getenv('ph_buildable_diff')
|
||||||
ph_target_phid = os.getenv('ph_target_phid')
|
conduit_token = get_env_or_die('CONDUIT_TOKEN')
|
||||||
if ph_target_phid is None:
|
ph_target_phid = 'ph_target_phid'
|
||||||
logging.warning('ph_target_phid is not specified. Will not update the build status in Phabricator')
|
if not dry_run:
|
||||||
exit(0)
|
ph_target_phid = get_env_or_die('ph_target_phid')
|
||||||
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), dry_run_updates=(os.getenv('ph_dry_run_report') is not None))
|
phabtalk = PhabTalk(conduit_token, dry_run_updates=dry_run)
|
||||||
report_success = False # for try block
|
report_success = False # for try block
|
||||||
failed_tests = []
|
failed_tests = []
|
||||||
try:
|
try:
|
||||||
bk = BuildkiteApi(os.getenv("BUILDKITE_API_TOKEN"), os.getenv("BUILDKITE_ORGANIZATION_SLUG"))
|
bk = BuildkiteApi(bk_api_token, bk_organization_slug)
|
||||||
build = bk.get_build(os.getenv("BUILDKITE_PIPELINE_SLUG"), os.getenv("BUILDKITE_BUILD_NUMBER"))
|
# Build type is https://buildkite.com/docs/apis/rest-api/builds#get-a-build.
|
||||||
success = True
|
build = bk.get_build(bk_pipeline_slug, bk_build_number)
|
||||||
failed_tests = process_unit_test_reports(bk, build, '')
|
jobs, success = process_build(bk, build)
|
||||||
for i, job in enumerate(build.get('jobs', [])):
|
jobs, failed_tests = flatten_jobs(jobs, '')
|
||||||
job = benedict(job)
|
if args.debug:
|
||||||
job_web_url = job.get('web_url', os.getenv('BUILDKITE_BUILD_URL', ''))
|
print_jobs(jobs, '')
|
||||||
logging.info(f'{job.get("id")} {job.get("name")} state {job.get("state")}')
|
for t in failed_tests:
|
||||||
job_state = job.get('state')
|
t['details'] = ''
|
||||||
if job.get('type') == 'waiter':
|
for j in jobs:
|
||||||
continue
|
if not j.success:
|
||||||
if job_state != 'passed' and job_state != 'failed':
|
phabtalk.maybe_add_url_artifact(ph_target_phid, j.url, j.name)
|
||||||
# Current and irrelevant steps.
|
report_success = success
|
||||||
continue
|
|
||||||
if job_state == 'passed' and i == 0:
|
|
||||||
# Skip successful first step as we assume it to be a pipeline setup
|
|
||||||
continue
|
|
||||||
name = job.get('name')
|
|
||||||
if job.get('type') == 'trigger':
|
|
||||||
job_web_url = job.get('triggered_build.web_url', job_web_url)
|
|
||||||
triggered_url = job.get('triggered_build.url')
|
|
||||||
if triggered_url != '':
|
|
||||||
sub_build = benedict(bk.get(triggered_url).json())
|
|
||||||
name = name or sub_build.get('pipeline.name')
|
|
||||||
failed_steps = get_failed_jobs(sub_build)
|
|
||||||
failed_tests.extend(process_unit_test_reports(bk, sub_build, name))
|
|
||||||
if job_state == 'failed' and failed_steps:
|
|
||||||
name = f"{name} ({', '.join(failed_steps[:2])}{', ...' if len(failed_steps) > 2 else ''})"
|
|
||||||
name = strip_emojis(name) or 'unknown'
|
|
||||||
phabtalk.maybe_add_url_artifact(ph_target_phid, job_web_url, f"{name} {job_state}")
|
|
||||||
if job_state == 'failed':
|
|
||||||
success = False
|
|
||||||
report_success = success # Must be last before finally: block to report errors in this script.
|
|
||||||
finally:
|
finally:
|
||||||
build_url = f'https://reviews.llvm.org/harbormaster/build/{os.getenv("ph_build_id")}'
|
build_url = f'https://reviews.llvm.org/harbormaster/build/{os.getenv("ph_build_id")}'
|
||||||
print(f'Reporting results to Phabricator build {format_url(build_url)}', flush=True)
|
print(f'Reporting results to Phabricator build {format_url(build_url)}', flush=True)
|
||||||
|
|
|
@ -16,7 +16,7 @@
|
||||||
import argparse
|
import argparse
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
from typing import Optional
|
from typing import Any, Optional
|
||||||
from lxml import etree
|
from lxml import etree
|
||||||
from phabtalk.phabtalk import Report, Step
|
from phabtalk.phabtalk import Report, Step
|
||||||
|
|
||||||
|
@ -81,10 +81,14 @@ def parse_failures(test_xml: bytes, context: str) -> []:
|
||||||
'namespace': test_case.attrib['classname'],
|
'namespace': test_case.attrib['classname'],
|
||||||
'result': 'fail',
|
'result': 'fail',
|
||||||
'duration': float(test_case.attrib['time']),
|
'duration': float(test_case.attrib['time']),
|
||||||
'details': failure.text
|
'details': failure.text,
|
||||||
})
|
})
|
||||||
return failed_cases
|
return failed_cases
|
||||||
|
|
||||||
|
def add_context_prefix(tests: list[Any], prefix: str) -> list[Any]:
    """Prepend *prefix* to the 'engine' field of every test record.

    The dicts in *tests* are modified in place; the same list object is
    returned so the call can be used inline (e.g. inside extend()).
    """
    for test_case in tests:
        test_case['engine'] = prefix + test_case['engine']
    return tests
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
parser = argparse.ArgumentParser(description='Processes results from xml report')
|
parser = argparse.ArgumentParser(description='Processes results from xml report')
|
||||||
|
|
Loading…
Reference in a new issue