1
0
Fork 0

Process results and unit-test output of libcxx

Now the "report" step combines results in a uniform way and processes unit test
results XML output. It works for sub-builds only started from the 'premerge'
pipeline, i.e. non-recursive. One downside is that now one has to wait until
all jobs have finished.

- Add instructions to setup python environment

- added option to do full report cycle but not call Phabricator

- use "annotations" to show build status. That lifts the need to filter ninja
  and other output (thus `ph_no_filter_output` param removed) and output
  everything. That is nice as script failures no longer lead to loss of logs.

- improved annotate() usability

- misc fixes
This commit is contained in:
Mikhail Goncharov 2020-11-25 15:29:50 +01:00
parent 515f086ba2
commit 681fbbe2cf
15 changed files with 228 additions and 177 deletions

View file

@ -10,6 +10,20 @@
# Playbooks
## Development environment
You will need a recent Python 3 installed, e.g. follow this
[installation guide](https://cloud.google.com/python/docs/setup?hl=en).
To install required packages run:
```shell script
pip install -r ./scripts/requirements.txt
```
optional:
```shell script
pip install jupyterlab pandas seaborn # for jupyter labs.
```
## Testing scripts locally
Build and run agent docker image `sudo ./containers/build_run.sh buildkite-premerge-debian /bin/bash`.
@ -165,11 +179,11 @@ please refer to the source code for the details. These variables have `ph_` pref
Most commonly used are:
- `ph_scripts_refspec` ("master" by default): refspec branch of llvm-premerge-checks to use. This variable is also used in pipeline "bootstrap" in Buildkite interface.
- `ph_dry_run_report`: do not report any results back to Phabricator.
- `ph_no_cache`: (if set to any value) clear compilation cache before the build.
- `ph_projects`: which projects to use, "detect" will look on diff to infer the projects, "default" selects all projects.
- `ph_notify_email`: comma-separated list of email addresses to be notified when build is complete.
- `ph_log_level` ("DEBUG", "INFO", "WARNING" (default) or "ERROR"): log level for build scripts.
- `ph_no_filter_output` (if set to any value): do not filter output of `ninja all` and other commands from buildkite log.
- `ph_linux_agents`, `ph_windows_agents`: custom JSON constraints on agents. For example you might put one machine to a custom queue if it's erroneous and send jobs to it with `ph_windows_agents="{{\"queue\": \"custom\"}}"`.
- `ph_skip_linux`, `ph_skip_windows` (if set to any value): skip build on this OS.

View file

@ -28,5 +28,6 @@ if __name__ == '__main__':
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
PhabTalk(os.getenv('CONDUIT_TOKEN')).maybe_add_url_artifact(args.phid, args.url, args.name)
dry = os.getenv('ph_dry_run_report') is not None
PhabTalk(os.getenv('CONDUIT_TOKEN'), dry_run_updates=dry).maybe_add_url_artifact(args.phid, args.url, args.name)

View file

@ -3,13 +3,14 @@ import os
import re
import subprocess
import urllib.parse
import shlex
from typing import Optional
from benedict import benedict
import backoff
import requests
context_style = {}
previous_context = 'default'
styles = ['default', 'info', 'success', 'warning', 'error']
@ -30,26 +31,30 @@ def upload_file(base_dir: str, file: str):
return None
def annotate(message: str, style: str = 'default', context: str = 'default', append: bool = True):
def annotate(message: str, style: str = 'default', context: Optional[str] = None, append: bool = True):
"""
Adds an annotation for that currently running build.
Note that last `style` applied to the same `context` takes precedence.
"""
global previous_context, styles, context_style
if style not in styles:
style = 'default'
if context is None:
context = previous_context
previous_context = context
# Pick most severe style so far.
context_style.setdefault(context, 0)
context_style[context] = max(styles.index(style), context_style[context])
style = styles[context_style[context]]
if append:
message += '\n\n'
r = subprocess.run(f"buildkite-agent annotate {shlex.quote(message)}"
f' --style={shlex.quote(style)}'
f" {'--append' if append else ''}"
f" --context={shlex.quote(context)}", shell=True, capture_output=True)
cmd = ['buildkite-agent', 'annotate', message, '--style', style, '--context', context]
if append:
cmd.append('--append')
r = subprocess.run(cmd, capture_output=True)
logging.debug(f'annotate call {r}')
if r.returncode != 0:
logging.warning(message)
logging.warning(r)
def feedback_url():
@ -63,18 +68,24 @@ class BuildkiteApi:
self.token = token
self.organization = organization
@backoff.on_exception(backoff.expo, Exception, max_tries=3, logger='', factor=3)
def get_build(self, pipeline: str, build_number: str):
authorization = f'Bearer {self.token}'
# https://buildkite.com/docs/apis/rest-api/builds#get-a-build
url = f'https://api.buildkite.com/v2/organizations/{self.organization}/pipelines/{pipeline}/builds/{build_number}'
response = requests.get(url, headers={'Authorization': authorization})
return benedict(self.get(f'https://api.buildkite.com/v2/organizations/{self.organization}/pipelines/{pipeline}/builds/{build_number}').json())
@backoff.on_exception(backoff.expo, Exception, max_tries=3, logger='', factor=3)
def get(self, url: str):
authorization = f'Bearer {self.token}'
response = requests.get(url, allow_redirects=True, headers={'Authorization': authorization})
if response.status_code != 200:
raise Exception(f'Builkite responded with non-OK status: {response.status_code}')
return response.json()
raise Exception(f'Buildkite responded with non-OK status: {response.status_code}')
return response
def format_url(url: str, name: Optional[str] = None):
    """Render a clickable terminal hyperlink using Buildkite's OSC 1339 escape sequence.

    :param url: target of the link.
    :param name: text shown for the link; falls back to the URL itself.
    :return: the escape-sequence string, terminated with a newline.
    """
    label = url if name is None else name
    return f"\033]1339;url='{url}';content='{label}'\a\n"
def strip_emojis(s: str) -> str:
    """Remove Buildkite-style ``:emoji:`` tokens from *s* and trim surrounding whitespace."""
    without_tokens = re.sub(r':[^:]+:', '', s)
    return without_tokens.strip()

View file

@ -22,6 +22,7 @@ import pathspec
import unidiff
from phabtalk.phabtalk import Report, Step
from buildkite_utils import annotate
def get_diff(base_commit) -> Tuple[bool, str]:
@ -93,8 +94,8 @@ def run(base_commit, ignore_config, step: Optional[Step], report: Optional[Repor
report.add_artifact(os.getcwd(), patch_file, 'clang-format')
if not success:
step.success = False
step.messages.append(
'Please format your changes with clang-format by running `git-clang-format HEAD^` or applying patch.')
annotate(f'clang-format: Please format your changes with clang-format by running `git-clang-format HEAD^`'
f'or applying the attached patch.', style='error')
logging.debug(f'report: {report}')
logging.debug(f'step: {step}')

View file

@ -22,7 +22,7 @@ from typing import Optional
import pathspec
import ignore_diff
from buildkite_utils import format_url
from buildkite_utils import annotate
from phabtalk.phabtalk import Report, Step
@ -100,11 +100,9 @@ def run(base_commit, ignore_config, step: Optional[Step], report: Optional[Repor
report.add_artifact(os.getcwd(), p, 'clang-tidy')
if errors_count + warn_count != 0:
step.success = False
url = format_url("https://github.com/google/llvm-premerge-checks/blob/master/docs/clang_tidy.md"
"#review-comments.", "why?")
step.messages.append(
f'clang-tidy found {errors_count} errors and {warn_count} warnings. {inline_comments} of them are added '
f'as review comments {url}')
url = "https://github.com/google/llvm-premerge-checks/blob/master/docs/clang_tidy.md#review-comments."
annotate(f'clang-tidy found {errors_count} errors and {warn_count} warnings. {inline_comments} of them were '
f'added as review comments [why?]({url})', style='error')
logging.debug(f'report: {report}')
logging.debug(f'step: {step}')

View file

@ -17,8 +17,6 @@ Interactions with Phabricator.
"""
import logging
import socket
import time
from typing import Optional, List, Dict
import uuid
@ -32,33 +30,26 @@ class PhabTalk:
"""
def __init__(self, token: Optional[str], host: Optional[str] = 'https://reviews.llvm.org/api/',
dryrun: bool = False):
dry_run_updates: bool = False):
self._phab = None # type: Optional[Phabricator]
if not dryrun:
self._phab = Phabricator(token=token, host=host)
self.update_interfaces()
self.dry_run_updates = dry_run_updates
self._phab = Phabricator(token=token, host=host)
self.update_interfaces()
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def update_interfaces(self):
self._phab.update_interfaces()
@property
def dryrun(self):
return self._phab is None
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def get_revision_id(self, diff: str) -> Optional[str]:
"""Get the revision ID for a diff from Phabricator."""
if self.dryrun:
return None
result = self._phab.differential.querydiffs(ids=[diff])
return 'D' + result[diff]['revisionID']
def comment_on_diff(self, diff_id: str, text: str):
"""Add a comment to a differential based on the diff_id"""
print('Sending comment to diff {}:'.format(diff_id))
print(text)
logging.info('Sending comment to diff {}:'.format(diff_id))
logging.info(text)
revision_id = self.get_revision_id(diff_id)
if revision_id is not None:
self._comment_on_revision(revision_id, text)
@ -72,16 +63,16 @@ class PhabTalk:
'value': text
}]
if self.dryrun:
print('differential.revision.edit =================')
print('Transactions: {}'.format(transactions))
if self.dry_run_updates:
logging.info('differential.revision.edit =================')
logging.info('Transactions: {}'.format(transactions))
return
# API details at
# https://secure.phabricator.com/conduit/method/differential.revision.edit/
self._phab.differential.revision.edit(objectIdentifier=revision,
transactions=transactions)
print('Uploaded comment to Revision D{}:{}'.format(revision, text))
logging.info('Uploaded comment to Revision D{}:{}'.format(revision, text))
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def update_build_status(self, phid: str, working: bool, success: bool, lint: {}, unit: []):
@ -114,11 +105,11 @@ class PhabTalk:
}
lint_messages.append(lint_message)
if self.dryrun:
print('harbormaster.sendmessage =================')
print('type: {}'.format(result_type))
print('unit: {}'.format(unit))
print('lint: {}'.format(lint_messages))
if self.dry_run_updates:
logging.info('harbormaster.sendmessage =================')
logging.info('type: {}'.format(result_type))
logging.info('unit: {}'.format(unit))
logging.info('lint: {}'.format(lint_messages))
return
self._phab.harbormaster.sendmessage(
@ -126,16 +117,16 @@ class PhabTalk:
type=result_type,
unit=unit,
lint=lint_messages)
print('Uploaded build status {}, {} test results and {} lint results'.format(
logging.info('Uploaded build status {}, {} test results and {} lint results'.format(
result_type, len(unit), len(lint_messages)))
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def create_artifact(self, phid, artifact_key, artifact_type, artifact_data):
if self.dryrun:
print('harbormaster.createartifact =================')
print('artifactKey: {}'.format(artifact_key))
print('artifactType: {}'.format(artifact_type))
print('artifactData: {}'.format(artifact_data))
if self.dry_run_updates:
logging.info('harbormaster.createartifact =================')
logging.info('artifactKey: {}'.format(artifact_key))
logging.info('artifactType: {}'.format(artifact_type))
logging.info('artifactData: {}'.format(artifact_data))
return
self._phab.harbormaster.createartifact(
buildTargetPHID=phid,
@ -144,6 +135,9 @@ class PhabTalk:
artifactData=artifact_data)
def maybe_add_url_artifact(self, phid: str, url: str, name: str):
if self.dry_run_updates:
logging.info(f'add ULR artifact "{name}" {url}')
return
if phid is None:
logging.warning('PHID is not provided, cannot create URL artifact')
return
@ -155,7 +149,6 @@ class Step:
self.name = ''
self.success = True
self.duration = 0.0
self.messages = []
self.reproduce_commands = []
def set_status_from_exit_code(self, exit_code: int):

View file

@ -30,7 +30,7 @@ if __name__ == '__main__':
'label': 'create branch',
'key': 'create-branch',
'commands': [
'pip install -r scripts/requirements.txt',
'pip install -q -r scripts/requirements.txt',
'scripts/apply_patch.sh'
],
'agents': {'queue': 'linux'},

View file

@ -25,7 +25,6 @@ steps_generators = [
if __name__ == '__main__':
scripts_refspec = os.getenv("ph_scripts_refspec", "master")
no_cache = os.getenv('ph_no_cache') is not None
filter_output = '--filter-output' if os.getenv('ph_no_filter_output') is None else ''
projects = os.getenv('ph_projects', 'clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;openmp;polly')
log_level = os.getenv('ph_log_level', 'WARNING')
notify_emails = list(filter(None, os.getenv('ph_notify_emails', '').split(',')))

View file

@ -16,9 +16,9 @@
import logging
import os
from buildkite_utils import annotate, feedback_url
from choose_projects import ChooseProjects
import git
from phabtalk.phabtalk import PhabTalk
from steps import generic_linux, generic_windows, from_shell_output, checkout_scripts
import yaml
@ -30,15 +30,15 @@ if __name__ == '__main__':
scripts_refspec = os.getenv("ph_scripts_refspec", "master")
diff_id = os.getenv("ph_buildable_diff", "")
no_cache = os.getenv('ph_no_cache') is not None
filter_output = '--filter-output' if os.getenv('ph_no_filter_output') is None else ''
projects = os.getenv('ph_projects', 'detect')
log_level = os.getenv('ph_log_level', 'INFO')
logging.basicConfig(level=log_level, format='%(levelname)-7s %(message)s')
phid = os.getenv('ph_target_phid')
# Add link in review to the build.
if phid is not None:
PhabTalk(os.getenv('CONDUIT_TOKEN')).maybe_add_url_artifact(phid, os.getenv('BUILDKITE_BUILD_URL'), 'buildkite')
url = f"https://reviews.llvm.org/D{os.getenv('ph_buildable_revision')}?id={diff_id}"
annotate(f"Build for [D{os.getenv('ph_buildable_revision')}#{diff_id}]({url}). "
f"[Harbormaster build](https://reviews.llvm.org/harbormaster/build/{os.getenv('ph_build_id')}).\n"
f"If there is a build infrastructure issue, please [create a bug]({feedback_url()}).")
# List all affected projects.
repo = git.Repo('.')
@ -55,17 +55,19 @@ if __name__ == '__main__':
steps = []
projects = cp.add_dependencies(affected_projects)
logging.info(f'projects with dependencies: {projects}')
# Add generic Linux checks.
excluded_linux = cp.get_excluded('linux')
logging.info(f'excluded for linux: {excluded_linux}')
linux_projects = projects - excluded_linux
if len(linux_projects) > 0:
steps.extend(generic_linux(';'.join(sorted(linux_projects)), True))
# Add generic Windows steps.
excluded_windows = cp.get_excluded('windows')
logging.info(f'excluded for windows: {excluded_windows}')
windows_projects = projects - excluded_windows
if len(windows_projects) > 0:
steps.extend(generic_windows(';'.join(sorted(windows_projects))))
# Add custom checks.
for gen in steps_generators:
steps.extend(from_shell_output(gen))
@ -78,7 +80,7 @@ if __name__ == '__main__':
})
report_step = {
'label': ':spiral_note_pad: report',
'label': ':phabricator: update build status on Phabricator',
'commands': [
*checkout_scripts('linux', scripts_refspec),
'${SRC}/scripts/summary.py',

View file

@ -29,53 +29,30 @@ from typing import Callable
import clang_format_report
import clang_tidy_report
import run_cmake
import test_results_report
from buildkite_utils import upload_file
from buildkite_utils import upload_file, annotate, strip_emojis
from exec_utils import watch_shell, if_not_matches, tee
from phabtalk.phabtalk import Report, PhabTalk, Step
def ninja_all_report(step: Step, _: Report, filter_output: bool):
print('Full log will be available in Artifacts "ninja-all.log"', flush=True)
def ninja_all_report(step: Step, _: Report):
step.reproduce_commands.append('ninja all')
with open(f'{artifacts_dir}/ninja-all.log', 'wb') as f:
w = sys.stdout.buffer.write
if filter_output:
r = re.compile(r'^\[.*] (Building|Linking|Linting|Copying|Generating|Creating)')
w = partial(if_not_matches, write=sys.stdout.buffer.write, regexp=r)
rc = watch_shell(
partial(tee, write1=w, write2=f.write),
partial(tee, write1=sys.stderr.buffer.write, write2=f.write),
'ninja all', cwd=build_dir)
logging.debug(f'ninja all: returned {rc}')
step.set_status_from_exit_code(rc)
if not step.success:
report.add_artifact(artifacts_dir, 'ninja-all.log', 'build failed')
rc = watch_shell(
sys.stdout.buffer.write,
sys.stderr.buffer.write,
'ninja all', cwd=build_dir)
logging.debug(f'ninja all: returned {rc}')
step.set_status_from_exit_code(rc)
def ninja_check_all_report(step: Step, _: Report, filter_output: bool):
def ninja_check_all_report(step: Step, _: Report):
print('Full log will be available in Artifacts "ninja-check-all.log"', flush=True)
step.reproduce_commands.append('ninja check-all')
with open(f'{artifacts_dir}/ninja-check-all.log', 'wb') as f:
w = sys.stdout.buffer.write
if filter_output:
r = re.compile(r'^(\[.*] (Building|Linking|Generating)|(PASS|XFAIL|UNSUPPORTED):)')
w = partial(if_not_matches, write=sys.stdout.buffer.write, regexp=r)
rc = watch_shell(
partial(tee, write1=w, write2=f.write),
partial(tee, write1=sys.stderr.buffer.write, write2=f.write),
'ninja check-all', cwd=build_dir)
logging.debug(f'ninja check-all: returned {rc}')
step.set_status_from_exit_code(rc)
test_results_report.run(build_dir, 'test-results.xml', step, report)
if not step.success:
message = 'tests failed'
f = report.test_stats['fail']
if f == 1:
message = '1 test failed'
if f > 1:
message = f'{f} tests failed'
report.add_artifact(artifacts_dir, 'ninja-check-all.log', message)
rc = watch_shell(
sys.stdout.buffer.write,
sys.stderr.buffer.write,
'ninja check-all', cwd=build_dir)
logging.debug(f'ninja check-all: returned {rc}')
step.set_status_from_exit_code(rc)
def run_step(name: str, report: Report, thunk: Callable[[Step, Report], None]) -> Step:
@ -85,9 +62,13 @@ def run_step(name: str, report: Report, thunk: Callable[[Step, Report], None]) -
step.name = name
thunk(step, report)
step.duration = time.time() - start
# Expand section if it failed.
# Expand section if step has failed.
if not step.success:
print('^^^ +++', flush=True)
if step.success:
annotate(f"{name}: OK")
else:
annotate(f"{name}: FAILED", style='error')
report.steps.append(step)
return step
@ -114,13 +95,15 @@ if __name__ == '__main__':
parser.add_argument('--log-level', type=str, default='WARNING')
parser.add_argument('--check-clang-format', action='store_true')
parser.add_argument('--check-clang-tidy', action='store_true')
parser.add_argument('--filter-output', action='store_true')
parser.add_argument('--projects', type=str, default='detect',
help="Projects to select, either a list or projects like 'clang;libc', or "
"'detect' to automatically infer proejcts from the diff, or "
"'default' to add all enabled projects")
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
ctx = strip_emojis(os.getenv('BUILDKITE_LABEL', 'default'))
annotate(os.getenv('BUILDKITE_LABEL', 'default'), context=ctx)
build_dir = ''
step_key = os.getenv("BUILDKITE_STEP_KEY")
scripts_dir = pathlib.Path(__file__).parent.absolute()
@ -130,16 +113,13 @@ if __name__ == '__main__':
report = Report()
report.os = f'{os.getenv("BUILDKITE_AGENT_META_DATA_OS")}'
report.name = step_key
report.success = False
# Create report with failure in case something below fails.
with open(report_path, 'w') as f:
json.dump(report.__dict__, f, default=as_dict)
report.success = True
cmake = run_step('cmake', report, lambda s, r: cmake_report(args.projects, s, r))
if cmake.success:
ninja_all = run_step('ninja all', report, partial(ninja_all_report, filter_output=args.filter_output))
ninja_all = run_step('ninja all', report, ninja_all_report)
if ninja_all.success:
run_step('ninja check-all', report, partial(ninja_check_all_report, filter_output=args.filter_output))
run_step('ninja check-all', report, ninja_check_all_report)
if args.check_clang_tidy:
run_step('clang-tidy', report,
lambda s, r: clang_tidy_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-tidy.ignore'), s, r))
@ -147,34 +127,25 @@ if __name__ == '__main__':
run_step('clang-format', report,
lambda s, r: clang_format_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-format.ignore'), s, r))
logging.debug(report)
print('+++ Summary', flush=True)
for s in report.steps:
mark = 'OK '
if not s.success:
report.success = False
mark = 'FAIL '
msg = ''
if len(s.messages):
msg = ': ' + '\n '.join(s.messages)
print(f'{mark} {s.name}{msg}', flush=True)
print('--- Reproduce build locally', flush=True)
print(f'git clone {os.getenv("BUILDKITE_REPO")} llvm-project')
print('cd llvm-project')
print(f'git checkout {os.getenv("BUILDKITE_COMMIT")}')
summary = []
summary.append('''
<details>
<summary>Reproduce build locally</summary>
```''')
summary.append(f'git clone {os.getenv("BUILDKITE_REPO")} llvm-project')
summary.append('cd llvm-project')
summary.append(f'git checkout {os.getenv("BUILDKITE_COMMIT")}')
for s in report.steps:
if len(s.reproduce_commands) == 0:
continue
print('\n'.join(s.reproduce_commands), flush=True)
print('', flush=True)
if not report.success:
print('^^^ +++', flush=True)
summary.append('\n'.join(s.reproduce_commands))
summary.append('```\n</details>')
annotate('\n'.join(summary), style='success')
ph_target_phid = os.getenv('ph_target_phid')
if ph_target_phid is not None:
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'))
for u in report.unit:
u['engine'] = step_key
phabtalk.update_build_status(ph_target_phid, True, report.success, report.lint, report.unit)
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), dry_run_updates=(os.getenv('ph_dry_run_report') is not None))
phabtalk.update_build_status(ph_target_phid, True, report.success, report.lint, [])
for a in report.artifacts:
url = upload_file(a['dir'], a['file'])
if url is not None:

View file

@ -6,4 +6,5 @@ phabricator==0.7.0
pyaml==20.4.0
requests==2.24.0
retrying==1.3.3
unidiff==0.6.0
unidiff==0.6.0
python-benedict==0.22.0

View file

@ -27,7 +27,7 @@ if __name__ == '__main__':
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'))
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), dry_run_updates=(os.getenv('ph_dry_run_report') is not None))
ph_target_phid = os.getenv('ph_target_phid')
if ph_target_phid is None:
logging.warning('ph_target_phid is not specified. Will not update the build status in Phabricator')

View file

@ -28,7 +28,6 @@ def generic_linux(projects: str, check_diff: bool) -> List:
return []
scripts_refspec = os.getenv("ph_scripts_refspec", "master")
no_cache = os.getenv('ph_no_cache') is not None
filter_output = '--filter-output' if os.getenv('ph_no_filter_output') is None else ''
log_level = os.getenv('ph_log_level', 'WARNING')
linux_agents = {'queue': 'linux'}
t = os.getenv('ph_linux_agents')
@ -48,7 +47,7 @@ def generic_linux(projects: str, check_diff: bool) -> List:
if check_diff:
commands.extend([
'${SRC}/scripts/premerge_checks.py --check-clang-format --check-clang-tidy '
f'--projects="{projects}" --log-level={log_level} {filter_output}',
f'--projects="{projects}" --log-level={log_level}',
])
else:
commands.extend([
@ -63,10 +62,10 @@ def generic_linux(projects: str, check_diff: bool) -> List:
])
linux_buld_step = {
'label': ':linux: build and test linux',
'label': ':linux: x64 debian',
'key': 'linux',
'commands': commands,
'artifact_paths': ['artifacts/**/*', '*_result.json'],
'artifact_paths': ['artifacts/**/*', '*_result.json', 'build/test-results.xml'],
'agents': linux_agents,
'timeout_in_minutes': 120,
'retry': {'automatic': [
@ -83,7 +82,6 @@ def generic_windows(projects: str) -> List:
scripts_refspec = os.getenv("ph_scripts_refspec", "master")
no_cache = os.getenv('ph_no_cache') is not None
log_level = os.getenv('ph_log_level', 'WARNING')
filter_output = '--filter-output' if os.getenv('ph_no_filter_output') is None else ''
clear_sccache = 'powershell -command "sccache --stop-server; echo \\$env:SCCACHE_DIR; ' \
'Remove-Item -Recurse -Force -ErrorAction Ignore \\$env:SCCACHE_DIR; ' \
'sccache --start-server"'
@ -92,7 +90,7 @@ def generic_windows(projects: str) -> List:
if t is not None:
win_agents = json.loads(t)
windows_buld_step = {
'label': ':windows: build and test windows',
'label': ':windows: x64 windows',
'key': 'windows',
'commands': [
clear_sccache if no_cache else '',
@ -100,7 +98,7 @@ def generic_windows(projects: str) -> List:
*checkout_scripts('windows', scripts_refspec),
'powershell -command "'
f'%SRC%/scripts/premerge_checks.py --projects=\'{projects}\' --log-level={log_level} {filter_output}; '
f'%SRC%/scripts/premerge_checks.py --projects=\'{projects}\' --log-level={log_level}; '
'\\$exit=\\$?;'
'sccache --show-stats;'
'if (\\$exit) {'
@ -111,7 +109,7 @@ def generic_windows(projects: str) -> List:
' exit 1;'
'}"',
],
'artifact_paths': ['artifacts/**/*', '*_result.json'],
'artifact_paths': ['artifacts/**/*', '*_result.json', 'build/test-results.xml'],
'agents': win_agents,
'timeout_in_minutes': 90,
'retry': {'automatic': [
@ -164,7 +162,7 @@ def checkout_scripts(target_os: str, scripts_refspec: str) -> []:
'git checkout x',
'echo llvm-premerge-checks commit:',
'git rev-parse HEAD',
'pip install -r %SRC%/scripts/requirements.txt',
'pip install -q -r %SRC%/scripts/requirements.txt',
'cd %BUILDKITE_BUILD_CHECKOUT_PATH%',
]
return [
@ -176,6 +174,6 @@ def checkout_scripts(target_os: str, scripts_refspec: str) -> []:
'git checkout x',
'echo "llvm-premerge-checks commit"',
'git rev-parse HEAD',
'pip install -r ${SRC}/scripts/requirements.txt',
'pip install -q -r ${SRC}/scripts/requirements.txt',
'cd "$BUILDKITE_BUILD_CHECKOUT_PATH"',
]

View file

@ -18,44 +18,90 @@ import logging
import os
from phabtalk.phabtalk import PhabTalk
from buildkite_utils import format_url, BuildkiteApi
from buildkite_utils import format_url, BuildkiteApi, strip_emojis
import test_results_report
from benedict import benedict
def get_failed_jobs(build: benedict) -> []:
    """Return the names of all jobs in *build* that ended in the 'failed' state.

    Jobs without a name are skipped so the result only contains displayable
    entries.
    """
    names = []
    for raw_job in build.get('jobs', []):
        job = benedict(raw_job)
        name = job.get('name')
        if name and job.get('state') == 'failed':
            names.append(name)
    return names
def process_unit_test_reports(bk: BuildkiteApi, build: benedict, prefix: str) -> []:
    """Collect failed test cases from 'test-results.xml' artifacts of failed jobs.

    Only jobs of type 'script' that ended in the 'failed' state are inspected;
    recursively triggered pipelines are not processed at the moment. Each
    failure is tagged with a context built from *prefix* and the job name
    (falling back to the pipeline name), with emojis stripped.

    :param bk: authenticated Buildkite API client used to fetch artifacts.
    :param build: build object (benedict) whose jobs are scanned.
    :param prefix: string prepended to the per-job context label.
    :return: list of failed-test dicts as produced by test_results_report.parse_failures.
    """
    failures = []
    for raw_job in build.get('jobs', []):
        job = benedict(raw_job)
        # Job must run scripts and fail to be considered.
        if job.get('state') != 'failed' or job.get('type') != 'script':
            continue
        artifacts_url = job.get('artifacts_url')
        if artifacts_url is None:
            continue
        for raw_artifact in bk.get(artifacts_url).json():
            artifact = benedict(raw_artifact)
            # Only downloadable JUnit-style result files are relevant.
            if not artifact.get('filename').endswith('test-results.xml') or not artifact.get('download_url'):
                continue
            content = bk.get(artifact.get('download_url')).content
            context = strip_emojis(prefix + ' ' + job.get('name', build.get('pipeline.name')))
            failures.extend(test_results_report.parse_failures(content, context))
    return failures
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--log-level', type=str, default='INFO')
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
print(f'Branch {os.getenv("BUILDKITE_BRANCH")} at {os.getenv("BUILDKITE_REPO")}', flush=True)
ph_buildable_diff = os.getenv('ph_buildable_diff')
if ph_buildable_diff is not None:
url = f'https://reviews.llvm.org/D{os.getenv("ph_buildable_revision")}?id={ph_buildable_diff}'
print(f'Review: {format_url(url)}', flush=True)
if os.getenv('BUILDKITE_TRIGGERED_FROM_BUILD_NUMBER') is not None:
url = f'https://buildkite.com/llvm-project/' \
f'{os.getenv("BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG")}/' \
f'builds/{os.getenv("BUILDKITE_TRIGGERED_FROM_BUILD_NUMBER")}'
print(f'Triggered from build {format_url(url)}', flush=True)
ph_target_phid = os.getenv('ph_target_phid')
if ph_target_phid is None:
logging.warning('ph_target_phid is not specified. Will not update the build status in Phabricator')
exit(0)
bk = BuildkiteApi(os.getenv("BUILDKITE_API_TOKEN"), os.getenv("BUILDKITE_ORGANIZATION_SLUG"))
build = bk.get_build(os.getenv("BUILDKITE_PIPELINE_SLUG"), os.getenv("BUILDKITE_BUILD_NUMBER"))
success = True
build.setdefault('jobs', [])
for j in build['jobs']:
j.setdefault('state', '')
j.setdefault('id', '')
logging.info(f'{j["id"]} state {j["state"]}')
success = success and (j['state'] != 'failed')
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'))
build_url = f'https://reviews.llvm.org/harbormaster/build/{os.getenv("ph_build_id")}'
print(f'Reporting results to Phabricator build {format_url(build_url)}', flush=True)
phabtalk.update_build_status(ph_target_phid, False, success, {}, [])
bug_url = f'https://github.com/google/llvm-premerge-checks/issues/new?assignees=&labels=bug' \
f'&template=bug_report.md&title=buildkite build {os.getenv("BUILDKITE_PIPELINE_SLUG")} ' \
f'{os.getenv("BUILDKITE_BUILD_NUMBER")}'
print(f'{format_url(bug_url, "report issue")}', flush=True)
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), dry_run_updates=(os.getenv('ph_dry_run_report') is not None))
report_success = False # for try block
failed_tests = []
try:
bk = BuildkiteApi(os.getenv("BUILDKITE_API_TOKEN"), os.getenv("BUILDKITE_ORGANIZATION_SLUG"))
build = bk.get_build(os.getenv("BUILDKITE_PIPELINE_SLUG"), os.getenv("BUILDKITE_BUILD_NUMBER"))
success = True
failed_tests = process_unit_test_reports(bk, build, '')
for i, job in enumerate(build.get('jobs', [])):
job = benedict(job)
job_web_url = job.get('web_url', os.getenv('BUILDKITE_BUILD_URL', ''))
logging.info(f'{job.get("id")} state {job.get("state")}')
job_state = job.get('state')
if job.get('type') == 'waiter':
continue
if job_state != 'passed' and job_state != 'failed':
# Current and irrelevant steps.
continue
if job_state == 'passed' and i == 0:
# Skip successful first step as we assume it to be a pipeline setup
continue
name = job.get('name')
if job.get('type') == 'trigger':
job_web_url = job.get('triggered_build.web_url', job_web_url)
triggered_url = job.get('triggered_build.url')
if triggered_url != '':
sub_build = benedict(bk.get(triggered_url).json())
name = name or sub_build.get('pipeline.name')
failed_steps = get_failed_jobs(sub_build)
failed_tests.extend(process_unit_test_reports(bk, sub_build, name))
if job_state == 'failed' and failed_steps:
name = f"{name} ({', '.join(failed_steps[:2])}{', ...' if len(failed_steps) > 2 else ''})"
name = strip_emojis(name) or 'unknown'
phabtalk.maybe_add_url_artifact(ph_target_phid, job_web_url, f"{name} {job_state}")
if job_state == 'failed':
success = False
report_success = success # Must be last before finally: block to report errors in this script.
finally:
build_url = f'https://reviews.llvm.org/harbormaster/build/{os.getenv("ph_build_id")}'
print(f'Reporting results to Phabricator build {format_url(build_url)}', flush=True)
phabtalk.update_build_status(ph_target_phid, False, report_success, {}, failed_tests)

View file

@ -28,9 +28,8 @@ def run(working_dir: str, test_results: str, step: Optional[Step], report: Optio
step = Step()
path = os.path.join(working_dir, test_results)
if not os.path.exists(path):
logging.warning(f'{path} is not found')
logging.error(f'{path} is not found')
step.success = False
step.messages.append(f'test report "{path}" is not found')
return
try:
success = True
@ -63,13 +62,30 @@ def run(working_dir: str, test_results: str, step: Optional[Step], report: Optio
msg += f'{test_case["namespace"]}/{test_case["name"]}\n'
except Exception as e:
logging.error(e)
step.messages.append('Parsing of test results failed')
step.success = False
logging.debug(f'report: {report}')
logging.debug(f'step: {step}')
def parse_failures(test_xml: bytes, context: str) -> []:
    """Extract failing test cases from a JUnit-style XML report.

    Scans every ``<testcase>`` element and keeps only those with a
    ``<failure>`` child, converting each into a Harbormaster-compatible unit
    test dict. *context* is recorded as the 'engine' of every failure.

    :param test_xml: raw XML report bytes.
    :param context: label identifying where the tests ran.
    :return: list of failure dicts (name, namespace, duration, details, ...).
    """
    root = etree.fromstring(test_xml)
    failures = []
    for case in root.xpath('//testcase'):
        failure_node = case.find('failure')
        if failure_node is None:
            continue
        attrs = case.attrib
        failures.append({
            'engine': context,
            'name': attrs['name'],
            'namespace': attrs['classname'],
            'result': 'fail',
            'duration': float(attrs['time']),
            'details': failure_node.text,
        })
    return failures
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Processes results from xml report')
parser.add_argument('test-report', default='build/test-results.xml')