
Process results and unit-test output of libcxx

Now "report" step combines result in a uniform way and processes unit test
results XML output. It works for sub-builds only started from the 'premerge'
pipeline, i.e. non-recursive. One downside is that now one has to wait until
all jobs have finished.
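
For reference, each failed test case ends up reported to Phabricator (Harbormaster) as a small dictionary. A sketch of that shape, with field names taken from the new `parse_failures()` in `test_results_report.py` at the bottom of this diff and purely illustrative values:

```python
# Illustrative entry produced by test_results_report.parse_failures() for one failed
# <testcase> element of build/test-results.xml; 'engine' carries the job/pipeline
# context the failure came from (emojis stripped).
failed_case = {
    'engine': 'linux',                  # hypothetical context label
    'name': 'my_failing_test',          # hypothetical test name
    'namespace': 'libcxx.test_suite',   # junit "classname" attribute
    'result': 'fail',
    'duration': 0.42,                   # seconds, junit "time" attribute
    'details': 'assertion failed ...',  # text of the <failure> element
}
```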

- Add instructions to set up the Python environment

- Add an option to run the full report cycle without calling Phabricator (`ph_dry_run_report`)

- use "annotations" to show build status. That lifts the need to filter ninja
  and other output (thus `ph_no_filter_output` param removed) and output
  everything. That is nice as script failures no longer lead to loss of logs.

- Improve annotate() usability

- Misc fixes
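
A minimal sketch of the new annotation flow from a build step's point of view (the `annotate()` helper lives in this commit's `buildkite_utils` module; the messages are examples only):

```python
# Build steps now surface their status via Buildkite annotations instead of filtering
# command output. annotate() shells out to `buildkite-agent annotate` with
# --style/--context/--append; the most severe style seen for a context wins.
from buildkite_utils import annotate

annotate('ninja all: OK')                           # default style, appended to the current context
annotate('ninja check-all: FAILED', style='error')  # highlighted on the build page
```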
This commit is contained in:
Mikhail Goncharov 2020-11-25 15:29:50 +01:00
parent 515f086ba2
commit 681fbbe2cf
15 changed files with 228 additions and 177 deletions

View file

@ -10,6 +10,20 @@
# Playbooks # Playbooks
## Development environment
You will need a recent Python 3 installed; see e.g. this
[installation guide](https://cloud.google.com/python/docs/setup?hl=en).
To install the required packages, run:
```shell script
pip install -r ./scripts/requirements.txt
```
Optionally, for working with results in Jupyter Lab:
```shell script
pip install jupyterlab pandas seaborn
```
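
A quick sanity check that the environment is usable (a sketch only; the package list follows `scripts/requirements.txt` as extended in this commit):

```python
# Verify that the packages used by the premerge scripts can be imported.
import benedict     # provided by python-benedict
import phabricator
import requests
import unidiff

print('python environment for premerge scripts looks OK')
```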
## Testing scripts locally ## Testing scripts locally
Build and run agent docker image `sudo ./containers/build_run.sh buildkite-premerge-debian /bin/bash`. Build and run agent docker image `sudo ./containers/build_run.sh buildkite-premerge-debian /bin/bash`.
@ -165,11 +179,11 @@ please refer to the source code for the details. These variables have `ph_` pref
Most commonly used are: Most commonly used are:
- `ph_scripts_refspec` ("master" by default): refspec branch of llvm-premerge-checks to use. This variable is also used in pipeline "bootstrap" in Buildkite interface. - `ph_scripts_refspec` ("master" by default): refspec branch of llvm-premerge-checks to use. This variable is also used in pipeline "bootstrap" in Buildkite interface.
- `ph_dry_run_report`: do not report any results back to Phabricator.
- `ph_no_cache`: (if set to any value) clear compilation cache before the build. - `ph_no_cache`: (if set to any value) clear compilation cache before the build.
- `ph_projects`: which projects to use, "detect" will look on diff to infer the projects, "default" selects all projects. - `ph_projects`: which projects to use, "detect" will look on diff to infer the projects, "default" selects all projects.
- `ph_notify_email`: comma-separated list of email addresses to be notified when build is complete. - `ph_notify_email`: comma-separated list of email addresses to be notified when build is complete.
- `ph_log_level` ("DEBUG", "INFO", "WARNING" (default) or "ERROR"): log level for build scripts. - `ph_log_level` ("DEBUG", "INFO", "WARNING" (default) or "ERROR"): log level for build scripts.
- `ph_no_filter_output` (if set to any value): do not filter output of `ninja all` and other commands from buildkite log.
- `ph_linux_agents`, `ph_windows_agents`: custom JSON constraints on agents. For example you might put one machine to a custom queue if it's errornous and send jobs to it with `ph_windows_agents="{{\"queue\": \"custom\"}}"`. - `ph_linux_agents`, `ph_windows_agents`: custom JSON constraints on agents. For example you might put one machine to a custom queue if it's errornous and send jobs to it with `ph_windows_agents="{{\"queue\": \"custom\"}}"`.
- `ph_skip_linux`, `ph_skip_windows` (if set to any value): skip build on this OS. - `ph_skip_linux`, `ph_skip_windows` (if set to any value): skip build on this OS.
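
These are plain environment variables read by the build scripts; a sketch of how they are typically consumed (patterns taken from the pipeline-generation scripts and `summary.py` in this commit):

```python
# How the scripts read ph_* variables; for boolean flags, presence alone enables the behaviour.
import os

dry_run = os.getenv('ph_dry_run_report') is not None   # do not post results to Phabricator
no_cache = os.getenv('ph_no_cache') is not None         # clear compilation cache first
log_level = os.getenv('ph_log_level', 'WARNING')
projects = os.getenv('ph_projects', 'detect')           # 'detect' infers projects from the diff
```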

View file

@ -28,5 +28,6 @@ if __name__ == '__main__':
args = parser.parse_args() args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s') logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
PhabTalk(os.getenv('CONDUIT_TOKEN')).maybe_add_url_artifact(args.phid, args.url, args.name) dry = os.getenv('ph_dry_run_report') is not None
PhabTalk(os.getenv('CONDUIT_TOKEN'), dry_run_updates=dry).maybe_add_url_artifact(args.phid, args.url, args.name)

View file

@ -3,13 +3,14 @@ import os
import re import re
import subprocess import subprocess
import urllib.parse import urllib.parse
import shlex
from typing import Optional from typing import Optional
from benedict import benedict
import backoff import backoff
import requests import requests
context_style = {} context_style = {}
previous_context = 'default'
styles = ['default', 'info', 'success', 'warning', 'error'] styles = ['default', 'info', 'success', 'warning', 'error']
@ -30,26 +31,30 @@ def upload_file(base_dir: str, file: str):
return None return None
def annotate(message: str, style: str = 'default', context: str = 'default', append: bool = True): def annotate(message: str, style: str = 'default', context: Optional[str] = None, append: bool = True):
""" """
Adds an annotation for that currently running build. Adds an annotation for that currently running build.
Note that last `style` applied to the same `context` takes precedence. Note that last `style` applied to the same `context` takes precedence.
""" """
global previous_context, styles, context_style
if style not in styles: if style not in styles:
style = 'default' style = 'default'
if context is None:
context = previous_context
previous_context = context
# Pick most severe style so far. # Pick most severe style so far.
context_style.setdefault(context, 0) context_style.setdefault(context, 0)
context_style[context] = max(styles.index(style), context_style[context]) context_style[context] = max(styles.index(style), context_style[context])
style = styles[context_style[context]] style = styles[context_style[context]]
if append: if append:
message += '\n\n' message += '\n\n'
r = subprocess.run(f"buildkite-agent annotate {shlex.quote(message)}" cmd = ['buildkite-agent', 'annotate', message, '--style', style, '--context', context]
f' --style={shlex.quote(style)}' if append:
f" {'--append' if append else ''}" cmd.append('--append')
f" --context={shlex.quote(context)}", shell=True, capture_output=True) r = subprocess.run(cmd, capture_output=True)
logging.debug(f'annotate call {r}') logging.debug(f'annotate call {r}')
if r.returncode != 0: if r.returncode != 0:
logging.warning(message) logging.warning(r)
def feedback_url(): def feedback_url():
@ -63,18 +68,24 @@ class BuildkiteApi:
self.token = token self.token = token
self.organization = organization self.organization = organization
@backoff.on_exception(backoff.expo, Exception, max_tries=3, logger='', factor=3)
def get_build(self, pipeline: str, build_number: str): def get_build(self, pipeline: str, build_number: str):
authorization = f'Bearer {self.token}'
# https://buildkite.com/docs/apis/rest-api/builds#get-a-build # https://buildkite.com/docs/apis/rest-api/builds#get-a-build
url = f'https://api.buildkite.com/v2/organizations/{self.organization}/pipelines/{pipeline}/builds/{build_number}' return benedict(self.get(f'https://api.buildkite.com/v2/organizations/{self.organization}/pipelines/{pipeline}/builds/{build_number}').json())
response = requests.get(url, headers={'Authorization': authorization})
@backoff.on_exception(backoff.expo, Exception, max_tries=3, logger='', factor=3)
def get(self, url: str):
authorization = f'Bearer {self.token}'
response = requests.get(url, allow_redirects=True, headers={'Authorization': authorization})
if response.status_code != 200: if response.status_code != 200:
raise Exception(f'Builkite responded with non-OK status: {response.status_code}') raise Exception(f'Buildkite responded with non-OK status: {response.status_code}')
return response.json() return response
def format_url(url: str, name: Optional[str] = None): def format_url(url: str, name: Optional[str] = None):
if name is None: if name is None:
name = url name = url
return f"\033]1339;url='{url}';content='{name}'\a\n" return f"\033]1339;url='{url}';content='{name}'\a\n"
def strip_emojis(s: str) -> str:
return re.sub(r':[^:]+:', '', s).strip()

View file

@ -22,6 +22,7 @@ import pathspec
import unidiff import unidiff
from phabtalk.phabtalk import Report, Step from phabtalk.phabtalk import Report, Step
from buildkite_utils import annotate
def get_diff(base_commit) -> Tuple[bool, str]: def get_diff(base_commit) -> Tuple[bool, str]:
@ -93,8 +94,8 @@ def run(base_commit, ignore_config, step: Optional[Step], report: Optional[Repor
report.add_artifact(os.getcwd(), patch_file, 'clang-format') report.add_artifact(os.getcwd(), patch_file, 'clang-format')
if not success: if not success:
step.success = False step.success = False
step.messages.append( annotate(f'clang-format: Please format your changes with clang-format by running `git-clang-format HEAD^`'
'Please format your changes with clang-format by running `git-clang-format HEAD^` or applying patch.') f' or applying the attached patch.', style='error')
logging.debug(f'report: {report}') logging.debug(f'report: {report}')
logging.debug(f'step: {step}') logging.debug(f'step: {step}')

View file

@ -22,7 +22,7 @@ from typing import Optional
import pathspec import pathspec
import ignore_diff import ignore_diff
from buildkite_utils import format_url from buildkite_utils import annotate
from phabtalk.phabtalk import Report, Step from phabtalk.phabtalk import Report, Step
@ -100,11 +100,9 @@ def run(base_commit, ignore_config, step: Optional[Step], report: Optional[Repor
report.add_artifact(os.getcwd(), p, 'clang-tidy') report.add_artifact(os.getcwd(), p, 'clang-tidy')
if errors_count + warn_count != 0: if errors_count + warn_count != 0:
step.success = False step.success = False
url = format_url("https://github.com/google/llvm-premerge-checks/blob/master/docs/clang_tidy.md" url = "https://github.com/google/llvm-premerge-checks/blob/master/docs/clang_tidy.md#review-comments."
"#review-comments.", "why?") annotate(f'clang-tidy found {errors_count} errors and {warn_count} warnings. {inline_comments} of them were '
step.messages.append( f'added as review comments [why?]({url})', style='error')
f'clang-tidy found {errors_count} errors and {warn_count} warnings. {inline_comments} of them are added '
f'as review comments {url}')
logging.debug(f'report: {report}') logging.debug(f'report: {report}')
logging.debug(f'step: {step}') logging.debug(f'step: {step}')

View file

@ -17,8 +17,6 @@ Interactions with Phabricator.
""" """
import logging import logging
import socket
import time
from typing import Optional, List, Dict from typing import Optional, List, Dict
import uuid import uuid
@ -32,33 +30,26 @@ class PhabTalk:
""" """
def __init__(self, token: Optional[str], host: Optional[str] = 'https://reviews.llvm.org/api/', def __init__(self, token: Optional[str], host: Optional[str] = 'https://reviews.llvm.org/api/',
dryrun: bool = False): dry_run_updates: bool = False):
self._phab = None # type: Optional[Phabricator] self._phab = None # type: Optional[Phabricator]
if not dryrun: self.dry_run_updates = dry_run_updates
self._phab = Phabricator(token=token, host=host) self._phab = Phabricator(token=token, host=host)
self.update_interfaces() self.update_interfaces()
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3) @backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def update_interfaces(self): def update_interfaces(self):
self._phab.update_interfaces() self._phab.update_interfaces()
@property
def dryrun(self):
return self._phab is None
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3) @backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def get_revision_id(self, diff: str) -> Optional[str]: def get_revision_id(self, diff: str) -> Optional[str]:
"""Get the revision ID for a diff from Phabricator.""" """Get the revision ID for a diff from Phabricator."""
if self.dryrun:
return None
result = self._phab.differential.querydiffs(ids=[diff]) result = self._phab.differential.querydiffs(ids=[diff])
return 'D' + result[diff]['revisionID'] return 'D' + result[diff]['revisionID']
def comment_on_diff(self, diff_id: str, text: str): def comment_on_diff(self, diff_id: str, text: str):
"""Add a comment to a differential based on the diff_id""" """Add a comment to a differential based on the diff_id"""
print('Sending comment to diff {}:'.format(diff_id)) logging.info('Sending comment to diff {}:'.format(diff_id))
print(text) logging.info(text)
revision_id = self.get_revision_id(diff_id) revision_id = self.get_revision_id(diff_id)
if revision_id is not None: if revision_id is not None:
self._comment_on_revision(revision_id, text) self._comment_on_revision(revision_id, text)
@ -72,16 +63,16 @@ class PhabTalk:
'value': text 'value': text
}] }]
if self.dryrun: if self.dry_run_updates:
print('differential.revision.edit =================') logging.info('differential.revision.edit =================')
print('Transactions: {}'.format(transactions)) logging.info('Transactions: {}'.format(transactions))
return return
# API details at # API details at
# https://secure.phabricator.com/conduit/method/differential.revision.edit/ # https://secure.phabricator.com/conduit/method/differential.revision.edit/
self._phab.differential.revision.edit(objectIdentifier=revision, self._phab.differential.revision.edit(objectIdentifier=revision,
transactions=transactions) transactions=transactions)
print('Uploaded comment to Revision D{}:{}'.format(revision, text)) logging.info('Uploaded comment to Revision D{}:{}'.format(revision, text))
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3) @backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def update_build_status(self, phid: str, working: bool, success: bool, lint: {}, unit: []): def update_build_status(self, phid: str, working: bool, success: bool, lint: {}, unit: []):
@ -114,11 +105,11 @@ class PhabTalk:
} }
lint_messages.append(lint_message) lint_messages.append(lint_message)
if self.dryrun: if self.dry_run_updates:
print('harbormaster.sendmessage =================') logging.info('harbormaster.sendmessage =================')
print('type: {}'.format(result_type)) logging.info('type: {}'.format(result_type))
print('unit: {}'.format(unit)) logging.info('unit: {}'.format(unit))
print('lint: {}'.format(lint_messages)) logging.info('lint: {}'.format(lint_messages))
return return
self._phab.harbormaster.sendmessage( self._phab.harbormaster.sendmessage(
@ -126,16 +117,16 @@ class PhabTalk:
type=result_type, type=result_type,
unit=unit, unit=unit,
lint=lint_messages) lint=lint_messages)
print('Uploaded build status {}, {} test results and {} lint results'.format( logging.info('Uploaded build status {}, {} test results and {} lint results'.format(
result_type, len(unit), len(lint_messages))) result_type, len(unit), len(lint_messages)))
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3) @backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def create_artifact(self, phid, artifact_key, artifact_type, artifact_data): def create_artifact(self, phid, artifact_key, artifact_type, artifact_data):
if self.dryrun: if self.dry_run_updates:
print('harbormaster.createartifact =================') logging.info('harbormaster.createartifact =================')
print('artifactKey: {}'.format(artifact_key)) logging.info('artifactKey: {}'.format(artifact_key))
print('artifactType: {}'.format(artifact_type)) logging.info('artifactType: {}'.format(artifact_type))
print('artifactData: {}'.format(artifact_data)) logging.info('artifactData: {}'.format(artifact_data))
return return
self._phab.harbormaster.createartifact( self._phab.harbormaster.createartifact(
buildTargetPHID=phid, buildTargetPHID=phid,
@ -144,6 +135,9 @@ class PhabTalk:
artifactData=artifact_data) artifactData=artifact_data)
def maybe_add_url_artifact(self, phid: str, url: str, name: str): def maybe_add_url_artifact(self, phid: str, url: str, name: str):
if self.dry_run_updates:
logging.info(f'add URL artifact "{name}" {url}')
return
if phid is None: if phid is None:
logging.warning('PHID is not provided, cannot create URL artifact') logging.warning('PHID is not provided, cannot create URL artifact')
return return
@ -155,7 +149,6 @@ class Step:
self.name = '' self.name = ''
self.success = True self.success = True
self.duration = 0.0 self.duration = 0.0
self.messages = []
self.reproduce_commands = [] self.reproduce_commands = []
def set_status_from_exit_code(self, exit_code: int): def set_status_from_exit_code(self, exit_code: int):

View file

@ -30,7 +30,7 @@ if __name__ == '__main__':
'label': 'create branch', 'label': 'create branch',
'key': 'create-branch', 'key': 'create-branch',
'commands': [ 'commands': [
'pip install -r scripts/requirements.txt', 'pip install -q -r scripts/requirements.txt',
'scripts/apply_patch.sh' 'scripts/apply_patch.sh'
], ],
'agents': {'queue': 'linux'}, 'agents': {'queue': 'linux'},

View file

@ -25,7 +25,6 @@ steps_generators = [
if __name__ == '__main__': if __name__ == '__main__':
scripts_refspec = os.getenv("ph_scripts_refspec", "master") scripts_refspec = os.getenv("ph_scripts_refspec", "master")
no_cache = os.getenv('ph_no_cache') is not None no_cache = os.getenv('ph_no_cache') is not None
filter_output = '--filter-output' if os.getenv('ph_no_filter_output') is None else ''
projects = os.getenv('ph_projects', 'clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;openmp;polly') projects = os.getenv('ph_projects', 'clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;openmp;polly')
log_level = os.getenv('ph_log_level', 'WARNING') log_level = os.getenv('ph_log_level', 'WARNING')
notify_emails = list(filter(None, os.getenv('ph_notify_emails', '').split(','))) notify_emails = list(filter(None, os.getenv('ph_notify_emails', '').split(',')))

View file

@ -16,9 +16,9 @@
import logging import logging
import os import os
from buildkite_utils import annotate, feedback_url
from choose_projects import ChooseProjects from choose_projects import ChooseProjects
import git import git
from phabtalk.phabtalk import PhabTalk
from steps import generic_linux, generic_windows, from_shell_output, checkout_scripts from steps import generic_linux, generic_windows, from_shell_output, checkout_scripts
import yaml import yaml
@ -30,15 +30,15 @@ if __name__ == '__main__':
scripts_refspec = os.getenv("ph_scripts_refspec", "master") scripts_refspec = os.getenv("ph_scripts_refspec", "master")
diff_id = os.getenv("ph_buildable_diff", "") diff_id = os.getenv("ph_buildable_diff", "")
no_cache = os.getenv('ph_no_cache') is not None no_cache = os.getenv('ph_no_cache') is not None
filter_output = '--filter-output' if os.getenv('ph_no_filter_output') is None else ''
projects = os.getenv('ph_projects', 'detect') projects = os.getenv('ph_projects', 'detect')
log_level = os.getenv('ph_log_level', 'INFO') log_level = os.getenv('ph_log_level', 'INFO')
logging.basicConfig(level=log_level, format='%(levelname)-7s %(message)s') logging.basicConfig(level=log_level, format='%(levelname)-7s %(message)s')
phid = os.getenv('ph_target_phid') phid = os.getenv('ph_target_phid')
# Add link in review to the build. url = f"https://reviews.llvm.org/D{os.getenv('ph_buildable_revision')}?id={diff_id}"
if phid is not None: annotate(f"Build for [D{os.getenv('ph_buildable_revision')}#{diff_id}]({url}). "
PhabTalk(os.getenv('CONDUIT_TOKEN')).maybe_add_url_artifact(phid, os.getenv('BUILDKITE_BUILD_URL'), 'buildkite') f"[Harbormaster build](https://reviews.llvm.org/harbormaster/build/{os.getenv('ph_build_id')}).\n"
f"If there is a build infrastructure issue, please [create a bug]({feedback_url()}).")
# List all affected projects. # List all affected projects.
repo = git.Repo('.') repo = git.Repo('.')
@ -55,17 +55,19 @@ if __name__ == '__main__':
steps = [] steps = []
projects = cp.add_dependencies(affected_projects) projects = cp.add_dependencies(affected_projects)
logging.info(f'projects with dependencies: {projects}') logging.info(f'projects with dependencies: {projects}')
# Add generic Linux checks.
excluded_linux = cp.get_excluded('linux') excluded_linux = cp.get_excluded('linux')
logging.info(f'excluded for linux: {excluded_linux}') logging.info(f'excluded for linux: {excluded_linux}')
linux_projects = projects - excluded_linux linux_projects = projects - excluded_linux
if len(linux_projects) > 0: if len(linux_projects) > 0:
steps.extend(generic_linux(';'.join(sorted(linux_projects)), True)) steps.extend(generic_linux(';'.join(sorted(linux_projects)), True))
# Add generic Windows steps.
excluded_windows = cp.get_excluded('windows') excluded_windows = cp.get_excluded('windows')
logging.info(f'excluded for windows: {excluded_windows}') logging.info(f'excluded for windows: {excluded_windows}')
windows_projects = projects - excluded_windows windows_projects = projects - excluded_windows
if len(windows_projects) > 0: if len(windows_projects) > 0:
steps.extend(generic_windows(';'.join(sorted(windows_projects)))) steps.extend(generic_windows(';'.join(sorted(windows_projects))))
# Add custom checks.
for gen in steps_generators: for gen in steps_generators:
steps.extend(from_shell_output(gen)) steps.extend(from_shell_output(gen))
@ -78,7 +80,7 @@ if __name__ == '__main__':
}) })
report_step = { report_step = {
'label': ':spiral_note_pad: report', 'label': ':phabricator: update build status on Phabricator',
'commands': [ 'commands': [
*checkout_scripts('linux', scripts_refspec), *checkout_scripts('linux', scripts_refspec),
'${SRC}/scripts/summary.py', '${SRC}/scripts/summary.py',

View file

@ -29,53 +29,30 @@ from typing import Callable
import clang_format_report import clang_format_report
import clang_tidy_report import clang_tidy_report
import run_cmake import run_cmake
import test_results_report from buildkite_utils import upload_file, annotate, strip_emojis
from buildkite_utils import upload_file
from exec_utils import watch_shell, if_not_matches, tee from exec_utils import watch_shell, if_not_matches, tee
from phabtalk.phabtalk import Report, PhabTalk, Step from phabtalk.phabtalk import Report, PhabTalk, Step
def ninja_all_report(step: Step, _: Report, filter_output: bool): def ninja_all_report(step: Step, _: Report):
print('Full log will be available in Artifacts "ninja-all.log"', flush=True)
step.reproduce_commands.append('ninja all') step.reproduce_commands.append('ninja all')
with open(f'{artifacts_dir}/ninja-all.log', 'wb') as f: rc = watch_shell(
w = sys.stdout.buffer.write sys.stdout.buffer.write,
if filter_output: sys.stderr.buffer.write,
r = re.compile(r'^\[.*] (Building|Linking|Linting|Copying|Generating|Creating)') 'ninja all', cwd=build_dir)
w = partial(if_not_matches, write=sys.stdout.buffer.write, regexp=r) logging.debug(f'ninja all: returned {rc}')
rc = watch_shell( step.set_status_from_exit_code(rc)
partial(tee, write1=w, write2=f.write),
partial(tee, write1=sys.stderr.buffer.write, write2=f.write),
'ninja all', cwd=build_dir)
logging.debug(f'ninja all: returned {rc}')
step.set_status_from_exit_code(rc)
if not step.success:
report.add_artifact(artifacts_dir, 'ninja-all.log', 'build failed')
def ninja_check_all_report(step: Step, _: Report, filter_output: bool): def ninja_check_all_report(step: Step, _: Report):
print('Full log will be available in Artifacts "ninja-check-all.log"', flush=True) print('Full log will be available in Artifacts "ninja-check-all.log"', flush=True)
step.reproduce_commands.append('ninja check-all') step.reproduce_commands.append('ninja check-all')
with open(f'{artifacts_dir}/ninja-check-all.log', 'wb') as f: rc = watch_shell(
w = sys.stdout.buffer.write sys.stdout.buffer.write,
if filter_output: sys.stderr.buffer.write,
r = re.compile(r'^(\[.*] (Building|Linking|Generating)|(PASS|XFAIL|UNSUPPORTED):)') 'ninja check-all', cwd=build_dir)
w = partial(if_not_matches, write=sys.stdout.buffer.write, regexp=r) logging.debug(f'ninja check-all: returned {rc}')
rc = watch_shell( step.set_status_from_exit_code(rc)
partial(tee, write1=w, write2=f.write),
partial(tee, write1=sys.stderr.buffer.write, write2=f.write),
'ninja check-all', cwd=build_dir)
logging.debug(f'ninja check-all: returned {rc}')
step.set_status_from_exit_code(rc)
test_results_report.run(build_dir, 'test-results.xml', step, report)
if not step.success:
message = 'tests failed'
f = report.test_stats['fail']
if f == 1:
message = '1 test failed'
if f > 1:
message = f'{f} tests failed'
report.add_artifact(artifacts_dir, 'ninja-check-all.log', message)
def run_step(name: str, report: Report, thunk: Callable[[Step, Report], None]) -> Step: def run_step(name: str, report: Report, thunk: Callable[[Step, Report], None]) -> Step:
@ -85,9 +62,13 @@ def run_step(name: str, report: Report, thunk: Callable[[Step, Report], None]) -
step.name = name step.name = name
thunk(step, report) thunk(step, report)
step.duration = time.time() - start step.duration = time.time() - start
# Expand section if it failed. # Expand section if step has failed.
if not step.success: if not step.success:
print('^^^ +++', flush=True) print('^^^ +++', flush=True)
if step.success:
annotate(f"{name}: OK")
else:
annotate(f"{name}: FAILED", style='error')
report.steps.append(step) report.steps.append(step)
return step return step
@ -114,13 +95,15 @@ if __name__ == '__main__':
parser.add_argument('--log-level', type=str, default='WARNING') parser.add_argument('--log-level', type=str, default='WARNING')
parser.add_argument('--check-clang-format', action='store_true') parser.add_argument('--check-clang-format', action='store_true')
parser.add_argument('--check-clang-tidy', action='store_true') parser.add_argument('--check-clang-tidy', action='store_true')
parser.add_argument('--filter-output', action='store_true')
parser.add_argument('--projects', type=str, default='detect', parser.add_argument('--projects', type=str, default='detect',
help="Projects to select, either a list or projects like 'clang;libc', or " help="Projects to select, either a list or projects like 'clang;libc', or "
"'detect' to automatically infer proejcts from the diff, or " "'detect' to automatically infer proejcts from the diff, or "
"'default' to add all enabled projects") "'default' to add all enabled projects")
args = parser.parse_args() args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s') logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
ctx = strip_emojis(os.getenv('BUILDKITE_LABEL', 'default'))
annotate(os.getenv('BUILDKITE_LABEL', 'default'), context=ctx)
build_dir = '' build_dir = ''
step_key = os.getenv("BUILDKITE_STEP_KEY") step_key = os.getenv("BUILDKITE_STEP_KEY")
scripts_dir = pathlib.Path(__file__).parent.absolute() scripts_dir = pathlib.Path(__file__).parent.absolute()
@ -130,16 +113,13 @@ if __name__ == '__main__':
report = Report() report = Report()
report.os = f'{os.getenv("BUILDKITE_AGENT_META_DATA_OS")}' report.os = f'{os.getenv("BUILDKITE_AGENT_META_DATA_OS")}'
report.name = step_key report.name = step_key
report.success = False
# Create report with failure in case something below fails.
with open(report_path, 'w') as f:
json.dump(report.__dict__, f, default=as_dict)
report.success = True report.success = True
cmake = run_step('cmake', report, lambda s, r: cmake_report(args.projects, s, r)) cmake = run_step('cmake', report, lambda s, r: cmake_report(args.projects, s, r))
if cmake.success: if cmake.success:
ninja_all = run_step('ninja all', report, partial(ninja_all_report, filter_output=args.filter_output)) ninja_all = run_step('ninja all', report, ninja_all_report)
if ninja_all.success: if ninja_all.success:
run_step('ninja check-all', report, partial(ninja_check_all_report, filter_output=args.filter_output)) run_step('ninja check-all', report, ninja_check_all_report)
if args.check_clang_tidy: if args.check_clang_tidy:
run_step('clang-tidy', report, run_step('clang-tidy', report,
lambda s, r: clang_tidy_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-tidy.ignore'), s, r)) lambda s, r: clang_tidy_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-tidy.ignore'), s, r))
@ -147,34 +127,25 @@ if __name__ == '__main__':
run_step('clang-format', report, run_step('clang-format', report,
lambda s, r: clang_format_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-format.ignore'), s, r)) lambda s, r: clang_format_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-format.ignore'), s, r))
logging.debug(report) logging.debug(report)
print('+++ Summary', flush=True) summary = []
for s in report.steps: summary.append('''
mark = 'OK ' <details>
if not s.success: <summary>Reproduce build locally</summary>
report.success = False
mark = 'FAIL ' ```''')
msg = '' summary.append(f'git clone {os.getenv("BUILDKITE_REPO")} llvm-project')
if len(s.messages): summary.append('cd llvm-project')
msg = ': ' + '\n '.join(s.messages) summary.append(f'git checkout {os.getenv("BUILDKITE_COMMIT")}')
print(f'{mark} {s.name}{msg}', flush=True)
print('--- Reproduce build locally', flush=True)
print(f'git clone {os.getenv("BUILDKITE_REPO")} llvm-project')
print('cd llvm-project')
print(f'git checkout {os.getenv("BUILDKITE_COMMIT")}')
for s in report.steps: for s in report.steps:
if len(s.reproduce_commands) == 0: if len(s.reproduce_commands) == 0:
continue continue
print('\n'.join(s.reproduce_commands), flush=True) summary.append('\n'.join(s.reproduce_commands))
print('', flush=True) summary.append('```\n</details>')
if not report.success: annotate('\n'.join(summary), style='success')
print('^^^ +++', flush=True)
ph_target_phid = os.getenv('ph_target_phid') ph_target_phid = os.getenv('ph_target_phid')
if ph_target_phid is not None: if ph_target_phid is not None:
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN')) phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), dry_run_updates=(os.getenv('ph_dry_run_report') is not None))
for u in report.unit: phabtalk.update_build_status(ph_target_phid, True, report.success, report.lint, [])
u['engine'] = step_key
phabtalk.update_build_status(ph_target_phid, True, report.success, report.lint, report.unit)
for a in report.artifacts: for a in report.artifacts:
url = upload_file(a['dir'], a['file']) url = upload_file(a['dir'], a['file'])
if url is not None: if url is not None:

View file

@ -6,4 +6,5 @@ phabricator==0.7.0
pyaml==20.4.0 pyaml==20.4.0
requests==2.24.0 requests==2.24.0
retrying==1.3.3 retrying==1.3.3
unidiff==0.6.0 unidiff==0.6.0
python-benedict==0.22.0

View file

@ -27,7 +27,7 @@ if __name__ == '__main__':
args = parser.parse_args() args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s') logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN')) phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), dry_run_updates=(os.getenv('ph_dry_run_report') is not None))
ph_target_phid = os.getenv('ph_target_phid') ph_target_phid = os.getenv('ph_target_phid')
if ph_target_phid is None: if ph_target_phid is None:
logging.warning('ph_target_phid is not specified. Will not update the build status in Phabricator') logging.warning('ph_target_phid is not specified. Will not update the build status in Phabricator')

View file

@ -28,7 +28,6 @@ def generic_linux(projects: str, check_diff: bool) -> List:
return [] return []
scripts_refspec = os.getenv("ph_scripts_refspec", "master") scripts_refspec = os.getenv("ph_scripts_refspec", "master")
no_cache = os.getenv('ph_no_cache') is not None no_cache = os.getenv('ph_no_cache') is not None
filter_output = '--filter-output' if os.getenv('ph_no_filter_output') is None else ''
log_level = os.getenv('ph_log_level', 'WARNING') log_level = os.getenv('ph_log_level', 'WARNING')
linux_agents = {'queue': 'linux'} linux_agents = {'queue': 'linux'}
t = os.getenv('ph_linux_agents') t = os.getenv('ph_linux_agents')
@ -48,7 +47,7 @@ def generic_linux(projects: str, check_diff: bool) -> List:
if check_diff: if check_diff:
commands.extend([ commands.extend([
'${SRC}/scripts/premerge_checks.py --check-clang-format --check-clang-tidy ' '${SRC}/scripts/premerge_checks.py --check-clang-format --check-clang-tidy '
f'--projects="{projects}" --log-level={log_level} {filter_output}', f'--projects="{projects}" --log-level={log_level}',
]) ])
else: else:
commands.extend([ commands.extend([
@ -63,10 +62,10 @@ def generic_linux(projects: str, check_diff: bool) -> List:
]) ])
linux_buld_step = { linux_buld_step = {
'label': ':linux: build and test linux', 'label': ':linux: x64 debian',
'key': 'linux', 'key': 'linux',
'commands': commands, 'commands': commands,
'artifact_paths': ['artifacts/**/*', '*_result.json'], 'artifact_paths': ['artifacts/**/*', '*_result.json', 'build/test-results.xml'],
'agents': linux_agents, 'agents': linux_agents,
'timeout_in_minutes': 120, 'timeout_in_minutes': 120,
'retry': {'automatic': [ 'retry': {'automatic': [
@ -83,7 +82,6 @@ def generic_windows(projects: str) -> List:
scripts_refspec = os.getenv("ph_scripts_refspec", "master") scripts_refspec = os.getenv("ph_scripts_refspec", "master")
no_cache = os.getenv('ph_no_cache') is not None no_cache = os.getenv('ph_no_cache') is not None
log_level = os.getenv('ph_log_level', 'WARNING') log_level = os.getenv('ph_log_level', 'WARNING')
filter_output = '--filter-output' if os.getenv('ph_no_filter_output') is None else ''
clear_sccache = 'powershell -command "sccache --stop-server; echo \\$env:SCCACHE_DIR; ' \ clear_sccache = 'powershell -command "sccache --stop-server; echo \\$env:SCCACHE_DIR; ' \
'Remove-Item -Recurse -Force -ErrorAction Ignore \\$env:SCCACHE_DIR; ' \ 'Remove-Item -Recurse -Force -ErrorAction Ignore \\$env:SCCACHE_DIR; ' \
'sccache --start-server"' 'sccache --start-server"'
@ -92,7 +90,7 @@ def generic_windows(projects: str) -> List:
if t is not None: if t is not None:
win_agents = json.loads(t) win_agents = json.loads(t)
windows_buld_step = { windows_buld_step = {
'label': ':windows: build and test windows', 'label': ':windows: x64 windows',
'key': 'windows', 'key': 'windows',
'commands': [ 'commands': [
clear_sccache if no_cache else '', clear_sccache if no_cache else '',
@ -100,7 +98,7 @@ def generic_windows(projects: str) -> List:
*checkout_scripts('windows', scripts_refspec), *checkout_scripts('windows', scripts_refspec),
'powershell -command "' 'powershell -command "'
f'%SRC%/scripts/premerge_checks.py --projects=\'{projects}\' --log-level={log_level} {filter_output}; ' f'%SRC%/scripts/premerge_checks.py --projects=\'{projects}\' --log-level={log_level}; '
'\\$exit=\\$?;' '\\$exit=\\$?;'
'sccache --show-stats;' 'sccache --show-stats;'
'if (\\$exit) {' 'if (\\$exit) {'
@ -111,7 +109,7 @@ def generic_windows(projects: str) -> List:
' exit 1;' ' exit 1;'
'}"', '}"',
], ],
'artifact_paths': ['artifacts/**/*', '*_result.json'], 'artifact_paths': ['artifacts/**/*', '*_result.json', 'build/test-results.xml'],
'agents': win_agents, 'agents': win_agents,
'timeout_in_minutes': 90, 'timeout_in_minutes': 90,
'retry': {'automatic': [ 'retry': {'automatic': [
@ -164,7 +162,7 @@ def checkout_scripts(target_os: str, scripts_refspec: str) -> []:
'git checkout x', 'git checkout x',
'echo llvm-premerge-checks commit:', 'echo llvm-premerge-checks commit:',
'git rev-parse HEAD', 'git rev-parse HEAD',
'pip install -r %SRC%/scripts/requirements.txt', 'pip install -q -r %SRC%/scripts/requirements.txt',
'cd %BUILDKITE_BUILD_CHECKOUT_PATH%', 'cd %BUILDKITE_BUILD_CHECKOUT_PATH%',
] ]
return [ return [
@ -176,6 +174,6 @@ def checkout_scripts(target_os: str, scripts_refspec: str) -> []:
'git checkout x', 'git checkout x',
'echo "llvm-premerge-checks commit"', 'echo "llvm-premerge-checks commit"',
'git rev-parse HEAD', 'git rev-parse HEAD',
'pip install -r ${SRC}/scripts/requirements.txt', 'pip install -q -r ${SRC}/scripts/requirements.txt',
'cd "$BUILDKITE_BUILD_CHECKOUT_PATH"', 'cd "$BUILDKITE_BUILD_CHECKOUT_PATH"',
] ]

View file

@ -18,44 +18,90 @@ import logging
import os import os
from phabtalk.phabtalk import PhabTalk from phabtalk.phabtalk import PhabTalk
from buildkite_utils import format_url, BuildkiteApi from buildkite_utils import format_url, BuildkiteApi, strip_emojis
import test_results_report
from benedict import benedict
def get_failed_jobs(build: benedict) -> []:
failed_jobs = []
for j in build.get('jobs', []):
j = benedict(j)
if j.get('state') == 'failed' and j.get('name'):
failed_jobs.append(j.get('name'))
return failed_jobs
def process_unit_test_reports(bk: BuildkiteApi, build: benedict, prefix: str) -> []:
failed_tests = []
for job in build.get('jobs', []):
job = benedict(job)
if job.get('state') != 'failed' or job.get('type') != 'script':
# Job must run scripts and fail to be considered.
# Recursive pipeline triggers are not processed at the moment.
continue
artifacts_url = job.get('artifacts_url')
if artifacts_url is None:
continue
artifacts = bk.get(artifacts_url).json()
for a in artifacts:
a = benedict(a)
if not a.get('filename').endswith('test-results.xml') or not a.get('download_url'):
continue
content = bk.get(a.get('download_url')).content
ctx = strip_emojis(prefix + ' ' + job.get('name', build.get('pipeline.name')))
failed_tests.extend(test_results_report.parse_failures(content, ctx))
return failed_tests
if __name__ == '__main__': if __name__ == '__main__':
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument('--log-level', type=str, default='INFO') parser.add_argument('--log-level', type=str, default='INFO')
args = parser.parse_args() args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s') logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
print(f'Branch {os.getenv("BUILDKITE_BRANCH")} at {os.getenv("BUILDKITE_REPO")}', flush=True)
ph_buildable_diff = os.getenv('ph_buildable_diff') ph_buildable_diff = os.getenv('ph_buildable_diff')
if ph_buildable_diff is not None:
url = f'https://reviews.llvm.org/D{os.getenv("ph_buildable_revision")}?id={ph_buildable_diff}'
print(f'Review: {format_url(url)}', flush=True)
if os.getenv('BUILDKITE_TRIGGERED_FROM_BUILD_NUMBER') is not None:
url = f'https://buildkite.com/llvm-project/' \
f'{os.getenv("BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG")}/' \
f'builds/{os.getenv("BUILDKITE_TRIGGERED_FROM_BUILD_NUMBER")}'
print(f'Triggered from build {format_url(url)}', flush=True)
ph_target_phid = os.getenv('ph_target_phid') ph_target_phid = os.getenv('ph_target_phid')
if ph_target_phid is None: if ph_target_phid is None:
logging.warning('ph_target_phid is not specified. Will not update the build status in Phabricator') logging.warning('ph_target_phid is not specified. Will not update the build status in Phabricator')
exit(0) exit(0)
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), dry_run_updates=(os.getenv('ph_dry_run_report') is not None))
bk = BuildkiteApi(os.getenv("BUILDKITE_API_TOKEN"), os.getenv("BUILDKITE_ORGANIZATION_SLUG")) report_success = False # for try block
build = bk.get_build(os.getenv("BUILDKITE_PIPELINE_SLUG"), os.getenv("BUILDKITE_BUILD_NUMBER")) failed_tests = []
success = True try:
build.setdefault('jobs', []) bk = BuildkiteApi(os.getenv("BUILDKITE_API_TOKEN"), os.getenv("BUILDKITE_ORGANIZATION_SLUG"))
for j in build['jobs']: build = bk.get_build(os.getenv("BUILDKITE_PIPELINE_SLUG"), os.getenv("BUILDKITE_BUILD_NUMBER"))
j.setdefault('state', '') success = True
j.setdefault('id', '') failed_tests = process_unit_test_reports(bk, build, '')
logging.info(f'{j["id"]} state {j["state"]}') for i, job in enumerate(build.get('jobs', [])):
success = success and (j['state'] != 'failed') job = benedict(job)
job_web_url = job.get('web_url', os.getenv('BUILDKITE_BUILD_URL', ''))
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN')) logging.info(f'{job.get("id")} state {job.get("state")}')
build_url = f'https://reviews.llvm.org/harbormaster/build/{os.getenv("ph_build_id")}' job_state = job.get('state')
print(f'Reporting results to Phabricator build {format_url(build_url)}', flush=True) if job.get('type') == 'waiter':
phabtalk.update_build_status(ph_target_phid, False, success, {}, []) continue
bug_url = f'https://github.com/google/llvm-premerge-checks/issues/new?assignees=&labels=bug' \ if job_state != 'passed' and job_state != 'failed':
f'&template=bug_report.md&title=buildkite build {os.getenv("BUILDKITE_PIPELINE_SLUG")} ' \ # Current and irrelevant steps.
f'{os.getenv("BUILDKITE_BUILD_NUMBER")}' continue
print(f'{format_url(bug_url, "report issue")}', flush=True) if job_state == 'passed' and i == 0:
# Skip successful first step as we assume it to be a pipeline setup
continue
name = job.get('name')
if job.get('type') == 'trigger':
job_web_url = job.get('triggered_build.web_url', job_web_url)
triggered_url = job.get('triggered_build.url')
if triggered_url != '':
sub_build = benedict(bk.get(triggered_url).json())
name = name or sub_build.get('pipeline.name')
failed_steps = get_failed_jobs(sub_build)
failed_tests.extend(process_unit_test_reports(bk, sub_build, name))
if job_state == 'failed' and failed_steps:
name = f"{name} ({', '.join(failed_steps[:2])}{', ...' if len(failed_steps) > 2 else ''})"
name = strip_emojis(name) or 'unknown'
phabtalk.maybe_add_url_artifact(ph_target_phid, job_web_url, f"{name} {job_state}")
if job_state == 'failed':
success = False
report_success = success # Must be last before finally: block to report errors in this script.
finally:
build_url = f'https://reviews.llvm.org/harbormaster/build/{os.getenv("ph_build_id")}'
print(f'Reporting results to Phabricator build {format_url(build_url)}', flush=True)
phabtalk.update_build_status(ph_target_phid, False, report_success, {}, failed_tests)

View file

@ -28,9 +28,8 @@ def run(working_dir: str, test_results: str, step: Optional[Step], report: Optio
step = Step() step = Step()
path = os.path.join(working_dir, test_results) path = os.path.join(working_dir, test_results)
if not os.path.exists(path): if not os.path.exists(path):
logging.warning(f'{path} is not found') logging.error(f'{path} is not found')
step.success = False step.success = False
step.messages.append(f'test report "{path}" is not found')
return return
try: try:
success = True success = True
@ -63,13 +62,30 @@ def run(working_dir: str, test_results: str, step: Optional[Step], report: Optio
msg += f'{test_case["namespace"]}/{test_case["name"]}\n' msg += f'{test_case["namespace"]}/{test_case["name"]}\n'
except Exception as e: except Exception as e:
logging.error(e) logging.error(e)
step.messages.append('Parsing of test results failed')
step.success = False step.success = False
logging.debug(f'report: {report}') logging.debug(f'report: {report}')
logging.debug(f'step: {step}') logging.debug(f'step: {step}')
def parse_failures(test_xml: bytes, context: str) -> []:
failed_cases = []
root_node = etree.fromstring(test_xml)
for test_case in root_node.xpath('//testcase'):
failure = test_case.find('failure')
if failure is None:
continue
failed_cases.append({
'engine': context,
'name': test_case.attrib['name'],
'namespace': test_case.attrib['classname'],
'result': 'fail',
'duration': float(test_case.attrib['time']),
'details': failure.text
})
return failed_cases
if __name__ == '__main__': if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Processes results from xml report') parser = argparse.ArgumentParser(description='Processes results from xml report')
parser.add_argument('test-report', default='build/test-results.xml') parser.add_argument('test-report', default='build/test-results.xml')