
Merge pull request #195 from google/win-build

Add windows build
Mikhail Goncharov 2020-06-03 13:44:14 +02:00 committed by GitHub
commit 6d54fbcbd2
GPG key ID: 4AEE18F83AFDEB23
20 changed files with 364 additions and 205 deletions


@@ -1,4 +1,9 @@
 c:\credentials\buildkite-env.ps1
 # Install Buildkite agent.
 iex ((New-Object System.Net.WebClient).DownloadString('https://raw.githubusercontent.com/buildkite/agent/master/install.ps1'))
+$env:SCCACHE_DIR="C:\ws\sccache"
+Remove-Item -Recurse -Force -ErrorAction Ignore $env:SCCACHE_DIR
+sccache --start-server
 C:\buildkite-agent\bin\buildkite-agent.exe start


@@ -113,21 +113,34 @@ To spawn a new windows agent:
 1. Go to the [GCP page](https://pantheon.corp.google.com/compute/instances?project=llvm-premerge-checks&instancessize=50) and pick a new number for the agent.
 1. Run `kubernetes/windows_agent_create.sh agent-windows-<number>`
 1. Go to the [GCP page](https://pantheon.corp.google.com/compute/instances?project=llvm-premerge-checks&instancessize=50) again
-1. login to the new machine via RDP (you will need a RDP client, e.g. Chrome app).
+1. Login to the new machine via RDP (you will need an RDP client, e.g. the Chrome app).
 1. In the RDP session: run these commands in the CMD window under Administrator to bootstrap the Windows machine:
    ```powershell
    Invoke-WebRequest -uri 'https://raw.githubusercontent.com/google/llvm-premerge-checks/master/scripts/windows_agent_bootstrap.ps1' -OutFile windows_agent_bootstrap.ps1
    ./windows_agent_bootstrap.ps1
    ```
    Ignore the pop-up to format the new disk and wait for the machine to reboot.
-1. Create `c:\credentials` folder with the agent credentials:
-   For *Buildkite* add file `buildkite-env.ps1`:
+### Buildkite
+1. Create `c:\credentials` folder with file `buildkite-env.ps1`:
    ```powershell
    $Env:buildkiteAgentToken = "secret-token"
-   $Env:BUILDKITE_AGENT_TAGS = "queue=premerge;os=windows"
+   $Env:BUILDKITE_AGENT_TAGS = "queue=premerge,os=windows"
+   $Env:CONDUIT_TOKEN = "conduit-api-token"
+   ```
+1. Run
+   ```powershell
+   C:\llvm-premerge-checks\scripts\windows_agent_start_buildkite.ps1 [-workdir D:/] [-testing] [-version latest]
+   ```
+### Jenkins
+1. Create `c:\credentials` folder with `build-agent-results_key.json` to access cloud storage; copy it from one of the existing machines.
+1. Run
+   ```powershell
+   git clone https://github.com/google/llvm-premerge-checks.git "c:\llvm-premerge-checks"
+   C:\llvm-premerge-checks\scripts\windows_agent_start_buildkite.ps1 [-testing] [-version latest]
    ```
-   For *Jenkins*: `build-agent-results_key.json` to access cloud storage copy from one of the existing machines.
-1. Start the container `C:\llvm-premerge-checks\scripts\windows_agent_start_[buildkite|jenkins].ps1 `
 ## Testing scripts locally


@@ -41,7 +41,7 @@ spec:
         - name: BUILDKITE_AGENT_TOKEN
           valueFrom:
            secretKeyRef:
-              name: agent-token
+              name: buildkite-agent-token
              key: token
         - name: BUILDKITE_AGENT_TAGS
          value: "queue=release,os=linux"


@@ -18,7 +18,7 @@ metadata:
   name: premerge-debian
   namespace: buildkite
 spec:
-  replicas: 1
+  replicas: 2
   template:
     metadata:
       labels:
@@ -43,7 +43,7 @@ spec:
         - name: BUILDKITE_AGENT_TOKEN
           valueFrom:
            secretKeyRef:
-              name: agent-token
+              name: buildkite-agent-token
              key: token
         - name: BUILDKITE_AGENT_TAGS
          value: "queue=premerge,os=linux"


@@ -1,24 +0,0 @@
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
# store the buildkite token as kubernetes secret
#
# Get the token from the website [1] and store it in this file locally in
# ~/.llvm-premerge-checks/buildkite-token
# Do not share this token with anyone!
# [1] https://buildkite.com/organizations/llvm-project/agents
kubectl create secret generic buildkite-token --namespace jenkins --from-file ~/.llvm-premerge-checks/buildkite-token

scripts/__init__.py Normal file


@@ -21,15 +21,61 @@ if __name__ == '__main__':
     queue = os.getenv("BUILDKITE_AGENT_META_DATA_QUEUE", "default")
     diff_id = os.getenv("ph_buildable_diff", "")
     steps = []
-    # SCRIPT_DIR is defined in buildkite pipeline step.
     linux_buld_step = {
-        'label': 'build linux',
-        'key': 'build-linux',
+        'label': ':linux: build and test linux',
+        'key': 'linux',
         'commands': [
-            '${SCRIPT_DIR}/premerge_checks.py',
+            'export SRC=${BUILDKITE_BUILD_PATH}/llvm-premerge-checks',
+            'rm -rf ${SRC}',
+            'git clone --depth 1 --branch ${scripts_branch} https://github.com/google/llvm-premerge-checks.git ${SRC}',
+            # Add link in review to the build.
+            '${SRC}/scripts/phabtalk/add_url_artifact.py '
+            '--phid="$ph_target_phid" '
+            '--url="$BUILDKITE_BUILD_URL" '
+            '--name="Buildkite build"',
+            '${SRC}/scripts/premerge_checks.py --check-clang-format --check-clang-tidy',
         ],
+        'artifact_paths': ['artifacts/**/*', '*_result.json'],
+        'agents': {'queue': queue, 'os': 'linux'}
+    }
+    windows_buld_step = {
+        'label': ':windows: build and test windows',
+        'key': 'windows',
+        'commands': [
+            'sccache --show-stats',
+            'set SRC=%BUILDKITE_BUILD_PATH%/llvm-premerge-checks',
+            'rm -rf %SRC%',
+            'git clone --depth 1 --branch %scripts_branch% https://github.com/google/llvm-premerge-checks.git %SRC%',
+            'powershell -command "%SRC%/scripts/premerge_checks.py; '
+            '\\$exit=\\$?;'
+            'sccache --show-stats;'
+            'if (\\$exit) {'
+            ' echo "success";'
+            ' exit 0; } '
+            'else {'
+            ' echo "failure";'
+            ' exit 1;'
+            '}',
+        ],
+        'artifact_paths': ['artifacts/**/*', '*_result.json'],
+        'agents': {'queue': queue, 'os': 'windows'},
+    }
+    steps.append(linux_buld_step)
+    steps.append(windows_buld_step)
+    report_step = {
+        'label': ':spiral_note_pad: report',
+        'depends_on': [linux_buld_step['key'], windows_buld_step['key']],
+        'commands': [
+            'mkdir -p artifacts',
+            'buildkite-agent artifact download "*_result.json" .',
+            'export SRC=${BUILDKITE_BUILD_PATH}/llvm-premerge-checks',
+            'rm -rf ${SRC}',
+            'git clone --depth 1 --branch ${scripts_branch} https://github.com/google/llvm-premerge-checks.git ${SRC}',
+            '${SRC}/scripts/buildkite/summary.py',
+        ],
+        'allow_dependency_failure': True,
         'artifact_paths': ['artifacts/**/*'],
         'agents': {'queue': queue, 'os': 'linux'}
     }
-    steps.append(linux_buld_step)
+    steps.append(report_step)
     print(yaml.dump({'steps': steps}))
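For orientation, the generator above just prints Buildkite pipeline YAML to stdout. A minimal, self-contained sketch of that pattern, with hypothetical placeholder commands and assuming PyYAML is installed:

```python
import os
import yaml  # PyYAML, already used by the pipeline generators

# Sketch: build a list of Buildkite step dicts and print them as YAML
# for `buildkite-agent pipeline upload`.
queue = os.getenv('BUILDKITE_AGENT_META_DATA_QUEUE', 'default')
steps = [
    {
        'label': ':linux: build and test linux',
        'key': 'linux',
        'commands': ['echo "placeholder for premerge_checks.py"'],  # placeholder command
        'artifact_paths': ['artifacts/**/*', '*_result.json'],
        'agents': {'queue': queue, 'os': 'linux'},
    },
    {
        'label': ':spiral_note_pad: report',
        'depends_on': ['linux'],
        'allow_dependency_failure': True,
        'commands': ['echo "placeholder for summary.py"'],  # placeholder command
        'agents': {'queue': queue, 'os': 'linux'},
    },
]
print(yaml.dump({'steps': steps}))
```

The real steps add the clone and check commands shown in the diff; the report step aggregates the `*_result.json` artifacts produced by the per-OS builds.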


@@ -26,9 +26,9 @@ if __name__ == '__main__':
         'commands': ['scripts/buildkite/apply_patch.sh'],
         'agents': {'queue': queue, 'os': 'linux'}
     }
-    run_build_step = {
+    build_linux_step = {
         'trigger': 'premerge-checks',
-        'label': ':rocket: build',
+        'label': ':rocket: build and test',
         'async': False,
         'depends_on': 'create-branch',
         'build': {
@@ -38,7 +38,7 @@ if __name__ == '__main__':
     }
     for e in os.environ:
         if e.startswith('ph_'):
-            run_build_step['build']['env'][e] = os.getenv(e)
+            build_linux_step['build']['env'][e] = os.getenv(e)
     steps.append(create_branch_step)
-    steps.append(run_build_step)
+    steps.append(build_linux_step)
     print(yaml.dump({'steps': steps}))

scripts/buildkite/summary.py Executable file

@@ -0,0 +1,71 @@
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import json
import logging
import os
import sys
import uuid
if __name__ == '__main__':
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from phabtalk.phabtalk import PhabTalk
from buildkite.utils import format_url
def maybe_add_url_artifact(phab: PhabTalk, phid: str, url: str, name: str):
if phid is None:
logging.warning('PHID is not provided, cannot create URL artifact')
return
phab.create_artifact(phid, str(uuid.uuid4()), 'uri', {'uri': url, 'ui.external': True, 'name': name})
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--log-level', type=str, default='WARNING')
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
print(f'Branch {os.getenv("BUILDKITE_BRANCH")} at {os.getenv("BUILDKITE_REPO")}')
ph_buildable_diff = os.getenv('ph_buildable_diff')
if ph_buildable_diff is not None:
url = f'https://reviews.llvm.org/D{os.getenv("ph_buildable_revision")}?id={ph_buildable_diff}'
print(f'Review: {format_url(url)}')
if os.getenv('BUILDKITE_TRIGGERED_FROM_BUILD_NUMBER') is not None:
url = f'https://buildkite.com/llvm-project/' \
f'{os.getenv("BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG")}/' \
f'builds/{os.getenv("BUILDKITE_TRIGGERED_FROM_BUILD_NUMBER")}'
print(f'Triggered from build {format_url(url)}')
success = True
for path in glob.glob("*_result.json"):
logging.info(f'analysing {path}')
with open(path, 'r') as f:
report = json.load(f)
logging.info(report)
success = success and report['success']
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), 'https://reviews.llvm.org/api/', False)
build_url = f'https://reviews.llvm.org/harbormaster/build/{os.getenv("ph_build_id")}'
print(f'Reporting results to Phabricator build {format_url(build_url)}')
ph_buildable_diff = os.getenv('ph_buildable_diff')
ph_target_phid = os.getenv('ph_target_phid')
phabtalk.update_build_status(ph_buildable_diff, ph_target_phid, False, success)
bug_url = f'https://github.com/google/llvm-premerge-checks/issues/new?assignees=&labels=bug' \
f'&template=bug_report.md&title=buildkite build {os.getenv("BUILDKITE_PIPELINE_SLUG")} ' \
f'{os.getenv("BUILDKITE_BUILD_NUMBER")}'
print(f'{format_url(bug_url, "report issue")}')
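summary.py aggregates the `*_result.json` files uploaded by each build step. A hedged sketch of writing and reading such a file (the field values are made up; the shape follows the `Report` and `Step` classes in this commit, trimmed to a few fields):

```python
import json

# Hypothetical per-OS result file, shaped like premerge_checks.py's
# json.dump(report.__dict__, ...) output (trimmed).
example = {
    'os': 'windows',
    'name': 'windows',
    'success': False,
    'steps': [{'name': 'ninja all', 'success': False, 'duration': 1234.5, 'messages': []}],
    'artifacts': [],
}
with open('windows_result.json', 'w') as f:
    json.dump(example, f)

# summary.py folds each file's 'success' flag into one overall verdict:
success = True
with open('windows_result.json') as f:
    report = json.load(f)
success = success and report['success']
print(success)  # False
```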


@@ -0,0 +1,28 @@
import logging
import os
import re
import subprocess
from typing import Optional
def upload_file(base_dir: str, file: str):
"""
Uploads artifact to buildkite and returns URL to it
"""
r = subprocess.run(f'buildkite-agent artifact upload "{file}"', shell=True, capture_output=True, cwd=base_dir)
logging.debug(f'upload-artifact {r}')
match = re.search('Uploading artifact ([^ ]*) ', r.stderr.decode())
logging.debug(f'match {match}')
if match:
url = f'https://buildkite.com/organizations/llvm-project/pipelines/premerge-checks/builds/{os.getenv("BUILDKITE_BUILD_NUMBER")}/jobs/{os.getenv("BUILDKITE_JOB_ID")}/artifacts/{match.group(1)}'
logging.info(f'uploaded {file} to {url}')
return url
else:
logging.warning(f'could not find artifact {base_dir}/{file}')
return None
def format_url(url: str, name: Optional[str] = None):
if name is None:
name = url
return f"\033]1339;url='{url}';content='{name}'\a\n"


@@ -14,15 +14,14 @@
 # limitations under the License.

 import argparse
+import logging
 import os
 import subprocess
-import logging
+from typing import Tuple, Optional

 import pathspec
 import unidiff
-from typing import Tuple, Optional
-from phabtalk.phabtalk import Report, CheckResult
+from phabtalk.phabtalk import Report, Step


 def get_diff(base_commit) -> Tuple[bool, str]:
@@ -40,13 +39,15 @@ def get_diff(base_commit) -> Tuple[bool, str]:
     return True, diff_run.stdout.decode()


-def run(base_commit, ignore_config, report: Optional[Report]):
+def run(base_commit, ignore_config, step: Optional[Step], report: Optional[Report]):
     """Apply clang-format and return if no issues were found."""
     if report is None:
         report = Report() # For debugging.
+    if step is None:
+        step = Step() # For debugging.
     r, patch = get_diff(base_commit)
     if not r:
-        report.add_step('clang-format', CheckResult.FAILURE, '')
+        step.success = False
         return
     add_artifact = False
     patches = unidiff.PatchSet(patch)
@@ -88,14 +89,12 @@ def run(base_commit, ignore_config, report: Optional[Report]):
         with open(patch_file, 'w') as f:
             f.write(patch)
         report.add_artifact(os.getcwd(), patch_file, 'clang-format')
-    if success:
-        report.add_step('clang-format', CheckResult.SUCCESS, message='')
-    else:
-        report.add_step(
-            'clang-format',
-            CheckResult.FAILURE,
+    if not success:
+        step.success = False
+        step.messages.append(
             'Please format your changes with clang-format by running `git-clang-format HEAD^` or applying patch.')
     logging.debug(f'report: {report}')
+    logging.debug(f'step: {step}')


 if __name__ == '__main__':


@@ -22,11 +22,15 @@ from typing import Optional
 import pathspec
 import ignore_diff
-from phabtalk.phabtalk import Report, CheckResult
+from phabtalk.phabtalk import Report, Step


-def run(base_commit, ignore_config, report: Optional[Report]):
+def run(base_commit, ignore_config, step: Optional[Step], report: Optional[Report]):
     """Apply clang-format and return if no issues were found."""
+    if report is None:
+        report = Report() # For debugging.
+    if step is None:
+        step = Step() # For debugging.
     r = subprocess.run(f'git diff -U0 --no-prefix {base_commit}', shell=True, capture_output=True)
     logging.debug(f'git diff {r}')
     diff = r.stdout.decode()
@@ -43,8 +47,6 @@ def run(base_commit, ignore_config, report: Optional[Report]):
     logging.info(f'clang-tidy input: {a}')
     out = p.communicate(input=a.encode())[0].decode()
     logging.debug(f'clang-tidy-diff {p}: {out}')
-    if report is None:
-        report = Report() # For debugging.
     # Typical finding looks like:
     # [cwd/]clang/include/clang/AST/DeclCXX.h:3058:20: error: ... [clang-diagnostic-error]
     pattern = '^([^:]*):(\\d+):(\\d+): (.*): (.*)'
@@ -94,16 +96,14 @@ def run(base_commit, ignore_config, report: Optional[Report]):
         with open(p, 'w') as f:
             f.write(out)
         report.add_artifact(os.getcwd(), p, 'clang-tidy')
-    if errors_count + warn_count == 0:
-        report.add_step('clang-tidy', CheckResult.SUCCESS, message='')
-    else:
-        report.add_step(
-            'clang-tidy',
-            CheckResult.FAILURE,
+    if errors_count + warn_count != 0:
+        step.success = False
+        step.messages.append(
             f'clang-tidy found {errors_count} errors and {warn_count} warnings. {inline_comments} of them are added '
             f'as review comments. See'
             f'https://github.com/google/llvm-premerge-checks/blob/master/docs/clang_tidy.md#review-comments.')
     logging.debug(f'report: {report}')
+    logging.debug(f'step: {step}')


 if __name__ == '__main__':
@@ -114,4 +114,4 @@ if __name__ == '__main__':
     parser.add_argument('--log-level', type=str, default='INFO')
     args = parser.parse_args()
     logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
-    run(args.base, args.ignore_config, None)
+    run(args.base, args.ignore_config, None, None)



@@ -0,0 +1,46 @@
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import uuid
if __name__ == '__main__':
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# from phabtalk import PhabTalk
# else:
from phabtalk.phabtalk import PhabTalk
def maybe_add_url_artifact(phab: PhabTalk, phid: str, url: str, name: str):
if phid is None:
logging.warning('PHID is not provided, cannot create URL artifact')
return
phab.create_artifact(phid, str(uuid.uuid4()), 'uri', {'uri': url, 'ui.external': True, 'name': name})
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Runs premerge checks8')
parser.add_argument('--url', type=str)
parser.add_argument('--name', type=str)
parser.add_argument('--phid', type=str)
parser.add_argument('--log-level', type=str, default='WARNING')
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), 'https://reviews.llvm.org/api/', False)
maybe_add_url_artifact(phabtalk, args.phid, args.url, args.name)


@@ -28,7 +28,7 @@ from typing import Optional, List, Dict
 import pathspec
 from lxml import etree
 from phabricator import Phabricator
-from enum import Enum
+from enum import IntEnum


 class PhabTalk:
@@ -81,7 +81,7 @@ class PhabTalk:
            transactions=transactions)
         print('Uploaded comment to Revision D{}:{}'.format(revision, text))

-    def update_build_status(self, diff_id: str, phid: str, working: bool, success: bool, lint: {}, unit: []):
+    def update_build_status(self, diff_id: str, phid: str, working: bool, success: bool, lint: {} = {}, unit: [] = []):
         """Submit collected report to Phabricator.
         """
@@ -193,14 +193,22 @@ def _parse_patch(patch) -> List[Dict[str, str]]:
     return entries


-class CheckResult(Enum):
-    UNKNOWN = 0
-    SUCCESS = 1
-    FAILURE = 2
+class Step:
+    def __init__(self):
+        self.name = ''
+        self.success = True
+        self.duration = 0.0
+        self.messages = []
+
+    def set_status_from_exit_code(self, exit_code: int):
+        if exit_code != 0:
+            self.success = False


 class Report:
     def __init__(self):
+        self.os = ''
+        self.name = ''
         self.comments = []
         self.success = True
         self.working = False
@@ -211,7 +219,7 @@ class Report:
             'fail': 0,
             'skip': 0
         } # type: Dict[str, int]
-        self.steps = [] # type: List
+        self.steps = [] # type: List[Step]
         self.artifacts = [] # type: List

     def __str__(self):
@@ -223,13 +231,6 @@ class Report:
             self.lint[key] = []
         self.lint[key].append(m)

-    def add_step(self, title: str, result: CheckResult, message: str):
-        self.steps.append({
-            'title': title,
-            'result': result,
-            'message': message,
-        })
-
     def add_artifact(self, dir: str, file: str, name: str):
         self.artifacts.append({'dir': dir, 'file': file, 'name': name})
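For orientation, a checker now records its outcome on a `Step` and attaches it to the shared `Report`, instead of calling the removed `add_step`. A hedged sketch of that flow (assumes the repository's scripts and their Python dependencies are importable; the exit code and message are made up):

```python
from phabtalk.phabtalk import Report, Step

# Run one hypothetical check and record its outcome.
report = Report()
step = Step()
step.name = 'clang-format'
step.set_status_from_exit_code(1)  # a non-zero exit code marks the step as failed
step.messages.append('Please format your changes with clang-format.')
report.steps.append(step)

print(step.success)                    # False
print([s.name for s in report.steps])  # ['clang-format']
```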


@@ -14,162 +14,133 @@
 # limitations under the License.
 # Runs all check on buildkite agent.
+import argparse
 import json
 import logging
 import os
 import pathlib
-import re
 import shutil
 import subprocess
 import time
-import uuid
-from typing import Callable, Optional
+from typing import Callable

 import clang_format_report
 import clang_tidy_report
 import run_cmake
 import test_results_report
-from phabtalk.phabtalk import Report, CheckResult, PhabTalk
+from buildkite.utils import upload_file
+from phabtalk.add_url_artifact import maybe_add_url_artifact
+from phabtalk.phabtalk import Report, PhabTalk, Step


-def upload_file(base_dir: str, file: str):
-    """
-    Uploads artifact to buildkite and returns URL to it
-    """
-    r = subprocess.run(f'buildkite-agent artifact upload "{file}"', shell=True, capture_output=True, cwd=base_dir)
-    logging.debug(f'upload-artifact {r}')
-    match = re.search('Uploading artifact ([^ ]*) ', r.stderr.decode())
-    logging.debug(f'match {match}')
-    if match:
-        url = f'https://buildkite.com/organizations/llvm-project/pipelines/premerge-checks/builds/{os.getenv("BUILDKITE_BUILD_NUMBER")}/jobs/{os.getenv("BUILDKITE_JOB_ID")}/artifacts/{match.group(1)}'
-        logging.info(f'uploaded {file} to {url}')
-        return url
-    else:
-        logging.warning(f'could not find artifact {base_dir}/{file}')
-        return None
-
-
-def maybe_add_url_artifact(phab: PhabTalk, url: str, name: str):
-    phid = os.getenv('ph_target_phid')
-    if phid is None:
-        return
-    phab.create_artifact(phid, str(uuid.uuid4()), 'uri', {'uri': url, 'ui.external': True, 'name': name})
-
-
-def add_shell_result(report: Report, name: str, exit_code: int) -> CheckResult:
-    logging.info(f'"{name}" exited with {exit_code}')
-    z = CheckResult.SUCCESS
-    if exit_code != 0:
-        z = CheckResult.FAILURE
-    report.add_step(name, z, '')
-    return z
-
-
-def ninja_all_report(report: Report) -> CheckResult:
+def ninja_all_report(step: Step, _: Report):
     print('Full will be available in Artifacts "ninja-all.log"')
     r = subprocess.run(f'ninja all | '
                        f'tee {artifacts_dir}/ninja-all.log | '
                        f'grep -vE "\\[.*] (Building|Linking|Copying|Generating|Creating)"',
                        shell=True, cwd=build_dir)
-    return add_shell_result(report, 'ninja all', r.returncode)
+    logging.debug(f'ninja all: returned {r.returncode}, stderr: "{r.stderr}"')
+    step.set_status_from_exit_code(r.returncode)


-def ninja_check_all_report(report: Report) -> CheckResult:
+def ninja_check_all_report(step: Step, _: Report):
+    # TODO: merge running ninja check all and analysing results in one step?
     print('Full will be available in Artifacts "ninja-check-all.log"')
     r = subprocess.run(f'ninja check-all | tee {artifacts_dir}/ninja-check-all.log | '
                        f'grep -vE "^\\[.*] (Building|Linking)" | '
                        f'grep -vE "^(PASS|XFAIL|UNSUPPORTED):"', shell=True, cwd=build_dir)
-    z = add_shell_result(report, 'ninja check all', r.returncode)
-    # TODO: check if test-results are present.
-    report.add_artifact(build_dir, 'test-results.xml', 'test results')
-    test_results_report.run(os.path.join(build_dir, 'test-results.xml'), report)
-    return z
+    logging.debug(f'ninja check-all: returned {r.returncode}, stderr: "{r.stderr}"')
+    step.set_status_from_exit_code(r.returncode)
+    test_results_report.run(build_dir, 'test-results.xml', step, report)


-def run_step(name: str, report: Report, thunk: Callable[[Report], CheckResult]) -> CheckResult:
-    global timings
+def run_step(name: str, report: Report, thunk: Callable[[Step, Report], None]) -> Step:
     start = time.time()
     print(f'--- {name}') # New section in Buildkite log.
-    result = thunk(report)
-    timings[name] = time.time() - start
+    step = Step()
+    step.name = name
+    thunk(step, report)
+    step.duration = time.time() - start
     # Expand section if it failed.
-    if result == CheckResult.FAILURE:
+    if not step.success:
         print('^^^ +++')
-    return result
+    report.steps.append(step)
+    return step


-def cmake_report(report: Report) -> CheckResult:
+def cmake_report(step: Step, _: Report):
     global build_dir
     cmake_result, build_dir, cmake_artifacts = run_cmake.run('detect', os.getcwd())
     for file in cmake_artifacts:
         if os.path.exists(file):
             shutil.copy2(file, artifacts_dir)
-    return add_shell_result(report, 'cmake', cmake_result)
+    step.set_status_from_exit_code(cmake_result)


-def furl(url: str, name: Optional[str] = None):
-    if name is None:
-        name = url
-    return f"\033]1339;url='{url}';content='{name}'\a\n"
+def as_dict(obj):
+    try:
+        return obj.toJSON()
+    except:
+        return obj.__dict__


 if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Runs premerge checks8')
+    parser.add_argument('--log-level', type=str, default='WARNING')
+    parser.add_argument('--check-clang-format', action='store_true')
+    parser.add_argument('--check-clang-tidy', action='store_true')
+    args = parser.parse_args()
+    logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
     build_dir = ''
-    logging.basicConfig(level=logging.WARNING, format='%(levelname)-7s %(message)s')
+    step_key = os.getenv("BUILDKITE_STEP_KEY")
     scripts_dir = pathlib.Path(__file__).parent.absolute()
-    phab = PhabTalk(os.getenv('CONDUIT_TOKEN'), 'https://reviews.llvm.org/api/', False)
-    maybe_add_url_artifact(phab, os.getenv('BUILDKITE_BUILD_URL'), 'Buildkite build')
     artifacts_dir = os.path.join(os.getcwd(), 'artifacts')
     os.makedirs(artifacts_dir, exist_ok=True)
+    report_path = f'{step_key}_result.json'
     report = Report()
-    timings = {}
-    cmake_result = run_step('cmake', report, cmake_report)
-    if cmake_result == CheckResult.SUCCESS:
-        compile_result = run_step('ninja all', report, ninja_all_report)
-        if compile_result == CheckResult.SUCCESS:
-            run_step('ninja check all', report, ninja_check_all_report)
-    run_step('clang-tidy', report,
-             lambda x: clang_tidy_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-tidy.ignore'), x))
-    run_step('clang-format', report,
-             lambda x: clang_format_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-format.ignore'), x))
-    print('+++ summary')
-    print(f'Branch {os.getenv("BUILDKITE_BRANCH")} at {os.getenv("BUILDKITE_REPO")}')
-    ph_buildable_diff = os.getenv('ph_buildable_diff')
-    if ph_buildable_diff is not None:
-        url = f'https://reviews.llvm.org/D{os.getenv("ph_buildable_revision")}?id={ph_buildable_diff}'
-        print(f'Review: {furl(url)}')
-    if os.getenv('BUILDKITE_TRIGGERED_FROM_BUILD_NUMBER') is not None:
-        url = f'https://buildkite.com/llvm-project/' \
-              f'{os.getenv("BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG")}/'\
-              f'builds/{os.getenv("BUILDKITE_TRIGGERED_FROM_BUILD_NUMBER")}'
-        print(f'Triggered from build {furl(url)}')
+    report.os = f'{os.getenv("BUILDKITE_AGENT_META_DATA_OS")}'
+    report.name = step_key
+    report.success = False
+    # Create report with failure in case something below fails.
+    with open(report_path, 'w') as f:
+        json.dump(report.__dict__, f, default=as_dict)
+    report.success = True
+    cmake = run_step('cmake', report, cmake_report)
+    if cmake.success:
+        ninja_all = run_step('ninja all', report, ninja_all_report)
+        if ninja_all.success:
+            run_step('ninja check-all', report, ninja_check_all_report)
+    if args.check_clang_tidy:
+        run_step('clang-tidy', report,
+                 lambda s, r: clang_tidy_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-tidy.ignore'), s, r))
+    if args.check_clang_format:
+        run_step('clang-format', report,
+                 lambda s, r: clang_format_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-format.ignore'), s, r))
     logging.debug(report)
-    success = True
     for s in report.steps:
-        mark = 'V'
-        if s['result'] == CheckResult.UNKNOWN:
-            mark = '?'
-        if s['result'] == CheckResult.FAILURE:
-            success = False
-            mark = 'X'
-        msg = s['message']
-        if len(msg):
-            msg = ': ' + msg
-        print(f'{mark} {s["title"]}{msg}')
-    # TODO: dump the report and deduplicate tests and other reports later (for multiple OS) in a separate step.
+        mark = 'OK '
+        if not s.success:
+            report.success = False
+            mark = 'FAIL '
+        msg = ''
+        if len(s.messages):
+            msg = ': ' + '\n '.join(s.messages)
+        print(f'{mark} {s.name}{msg}')
     ph_target_phid = os.getenv('ph_target_phid')
+    ph_buildable_diff = os.getenv('ph_buildable_diff')
     if ph_target_phid is not None:
-        build_url = f'https://reviews.llvm.org/harbormaster/build/{os.getenv("ph_build_id")}'
-        print(f'Reporting results to Phabricator build {furl(build_url)}')
-        phab.update_build_status(ph_buildable_diff, ph_target_phid, False, success, report.lint, report.unit)
+        phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), 'https://reviews.llvm.org/api/', False)
+        for u in report.unit:
+            u['engine'] = step_key
+        phabtalk.update_build_status(ph_buildable_diff, ph_target_phid, True, report.success, report.lint, report.unit)
         for a in report.artifacts:
             url = upload_file(a['dir'], a['file'])
             if url is not None:
-                maybe_add_url_artifact(phab, url, a['name'])
+                maybe_add_url_artifact(phabtalk, ph_target_phid, url, f'{a["name"]} (${step_key})')
     else:
         logging.warning('No phabricator phid is specified. Will not update the build status in Phabricator')
-    # TODO: add link to report issue on github
-    with open(os.path.join(artifacts_dir, 'step-timings.json'), 'w') as f:
-        f.write(json.dumps(timings))
+    with open(report_path, 'w') as f:
+        json.dump(report.__dict__, f, default=as_dict)
+    if not report.success:
+        print('Build completed with failures')
+        exit(1)


@@ -14,6 +14,7 @@
 # limitations under the License.

 import argparse
+import logging
 from enum import Enum
 from git import Repo
 import os
@@ -114,12 +115,14 @@ def _create_args(config: Configuration, llvm_enable_projects: str) -> List[str]:
     # enable sccache
     if 'SCCACHE_DIR' in os.environ:
+        logging.info("using sccache")
         arguments.extend([
             '-DCMAKE_C_COMPILER_LAUNCHER=sccache',
             '-DCMAKE_CXX_COMPILER_LAUNCHER=sccache',
         ])
     # enable ccache if the path is set in the environment
     elif 'CCACHE_PATH' in os.environ:
+        logging.info("using ccache")
         arguments.extend([
             '-D LLVM_CCACHE_BUILD=ON',
             '-D LLVM_CCACHE_DIR={}'.format(os.environ['CCACHE_PATH']),


@@ -14,23 +14,27 @@
 # limitations under the License.

 import argparse
-import os
 import logging
 import os
 from typing import Optional
 from lxml import etree
-from phabtalk.phabtalk import Report, CheckResult
+from phabtalk.phabtalk import Report, Step


-def run(test_results, report: Optional[Report]):
-    """Apply clang-format and return if no issues were found."""
+def run(working_dir: str, test_results: str, step: Optional[Step], report: Optional[Report]):
     if report is None:
         report = Report() # For debugging.
-    if not os.path.exists(test_results):
-        logging.warning(f'{test_results} not found')
-        report.add_step('clang-format', CheckResult.UNKNOWN, 'test report is not found')
+    if step is None:
+        step = Step()
+    path = os.path.join(working_dir, test_results)
+    if not os.path.exists(path):
+        logging.warning(f'{path} is not found')
+        step.success = False
+        step.messages.append(f'test report "{path}" is not found')
         return
+    report.add_artifact(working_dir, test_results, 'test results')
     success = True
-    root_node = etree.parse(test_results)
+    root_node = etree.parse(path)
     for test_case in root_node.xpath('//testcase'):
         test_result = 'pass'
         if test_case.find('failure') is not None:
@@ -50,21 +54,21 @@ def run(test_results, report: Optional[Report]):
         }
         report.unit.append(test_result)

-    msg = f'{report.test_stats["pass"]} tests passed, {report.test_stats["fail"]} failed and' \
+    msg = f'{report.test_stats["pass"]} tests passed, {report.test_stats["fail"]} failed and ' \
          f'{report.test_stats["skip"]} were skipped.\n'
-    if success:
-        report.add_step('test results', CheckResult.SUCCESS, msg)
-    else:
+    if not success:
+        step.success = False
         for test_case in report.unit:
             if test_case['result'] == 'fail':
                 msg += f'{test_case["namespace"]}/{test_case["name"]}\n'
-        report.add_step('unit tests', CheckResult.FAILURE, msg)
+    logging.debug(f'report: {report}')
+    logging.debug(f'step: {step}')


 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Processes results from xml report')
-    parser.add_argument('test_report', default='build/test-results.xml')
+    parser.add_argument('test-report', default='build/test-results.xml')
     parser.add_argument('--log-level', type=str, default='INFO')
     args = parser.parse_args()
     logging.basicConfig(level=args.log_level)
-    run(args.test_report, None)
+    run(os.getcwd(), args.test_report, None, None)
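The parser walks JUnit-style `<testcase>` elements and classifies each one by its child elements. A self-contained sketch of that traversal over an in-memory document (the XML content is invented, and the `skipped` element name is an assumption based on the `skip` counter in `Report.test_stats`):

```python
from lxml import etree

# Invented JUnit-style report, just to show the traversal.
xml = b"""<testsuites>
  <testsuite name="LLVM">
    <testcase classname="a.b" name="t1"/>
    <testcase classname="a.b" name="t2"><failure>boom</failure></testcase>
    <testcase classname="a.b" name="t3"><skipped/></testcase>
  </testsuite>
</testsuites>"""

stats = {'pass': 0, 'fail': 0, 'skip': 0}
for test_case in etree.fromstring(xml).xpath('//testcase'):
    result = 'pass'
    if test_case.find('failure') is not None:
        result = 'fail'
    elif test_case.find('skipped') is not None:  # element name assumed
        result = 'skip'
    stats[result] += 1
print(stats)  # {'pass': 1, 'fail': 1, 'skip': 1}
```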


@@ -17,7 +17,8 @@
 param(
     [string]$version = "latest",
-    [switch]$testing = $false
+    [switch]$testing = $false,
+    [string]$workdir = "D:\"
 )

 $NAME="agent-windows-buildkite"
@@ -28,26 +29,21 @@ Write-Output "y`n" | gcloud auth configure-docker
 Write-Output "Pulling new image..."
 docker pull ${IMAGE}
 Write-Output "Stopping old container..."
 docker stop ${NAME}
 docker rm ${NAME}
 Write-Output "Starting container..."
 if (${testing}) {
     docker run -it `
-        -v D:\:C:\ws `
+        -v ${workdir}:C:\ws `
         -v C:\credentials:C:\credentials `
-        -e BUILDKITE_AGENT_NAME=$env:computername `
         -e BUILDKITE_BUILD_PATH=C:\ws `
         --restart unless-stopped `
-        --name ${NAME} `
         ${IMAGE} powershell
 } else {
     docker run -d `
-        -v D:\:C:\ws `
+        -v ${workdir}:C:\ws `
         -v C:\credentials:C:\credentials `
-        -e BUILDKITE_AGENT_NAME=$env:computername `
         -e BUILDKITE_BUILD_PATH=C:\ws `
         --restart unless-stopped `
         --name ${NAME} `