llvm-premerge-checks/scripts/metrics/analyze_jobs.ipynb
Mikhail Goncharov c350101a23 Allow projects to define custom steps
Sub-projects can now define their own checks and skip the "generic"
ones.

To properly accommodate affected projects that might not have special
treatment we:

1. extend the set of affected projects with dependent ones (e.g. add
'libc' if 'clang' was modified)

2. add custom steps for projects that define their own workflow. At the
moment only libcxx does, and it has a custom trigger pipeline, so this
is a no-op.

3. add the dependencies of the remaining projects and run the generic
checks on them.

To illustrate: imagine that we have a dependency graph:

llvm -> clang -> openmp

and only clang was modified in a diff; clang also defines its own
checks. Thus the list of affected projects will be [clang, openmp].
After adding custom checks and removing their projects: [openmp].
After adding dependencies: [llvm, clang, openmp]. Generic Linux /
Windows checks will be run on those 3 projects.
So, as you can see, in some scenarios projects with custom checks will
still go through generic checks.
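
To make this concrete, here is an illustrative sketch of the expansion
(Python; the dependency maps, the CUSTOM set, and the helper name are
made up for this example, not the actual configuration):

    DEPENDENTS = {'llvm': ['clang'], 'clang': ['openmp']}
    DEPENDS_ON = {'clang': ['llvm'], 'openmp': ['clang']}
    CUSTOM = {'clang'}  # projects defining their own checks

    def generic_projects(modified):
        # 1. extend with transitively dependent projects
        affected = set(modified)
        work = list(affected)
        while work:
            for p in DEPENDENTS.get(work.pop(), []):
                if p not in affected:
                    affected.add(p)
                    work.append(p)
        # 2. projects with custom steps run their own pipelines
        remaining = affected - CUSTOM
        # 3. pull dependencies back in so generic checks can build
        closure = set(remaining)
        work = list(remaining)
        while work:
            for p in DEPENDS_ON.get(work.pop(), []):
                if p not in closure:
                    closure.add(p)
                    work.append(p)
        return closure

    # generic_projects({'clang'}) == {'llvm', 'clang', 'openmp'}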

Note that clang-format and clang-tidy are run only as part of the
"generic" checks at the moment.
2020-10-02 16:19:57 +02:00


{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## License\n",
"\n",
"Copyright 2020 Google LLC\n",
"\n",
"Licensed under the the Apache License v2.0 with LLVM Exceptions (the \"License\");\n",
"you may not use this file except in compliance with the License.\n",
"You may obtain a copy of the License at\n",
"\n",
"https://llvm.org/LICENSE.txt\n",
"\n",
"Unless required by applicable law or agreed to in writing, software\n",
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"See the License for the specific language governing permissions and\n",
"limitations under the License."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"import os\n",
"import logging\n",
"import argparse\n",
"import json\n",
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import seaborn as sns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cache_file = \"cache.json\"\n",
"token = f'Bearer {os.getenv(\"BUILDKITE_API_TOKEN\")}'\n",
"builds = []"
]
},
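{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sanity check (added sketch, not part of the original flow):\n",
"# fail fast with a clear message if BUILDKITE_API_TOKEN is not exported,\n",
"# instead of collecting 401 responses from every request below.\n",
"assert os.getenv('BUILDKITE_API_TOKEN'), 'please export BUILDKITE_API_TOKEN'"
]
},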
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"if os.path.exists(cache_file):\n",
" with open(cache_file) as f:\n",
" builds = json.load(f)\n",
" print(f'loaded {len(builds)} entries') "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"# load new jobs from Buildkite API\n",
"max_pages = 2000\n",
"if True:\n",
" existing = set()\n",
" for j in builds:\n",
" existing.add(j['id'])\n",
"\n",
" # uncomment to reset\n",
"# builds = []\n",
"# existing = set()\n",
" page = 1\n",
" stop = False\n",
" while page <= max_pages:\n",
" print('loading page', page)\n",
" re = requests.get('https://api.buildkite.com/v2/organizations/llvm-project/builds',\n",
" params={'page': page},\n",
" headers={'Authorization': token})\n",
" if re.status_code != 200:\n",
" print('response status', re.status_code, re)\n",
" break\n",
" x = re.json()\n",
" if x == []:\n",
" print('empty response')\n",
" break\n",
" for j in x:\n",
" if j['id'] in existing:\n",
"# print('found existing job', j['id'])\n",
" # load some more pages as some of them might be running before and wasn't added. \n",
" max_pages = min(page + 5, max_pages)\n",
" else:\n",
" # skip running jobs \n",
" if (j['state'] == 'running') or (j['state'] == 'scheduled'):\n",
" print(j['web_url'], 'is', j['state'], ', skipping')\n",
" continue\n",
" builds.append(j)\n",
" page += 1\n",
" print(len(builds), 'jobs in total') \n",
" with open(cache_file, 'w') as f:\n",
" json.dump(builds, f)\n",
" print(f'saved {len(builds)} entries')"
]
},
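{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sketch: summarize the cached builds by state to sanity-check\n",
"# what the fetch above collected ('running'/'scheduled' were skipped).\n",
"pd.Series([b['state'] for b in builds]).value_counts()"
]
},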
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"d = {\n",
" 'id': [],\n",
" 'number': [],\n",
" 'pipeline': [],\n",
"}\n",
"\n",
"jobs = {\n",
" 'pipeline': [],\n",
" 'name': [],\n",
" 'step_key': [],\n",
" 'state': [],\n",
" 'exit_status': [],\n",
" 'agent_id': [],\n",
" \n",
" 'agent_name': [],\n",
" 'runnable_at': [],\n",
" 'started_at': [],\n",
" 'wait_duration': [],\n",
" 'finished_at': [],\n",
" 'run_duration': [],\n",
"}\n",
"\n",
"sec = np.timedelta64(1, 's')\n",
"\n",
"for b in builds:\n",
" env = b['env']\n",
"# if 'ph_windows_agents' not in env:\n",
"# continue\n",
"# if 'scripts_branch' not in env:\n",
"# continue\n",
" d['id'].append(b['id'])\n",
" d['number'].append(b['number'])\n",
" d['pipeline'].append(b['pipeline']['slug'])\n",
" for x in b['jobs']:\n",
" if x['state'] in ['waiting_failed', 'canceled', 'skipped', 'broken']:\n",
" continue\n",
" try:\n",
" jobs['pipeline'].append(b['pipeline']['slug'])\n",
" jobs['name'].append(x['name'])\n",
" jobs['step_key'].append(x['step_key'] if 'step_key' in x else '')\n",
" jobs['state'].append(x['state'] )\n",
" jobs['exit_status'].append(x['exit_status'] if 'exit_status' in x else -1)\n",
" jobs['agent_id'].append(x['agent']['id'] if 'agent' in x else '')\n",
" jobs['agent_name'].append(x['agent']['name'] if 'agent' in x else '')\n",
" runnable = np.datetime64(x['runnable_at'].replace('Z', ''))\n",
" started = np.datetime64(x['started_at'].replace('Z', ''))\n",
" finished = np.datetime64(x['finished_at'].replace('Z', ''))\n",
" jobs['runnable_at'].append(runnable)\n",
" jobs['started_at'].append(started)\n",
" jobs['wait_duration'].append((started - runnable) / sec)\n",
" jobs['finished_at'].append(finished)\n",
" jobs['run_duration'].append((finished - started) / sec)\n",
" except Exception as e:\n",
" print(x)\n",
" raise e \n",
"jobs = pd.DataFrame(jobs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"jobs.pipeline.unique()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"jobs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# ds = jobs[jobs['pipeline'] == 'llvm-master-build'][jobs['step_key'] == 'windows'][jobs['state']=='passed'][~jobs['agent_name'].str.startswith('buildkite-')][jobs['started_at'] > np.datetime64('2020-01-22')]\n",
"ds = jobs[jobs['pipeline'] == 'llvm-master-build'][jobs['step_key'] == 'windows'][jobs['state']=='passed'][~jobs['agent_name'].str.startswith('buildkite-')][jobs['started_at'] > np.datetime64('2020-01-22')]\n",
"ds = ds.drop_duplicates()\n",
"# remove one slowest run (repo checkout)\n",
"# t = ds.loc[ds.groupby([\"agent_name\"])[\"run_duration\"].idxmax()]\n",
"# ds = pd.concat([ds, t]).drop_duplicates(keep=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"fig, ax = plt.subplots(figsize=(10,10)) # size of the plot (width, height)\n",
"\n",
"sns.swarmplot(\n",
" ax=ax,\n",
" x='run_duration',\n",
" y='agent_name',\n",
" split=True,\n",
" data=ds)\n",
"\n",
"plt.xlim(0, None)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"t = pd.pivot_table(ds, values=['run_duration'], index=['agent_name'],\n",
" aggfunc=[np.median, np.mean, np.std, np.count_nonzero])\n",
"t = t.reindex(t.sort_values(by=('median', 'run_duration'), ascending=False).index)\n",
"t"
]
},
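{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sketch: wait_duration (seconds from runnable_at to started_at)\n",
"# is computed above but never plotted; a quick look at queue times per\n",
"# pipeline. The bin count is an arbitrary choice.\n",
"jobs.hist(column='wait_duration', by='pipeline', bins=50, figsize=(10, 6))\n",
"plt.show()"
]
},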
{
"cell_type": "markdown",
"metadata": {},
"source": [
"```\n",
"// TODO: try with w64 w/o AV (temporary disabled)\n",
"w1 - 16 - non ssd 100 + ssd\n",
"w1a - 16 - non ssd 100 + ssd - AV (temp)\n",
"w4a - 16 - non ssd 100 + ssd - AV\n",
"w16-1 - 16 - non sdd 200 + ssd\n",
"w32-1 - 32 - non ssd 100 + ssd\n",
"w32-2 - 32 - non ssd 100 + ssd - AV\n",
"w32-2d - 32 - non ssd 200 + ssd\n",
"w64 - 64 - ssd 200 + ssd \n",
"w64a - 64 - ssd 200 + ssd - AV (temp)\n",
"wh63-3 - 64 low ram - non ssd 100 + ssd - AV\n",
"\n",
"buildkite-windows-16n1-exp\n",
"gcloud beta compute instances create \"${NAME}\" \\\n",
" --project=\"${GCP_PROJECT}\" \\\n",
" --zone=\"${GCP_ZONE}\" \\\n",
" --machine-type=n1-standard-16 \\\n",
" --local-ssd=device-name=local-ssd-0 \\\n",
" --image=windows-server-2019-dc-for-containers-v20200714 \\\n",
" --image-project=windows-cloud \\\n",
" --boot-disk-size=200GB --boot-disk-type=pd-ssd\n",
"\n",
"buildkite-32-experimental-1\n",
"gcloud beta compute instances create \"${NAME}\" \\\n",
" --project=\"${GCP_PROJECT}\" \\\n",
" --zone=\"${GCP_ZONE}\" \\\n",
" --machine-type=n1-standard-32 \\\n",
" --local-ssd=device-name=local-ssd-0 \\\n",
" --image=windows-server-2019-dc-for-containers-v20200714 \\\n",
" --image-project=windows-cloud \\\n",
" --boot-disk-size=200GB --boot-disk-type=pd-ssd\n",
"\n",
" \n",
"buildkite-16c2-exp\n",
"gcloud beta compute instances create \"${NAME}\" \\\n",
" --project=\"${GCP_PROJECT}\" \\\n",
" --zone=\"${GCP_ZONE}\" \\\n",
" --machine-type=c2-standard-16 \\\n",
" --local-ssd=device-name=local-ssd-0 \\\n",
" --local-ssd=interface=SCSI \\\n",
" --image=windows-server-2019-dc-for-containers-v20200714 \\\n",
" --image-project=windows-cloud \\\n",
" --boot-disk-size=200GB --boot-disk-type=pd-ssd\n",
" \n",
"...a: as is D:/ is a workdir (ealy run with full cache build)\n",
"...b: C:/ws as a workdir, no cache\n",
"...c: c:/ws as workdir, no av\n",
"```\n",
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.5rc1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}