1
0
Fork 0
mirror of https://we.phorge.it/source/phorge.git synced 2024-11-18 21:02:41 +01:00

Use PhutilRope as a buffer in Harbormaster BuildLogs

Summary:
Ref T10457. Currently, every `append()` call necessarily generates queries, and these queries are slightly inefficient if a large block of data is appended to a partial log (they do about twice as much work as they technically need to).

Use `PhutilRope` to buffer `append()` input so the logic is a little cleaner and we could add a rule like "flush logs no more than once every 500ms" later.

Test Plan:
  - Ran a build, saw logs.
  - Set chunk size very small, ran build, saw logs, verified small logs in database.

{F1137115}

Reviewers: chad

Reviewed By: chad

Maniphest Tasks: T10457

Differential Revision: https://secure.phabricator.com/D15375
This commit is contained in:
epriestley 2016-03-01 07:15:09 -08:00
parent cf0957451e
commit 0daa9ad987
2 changed files with 75 additions and 57 deletions

View file

@@ -24,7 +24,7 @@ final class HarbormasterSchemaSpec extends PhabricatorConfigSchemaSpec {
$this->buildRawSchema( $this->buildRawSchema(
id(new HarbormasterBuildable())->getApplicationName(), id(new HarbormasterBuildable())->getApplicationName(),
'harbormaster_buildlogchunk', HarbormasterBuildLog::CHUNK_TABLE,
array( array(
'id' => 'auto', 'id' => 'auto',
'logID' => 'id', 'logID' => 'id',

View file

@@ -11,16 +11,23 @@ final class HarbormasterBuildLog
protected $live; protected $live;
private $buildTarget = self::ATTACHABLE; private $buildTarget = self::ATTACHABLE;
private $rope;
private $isOpen;
const CHUNK_BYTE_LIMIT = 102400; const CHUNK_BYTE_LIMIT = 102400;
const CHUNK_TABLE = 'harbormaster_buildlogchunk';
/** /**
* The log is encoded as plain text. * The log is encoded as plain text.
*/ */
const ENCODING_TEXT = 'text'; const ENCODING_TEXT = 'text';
public function __construct() {
$this->rope = new PhutilRope();
}
public function __destruct() { public function __destruct() {
if ($this->getLive()) { if ($this->isOpen) {
$this->closeBuildLog(); $this->closeBuildLog();
} }
} }
@@ -35,17 +42,19 @@ final class HarbormasterBuildLog
} }
public function openBuildLog() { public function openBuildLog() {
if ($this->getLive()) { if ($this->isOpen) {
throw new Exception(pht('This build log is already open!')); throw new Exception(pht('This build log is already open!'));
} }
$this->isOpen = true;
return $this return $this
->setLive(1) ->setLive(1)
->save(); ->save();
} }
public function closeBuildLog() { public function closeBuildLog() {
if (!$this->getLive()) { if (!$this->isOpen) {
throw new Exception(pht('This build log is not open!')); throw new Exception(pht('This build log is not open!'));
} }
@@ -108,63 +117,72 @@ final class HarbormasterBuildLog
} }
$content = (string)$content; $content = (string)$content;
if (!strlen($content)) {
return;
}
// If the length of the content is greater than the chunk size limit, $this->rope->append($content);
// then we can never fit the content in a single record. We need to $this->flush();
// split our content out and call append on it for as many parts as there }
// are to the content.
if (strlen($content) > self::CHUNK_BYTE_LIMIT) { private function flush() {
$current = $content;
while (strlen($current) > self::CHUNK_BYTE_LIMIT) { // TODO: Maybe don't flush more than a couple of times per second. If a
$part = substr($current, 0, self::CHUNK_BYTE_LIMIT); // caller writes a single character over and over again, we'll currently
$current = substr($current, self::CHUNK_BYTE_LIMIT); // spend a lot of time flushing that.
$this->append($part);
$chunk_table = self::CHUNK_TABLE;
$chunk_limit = self::CHUNK_BYTE_LIMIT;
$rope = $this->rope;
while (true) {
$length = $rope->getByteLength();
if (!$length) {
break;
} }
$this->append($current);
return;
}
// Retrieve the size of last chunk from the DB for this log. If the $conn_w = $this->establishConnection('w');
// chunk is over 500K, then we need to create a new log entry. $tail = queryfx_one(
$conn = $this->establishConnection('w'); $conn_w,
$result = queryfx_all( 'SELECT id, size, encoding FROM %T WHERE logID = %d
$conn, ORDER BY id DESC LIMIT 1',
'SELECT id, size, encoding '. $chunk_table,
'FROM harbormaster_buildlogchunk '. $this->getID());
'WHERE logID = %d '.
'ORDER BY id DESC '.
'LIMIT 1',
$this->getID());
if (count($result) === 0 ||
$result[0]['size'] + strlen($content) > self::CHUNK_BYTE_LIMIT ||
$result[0]['encoding'] !== self::ENCODING_TEXT) {
// We must insert a new chunk because the data we are appending $can_append =
// won't fit into the existing one, or we don't have any existing ($tail) &&
// chunk data. ($tail['encoding'] == self::ENCODING_TEXT) &&
queryfx( ($tail['size'] < $chunk_limit);
$conn, if ($can_append) {
'INSERT INTO harbormaster_buildlogchunk '. $append_id = $tail['id'];
'(logID, encoding, size, chunk) '. $prefix_size = $tail['size'];
'VALUES '. } else {
'(%d, %s, %d, %B)', $append_id = null;
$this->getID(), $prefix_size = 0;
self::ENCODING_TEXT, }
strlen($content),
$content); $data_limit = ($chunk_limit - $prefix_size);
} else { $append_data = $rope->getPrefixBytes($data_limit);
// We have a resulting record that we can append our content onto. $data_size = strlen($append_data);
queryfx(
$conn, if ($append_id) {
'UPDATE harbormaster_buildlogchunk '. queryfx(
'SET chunk = CONCAT(chunk, %B), size = LENGTH(CONCAT(chunk, %B))'. $conn_w,
'WHERE id = %d', 'UPDATE %T SET chunk = CONCAT(chunk, %B), size = %d WHERE id = %d',
$content, $chunk_table,
$content, $append_data,
$result[0]['id']); $prefix_size + $data_size,
$append_id);
} else {
queryfx(
$conn_w,
'INSERT INTO %T (logID, encoding, size, chunk)
VALUES (%d, %s, %d, %B)',
$chunk_table,
$this->getID(),
self::ENCODING_TEXT,
$data_size,
$append_data);
}
$rope->removeBytesFromHead(strlen($append_data));
} }
} }