These go back to the PHP 4 era and there's no reason they need to stay this way. Currently, error handling acts up whenever these methods throw an error (see the caller-side sketch after the listing).
<?php

use Aws\S3\S3Client;

class Amazon_S3StorageBackend extends StorageBackend
{
    private $s3Client;
    private $proxyHost;

    public function __construct($securityCredentials)
    {
        $this->setBucket($securityCredentials['bucket']);
        $this->setAccessKey($securityCredentials['api_key']);
        $this->setSecretKey($securityCredentials['api_key_secret']);

        $s3Options = array(
            'key' => $securityCredentials['api_key'],
            'secret' => $securityCredentials['api_key_secret'],
            'region' => $securityCredentials['region']
        );
        if (array_key_exists("proxy_host", $securityCredentials)) {
            $s3Options = array_merge($s3Options, array(
                //'base_url' => "http://" . $securityCredentials['proxy_host'],
                'base_url' => "http://s3.amazonaws.com",
                'scheme' => "http",
                //'force_path_style' => true,
                'signature' => 'v4'
            ));
            $this->proxyHost = $securityCredentials['proxy_host'];
        }

        $this->s3Client = S3Client::factory($s3Options);
    }

    public function getAbsoluteFilePath($resourceId)
    {
        return $this->s3Client->getObjectUrl($this->getBucket(), $resourceId);
    }

    /** Returns a signed download URL from Amazon S3, expiring in 60 minutes */
    public function getDownloadURLs($resourceId, $contentDispositionFilename)
    {
        $urls = array();

        $s3args = array('ResponseContentDisposition' => 'attachment; filename="' . urlencode($contentDispositionFilename) . '"');
        $signedS3Url = $this->s3Client->getObjectUrl($this->getBucket(), $resourceId, '+60 minutes', $s3args);

        //If we're using the proxy cache, we need to modify the request URL after it has
        //been generated by the above. (The request signature must be for amazonaws.com,
        //not our proxy, since the proxy translates the host back to amazonaws.com)
        if ($this->proxyHost) {
            $p = parse_url($signedS3Url);
            $p["host"] = $this->getBucket() . "." . $this->proxyHost;
            $p["scheme"] = "http";
            //If the path contains the bucket name (which is the case with HTTPS requests to Amazon),
            //we need to strip that part out, since we're forcing everything to HTTP. The Amazon S3
            //URL convention for HTTP is to prepend the bucket name to the hostname instead of having
            //it in the path.
            //eg. http://bucket.s3.amazonaws.com/ instead of https://s3.amazonaws.com/bucket/
            if (strpos($p["path"], $this->getBucket()) === 1) {
                $p["path"] = substr($p["path"], 1 + strlen($this->getBucket()));
            }
            $proxyUrl = $p["scheme"] . "://" . $p["host"] . $p["path"] . "?" . $p["query"];
            //Add this proxy cache URL to the list of download URLs.
            array_push($urls, $proxyUrl);
        }

        //Add the direct S3 URL to the list (as a fallback)
        array_push($urls, $signedS3Url);

        //http_build_url() would be nice to use but it requires pecl_http :-(

        //Logging::info($url);

        return $urls;
    }

    public function deletePhysicalFile($resourceId)
    {
        $bucket = $this->getBucket();

        if ($this->s3Client->doesObjectExist($bucket, $resourceId)) {
            $result = $this->s3Client->deleteObject(array(
                'Bucket' => $bucket,
                'Key' => $resourceId,
            ));
        } else {
            throw new Exception("ERROR: Could not locate file to delete.");
        }
    }

    // This should only be called for station termination.
    // We are only deleting the file objects from Amazon S3.
    // Records in the database will remain in case we have to restore the files.
    public function deleteAllCloudFileObjects()
    {
        $bucket = $this->getBucket();
        $prefix = $this->getFilePrefix();

        //Add a trailing slash in for safety
        //(so that deleting /13/413 doesn't delete /13/41313 !)
        $prefix = $prefix . "/";

        //Do a bunch of safety checks to ensure we don't delete more than we intended.
        //A valid prefix looks like "12/4312" for instance 4312.
        $slashPos = strpos($prefix, "/");
        if (($slashPos === FALSE) || //Slash must exist
            ($slashPos != 2) || //Slash must be the third character
            (strlen($prefix) <= $slashPos) || //String must have something after the first slash
            (substr_count($prefix, "/") != 2)) //String must have two slashes
        {
            throw new Exception("Invalid file prefix in " . __FUNCTION__);
        }
        $this->s3Client->deleteMatchingObjects($bucket, $prefix);
    }

    public function getFilePrefix()
    {
        $filePrefix = '';
        // Only prefix files on S3 when billing is active, since SaaS customers share an S3 bucket.
        // I'm not sure why the choice was made to put everything into one bucket.
        // We might refactor this to use a bucket per customer if we revisit S3.
        if (LIBRETIME_ENABLE_BILLING === true) {
            $hostingId = Billing::getClientInstanceId();
            $filePrefix = substr($hostingId, -2)."/".$hostingId;
        }
        return $filePrefix;
    }
}
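For context, this is roughly the kind of guarding a caller ends up needing around this backend: the bare `Exception` thrown by `deletePhysicalFile()` and `deleteAllCloudFileObjects()`, plus whatever the AWS SDK v2 `S3Client` raises on request failures. This is only a minimal sketch; the credentials array, the resource id, and the call site are made up for illustration and are not taken from the file above.

<?php

use Aws\S3\Exception\S3Exception;

// Hypothetical caller -- not part of Amazon_S3StorageBackend.
$securityCredentials = array(
    'bucket'         => 'example-bucket',
    'api_key'        => 'AKIA...',
    'api_key_secret' => '...',
    'region'         => 'us-east-1',
);

try {
    // Even construction can fail: S3Client::factory() rejects malformed options,
    // so the whole backend lifetime has to sit inside the try block.
    $backend = new Amazon_S3StorageBackend($securityCredentials);
    $backend->deletePhysicalFile('12/4312/some-file.mp3');
} catch (S3Exception $e) {
    // AWS SDK v2 request failures (access denied, missing bucket, etc.)
    error_log("S3 request failed: " . $e->getMessage());
} catch (Exception $e) {
    // Covers the plain Exception thrown when the object can't be found.
    error_log($e->getMessage());
}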