Merge branch 'master' into dragonslaying

ryan 2018-11-21 10:42:05 -06:00
commit fd5587cd81
47 changed files with 402 additions and 862 deletions

.gitignore
View File

@ -14,3 +14,4 @@ composer.phar
VERSION
airtime_mvc/tests/log/*.log
.vagrant/
.DS_Store

View File

@ -2,6 +2,7 @@ dist: trusty
language: php
php:
# the latest and greatest, has some issues that are excluded below in matrix.allow_failures
- 7.2
- 7.1
# the 7.0 build demonstrates that everything is basically ok for 7.0, users might want to wait for 7.1 to run it
- 7.0
@ -24,6 +25,9 @@ matrix:
# there are currently some testing issues with DateTime precision on 7.1
- env: PYTHON=false
php: 7.1
# there are some issues with phpunit, as well as some deep within zf1, on 7.2
- env: PYTHON=false
php: 7.2
exclude:
# by excluding all of python we make way to just run python tests in one separate instance
- env: PYTHON=true

Vagrantfile
View File

@ -38,11 +38,17 @@ Vagrant.configure("2") do |config|
installer_args="--force --in-place --verbose --postgres --apache --icecast "
# define all the OS boxes we support
config.vm.define "ubuntu-bionic" do |os|
os.vm.box = "bento/ubuntu-18.04"
provision_libretime(os, "ubuntu.sh", installer_args)
end
config.vm.define "ubuntu-xenial" do |os|
os.vm.box = "bento/ubuntu-16.04"
provision_libretime(os, "ubuntu.sh", installer_args)
end
config.vm.define "ubuntu-trusty" do |os|
STDERR.puts 'WARNING: The "ubuntu-trusty" option is deprecated. Please migrate to "ubuntu-bionic".'
STDERR.puts
os.vm.box = "bento/ubuntu-14.04"
provision_libretime(os, "ubuntu.sh", installer_args)
end

View File

@ -1,131 +0,0 @@
<?php
use Aws\S3\S3Client;
class Amazon_S3StorageBackend extends StorageBackend
{
private $s3Client;
private $proxyHost;
public function __construct($securityCredentials)
{
$this->setBucket($securityCredentials['bucket']);
$this->setAccessKey($securityCredentials['api_key']);
$this->setSecretKey($securityCredentials['api_key_secret']);
$s3Options = array(
'key' => $securityCredentials['api_key'],
'secret' => $securityCredentials['api_key_secret'],
'region' => $securityCredentials['region']
);
if (array_key_exists("proxy_host", $securityCredentials)) {
$s3Options = array_merge($s3Options, array(
//'base_url' => "http://" . $securityCredentials['proxy_host'],
'base_url' => "http://s3.amazonaws.com",
'scheme' => "http",
//'force_path_style' => true,
'signature' => 'v4'
));
$this->proxyHost = $securityCredentials['proxy_host'];
}
$this->s3Client = S3Client::factory($s3Options);
}
public function getAbsoluteFilePath($resourceId)
{
return $this->s3Client->getObjectUrl($this->getBucket(), $resourceId);
}
/** Returns a signed download URL from Amazon S3, expiring in 60 minutes */
public function getDownloadURLs($resourceId, $contentDispositionFilename)
{
$urls = array();
$s3args = array('ResponseContentDisposition' => 'attachment; filename="' . urlencode($contentDispositionFilename) . '"');
$signedS3Url = $this->s3Client->getObjectUrl($this->getBucket(), $resourceId, '+60 minutes', $s3args);
//If we're using the proxy cache, we need to modify the request URL after it has
//been generated by the above. (The request signature must be for amazonaws.com,
//not our proxy, since the proxy translates the host back to amazonaws.com)
if ($this->proxyHost) {
$p = parse_url($signedS3Url);
$p["host"] = $this->getBucket() . "." . $this->proxyHost;
$p["scheme"] = "http";
//If the path contains the bucket name (which is the case with HTTPS requests to Amazon),
//we need to strip that part out, since we're forcing everything to HTTP. The Amazon S3
//URL convention for HTTP is to prepend the bucket name to the hostname instead of having
//it in the path.
//eg. http://bucket.s3.amazonaws.com/ instead of https://s3.amazonaws.com/bucket/
if (strpos($p["path"], $this->getBucket()) == 1) {
$p["path"] = substr($p["path"], 1 + strlen($this->getBucket()));
}
$proxyUrl = $p["scheme"] . "://" . $p["host"] . $p["path"] . "?" . $p["query"];
//Add this proxy cache URL to the list of download URLs.
array_push($urls, $proxyUrl);
}
//Add the direct S3 URL to the list (as a fallback)
array_push($urls, $signedS3Url);
//http_build_url() would be nice to use but it requires pecl_http :-(
//Logging::info($url);
return $urls;
}
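
The host swap and path rewrite above are subtle; here is a minimal sketch of the same transformation, in Python for brevity (the bucket and proxy host names are illustrative, not LibreTime's):

```python
from urllib.parse import urlparse

def rewrite_for_proxy(signed_url, bucket, proxy_host):
    """Rewrite a signed S3 URL to point at a caching proxy, as the PHP
    above does. The signature stays valid because the proxy translates
    the host back to amazonaws.com before forwarding the request."""
    p = urlparse(signed_url)
    path = p.path
    # HTTPS-style URLs carry the bucket in the path
    # (https://s3.amazonaws.com/bucket/key); strip it, because the HTTP
    # convention prepends the bucket to the hostname instead.
    if path.startswith("/%s/" % bucket):
        path = path[len(bucket) + 1:]
    return "http://%s.%s%s?%s" % (bucket, proxy_host, path, p.query)

# Hypothetical example:
# rewrite_for_proxy("https://s3.amazonaws.com/mybucket/12/4312/a.mp3?sig=x",
#                   "mybucket", "proxy.example.com")
# -> "http://mybucket.proxy.example.com/12/4312/a.mp3?sig=x"
```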
public function deletePhysicalFile($resourceId)
{
$bucket = $this->getBucket();
if ($this->s3Client->doesObjectExist($bucket, $resourceId)) {
$result = $this->s3Client->deleteObject(array(
'Bucket' => $bucket,
'Key' => $resourceId,
));
} else {
throw new Exception("ERROR: Could not locate file to delete.");
}
}
// This should only be called for station termination.
// We are only deleting the file objects from Amazon S3.
// Records in the database will remain in case we have to restore the files.
public function deleteAllCloudFileObjects()
{
$bucket = $this->getBucket();
$prefix = $this->getFilePrefix();
//Add a trailing slash in for safety
//(so that deleting /13/413 doesn't delete /13/41313 !)
$prefix = $prefix . "/";
//Do a bunch of safety checks to ensure we don't delete more than we intended.
//A valid prefix is like "12/4312" for instance 4312.
$slashPos = strpos($prefix, "/");
if (($slashPos === FALSE) || //Slash must exist
($slashPos != 2) || //Slash must be the third character
(strlen($prefix) <= $slashPos) || //String must have something after the first slash
(substr_count($prefix, "/") != 2)) //String must have two slashes
{
throw new Exception("Invalid file prefix in " . __FUNCTION__);
}
$this->s3Client->deleteMatchingObjects($bucket, $prefix);
}
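
The guard conditions read more clearly spelled out; a small sketch (not the project's code) of what the check above accepts, assuming the same "NN/instanceid/" prefix shape:

```python
def is_safe_prefix(prefix):
    """Mirror the safety check above: the prefix must look like
    '12/4312/' -- a slash as the third character and exactly two
    slashes overall (the trailing one prevents '13/413' from also
    matching '13/41313')."""
    slash = prefix.find("/")
    return (slash == 2                   # slash is the third character
            and len(prefix) > slash      # something follows the slash
            and prefix.count("/") == 2)  # exactly two slashes in total

assert is_safe_prefix("12/4312/")
assert not is_safe_prefix("/4312/")  # no instance prefix before the slash
assert not is_safe_prefix("12/")     # missing the instance id segment
```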
public function getFilePrefix()
{
$filePrefix = '';
// only prefix files on S3 when billing is active, since SaaS customers share an S3 bucket
// I'm not sure why the choice was made to put everything into one bucket
// We might refactor this to use a bucket per customer if we revisit S3
if (LIBRETIME_ENABLE_BILLING === true) {
$hostingId = Billing::getClientInstanceId();
$filePrefix = substr($hostingId, -2)."/".$hostingId;
}
return $filePrefix;
}
}

View File

@ -18,12 +18,10 @@ class ProxyStorageBackend extends StorageBackend
{
$CC_CONFIG = Config::getConfig();
//The storage backend in the airtime.conf directly corresponds to
//the name of the class that implements it (eg. Amazon_S3), so we
//can easily create the right backend object dynamically:
if ($storageBackend == "amazon_S3") {
$this->storageBackend = new Amazon_S3StorageBackend($CC_CONFIG["amazon_S3"]);
} else if ($storageBackend == "file") {
// The storage backend in the airtime.conf directly corresponds to
// the name of the class that implements it, so we can create the
// right backend object dynamically:
if ($storageBackend == "file") {
$this->storageBackend = new FileStorageBackend();
} else {
$this->storageBackend = new $storageBackend($CC_CONFIG[$storageBackend]);
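
The pattern the comment describes, where a configuration value names the implementing class, looks roughly like this; a Python sketch with illustrative class names, not LibreTime's API:

```python
class FileStorageBackend:
    def __init__(self, config=None):
        self.config = config or {}

# Registry mapping the configured backend name to its class.
BACKENDS = {"file": FileStorageBackend}

def make_backend(name, cc_config):
    """Instantiate the storage backend named in the config, passing it
    its own config section, as the PHP constructor above does."""
    if name not in BACKENDS:
        raise ValueError("unknown storage backend: %s" % name)
    return BACKENDS[name](cc_config.get(name))

backend = make_backend("file", {"file": {"path": "/srv/airtime"}})
```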

View File

@ -26,8 +26,16 @@ class Application_Common_FileIO
}
//Note that $size is allowed to be zero. If that's the case, it means we don't
//know the filesize, and we just won't send the Content-Length header.
if ($size < 0) {
//know the filesize, and we need to figure one out so modern browsers don't get
//confused. This should only affect files imported by legacy upstream since
//media monitor did not always set the proper size in the database but analyzer
//seems to always have a value for this.
if ($size === 0) {
$fstats = fstat($fm);
$size = $fstats['size'];
}
if ($size <= 0) {
throw new Exception("Invalid file size returned for file at $filePath");
}
@ -56,11 +64,9 @@ class Application_Common_FileIO
header('Cache-Control: public, must-revalidate, max-age=0');
header('Pragma: no-cache');
header('Accept-Ranges: bytes');
if ($size > 0) {
header('Content-Length:' . (($end - $begin) + 1));
if (isset($_SERVER['HTTP_RANGE'])) {
header("Content-Range: bytes $begin-$end/$size");
}
header('Content-Length:' . (($end - $begin) + 1));
if (isset($_SERVER['HTTP_RANGE'])) {
header("Content-Range: bytes $begin-$end/$size");
}
//We can have multiple levels of output buffering. Need to
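
Byte ranges are inclusive on both ends, which is why the Content-Length is `(end - begin) + 1`; a quick sketch of the header arithmetic for a hypothetical 1000-byte file:

```python
def range_headers(begin, end, size):
    """Headers for a partial-content response; begin and end are
    inclusive byte offsets, as in the PHP above."""
    return {
        "Content-Length": str((end - begin) + 1),
        "Content-Range": "bytes %d-%d/%d" % (begin, end, size),
    }

# "Range: bytes=0-499" against a 1000-byte file yields 500 bytes:
assert range_headers(0, 499, 1000) == {
    "Content-Length": "500",
    "Content-Range": "bytes 0-499/1000",
}
```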

View File

@ -49,13 +49,6 @@ class Config {
$CC_CONFIG['staticBaseDir'] = '/';
}
$CC_CONFIG['amazon_S3'] = array(
'provider' => $values['amazon_S3']['provider'],
'bucket' => $values['amazon_S3']['bucket'],
'api_key' => $values['amazon_S3']['api_key'],
'api_key_secret' => $values['amazon_S3']['api_key_secret']
);
// Tells us where file uploads will be uploaded to.
// It will either be set to a cloud storage backend or local file storage.
$CC_CONFIG["current_backend"] = $values["current_backend"]["storage_backend"];

View File

@ -16,7 +16,8 @@ $rabbitmq = $externalServices["rabbitmq"];
$pypo = $externalServices["pypo"];
$liquidsoap = $externalServices["liquidsoap"];
$analyzer = $externalServices["analyzer"];
$celery = $externalServices['celery'];
$r1 = array_reduce($phpDependencies, "booleanReduce", true);
$r2 = array_reduce($externalServices, "booleanReduce", true);
@ -222,6 +223,26 @@ $result = $r1 && $r2;
?>
</td>
</tr>
<tr class="<?=$celery ? 'success' : 'danger';?>">
<td class="component">
Celery
</td>
<td class="description">
Airtime Celery Task service
</td>
<td class="solution <?php if ($celery) {echo 'check';?>">
<?php
} else {
?>">
Check that the airtime-celery service is installed correctly in <code>/etc/init.d</code>,
and ensure that it's running with
<br/><code>initctl list | grep airtime-celery</code><br/>
If not, try running <code>sudo service airtime-celery restart</code>
<?php
}
?>
</td>
</tr>
</tbody>
</table>
</div>

View File

@ -927,28 +927,6 @@ class ApiController extends Zend_Controller_Action
Logging::info("Registered Component: ".$component."@".$remoteAddr);
Application_Model_ServiceRegister::Register($component, $remoteAddr);
//send ip, subdomain
if ($component == "pypo"){
$split = explode('.', $_SERVER['SERVER_NAME']);
$subdomain = array();
foreach ($split as $value) {
if ($value == 'airtime') {
break;
} else {
$subdomain[] = $value;
}
}
if (count($subdomain) > 0){
$subDomain = implode('.',$subdomain);
$md = array();
$md["sub_domain"] = $subDomain;
$md["pypo_ip"] = $remoteAddr;
Application_Model_RabbitMq::SendMessageToHaproxyConfigDaemon($md);
}
}
}
public function updateLiquidsoapStatusAction()

View File

@ -909,13 +909,13 @@ class Application_Model_Preference
$versions[] = $item->get_title();
}
$latest = $versions;
self::setValue('latest_version', json_encode($latest));
self::setValue('latest_version_nextcheck', strtotime('+1 week'));
if (empty($latest)) {
return $config['airtime_version'];
} else {
return $latest;
return array($config['airtime_version']);
}
self::setValue('latest_version', json_encode($latest));
return $latest;
}
public static function SetLatestVersion($version)

View File

@ -129,10 +129,4 @@ class Application_Model_RabbitMq
$conn->close();
}
public static function SendMessageToHaproxyConfigDaemon($md){
//XXX: This function has been deprecated and is no longer needed
}
}

View File

@ -11,10 +11,6 @@
<div id="schedule-show-what" class="collapsible-content">
<?php echo $this->what; ?>
</div>
<h3 class="collapsible-header"><span class="arrow-icon"></span><?php echo _("Automatic Playlist") ?></h3>
<div id="schedule-show-what" class="collapsible-content">
<?php echo $this->autoplaylist; ?>
</div>
<h3 class="collapsible-header"><span class="arrow-icon"></span><?php echo _("When") ?></h3>
<div id="schedule-show-when" class="collapsible-content">
<?php
@ -29,6 +25,10 @@
<?php echo $this->repeats; ?>
</div>
<h3 class="collapsible-header"><span class="arrow-icon"></span><?php echo _("Automatic Playlist") ?></h3>
<div id="schedule-show-auto" class="collapsible-content">
<?php echo $this->autoplaylist; ?>
</div>
<h3 class="collapsible-header"><span class="arrow-icon"></span><?php echo _("Live Stream Input") ?></h3>
<div id="live-stream-override" class="collapsible-content">
<?php echo $this->live; ?>

View File

@ -13,6 +13,7 @@
$pypo = $externalServices["pypo"];
$liquidsoap = $externalServices["liquidsoap"];
$analyzer = $externalServices["analyzer"];
$celery = $externalServices['celery'];
$r1 = array_reduce($phpDependencies, "booleanReduce", true);
$r2 = array_reduce($externalServices, "booleanReduce", true);
@ -149,6 +150,26 @@
?>
</td>
</tr>
<tr>
<td class="component">
Celery
</td>
<td class="description">
LibreTime Celery Task service
</td>
<td class="solution <?php if ($celery) {echo 'check';?>" >
<?php
} else {
?>">
Check that the airtime-celery service is installed correctly in <code>/etc/init</code>,
and ensure that it's running with
<br/><code>initctl list | grep airtime-celery</code><br/>
If not, try <br/><code>sudo service airtime-celery restart</code>
<?php
}
?>
</td>
</tr>
</tbody>
<tr id="partitions" class="even">
<th colspan="5"><?php echo _("Disk Space") ?></th>

View File

@ -54,7 +54,8 @@ function checkExternalServices() {
"analyzer" => checkAnalyzerService(),
"pypo" => checkPlayoutService(),
"liquidsoap" => checkLiquidsoapService(),
"rabbitmq" => checkRMQConnection()
"rabbitmq" => checkRMQConnection(),
"celery" => checkCeleryService(),
);
}
@ -144,3 +145,16 @@ function checkLiquidsoapService() {
}
return $status == 0;
}
/**
* Check if airtime-celery is currently running
*
* @return boolean true if airtime-celery is running
*/
function checkCeleryService() {
exec("pgrep -f -u celery airtime-celery", $out, $status);
if (array_key_exists(0, $out) && $status == 0) {
return 1;
}
return $status == 0;
}
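
For reference, the same liveness check outside PHP; a sketch using Python's subprocess module (pgrep exits 0 when at least one process matches):

```python
import subprocess

def celery_is_running():
    """Mirror checkCeleryService(): look for an airtime-celery
    process owned by the celery user."""
    result = subprocess.run(
        ["pgrep", "-f", "-u", "celery", "airtime-celery"],
        capture_output=True)
    return result.returncode == 0
```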

View File

@ -127,11 +127,6 @@ vhost = /airtime
[current_backend]
storage_backend=file
[amazon_S3]
provider=amazon_S3
bucket=0
api_key=0
api_key_secret=0
# ----------------------------------------------------------------------
# M O N I T

View File

@ -387,4 +387,9 @@ ANALYZE cc_pref; -- this validates the new partial index
--end added in 2.5.14
-- For now, just needs to be truthy - to be updated later; we should find a better way to implement this...
INSERT INTO cc_pref("keystr", "valstr") VALUES('whats_new_dialog_viewed', 1);
--added for LibreTime to turn on podcast album override by default 3.0.0.alpha6
INSERT INTO cc_pref("keystr", "valstr") VALUES('podcast_album_override', 1);
INSERT INTO cc_pref("keystr", "valstr") VALUES('podcast_auto_smartblock', 1);
-- end

View File

@ -583,16 +583,15 @@ li.ui-state-default {
}
.spl_sortable {
height: 100%;
-webkit-flex: 1 0 auto;
-moz-flex: 1 0 auto;
-ms-flex: 1 0 auto;
-o-flex: 1 0 auto;
flex: 1 0 auto;
overflow: auto;
-webkit-flex: 1 100%;
-moz-flex: 1 100%;
-ms-flex: 1 100%;
-o-flex: 1 100%;
flex: 1 100%;
margin: 4px 0;
min-height: 0;
min-height: 6em;
max-height: calc(100% - 40px);
padding: 5px;
border: 1px solid #444;
border-radius: 3px;

View File

@ -16,6 +16,14 @@ function openAddShowForm(nowOrFuture) {
$("#add-show-form").show();
windowResize();
// collapse advanced configuration sections
$('#schedule-show-auto').hide();
$('#live-stream-override').hide();
$('#schedule-record-rebroadcast').hide();
$('#schedule-show-who').hide();
$('#schedule-show-style').hide();
}
$("#schedule-show-what").show(0, function(){
$add_show_name = $("#add_show_name");

View File

@ -310,15 +310,6 @@ class AirtimeInstall
echo "* Removing logs directory ".$path.PHP_EOL;
exec("rm -rf \"$path\"");
}
public static function CreateCronFile(){
echo "* Creating Cron File".PHP_EOL;
// Create CRON task to run every day. Time of day is initialized to a random time.
$hour = rand(0,23);
$minute = rand(0,59);
$fp = fopen('/etc/cron.d/airtime-crons','w');
fwrite($fp, "$minute $hour * * * root /usr/lib/airtime/utils/phone_home_stat\n");
fclose($fp);
}
public static function removeVirtualEnvDistributeFile(){
echo "* Removing distribute-0.6.10.tar.gz".PHP_EOL;
if(file_exists('/usr/share/python-virtualenv/distribute-0.6.10.tar.gz')){

View File

@ -26,12 +26,6 @@ station_id = teststation
[current_backend]
storage_backend=file
[amazon_S3]
provider=amazon_S3
bucket=0
api_key=0
api_key_secret=0
[monit]
monit_user = guest
monit_password = airtime

View File

@ -1,11 +1,24 @@
# Installing LibreTime
LibreTime should generally be installed on a dedicated host. By default, its installer will install and configure all its dependencies. At the moment, the installer works best on Ubuntu 16.04 LTS (Xenial Xerus), or Ubuntu 14.04.5 LTS (Trusty Tahr).
LibreTime releases can be downloaded [here](https://github.com/LibreTime/libretime/releases).
:::bash
sudo ./install
Recommendations:
Installation on Debian 9 and other Linux distributions is possible, but multiple outstanding issues have yet to be resolved.
- LibreTime should generally be installed on a dedicated host running Ubuntu 16.04 LTS (Xenial Xerus).
- LibreTime is undergoing active development, and is currently in ALPHA.
- It is not recommended that you install LibreTime on the same computer you are using as a desktop.
- Please review the release notes of the version you are planning on installing.
Once you have downloaded and extracted LibreTime, navigate into the folder containing the LibreTime codebase and run its install script from the command line:
```
sudo ./install
```
By default, the installer will install and configure all dependencies.
## Alternative OS installations
Installation on Debian 9 and other Linux distributions is possible, but multiple outstanding issues have yet to be resolved. Installation on Ubuntu 14.04.5 LTS (Trusty Tahr) also works, but is deprecated because that release reaches its official end of life in April 2019.
Plans are in the works for `.deb` and `.rpm` packages, as well as Docker and AWS images.

View File

@ -1,6 +1,6 @@
The following instructions assume that you have root access (**sudo** on most distributions) to a GNU/Linux server, and are familiar with basic command line tasks.
The recommended LibreTime server platform is Ubuntu 16.04 LTS (Xenial Xerus), or Ubuntu 14.04.5 LTS (Trusty Tahr).
The recommended LibreTime server platform is Ubuntu 16.04 LTS (Xenial Xerus).
The server should have at least a 1GHz processor and 1GB of RAM, preferably 2GB RAM or more. If you are using a desktop environment and web browser directly on the server you should install at least 2GB RAM, to avoid swapping to disk.

View File

@ -19,7 +19,9 @@ Click the **Save** button on the right side of the page to save any changes that
Input stream settings
---------------------
On the lower left side of the Stream Settings page you can configure remote live input streams from DJ programs such as **Mixxx** or **IDJC**, or smartphone applications used by broadcast journalists. Airtime supports two types of live input stream: the **Show Source**, which enables a specific person to stream in during their own show, and the **Master Source**, which can override the Show Source if necessary. If neither type of live input is available, Airtime will fall back to **Scheduled Play** (playlists, smart blocks, remote streams and files scheduled in Airtime, in advance of or during a show).
On the lower left side of the Stream Settings page you can configure remote live input streams from DJ programs such as **Mixxx** or **IDJC**, or smartphone applications used by broadcast journalists. Note that a [bug](https://sourceforge.net/p/butt/bugs/12/) has been reported with **Butt** versions 1.14 to 1.16 that causes streams not to connect to LibreTime. Butt version 1.13 does appear to be working.
Airtime supports two types of live input stream: the **Show Source**, which enables a specific person to stream in during their own show, and the **Master Source**, which can override the Show Source if necessary. If neither type of live input is available, Airtime will fall back to **Scheduled Play** (playlists, smart blocks, remote streams and files scheduled in Airtime, in advance of or during a show).
The **Auto Switch Off** and **Auto Switch On** checkboxes enable playout to be switched automatically to the highest priority source whenever an authenticated input source disconnects from or connects to Airtime, respectively. The field **Switch Transition Fade** sets the length of the audio fade as scheduled playout is switched to a remote input source, and back.

View File

@ -20,26 +20,26 @@ To get started you clone the repo and run `vagrant up`.
```bash
git clone https://github.com/libretime/libretime.git
cd libretime
vagrant up ubuntu-trusty
vagrant up ubuntu-xenial
```
If everything works out, you will find LibreTime on [port 8080](http://localhost:8080), icecast on [port 8000](http://localhost:8000) and the docs on [port 8888](http://localhost:8888).
Once you reach the web setup GUI you can click through it using the default values. To connect to the vagrant machine you can run `vagrant ssh ubuntu-trusty` in the libretime directory.
Once you reach the web setup GUI you can click through it using the default values. To connect to the vagrant machine you can run `vagrant ssh ubuntu-xenial` in the libretime directory.
## Alternative OS installations
With the above instructions LibreTime is installed on Ubuntu Trusty Tahr. The Vagrant setup offers the option to choose a different operating system according to your needs.
With the above instructions LibreTime is installed on Ubuntu Xenial Xerus. The Vagrant setup offers the option to choose a different operating system according to your needs.
| OS | Command | Comment |
| ------ | ------------------- | ------- |
| Ubuntu 14.04 | `vagrant up ubuntu-trusty` | Current default install since it was used by legacy upstream, based on Trusty Tahr. |
| Debian 8.7 | `vagrant up debian-jessie` | Recommended install on Jessie as per the docs. |
| Ubuntu 16.04 | `vagrant up ubuntu-xenial` | Experimental install on current Ubuntu Xenial Xerus. |
| Debian 7.11 | `vagrant up debian-wheezy` | Recommended install on Wheezy as per the docs. |
| CentOS | `vagrant up centos` | Experimental install on 7.3 with native systemd support and activated SELinux. |
| Ubuntu | `vagrant up ubuntu` | Deprecated Ubuntu Trusty install, replaced by `ubuntu-trusty`. Do not use for new installs! |
| Debian | `vagrant up debian` | Deprecated Debian Jessie install, replaced by `debian-jessie`. Do not use for new installs! |
| Debian 9.2 | `vagrant up debian-stretch` | Install on current Debian Stretch. |
| Debian 8.7 | `vagrant up debian-jessie` | Install on Debian Jessie. |
| Debian 7.11 | `vagrant up debian-wheezy` | Deprecated install on Debian Wheezy. Please switch to debian-stretch. |
| Ubuntu 18.04 | `vagrant up ubuntu-bionic` | Experimental install on current Ubuntu Bionic Beaver. |
| Ubuntu 16.04 | `vagrant up ubuntu-xenial` | Install on Ubuntu Xenial Xerus. |
| Ubuntu 14.04 | `vagrant up ubuntu-trusty` | Deprecated install on Ubuntu Trusty Tahr. Recommended by legacy upstream. |
| CentOS | `vagrant up centos` | Extremely experimental install on 7.3 with native systemd support and activated SELinux. Needs manual intervention due to Liquidsoap 1.3.3. |
## Troubleshooting

install
View File

@ -52,7 +52,13 @@ showhelp () {
Install Icecast 2 and deploy a basic configuration for Airtime
--selinux
Run restorecon on directories and files that need tagging to
allow the WEB_USER access."
allow the WEB_USER access
--no-postgres
Skips all postgres-related install tasks (useful if you configure
postgresql as part of another script or docker build)
--no-rabbitmq
Skips all rabbitmq-related install tasks.
"
exit 0
}
@ -85,6 +91,8 @@ upgrade="f"
dist=""
code=""
apache_bin=""
skip_postgres=0
skip_rabbitmq=0
function verbose() {
@ -449,6 +457,12 @@ while :; do
--selinux)
selinux="t"
;;
--no-postgres)
skip_postgres=1
;;
--no-rabbitmq)
skip_rabbitmq=1
;;
--)
shift
break
@ -574,6 +588,7 @@ is_debian_stretch=false
is_debian_jessie=false
is_debian_wheezy=false
is_ubuntu_dist=false
is_ubuntu_bionic=false
is_ubuntu_xenial=false
is_ubuntu_trusty=false
is_centos_dist=false
@ -584,7 +599,12 @@ code="${code:-$VERSION_ID}"
code="${code,,}"
verbose "Validating dist-code: ${dist}-${code}"
case "${dist}-${code}" in
ubuntu-16.04|ubuntu-xenial)
ubuntu-18.04)
code="bionic"
is_ubuntu_dist=true
is_ubuntu_bionic=true
;;
ubuntu-16.04|ubuntu-xenial|ubuntu-xenial_docker_minimal)
code="xenial"
is_ubuntu_dist=true
is_ubuntu_xenial=true
@ -593,6 +613,9 @@ case "${dist}-${code}" in
code="trusty"
is_ubuntu_dist=true
is_ubuntu_trusty=true
echo -e "WARNING: Ubuntu Trusty will be EOL by April 2019 and LibreTime will no longer support it at that point." >&2
echo -e "Please upgrade to a non-EOL distro ASAP!" >&2
sleep 6
;;
debian-9|debian-stretch)
code="stretch"
@ -956,6 +979,7 @@ loud " * Configuring PHP in Apache * "
loud "-----------------------------------------------------"
# Test common locations for php conf directory
php_conf_dirs=(
"/etc/php/7.2/apache2/conf.d" # Ubuntu Bionic
"/etc/php/7.0/apache2/conf.d" # Ubuntu Xenial
"/etc/php5/apache2/conf.d" # Debian Stretch, Debian Jessie, Ubuntu Trusty
"/etc/php.d" # CentOS 7
@ -978,7 +1002,9 @@ else
fi
# Enable Apache modules
if $is_ubuntu_xenial || $is_debian_stretch; then
if $is_ubuntu_bionic; then
loudCmd "a2enmod rewrite php7.2"
elif $is_ubuntu_xenial || $is_debian_stretch; then
loudCmd "a2enmod rewrite php7.0"
elif $is_centos_dist; then
verbose "TODO: enable Apache modules mod_rewrite and mod_php manually"
@ -986,70 +1012,75 @@ else
loudCmd "a2enmod rewrite php5"
fi
loud "\n-----------------------------------------------------"
loud " * Configuring PostgreSQL * "
loud "-----------------------------------------------------"
if [ $skip_postgres -eq 0 ]; then
loud "\n-----------------------------------------------------"
loud " * Configuring PostgreSQL * "
loud "-----------------------------------------------------"
# Ensure postgres is running - It isn't after you install the postgres package on Ubuntu 15.04
systemInitCommand start postgresql
# Ensure postgres is running - It isn't after you install the postgres package on Ubuntu 15.04
systemInitCommand start postgresql
setupAirtimePostgresUser() {
# here-doc to execute this block as postgres user
su postgres <<'EOF'
set +e
count=$(psql -d postgres -tAc "SELECT count(*) FROM pg_roles WHERE rolname='airtime';")
if [[ $count -eq 0 ]]; then
psql -d postgres -tAc "CREATE USER airtime WITH ENCRYPTED PASSWORD 'airtime'; ALTER USER airtime CREATEDB;"
[[ $? -eq 0 ]] &&
echo "Created airtime user in PostgreSQL" ||
echo "$0:${FUNCNAME}(): ERROR: Can't create airtime user in PostgreSQL!"
else
echo "airtime user already exists in PostgreSQL"
fi
set -e
setupAirtimePostgresUser() {
# here-doc to execute this block as postgres user
su postgres <<'EOF'
set +e
count=$(psql -d postgres -tAc "SELECT count(*) FROM pg_roles WHERE rolname='airtime';")
if [[ $count -eq 0 ]]; then
psql -d postgres -tAc "CREATE USER airtime WITH ENCRYPTED PASSWORD 'airtime'; ALTER USER airtime CREATEDB;"
[[ $? -eq 0 ]] &&
echo "Created airtime user in PostgreSQL" ||
echo "$0:${FUNCNAME}(): ERROR: Can't create airtime user in PostgreSQL!"
else
echo "airtime user already exists in PostgreSQL"
fi
set -e
# don't indent this!
EOF
}
}
if [ "$postgres" = "t" ]; then
setupAirtimePostgresUser
elif [ ${_i} -eq 1 ]; then
echo -e "Create default airtime postgres user? (Y/n): \c"
read IN
if [ "$IN" = "y" -o "$IN" = "Y" ]; then
if [ "$postgres" = "t" ]; then
setupAirtimePostgresUser
elif [ ${_i} -eq 1 ]; then
echo -e "Create default airtime postgres user? (Y/n): \c"
read IN
if [ "$IN" = "y" -o "$IN" = "Y" ]; then
setupAirtimePostgresUser
fi
fi
fi
loud "\n-----------------------------------------------------"
loud " * Configuring RabbitMQ * "
loud "-----------------------------------------------------"
if [ $skip_rabbitmq -eq 0 ]; then
RABBITMQ_VHOST=/airtime
RABBITMQ_USER=airtime
RABBITMQ_PASSWORD=airtime
EXCHANGES="airtime-pypo|pypo-fetch|airtime-analyzer|media-monitor"
loud "\n-----------------------------------------------------"
loud " * Configuring RabbitMQ * "
loud "-----------------------------------------------------"
# Ignore errors in this check to avoid dying when vhost isn't found
set +e
rabbitmqctl list_vhosts | grep -w "^${RABBITMQ_VHOST}$" > /dev/null
RESULT="$?"
set -e
RABBITMQ_VHOST=/airtime
RABBITMQ_USER=airtime
RABBITMQ_PASSWORD=airtime
EXCHANGES="airtime-pypo|pypo-fetch|airtime-analyzer|media-monitor"
# Only run these if the vhost doesn't exist
if [ "$RESULT" != "0" ]; then
verbose "\n * Creating RabbitMQ user ${RABBITMQ_USER}..."
# Ignore errors in this check to avoid dying when vhost isn't found
set +e
rabbitmqctl list_vhosts | grep -w "^${RABBITMQ_VHOST}$" > /dev/null
RESULT="$?"
set -e
rabbitmqctl add_vhost ${RABBITMQ_VHOST}
rabbitmqctl add_user ${RABBITMQ_USER} ${RABBITMQ_PASSWORD}
else
verbose "\nRabbitMQ user already exists, skipping creation"
# Only run these if the vhost doesn't exist
if [ "$RESULT" != "0" ]; then
verbose "\n * Creating RabbitMQ user ${RABBITMQ_USER}..."
rabbitmqctl add_vhost ${RABBITMQ_VHOST}
rabbitmqctl add_user ${RABBITMQ_USER} ${RABBITMQ_PASSWORD}
else
verbose "\nRabbitMQ user already exists, skipping creation"
fi
verbose "\n * Setting RabbitMQ user permissions..."
#loudCmd "rabbitmqctl set_permissions -p ${RABBITMQ_VHOST} ${RABBITMQ_USER} \"$EXCHANGES\" \"$EXCHANGES\" \"$EXCHANGES\""
loudCmd "rabbitmqctl set_permissions -p ${RABBITMQ_VHOST} ${RABBITMQ_USER} .\* .\* .\*"
fi
verbose "\n * Setting RabbitMQ user permissions..."
#loudCmd "rabbitmqctl set_permissions -p ${RABBITMQ_VHOST} ${RABBITMQ_USER} \"$EXCHANGES\" \"$EXCHANGES\" \"$EXCHANGES\""
loudCmd "rabbitmqctl set_permissions -p ${RABBITMQ_VHOST} ${RABBITMQ_USER} .\* .\* .\*"
if [ ! -d "/etc/airtime" ]; then
loud "\n-----------------------------------------------------"
loud " * Installing Airtime * "

View File

@ -1,66 +0,0 @@
apache2
libapache2-mod-php5
php5
php-pear
php5-gd
lsb-release
zip
unzip
rabbitmq-server
postgresql
postgresql-client
php5-pgsql
python
python-virtualenv
python-pip
libsoundtouch-ocaml
libtaglib-ocaml
libao-ocaml
libmad-ocaml
ecasound
libportaudio2
libsamplerate0
libvo-aacenc0
python-rgain
python-gst0.10
gstreamer0.10-plugins-ugly
gir1.2-gstreamer-0.10
patch
icecast2
curl
php5-curl
mpg123
libcamomile-ocaml-data
libpulse0
vorbis-tools
lsb-release
lsof
mp3gain
vorbisgain
flac
vorbis-tools
pwgen
libfaad2
php-apc
lame
coreutils
liquidsoap
libopus0
sysvinit
sysvinit-utils

View File

@ -0,0 +1,83 @@
apache2
libapache2-mod-php7.2
php7.2
php-pear
php7.2-gd
php-bcmath
php-mbstring
lsb-release
zip
unzip
rabbitmq-server
postgresql
postgresql-client
php7.2-pgsql
python
python-virtualenv
python-pip
libsoundtouch-ocaml
libtaglib-ocaml
libao-ocaml
libmad-ocaml
ecasound
libportaudio2
libsamplerate0
python-rgain
python-gst-1.0
gstreamer1.0-plugins-ugly
python-pika
patch
php7.2-curl
mpg123
curl
icecast2
libcamomile-ocaml-data
libpulse0
vorbis-tools
lsof
vorbisgain
flac
vorbis-tools
pwgen
libfaad2
php-apcu
lame
coreutils
liquidsoap
liquidsoap-plugin-alsa
liquidsoap-plugin-ao
liquidsoap-plugin-faad
liquidsoap-plugin-flac
liquidsoap-plugin-icecast
liquidsoap-plugin-lame
liquidsoap-plugin-mad
liquidsoap-plugin-ogg
liquidsoap-plugin-portaudio
liquidsoap-plugin-pulseaudio
liquidsoap-plugin-taglib
liquidsoap-plugin-voaacenc
liquidsoap-plugin-vorbis
silan
libopus0
sysvinit-utils
build-essential
libssl-dev
libffi-dev
python-dev

View File

@ -44,7 +44,6 @@ libpulse0
vorbis-tools
lsb-release
lsof
mp3gain
vorbisgain
flac
vorbis-tools

View File

@ -45,7 +45,6 @@ libpulse0
vorbis-tools
lsb-release
lsof
mp3gain
vorbisgain
flac
vorbis-tools

View File

@ -46,8 +46,6 @@ libcamomile-ocaml-data
libpulse0
vorbis-tools
lsof
# mp3gain needs to be installed via an external PPA or by using easymp3gain
easymp3gain-gtk
vorbisgain
flac
vorbis-tools

View File

@ -0,0 +1,78 @@
apache2
libapache2-mod-php7.0
php7.0
php-pear
php7.0-gd
php-bcmath
php-mbstring
lsb-release
zip
unzip
postgresql-client
php7.0-pgsql
python
python-virtualenv
python-pip
libsoundtouch-ocaml
libtaglib-ocaml
libao-ocaml
libmad-ocaml
ecasound
libportaudio2
libsamplerate0
python-rgain
python-gst-1.0
gstreamer1.0-plugins-ugly
python-pika
patch
php7.0-curl
mpg123
curl
libcamomile-ocaml-data
libpulse0
vorbis-tools
lsof
vorbisgain
flac
vorbis-tools
pwgen
libfaad2
php-apcu
lame
coreutils
liquidsoap
liquidsoap-plugin-alsa
liquidsoap-plugin-ao
liquidsoap-plugin-faad
liquidsoap-plugin-flac
liquidsoap-plugin-icecast
liquidsoap-plugin-lame
liquidsoap-plugin-mad
liquidsoap-plugin-ogg
liquidsoap-plugin-portaudio
liquidsoap-plugin-pulseaudio
liquidsoap-plugin-taglib
liquidsoap-plugin-voaacenc
liquidsoap-plugin-vorbis
silan
libopus0
sysvinit-utils
build-essential
libssl-dev
libffi-dev
python-dev

View File

@ -7,7 +7,7 @@ User=celery
Group=celery
Environment=RMQ_CONFIG_FILE=/etc/airtime/airtime.conf
WorkingDirectory=/srv/airtime
ExecStart=/usr/local/bin/celery worker -A airtime-celery.tasks:celery --time-limit=300 --concurrency=1 --config=celeryconfig -l INFO
ExecStart=/usr/local/bin/celery worker -A airtime-celery.tasks:celery --time-limit=1800 --concurrency=1 --config=celeryconfig -l INFO
Restart=always
[Install]

View File

@ -47,9 +47,6 @@ pages:
- 'Installation':
- 'Install': install.md
- 'Preparing the Server': manual/preparing-the-server/index.md
- 'Easy Setup': manual/easy-setup/index.md
- 'Automated Installation': manual/automated-installation/index.md
- 'Manual Installation': manual/manual-installation/index.md
- 'Setting the Server Time': manual/setting-the-server-time/index.md
- 'Administration':
- 'The airtime-log Command': manual/the-airtime-log-command/index.md

View File

@ -8,7 +8,7 @@ CELERY_BIN="/usr/local/bin/celery"
CELERY_APP="airtime-celery.tasks:celery"
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=1 --config=celeryconfig"
CELERYD_OPTS="--time-limit=1800 --concurrency=1 --config=celeryconfig"
# %N will be replaced with the first part of the nodename.
CELERYD_LOG_FILE="/var/log/airtime/%N.log"
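
This defaults file and the systemd unit above pass the same limit through to the Celery worker; for reference, a minimal sketch of the equivalent old-style (Celery 3.x) `celeryconfig.py` settings (only the 1800-second limit comes from this diff; the broker URL is illustrative):

```python
# celeryconfig.py -- old-style (Celery 3.x) settings module.
BROKER_URL = "amqp://airtime:airtime@localhost:5672//airtime"

# Hard per-task time limit in seconds, equivalent to the
# --time-limit=1800 flag above (raised from 300 in this commit).
CELERYD_TASK_TIME_LIMIT = 1800
CELERYD_CONCURRENCY = 1
```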

View File

@ -23,7 +23,7 @@ class AirtimeAnalyzerServer:
# Variables
_log_level = logging.INFO
def __init__(self, rmq_config_path, cloud_storage_config_path, http_retry_queue_path, debug=False):
def __init__(self, rmq_config_path, http_retry_queue_path, debug=False):
# Dump a stacktrace with 'kill -SIGUSR2 <PID>'
signal.signal(signal.SIGUSR2, lambda sig, frame: AirtimeAnalyzerServer.dump_stacktrace())
@ -34,15 +34,12 @@ class AirtimeAnalyzerServer:
# Read our rmq config file
rmq_config = config_file.read_config_file(rmq_config_path)
# Read the cloud storage config file
cloud_storage_config = config_file.read_config_file(cloud_storage_config_path)
# Start up the StatusReporter process
StatusReporter.start_thread(http_retry_queue_path)
# Start listening for RabbitMQ messages telling us about newly
# uploaded files. This blocks until we receive a shutdown signal.
self._msg_listener = MessageListener(rmq_config, cloud_storage_config)
self._msg_listener = MessageListener(rmq_config)
StatusReporter.stop_thread()
@ -61,9 +58,6 @@ class AirtimeAnalyzerServer:
pika_logger = logging.getLogger('pika')
pika_logger.setLevel(logging.CRITICAL)
boto_logger = logging.getLogger('auth')
boto_logger.setLevel(logging.CRITICAL)
# Set up logging
logFormatter = logging.Formatter("%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
@ -89,5 +83,3 @@ class AirtimeAnalyzerServer:
if line:
code.append(" %s" % (line.strip()))
logging.info('\n'.join(code))

View File

@ -7,7 +7,6 @@ import Queue
import ConfigParser
from metadata_analyzer import MetadataAnalyzer
from filemover_analyzer import FileMoverAnalyzer
from cloud_storage_uploader import CloudStorageUploader
from cuepoint_analyzer import CuePointAnalyzer
from replaygain_analyzer import ReplayGainAnalyzer
from playability_analyzer import *
@ -25,7 +24,7 @@ class AnalyzerPipeline:
IMPORT_STATUS_FAILED = 2
@staticmethod
def run_analysis(queue, audio_file_path, import_directory, original_filename, storage_backend, file_prefix, cloud_storage_config):
def run_analysis(queue, audio_file_path, import_directory, original_filename, storage_backend, file_prefix):
"""Analyze and import an audio file, and put all extracted metadata into queue.
Keyword arguments:
@ -40,7 +39,6 @@ class AnalyzerPipeline:
to know what the original name was.
storage_backend: String indicating the storage backend (amazon_s3 or file)
file_prefix:
cloud_storage_config: ConfigParser object containing the cloud storage configuration settings
"""
# It is super critical to initialize a separate log file here so that we
# don't inherit logging/locks from the parent process. Supposedly
@ -58,8 +56,6 @@ class AnalyzerPipeline:
raise TypeError("original_filename must be unicode. Was of type " + type(original_filename).__name__ + " instead.")
if not isinstance(file_prefix, unicode):
raise TypeError("file_prefix must be unicode. Was of type " + type(file_prefix).__name__ + " instead.")
if not isinstance(cloud_storage_config, ConfigParser.SafeConfigParser):
raise TypeError("cloud_storage_config must be a SafeConfigParser. Was of type " + type(cloud_storage_config).__name__ + " instead.")
# Analyze the audio file we were told to analyze:
@ -72,11 +68,7 @@ class AnalyzerPipeline:
metadata = ReplayGainAnalyzer.analyze(audio_file_path, metadata)
metadata = PlayabilityAnalyzer.analyze(audio_file_path, metadata)
if storage_backend.lower() == u"amazon_s3":
csu = CloudStorageUploader(cloud_storage_config)
metadata = csu.upload_obj(audio_file_path, metadata)
else:
metadata = FileMoverAnalyzer.move(audio_file_path, import_directory, original_filename, metadata)
metadata = FileMoverAnalyzer.move(audio_file_path, import_directory, original_filename, metadata)
metadata["import_status"] = 0 # Successfully imported

View File

@ -1,122 +0,0 @@
import os
import logging
import uuid
import socket
from boto.s3.connection import S3Connection
from boto.s3.key import Key
# Fix for getaddrinfo deadlock. See these issues for details:
# https://github.com/gevent/gevent/issues/349
# https://github.com/docker/docker-registry/issues/400
u'fix getaddrinfo deadlock'.encode('idna')
CLOUD_CONFIG_PATH = os.path.join(os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime'), 'airtime.conf')
STORAGE_BACKEND_FILE = "file"
SOCKET_TIMEOUT = 240
class CloudStorageUploader:
""" A class that uses Python-Boto SDK to upload objects into Amazon S3.
It is important to note that every file, coming from different Airtime Pro
stations, will get uploaded into the same bucket on the same Amazon S3
account.
Attributes:
_host: Host name for the specific region assigned to the bucket.
_bucket: Name of container on Amazon S3 where files will get uploaded into.
_api_key: Access key to objects on Amazon S3.
_api_key_secret: Secret access key to objects on Amazon S3.
"""
def __init__(self, config):
try:
cloud_storage_config_section = config.get("current_backend", "storage_backend")
self._storage_backend = cloud_storage_config_section
except Exception as e:
print e
print "Defaulting to file storage"
self._storage_backend = STORAGE_BACKEND_FILE
if self._storage_backend == STORAGE_BACKEND_FILE:
self._host = ""
self._bucket = ""
self._api_key = ""
self._api_key_secret = ""
else:
self._host = config.get(cloud_storage_config_section, 'host')
self._bucket = config.get(cloud_storage_config_section, 'bucket')
self._api_key = config.get(cloud_storage_config_section, 'api_key')
self._api_key_secret = config.get(cloud_storage_config_section, 'api_key_secret')
def enabled(self):
if self._storage_backend == "file":
return False
else:
return True
def upload_obj(self, audio_file_path, metadata):
"""Uploads a file into Amazon S3 object storage.
Before a file is uploaded onto Amazon S3 we generate a unique object
name consisting of the filename and a unique string using the uuid4
module.
Keyword arguments:
audio_file_path: Path on disk to the audio file that is about to be
uploaded to Amazon S3 object storage.
metadata: ID3 tags and other metadata extracted from the audio file.
Returns:
The metadata dictionary it received with two new keys:
filename: The file's filename.
resource_id: The unique object name used to identify the objects
on Amazon S3
"""
file_base_name = os.path.basename(audio_file_path)
file_name, extension = os.path.splitext(file_base_name)
# With Amazon S3 you cannot create a signed url if there are spaces
# in the object name. URL encoding the object name doesn't solve the
# problem. As a solution we will replace spaces with dashes.
file_name = file_name.replace(" ", "-")
unique_id = str(uuid.uuid4())
# We add another prefix to the resource name with the last two characters
# of the unique id so files are not all placed under the root folder. We
# do this in case we need to restore a customer's file/s; File restoration
# is done via the S3 Browser client. The client will hang if there are too
# many files under the same folder.
unique_id_prefix = unique_id[-2:]
resource_id = "%s/%s/%s_%s%s" % (metadata['file_prefix'], unique_id_prefix, file_name, unique_id, extension)
# Boto uses the "global default timeout" by default, which is infinite! To prevent network problems from
# turning into deadlocks, we explicitly set the global default timeout period here:
socket.setdefaulttimeout(SOCKET_TIMEOUT)
conn = S3Connection(self._api_key, self._api_key_secret, host=self._host)
bucket = conn.get_bucket(self._bucket)
key = Key(bucket)
key.key = resource_id
key.set_metadata('filename', file_base_name)
key.set_contents_from_filename(audio_file_path)
# Remove file from organize directory
try:
os.remove(audio_file_path)
except OSError:
logging.info("Could not remove %s from organize directory" % audio_file_path)
# Pass original filename to Airtime so we can store it in the db
metadata["filename"] = file_base_name
metadata["resource_id"] = resource_id
metadata["storage_backend"] = self._storage_backend
return metadata

View File

@ -1,121 +0,0 @@
import os
import logging
import uuid
import ConfigParser
from libcloud.storage.providers import get_driver
from libcloud.storage.types import Provider, ContainerDoesNotExistError, ObjectDoesNotExistError
CLOUD_CONFIG_PATH = os.path.join(os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime'), 'airtime.conf')
STORAGE_BACKEND_FILE = "file"
class CloudStorageUploader:
""" A class that uses Apache Libcloud's Storage API to upload objects into
a cloud storage backend. For this implementation all files will be uploaded
into a bucket on Amazon S3.
It is important to note that every file, coming from different Airtime Pro
stations, will get uploaded into the same bucket on the same Amazon S3
account.
Attributes:
_provider: Storage backend. For example, Amazon S3, Google Storage.
_bucket: Name of container on provider where files will get uploaded into.
_api_key: Access key to objects on the provider's storage backend.
_api_key_secret: Secret access key to objects on the provider's storage backend.
"""
def __init__(self):
config = ConfigParser.SafeConfigParser()
try:
config.readfp(open(CLOUD_CONFIG_PATH))
cloud_storage_config_section = config.get("current_backend", "storage_backend")
self._storage_backend = cloud_storage_config_section
except IOError as e:
print "Failed to open config file at " + CLOUD_CONFIG_PATH + ": " + e.strerror
print "Defaulting to file storage"
self._storage_backend = STORAGE_BACKEND_FILE
except Exception as e:
print e
print "Defaulting to file storage"
self._storage_backend = STORAGE_BACKEND_FILE
if self._storage_backend == STORAGE_BACKEND_FILE:
self._provider = ""
self._bucket = ""
self._api_key = ""
self._api_key_secret = ""
else:
self._provider = config.get(cloud_storage_config_section, 'provider')
self._bucket = config.get(cloud_storage_config_section, 'bucket')
self._api_key = config.get(cloud_storage_config_section, 'api_key')
self._api_key_secret = config.get(cloud_storage_config_section, 'api_key_secret')
def enabled(self):
if self._storage_backend == "file":
return False
else:
return True
def upload_obj(self, audio_file_path, metadata):
"""Uploads a file into Amazon S3 object storage.
Before a file is uploaded onto Amazon S3 we generate a unique object
name consisting of the filename and a unique string using the uuid4
module.
Keyword arguments:
audio_file_path: Path on disk to the audio file that is about to be
uploaded to Amazon S3 object storage.
metadata: ID3 tags and other metadata extracted from the audio file.
Returns:
The metadata dictionary it received with three new keys:
filesize: The file's filesize in bytes.
filename: The file's filename.
resource_id: The unique object name used to identify the objects
on Amazon S3
"""
file_base_name = os.path.basename(audio_file_path)
file_name, extension = os.path.splitext(file_base_name)
# With Amazon S3 you cannot create a signed url if there are spaces
# in the object name. URL encoding the object name doesn't solve the
# problem. As a solution we will replace spaces with dashes.
file_name = file_name.replace(" ", "-")
object_name = "%s_%s%s" % (file_name, str(uuid.uuid4()), extension)
provider_driver_class = get_driver(getattr(Provider, self._provider))
driver = provider_driver_class(self._api_key, self._api_key_secret)
try:
container = driver.get_container(self._bucket)
except ContainerDoesNotExistError:
container = driver.create_container(self._bucket)
extra = {'meta_data': {'filename': file_base_name}}
obj = driver.upload_object(file_path=audio_file_path,
container=container,
object_name=object_name,
verify_hash=False,
extra=extra)
metadata["filesize"] = os.path.getsize(audio_file_path)
# Remove file from organize directory
try:
os.remove(audio_file_path)
except OSError:
logging.info("Could not remove %s from organize directory" % audio_file_path)
# Pass original filename to Airtime so we can store it in the db
metadata["filename"] = file_base_name
metadata["resource_id"] = object_name
metadata["storage_backend"] = self._storage_backend
return metadata

View File

@ -9,7 +9,6 @@ import multiprocessing
import Queue
from analyzer_pipeline import AnalyzerPipeline
from status_reporter import StatusReporter
from cloud_storage_uploader import CloudStorageUploader
EXCHANGE = "airtime-uploads"
EXCHANGE_TYPE = "topic"
@ -56,13 +55,12 @@ QUEUE = "airtime-uploads"
"""
class MessageListener:
def __init__(self, rmq_config, cloud_storage_config):
def __init__(self, rmq_config):
''' Start listening for file upload notification messages
from RabbitMQ
Keyword arguments:
rmq_config: A ConfigParser object containing the [rabbitmq] configuration.
cloud_storage_config: A ConfigParser object containing the cloud storage configuration.
'''
self._shutdown = False
@ -76,8 +74,6 @@ class MessageListener:
self._password = rmq_config.get(RMQ_CONFIG_SECTION, 'password')
self._vhost = rmq_config.get(RMQ_CONFIG_SECTION, 'vhost')
self.cloud_storage_config = cloud_storage_config
# Set up a signal handler so we can shutdown gracefully
# For some reason, this signal handler must be set up here. I'd rather
# put it in AirtimeAnalyzerServer, but it doesn't work there (something to do
@ -172,7 +168,7 @@ class MessageListener:
file_prefix = msg_dict["file_prefix"]
storage_backend = msg_dict["storage_backend"]
audio_metadata = MessageListener.spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix, self.cloud_storage_config)
audio_metadata = MessageListener.spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix)
StatusReporter.report_success_to_callback_url(callback_url, api_key, audio_metadata)
except KeyError as e:
@ -211,12 +207,12 @@ class MessageListener:
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
@staticmethod
def spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix, cloud_storage_config):
def spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix):
''' Spawn a child process to analyze and import a new audio file. '''
'''
q = multiprocessing.Queue()
p = multiprocessing.Process(target=AnalyzerPipeline.run_analysis,
args=(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix, cloud_storage_config))
args=(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix))
p.start()
p.join()
if p.exitcode == 0:
@ -230,7 +226,7 @@ class MessageListener:
q = Queue.Queue()
try:
AnalyzerPipeline.run_analysis(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix, cloud_storage_config)
AnalyzerPipeline.run_analysis(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix)
metadata = q.get()
except Exception as e:
logging.error("Analyzer pipeline exception: %s" % str(e))

View File

@ -10,7 +10,6 @@ import airtime_analyzer.airtime_analyzer as aa
VERSION = "1.0"
LIBRETIME_CONF_DIR = os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime')
DEFAULT_RMQ_CONFIG_PATH = os.path.join(LIBRETIME_CONF_DIR, 'airtime.conf')
DEFAULT_CLOUD_STORAGE_CONFIG_PATH = os.path.join(LIBRETIME_CONF_DIR, 'airtime.conf')
DEFAULT_HTTP_RETRY_PATH = '/tmp/airtime_analyzer_http_retries'
def run():
@ -20,7 +19,6 @@ def run():
parser.add_argument("-d", "--daemon", help="run as a daemon", action="store_true")
parser.add_argument("--debug", help="log full debugging output", action="store_true")
parser.add_argument("--rmq-config-file", help="specify a configuration file with RabbitMQ settings (default is %s)" % DEFAULT_RMQ_CONFIG_PATH)
parser.add_argument("--cloud-storage-config-file", help="specify a configuration file with cloud storage settings (default is %s)" % DEFAULT_CLOUD_STORAGE_CONFIG_PATH)
parser.add_argument("--http-retry-queue-file", help="specify where incompleted HTTP requests will be serialized (default is %s)" % DEFAULT_HTTP_RETRY_PATH)
args = parser.parse_args()
@ -28,25 +26,20 @@ def run():
#Default config file path
rmq_config_path = DEFAULT_RMQ_CONFIG_PATH
cloud_storage_config_path = DEFAULT_CLOUD_STORAGE_CONFIG_PATH
http_retry_queue_path = DEFAULT_HTTP_RETRY_PATH
if args.rmq_config_file:
rmq_config_path = args.rmq_config_file
if args.cloud_storage_config_file:
cloud_storage_config_path = args.cloud_storage_config_file
if args.http_retry_queue_file:
http_retry_queue_path = args.http_retry_queue_file
if args.daemon:
with daemon.DaemonContext():
aa.AirtimeAnalyzerServer(rmq_config_path=rmq_config_path,
cloud_storage_config_path = cloud_storage_config_path,
http_retry_queue_path=http_retry_queue_path,
debug=args.debug)
else:
# Run without daemonizing
aa.AirtimeAnalyzerServer(rmq_config_path=rmq_config_path,
cloud_storage_config_path = cloud_storage_config_path,
http_retry_queue_path=http_retry_queue_path,
debug=args.debug)
@ -71,5 +64,3 @@ def check_if_media_monitor_is_running():
continue
run()

View File

@ -37,9 +37,7 @@ setup(name='airtime_analyzer',
'mock',
'python-daemon==1.6',
'requests>=2.7.0',
'apache-libcloud',
'rgain',
'boto',
# These next 3 are required for requests to support SSL with SNI. Learned this the hard way...
# What sucks is that GCC is required to pip install these.
#'ndg-httpsclient',

View File

@ -22,15 +22,10 @@ def teardown():
def test_basic():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
q = Queue.Queue()
#cloud_storage_config_path = os.path.join(os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime'), '/production/cloud_storage.conf')
#cloud_storage_config = config_file.read_config_file(cloud_storage_config_path)
cloud_storage_config = SafeConfigParser()
cloud_storage_config.add_section("current_backend")
cloud_storage_config.set("current_backend", "storage_backend", "file")
file_prefix = u''
storage_backend = "file"
#This actually imports the file into the "./Test Artist" directory.
AnalyzerPipeline.run_analysis(q, DEFAULT_AUDIO_FILE, u'.', filename, storage_backend, file_prefix, cloud_storage_config)
AnalyzerPipeline.run_analysis(q, DEFAULT_AUDIO_FILE, u'.', filename, storage_backend, file_prefix)
metadata = q.get()
assert metadata['track_title'] == u'Test Title'
assert metadata['artist_name'] == u'Test Artist'

View File

@ -1,18 +0,0 @@
from nose.tools import *
from ConfigParser import SafeConfigParser
from airtime_analyzer.cloud_storage_uploader import CloudStorageUploader
from airtime_analyzer.airtime_analyzer import AirtimeAnalyzerServer
from airtime_analyzer import config_file
def setup():
pass
def teardown():
pass
def test_analyze():
cloud_storage_config = SafeConfigParser()
cloud_storage_config.add_section("current_backend")
cloud_storage_config.set("current_backend", "storage_backend", "file")
cl = CloudStorageUploader(cloud_storage_config)

View File

@ -1,45 +0,0 @@
#!/bin/bash
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root." 1>&2
exit 1
fi
echo "This script deletes all traces of Airtime from your system,"
echo "including files uploaded through the web interface."
echo "It will delete files from all known versions of Airtime."
echo
echo "Are you sure you want to do this? Press Enter to continue..."
read
service airtime-playout stop >/dev/null 2>&1
service airtime-liquidsoap stop >/dev/null 2>&1
service airtime-media-monitor stop >/dev/null 2>&1
service airtime-show-recorder stop >/dev/null 2>&1
airtime-pypo-stop >/dev/null 2>&1
airtime-show-recorder-stop >/dev/null 2>&1
killall liquidsoap
rm -rf "/etc/airtime"
rm -rf "/var/log/airtime"
rm -rf "/etc/service/pypo"
rm -rf "/etc/service/pypo-liquidsoap"
rm -rf "/etc/service/recorder"
rm -rf "/usr/share/airtime"
rm -rf "/var/tmp/airtime"
rm -rf "/var/www/airtime"
rm -rf "/usr/bin/airtime-*"
rm -rf "/usr/lib/airtime"
rm -rf "/var/lib/airtime"
rm -rf "/var/tmp/airtime"
rm -rf "/opt/pypo"
rm -rf "/opt/recorder"
rm -rf "/srv/airtime"
rm -rf "/etc/monit/conf.d/airtime-monit.cfg"
rm -rf /etc/monit/conf.d/monit-airtime-*
echo "DROP DATABASE AIRTIME;" | su postgres -c psql
echo "DROP LANGUAGE plpgsql;" | su postgres -c psql
echo "DROP USER AIRTIME;" | su postgres -c psql

View File

@ -1,7 +0,0 @@
#!/bin/bash
if [ -z "${1}" ]; then
echo "Usage: make_tarball.sh git_tag"
exit
fi
GIT_TAG=${1}
git archive ${GIT_TAG} --prefix ${GIT_TAG}/ -o "${GIT_TAG}".tar.gz

View File

@ -1,34 +0,0 @@
#!/bin/bash
#-------------------------------------------------------------------------------
# Copyright (c) 2011 Sourcefabric O.P.S.
#
# This file is part of the Airtime project.
# http://airtime.sourcefabric.org/
#
# Airtime is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Airtime is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Airtime; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# This script sends data to the data collection server
#
# Absolute path to this script
SCRIPT=`readlink -f $0`
# Absolute directory this script is in
SCRIPTPATH=`dirname $SCRIPT`
invokePwd=$PWD
cd $SCRIPTPATH
php -q phone_home_stat.php "$@" || exit 1

View File

@ -1,108 +0,0 @@
<?php
/**
* Ensures that the user is running this PHP script with root
* permissions. If not running with root permissions, causes the
* script to exit.
*/
function exitIfNotRoot()
{
// Need to check that we are superuser before running this.
if(posix_geteuid() != 0){
echo "Must be root user.\n";
exit(1);
}
}
exitIfNotRoot();
date_default_timezone_set("UTC");
$values = parse_ini_file('/etc/airtime/airtime.conf', true);
$CC_CONFIG['phpDir'] = $values['general']['airtime_dir'];
require_once($CC_CONFIG['phpDir'].'/application/configs/conf.php');
$CC_CONFIG = Config::getConfig();
require_once($CC_CONFIG['phpDir'].'/application/configs/constants.php');
require_once($CC_CONFIG['phpDir'].'/application/logging/Logging.php');
Logging::setLogPath("/var/log/airtime/zendphp.log");
// Ensure library/ is on include_path
set_include_path(implode(PATH_SEPARATOR, array(
get_include_path(),
realpath($CC_CONFIG['phpDir'] . '/library'),
realpath($CC_CONFIG['phpDir']),
realpath($CC_CONFIG['phpDir'].'/application/models'),
)));
require_once 'propel/runtime/lib/Propel.php';
Propel::init($CC_CONFIG['phpDir']."/application/configs/airtime-conf-production.php");
//Zend framework
if (file_exists('/usr/share/php/libzend-framework-php')){
set_include_path('/usr/share/php/libzend-framework-php' . PATH_SEPARATOR . get_include_path());
}
require_once('Zend/Loader/Autoloader.php');
$autoloader = Zend_Loader_Autoloader::getInstance();
$autoloader->registerNamespace('Application_');
$resourceLoader = new Zend_Loader_Autoloader_Resource(array(
'basePath' => $CC_CONFIG['phpDir'].'/'.'application',
'namespace' => 'Application',
'resourceTypes' => array(
'model' => array(
'path' => 'models/',
'namespace' => 'Model',
),
'common' => array(
'path' => 'common/',
'namespace' => 'Common',
),
),
));
$infoArray = Application_Model_Preference::GetSystemInfo(true);
if(Application_Model_Preference::GetSupportFeedback() == '1'){
$url = 'http://stat.sourcefabric.org/index.php?p=airtime';
//$url = 'http://localhost:9999/index.php?p=airtime';
$ch = curl_init();
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_POST, 1);
curl_setopt($ch, CURLOPT_URL, $url);
$data = json_encode($infoArray);
$dataArray = array("data" => $data );
curl_setopt($ch, CURLOPT_POSTFIELDS, $dataArray);
$result = curl_exec($ch);
curl_close($ch);
}
// Get latest version from stat server and store to db
if(Application_Model_Preference::GetPlanLevel() == 'disabled'){
$url = 'http://stat.sourcefabric.org/airtime-stats/airtime_latest_version';
//$url = 'http://localhost:9999/index.php?p=airtime';
$ch = curl_init();
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_URL, $url);
$result = curl_exec($ch);
if(curl_errno($ch)) {
echo "curl error: " . curl_error($ch) . "\n";
} else {
$resultArray = explode("\n", $result);
if (isset($resultArray[0])) {
Application_Model_Preference::SetLatestVersion($resultArray[0]);
}
if (isset($resultArray[1])) {
Application_Model_Preference::SetLatestLink($resultArray[1]);
}
}
curl_close($ch);
}