author    Byron Jones <glob@mozilla.com>  2015-08-11 17:16:38 +0200
committer Byron Jones <glob@mozilla.com>  2015-08-11 17:17:00 +0200
commit    99bec2e52b174c1e8774ebf12da5963330d47af9 (patch)
tree      ee9f39a742eb6739be38355eac4af3399d17fb01 /Bugzilla
parent    dd7cd13b7cecaa92caebd3c4930154b2de35ff29 (diff)
download  bugzilla-99bec2e52b174c1e8774ebf12da5963330d47af9.tar.gz
          bugzilla-99bec2e52b174c1e8774ebf12da5963330d47af9.tar.xz
Bug 1160929 - add support for storing attachments in s3
Diffstat (limited to 'Bugzilla')
-rw-r--r--  Bugzilla/Attachment.pm              4
-rw-r--r--  Bugzilla/Attachment/S3.pm          56
-rw-r--r--  Bugzilla/Config/Attachment.pm     104
-rw-r--r--  Bugzilla/Install/Requirements.pm   16
-rw-r--r--  Bugzilla/S3.pm                    538
-rw-r--r--  Bugzilla/S3/Bucket.pm             433
6 files changed, 1118 insertions(+), 33 deletions(-)
diff --git a/Bugzilla/Attachment.pm b/Bugzilla/Attachment.pm
index c3e48b899..0cdec6bf0 100644
--- a/Bugzilla/Attachment.pm
+++ b/Bugzilla/Attachment.pm
@@ -908,6 +908,10 @@ sub get_storage_by_name {
require Bugzilla::Attachment::FileSystem;
return Bugzilla::Attachment::FileSystem->new();
}
+ elsif ($name eq 's3') {
+ require Bugzilla::Attachment::S3;
+ return Bugzilla::Attachment::S3->new();
+ }
else {
return undef;
}
diff --git a/Bugzilla/Attachment/S3.pm b/Bugzilla/Attachment/S3.pm
new file mode 100644
index 000000000..6d11f423e
--- /dev/null
+++ b/Bugzilla/Attachment/S3.pm
@@ -0,0 +1,56 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+# This Source Code Form is "Incompatible With Secondary Licenses", as
+# defined by the Mozilla Public License, v. 2.0.
+
+package Bugzilla::Attachment::S3;
+use strict;
+use warnings;
+
+use Bugzilla::Error;
+use Bugzilla::S3;
+
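+# Constructs the attachment storage object: a Bugzilla::S3 client plus
+# a handle on the bucket named by the s3_bucket parameter.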
+sub new {
+ my $s3 = Bugzilla::S3->new({
+ aws_access_key_id => Bugzilla->params->{aws_access_key_id},
+ aws_secret_access_key => Bugzilla->params->{aws_secret_access_key},
+ secure => 1,
+ });
+ return bless({
+ s3 => $s3,
+ bucket => $s3->bucket(Bugzilla->params->{s3_bucket}),
+ }, shift);
+}
+
+sub store {
+ my ($self, $attach_id, $data) = @_;
+ unless ($self->{bucket}->add_key($attach_id, $data)) {
+ warn "Failed to add attachment $attach_id to S3: " . $self->{bucket}->errstr . "\n";
+ ThrowCodeError('s3_add_failed', { attach_id => $attach_id, reason => $self->{bucket}->errstr });
+ }
+}
+
+sub retrieve {
+ my ($self, $attach_id) = @_;
+ my $response = $self->{bucket}->get_key($attach_id);
+ if (!$response) {
+ warn "Failed to retrieve attachment $attach_id from S3: " . $self->{bucket}->errstr . "\n";
+ ThrowCodeError('s3_get_failed', { attach_id => $attach_id, reason => $self->{bucket}->errstr });
+ }
+ return $response->{value};
+}
+
+sub remove {
+ my ($self, $attach_id) = @_;
+ $self->{bucket}->delete_key($attach_id)
+ or warn "Failed to remove attachment $attach_id from S3: " . $self->{bucket}->errstr . "\n";
+}
+
+sub exists {
+ my ($self, $attach_id) = @_;
+ return !!$self->{bucket}->head_key($attach_id);
+}
+
+1;
diff --git a/Bugzilla/Config/Attachment.pm b/Bugzilla/Config/Attachment.pm
index cf87f4c89..20cde0b01 100644
--- a/Bugzilla/Config/Attachment.pm
+++ b/Bugzilla/Config/Attachment.pm
@@ -38,42 +38,82 @@ use Bugzilla::Config::Common;
our $sortkey = 400;
sub get_param_list {
- my $class = shift;
- my @param_list = (
- {
- name => 'allow_attachment_display',
- type => 'b',
- default => 0
- },
+ my $class = shift;
+ my @param_list = (
+ {
+ name => 'allow_attachment_display',
+ type => 'b',
+ default => 0
+ },
+ {
+ name => 'attachment_base',
+ type => 't',
+ default => '',
+ checker => \&check_urlbase
+ },
+ {
+ name => 'allow_attachment_deletion',
+ type => 'b',
+ default => 0
+ },
+ {
+ name => 'maxattachmentsize',
+ type => 't',
+ default => '1000',
+ checker => \&check_maxattachmentsize
+ },
+ {
+ name => 'attachment_storage',
+ type => 's',
+ choices => ['database', 'filesystem', 's3'],
+ default => 'database',
+ checker => \&check_storage
+ },
+ {
+ name => 's3_bucket',
+ type => 't',
+ default => '',
+ },
+ {
+ name => 'aws_access_key_id',
+ type => 't',
+ default => '',
+ },
+ {
+ name => 'aws_secret_access_key',
+ type => 't',
+ default => '',
+ },
+ );
+ return @param_list;
+}
- {
- name => 'attachment_base',
- type => 't',
- default => '',
- checker => \&check_urlbase
- },
+sub check_params {
+ my ($class, $params) = @_;
+ return unless $params->{attachment_storage} eq 's3';
- {
- name => 'allow_attachment_deletion',
- type => 'b',
- default => 0
- },
+ if ($params->{s3_bucket} eq ''
+ || $params->{aws_access_key_id} eq ''
+ || $params->{aws_secret_access_key} eq ''
+ ) {
+ return "You must set s3_bucket, aws_access_key_id, and aws_secret_access_key when attachment_storage is set to S3";
+ }
+ return '';
+}
- {
- name => 'maxattachmentsize',
- type => 't',
- default => '1000',
- checker => \&check_maxattachmentsize
- },
+sub check_storage {
+ my ($value, $param) = (@_);
+ my $check_multi = check_multi($value, $param);
+ return $check_multi if $check_multi;
- {
- name => 'attachment_storage',
- type => 's',
- choices => ['database', 'filesystem'],
- default => 'database',
- checker => \&check_multi
- } );
- return @param_list;
+ if ($value eq 's3') {
+ return Bugzilla->feature('s3')
+ ? ''
+ : 'The perl modules required for S3 support are not installed';
+ }
+ else {
+ return '';
+ }
}
1;
diff --git a/Bugzilla/Install/Requirements.pm b/Bugzilla/Install/Requirements.pm
index 71c01b9be..4b3e8aa76 100644
--- a/Bugzilla/Install/Requirements.pm
+++ b/Bugzilla/Install/Requirements.pm
@@ -332,6 +332,20 @@ sub OPTIONAL_MODULES {
feature => ['detect_charset'],
},
+ # S3 attachments
+ {
+ package => 'Class-Accessor-Fast',
+ module => 'Class::Accessor::Fast',
+ version => 0,
+ feature => ['s3'],
+ },
+ {
+ package => 'XML-Simple',
+ module => 'XML::Simple',
+ version => 0,
+ feature => ['s3'],
+ },
+
# Inbound Email
{
package => 'Email-MIME-Attachment-Stripper',
@@ -381,7 +395,7 @@ sub OPTIONAL_MODULES {
package => 'URI-Escape',
module => 'URI::Escape',
version => 0,
- feature => ['memcached'],
+ feature => ['memcached', 's3'],
},
{
package => 'Cache-Memcached',
diff --git a/Bugzilla/S3.pm b/Bugzilla/S3.pm
new file mode 100644
index 000000000..eb30816a4
--- /dev/null
+++ b/Bugzilla/S3.pm
@@ -0,0 +1,538 @@
+package Bugzilla::S3;
+
+# Forked from Amazon::S3, which appears to be abandoned.
+#
+# changes for Bugzilla:
+# - fixed error handling
+# (https://rt.cpan.org/Ticket/Display.html?id=93033)
+# - made LWP::UserAgent::Determined optional
+# (https://rt.cpan.org/Ticket/Display.html?id=76471)
+# - replaced croaking with returning undef in Bucket->get_key and Bucket->get_acl
+# (https://rt.cpan.org/Public/Bug/Display.html?id=40281)
+# - default to secure (https) connections to AWS
+#
+
+use strict;
+use warnings;
+
+use Bugzilla::S3::Bucket;
+use Bugzilla::Util qw(trim);
+use Carp;
+use Digest::HMAC_SHA1;
+use HTTP::Date;
+use LWP::UserAgent;
+use MIME::Base64 qw(encode_base64);
+use URI::Escape qw(uri_escape_utf8);
+use XML::Simple;
+
+use base qw(Class::Accessor::Fast);
+__PACKAGE__->mk_accessors(
+ qw(aws_access_key_id aws_secret_access_key secure ua err errstr timeout retry host)
+);
+our $VERSION = '0.45bmo';
+
+my $AMAZON_HEADER_PREFIX = 'x-amz-';
+my $METADATA_PREFIX = 'x-amz-meta-';
+my $KEEP_ALIVE_CACHESIZE = 10;
+
+sub new {
+ my $class = shift;
+ my $self = $class->SUPER::new(@_);
+
+ die "No aws_access_key_id" unless $self->aws_access_key_id;
+ die "No aws_secret_access_key" unless $self->aws_secret_access_key;
+
+ $self->secure(1) if not defined $self->secure;
+ $self->timeout(30) if not defined $self->timeout;
+ $self->host('s3.amazonaws.com') if not defined $self->host;
+
+ my $ua;
+ if ($self->retry) {
+ require LWP::UserAgent::Determined;
+ $ua = LWP::UserAgent::Determined->new(
+ keep_alive => $KEEP_ALIVE_CACHESIZE,
+ requests_redirectable => [qw(GET HEAD DELETE PUT)],
+ );
+ $ua->timing('1,2,4,8,16,32');
+ }
+ else {
+ $ua = LWP::UserAgent->new(
+ keep_alive => $KEEP_ALIVE_CACHESIZE,
+ requests_redirectable => [qw(GET HEAD DELETE PUT)],
+ );
+ }
+
+ $ua->timeout($self->timeout);
+ if (my $proxy = Bugzilla->params->{proxy_url}) {
+ $ua->proxy([ 'https', 'http' ], $proxy);
+ }
+ $self->ua($ua);
+ return $self;
+}
+
+sub bucket {
+ my ($self, $bucketname) = @_;
+ return Bugzilla::S3::Bucket->new({bucket => $bucketname, account => $self});
+}
+
+sub _validate_acl_short {
+ my ($self, $policy_name) = @_;
+
+ if (!grep({$policy_name eq $_}
+ qw(private public-read public-read-write authenticated-read)))
+ {
+ croak "$policy_name is not a supported canned access policy";
+ }
+}
+
+# EU buckets must be accessed via their DNS name. This routine figures out if
+# a given bucket name can be safely used as a DNS name.
+sub _is_dns_bucket {
+ my $bucketname = $_[0];
+
+ if (length $bucketname > 63) {
+ return 0;
+ }
+ if (length $bucketname < 3) {
+        return 0;
+ }
+ return 0 unless $bucketname =~ m{^[a-z0-9][a-z0-9.-]+$};
+ my @components = split /\./, $bucketname;
+ for my $c (@components) {
+ return 0 if $c =~ m{^-};
+ return 0 if $c =~ m{-$};
+ return 0 if $c eq '';
+ }
+ return 1;
+}
+
+# make the HTTP::Request object
+sub _make_request {
+ my ($self, $method, $path, $headers, $data, $metadata) = @_;
+ croak 'must specify method' unless $method;
+ croak 'must specify path' unless defined $path;
+ $headers ||= {};
+ $data = '' if not defined $data;
+ $metadata ||= {};
+ my $http_headers = $self->_merge_meta($headers, $metadata);
+
+ $self->_add_auth_header($http_headers, $method, $path)
+ unless exists $headers->{Authorization};
+ my $protocol = $self->secure ? 'https' : 'http';
+ my $host = $self->host;
+ my $url = "$protocol://$host/$path";
+ if ($path =~ m{^([^/?]+)(.*)} && _is_dns_bucket($1)) {
+ $url = "$protocol://$1.$host$2";
+ }
+
+ my $request = HTTP::Request->new($method, $url, $http_headers);
+ $request->content($data);
+
+ # my $req_as = $request->as_string;
+ # $req_as =~ s/[^\n\r\x20-\x7f]/?/g;
+ # $req_as = substr( $req_as, 0, 1024 ) . "\n\n";
+ # warn $req_as;
+
+ return $request;
+}
+
+# $self->_send_request($HTTP::Request)
+# $self->_send_request(@params_to_make_request)
+sub _send_request {
+ my $self = shift;
+ my $request;
+ if (@_ == 1) {
+ $request = shift;
+ }
+ else {
+ $request = $self->_make_request(@_);
+ }
+
+ my $response = $self->_do_http($request);
+ my $content = $response->content;
+
+ return $content unless $response->content_type eq 'application/xml';
+ return unless $content;
+ return $self->_xpc_of_content($content);
+}
+
+# centralize all HTTP work, for debugging
+sub _do_http {
+ my ($self, $request, $filename) = @_;
+
+ # convenient time to reset any error conditions
+ $self->err(undef);
+ $self->errstr(undef);
+ return $self->ua->request($request, $filename);
+}
+
+sub _send_request_expect_nothing {
+ my $self = shift;
+ my $request = $self->_make_request(@_);
+
+ my $response = $self->_do_http($request);
+ my $content = $response->content;
+
+ return 1 if $response->code =~ /^2\d\d$/;
+
+ # anything else is a failure, and we save the parsed result
+ $self->_remember_errors($response->content);
+ return 0;
+}
+
+# Send a HEAD request first, to find out if we'll be hit with a 307 redirect.
+# Since currently LWP does not have true support for 100 Continue, it simply
+# slams the PUT body into the socket without waiting for any possible redirect.
+# So when we're reading from a filehandle and LWP reissues the request after
+# following the redirect, the filehandle has already been closed by the first
+# attempt. We therefore probe first to find out what's going on before we
+# start sending any actual data.
+sub _send_request_expect_nothing_probed {
+ my $self = shift;
+ my ($method, $path, $conf, $value) = @_;
+ my $request = $self->_make_request('HEAD', $path);
+ my $override_uri = undef;
+
+ my $old_redirectable = $self->ua->requests_redirectable;
+ $self->ua->requests_redirectable([]);
+
+ my $response = $self->_do_http($request);
+
+ if ($response->code =~ /^3/ && defined $response->header('Location')) {
+ $override_uri = $response->header('Location');
+ }
+ $request = $self->_make_request(@_);
+ $request->uri($override_uri) if defined $override_uri;
+
+ $response = $self->_do_http($request);
+ $self->ua->requests_redirectable($old_redirectable);
+
+ my $content = $response->content;
+
+ return 1 if $response->code =~ /^2\d\d$/;
+
+ # anything else is a failure, and we save the parsed result
+ $self->_remember_errors($response->content);
+ return 0;
+}
+
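+# returns 1 on a 2xx response; otherwise records err/errstr (and any
+# parsed S3 error document) and returns undef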
+sub _check_response {
+ my ($self, $response) = @_;
+ return 1 if $response->code =~ /^2\d\d$/;
+ $self->err("network_error");
+ $self->errstr($response->status_line);
+ $self->_remember_errors($response->content);
+ return undef;
+}
+
+sub _croak_if_response_error {
+ my ($self, $response) = @_;
+ unless ($response->code =~ /^2\d\d$/) {
+ $self->err("network_error");
+ $self->errstr($response->status_line);
+ croak "Bugzilla::S3: Amazon responded with "
+ . $response->status_line . "\n";
+ }
+}
+
+sub _xpc_of_content {
+ return XMLin($_[1], 'KeepRoot' => 1, 'SuppressEmpty' => '', 'ForceArray' => ['Contents']);
+}
+
+# returns 1 if errors were found
+sub _remember_errors {
+ my ($self, $src) = @_;
+
+ unless (ref $src || $src =~ m/^[[:space:]]*</) { # if not xml
+        (my $code = $src) =~ s/^[[:space:]]*\(([0-9]*)\).*$/$1/;
+ $self->err($code);
+ $self->errstr($src);
+ return 1;
+ }
+
+ my $r = ref $src ? $src : $self->_xpc_of_content($src);
+
+ if ($r->{Error}) {
+ $self->err($r->{Error}{Code});
+ $self->errstr($r->{Error}{Message});
+ return 1;
+ }
+ return 0;
+}
+
+sub _add_auth_header {
+ my ($self, $headers, $method, $path) = @_;
+ my $aws_access_key_id = $self->aws_access_key_id;
+ my $aws_secret_access_key = $self->aws_secret_access_key;
+
+ if (not $headers->header('Date')) {
+ $headers->header(Date => time2str(time));
+ }
+ my $canonical_string = $self->_canonical_string($method, $path, $headers);
+ my $encoded_canonical =
+ $self->_encode($aws_secret_access_key, $canonical_string);
+ $headers->header(
+ Authorization => "AWS $aws_access_key_id:$encoded_canonical");
+}
+
+# generates an HTTP::Headers object given one hash that represents http
+# headers to set and another hash that represents an object's metadata.
+sub _merge_meta {
+ my ($self, $headers, $metadata) = @_;
+ $headers ||= {};
+ $metadata ||= {};
+
+ my $http_header = HTTP::Headers->new;
+ while (my ($k, $v) = each %$headers) {
+ $http_header->header($k => $v);
+ }
+ while (my ($k, $v) = each %$metadata) {
+ $http_header->header("$METADATA_PREFIX$k" => $v);
+ }
+
+ return $http_header;
+}
+
+# generate a canonical string for the given parameters. expires is optional and is
+# only used by query string authentication.
+sub _canonical_string {
+ my ($self, $method, $path, $headers, $expires) = @_;
+ my %interesting_headers = ();
+ while (my ($key, $value) = each %$headers) {
+ my $lk = lc $key;
+ if ( $lk eq 'content-md5'
+ or $lk eq 'content-type'
+ or $lk eq 'date'
+ or $lk =~ /^$AMAZON_HEADER_PREFIX/)
+ {
+ $interesting_headers{$lk} = trim($value);
+ }
+ }
+
+ # these keys get empty strings if they don't exist
+ $interesting_headers{'content-type'} ||= '';
+ $interesting_headers{'content-md5'} ||= '';
+
+ # just in case someone used this. it's not necessary in this lib.
+ $interesting_headers{'date'} = ''
+ if $interesting_headers{'x-amz-date'};
+
+ # if you're using expires for query string auth, then it trumps date
+ # (and x-amz-date)
+ $interesting_headers{'date'} = $expires if $expires;
+
+ my $buf = "$method\n";
+ foreach my $key (sort keys %interesting_headers) {
+ if ($key =~ /^$AMAZON_HEADER_PREFIX/) {
+ $buf .= "$key:$interesting_headers{$key}\n";
+ }
+ else {
+ $buf .= "$interesting_headers{$key}\n";
+ }
+ }
+
+ # don't include anything after the first ? in the resource...
+ $path =~ /^([^?]*)/;
+ $buf .= "/$1";
+
+ # ...unless there is an acl or torrent parameter
+ if ($path =~ /[&?]acl($|=|&)/) {
+ $buf .= '?acl';
+ }
+ elsif ($path =~ /[&?]torrent($|=|&)/) {
+ $buf .= '?torrent';
+ }
+ elsif ($path =~ /[&?]location($|=|&)/) {
+ $buf .= '?location';
+ }
+
+ return $buf;
+}
+
+# computes the hmac-sha1 of the canonical string, keyed with the aws secret
+# access key, then base64 encodes the result (optionally urlencoding after that).
+sub _encode {
+ my ($self, $aws_secret_access_key, $str, $urlencode) = @_;
+ my $hmac = Digest::HMAC_SHA1->new($aws_secret_access_key);
+ $hmac->add($str);
+ my $b64 = encode_base64($hmac->digest, '');
+ if ($urlencode) {
+ return $self->_urlencode($b64);
+ }
+ else {
+ return $b64;
+ }
+}
+
+sub _urlencode {
+ my ($self, $unencoded) = @_;
+ return uri_escape_utf8($unencoded, '^A-Za-z0-9_-');
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+Bugzilla::S3 - A portable client library for working with and
+managing Amazon S3 buckets and keys.
+
+=head1 DESCRIPTION
+
+Bugzilla::S3 provides a portable client interface to Amazon Simple
+Storage Service (S3).
+
+The need for this module arose from work that had to interact with
+S3 and be distributed, installed, and used in many different
+environments where compiled dependencies may not be an option.
+L<Net::Amazon::S3> used L<XML::LibXML>, tying it to that specific
+and often difficult-to-install dependency. To remove this potential
+barrier to entry, this module was forked and modified to use
+L<XML::SAX> via L<XML::Simple>.
+
+Bugzilla::S3 is intended to be a drop-in replacement for
+L<Net::Amazon::S3> that trades some performance in return for
+portability.
+
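+A minimal usage sketch (the bucket name, key, and credentials below
+are placeholders, not working values):
+
+    use Bugzilla::S3;
+
+    my $s3 = Bugzilla::S3->new({
+        aws_access_key_id     => 'AKIA...',    # placeholder
+        aws_secret_access_key => '...',        # placeholder
+    });
+    my $bucket = $s3->bucket('my-attachments');
+
+    $bucket->add_key('42', 'attachment data')
+        or die 'add_key failed: ' . $bucket->errstr;
+    my $object = $bucket->get_key('42')
+        or die 'get_key failed: ' . $bucket->errstr;
+    print $object->{value};
+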
+=head1 METHODS
+
+=head2 new
+
+Create a new S3 client object. Takes some arguments:
+
+=over
+
+=item aws_access_key_id
+
+Use your Access Key ID as the value of the AWSAccessKeyId parameter
+in requests you send to Amazon Web Services (when required). Your
+Access Key ID identifies you as the party responsible for the
+request.
+
+=item aws_secret_access_key
+
+Since your Access Key ID is not encrypted in requests to AWS, it
+could be discovered and used by anyone. Services that are not free
+require you to provide additional information, a request signature,
+to verify that a request containing your unique Access Key ID could
+only have come from you.
+
+B<DO NOT INCLUDE THIS IN SCRIPTS OR APPLICATIONS YOU
+DISTRIBUTE. YOU'LL BE SORRY.>
+
+=item secure
+
+Set this to C<0> if you do not want to use SSL-encrypted
+connections when talking to S3. Defaults to C<1>.
+
+=item timeout
+
+Defines the time, in seconds, your script should wait for a
+response before bailing. Defaults to 30 seconds.
+
+=item retry
+
+Enables or disables retrying on errors. Retries use exponential
+backoff, waiting 1, 2, 4, 8, 16, and 32 seconds between attempts,
+as recommended by Amazon. Defaults to off (no retries).
+
+=item host
+
+Defines the S3 host endpoint to use. Defaults to
+'s3.amazonaws.com'.
+
+=back
+
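+All of these except the two credential arguments are optional. A
+sketch of a fully specified constructor call (all values below are
+illustrative):
+
+    my $s3 = Bugzilla::S3->new({
+        aws_access_key_id     => 'AKIA...',    # placeholder
+        aws_secret_access_key => '...',        # placeholder
+        secure                => 0,            # plain http
+        timeout               => 10,           # seconds
+        retry                 => 1,            # needs LWP::UserAgent::Determined
+        host                  => 's3.example.internal',
+    });
+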
+=head1 ABOUT
+
+This module contains code modified from Amazon that contains the
+following notice:
+
+ # This software code is made available "AS IS" without warranties of any
+ # kind. You may copy, display, modify and redistribute the software
+ # code either by itself or as incorporated into your code; provided that
+ # you do not remove any proprietary notices. Your use of this software
+ # code is at your own risk and you waive any claim against Amazon
+ # Digital Services, Inc. or its affiliates with respect to your use of
+ # this software code. (c) 2006 Amazon Digital Services, Inc. or its
+ # affiliates.
+
+=head1 TESTING
+
+Testing S3 is a tricky thing. Amazon wants to charge you a bit of
+money each time you use their service. And yes, testing counts as using.
+Because of this, the application's test suite skips anything approaching
+a real test unless you set these three environment variables:
+
+=over
+
+=item AMAZON_S3_EXPENSIVE_TESTS
+
+Doesn't matter what you set it to; it just has to be set.
+
+=item AWS_ACCESS_KEY_ID
+
+Your AWS access key
+
+=item AWS_ACCESS_KEY_SECRET
+
+Your AWS sekkr1t passkey. Be forewarned that setting this environment variable
+on a shared system might leak that information to another user. Be careful.
+
+=back
+
+=head1 TO DO
+
+=over
+
+=item Continue to improve and refine the documentation.
+
+=item Reduce dependencies wherever possible.
+
+=item Implement a debugging mode.
+
+=item Refactor and consolidate the request code in Bugzilla::S3.
+
+=item Refactor the URI creation code to make use of L<URI>.
+
+=back
+
+=head1 SUPPORT
+
+Bugs should be reported via the CPAN bug tracker at
+
+<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Amazon-S3>
+
+For other issues, contact the author.
+
+=head1 AUTHOR
+
+Timothy Appnel <tima@cpan.org>
+
+=head1 SEE ALSO
+
+L<Bugzilla::S3::Bucket>, L<Net::Amazon::S3>
+
+=head1 COPYRIGHT AND LICENCE
+
+This module was initially based on L<Net::Amazon::S3> 0.41, by
+Leon Brocard. Net::Amazon::S3 was based on example code from
+Amazon with this notice:
+
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006 Amazon Digital Services, Inc. or its
+# affiliates.
+
+The software is released under the Artistic License. The
+terms of the Artistic License are described at
+http://www.perl.com/language/misc/Artistic.html. Except
+where otherwise noted, Amazon::S3 is Copyright 2008, Timothy
+Appnel, tima@cpan.org. All rights reserved.
diff --git a/Bugzilla/S3/Bucket.pm b/Bugzilla/S3/Bucket.pm
new file mode 100644
index 000000000..37c61e666
--- /dev/null
+++ b/Bugzilla/S3/Bucket.pm
@@ -0,0 +1,433 @@
+package Bugzilla::S3::Bucket;
+
+# Forked from Amazon::S3, which appears to be abandoned.
+
+use strict;
+use warnings;
+use Carp;
+use File::stat;
+use IO::File;
+
+use base qw(Class::Accessor::Fast);
+__PACKAGE__->mk_accessors(qw(bucket creation_date account));
+
+sub new {
+ my $class = shift;
+ my $self = $class->SUPER::new(@_);
+ croak "no bucket" unless $self->bucket;
+ croak "no account" unless $self->account;
+ return $self;
+}
+
+sub _uri {
+ my ($self, $key) = @_;
+ return ($key)
+ ? $self->bucket . "/" . $self->account->_urlencode($key)
+ : $self->bucket . "/";
+}
+
+# returns bool
+sub add_key {
+ my ($self, $key, $value, $conf) = @_;
+ croak 'must specify key' unless $key && length $key;
+
+ if ($conf->{acl_short}) {
+ $self->account->_validate_acl_short($conf->{acl_short});
+ $conf->{'x-amz-acl'} = $conf->{acl_short};
+ delete $conf->{acl_short};
+ }
+
+ if (ref($value) eq 'SCALAR') {
+ $conf->{'Content-Length'} ||= -s $$value;
+ $value = _content_sub($$value);
+ }
+ else {
+ $conf->{'Content-Length'} ||= length $value;
+ }
+
+ # If we're pushing to a bucket that's under DNS flux, we might get a 307
+ # Since LWP doesn't support actually waiting for a 100 Continue response,
+ # we'll just send a HEAD first to see what's going on
+
+ if (ref($value)) {
+ return $self->account->_send_request_expect_nothing_probed('PUT',
+ $self->_uri($key), $conf, $value);
+ }
+ else {
+ return $self->account->_send_request_expect_nothing('PUT',
+ $self->_uri($key), $conf, $value);
+ }
+}
+
+sub add_key_filename {
+ my ($self, $key, $value, $conf) = @_;
+ return $self->add_key($key, \$value, $conf);
+}
+
+sub head_key {
+ my ($self, $key) = @_;
+ return $self->get_key($key, "HEAD");
+}
+
+sub get_key {
+ my ($self, $key, $method, $filename) = @_;
+ $method ||= "GET";
+ $filename = $$filename if ref $filename;
+ my $acct = $self->account;
+
+ my $request = $acct->_make_request($method, $self->_uri($key), {});
+ my $response = $acct->_do_http($request, $filename);
+
+ if ($response->code == 404) {
+ $acct->err(404);
+ $acct->errstr('The requested key was not found');
+ return undef;
+ }
+
+ return undef unless $acct->_check_response($response);
+
+ my $etag = $response->header('ETag');
+ if ($etag) {
+ $etag =~ s/^"//;
+ $etag =~ s/"$//;
+ }
+
+ my $return = {
+ content_length => $response->content_length || 0,
+ content_type => $response->content_type,
+ etag => $etag,
+ value => $response->content,
+ };
+
+ foreach my $header ($response->headers->header_field_names) {
+ next unless $header =~ /x-amz-meta-/i;
+ $return->{lc $header} = $response->header($header);
+ }
+
+ return $return;
+
+}
+
+sub get_key_filename {
+ my ($self, $key, $method, $filename) = @_;
+ $filename = $key unless defined $filename;
+ return $self->get_key($key, $method, \$filename);
+}
+
+# returns bool
+sub delete_key {
+ my ($self, $key) = @_;
+ croak 'must specify key' unless $key && length $key;
+ return $self->account->_send_request_expect_nothing('DELETE',
+ $self->_uri($key), {});
+}
+
+sub get_acl {
+ my ($self, $key) = @_;
+ my $acct = $self->account;
+
+ my $request = $acct->_make_request('GET', $self->_uri($key) . '?acl', {});
+ my $response = $acct->_do_http($request);
+
+ if ($response->code == 404) {
+ return undef;
+ }
+
+ return undef unless $acct->_check_response($response);
+
+ return $response->content;
+}
+
+sub set_acl {
+ my ($self, $conf) = @_;
+ $conf ||= {};
+
+ unless ($conf->{acl_xml} || $conf->{acl_short}) {
+ croak "need either acl_xml or acl_short";
+ }
+
+ if ($conf->{acl_xml} && $conf->{acl_short}) {
+ croak "cannot provide both acl_xml and acl_short";
+ }
+
+ my $path = $self->_uri($conf->{key}) . '?acl';
+
+ my $hash_ref =
+ ($conf->{acl_short})
+ ? {'x-amz-acl' => $conf->{acl_short}}
+ : {};
+
+ my $xml = $conf->{acl_xml} || '';
+
+ return $self->account->_send_request_expect_nothing('PUT', $path,
+ $hash_ref, $xml);
+
+}
+
+sub get_location_constraint {
+ my ($self) = @_;
+
+ my $xpc =
+ $self->account->_send_request('GET', $self->bucket . '/?location');
+ return undef unless $xpc && !$self->account->_remember_errors($xpc);
+
+ my $lc = $xpc->{content};
+ if (defined $lc && $lc eq '') {
+ $lc = undef;
+ }
+ return $lc;
+}
+
+# proxy up the err requests
+
+sub err { $_[0]->account->err }
+
+sub errstr { $_[0]->account->errstr }
+
+sub _content_sub {
+ my $filename = shift;
+ my $stat = stat($filename);
+ my $remaining = $stat->size;
+ my $blksize = $stat->blksize || 4096;
+
+ croak "$filename not a readable file with fixed size"
+ unless -r $filename
+ and $remaining;
+
+ my $fh = IO::File->new($filename, 'r')
+ or croak "Could not open $filename: $!";
+ $fh->binmode;
+
+ return sub {
+ my $buffer;
+
+ # upon retries the file is closed and we must reopen it
+ unless ($fh->opened) {
+ $fh = IO::File->new($filename, 'r')
+ or croak "Could not open $filename: $!";
+ $fh->binmode;
+ $remaining = $stat->size;
+ }
+
+ unless (my $read = $fh->read($buffer, $blksize)) {
+ croak
+ "Error while reading upload content $filename ($remaining remaining) $!"
+ if $! and $remaining;
+ $fh->close # otherwise, we found EOF
+ or croak "close of upload content $filename failed: $!";
+ $buffer
+ ||= ''; # LWP expects an empty string on finish, read returns 0
+ }
+ $remaining -= length($buffer);
+ return $buffer;
+ };
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+Bugzilla::S3::Bucket - A container class for a S3 bucket and its contents.
+
+=head1 METHODS
+
+=head2 new
+
+Instantiates a new bucket object.
+
+Requires a hash containing two arguments:
+
+=over
+
+=item bucket
+
+The name (identifier) of the bucket.
+
+=item account
+
+The L<Bugzilla::S3> object (representing the S3 account) this
+bucket is associated with.
+
+=back
+
+NOTE: This method does not check if a bucket actually
+exists. It simply instantiates the bucket.
+
+Typically a developer will not call this method directly but will
+work through the interface in L<Bugzilla::S3>, which handles bucket
+creation.
+
+=head2 add_key
+
+Takes three positional parameters:
+
+=over
+
+=item key
+
+A string identifier for the resource in this bucket.
+
+=item value
+
+A SCALAR string representing the contents of the resource.
+
+=item configuration
+
+A HASHREF of configuration data for this key. The configuration
+is generally the HTTP headers you want to pass to the S3
+service. The client library adds all necessary headers itself;
+entries in the configuration hash override what the library
+would send, and can add headers that are not typically
+required for S3 interactions.
+
+In addition to added and overridden HTTP headers, this
+HASHREF can have an C<acl_short> key to set the permissions
+(access) of the resource without a separate C<set_acl> call
+or an XML document. See the documentation for C<set_acl> for
+the recognized values and usage.
+
+=back
+
+Returns a boolean indicating success. Check C<err> and
+C<errstr> for the error message if this operation fails.
+
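+For example (the key name and header values are illustrative):
+
+    $bucket->add_key(
+        'attachment-42',
+        $data,
+        {
+            'Content-Type' => 'application/octet-stream',
+            acl_short      => 'private',
+        },
+    ) or warn 'upload failed: ' . $bucket->errstr;
+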
+=head2 add_key_filename
+
+This method works like C<add_key> except the value is assumed
+to be a filename on the local file system. The file will
+be streamed rather than loaded into memory in one big chunk.
+
+=head2 head_key $key_name
+
+Returns a configuration HASH for the given key. If the key does
+not exist in the bucket, C<undef> is returned.
+
+=head2 get_key $key_name, [$method]
+
+Takes a key and an optional HTTP method and fetches it from
+S3. The default HTTP method is GET.
+
+The method returns C<undef> if the key does not exist in the
+bucket. If a server error occurs, C<undef> is returned and
+C<err> and C<errstr> are set.
+
+On success, the method returns a HASHREF containing:
+
+=over
+
+=item content_type
+
+=item etag
+
+=item value
+
+=item @meta
+
+=back
+
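+For example (the key name is illustrative):
+
+    if (my $object = $bucket->get_key('attachment-42')) {
+        printf "fetched %d bytes of %s\n",
+            $object->{content_length}, $object->{content_type};
+        my $data = $object->{value};
+    }
+    else {
+        warn 'fetch failed: ' . $bucket->errstr;
+    }
+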
+=head2 get_key_filename $key_name, $method, $filename
+
+This method works like C<get_key>, but takes an added
+filename that the S3 resource will be written to.
+
+=head2 delete_key $key_name
+
+Permanently removes C<$key_name> from the bucket. Returns a
+boolean value indicating the operation's success.
+
+=head2 get_acl
+
+Retrieves the Access Control List (ACL) for the bucket or
+resource as an XML document.
+
+=over
+
+=item key
+
+The key of the stored resource to fetch. This parameter is
+optional. By default the method returns the ACL for the
+bucket itself.
+
+=back
+
+=head2 set_acl $conf
+
+Sets the Access Control List (ACL) for the bucket or a
+resource. Requires a HASHREF argument with one of the following keys:
+
+=over
+
+=item acl_xml
+
+An XML string which contains access control information
+which matches Amazon's published schema.
+
+=item acl_short
+
+Alternative shorthand notation for common types of ACLs that
+can be used in place of an ACL XML document.
+
+According to the Amazon S3 API documentation, the recognized
+acl_short types are defined as follows:
+
+=over
+
+=item private
+
+Owner gets FULL_CONTROL. No one else has any access rights.
+This is the default.
+
+=item public-read
+
+Owner gets FULL_CONTROL and the anonymous principal is
+granted READ access. If this policy is used on an object, it
+can be read from a browser with no authentication.
+
+=item public-read-write
+
+Owner gets FULL_CONTROL, the anonymous principal is granted
+READ and WRITE access. This is a useful policy to apply to a
+bucket, if you intend for any anonymous user to PUT objects
+into the bucket.
+
+=item authenticated-read
+
+Owner gets FULL_CONTROL, and any principal authenticated as
+a registered Amazon S3 user is granted READ access.
+
+=back
+
+=item key
+
+The key name to apply the permissions to. If the key is not
+provided, the bucket ACL is set.
+
+=back
+
+Returns a boolean indicating the operation's success.
+
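+For example, to make a single stored resource publicly readable
+(the key name is illustrative):
+
+    $bucket->set_acl({
+        key       => 'attachment-42',
+        acl_short => 'public-read',
+    }) or warn 'set_acl failed: ' . $bucket->errstr;
+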
+=head2 get_location_constraint
+
+Returns the location constraint data on a bucket.
+
+For more information on location constraints, refer to the
+Amazon S3 Developer Guide.
+
+=head2 err
+
+The S3 error code for the last error the account encountered.
+
+=head2 errstr
+
+A human-readable error string for the last error the account encountered.
+
+=head1 SEE ALSO
+
+L<Bugzilla::S3>
+
+=head1 AUTHOR & COPYRIGHT
+
+Please see the L<Bugzilla::S3> manpage for author, copyright, and
+license information.