lib/fog/aws/models/storage/file.rb: fog-1.5.0 vs fog-1.6.0

- old
+ new

@@ -99,13 +99,13 @@
       def public_url
         requires :directory, :key
         if connection.get_object_acl(directory.key, key).body['AccessControlList'].detect {|grant| grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers' && grant['Permission'] == 'READ'}
           if directory.key.to_s =~ /^(?:[a-z]|\d(?!\d{0,2}(?:\.\d{1,3}){3}$))(?:[a-z0-9]|\-(?![\.])){1,61}[a-z0-9]$/
-            "https://#{directory.key}.s3.amazonaws.com/#{Fog::AWS.escape(key)}"
+            "https://#{directory.key}.s3.amazonaws.com/#{Fog::AWS.escape(key)}".gsub('%2F','/')
           else
-            "https://s3.amazonaws.com/#{directory.key}/#{Fog::AWS.escape(key)}"
+            "https://s3.amazonaws.com/#{directory.key}/#{Fog::AWS.escape(key)}".gsub('%2F','/')
           end
         else
           nil
         end
       end

@@ -171,10 +171,11 @@
         # TODO: optionally upload chunks in parallel using threads
         # (may cause network performance problems with many small chunks)
         # TODO: Support large chunk sizes without reading the chunk into memory
         body.rewind if body.respond_to?(:rewind)
         while (chunk = body.read(multipart_chunk_size)) do
-          part_upload = connection.upload_part(directory.key, key, upload_id, part_tags.size + 1, chunk )
+          md5 = Base64.encode64(Digest::MD5.digest(chunk)).strip
+          part_upload = connection.upload_part(directory.key, key, upload_id, part_tags.size + 1, chunk, 'Content-MD5' => md5 )
           part_tags << part_upload.headers["ETag"]
         end
       rescue
         # Abort the upload & reraise
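
The first hunk changes public_url so that, after the whole key is percent-encoded, the "/" separators are restored. Fog::AWS.escape encodes reserved characters throughout the key, including the slashes S3 treats as pseudo-directory delimiters, so a key such as photos/2012/cat.jpg previously appeared as photos%2F2012%2Fcat.jpg in the public URL. A minimal sketch of the effect, using CGI.escape as a stand-in for Fog::AWS.escape (an assumption: both encode "/" as %2F) and a hypothetical bucket name:

    require 'cgi'

    key     = 'photos/2012/cat.jpg'   # key containing "directory" separators
    escaped = CGI.escape(key)         # => "photos%2F2012%2Fcat.jpg"
    url     = "https://bucket.s3.amazonaws.com/#{escaped}".gsub('%2F', '/')
    # => "https://bucket.s3.amazonaws.com/photos/2012/cat.jpg"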
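
The second hunk makes multipart uploads send a Content-MD5 header with each part, letting S3 verify every chunk on receipt rather than silently storing corrupted data. The header value is the base64-encoded binary MD5 digest of the part body; .strip removes the trailing newline that Base64.encode64 appends. A standalone sketch of the computation:

    require 'base64'
    require 'digest/md5'

    chunk = 'example part data'       # stand-in for body.read(multipart_chunk_size)
    md5   = Base64.encode64(Digest::MD5.digest(chunk)).strip
    # S3 recomputes the digest server-side and rejects the UploadPart
    # request with a BadDigest error if the two values disagree.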