To upload a file to an S3 bucket, use the TransferUtility
class. When uploading data from a file, providing the object's key name is optional; if you don't provide one, the API uses the file name as the key name. When uploading data from a stream, you must provide the object's key name.
To set advanced upload options, such as the part size, the number of threads used when uploading the parts concurrently, metadata, the storage class, or the ACL, use the TransferUtilityUploadRequest
class.
The following C# example uploads a file to an Amazon S3 bucket in multiple parts. It shows how to use the various TransferUtility.Upload
overloads to upload a file. Each successive call to upload replaces the previous upload. For information about the example's compatibility with a specific version of the AWS SDK for .NET, and for instructions on creating and testing a working sample, see Running the Amazon S3 .NET Code Examples.
using Amazon;
using Amazon.S3;
using Amazon.S3.Transfer;
using System;
using System.IO;
using System.Threading.Tasks;

namespace Amazon.DocSamples.S3
{
    class UploadFileMPUHighLevelAPITest
    {
        private const string bucketName = "*** provide bucket name ***";
        private const string keyName = "*** provide a name for the uploaded object ***";
        private const string filePath = "*** provide the full path name of the file to upload ***";
        // Specify your bucket region (an example region is shown).
        private static readonly RegionEndpoint bucketRegion = RegionEndpoint.USWest2;
        private static IAmazonS3 s3Client;

        public static void Main()
        {
            s3Client = new AmazonS3Client(bucketRegion);
            UploadFileAsync().Wait();
        }

        private static async Task UploadFileAsync()
        {
            try
            {
                var fileTransferUtility =
                    new TransferUtility(s3Client);

                // Option 1. Upload a file. The file name is used as the object key name.
                await fileTransferUtility.UploadAsync(filePath, bucketName);
                Console.WriteLine("Upload 1 completed");

                // Option 2. Specify the object key name explicitly.
                await fileTransferUtility.UploadAsync(filePath, bucketName, keyName);
                Console.WriteLine("Upload 2 completed");

                // Option 3. Upload data from a type of System.IO.Stream.
                using (var fileToUpload =
                    new FileStream(filePath, FileMode.Open, FileAccess.Read))
                {
                    await fileTransferUtility.UploadAsync(fileToUpload,
                                                          bucketName, keyName);
                }
                Console.WriteLine("Upload 3 completed");

                // Option 4. Specify advanced settings.
                var fileTransferUtilityRequest = new TransferUtilityUploadRequest
                {
                    BucketName = bucketName,
                    FilePath = filePath,
                    StorageClass = S3StorageClass.StandardInfrequentAccess,
                    PartSize = 6291456, // 6 MB.
                    Key = keyName,
                    CannedACL = S3CannedACL.PublicRead
                };
                fileTransferUtilityRequest.Metadata.Add("param1", "Value1");
                fileTransferUtilityRequest.Metadata.Add("param2", "Value2");

                await fileTransferUtility.UploadAsync(fileTransferUtilityRequest);
                Console.WriteLine("Upload 4 completed");
            }
            catch (AmazonS3Exception e)
            {
                Console.WriteLine("Error encountered on server. Message:'{0}' when writing an object", e.Message);
            }
            catch (Exception e)
            {
                Console.WriteLine("Unknown error encountered on server. Message:'{0}' when writing an object", e.Message);
            }
        }
    }
}
This topic explains how to perform a multipart file upload by using the high-level Aws\S3\MultipartUploader
class in the AWS SDK for PHP. This section assumes that you have already followed the instructions in Using the AWS SDK for PHP and Running PHP Examples and that the AWS SDK for PHP is properly installed.
The following PHP code example uploads a file to an Amazon S3 bucket. The example demonstrates how to set parameters for the MultipartUploader
object.
For information about running the PHP examples in this guide, see Running PHP Examples.
require 'vendor/autoload.php';

use Aws\Exception\MultipartUploadException;
use Aws\S3\MultipartUploader;
use Aws\S3\S3Client;

$bucket = '*** Your Bucket Name ***';
$keyname = '*** Your Object Key ***';

$s3 = new S3Client([
    'version' => 'latest',
    'region'  => 'us-east-1'
]);

// Prepare the upload parameters.
$uploader = new MultipartUploader($s3, '/path/to/large/file.zip', [
    'bucket' => $bucket,
    'key'    => $keyname
]);

// Perform the upload.
try {
    $result = $uploader->upload();
    echo "Upload complete: {$result['ObjectURL']}" . PHP_EOL;
} catch (MultipartUploadException $e) {
    echo $e->getMessage() . PHP_EOL;
}
The following example uploads an object by using the high-level multipart upload Python API (the TransferManager
class).
"""
Use Boto 3 managed file transfers to manage multipart uploads to and downloads
from an Amazon S3 bucket.
When the file to transfer is larger than the specified threshold, the transfer
manager automatically uses multipart uploads or downloads. This demonstration
shows how to use several of the available transfer manager settings and reports
thread usage and time to transfer.
"""
import sys
import threading
import boto3
from boto3.s3.transfer import TransferConfig
MB = 1024 * 1024
s3 = boto3.resource('s3')
class TransferCallback:
"""
Handle callbacks from the transfer manager.
The transfer manager periodically calls the __call__ method throughout
the upload and download process so that it can take action, such as
displaying progress to the user and collecting data about the transfer.
"""
def __init__(self, target_size):
self._target_size = target_size
self._total_transferred = 0
self._lock = threading.Lock()
self.thread_info = {}
def __call__(self, bytes_transferred):
"""
The callback method that is called by the transfer manager.
Display progress during file transfer and collect per-thread transfer
data. This method can be called by multiple threads, so shared instance
data is protected by a thread lock.
"""
thread = threading.current_thread()
with self._lock:
self._total_transferred += bytes_transferred
if thread.ident not in self.thread_info.keys():
self.thread_info[thread.ident] = bytes_transferred
else:
self.thread_info[thread.ident] += bytes_transferred
target = self._target_size * MB
sys.stdout.write(
f"\r{self._total_transferred} of {target} transferred "
f"({(self._total_transferred / target) * 100:.2f}%).")
sys.stdout.flush()
def upload_with_default_configuration(local_file_path, bucket_name,
object_key, file_size_mb):
"""
Upload a file from a local folder to an Amazon S3 bucket, using the default
configuration.
"""
transfer_callback = TransferCallback(file_size_mb)
s3.Bucket(bucket_name).upload_file(
local_file_path,
object_key,
Callback=transfer_callback)
return transfer_callback.thread_info
def upload_with_chunksize_and_meta(local_file_path, bucket_name, object_key,
file_size_mb, metadata=None):
"""
Upload a file from a local folder to an Amazon S3 bucket, setting a
multipart chunk size and adding metadata to the Amazon S3 object.
The multipart chunk size controls the size of the chunks of data that are
sent in the request. A smaller chunk size typically results in the transfer
manager using more threads for the upload.
The metadata is a set of key-value pairs that are stored with the object
in Amazon S3.
"""
transfer_callback = TransferCallback(file_size_mb)
config = TransferConfig(multipart_chunksize=1 * MB)
extra_args = {'Metadata': metadata} if metadata else None
s3.Bucket(bucket_name).upload_file(
local_file_path,
object_key,
Config=config,
ExtraArgs=extra_args,
Callback=transfer_callback)
return transfer_callback.thread_info
def upload_with_high_threshold(local_file_path, bucket_name, object_key,
file_size_mb):
"""
Upload a file from a local folder to an Amazon S3 bucket, setting a
multipart threshold larger than the size of the file.
Setting a multipart threshold larger than the size of the file results
in the transfer manager sending the file as a standard upload instead of
a multipart upload.
"""
transfer_callback = TransferCallback(file_size_mb)
config = TransferConfig(multipart_threshold=file_size_mb * 2 * MB)
s3.Bucket(bucket_name).upload_file(
local_file_path,
object_key,
Config=config,
Callback=transfer_callback)
return transfer_callback.thread_info
def upload_with_sse(local_file_path, bucket_name, object_key,
file_size_mb, sse_key=None):
"""
Upload a file from a local folder to an Amazon S3 bucket, adding server-side
encryption with customer-provided encryption keys to the object.
When this kind of encryption is specified, Amazon S3 encrypts the object
at rest and allows downloads only when the expected encryption key is
provided in the download request.
"""
transfer_callback = TransferCallback(file_size_mb)
if sse_key:
extra_args = {
'SSECustomerAlgorithm': 'AES256',
'SSECustomerKey': sse_key}
else:
extra_args = None
s3.Bucket(bucket_name).upload_file(
local_file_path,
object_key,
ExtraArgs=extra_args,
Callback=transfer_callback)
return transfer_callback.thread_info
def download_with_default_configuration(bucket_name, object_key,
download_file_path, file_size_mb):
"""
Download a file from an Amazon S3 bucket to a local folder, using the
default configuration.
"""
transfer_callback = TransferCallback(file_size_mb)
s3.Bucket(bucket_name).Object(object_key).download_file(
download_file_path,
Callback=transfer_callback)
return transfer_callback.thread_info
def download_with_single_thread(bucket_name, object_key,
download_file_path, file_size_mb):
"""
Download a file from an Amazon S3 bucket to a local folder, using a
single thread.
"""
transfer_callback = TransferCallback(file_size_mb)
config = TransferConfig(use_threads=False)
s3.Bucket(bucket_name).Object(object_key).download_file(
download_file_path,
Config=config,
Callback=transfer_callback)
return transfer_callback.thread_info
def download_with_high_threshold(bucket_name, object_key,
download_file_path, file_size_mb):
"""
Download a file from an Amazon S3 bucket to a local folder, setting a
multipart threshold larger than the size of the file.
Setting a multipart threshold larger than the size of the file results
in the transfer manager sending the file as a standard download instead
of a multipart download.
"""
transfer_callback = TransferCallback(file_size_mb)
config = TransferConfig(multipart_threshold=file_size_mb * 2 * MB)
s3.Bucket(bucket_name).Object(object_key).download_file(
download_file_path,
Config=config,
Callback=transfer_callback)
return transfer_callback.thread_info
def download_with_sse(bucket_name, object_key, download_file_path,
file_size_mb, sse_key):
"""
Download a file from an Amazon S3 bucket to a local folder, adding a
customer-provided encryption key to the request.
When this kind of encryption is specified, Amazon S3 encrypts the object
at rest and allows downloads only when the expected encryption key is
provided in the download request.
"""
transfer_callback = TransferCallback(file_size_mb)
if sse_key:
extra_args = {
'SSECustomerAlgorithm': 'AES256',
'SSECustomerKey': sse_key}
else:
extra_args = None
s3.Bucket(bucket_name).Object(object_key).download_file(
download_file_path,
ExtraArgs=extra_args,
Callback=transfer_callback)
return transfer_callback.thread_info
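The sample above defines the transfer functions but never invokes them. The following is a minimal driver sketch showing one way to call them; the file path ./example.dat, the bucket name amzn-s3-demo-bucket, the object key example-object, and the 32 MB size are illustrative assumptions, not values from the sample.

# A minimal driver sketch (assumptions: './example.dat' is an existing local
# file of about 32 MB, 'amzn-s3-demo-bucket' is a bucket you own, and your
# AWS credentials are configured). These names are illustrative placeholders.
if __name__ == '__main__':
    file_size_mb = 32

    # Upload with a 1 MB chunk size and custom metadata, then report
    # how many bytes each worker thread transferred.
    thread_info = upload_with_chunksize_and_meta(
        './example.dat', 'amzn-s3-demo-bucket', 'example-object',
        file_size_mb, metadata={'purpose': 'demonstration'})
    print()
    for ident, byte_count in thread_info.items():
        print(f"Thread {ident} copied {byte_count} bytes.")

    # Download the same object back with the default configuration.
    thread_info = download_with_default_configuration(
        'amzn-s3-demo-bucket', 'example-object', './example-download.dat',
        file_size_mb)
    print()
    for ident, byte_count in thread_info.items():
        print(f"Thread {ident} copied {byte_count} bytes.")

Because TransferCallback records the bytes transferred per thread, printing thread_info after each call shows how many threads the transfer manager actually used for that transfer.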