Using DigitalOcean Spaces with AWS S3 SDKs

Spaces is an S3-compatible object storage service that lets you store and serve large amounts of data. The free, built-in Spaces CDN minimizes page load times, improves performance, and reduces bandwidth and infrastructure costs.

The Spaces API is inter-operable with the AWS S3 API, meaning you can use existing S3 tools and libraries with Spaces. A common use case is managing Spaces programmatically with AWS’ S3 SDKs.

After you set up and configure an SDK, you can follow the examples below to see how to perform common Spaces operations in JavaScript, Go, PHP, Python 3, and Ruby.

Note
When using S3-focused tools, keep in mind that S3 terminology differs from DigitalOcean terminology. An S3 “bucket” is the equivalent of an individual Space and an S3 “key” is the name of a file.

Setup and Configuration

Install the SDK

Install the AWS SDK using the package manager for your language of choice.

npm install aws-sdk
go get -u github.com/aws/aws-sdk-go
php composer.phar require aws/aws-sdk-php
pip install boto3
gem install aws-sdk-s3

Create Access Keys

To use the Spaces API, you need to create an access key and secret key for your Space from the API page in the control panel.

The examples below rely on environment variables to access these keys. Export SPACES_KEY and SPACES_SECRET to your environment (e.g. export SPACES_KEY=EXAMPLE7UQOTHDTF3GK4) to make them available to your code.

Configure a Client

To use Spaces with tools or libraries designed for the S3 API, you must configure the “endpoint” setting to point to Spaces. The value should be ${REGION}.digitaloceanspaces.com where ${REGION} is the DigitalOcean datacenter region (e.g. nyc3) where your Space is located.

// Configure an S3 client that talks to Spaces instead of AWS: the endpoint
// selects the DigitalOcean datacenter (nyc3 here), and the access keys are
// read from the SPACES_KEY / SPACES_SECRET environment variables.
const AWS = require('aws-sdk');
const fs = require('fs'); // Needed for example below

const spacesEndpoint = new AWS.Endpoint('nyc3.digitaloceanspaces.com');
const s3 = new AWS.S3({
    endpoint: spacesEndpoint,
    accessKeyId: process.env.SPACES_KEY,
    secretAccessKey: process.env.SPACES_SECRET
});
Note
This SDK requires the region to be us-east-1, an AWS region name, to successfully create a new Space. The DigitalOcean datacenter region is based on the endpoint value, which is nyc3 in the example below.
package main

import (
    "os"
    // Additional imports needed for examples below
    "fmt"
    "io"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Spaces access keys, exported as environment variables beforehand.
    key := os.Getenv("SPACES_KEY")
    secret := os.Getenv("SPACES_SECRET")

    // Region must be a valid AWS region name (us-east-1); the actual
    // DigitalOcean datacenter is chosen by the Endpoint URL (nyc3 here).
    s3Config := &aws.Config{
        Credentials: credentials.NewStaticCredentials(key, secret, ""),
        Endpoint:    aws.String("https://nyc3.digitaloceanspaces.com"),
        Region:      aws.String("us-east-1"),
    }

    // NOTE(review): session.New is deprecated in aws-sdk-go in favor of
    // session.NewSession, which also returns setup errors.
    newSession := session.New(s3Config)
    s3Client := s3.New(newSession)

    // ...
Note
This SDK requires the region to be us-east-1, an AWS region name, to successfully create a new Space. The DigitalOcean datacenter region is based on the endpoint value, which is nyc3 in the example below.
<?php

// Included aws/aws-sdk-php via Composer's autoloader
require 'vendor/autoload.php';
use Aws\S3\S3Client;

// region must be an AWS region name (us-east-1); the actual DigitalOcean
// datacenter is chosen by the endpoint URL (nyc3 here). Keys come from the
// SPACES_KEY / SPACES_SECRET environment variables.
$client = new Aws\S3\S3Client([
        'version' => 'latest',
        'region'  => 'us-east-1',
        'endpoint' => 'https://nyc3.digitaloceanspaces.com',
        'credentials' => [
                'key'    => getenv('SPACES_KEY'),
                'secret' => getenv('SPACES_SECRET'),
            ],
]);
import os
import boto3

# Point boto3 at the Spaces endpoint; the access keys come from the
# SPACES_KEY / SPACES_SECRET environment variables.
session = boto3.session.Session()
client = session.client('s3',
                        region_name='nyc3',
                        endpoint_url='https://nyc3.digitaloceanspaces.com',
                        aws_access_key_id=os.getenv('SPACES_KEY'),
                        aws_secret_access_key=os.getenv('SPACES_SECRET'))
Note
This SDK requires the region to be us-east-1, an AWS region name, to successfully create a new Space. The DigitalOcean datacenter region is based on the endpoint value, which is nyc3 in the example below.
require 'aws-sdk-s3'

# region must be an AWS region name (us-east-1); the actual DigitalOcean
# datacenter is chosen by the endpoint URL (nyc3 here).
client = Aws::S3::Client.new(
  access_key_id: ENV['SPACES_KEY'],
  secret_access_key: ENV['SPACES_SECRET'],
  endpoint: 'https://nyc3.digitaloceanspaces.com',
  region: 'us-east-1'
)

Usage Examples

Create a New Space

These examples create a new Space in the region configured above.

Space names must be globally unique. Attempting to create a Space with a name that is in use will fail with a BucketAlreadyExists error and return a 409 status code.

// Create a new Space; fails with BucketAlreadyExists (409) if the name is taken.
var params = {
    Bucket: "example-space-name"
};

s3.createBucket(params, function(err, data) {
    if (err) console.log(err, err.stack);
    else     console.log(data);
});
    // Create a new Space; the name must be globally unique.
    params := &s3.CreateBucketInput{
        Bucket: aws.String("example-space-name"),
    }

    _, err := s3Client.CreateBucket(params)
    if err != nil {
        fmt.Println(err.Error())
    }
// Create a new Space; the name must be globally unique.
$client->createBucket([
    'Bucket' => 'example-space-name',
]);
client.create_bucket(Bucket='example-space-name')
# Create a new Space; the name must be globally unique.
client.create_bucket({
  bucket: "example-space-name",
})

List All Spaces in a Region

These examples list all of your account's Spaces in your client's endpoint region by retrieving the list of Spaces from the API and looping through them to print their names.

// List every Space in this endpoint's region and print each name.
s3.listBuckets({}, function(err, data) {
    if (err) console.log(err, err.stack);
    else {
        data['Buckets'].forEach(function(space) {
            console.log(space['Name']);
        })
    };
});
    // List every Space in this endpoint's region and print each name.
    spaces, err := s3Client.ListBuckets(nil)
    if err != nil {
        fmt.Println(err.Error())
        return
    }

    for _, b := range spaces.Buckets {
        fmt.Println(aws.StringValue(b.Name))
    }
// List every Space in this endpoint's region and print each name.
$spaces = $client->listBuckets();
foreach ($spaces['Buckets'] as $space){
    echo $space['Name']."\n";
}
# List every Space in this endpoint's region and print each name.
response = client.list_buckets()
for space in response['Buckets']:
    print(space['Name'])
# List every Space in this endpoint's region and print each name.
spaces =  client.list_buckets()
spaces.buckets.each do |space|
  puts "#{space.name}"
end

Upload a File to a Space

These examples upload a file to a Space using the private canned ACL so the uploaded file is not publicly accessible.

In the S3 API, “canned-ACLs” are pre-defined sets of permissions that can be used to manage access to buckets and objects. Spaces only supports the private and public-read canned-ACLs.

// Upload a file with the "private" canned ACL so it is not publicly readable.
var params = {
    Bucket: "example-space-name",
    Key: "file.ext",
    Body: "The contents of the file.",
    ACL: "private"
};

s3.putObject(params, function(err, data) {
    if (err) console.log(err, err.stack);
    else     console.log(data);
});
    // Upload a file with the "private" canned ACL so it is not publicly readable.
    object := s3.PutObjectInput{
        Bucket: aws.String("example-space-name"),
        Key:    aws.String("file.ext"),
        Body:   strings.NewReader("The contents of the file."),
        ACL:    aws.String("private"),
    }
    _, err := s3Client.PutObject(&object)
    if err != nil {
        fmt.Println(err.Error())
    }
// Upload a file with the "private" canned ACL so it is not publicly readable.
$client->putObject([
     'Bucket' => 'example-space-name',
     'Key'    => 'file.ext',
     'Body'   => 'The contents of the file.',
     'ACL'    => 'private'
]);
# Upload a file with the "private" canned ACL so it is not publicly readable.
client.put_object(Bucket='example-space-name',
                  Key='file.ext',
                  Body=b'The contents of the file.',
                  ACL='private')
# Upload a file with the "private" canned ACL so it is not publicly readable.
client.put_object({
  bucket: "example-space-name",
  key: "file.ext",
  body: "The contents of the file.",
  acl: "private"
})

List All Files in a Space

These examples list all of the files stored in a specific Space by retrieving the list of files from the API and looping through them to print their names.

// List every file stored in the Space and print each key (file name).
var params = {
    Bucket: "example-space-name",
};

s3.listObjects(params, function(err, data) {
    if (err) console.log(err, err.stack);
    else {
        data['Contents'].forEach(function(obj) {
            console.log(obj['Key']);
        })
    };
});
    // List every file stored in the Space and print each key (file name).
    input := &s3.ListObjectsInput{
        Bucket: aws.String("example-space-name"),
    }

    objects, err := s3Client.ListObjects(input)
    if err != nil {
        fmt.Println(err.Error())
        return // objects is nil on error; falling through would dereference it
    }

    for _, obj := range objects.Contents {
        fmt.Println(aws.StringValue(obj.Key))
    }
// List every file stored in the Space and print each key (file name).
$objects = $client->listObjects([
    'Bucket' => 'example-space-name',
]);

foreach ($objects['Contents'] as $obj){
    echo $obj['Key']."\n";
}
# List every file stored in the Space and print each key (file name).
response = client.list_objects(Bucket='example-space-name')
for obj in response['Contents']:
    print(obj['Key'])
# List every file stored in the Space and print each key (file name).
objects = client.list_objects({bucket: "example-space-name"})
objects.contents.each do |obj|
  puts "#{obj.key}"
end

Download a File from a Space

These examples make an authenticated request to download a file from a specific Space. They will download a file stored in Spaces (file.ext) to /tmp/local-file.ext on the local file-system.

// Download file.ext from the Space and write it to /tmp/local-file.ext.
// Uses the fs module required in the client-configuration snippet.
var params = {
    Bucket: "example-space-name",
    Key: "file.ext"
};

s3.getObject(params, function(err, data) {
    if (err) console.log(err, err.stack);
    else     fs.writeFileSync("/tmp/local-file.ext", data.Body);
});
    // Download file.ext from the Space and write it to /tmp/local-file.ext.
    input := &s3.GetObjectInput{
        Bucket: aws.String("example-space-name"),
        Key:    aws.String("file.ext"),
    }

    result, err := s3Client.GetObject(input)
    if err != nil {
        fmt.Println(err.Error())
        return // result is nil on error; falling through would dereference it
    }
    defer result.Body.Close() // release the HTTP response body

    out, err := os.Create("/tmp/local-file.ext")
    if err != nil {
        fmt.Println(err.Error())
        return // don't defer Close on a nil file
    }
    defer out.Close()

    _, err = io.Copy(out, result.Body)
    if err != nil {
        fmt.Println(err.Error())
    }
// Download file.ext from the Space and write it to /tmp/local-file.ext.
$result = $client->getObject([
    'Bucket' => 'example-space-name',
    'Key' => 'file.ext',
]);

file_put_contents('/tmp/local-file.ext', $result['Body']);
# Download file.ext from the Space to /tmp/local-file.ext.
client.download_file('example-space-name',
                     'file.ext',
                     '/tmp/local-file.ext')
# Download file.ext from the Space to /tmp/local-file.ext.
client.get_object(
  bucket: 'example-space-name',
  key: 'file.ext',
  response_target: '/tmp/local-file.ext'
)

Generate a Pre-Signed URL to Download a Private File

Using pre-signed URLs, you can share private files for a limited period of time with people that have the link. In the control panel, these are called Quick Share links.

The examples generate pre-signed URLs for a file (file.ext) in a Space that will last for five minutes.

// Generate a pre-signed GET URL for file.ext, valid for five minutes.
const expireSeconds = 60 * 5

const url = s3.getSignedUrl('getObject', {
    Bucket: 'example-space-name',
    Key: 'file.ext',
    Expires: expireSeconds
});

console.log(url);
    // Generate a pre-signed GET URL for file.ext, valid for five minutes.
    req, _ := s3Client.GetObjectRequest(&s3.GetObjectInput{
        Bucket: aws.String("example-space-name"),
        Key:    aws.String("file.ext"),
    })

    urlStr, err := req.Presign(5 * time.Minute)
    if err != nil {
        fmt.Println(err.Error())
    }

    fmt.Println(urlStr)
// Generate a pre-signed GET URL for file.ext, valid for five minutes.
$cmd = $client->getCommand('GetObject', [
    'Bucket' => 'example-space-name',
    'Key'    => 'file.ext'
]);

$request = $client->createPresignedRequest($cmd, '+5 minutes');
$presignedUrl = (string) $request->getUri();

echo $presignedUrl."\n";
# Generate a pre-signed GET URL for file.ext, valid for five minutes (300 s).
url = client.generate_presigned_url(ClientMethod='get_object',
                                    Params={'Bucket': 'example-space-name',
                                            'Key': 'file.ext'},
                                    ExpiresIn=300)

print(url)
# Generate a pre-signed GET URL for file.ext, valid for five minutes (300 s).
signer = Aws::S3::Presigner.new(client: client)
url = signer.presigned_url(
  :get_object,
  bucket: "example-space-name",
  key: "file.ext",
  expires_in: 300
)

puts url

Generate a Pre-Signed URL to Upload a File

You can also use pre-signed URLs to grant permission to upload a specific file using a PUT request. These URLs are only valid for a limited time period. These examples generate pre-signed URLs that will last for five minutes.

To create the pre-signed URL, you must specify the filename and its expected content type, like text or application/json.

// Generate a pre-signed PUT URL for new-file.ext, valid for five minutes.
// The eventual upload must use the same key and Content-Type given here.
const expireSeconds = 60 * 5

const url = s3.getSignedUrl('putObject', {
    Bucket: 'example-space-name',
    Key: 'new-file.ext',
    ContentType: 'text',
    Expires: expireSeconds
});

console.log(url);
    // Generate a pre-signed PUT URL for new-file.ext, valid for five minutes.
    req, _ := s3Client.PutObjectRequest(&s3.PutObjectInput{
        Bucket: aws.String("example-space-name"),
        Key:    aws.String("new-file.ext"),
    })
    urlStr, err := req.Presign(5 * time.Minute)
    if err != nil {
        fmt.Println(err.Error())
    }

    fmt.Println(urlStr)
// Generate a pre-signed PUT URL for new-file.ext, valid for five minutes.
$cmd = $client->getCommand('PutObject', [
    'Bucket' => 'example-space-name',
    'Key'    => 'new-file.ext'
]);

$request = $client->createPresignedRequest($cmd, '+5 minutes');
$presignedUrl = (string) $request->getUri();

echo $presignedUrl."\n";
# Generate a pre-signed PUT URL for new-file.ext, valid for five minutes (300 s).
url = client.generate_presigned_url(ClientMethod='put_object',
                                    Params={'Bucket': 'example-space-name',
                                            'Key': 'new-file.ext'},
                                    ExpiresIn=300)

print(url)
# Generate a pre-signed PUT URL for new-file.ext, valid for five minutes (300 s).
signer = Aws::S3::Presigner.new(client: client)
url = signer.presigned_url(
  :put_object,
  bucket: "example-space-name",
  key: "new-file.ext",
  expires_in: 300
)

puts url

You can use the resulting URL to upload the file using standard HTTP requests without needing access to the Space's secret key. The content type and file name used in the upload must match the ones used when generating the URL. For example:

curl -X PUT \
  -H "Content-Type: text" \
  -d "The contents of the file." \
  "https://example-space-name.nyc3.digitaloceanspaces.com/new-file.ext?AWSAccessKeyId=EXAMPLE7UQOTHDTF3GK4&Content-Type=text&Expires=1580419378&Signature=YIXPlynk4BALXE6fH7vqbnwjSEw%3D"

Delete a File from a Space

These examples delete a file (example-file-to-delete.ext) from a specific Space.

// Delete a single file from the Space.
var params = {
    Bucket: "example-space-name",
    Key: "example-file-to-delete.ext"
};

s3.deleteObject(params, function(err, data) {
   if (err) console.log(err, err.stack);
   else     console.log(data);
});
    // Delete a single file from the Space.
    input := &s3.DeleteObjectInput{
        Bucket: aws.String("example-space-name"),
        Key:    aws.String("example-file-to-delete.ext"),
    }

    // Discard the output with the blank identifier: a named variable that is
    // never read is a "declared and not used" compile error in Go.
    _, err := s3Client.DeleteObject(input)
    if err != nil {
        fmt.Println(err.Error())
    }
// Delete a single file from the Space.
$client->deleteObject([
    'Bucket' => 'example-space-name',
    'Key' => 'example-file-to-delete.ext',
]);
# Delete a single file from the Space.
client.delete_object(Bucket='example-space-name',
                     Key='example-file-to-delete.ext')
# Delete a single file from the Space.
client.delete_object({
  bucket: 'example-space-name',
  key: 'example-file-to-delete.ext'
})

Delete a Space

These examples delete a Space. To do so, you must first delete all files in the Space. Attempting to delete a Space that still contains files will fail with a BucketNotEmpty error and return a 409 status code.

// Delete the Space itself; it must already be empty or this fails
// with BucketNotEmpty (409).
var params = {
    Bucket: "example-space-name",
};

s3.deleteBucket(params, function(err, data) {
    if (err) console.log(err, err.stack);
    else     console.log(data);
});
    // Delete the Space itself; it must already be empty or this fails
    // with BucketNotEmpty (409).
    input := &s3.DeleteBucketInput{
        Bucket: aws.String("example-space-name"),
    }

    // Discard the output with the blank identifier: a named variable that is
    // never read is a "declared and not used" compile error in Go.
    _, err := s3Client.DeleteBucket(input)
    if err != nil {
        fmt.Println(err.Error())
    }
// Delete the Space itself; it must already be empty.
$client->deleteBucket([
    'Bucket' => 'example-space-name',
]);
client.delete_bucket(Bucket='example-space-name')
client.delete_bucket({bucket: 'example-space-name'})

Additional Resources

For more details on compatibility with the S3 API, see the Spaces API documentation.

The full reference documentation for the SDKs used above can be found on each SDK's official documentation site (AWS SDK for JavaScript, AWS SDK for Go, AWS SDK for PHP, boto3, and the AWS SDK for Ruby).

When using those references, remember that S3 terminology differs from DigitalOcean terminology. An S3 “bucket” is the equivalent of an individual Space and an S3 “key” is the name of a file.