S3 ListObjectsV2 operation: api error NotImplemented, Server does not support one or more requested headers


I am trying to use Digitalocean spaces using the AWS-SDK V2 for Golang.

Everything works almost fine until the point where I attempt to perform an operation, then I get the error below;

operation error S3: ListObjectsV2, https response error StatusCode: 501, RequestID: , HostID: , api error NotImplemented: Server does not support one or more requested headers. Please see the linked error documentation for details.

Going through the error link, I understand that certain operations on Spaces on DO are not supported using AWS-SDK, however, CRUD operations are supported.

Also, this is very similar to the Upload file example in the Spaces API documentation here

Here’s a snippet of the code being used to perform the operation;


package graph

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.

import (


type S3PutObjectAPI interface {
	PutObject(ctx context.Context,
		params *s3.PutObjectInput,
		optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error)

func PutFile(c context.Context, api S3PutObjectAPI, input *s3.PutObjectInput) (*s3.PutObjectOutput, error) {
	return api.PutObject(c, input)

func (r *mutationResolver) UploadProfileImage(ctx context.Context, input model.ProfileImage) (bool, error) {
	SpaceName := os.Getenv("DO_SPACE_NAME")
	key := os.Getenv("ACCESS_KEY")
	secret := os.Getenv("ACCESS_SECRET")
	token := os.Getenv("API_TOKEN")

	_, userErr := r.GetUserField("id", *input.UserID); if userErr != nil {
		fmt.Errorf("error getting user: %v", userErr)

	customResolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
		return aws.Endpoint{
			URL:         fmt.Sprintf(""),
		}, nil

	cfg, err := config.LoadDefaultConfig(
		func(options *config.LoadOptions) error {
			options.Credentials = credentials.NewStaticCredentialsProvider(key, secret, token)
			options.EndpointResolver = customResolver
			options.Region = "fra1"

			return nil
	); if err != nil {
		fmt.Errorf("error getting config: %v", err)

	client := s3.NewFromConfig(cfg)

	objectsInput := &s3.ListObjectsV2Input{
		Bucket:  aws.String(SpaceName),

	objects, err := client.ListObjectsV2(context.TODO(), objectsInput)

	// print out <nil> even when there are files in the bucket

	fileInput := &s3.PutObjectInput{
		Bucket: aws.String(SpaceName),
		Key:    aws.String(input.File.Filename),
		Body:   input.File.File,
		ACL:    "public-read",

	_, putErr := client.PutObject(context.TODO(), fileInput); if putErr != nil {
		fmt.Printf("error uploading file: %v", err)

	return true, nil

Note: This operation is being performed from a resolver function within a GraphQL backend application. Here is a link to the entire resolver file.


Still searching for a solution…

Submit an answer
You can type!ref in this text area to quickly search our full set of tutorials, documentation & marketplace offerings and insert the link!

These answers are provided by our Community. If you find them useful, show some love by clicking the heart. If you run into issues leave a comment, or add your own answer to help others.

It seems like it changed a few months ago; it is now working. Here is how I set it up to retrieve all files:

import (

	awsV2 ""
	credentialsV2 ""
	s3v2 ""

// Package-level cache for the destination Spaces configuration/client,
// plus the set of object keys already present in the destination bucket.
var (
	dstConfig     *awsV2.Config
	dstClientV2   *s3v2.Client
	ExistingFiles map[string]struct{}
)

func getdstConfig() (_config *awsV2.Config, err error){

	if dstConfig != nil {
		return dstConfig, err

	dstKey := os.Getenv("SPACES_KEY")
	dstSecret := os.Getenv("SPACES_SECRET")
	dstRegion := os.Getenv("SPACES_REGION")

	if dstKey == "" || dstSecret == "" || dstRegion == "" {
		return _config, errors.New("invalid dst key, secret or region")

	appCreds := awsV2.NewCredentialsCache(credentialsV2.NewStaticCredentialsProvider(dstKey, dstSecret, ""))

	customResolver := awsV2.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (awsV2.Endpoint, error) {
			return awsV2.Endpoint{
				//PartitionID:   "aws",
				URL:           "",
				SigningRegion: "us-east-1",
			}, nil

	cfg, err := config.LoadDefaultConfig(context.TODO(),
	if err != nil {
	dstConfig = &cfg
	return dstConfig, err

func getDstClientV2() (*s3v2.Client, error) {

	_config, err := getdstConfig()
	if err != nil {
		return nil, err

	if dstClientV2 == nil && dstConfig != nil {
	        dstConfig = _config
		dstClientV2 = s3v2.NewFromConfig(*dstConfig)
	return dstClientV2, nil

func getExistingFilesV2(bucket string) error {

	var continuationToken *string
	input := &s3v2.ListObjectsV2Input{
		Bucket:  awsV2.String(bucket),
		ContinuationToken: continuationToken,

	if ExistingFiles == nil {
		ExistingFiles = make(map[string]struct{}, 0)

	i:= 0
	for {

		token := ""
		if input.ContinuationToken != nil {
			token = *input.ContinuationToken
		log.Printf("Page %d %s", i, token)
		resp, err := getDstClientV2().ListObjectsV2(context.TODO(), input)
		if err != nil {
			return err
		for _, obj := range resp.Contents {
		        ExistingFiles[*obj.Key] = struct{}{}
		if !resp.IsTruncated || len(resp.Contents) == 0 {
		input.ContinuationToken = resp.NextContinuationToken

	log.Printf("found %d existing files", len(ExistingFiles))
	return nil


It seems to be not supported, as it is stated in this doc:

To list the contents of a bucket, send a GET request to ${BUCKET}.${REGION}.digitaloceanspaces.com

Note: The version 2 list type is not currently supported.

You can consider using the older s3.ListObjects operation instead.