first-commit
This commit is contained in:
307
modules/storage/azureblob.go
Normal file
307
modules/storage/azureblob.go
Normal file
@@ -0,0 +1,307 @@
|
||||
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
|
||||
)
|
||||
|
||||
var _ Object = &azureBlobObject{}
|
||||
|
||||
type azureBlobObject struct {
|
||||
blobClient *blob.Client
|
||||
Context context.Context
|
||||
Name string
|
||||
Size int64
|
||||
ModTime *time.Time
|
||||
offset int64
|
||||
}
|
||||
|
||||
func (a *azureBlobObject) Read(p []byte) (int, error) {
|
||||
// TODO: improve the performance, we can implement another interface, maybe implement io.WriteTo
|
||||
if a.offset >= a.Size {
|
||||
return 0, io.EOF
|
||||
}
|
||||
count := min(int64(len(p)), a.Size-a.offset)
|
||||
|
||||
res, err := a.blobClient.DownloadBuffer(a.Context, p, &blob.DownloadBufferOptions{
|
||||
Range: blob.HTTPRange{
|
||||
Offset: a.offset,
|
||||
Count: count,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return 0, convertAzureBlobErr(err)
|
||||
}
|
||||
a.offset += res
|
||||
|
||||
return int(res), nil
|
||||
}
|
||||
|
||||
func (a *azureBlobObject) Close() error {
|
||||
a.offset = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *azureBlobObject) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
case io.SeekCurrent:
|
||||
offset += a.offset
|
||||
case io.SeekEnd:
|
||||
offset = a.Size + offset
|
||||
default:
|
||||
return 0, errors.New("Seek: invalid whence")
|
||||
}
|
||||
|
||||
if offset > a.Size {
|
||||
return 0, errors.New("Seek: invalid offset")
|
||||
} else if offset < 0 {
|
||||
return 0, errors.New("Seek: invalid offset")
|
||||
}
|
||||
a.offset = offset
|
||||
return a.offset, nil
|
||||
}
|
||||
|
||||
func (a *azureBlobObject) Stat() (os.FileInfo, error) {
|
||||
return &azureBlobFileInfo{
|
||||
a.Name,
|
||||
a.Size,
|
||||
*a.ModTime,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ ObjectStorage = &AzureBlobStorage{}
|
||||
|
||||
// AzureStorage returns a azure blob storage
|
||||
type AzureBlobStorage struct {
|
||||
cfg *setting.AzureBlobStorageConfig
|
||||
ctx context.Context
|
||||
credential *azblob.SharedKeyCredential
|
||||
client *azblob.Client
|
||||
}
|
||||
|
||||
func convertAzureBlobErr(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if bloberror.HasCode(err, bloberror.BlobNotFound) {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
var respErr *azcore.ResponseError
|
||||
if !errors.As(err, &respErr) {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("%s", respErr.ErrorCode)
|
||||
}
|
||||
|
||||
// NewAzureBlobStorage returns a azure blob storage
|
||||
func NewAzureBlobStorage(ctx context.Context, cfg *setting.Storage) (ObjectStorage, error) {
|
||||
config := cfg.AzureBlobConfig
|
||||
|
||||
log.Info("Creating Azure Blob storage at %s:%s with base path %s", config.Endpoint, config.Container, config.BasePath)
|
||||
|
||||
cred, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
|
||||
if err != nil {
|
||||
return nil, convertAzureBlobErr(err)
|
||||
}
|
||||
client, err := azblob.NewClientWithSharedKeyCredential(config.Endpoint, cred, &azblob.ClientOptions{})
|
||||
if err != nil {
|
||||
return nil, convertAzureBlobErr(err)
|
||||
}
|
||||
|
||||
_, err = client.CreateContainer(ctx, config.Container, &container.CreateOptions{})
|
||||
if err != nil {
|
||||
// Check to see if we already own this container (which happens if you run this twice)
|
||||
if !bloberror.HasCode(err, bloberror.ContainerAlreadyExists) {
|
||||
return nil, convertMinioErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
return &AzureBlobStorage{
|
||||
cfg: &config,
|
||||
ctx: ctx,
|
||||
credential: cred,
|
||||
client: client,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *AzureBlobStorage) buildAzureBlobPath(p string) string {
|
||||
p = util.PathJoinRelX(a.cfg.BasePath, p)
|
||||
if p == "." || p == "/" {
|
||||
p = "" // azure uses prefix, so path should be empty as relative path
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (a *AzureBlobStorage) getObjectNameFromPath(path string) string {
|
||||
s := strings.Split(path, "/")
|
||||
return s[len(s)-1]
|
||||
}
|
||||
|
||||
// Open opens a file
|
||||
func (a *AzureBlobStorage) Open(path string) (Object, error) {
|
||||
blobClient := a.getBlobClient(path)
|
||||
res, err := blobClient.GetProperties(a.ctx, &blob.GetPropertiesOptions{})
|
||||
if err != nil {
|
||||
return nil, convertAzureBlobErr(err)
|
||||
}
|
||||
return &azureBlobObject{
|
||||
Context: a.ctx,
|
||||
blobClient: blobClient,
|
||||
Name: a.getObjectNameFromPath(path),
|
||||
Size: *res.ContentLength,
|
||||
ModTime: res.LastModified,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Save saves a file to azure blob storage
|
||||
func (a *AzureBlobStorage) Save(path string, r io.Reader, size int64) (int64, error) {
|
||||
rd := util.NewCountingReader(r)
|
||||
_, err := a.client.UploadStream(
|
||||
a.ctx,
|
||||
a.cfg.Container,
|
||||
a.buildAzureBlobPath(path),
|
||||
rd,
|
||||
// TODO: support set block size and concurrency
|
||||
&blockblob.UploadStreamOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
return 0, convertAzureBlobErr(err)
|
||||
}
|
||||
return int64(rd.Count()), nil
|
||||
}
|
||||
|
||||
type azureBlobFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (a azureBlobFileInfo) Name() string {
|
||||
return path.Base(a.name)
|
||||
}
|
||||
|
||||
func (a azureBlobFileInfo) Size() int64 {
|
||||
return a.size
|
||||
}
|
||||
|
||||
func (a azureBlobFileInfo) ModTime() time.Time {
|
||||
return a.modTime
|
||||
}
|
||||
|
||||
func (a azureBlobFileInfo) IsDir() bool {
|
||||
return strings.HasSuffix(a.name, "/")
|
||||
}
|
||||
|
||||
func (a azureBlobFileInfo) Mode() os.FileMode {
|
||||
return os.ModePerm
|
||||
}
|
||||
|
||||
func (a azureBlobFileInfo) Sys() any {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stat returns the stat information of the object
|
||||
func (a *AzureBlobStorage) Stat(path string) (os.FileInfo, error) {
|
||||
blobClient := a.getBlobClient(path)
|
||||
res, err := blobClient.GetProperties(a.ctx, &blob.GetPropertiesOptions{})
|
||||
if err != nil {
|
||||
return nil, convertAzureBlobErr(err)
|
||||
}
|
||||
s := strings.Split(path, "/")
|
||||
return &azureBlobFileInfo{
|
||||
s[len(s)-1],
|
||||
*res.ContentLength,
|
||||
*res.LastModified,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Delete delete a file
|
||||
func (a *AzureBlobStorage) Delete(path string) error {
|
||||
blobClient := a.getBlobClient(path)
|
||||
_, err := blobClient.Delete(a.ctx, nil)
|
||||
return convertAzureBlobErr(err)
|
||||
}
|
||||
|
||||
// URL gets the redirect URL to a file. The presigned link is valid for 5 minutes.
|
||||
func (a *AzureBlobStorage) URL(path, name string, reqParams url.Values) (*url.URL, error) {
|
||||
blobClient := a.getBlobClient(path)
|
||||
|
||||
startTime := time.Now()
|
||||
u, err := blobClient.GetSASURL(sas.BlobPermissions{
|
||||
Read: true,
|
||||
}, time.Now().Add(5*time.Minute), &blob.GetSASURLOptions{
|
||||
StartTime: &startTime,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, convertAzureBlobErr(err)
|
||||
}
|
||||
|
||||
return url.Parse(u)
|
||||
}
|
||||
|
||||
// IterateObjects iterates across the objects in the azureblobstorage
|
||||
func (a *AzureBlobStorage) IterateObjects(dirName string, fn func(path string, obj Object) error) error {
|
||||
dirName = a.buildAzureBlobPath(dirName)
|
||||
if dirName != "" {
|
||||
dirName += "/"
|
||||
}
|
||||
pager := a.client.NewListBlobsFlatPager(a.cfg.Container, &container.ListBlobsFlatOptions{
|
||||
Prefix: &dirName,
|
||||
})
|
||||
for pager.More() {
|
||||
resp, err := pager.NextPage(a.ctx)
|
||||
if err != nil {
|
||||
return convertAzureBlobErr(err)
|
||||
}
|
||||
for _, object := range resp.Segment.BlobItems {
|
||||
blobClient := a.getBlobClient(*object.Name)
|
||||
object := &azureBlobObject{
|
||||
Context: a.ctx,
|
||||
blobClient: blobClient,
|
||||
Name: *object.Name,
|
||||
Size: *object.Properties.ContentLength,
|
||||
ModTime: object.Properties.LastModified,
|
||||
}
|
||||
if err := func(object *azureBlobObject, fn func(path string, obj Object) error) error {
|
||||
defer object.Close()
|
||||
return fn(strings.TrimPrefix(object.Name, a.cfg.BasePath), object)
|
||||
}(object, fn); err != nil {
|
||||
return convertAzureBlobErr(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete delete a file
|
||||
func (a *AzureBlobStorage) getBlobClient(path string) *blob.Client {
|
||||
return a.client.ServiceClient().NewContainerClient(a.cfg.Container).NewBlobClient(a.buildAzureBlobPath(path))
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterStorageType(setting.AzureBlobStorageType, NewAzureBlobStorage)
|
||||
}
|
101
modules/storage/azureblob_test.go
Normal file
101
modules/storage/azureblob_test.go
Normal file
@@ -0,0 +1,101 @@
|
||||
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAzureBlobStorageIterator(t *testing.T) {
|
||||
if os.Getenv("CI") == "" {
|
||||
t.Skip("azureBlobStorage not present outside of CI")
|
||||
return
|
||||
}
|
||||
testStorageIterator(t, setting.AzureBlobStorageType, &setting.Storage{
|
||||
AzureBlobConfig: setting.AzureBlobStorageConfig{
|
||||
// https://learn.microsoft.com/azure/storage/common/storage-use-azurite?tabs=visual-studio-code#ip-style-url
|
||||
Endpoint: "http://devstoreaccount1.azurite.local:10000",
|
||||
// https://learn.microsoft.com/azure/storage/common/storage-use-azurite?tabs=visual-studio-code#well-known-storage-account-and-key
|
||||
AccountName: "devstoreaccount1",
|
||||
AccountKey: "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==",
|
||||
Container: "test",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAzureBlobStoragePath(t *testing.T) {
|
||||
m := &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: ""}}
|
||||
assert.Empty(t, m.buildAzureBlobPath("/"))
|
||||
assert.Empty(t, m.buildAzureBlobPath("."))
|
||||
assert.Equal(t, "a", m.buildAzureBlobPath("/a"))
|
||||
assert.Equal(t, "a/b", m.buildAzureBlobPath("/a/b/"))
|
||||
|
||||
m = &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: "/"}}
|
||||
assert.Empty(t, m.buildAzureBlobPath("/"))
|
||||
assert.Empty(t, m.buildAzureBlobPath("."))
|
||||
assert.Equal(t, "a", m.buildAzureBlobPath("/a"))
|
||||
assert.Equal(t, "a/b", m.buildAzureBlobPath("/a/b/"))
|
||||
|
||||
m = &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: "/base"}}
|
||||
assert.Equal(t, "base", m.buildAzureBlobPath("/"))
|
||||
assert.Equal(t, "base", m.buildAzureBlobPath("."))
|
||||
assert.Equal(t, "base/a", m.buildAzureBlobPath("/a"))
|
||||
assert.Equal(t, "base/a/b", m.buildAzureBlobPath("/a/b/"))
|
||||
|
||||
m = &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: "/base/"}}
|
||||
assert.Equal(t, "base", m.buildAzureBlobPath("/"))
|
||||
assert.Equal(t, "base", m.buildAzureBlobPath("."))
|
||||
assert.Equal(t, "base/a", m.buildAzureBlobPath("/a"))
|
||||
assert.Equal(t, "base/a/b", m.buildAzureBlobPath("/a/b/"))
|
||||
}
|
||||
|
||||
func Test_azureBlobObject(t *testing.T) {
|
||||
if os.Getenv("CI") == "" {
|
||||
t.Skip("azureBlobStorage not present outside of CI")
|
||||
return
|
||||
}
|
||||
|
||||
s, err := NewStorage(setting.AzureBlobStorageType, &setting.Storage{
|
||||
AzureBlobConfig: setting.AzureBlobStorageConfig{
|
||||
// https://learn.microsoft.com/azure/storage/common/storage-use-azurite?tabs=visual-studio-code#ip-style-url
|
||||
Endpoint: "http://devstoreaccount1.azurite.local:10000",
|
||||
// https://learn.microsoft.com/azure/storage/common/storage-use-azurite?tabs=visual-studio-code#well-known-storage-account-and-key
|
||||
AccountName: "devstoreaccount1",
|
||||
AccountKey: "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==",
|
||||
Container: "test",
|
||||
},
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
data := "Q2xTckt6Y1hDOWh0"
|
||||
_, err = s.Save("test.txt", strings.NewReader(data), int64(len(data)))
|
||||
assert.NoError(t, err)
|
||||
obj, err := s.Open("test.txt")
|
||||
assert.NoError(t, err)
|
||||
offset, err := obj.Seek(2, io.SeekStart)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, 2, offset)
|
||||
buf1 := make([]byte, 3)
|
||||
read, err := obj.Read(buf1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 3, read)
|
||||
assert.Equal(t, data[2:5], string(buf1))
|
||||
offset, err = obj.Seek(-5, io.SeekEnd)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, len(data)-5, offset)
|
||||
buf2 := make([]byte, 4)
|
||||
read, err = obj.Read(buf2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 4, read)
|
||||
assert.Equal(t, data[11:15], string(buf2))
|
||||
assert.NoError(t, obj.Close())
|
||||
assert.NoError(t, s.Delete("test.txt"))
|
||||
}
|
39
modules/storage/helper.go
Normal file
39
modules/storage/helper.go
Normal file
@@ -0,0 +1,39 @@
|
||||
// Copyright 2020 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
)
|
||||
|
||||
var uninitializedStorage = discardStorage("uninitialized storage")
|
||||
|
||||
type discardStorage string
|
||||
|
||||
func (s discardStorage) Open(_ string) (Object, error) {
|
||||
return nil, fmt.Errorf("%s", s)
|
||||
}
|
||||
|
||||
func (s discardStorage) Save(_ string, _ io.Reader, _ int64) (int64, error) {
|
||||
return 0, fmt.Errorf("%s", s)
|
||||
}
|
||||
|
||||
func (s discardStorage) Stat(_ string) (os.FileInfo, error) {
|
||||
return nil, fmt.Errorf("%s", s)
|
||||
}
|
||||
|
||||
func (s discardStorage) Delete(_ string) error {
|
||||
return fmt.Errorf("%s", s)
|
||||
}
|
||||
|
||||
func (s discardStorage) URL(_, _ string, _ url.Values) (*url.URL, error) {
|
||||
return nil, fmt.Errorf("%s", s)
|
||||
}
|
||||
|
||||
func (s discardStorage) IterateObjects(_ string, _ func(string, Object) error) error {
|
||||
return fmt.Errorf("%s", s)
|
||||
}
|
50
modules/storage/helper_test.go
Normal file
50
modules/storage/helper_test.go
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func Test_discardStorage(t *testing.T) {
|
||||
tests := []discardStorage{
|
||||
uninitializedStorage,
|
||||
discardStorage("empty"),
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(string(tt), func(t *testing.T) {
|
||||
{
|
||||
got, err := tt.Open("path")
|
||||
assert.Nil(t, got)
|
||||
assert.Error(t, err, string(tt))
|
||||
}
|
||||
{
|
||||
got, err := tt.Save("path", bytes.NewReader([]byte{0}), 1)
|
||||
assert.Equal(t, int64(0), got)
|
||||
assert.Error(t, err, string(tt))
|
||||
}
|
||||
{
|
||||
got, err := tt.Stat("path")
|
||||
assert.Nil(t, got)
|
||||
assert.Error(t, err, string(tt))
|
||||
}
|
||||
{
|
||||
err := tt.Delete("path")
|
||||
assert.Error(t, err, string(tt))
|
||||
}
|
||||
{
|
||||
got, err := tt.URL("path", "name", nil)
|
||||
assert.Nil(t, got)
|
||||
assert.Errorf(t, err, string(tt))
|
||||
}
|
||||
{
|
||||
err := tt.IterateObjects("", func(_ string, _ Object) error { return nil })
|
||||
assert.Error(t, err, string(tt))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
154
modules/storage/local.go
Normal file
154
modules/storage/local.go
Normal file
@@ -0,0 +1,154 @@
|
||||
// Copyright 2020 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
)
|
||||
|
||||
var _ ObjectStorage = &LocalStorage{}
|
||||
|
||||
// LocalStorage represents a local files storage
|
||||
type LocalStorage struct {
|
||||
ctx context.Context
|
||||
dir string
|
||||
tmpdir string
|
||||
}
|
||||
|
||||
// NewLocalStorage returns a local files
|
||||
func NewLocalStorage(ctx context.Context, config *setting.Storage) (ObjectStorage, error) {
|
||||
if !filepath.IsAbs(config.Path) {
|
||||
return nil, fmt.Errorf("LocalStorageConfig.Path should have been prepared by setting/storage.go and should be an absolute path, but not: %q", config.Path)
|
||||
}
|
||||
log.Info("Creating new Local Storage at %s", config.Path)
|
||||
if err := os.MkdirAll(config.Path, os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config.TemporaryPath == "" {
|
||||
config.TemporaryPath = filepath.Join(config.Path, "tmp")
|
||||
}
|
||||
if !filepath.IsAbs(config.TemporaryPath) {
|
||||
return nil, fmt.Errorf("LocalStorageConfig.TemporaryPath should be an absolute path, but not: %q", config.TemporaryPath)
|
||||
}
|
||||
|
||||
return &LocalStorage{
|
||||
ctx: ctx,
|
||||
dir: config.Path,
|
||||
tmpdir: config.TemporaryPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (l *LocalStorage) buildLocalPath(p string) string {
|
||||
return util.FilePathJoinAbs(l.dir, p)
|
||||
}
|
||||
|
||||
// Open a file
|
||||
func (l *LocalStorage) Open(path string) (Object, error) {
|
||||
return os.Open(l.buildLocalPath(path))
|
||||
}
|
||||
|
||||
// Save a file
|
||||
func (l *LocalStorage) Save(path string, r io.Reader, size int64) (int64, error) {
|
||||
p := l.buildLocalPath(path)
|
||||
if err := os.MkdirAll(filepath.Dir(p), os.ModePerm); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Create a temporary file to save to
|
||||
if err := os.MkdirAll(l.tmpdir, os.ModePerm); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
tmp, err := os.CreateTemp(l.tmpdir, "upload-*")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
tmpRemoved := false
|
||||
defer func() {
|
||||
if !tmpRemoved {
|
||||
_ = util.Remove(tmp.Name())
|
||||
}
|
||||
}()
|
||||
|
||||
n, err := io.Copy(tmp, r)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err := tmp.Close(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err := util.Rename(tmp.Name(), p); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Golang's tmp file (os.CreateTemp) always have 0o600 mode, so we need to change the file to follow the umask (as what Create/MkDir does)
|
||||
// but we don't want to make these files executable - so ensure that we mask out the executable bits
|
||||
if err := util.ApplyUmask(p, os.ModePerm&0o666); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
tmpRemoved = true
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Stat returns the info of the file
|
||||
func (l *LocalStorage) Stat(path string) (os.FileInfo, error) {
|
||||
return os.Stat(l.buildLocalPath(path))
|
||||
}
|
||||
|
||||
// Delete delete a file
|
||||
func (l *LocalStorage) Delete(path string) error {
|
||||
return util.Remove(l.buildLocalPath(path))
|
||||
}
|
||||
|
||||
// URL gets the redirect URL to a file
|
||||
func (l *LocalStorage) URL(path, name string, reqParams url.Values) (*url.URL, error) {
|
||||
return nil, ErrURLNotSupported
|
||||
}
|
||||
|
||||
// IterateObjects iterates across the objects in the local storage
|
||||
func (l *LocalStorage) IterateObjects(dirName string, fn func(path string, obj Object) error) error {
|
||||
dir := l.buildLocalPath(dirName)
|
||||
return filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case <-l.ctx.Done():
|
||||
return l.ctx.Err()
|
||||
default:
|
||||
}
|
||||
if path == l.dir {
|
||||
return nil
|
||||
}
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
relPath, err := filepath.Rel(l.dir, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
obj, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer obj.Close()
|
||||
return fn(relPath, obj)
|
||||
})
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterStorageType(setting.LocalStorageType, NewLocalStorage)
|
||||
}
|
58
modules/storage/local_test.go
Normal file
58
modules/storage/local_test.go
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestBuildLocalPath(t *testing.T) {
|
||||
kases := []struct {
|
||||
localDir string
|
||||
path string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
"/a",
|
||||
"0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
|
||||
"/a/0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
|
||||
},
|
||||
{
|
||||
"/a",
|
||||
"../0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
|
||||
"/a/0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
|
||||
},
|
||||
{
|
||||
"/a",
|
||||
"0\\a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
|
||||
"/a/0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
|
||||
},
|
||||
{
|
||||
"/b",
|
||||
"a/../0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
|
||||
"/b/0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
|
||||
},
|
||||
{
|
||||
"/b",
|
||||
"a\\..\\0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
|
||||
"/b/0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
|
||||
},
|
||||
}
|
||||
|
||||
for _, k := range kases {
|
||||
t.Run(k.path, func(t *testing.T) {
|
||||
l := LocalStorage{dir: k.localDir}
|
||||
|
||||
assert.Equal(t, k.expected, l.buildLocalPath(k.path))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocalStorageIterator(t *testing.T) {
|
||||
testStorageIterator(t, setting.LocalStorageType, &setting.Storage{Path: t.TempDir()})
|
||||
}
|
317
modules/storage/minio.go
Normal file
317
modules/storage/minio.go
Normal file
@@ -0,0 +1,317 @@
|
||||
// Copyright 2020 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
)
|
||||
|
||||
var (
|
||||
_ ObjectStorage = &MinioStorage{}
|
||||
|
||||
quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
|
||||
)
|
||||
|
||||
type minioObject struct {
|
||||
*minio.Object
|
||||
}
|
||||
|
||||
func (m *minioObject) Stat() (os.FileInfo, error) {
|
||||
oi, err := m.Object.Stat()
|
||||
if err != nil {
|
||||
return nil, convertMinioErr(err)
|
||||
}
|
||||
|
||||
return &minioFileInfo{oi}, nil
|
||||
}
|
||||
|
||||
// MinioStorage returns a minio bucket storage
|
||||
type MinioStorage struct {
|
||||
cfg *setting.MinioStorageConfig
|
||||
ctx context.Context
|
||||
client *minio.Client
|
||||
bucket string
|
||||
basePath string
|
||||
}
|
||||
|
||||
func convertMinioErr(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
errResp, ok := err.(minio.ErrorResponse)
|
||||
if !ok {
|
||||
return err
|
||||
}
|
||||
|
||||
// Convert two responses to standard analogues
|
||||
switch errResp.Code {
|
||||
case "NoSuchKey":
|
||||
return os.ErrNotExist
|
||||
case "AccessDenied":
|
||||
return os.ErrPermission
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
var getBucketVersioning = func(ctx context.Context, minioClient *minio.Client, bucket string) error {
|
||||
_, err := minioClient.GetBucketVersioning(ctx, bucket)
|
||||
return err
|
||||
}
|
||||
|
||||
// NewMinioStorage returns a minio storage
|
||||
func NewMinioStorage(ctx context.Context, cfg *setting.Storage) (ObjectStorage, error) {
|
||||
config := cfg.MinioConfig
|
||||
if config.ChecksumAlgorithm != "" && config.ChecksumAlgorithm != "default" && config.ChecksumAlgorithm != "md5" {
|
||||
return nil, fmt.Errorf("invalid minio checksum algorithm: %s", config.ChecksumAlgorithm)
|
||||
}
|
||||
|
||||
log.Info("Creating Minio storage at %s:%s with base path %s", config.Endpoint, config.Bucket, config.BasePath)
|
||||
|
||||
var lookup minio.BucketLookupType
|
||||
switch config.BucketLookUpType {
|
||||
case "auto", "":
|
||||
lookup = minio.BucketLookupAuto
|
||||
case "dns":
|
||||
lookup = minio.BucketLookupDNS
|
||||
case "path":
|
||||
lookup = minio.BucketLookupPath
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid minio bucket lookup type: %s", config.BucketLookUpType)
|
||||
}
|
||||
|
||||
minioClient, err := minio.New(config.Endpoint, &minio.Options{
|
||||
Creds: buildMinioCredentials(config),
|
||||
Secure: config.UseSSL,
|
||||
Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: config.InsecureSkipVerify}},
|
||||
Region: config.Location,
|
||||
BucketLookup: lookup,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, convertMinioErr(err)
|
||||
}
|
||||
|
||||
// The GetBucketVersioning is only used for checking whether the Object Storage parameters are generally good. It doesn't need to succeed.
|
||||
// The assumption is that if the API returns the HTTP code 400, then the parameters could be incorrect.
|
||||
// Otherwise even if the request itself fails (403, 404, etc), the code should still continue because the parameters seem "good" enough.
|
||||
// Keep in mind that GetBucketVersioning requires "owner" to really succeed, so it can't be used to check the existence.
|
||||
// Not using "BucketExists (HeadBucket)" because it doesn't include detailed failure reasons.
|
||||
err = getBucketVersioning(ctx, minioClient, config.Bucket)
|
||||
if err != nil {
|
||||
errResp, ok := err.(minio.ErrorResponse)
|
||||
if !ok {
|
||||
return nil, err
|
||||
}
|
||||
if errResp.StatusCode == http.StatusBadRequest {
|
||||
log.Error("S3 storage connection failure at %s:%s with base path %s and region: %s", config.Endpoint, config.Bucket, config.Location, errResp.Message)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Check to see if we already own this bucket
|
||||
exists, err := minioClient.BucketExists(ctx, config.Bucket)
|
||||
if err != nil {
|
||||
return nil, convertMinioErr(err)
|
||||
}
|
||||
|
||||
if !exists {
|
||||
if err := minioClient.MakeBucket(ctx, config.Bucket, minio.MakeBucketOptions{
|
||||
Region: config.Location,
|
||||
}); err != nil {
|
||||
return nil, convertMinioErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
return &MinioStorage{
|
||||
cfg: &config,
|
||||
ctx: ctx,
|
||||
client: minioClient,
|
||||
bucket: config.Bucket,
|
||||
basePath: config.BasePath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MinioStorage) buildMinioPath(p string) string {
|
||||
p = strings.TrimPrefix(util.PathJoinRelX(m.basePath, p), "/") // object store doesn't use slash for root path
|
||||
if p == "." {
|
||||
p = "" // object store doesn't use dot as relative path
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (m *MinioStorage) buildMinioDirPrefix(p string) string {
|
||||
// ending slash is required for avoiding matching like "foo/" and "foobar/" with prefix "foo"
|
||||
p = m.buildMinioPath(p) + "/"
|
||||
if p == "/" {
|
||||
p = "" // object store doesn't use slash for root path
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func buildMinioCredentials(config setting.MinioStorageConfig) *credentials.Credentials {
|
||||
// If static credentials are provided, use those
|
||||
if config.AccessKeyID != "" {
|
||||
return credentials.NewStaticV4(config.AccessKeyID, config.SecretAccessKey, "")
|
||||
}
|
||||
|
||||
// Otherwise, fallback to a credentials chain for S3 access
|
||||
chain := []credentials.Provider{
|
||||
// configure based upon MINIO_ prefixed environment variables
|
||||
&credentials.EnvMinio{},
|
||||
// configure based upon AWS_ prefixed environment variables
|
||||
&credentials.EnvAWS{},
|
||||
// read credentials from MINIO_SHARED_CREDENTIALS_FILE
|
||||
// environment variable, or default json config files
|
||||
&credentials.FileMinioClient{},
|
||||
// read credentials from AWS_SHARED_CREDENTIALS_FILE
|
||||
// environment variable, or default credentials file
|
||||
&credentials.FileAWSCredentials{},
|
||||
// read IAM role from EC2 metadata endpoint if available
|
||||
&credentials.IAM{
|
||||
// passing in an empty Endpoint lets the IAM Provider
|
||||
// decide which endpoint to resolve internally
|
||||
Endpoint: config.IamEndpoint,
|
||||
Client: &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
},
|
||||
},
|
||||
}
|
||||
return credentials.NewChainCredentials(chain)
|
||||
}
|
||||
|
||||
// Open opens a file
|
||||
func (m *MinioStorage) Open(path string) (Object, error) {
|
||||
opts := minio.GetObjectOptions{}
|
||||
object, err := m.client.GetObject(m.ctx, m.bucket, m.buildMinioPath(path), opts)
|
||||
if err != nil {
|
||||
return nil, convertMinioErr(err)
|
||||
}
|
||||
return &minioObject{object}, nil
|
||||
}
|
||||
|
||||
// Save saves a file to minio
|
||||
func (m *MinioStorage) Save(path string, r io.Reader, size int64) (int64, error) {
|
||||
uploadInfo, err := m.client.PutObject(
|
||||
m.ctx,
|
||||
m.bucket,
|
||||
m.buildMinioPath(path),
|
||||
r,
|
||||
size,
|
||||
minio.PutObjectOptions{
|
||||
ContentType: "application/octet-stream",
|
||||
// some storages like:
|
||||
// * https://developers.cloudflare.com/r2/api/s3/api/
|
||||
// * https://www.backblaze.com/b2/docs/s3_compatible_api.html
|
||||
// do not support "x-amz-checksum-algorithm" header, so use legacy MD5 checksum
|
||||
SendContentMd5: m.cfg.ChecksumAlgorithm == "md5",
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return 0, convertMinioErr(err)
|
||||
}
|
||||
return uploadInfo.Size, nil
|
||||
}
|
||||
|
||||
type minioFileInfo struct {
|
||||
minio.ObjectInfo
|
||||
}
|
||||
|
||||
func (m minioFileInfo) Name() string {
|
||||
return path.Base(m.ObjectInfo.Key)
|
||||
}
|
||||
|
||||
func (m minioFileInfo) Size() int64 {
|
||||
return m.ObjectInfo.Size
|
||||
}
|
||||
|
||||
func (m minioFileInfo) ModTime() time.Time {
|
||||
return m.LastModified
|
||||
}
|
||||
|
||||
func (m minioFileInfo) IsDir() bool {
|
||||
return strings.HasSuffix(m.ObjectInfo.Key, "/")
|
||||
}
|
||||
|
||||
func (m minioFileInfo) Mode() os.FileMode {
|
||||
return os.ModePerm
|
||||
}
|
||||
|
||||
func (m minioFileInfo) Sys() any {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stat returns the stat information of the object at path as an os.FileInfo.
func (m *MinioStorage) Stat(path string) (os.FileInfo, error) {
	info, err := m.client.StatObject(m.ctx, m.bucket, m.buildMinioPath(path), minio.StatObjectOptions{})
	if err != nil {
		return nil, convertMinioErr(err)
	}
	return &minioFileInfo{info}, nil
}
|
||||
|
||||
// Delete delete a file
|
||||
func (m *MinioStorage) Delete(path string) error {
|
||||
err := m.client.RemoveObject(m.ctx, m.bucket, m.buildMinioPath(path), minio.RemoveObjectOptions{})
|
||||
|
||||
return convertMinioErr(err)
|
||||
}
|
||||
|
||||
// URL gets the redirect URL to a file. The presigned link is valid for 5 minutes.
func (m *MinioStorage) URL(path, name string, serveDirectReqParams url.Values) (*url.URL, error) {
	// Round-trip through Encode/ParseQuery to copy serveDirectReqParams
	// without mutating the caller's url.Values.
	reqParams, err := url.ParseQuery(serveDirectReqParams.Encode())
	if err != nil {
		return nil, err
	}
	// TODO it may be good to embed images with 'inline' like ServeData does, but we don't want to have to read the file, do we?
	disposition := "attachment; filename=\"" + quoteEscaper.Replace(name) + "\""
	reqParams.Set("response-content-disposition", disposition)
	presigned, err := m.client.PresignedGetObject(m.ctx, m.bucket, m.buildMinioPath(path), 5*time.Minute, reqParams)
	return presigned, convertMinioErr(err)
}
|
||||
|
||||
// IterateObjects iterates across the objects in the miniostorage under
// dirName (recursively), calling fn with each object's base-path-relative
// key and an opened Object. Iteration stops at the first error from either
// the client or fn.
func (m *MinioStorage) IterateObjects(dirName string, fn func(path string, obj Object) error) error {
	opts := minio.GetObjectOptions{}
	for mObjInfo := range m.client.ListObjects(m.ctx, m.bucket, minio.ListObjectsOptions{
		Prefix:    m.buildMinioDirPrefix(dirName),
		Recursive: true,
	}) {
		object, err := m.client.GetObject(m.ctx, m.bucket, mObjInfo.Key, opts)
		if err != nil {
			return convertMinioErr(err)
		}
		// Invoke fn inside an immediately-called function so the deferred
		// Close runs at the end of each iteration instead of piling up
		// until IterateObjects returns.
		if err := func(object *minio.Object, fn func(path string, obj Object) error) error {
			defer object.Close()
			return fn(strings.TrimPrefix(mObjInfo.Key, m.basePath), &minioObject{object})
		}(object, fn); err != nil {
			return convertMinioErr(err)
		}
	}
	return nil
}
|
||||
|
||||
// init registers the minio-backed implementation under
// setting.MinioStorageType so NewStorage can construct it by type name.
func init() {
	RegisterStorageType(setting.MinioStorageType, NewMinioStorage)
}
|
203
modules/storage/minio_test.go
Normal file
203
modules/storage/minio_test.go
Normal file
@@ -0,0 +1,203 @@
|
||||
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestMinioStorageIterator runs the shared storage-iterator suite against a
// real minio endpoint; it is skipped outside CI where no minio is available.
func TestMinioStorageIterator(t *testing.T) {
	if os.Getenv("CI") == "" {
		t.Skip("minioStorage not present outside of CI")
		return
	}
	testStorageIterator(t, setting.MinioStorageType, &setting.Storage{
		MinioConfig: setting.MinioStorageConfig{
			Endpoint:        "minio:9000",
			AccessKeyID:     "123456",
			SecretAccessKey: "12345678",
			Bucket:          "gitea",
			Location:        "us-east-1",
		},
	})
}
|
||||
|
||||
// TestMinioStoragePath checks how buildMinioPath and buildMinioDirPrefix
// normalize leading/trailing slashes and "." for every basePath form.
// Rewritten table-driven (Go testing idiom) so each basePath case is data,
// not a copy-pasted assertion group, and failures identify their inputs.
func TestMinioStoragePath(t *testing.T) {
	cases := []struct {
		basePath string
		// buildMinioPath input -> expected output
		paths map[string]string
		// buildMinioDirPrefix input -> expected output
		prefixes map[string]string
	}{
		{
			basePath: "",
			paths:    map[string]string{"/": "", ".": "", "/a": "a", "/a/b/": "a/b"},
			prefixes: map[string]string{"": "", "/a/": "a/"},
		},
		{
			basePath: "/",
			paths:    map[string]string{"/": "", ".": "", "/a": "a", "/a/b/": "a/b"},
			prefixes: map[string]string{"": "", "/a/": "a/"},
		},
		{
			basePath: "/base",
			paths:    map[string]string{"/": "base", ".": "base", "/a": "base/a", "/a/b/": "base/a/b"},
			prefixes: map[string]string{"": "base/", "/a/": "base/a/"},
		},
		{
			basePath: "/base/",
			paths:    map[string]string{"/": "base", ".": "base", "/a": "base/a", "/a/b/": "base/a/b"},
			prefixes: map[string]string{"": "base/", "/a/": "base/a/"},
		},
	}
	for _, c := range cases {
		m := &MinioStorage{basePath: c.basePath}
		for in, want := range c.paths {
			assert.Equal(t, want, m.buildMinioPath(in), "basePath=%q buildMinioPath(%q)", c.basePath, in)
		}
		for in, want := range c.prefixes {
			assert.Equal(t, want, m.buildMinioDirPrefix(in), "basePath=%q buildMinioDirPrefix(%q)", c.basePath, in)
		}
	}
}
|
||||
|
||||
// TestS3StorageBadRequest verifies that an error returned by the
// getBucketVersioning probe during storage construction is surfaced through
// NewStorage. Skipped outside CI where the minio endpoint is unreachable.
func TestS3StorageBadRequest(t *testing.T) {
	if os.Getenv("CI") == "" {
		t.Skip("S3Storage not present outside of CI")
		return
	}
	cfg := &setting.Storage{
		MinioConfig: setting.MinioStorageConfig{
			Endpoint:        "minio:9000",
			AccessKeyID:     "123456",
			SecretAccessKey: "12345678",
			Bucket:          "bucket",
			Location:        "us-east-1",
		},
	}
	message := "ERROR"
	// Stub the package-level getBucketVersioning hook for the duration of the
	// test and restore the original afterwards.
	old := getBucketVersioning
	defer func() { getBucketVersioning = old }()
	getBucketVersioning = func(ctx context.Context, minioClient *minio.Client, bucket string) error {
		return minio.ErrorResponse{
			StatusCode: http.StatusBadRequest,
			Code:       "FixtureError",
			Message:    message,
		}
	}
	_, err := NewStorage(setting.MinioStorageType, cfg)
	assert.ErrorContains(t, err, message)
}
|
||||
|
||||
// TestMinioCredentials exercises buildMinioCredentials for each credential
// source: explicit static keys, MINIO_* env vars, AWS_* env vars, the minio
// alias file, the AWS shared-credentials file, and an (emulated) EC2 IAM
// metadata endpoint.
func TestMinioCredentials(t *testing.T) {
	const (
		ExpectedAccessKey       = "ExampleAccessKeyID"
		ExpectedSecretAccessKey = "ExampleSecretAccessKeyID"
		// Use a FakeEndpoint for IAM credentials to avoid logging any
		// potential real IAM credentials when running in EC2.
		FakeEndpoint = "http://localhost"
	)

	t.Run("Static Credentials", func(t *testing.T) {
		cfg := setting.MinioStorageConfig{
			AccessKeyID:     ExpectedAccessKey,
			SecretAccessKey: ExpectedSecretAccessKey,
			IamEndpoint:     FakeEndpoint,
		}
		creds := buildMinioCredentials(cfg)
		v, err := creds.Get()

		assert.NoError(t, err)
		assert.Equal(t, ExpectedAccessKey, v.AccessKeyID)
		assert.Equal(t, ExpectedSecretAccessKey, v.SecretAccessKey)
	})

	t.Run("Chain", func(t *testing.T) {
		// No static keys configured: credentials must come from the chain.
		cfg := setting.MinioStorageConfig{
			IamEndpoint: FakeEndpoint,
		}

		t.Run("EnvMinio", func(t *testing.T) {
			t.Setenv("MINIO_ACCESS_KEY", ExpectedAccessKey+"Minio")
			t.Setenv("MINIO_SECRET_KEY", ExpectedSecretAccessKey+"Minio")

			creds := buildMinioCredentials(cfg)
			v, err := creds.Get()

			assert.NoError(t, err)
			assert.Equal(t, ExpectedAccessKey+"Minio", v.AccessKeyID)
			assert.Equal(t, ExpectedSecretAccessKey+"Minio", v.SecretAccessKey)
		})

		t.Run("EnvAWS", func(t *testing.T) {
			t.Setenv("AWS_ACCESS_KEY", ExpectedAccessKey+"AWS")
			t.Setenv("AWS_SECRET_KEY", ExpectedSecretAccessKey+"AWS")

			creds := buildMinioCredentials(cfg)
			v, err := creds.Get()

			assert.NoError(t, err)
			assert.Equal(t, ExpectedAccessKey+"AWS", v.AccessKeyID)
			assert.Equal(t, ExpectedSecretAccessKey+"AWS", v.SecretAccessKey)
		})

		t.Run("FileMinio", func(t *testing.T) {
			// prevent loading any actual credentials files from the user
			t.Setenv("MINIO_SHARED_CREDENTIALS_FILE", "testdata/minio.json")
			t.Setenv("AWS_SHARED_CREDENTIALS_FILE", "testdata/fake")

			creds := buildMinioCredentials(cfg)
			v, err := creds.Get()

			assert.NoError(t, err)
			assert.Equal(t, ExpectedAccessKey+"MinioFile", v.AccessKeyID)
			assert.Equal(t, ExpectedSecretAccessKey+"MinioFile", v.SecretAccessKey)
		})

		t.Run("FileAWS", func(t *testing.T) {
			// prevent loading any actual credentials files from the user
			t.Setenv("MINIO_SHARED_CREDENTIALS_FILE", "testdata/fake.json")
			t.Setenv("AWS_SHARED_CREDENTIALS_FILE", "testdata/aws_credentials")

			creds := buildMinioCredentials(cfg)
			v, err := creds.Get()

			assert.NoError(t, err)
			assert.Equal(t, ExpectedAccessKey+"AWSFile", v.AccessKeyID)
			assert.Equal(t, ExpectedSecretAccessKey+"AWSFile", v.SecretAccessKey)
		})

		t.Run("IAM", func(t *testing.T) {
			// prevent loading any actual credentials files from the user
			t.Setenv("MINIO_SHARED_CREDENTIALS_FILE", "testdata/fake.json")
			t.Setenv("AWS_SHARED_CREDENTIALS_FILE", "testdata/fake")

			// Spawn a server to emulate the EC2 Instance Metadata
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				// The client will actually make 3 requests here,
				// first will be to get the IMDSv2 token, second to
				// get the role, and third for the actual
				// credentials. However, we can return credentials
				// every request since we're not emulating a full
				// IMDSv2 flow.
				w.Write([]byte(`{"Code":"Success","AccessKeyId":"ExampleAccessKeyIDIAM","SecretAccessKey":"ExampleSecretAccessKeyIDIAM"}`))
			}))
			defer server.Close()

			// Use the provided EC2 Instance Metadata server
			creds := buildMinioCredentials(setting.MinioStorageConfig{
				IamEndpoint: server.URL,
			})
			v, err := creds.Get()

			assert.NoError(t, err)
			assert.Equal(t, ExpectedAccessKey+"IAM", v.AccessKeyID)
			assert.Equal(t, ExpectedSecretAccessKey+"IAM", v.SecretAccessKey)
		})
	})
}
|
226
modules/storage/storage.go
Normal file
226
modules/storage/storage.go
Normal file
@@ -0,0 +1,226 @@
|
||||
// Copyright 2020 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
)
|
||||
|
||||
// ErrURLNotSupported is returned by ObjectStorage implementations whose URL
// method cannot produce a direct-download link.
var ErrURLNotSupported = errors.New("url method not supported")
|
||||
|
||||
// ErrInvalidConfiguration is called when there is invalid configuration for a storage
|
||||
type ErrInvalidConfiguration struct {
|
||||
cfg any
|
||||
err error
|
||||
}
|
||||
|
||||
func (err ErrInvalidConfiguration) Error() string {
|
||||
if err.err != nil {
|
||||
return fmt.Sprintf("Invalid Configuration Argument: %v: Error: %v", err.cfg, err.err)
|
||||
}
|
||||
return fmt.Sprintf("Invalid Configuration Argument: %v", err.cfg)
|
||||
}
|
||||
|
||||
// IsErrInvalidConfiguration checks if an error is an ErrInvalidConfiguration
|
||||
func IsErrInvalidConfiguration(err error) bool {
|
||||
_, ok := err.(ErrInvalidConfiguration)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Type is an alias of setting.StorageType: the configured storage backend name.
type Type = setting.StorageType

// NewStorageFunc is a function that creates a storage
type NewStorageFunc func(ctx context.Context, cfg *setting.Storage) (ObjectStorage, error)

// storageMap maps each registered storage type to its constructor.
var storageMap = map[Type]NewStorageFunc{}
|
||||
|
||||
// RegisterStorageType registers a provided storage type with a function to create it
|
||||
func RegisterStorageType(typ Type, fn func(ctx context.Context, cfg *setting.Storage) (ObjectStorage, error)) {
|
||||
storageMap[typ] = fn
|
||||
}
|
||||
|
||||
// Object represents the object on the storage: a readable, seekable,
// closable handle with file-like metadata.
type Object interface {
	io.ReadCloser
	io.Seeker
	// Stat returns the os.FileInfo describing this object.
	Stat() (os.FileInfo, error)
}
|
||||
|
||||
// ObjectStorage represents an object storage to handle a bucket and files
type ObjectStorage interface {
	// Open returns a readable Object for the given path.
	Open(path string) (Object, error)
	// Save store a object, if size is unknown set -1
	Save(path string, r io.Reader, size int64) (int64, error)
	// Stat returns file-like metadata for the object at path.
	Stat(path string) (os.FileInfo, error)
	// Delete removes the object at path.
	Delete(path string) error
	// URL returns a direct-download URL for the object; implementations
	// without such support return ErrURLNotSupported.
	URL(path, name string, reqParams url.Values) (*url.URL, error)
	// IterateObjects calls iterator for every object stored under path.
	IterateObjects(path string, iterator func(path string, obj Object) error) error
}
|
||||
|
||||
// Copy copies a file from source ObjectStorage to dest ObjectStorage
|
||||
func Copy(dstStorage ObjectStorage, dstPath string, srcStorage ObjectStorage, srcPath string) (int64, error) {
|
||||
f, err := srcStorage.Open(srcPath)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
size := int64(-1)
|
||||
fsinfo, err := f.Stat()
|
||||
if err == nil {
|
||||
size = fsinfo.Size()
|
||||
}
|
||||
|
||||
return dstStorage.Save(dstPath, f, size)
|
||||
}
|
||||
|
||||
// Clean delete all the objects in this storage
|
||||
func Clean(storage ObjectStorage) error {
|
||||
return storage.IterateObjects("", func(path string, obj Object) error {
|
||||
_ = obj.Close()
|
||||
return storage.Delete(path)
|
||||
})
|
||||
}
|
||||
|
||||
// SaveFrom saves data to the ObjectStorage at path, with the content
// produced by callback writing into the supplied io.Writer. The write
// happens through a pipe so the content is streamed, never buffered whole.
func SaveFrom(objStorage ObjectStorage, path string, callback func(w io.Writer) error) error {
	pr, pw := io.Pipe()
	defer pr.Close()
	go func() {
		defer pw.Close()
		// Propagate a callback failure through the pipe so the Save below
		// fails with that error instead of observing a plain EOF.
		if err := callback(pw); err != nil {
			_ = pw.CloseWithError(err)
		}
	}()

	_, err := objStorage.Save(path, pr, -1)
	return err
}
|
||||
|
||||
// Package-level storage handles. Each starts as uninitializedStorage and is
// replaced by Init (or set to a discardStorage when its feature is disabled).
var (
	// Attachments represents attachments storage
	Attachments ObjectStorage = uninitializedStorage

	// LFS represents lfs storage
	LFS ObjectStorage = uninitializedStorage

	// Avatars represents user avatars storage
	Avatars ObjectStorage = uninitializedStorage
	// RepoAvatars represents repository avatars storage
	RepoAvatars ObjectStorage = uninitializedStorage

	// RepoArchives represents repository archives storage
	RepoArchives ObjectStorage = uninitializedStorage

	// Packages represents packages storage
	Packages ObjectStorage = uninitializedStorage

	// Actions represents actions storage
	Actions ObjectStorage = uninitializedStorage
	// ActionsArtifacts represents actions artifacts storage
	ActionsArtifacts ObjectStorage = uninitializedStorage
)
|
||||
|
||||
// Init init the storage
|
||||
func Init() error {
|
||||
for _, f := range []func() error{
|
||||
initAttachments,
|
||||
initAvatars,
|
||||
initRepoAvatars,
|
||||
initLFS,
|
||||
initRepoArchives,
|
||||
initPackages,
|
||||
initActions,
|
||||
} {
|
||||
if err := f(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewStorage takes a storage type and some config and returns an
// ObjectStorage or an error. An empty type falls back to local storage.
func NewStorage(typStr Type, cfg *setting.Storage) (ObjectStorage, error) {
	if len(typStr) == 0 {
		typStr = setting.LocalStorageType
	}
	fn, ok := storageMap[typStr]
	if !ok {
		// Error strings are lowercase and unpunctuated per Go convention
		// (staticcheck ST1005).
		return nil, fmt.Errorf("unsupported storage type: %s", typStr)
	}

	return fn(context.Background(), cfg)
}
|
||||
|
||||
// initAvatars initializes the user-avatar storage from setting.Avatar.
func initAvatars() (err error) {
	log.Info("Initialising Avatar storage with type: %s", setting.Avatar.Storage.Type)
	Avatars, err = NewStorage(setting.Avatar.Storage.Type, setting.Avatar.Storage)
	return err
}
|
||||
|
||||
// initAttachments initializes the attachment storage, or installs a
// discarding stand-in when attachments are disabled in settings.
func initAttachments() (err error) {
	if !setting.Attachment.Enabled {
		Attachments = discardStorage("Attachment isn't enabled")
		return nil
	}
	log.Info("Initialising Attachment storage with type: %s", setting.Attachment.Storage.Type)
	Attachments, err = NewStorage(setting.Attachment.Storage.Type, setting.Attachment.Storage)
	return err
}
|
||||
|
||||
// initLFS initializes the LFS storage, or installs a discarding stand-in
// when the LFS server is not enabled.
func initLFS() (err error) {
	if !setting.LFS.StartServer {
		LFS = discardStorage("LFS isn't enabled")
		return nil
	}
	log.Info("Initialising LFS storage with type: %s", setting.LFS.Storage.Type)
	LFS, err = NewStorage(setting.LFS.Storage.Type, setting.LFS.Storage)
	return err
}
|
||||
|
||||
// initRepoAvatars initializes the repository-avatar storage from setting.RepoAvatar.
func initRepoAvatars() (err error) {
	log.Info("Initialising Repository Avatar storage with type: %s", setting.RepoAvatar.Storage.Type)
	RepoAvatars, err = NewStorage(setting.RepoAvatar.Storage.Type, setting.RepoAvatar.Storage)
	return err
}
|
||||
|
||||
// initRepoArchives initializes the repository-archive storage from setting.RepoArchive.
func initRepoArchives() (err error) {
	log.Info("Initialising Repository Archive storage with type: %s", setting.RepoArchive.Storage.Type)
	RepoArchives, err = NewStorage(setting.RepoArchive.Storage.Type, setting.RepoArchive.Storage)
	return err
}
|
||||
|
||||
// initPackages initializes the package-registry storage, or installs a
// discarding stand-in when packages are disabled.
func initPackages() (err error) {
	if !setting.Packages.Enabled {
		Packages = discardStorage("Packages isn't enabled")
		return nil
	}
	log.Info("Initialising Packages storage with type: %s", setting.Packages.Storage.Type)
	Packages, err = NewStorage(setting.Packages.Storage.Type, setting.Packages.Storage)
	return err
}
|
||||
|
||||
// initActions initializes both Actions storages (logs and artifacts), or
// installs discarding stand-ins for both when Actions is disabled.
func initActions() (err error) {
	if !setting.Actions.Enabled {
		Actions = discardStorage("Actions isn't enabled")
		ActionsArtifacts = discardStorage("ActionsArtifacts isn't enabled")
		return nil
	}
	log.Info("Initialising Actions storage with type: %s", setting.Actions.LogStorage.Type)
	if Actions, err = NewStorage(setting.Actions.LogStorage.Type, setting.Actions.LogStorage); err != nil {
		return err
	}
	log.Info("Initialising ActionsArtifacts storage with type: %s", setting.Actions.ArtifactStorage.Type)
	ActionsArtifacts, err = NewStorage(setting.Actions.ArtifactStorage.Type, setting.Actions.ArtifactStorage)
	return err
}
|
52
modules/storage/storage_test.go
Normal file
52
modules/storage/storage_test.go
Normal file
@@ -0,0 +1,52 @@
|
||||
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// testStorageIterator is the shared IterateObjects test suite: it saves a
// fixed set of files into a freshly-created storage of the given type, then
// checks that iterating several directory arguments visits exactly the
// expected normalized paths.
func testStorageIterator(t *testing.T, typStr Type, cfg *setting.Storage) {
	l, err := NewStorage(typStr, cfg)
	assert.NoError(t, err)

	// {path, content} pairs to seed the storage with.
	testFiles := [][]string{
		{"a/1.txt", "a1"},
		{"/a/1.txt", "aa1"}, // same as above, but with leading slash that will be trim
		{"ab/1.txt", "ab1"},
		{"b/1.txt", "b1"},
		{"b/2.txt", "b2"},
		{"b/3.txt", "b3"},
		{"b/x 4.txt", "bx4"},
	}
	for _, f := range testFiles {
		_, err = l.Save(f[0], strings.NewReader(f[1]), -1)
		assert.NoError(t, err)
	}

	// Map of IterateObjects directory argument -> exact set of paths the
	// iteration must visit (covers root spellings and ".." normalization).
	expectedList := map[string][]string{
		"a":           {"a/1.txt"},
		"b":           {"b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt"},
		"":            {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
		"/":           {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
		".":           {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
		"a/b/../../a": {"a/1.txt"},
	}
	for dir, expected := range expectedList {
		count := 0
		err = l.IterateObjects(dir, func(path string, f Object) error {
			defer f.Close()
			assert.Contains(t, expected, path)
			count++
			return nil
		})
		assert.NoError(t, err)
		// Every expected path was visited exactly once.
		assert.Len(t, expected, count)
	}
}
|
3
modules/storage/testdata/aws_credentials
vendored
Normal file
3
modules/storage/testdata/aws_credentials
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
[default]
|
||||
aws_access_key_id=ExampleAccessKeyIDAWSFile
|
||||
aws_secret_access_key=ExampleSecretAccessKeyIDAWSFile
|
12
modules/storage/testdata/minio.json
vendored
Normal file
12
modules/storage/testdata/minio.json
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"version": "10",
|
||||
"aliases": {
|
||||
"s3": {
|
||||
"url": "https://s3.amazonaws.com",
|
||||
"accessKey": "ExampleAccessKeyIDMinioFile",
|
||||
"secretKey": "ExampleSecretAccessKeyIDMinioFile",
|
||||
"api": "S3v4",
|
||||
"path": "dns"
|
||||
}
|
||||
}
|
||||
}
|
Reference in New Issue
Block a user