first-commit

This commit is contained in:
2025-08-25 15:46:12 +08:00
commit f4d95dfff4
5665 changed files with 705359 additions and 0 deletions

375
modules/assetfs/embed.go Normal file
View File

@@ -0,0 +1,375 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package assetfs
import (
"bytes"
"compress/gzip"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/util"
)
// EmbeddedFile is the interface implemented by files opened from the
// embedded filesystem. fs.ReadDirFile already includes
// "ReadDir(n int) ([]fs.DirEntry, error)", so no extra declaration is needed
// (the previous explicit duplicate was redundant; the method set is unchanged).
type EmbeddedFile interface {
	io.ReadSeeker
	fs.ReadDirFile
}
// EmbeddedFileInfo is the interface for a file or directory entry of the
// embedded filesystem. It extends fs.FileInfo and fs.DirEntry with access to
// the pre-compressed content, so HTTP handlers can serve gzip bytes directly.
type EmbeddedFileInfo interface {
	fs.FileInfo
	fs.DirEntry
	// GetGzipContent returns the stored gzip bytes and true when the entry is
	// stored compressed; otherwise it returns nil and false.
	GetGzipContent() ([]byte, bool)
}
// decompressor abstracts the subset of *gzip.Reader that
// EmbeddedCompressedFile needs: reading, closing, and resetting to a new
// compressed source (used when seeking backwards).
type decompressor interface {
	io.Reader
	Close() error
	Reset(io.Reader) error
}
// embeddedFileInfo describes one file or directory inside the bindata blob.
// The JSON-tagged fields are persisted in the metadata trailer; the
// unexported fields are filled in lazily by embeddedFS.getFileInfo.
type embeddedFileInfo struct {
	fs       *embeddedFS // owning filesystem, set on first resolution
	fullName string      // slash-separated path relative to the root, or "."
	data     []byte      // stored (possibly gzip-compressed) bytes, sliced from the blob

	BaseName   string              `json:"n"`           // name without any path component
	OriginSize int64               `json:"s,omitempty"` // uncompressed size in bytes
	DataBegin  int64               `json:"b,omitempty"` // offset of the stored bytes within the blob
	DataLen    int64               `json:"l,omitempty"` // stored length (== OriginSize when kept uncompressed)
	Children   []*embeddedFileInfo `json:"c,omitempty"` // non-nil means this entry is a directory
}
// GetGzipContent returns the stored gzip bytes when the entry is compressed.
// When generating the bindata, if the compressed data equals or is larger
// than the original data, the original data is stored instead — so equal
// stored/origin sizes mean "not compressed".
func (fi *embeddedFileInfo) GetGzipContent() ([]byte, bool) {
	isCompressed := fi.DataLen != fi.OriginSize
	if !isCompressed {
		return nil, false
	}
	return fi.data, true
}
// EmbeddedFileBase carries the state shared by both opened-file kinds: the
// metadata entry, a reader over the stored bytes, and the logical read
// position maintained by Seek (expressed in uncompressed-content offsets,
// as SeekEnd is computed from OriginSize).
type EmbeddedFileBase struct {
	info       *embeddedFileInfo
	dataReader io.ReadSeeker // reads the stored (possibly compressed) bytes
	seekPos    int64         // logical position within the uncompressed content
}
// ReadDir lists this entry's directory contents.
// This method is used to satisfy the "func (f ioFile) ReadDir(...)" in httpfs.
// A negative or too-large n returns all entries.
func (f *EmbeddedFileBase) ReadDir(n int) ([]fs.DirEntry, error) {
	entries, err := f.info.fs.ReadDir(f.info.fullName)
	if err != nil {
		return nil, err
	}
	if n >= 0 && n <= len(entries) {
		return entries[:n], nil
	}
	return entries, nil
}
// EmbeddedOriginFile is an opened file whose bytes are stored verbatim
// (compression did not shrink them, so the original data was embedded).
type EmbeddedOriginFile struct {
	EmbeddedFileBase
}

// EmbeddedCompressedFile is an opened file whose bytes are stored
// gzip-compressed. decompressorPos tracks how many uncompressed bytes the
// decompressor has already produced, to implement Seek over a gzip stream.
type EmbeddedCompressedFile struct {
	EmbeddedFileBase
	decompressor    decompressor
	decompressorPos int64
}
// embeddedFS implements fs.ReadDirFS on top of a bindata blob
// (file contents followed by '\n' and a JSON metadata trailer).
type embeddedFS struct {
	meta func() *EmbeddedMeta // lazily decodes the JSON trailer, exactly once

	files   map[string]*embeddedFileInfo // cache of resolved entries, keyed by full name
	filesMu sync.RWMutex                 // protects files

	data []byte // the whole bindata blob
}

// EmbeddedMeta is the JSON metadata trailer stored at the end of the blob.
type EmbeddedMeta struct {
	Root *embeddedFileInfo
}
// NewEmbeddedFS creates an embedded filesystem from a bindata blob: the raw
// file contents followed by '\n' and a JSON-encoded EmbeddedMeta trailer.
// The metadata is decoded lazily, and only once, on first access.
func NewEmbeddedFS(data []byte) fs.ReadDirFS {
	efs := &embeddedFS{data: data, files: make(map[string]*embeddedFileInfo)}
	efs.meta = sync.OnceValue(func() *EmbeddedMeta {
		var meta EmbeddedMeta
		p := bytes.LastIndexByte(data, '\n')
		if p < 0 {
			return &meta // no trailer found: behave as an empty filesystem
		}
		if err := json.Unmarshal(data[p+1:], &meta); err != nil {
			// the blob is generated at build time; a decode failure means the
			// binary itself is broken, so include the cause for diagnosis
			panic("embedded file is not valid: " + err.Error())
		}
		return &meta
	})
	return efs
}
// compile-time assertion that embeddedFS implements fs.ReadDirFS
var _ fs.ReadDirFS = (*embeddedFS)(nil)

// ReadDir lists the entries of the named directory. Each child is resolved
// through getFileInfo so the returned entries are fully initialized and cached.
func (e *embeddedFS) ReadDir(name string) ([]fs.DirEntry, error) {
	dirInfo, err := e.getFileInfo(name)
	if err != nil {
		return nil, err
	}
	if !dirInfo.IsDir() {
		return nil, fs.ErrNotExist
	}
	entries := make([]fs.DirEntry, 0, len(dirInfo.Children))
	for _, child := range dirInfo.Children {
		childInfo, err := e.getFileInfo(name + "/" + child.BaseName)
		if err != nil {
			return nil, err
		}
		entries = append(entries, childInfo)
	}
	return entries, nil
}
// getFileInfo resolves fullName to its metadata entry, initializing and
// caching the entry on first access.
// No need to do heavy "path.Clean()" because we don't want to support
// "foo/../bar" or absolute paths.
func (e *embeddedFS) getFileInfo(fullName string) (*embeddedFileInfo, error) {
	fullName = strings.TrimPrefix(fullName, "./")
	if fullName == "" {
		fullName = "."
	}

	// fast path: already resolved and cached
	e.filesMu.RLock()
	fi := e.files[fullName]
	e.filesMu.RUnlock()
	if fi != nil {
		return fi, nil
	}

	// walk the metadata tree one path segment at a time
	fi = e.meta().Root
	if fullName != "." {
		for _, field := range strings.Split(fullName, "/") {
			if fi == nil {
				return nil, fs.ErrNotExist // empty metadata (nil Root)
			}
			// "found" must be reset for every segment. Previously it carried
			// over between segments, so a lookup like "file.txt/extra" (a path
			// below a regular file, which has no children) skipped the inner
			// loop and wrongly resolved to the parent entry.
			found := false
			for _, child := range fi.Children {
				if child.BaseName == field {
					fi, found = child, true
					break
				}
			}
			if !found {
				return nil, fs.ErrNotExist
			}
		}
	}
	if fi == nil {
		return nil, fs.ErrNotExist
	}

	e.filesMu.Lock()
	defer e.filesMu.Unlock()
	fi.fs = e
	fi.fullName = fullName
	fi.data = e.data[fi.DataBegin : fi.DataBegin+fi.DataLen]
	e.files[fullName] = fi // do not cache nil, otherwise keeping accessing random non-existing file will cause OOM
	return fi, nil
}
// Open opens the named file for reading. Entries stored verbatim are served
// directly; compressed entries are wrapped in a gzip decompressor.
// The caller is responsible for closing the returned file.
func (e *embeddedFS) Open(name string) (fs.File, error) {
	info, err := e.getFileInfo(name)
	if err != nil {
		return nil, err
	}
	base := EmbeddedFileBase{info: info, dataReader: bytes.NewReader(info.data)}
	if info.DataLen == info.OriginSize {
		// stored uncompressed
		return &EmbeddedOriginFile{base}, nil
	}
	gzReader, err := gzip.NewReader(base.dataReader)
	if err != nil {
		return nil, err
	}
	return &EmbeddedCompressedFile{EmbeddedFileBase: base, decompressor: gzReader}, nil
}
// compile-time interface assertions
var (
	_ EmbeddedFileInfo = (*embeddedFileInfo)(nil)
	_ EmbeddedFile     = (*EmbeddedOriginFile)(nil)
	_ EmbeddedFile     = (*EmbeddedCompressedFile)(nil)
)
// Read reads the uncompressed stored bytes starting at the logical position
// recorded by Seek. The underlying bytes.Reader is explicitly positioned to
// seekPos first: previously this method delegated to the reader's own cursor,
// which ignored Seek entirely, so seek-then-read (e.g. HTTP Range requests
// served via httpfs) read from the wrong offset for uncompressed files.
func (f *EmbeddedOriginFile) Read(p []byte) (n int, err error) {
	if _, err = f.dataReader.Seek(f.seekPos, io.SeekStart); err != nil {
		return 0, err
	}
	n, err = f.dataReader.Read(p)
	f.seekPos += int64(n) // keep the logical position in sync for the next Read
	return n, err
}
// Read decompresses into p starting at the logical position seekPos.
// A gzip stream cannot be rewound, so seeking backwards resets the
// decompressor to the start of the compressed data, and seeking forwards
// discards decompressed bytes until the positions match.
func (f *EmbeddedCompressedFile) Read(p []byte) (n int, err error) {
	// seeked backwards: restart decompression from the beginning
	if f.decompressorPos > f.seekPos {
		if err = f.decompressor.Reset(bytes.NewReader(f.info.data)); err != nil {
			return 0, err
		}
		f.decompressorPos = 0
	}
	// seeked forwards: skip decompressed bytes up to the target position
	if f.decompressorPos < f.seekPos {
		if _, err = io.CopyN(io.Discard, f.decompressor, f.seekPos-f.decompressorPos); err != nil {
			return 0, err
		}
		f.decompressorPos = f.seekPos
	}
	n, err = f.decompressor.Read(p)
	f.decompressorPos += int64(n)
	f.seekPos = f.decompressorPos
	return n, err
}
// Seek records the logical read position (in uncompressed-content offsets)
// for the next Read; applying the position to the underlying data is the
// responsibility of the concrete Read implementations.
// NOTE(review): the resulting position is not validated here — a negative
// offset or an unknown whence is silently accepted; confirm callers only
// pass well-formed arguments (http.ServeContent does).
func (f *EmbeddedFileBase) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		f.seekPos = offset
	case io.SeekCurrent:
		f.seekPos += offset
	case io.SeekEnd:
		// "end" is the end of the uncompressed content
		f.seekPos = f.info.OriginSize + offset
	}
	return f.seekPos, nil
}
// Stat returns the file's metadata entry, which implements fs.FileInfo.
func (f *EmbeddedFileBase) Stat() (fs.FileInfo, error) {
	return f.info, nil
}

// Close is a no-op: an origin file only wraps an in-memory byte slice.
func (f *EmbeddedOriginFile) Close() error {
	return nil
}

// Close releases the gzip decompressor's resources.
func (f *EmbeddedCompressedFile) Close() error {
	return f.decompressor.Close()
}
// Name returns the base name of the entry (implements fs.FileInfo).
func (fi *embeddedFileInfo) Name() string {
	return fi.BaseName
}

// Size returns the uncompressed size of the content.
func (fi *embeddedFileInfo) Size() int64 {
	return fi.OriginSize
}

// Mode reports a read-only mode: 0o555 for directories, 0o444 for files.
func (fi *embeddedFileInfo) Mode() fs.FileMode {
	return util.Iif(fi.IsDir(), fs.ModeDir|0o555, 0o444)
}

// ModTime returns the executable's own modification time, because embedded
// entries carry no timestamps (see getExecutableModTime).
func (fi *embeddedFileInfo) ModTime() time.Time {
	return getExecutableModTime()
}

// IsDir reports whether the entry is a directory (a non-nil Children slice).
func (fi *embeddedFileInfo) IsDir() bool {
	return fi.Children != nil
}

// Sys implements fs.FileInfo; there is no underlying system value.
func (fi *embeddedFileInfo) Sys() any {
	return nil
}

// Type implements fs.DirEntry, reporting only the directory bit.
func (fi *embeddedFileInfo) Type() fs.FileMode {
	return util.Iif(fi.IsDir(), fs.ModeDir, 0)
}

// Info implements fs.DirEntry; the entry is its own FileInfo.
func (fi *embeddedFileInfo) Info() (fs.FileInfo, error) {
	return fi, nil
}
// getExecutableModTime returns the modification time of the executable file.
// In bindata, we can't use the ModTime of the files because we need to make
// the build reproducible. Any failure along the way yields the zero time.
var getExecutableModTime = sync.OnceValue(func() (modTime time.Time) {
	exePath, err := os.Executable()
	if err == nil {
		exePath, err = filepath.Abs(exePath)
	}
	if err == nil {
		exePath, err = filepath.EvalSymlinks(exePath)
	}
	var st os.FileInfo
	if err == nil {
		st, err = os.Stat(exePath)
	}
	if err != nil {
		return modTime // zero time on any failure
	}
	return st.ModTime()
})
// GenerateEmbedBindata walks fsRootPath recursively and writes a bindata blob
// to outputFile: every file's stored bytes (gzip-compressed when that is
// smaller) concatenated, then '\n', then a JSON-encoded EmbeddedMeta trailer
// recording names, sizes and data offsets.
func GenerateEmbedBindata(fsRootPath, outputFile string) (err error) {
	output, err := os.OpenFile(outputFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	// propagate the Close error: for a file being written, a write failure
	// can surface only at Close time and was previously silently dropped
	defer func() {
		if closeErr := output.Close(); closeErr != nil && err == nil {
			err = closeErr
		}
	}()

	meta := &EmbeddedMeta{}
	meta.Root = &embeddedFileInfo{}
	var outputOffset int64 // running offset of stored bytes within the blob
	var embedFiles func(parent *embeddedFileInfo, fsPath, embedPath string) error
	embedFiles = func(parent *embeddedFileInfo, fsPath, embedPath string) error {
		dirEntries, err := os.ReadDir(fsPath)
		if err != nil {
			return err
		}
		for _, dirEntry := range dirEntries {
			// (a stale "if err != nil" check here was dead code and has been removed)
			if dirEntry.IsDir() {
				child := &embeddedFileInfo{
					BaseName: dirEntry.Name(),
					Children: []*embeddedFileInfo{}, // non-nil means it's a directory
				}
				parent.Children = append(parent.Children, child)
				if err = embedFiles(child, filepath.Join(fsPath, dirEntry.Name()), path.Join(embedPath, dirEntry.Name())); err != nil {
					return err
				}
				continue
			}
			data, err := os.ReadFile(filepath.Join(fsPath, dirEntry.Name()))
			if err != nil {
				return err
			}
			var compressed bytes.Buffer
			gz, _ := gzip.NewWriterLevel(&compressed, gzip.BestCompression) // error only possible for an invalid level
			if _, err = gz.Write(data); err != nil {
				return err
			}
			if err = gz.Close(); err != nil {
				return err
			}
			// only use the compressed data if it is smaller than the original data
			outputBytes := util.Iif(compressed.Len() < len(data), compressed.Bytes(), data)
			child := &embeddedFileInfo{
				BaseName:   dirEntry.Name(),
				OriginSize: int64(len(data)),
				DataBegin:  outputOffset,
				DataLen:    int64(len(outputBytes)),
			}
			if _, err = output.Write(outputBytes); err != nil {
				return err
			}
			outputOffset += child.DataLen
			parent.Children = append(parent.Children, child)
		}
		return nil
	}
	if err = embedFiles(meta.Root, fsRootPath, ""); err != nil {
		return err
	}
	jsonBuf, err := json.Marshal(meta) // can't use json.NewEncoder here because it writes extra EOL
	if err != nil {
		return err
	}
	// separator between the data section and the JSON trailer; the error was
	// previously ignored here
	if _, err = output.Write([]byte{'\n'}); err != nil {
		return err
	}
	_, err = output.Write(jsonBuf)
	return err
}

View File

@@ -0,0 +1,98 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package assetfs
import (
"bytes"
"io/fs"
"net/http"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestEmbed performs an end-to-end round-trip of the bindata format:
// generate a blob from a temporary directory tree, load it with
// NewEmbeddedFS, then verify reads, stats, directory listings and the
// net/http FileSystem adapter behavior.
func TestEmbed(t *testing.T) {
	tmpDir := t.TempDir()
	tmpDataDir := tmpDir + "/data"
	_ = os.MkdirAll(tmpDataDir+"/foo/bar", 0o755)
	_ = os.WriteFile(tmpDataDir+"/a.txt", []byte("a"), 0o644)
	// 1000 repeated bytes compress well, so b.txt will be stored gzipped
	_ = os.WriteFile(tmpDataDir+"/foo/bar/b.txt", bytes.Repeat([]byte("a"), 1000), 0o644)
	_ = os.WriteFile(tmpDataDir+"/foo/c.txt", []byte("c"), 0o644)
	require.NoError(t, GenerateEmbedBindata(tmpDataDir, tmpDir+"/out.dat"))
	data, err := os.ReadFile(tmpDir + "/out.dat")
	require.NoError(t, err)
	efs := NewEmbeddedFS(data)
	// test a non-existing file
	_, err = fs.ReadFile(efs, "not exist")
	assert.ErrorIs(t, err, fs.ErrNotExist)
	// test a normal file (no compression)
	content, err := fs.ReadFile(efs, "a.txt")
	require.NoError(t, err)
	assert.Equal(t, "a", string(content))
	fi, err := fs.Stat(efs, "a.txt")
	require.NoError(t, err)
	_, ok := fi.(EmbeddedFileInfo).GetGzipContent()
	assert.False(t, ok)
	// test a compressed file
	content, err = fs.ReadFile(efs, "foo/bar/b.txt")
	require.NoError(t, err)
	assert.Equal(t, bytes.Repeat([]byte("a"), 1000), content)
	fi, err = fs.Stat(efs, "foo/bar/b.txt")
	require.NoError(t, err)
	assert.False(t, fi.Mode().IsDir())
	assert.True(t, fi.Mode().IsRegular())
	gzipContent, ok := fi.(EmbeddedFileInfo).GetGzipContent()
	assert.True(t, ok)
	assert.Greater(t, len(gzipContent), 1)
	assert.Less(t, len(gzipContent), 1000)
	// test list root directory
	entries, err := fs.ReadDir(efs, ".")
	require.NoError(t, err)
	assert.Len(t, entries, 2)
	assert.Equal(t, "a.txt", entries[0].Name())
	assert.False(t, entries[0].IsDir())
	// test list subdirectory
	entries, err = fs.ReadDir(efs, "foo")
	require.NoError(t, err)
	require.Len(t, entries, 2)
	assert.Equal(t, "bar", entries[0].Name())
	assert.True(t, entries[0].IsDir())
	assert.Equal(t, "c.txt", entries[1].Name())
	assert.False(t, entries[1].IsDir())
	// test directory mode
	fi, err = fs.Stat(efs, "foo")
	require.NoError(t, err)
	assert.True(t, fi.IsDir())
	assert.True(t, fi.Mode().IsDir())
	assert.False(t, fi.Mode().IsRegular())
	// test httpfs
	hfs := http.FS(efs)
	hf, err := hfs.Open("foo/bar/b.txt")
	require.NoError(t, err)
	hi, err := hf.Stat()
	require.NoError(t, err)
	fiEmbedded, ok := hi.(EmbeddedFileInfo)
	require.True(t, ok)
	gzipContent, ok = fiEmbedded.GetGzipContent()
	assert.True(t, ok)
	assert.Greater(t, len(gzipContent), 1)
	assert.Less(t, len(gzipContent), 1000)
	// test httpfs directory listing
	hf, err = hfs.Open("foo")
	require.NoError(t, err)
	dirs, err := hf.Readdir(1)
	require.NoError(t, err)
	assert.Len(t, dirs, 1)
}

256
modules/assetfs/layered.go Normal file
View File

@@ -0,0 +1,256 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package assetfs
import (
"context"
"fmt"
"io"
"io/fs"
"net/http"
"os"
"path/filepath"
"sort"
"time"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/process"
"code.gitea.io/gitea/modules/util"
"github.com/fsnotify/fsnotify"
)
// Layer represents a layer in a layered asset file-system. It has a name and works like http.FileSystem
type Layer struct {
	name      string
	fs        http.FileSystem
	localPath string // set only for local (on-disk) layers; used by WatchLocalChanges
}
// Name returns the name this layer was created with.
func (l *Layer) Name() string {
	return l.name
}
// Open opens the named file. The caller is responsible for closing the file.
// It delegates directly to the layer's underlying http.FileSystem.
func (l *Layer) Open(name string) (http.File, error) {
	return l.fs.Open(name)
}
// Local returns a new Layer with the given name, it serves files from the given local path.
func Local(name, base string, sub ...string) *Layer {
	// TODO: the old behavior (StaticRootPath might not be absolute), not ideal, just keep the same as before
	// Ideally, the caller should guarantee the base is absolute, guessing a relative path based on the current working directory is unreliable.
	absBase, err := filepath.Abs(base)
	if err != nil {
		// This should never happen in a real system. If it happens, the user must have already been in trouble: the system is not able to resolve its own paths.
		panic(fmt.Sprintf("Unable to get absolute path for %q: %v", absBase, err))
	}
	localRoot := util.FilePathJoinAbs(absBase, sub...)
	return &Layer{name: name, fs: http.Dir(localRoot), localPath: localRoot}
}
// Bindata returns a new Layer with the given name, it serves files from the given bindata asset.
// (The parameter shadows the "io/fs" package name inside this function.)
func Bindata(name string, fs fs.FS) *Layer {
	return &Layer{name: name, fs: http.FS(fs)}
}
// LayeredFS is a layered asset file-system. It works like http.FileSystem, but it can have multiple layers.
// The first layer is the top layer, and it will be used first.
// If the file is not found in the top layer, it will be searched in the next layer.
type LayeredFS struct {
	layers []*Layer // ordered top (index 0) to bottom
}

// Layered returns a new LayeredFS with the given layers. The first layer is the top layer.
func Layered(layers ...*Layer) *LayeredFS {
	return &LayeredFS{layers: layers}
}
// Open opens the named file. The caller is responsible for closing the file.
// Layers are probed top-down; the first layer that either has the file or
// fails with a non-NotExist error determines the result.
func (l *LayeredFS) Open(name string) (http.File, error) {
	for _, layer := range l.layers {
		f, err := layer.Open(name)
		if os.IsNotExist(err) {
			continue // try the next (lower) layer
		}
		return f, err
	}
	return nil, fs.ErrNotExist
}
// ReadFile reads the named file.
// It is a convenience wrapper around ReadLayeredFile that discards the layer name.
func (l *LayeredFS) ReadFile(elems ...string) ([]byte, error) {
	bs, _, err := l.ReadLayeredFile(elems...)
	return bs, err
}
// ReadLayeredFile reads the named file and also reports the name of the
// layer it was served from. Layers are probed top-down.
func (l *LayeredFS) ReadLayeredFile(elems ...string) ([]byte, string, error) {
	name := util.PathJoinRel(elems...)
	for _, layer := range l.layers {
		file, err := layer.Open(name)
		if err != nil {
			if os.IsNotExist(err) {
				continue // not in this layer, keep looking
			}
			return nil, layer.name, err
		}
		content, err := io.ReadAll(file)
		_ = file.Close()
		return content, layer.name, err
	}
	return nil, "", fs.ErrNotExist
}
// shouldInclude reports whether a directory entry should appear in listings.
// Common hidden files are always excluded. The optional fileMode flag further
// restricts the result: true keeps only files, false keeps only directories.
func shouldInclude(info fs.FileInfo, fileMode ...bool) bool {
	if util.IsCommonHiddenFileName(info.Name()) {
		return false
	}
	switch len(fileMode) {
	case 0:
		return true
	case 1:
		isFile := !info.Mode().IsDir()
		return fileMode[0] == isFile
	default:
		panic("too many arguments for fileMode in shouldInclude")
	}
}
// readDir lists all entries of the named directory within a single layer.
// A missing directory is not an error: it yields a nil list so callers can
// merge results across layers.
func readDir(layer *Layer, name string) ([]fs.FileInfo, error) {
	f, err := layer.Open(name)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, err
	}
	defer f.Close()
	return f.Readdir(-1)
}
// ListFiles lists files/directories in the given directory. The fileMode controls the returned files.
// * omitted: all files and directories will be returned.
// * true: only files will be returned.
// * false: only directories will be returned.
// The returned files are sorted by name.
func (l *LayeredFS) ListFiles(name string, fileMode ...bool) ([]string, error) {
	seen := make(container.Set[string])
	for _, layer := range l.layers {
		entries, err := readDir(layer, name)
		if err != nil {
			return nil, err
		}
		for _, entry := range entries {
			if !shouldInclude(entry, fileMode...) {
				continue
			}
			seen.Add(entry.Name())
		}
	}
	names := seen.Values()
	sort.Strings(names)
	return names, nil
}
// ListAllFiles returns files/directories in the given directory, including subdirectories, recursively.
// The fileMode controls the returned files:
// * omitted: all files and directories will be returned.
// * true: only files will be returned.
// * false: only directories will be returned.
// The returned files are sorted by name.
// It delegates to listAllFiles over all layers of this filesystem.
func (l *LayeredFS) ListAllFiles(name string, fileMode ...bool) ([]string, error) {
	return listAllFiles(l.layers, name, fileMode...)
}
// listAllFiles recursively collects file/directory paths from the given
// layers, deduplicating via a set. See LayeredFS.ListAllFiles for fileMode
// semantics. A directory present in several layers is descended into once
// per layer; the set keeps the merged result unique.
func listAllFiles(layers []*Layer, name string, fileMode ...bool) ([]string, error) {
	fileSet := make(container.Set[string])
	var list func(dir string) error
	list = func(dir string) error {
		for _, layer := range layers {
			infos, err := readDir(layer, dir)
			if err != nil {
				return err
			}
			for _, info := range infos {
				path := util.PathJoinRelX(dir, info.Name())
				if shouldInclude(info, fileMode...) {
					fileSet.Add(path)
				}
				// recurse into subdirectories regardless of fileMode, so that
				// files below excluded directories are still visited
				if info.IsDir() {
					if err = list(path); err != nil {
						return err
					}
				}
			}
		}
		return nil
	}
	if err := list(name); err != nil {
		return nil, err
	}
	files := fileSet.Values()
	sort.Strings(files)
	return files, nil
}
// WatchLocalChanges watches local changes in the file-system. It's used to help to reload assets when the local file-system changes.
// It blocks until ctx is done; callback is invoked (debounced) after events
// on any watched directory of any local layer.
func (l *LayeredFS) WatchLocalChanges(ctx context.Context, callback func()) {
	ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Asset Local FileSystem Watcher", process.SystemProcessType, true)
	defer finished()
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Error("Unable to create watcher for asset local file-system: %v", err)
		return
	}
	defer watcher.Close()
	for _, layer := range l.layers {
		if layer.localPath == "" {
			continue // only local (on-disk) layers can be watched
		}
		// register every directory of the layer individually (the watcher
		// is fed one directory at a time, not recursively)
		layerDirs, err := listAllFiles([]*Layer{layer}, ".", false)
		if err != nil {
			log.Error("Unable to list directories for asset local file-system %q: %v", layer.localPath, err)
			continue
		}
		layerDirs = append(layerDirs, ".") // also watch the layer root itself
		for _, dir := range layerDirs {
			if err = watcher.Add(util.FilePathJoinAbs(layer.localPath, dir)); err != nil && !os.IsNotExist(err) {
				log.Error("Unable to watch directory %s: %v", dir, err)
			}
		}
	}
	// coalesce bursts of events into a single callback invocation
	debounce := util.Debounce(100 * time.Millisecond)
	for {
		select {
		case <-ctx.Done():
			return
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			log.Trace("Watched asset local file-system had event: %v", event)
			debounce(callback)
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Error("Watched asset local file-system had error: %v", err)
		}
	}
}
// GetFileLayerName returns the name of the first-seen layer that contains the given file.
// An empty string means the file was not found in any layer (or a probe failed).
func (l *LayeredFS) GetFileLayerName(elems ...string) string {
	name := util.PathJoinRel(elems...)
	for _, layer := range l.layers {
		f, err := layer.Open(name)
		if err != nil {
			if os.IsNotExist(err) {
				continue // not in this layer, keep looking
			}
			return ""
		}
		_ = f.Close()
		return layer.name
	}
	return ""
}

View File

@@ -0,0 +1,109 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package assetfs
import (
"io"
"io/fs"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestLayered builds two on-disk layers (l1 on top of l2) with overlapping
// and layer-unique files, then verifies that Open/ReadFile prefer the top
// layer, that listings merge both layers (hiding .DS_Store), and that
// GetFileLayerName reports the first layer containing a file.
func TestLayered(t *testing.T) {
	dir := filepath.Join(t.TempDir(), "assetfs-layers")
	dir1 := filepath.Join(dir, "l1")
	dir2 := filepath.Join(dir, "l2")
	mkdir := func(elems ...string) {
		assert.NoError(t, os.MkdirAll(filepath.Join(elems...), 0o755))
	}
	write := func(content string, elems ...string) {
		assert.NoError(t, os.WriteFile(filepath.Join(elems...), []byte(content), 0o644))
	}
	// d1 & f1: only in "l1"; d2 & f2: only in "l2"
	// da & fa: in both "l1" and "l2"
	mkdir(dir1, "d1")
	mkdir(dir1, "da")
	mkdir(dir1, "da/sub1")
	mkdir(dir2, "d2")
	mkdir(dir2, "da")
	mkdir(dir2, "da/sub2")
	// hidden file: must never show up in listings
	write("dummy", dir1, ".DS_Store")
	write("f1", dir1, "f1")
	write("fa-1", dir1, "fa")
	write("d1-f", dir1, "d1/f")
	write("da-f-1", dir1, "da/f")
	write("f2", dir2, "f2")
	write("fa-2", dir2, "fa")
	write("d2-f", dir2, "d2/f")
	write("da-f-2", dir2, "da/f")
	assets := Layered(Local("l1", dir1), Local("l2", dir2))
	f, err := assets.Open("f1")
	assert.NoError(t, err)
	bs, err := io.ReadAll(f)
	assert.NoError(t, err)
	assert.Equal(t, "f1", string(bs))
	_ = f.Close()
	assertRead := func(expected string, expectedErr error, elems ...string) {
		bs, err := assets.ReadFile(elems...)
		if err != nil {
			assert.ErrorIs(t, err, expectedErr)
		} else {
			assert.NoError(t, err)
			assert.Equal(t, expected, string(bs))
		}
	}
	assertRead("f1", nil, "f1")
	assertRead("f2", nil, "f2")
	assertRead("fa-1", nil, "fa")
	assertRead("d1-f", nil, "d1/f")
	assertRead("d2-f", nil, "d2/f")
	assertRead("da-f-1", nil, "da/f")
	assertRead("", fs.ErrNotExist, "no-such")
	files, err := assets.ListFiles(".", true)
	assert.NoError(t, err)
	assert.Equal(t, []string{"f1", "f2", "fa"}, files)
	files, err = assets.ListFiles(".", false)
	assert.NoError(t, err)
	assert.Equal(t, []string{"d1", "d2", "da"}, files)
	files, err = assets.ListFiles(".")
	assert.NoError(t, err)
	assert.Equal(t, []string{"d1", "d2", "da", "f1", "f2", "fa"}, files)
	files, err = assets.ListAllFiles(".", true)
	assert.NoError(t, err)
	assert.Equal(t, []string{"d1/f", "d2/f", "da/f", "f1", "f2", "fa"}, files)
	files, err = assets.ListAllFiles(".", false)
	assert.NoError(t, err)
	assert.Equal(t, []string{"d1", "d2", "da", "da/sub1", "da/sub2"}, files)
	files, err = assets.ListAllFiles(".")
	assert.NoError(t, err)
	assert.Equal(t, []string{
		"d1", "d1/f",
		"d2", "d2/f",
		"da", "da/f", "da/sub1", "da/sub2",
		"f1", "f2", "fa",
	}, files)
	assert.Empty(t, assets.GetFileLayerName("no-such"))
	assert.Equal(t, "l1", assets.GetFileLayerName("f1"))
	assert.Equal(t, "l2", assets.GetFileLayerName("f2"))
}