first-commit
routers/api/actions/actions.go | 24 (new file)
@@ -0,0 +1,24 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package actions

import (
	"net/http"

	"code.gitea.io/gitea/modules/web"
	"code.gitea.io/gitea/routers/api/actions/ping"
	"code.gitea.io/gitea/routers/api/actions/runner"
)

// Routes returns the router for the Actions ping and runner services;
// the mount prefix is stripped from the request path before the service handlers are invoked
func Routes(prefix string) *web.Router {
	m := web.NewRouter()

	path, handler := ping.NewPingServiceHandler()
	m.Post(path+"*", http.StripPrefix(prefix, handler).ServeHTTP)

	path, handler = runner.NewRunnerServiceHandler()
	m.Post(path+"*", http.StripPrefix(prefix, handler).ServeHTTP)

	return m
}
routers/api/actions/artifact.pb.go | 1058 (generated, new file)
File diff suppressed because it is too large
routers/api/actions/artifact.proto | 73 (new file)
@@ -0,0 +1,73 @@
syntax = "proto3";

import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";

package github.actions.results.api.v1;

message CreateArtifactRequest {
  string workflow_run_backend_id = 1;
  string workflow_job_run_backend_id = 2;
  string name = 3;
  google.protobuf.Timestamp expires_at = 4;
  int32 version = 5;
}

message CreateArtifactResponse {
  bool ok = 1;
  string signed_upload_url = 2;
}

message FinalizeArtifactRequest {
  string workflow_run_backend_id = 1;
  string workflow_job_run_backend_id = 2;
  string name = 3;
  int64 size = 4;
  google.protobuf.StringValue hash = 5;
}

message FinalizeArtifactResponse {
  bool ok = 1;
  int64 artifact_id = 2;
}

message ListArtifactsRequest {
  string workflow_run_backend_id = 1;
  string workflow_job_run_backend_id = 2;
  google.protobuf.StringValue name_filter = 3;
  google.protobuf.Int64Value id_filter = 4;
}

message ListArtifactsResponse {
  repeated ListArtifactsResponse_MonolithArtifact artifacts = 1;
}

message ListArtifactsResponse_MonolithArtifact {
  string workflow_run_backend_id = 1;
  string workflow_job_run_backend_id = 2;
  int64 database_id = 3;
  string name = 4;
  int64 size = 5;
  google.protobuf.Timestamp created_at = 6;
}

message GetSignedArtifactURLRequest {
  string workflow_run_backend_id = 1;
  string workflow_job_run_backend_id = 2;
  string name = 3;
}

message GetSignedArtifactURLResponse {
  string signed_url = 1;
}

message DeleteArtifactRequest {
  string workflow_run_backend_id = 1;
  string workflow_job_run_backend_id = 2;
  string name = 3;
}

message DeleteArtifactResponse {
  bool ok = 1;
  int64 artifact_id = 2;
}
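These messages are served through the generated types in artifact.pb.go and protojson (see parseProtobufBody/sendProtobufBody in artifactsv4.go below). A minimal sketch of the wire format, assuming the generated CreateArtifactRequest/CreateArtifactResponse types from this commit; the URL is a placeholder:

package main

import (
	"fmt"

	actions "code.gitea.io/gitea/routers/api/actions"
	"google.golang.org/protobuf/encoding/protojson"
)

func main() {
	// decode a twirp-style JSON body into the generated message type
	body := []byte(`{"workflow_run_backend_id": "21", "workflow_job_run_backend_id": "49", "name": "test", "version": 4}`)
	var req actions.CreateArtifactRequest
	if err := protojson.Unmarshal(body, &req); err != nil {
		panic(err)
	}
	fmt.Println(req.Name, req.Version) // test 4

	// and encode a response the same way
	out, _ := protojson.Marshal(&actions.CreateArtifactResponse{Ok: true, SignedUploadUrl: "http://example.invalid/upload"})
	fmt.Println(string(out))
}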
routers/api/actions/artifacts.go | 503 (new file)
@@ -0,0 +1,503 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package actions

// GitHub Actions Artifacts API Simple Description
//
// 1. Upload artifact
// 1.1. Post upload url
// Post: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts?api-version=6.0-preview
// Request:
// {
// "Type": "actions_storage",
// "Name": "artifact"
// }
// Response:
// {
// "fileContainerResourceUrl":"/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload"
// }
// it acquires an upload url for artifact upload
// 1.2. Upload artifact
// PUT: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload?itemPath=artifact%2Ffilename
// it uploads chunks with the headers:
// x-tfs-filelength: 1024 // total file length
// content-length: 1024 // chunk length
// x-actions-results-md5: md5sum // md5sum of chunk
// content-range: bytes 0-1023/1024 // chunk range
// we save all chunks to one storage directory after the md5sum check
// 1.3. Confirm upload
// PATCH: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload?itemPath=artifact%2Ffilename
// it confirms the upload, merges all chunks into one file and saves this file to storage
//
// 2. Download artifact
// 2.1 list artifacts
// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts?api-version=6.0-preview
// Response:
// {
// "count": 1,
// "value": [
// {
// "name": "artifact",
// "fileContainerResourceUrl": "/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/path"
// }
// ]
// }
// 2.2 download artifact
// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/path?api-version=6.0-preview
// Response:
// {
// "value": [
// {
// "contentLocation": "/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/download",
// "path": "artifact/filename",
// "itemType": "file"
// }
// ]
// }
// 2.3 download artifact file
// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/download?itemPath=artifact%2Ffilename
// Response:
// download file
//

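To make the flow above concrete, here is a rough client-side sketch of the three upload steps, following the routes as registered in ArtifactsRoutes below (host, run id, token and file content are placeholder values; error handling and the retentionDays query are elided, and the whole file is sent as a single chunk):

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	base := "http://localhost:3000/api/actions_pipeline" // placeholder host + prefix
	token := "ACTIONS_RUNTIME_TOKEN"                     // placeholder runner token
	client := &http.Client{}

	// 1.1. acquire an upload url for the artifact
	reqBody := bytes.NewBufferString(`{"Type": "actions_storage", "Name": "artifact"}`)
	req, _ := http.NewRequest("POST", base+"/_apis/pipelines/workflows/1/artifacts?api-version=6.0-preview", reqBody)
	req.Header.Set("Authorization", "Bearer "+token)
	resp, _ := client.Do(req)
	var up struct {
		FileContainerResourceURL string `json:"fileContainerResourceUrl"`
	}
	_ = json.NewDecoder(resp.Body).Decode(&up)
	resp.Body.Close()

	// 1.2. upload one chunk with the headers the server checks
	chunk := []byte("file content") // placeholder content, one chunk
	sum := md5.Sum(chunk)
	req, _ = http.NewRequest("PUT", up.FileContainerResourceURL+"?itemPath="+url.QueryEscape("artifact/filename"), bytes.NewReader(chunk))
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("x-tfs-filelength", fmt.Sprint(len(chunk)))
	req.Header.Set("x-actions-results-md5", base64.StdEncoding.EncodeToString(sum[:]))
	req.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", len(chunk)-1, len(chunk)))
	resp, _ = client.Do(req)
	resp.Body.Close()

	// 1.3. confirm the upload so the server merges the chunks
	// (the handler reads the artifactName query parameter on the collection route)
	req, _ = http.NewRequest("PATCH", base+"/_apis/pipelines/workflows/1/artifacts?artifactName=artifact&api-version=6.0-preview", nil)
	req.Header.Set("Authorization", "Bearer "+token)
	resp, _ = client.Do(req)
	resp.Body.Close()
}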
import (
	"crypto/md5"
	"errors"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models/actions"
	"code.gitea.io/gitea/models/db"
	"code.gitea.io/gitea/modules/httplib"
	"code.gitea.io/gitea/modules/json"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/util"
	"code.gitea.io/gitea/modules/web"
	web_types "code.gitea.io/gitea/modules/web/types"
	actions_service "code.gitea.io/gitea/services/actions"
	"code.gitea.io/gitea/services/context"
)

const artifactRouteBase = "/_apis/pipelines/workflows/{run_id}/artifacts"

type artifactContextKeyType struct{}

var artifactContextKey = artifactContextKeyType{}

type ArtifactContext struct {
	*context.Base

	ActionTask *actions.ActionTask
}

func init() {
	web.RegisterResponseStatusProvider[*ArtifactContext](func(req *http.Request) web_types.ResponseStatusProvider {
		return req.Context().Value(artifactContextKey).(*ArtifactContext)
	})
}

func ArtifactsRoutes(prefix string) *web.Router {
	m := web.NewRouter()
	m.Use(ArtifactContexter())

	r := artifactRoutes{
		prefix: prefix,
		fs:     storage.ActionsArtifacts,
	}

	m.Group(artifactRouteBase, func() {
		// retrieve, list and confirm artifacts
		m.Combo("").Get(r.listArtifacts).Post(r.getUploadArtifactURL).Patch(r.confirmUploadArtifact)
		// handle container artifacts list and download
		m.Put("/{artifact_hash}/upload", r.uploadArtifact)
		// handle artifacts download
		m.Get("/{artifact_hash}/download_url", r.getDownloadArtifactURL)
		m.Get("/{artifact_id}/download", r.downloadArtifact)
	})

	return m
}

func ArtifactContexter() func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
			base := context.NewBaseContext(resp, req)

			ctx := &ArtifactContext{Base: base}
			ctx.SetContextValue(artifactContextKey, ctx)

			// an action task calls the server api with a Bearer ACTIONS_RUNTIME_TOKEN,
			// which we have to verify
			authHeader := req.Header.Get("Authorization")
			if len(authHeader) == 0 || !strings.HasPrefix(authHeader, "Bearer ") {
				ctx.HTTPError(http.StatusUnauthorized, "Bad authorization header")
				return
			}

			// New act_runner uses jwt to authenticate
			tID, err := actions_service.ParseAuthorizationToken(req)

			var task *actions.ActionTask
			if err == nil {
				task, err = actions.GetTaskByID(req.Context(), tID)
				if err != nil {
					log.Error("Error runner api getting task by ID: %v", err)
					ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task by ID")
					return
				}
				if task.Status != actions.StatusRunning {
					log.Error("Error runner api getting task: task is not running")
					ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task: task is not running")
					return
				}
			} else {
				// Old act_runner uses GITEA_TOKEN to authenticate
				authToken := strings.TrimPrefix(authHeader, "Bearer ")

				task, err = actions.GetRunningTaskByToken(req.Context(), authToken)
				if err != nil {
					log.Error("Error runner api getting task: %v", err)
					ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task")
					return
				}
			}

			if err := task.LoadJob(req.Context()); err != nil {
				log.Error("Error runner api getting job: %v", err)
				ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting job")
				return
			}

			ctx.ActionTask = task
			next.ServeHTTP(ctx.Resp, ctx.Req)
		})
	}
}

type artifactRoutes struct {
	prefix string
	fs     storage.ObjectStorage
}

// buildArtifactURL assembles an absolute URL for an artifact route under this router's prefix
func (ar artifactRoutes) buildArtifactURL(ctx *ArtifactContext, runID int64, artifactHash, suffix string) string {
	uploadURL := strings.TrimSuffix(httplib.GuessCurrentAppURL(ctx), "/") + strings.TrimSuffix(ar.prefix, "/") +
		strings.ReplaceAll(artifactRouteBase, "{run_id}", strconv.FormatInt(runID, 10)) +
		"/" + artifactHash + "/" + suffix
	return uploadURL
}
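As an illustration of what this builder returns, a standalone sketch with made-up values (the hash shown is the md5 hex of "a"; app URL and prefix are placeholders):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// illustrative values, not from the commit
	appURL, prefix := "https://gitea.example.com", "/api/actions_pipeline"
	runID, artifactHash, suffix := int64(100), "0cc175b9c0f1b6a831c399e269772661", "upload"
	base := "/_apis/pipelines/workflows/{run_id}/artifacts"
	u := strings.TrimSuffix(appURL, "/") + prefix +
		strings.ReplaceAll(base, "{run_id}", strconv.FormatInt(runID, 10)) +
		"/" + artifactHash + "/" + suffix
	fmt.Println(u)
	// https://gitea.example.com/api/actions_pipeline/_apis/pipelines/workflows/100/artifacts/0cc175b9c0f1b6a831c399e269772661/upload
}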
type getUploadArtifactRequest struct {
	Type          string
	Name          string
	RetentionDays int64
}

type getUploadArtifactResponse struct {
	FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}

// getUploadArtifactURL generates a URL for uploading an artifact
func (ar artifactRoutes) getUploadArtifactURL(ctx *ArtifactContext) {
	_, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}

	var req getUploadArtifactRequest
	if err := json.NewDecoder(ctx.Req.Body).Decode(&req); err != nil {
		log.Error("Error decoding request body: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error decoding request body")
		return
	}

	// set retention days
	retentionQuery := ""
	if req.RetentionDays > 0 {
		retentionQuery = fmt.Sprintf("?retentionDays=%d", req.RetentionDays)
	}

	// use md5(artifact_name) to create the upload url
	artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(req.Name)))
	resp := getUploadArtifactResponse{
		FileContainerResourceURL: ar.buildArtifactURL(ctx, runID, artifactHash, "upload"+retentionQuery),
	}
	log.Debug("[artifact] get upload url: %s", resp.FileContainerResourceURL)
	ctx.JSON(http.StatusOK, resp)
}

func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
	task, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}
	artifactName, artifactPath, ok := parseArtifactItemPath(ctx)
	if !ok {
		return
	}

	// get upload file size
	fileRealTotalSize, contentLength := getUploadFileSize(ctx)

	// get artifact retention days
	expiredDays := setting.Actions.ArtifactRetentionDays
	if queryRetentionDays := ctx.Req.URL.Query().Get("retentionDays"); queryRetentionDays != "" {
		var err error
		expiredDays, err = strconv.ParseInt(queryRetentionDays, 10, 64)
		if err != nil {
			log.Error("Error parsing retention days: %v", err)
			ctx.HTTPError(http.StatusBadRequest, "Error parsing retention days")
			return
		}
	}
	log.Debug("[artifact] upload chunk, name: %s, path: %s, size: %d, retention days: %d",
		artifactName, artifactPath, fileRealTotalSize, expiredDays)

	// create or get artifact with name and path
	artifact, err := actions.CreateArtifact(ctx, task, artifactName, artifactPath, expiredDays)
	if err != nil {
		log.Error("Error creating or getting artifact: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error creating or getting artifact")
		return
	}

	// save chunk to storage; on success, return the chunks' total size
	// if the artifact is not gzipped when uploading, chunksTotalSize == fileRealTotalSize
	// if the artifact is gzipped when uploading, chunksTotalSize < fileRealTotalSize
	chunksTotalSize, err := saveUploadChunk(ar.fs, ctx, artifact, contentLength, runID)
	if err != nil {
		log.Error("Error saving upload chunk: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error saving upload chunk")
		return
	}

	// update the artifact size if it is zero or does not match; overwrite the artifact size
	if artifact.FileSize == 0 ||
		artifact.FileCompressedSize == 0 ||
		artifact.FileSize != fileRealTotalSize ||
		artifact.FileCompressedSize != chunksTotalSize {
		artifact.FileSize = fileRealTotalSize
		artifact.FileCompressedSize = chunksTotalSize
		artifact.ContentEncoding = ctx.Req.Header.Get("Content-Encoding")
		if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
			log.Error("Error updating artifact: %v", err)
			ctx.HTTPError(http.StatusInternalServerError, "Error updating artifact")
			return
		}
		log.Debug("[artifact] update artifact size, artifact_id: %d, size: %d, compressed size: %d",
			artifact.ID, artifact.FileSize, artifact.FileCompressedSize)
	}

	ctx.JSON(http.StatusOK, map[string]string{
		"message": "success",
	})
}

// confirmUploadArtifact confirms the artifact upload.
// if all chunks are uploaded, they are merged into one file.
func (ar artifactRoutes) confirmUploadArtifact(ctx *ArtifactContext) {
	_, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}
	artifactName := ctx.Req.URL.Query().Get("artifactName")
	if artifactName == "" {
		log.Error("Error artifact name is empty")
		ctx.HTTPError(http.StatusBadRequest, "Error artifact name is empty")
		return
	}
	if err := mergeChunksForRun(ctx, ar.fs, runID, artifactName); err != nil {
		log.Error("Error merging chunks: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error merging chunks")
		return
	}
	ctx.JSON(http.StatusOK, map[string]string{
		"message": "success",
	})
}

type (
	listArtifactsResponse struct {
		Count int64                       `json:"count"`
		Value []listArtifactsResponseItem `json:"value"`
	}
	listArtifactsResponseItem struct {
		Name                     string `json:"name"`
		FileContainerResourceURL string `json:"fileContainerResourceUrl"`
	}
)

func (ar artifactRoutes) listArtifacts(ctx *ArtifactContext) {
	_, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}

	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
		RunID:  runID,
		Status: int(actions.ArtifactStatusUploadConfirmed),
	})
	if err != nil {
		log.Error("Error getting artifacts: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}
	if len(artifacts) == 0 {
		log.Debug("[artifact] handleListArtifacts, no artifacts")
		ctx.HTTPError(http.StatusNotFound)
		return
	}

	var (
		items  []listArtifactsResponseItem
		values = make(map[string]bool)
	)

	for _, art := range artifacts {
		if values[art.ArtifactName] {
			continue
		}
		artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(art.ArtifactName)))
		item := listArtifactsResponseItem{
			Name:                     art.ArtifactName,
			FileContainerResourceURL: ar.buildArtifactURL(ctx, runID, artifactHash, "download_url"),
		}
		items = append(items, item)
		values[art.ArtifactName] = true

		log.Debug("[artifact] handleListArtifacts, name: %s, url: %s", item.Name, item.FileContainerResourceURL)
	}

	respData := listArtifactsResponse{
		Count: int64(len(items)),
		Value: items,
	}
	ctx.JSON(http.StatusOK, respData)
}

type (
	downloadArtifactResponse struct {
		Value []downloadArtifactResponseItem `json:"value"`
	}
	downloadArtifactResponseItem struct {
		Path            string `json:"path"`
		ItemType        string `json:"itemType"`
		ContentLocation string `json:"contentLocation"`
	}
)

// getDownloadArtifactURL generates a download url for each artifact
func (ar artifactRoutes) getDownloadArtifactURL(ctx *ArtifactContext) {
	_, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}

	itemPath := util.PathJoinRel(ctx.Req.URL.Query().Get("itemPath"))
	if !validateArtifactHash(ctx, itemPath) {
		return
	}

	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
		RunID:        runID,
		ArtifactName: itemPath,
		Status:       int(actions.ArtifactStatusUploadConfirmed),
	})
	if err != nil {
		log.Error("Error getting artifacts: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}
	if len(artifacts) == 0 {
		log.Debug("[artifact] getDownloadArtifactURL, no artifacts")
		ctx.HTTPError(http.StatusNotFound)
		return
	}

	if itemPath != artifacts[0].ArtifactName {
		log.Error("Error mismatched artifact name, itemPath: %v, artifact: %v", itemPath, artifacts[0].ArtifactName)
		ctx.HTTPError(http.StatusBadRequest, "Error mismatched artifact name")
		return
	}

	var items []downloadArtifactResponseItem
	for _, artifact := range artifacts {
		var downloadURL string
		if setting.Actions.ArtifactStorage.ServeDirect() {
			u, err := ar.fs.URL(artifact.StoragePath, artifact.ArtifactName, nil)
			if err != nil && !errors.Is(err, storage.ErrURLNotSupported) {
				log.Error("Error getting serve direct url: %v", err)
			}
			if u != nil {
				downloadURL = u.String()
			}
		}
		if downloadURL == "" {
			downloadURL = ar.buildArtifactURL(ctx, runID, strconv.FormatInt(artifact.ID, 10), "download")
		}
		item := downloadArtifactResponseItem{
			Path:            util.PathJoinRel(itemPath, artifact.ArtifactPath),
			ItemType:        "file",
			ContentLocation: downloadURL,
		}
		log.Debug("[artifact] getDownloadArtifactURL, path: %s, url: %s", item.Path, item.ContentLocation)
		items = append(items, item)
	}
	respData := downloadArtifactResponse{
		Value: items,
	}
	ctx.JSON(http.StatusOK, respData)
}

// downloadArtifact downloads artifact content
func (ar artifactRoutes) downloadArtifact(ctx *ArtifactContext) {
	_, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}

	artifactID := ctx.PathParamInt64("artifact_id")
	artifact, exist, err := db.GetByID[actions.ActionArtifact](ctx, artifactID)
	if err != nil {
		log.Error("Error getting artifact: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}
	if !exist {
		log.Error("artifact with ID %d does not exist", artifactID)
		ctx.HTTPError(http.StatusNotFound, fmt.Sprintf("artifact with ID %d does not exist", artifactID))
		return
	}
	if artifact.RunID != runID {
		log.Error("Error mismatched runID and artifactID, task: %v, artifact: %v", runID, artifactID)
		ctx.HTTPError(http.StatusBadRequest)
		return
	}
	if artifact.Status != actions.ArtifactStatusUploadConfirmed {
		log.Error("Error artifact not found: %s", artifact.Status.ToString())
		ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
		return
	}

	fd, err := ar.fs.Open(artifact.StoragePath)
	if err != nil {
		log.Error("Error opening file: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}
	defer fd.Close()

	// if the artifact is compressed, set the content-encoding header to gzip
	if artifact.ContentEncoding == "gzip" {
		ctx.Resp.Header().Set("Content-Encoding", "gzip")
	}
	log.Debug("[artifact] downloadArtifact, name: %s, path: %s, storage: %s, size: %d", artifact.ArtifactName, artifact.ArtifactPath, artifact.StoragePath, artifact.FileSize)
	ctx.ServeContent(fd, &context.ServeHeaderOptions{
		Filename:     artifact.ArtifactName,
		LastModified: artifact.CreatedUnix.AsLocalTime(),
	})
}
routers/api/actions/artifacts_chunks.go | 301 (new file)
@@ -0,0 +1,301 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package actions

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"code.gitea.io/gitea/models/actions"
	"code.gitea.io/gitea/models/db"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/storage"
)

func saveUploadChunkBase(st storage.ObjectStorage, ctx *ArtifactContext,
	artifact *actions.ActionArtifact,
	contentSize, runID, start, end, length int64, checkMd5 bool,
) (int64, error) {
	// build chunk store path
	storagePath := fmt.Sprintf("tmp%d/%d-%d-%d-%d.chunk", runID, runID, artifact.ID, start, end)
	var r io.Reader = ctx.Req.Body
	var hasher hash.Hash
	if checkMd5 {
		// use io.TeeReader to avoid buffering the whole body just for the md5 sum:
		// it feeds data into the hasher while the body is being read;
		// if the hash does not match afterwards, the stored result is deleted
		hasher = md5.New()
		r = io.TeeReader(r, hasher)
	}
	// save chunk to storage
	writtenSize, err := st.Save(storagePath, r, contentSize)
	if err != nil {
		return -1, fmt.Errorf("save chunk to storage error: %v", err)
	}
	var checkErr error
	if checkMd5 {
		// check md5
		reqMd5String := ctx.Req.Header.Get(artifactXActionsResultsMD5Header)
		chunkMd5String := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
		log.Info("[artifact] check chunk md5, sum: %s, header: %s", chunkMd5String, reqMd5String)
		// if the md5 does not match, delete the chunk
		if reqMd5String != chunkMd5String {
			checkErr = errors.New("md5 not match")
		}
	}
	if writtenSize != contentSize {
		checkErr = errors.Join(checkErr, fmt.Errorf("writtenSize %d not match contentSize %d", writtenSize, contentSize))
	}
	if checkErr != nil {
		if err := st.Delete(storagePath); err != nil {
			log.Error("Error deleting chunk: %s, %v", storagePath, err)
		}
		return -1, checkErr
	}
	log.Info("[artifact] save chunk %s, size: %d, artifact id: %d, start: %d, end: %d",
		storagePath, contentSize, artifact.ID, start, end)
	// return the chunks' total size
	return length, nil
}

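The TeeReader trick above in isolation, as a standalone sketch (not part of the commit): the hash is computed as a side effect of streaming the body rather than by reading it twice or buffering it:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := strings.NewReader("chunk payload") // stands in for ctx.Req.Body
	hasher := md5.New()
	// every byte read from r also flows into hasher
	r := io.TeeReader(body, hasher)
	written, _ := io.Copy(io.Discard, r) // stands in for st.Save consuming r
	sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
	fmt.Printf("streamed %d bytes, md5 (encoded as the client sends it): %s\n", written, sum)
}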
func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
	artifact *actions.ActionArtifact,
	contentSize, runID int64,
) (int64, error) {
	// parse content-range header, format: bytes 0-1023/146515
	contentRange := ctx.Req.Header.Get("Content-Range")
	start, end, length := int64(0), int64(0), int64(0)
	if _, err := fmt.Sscanf(contentRange, "bytes %d-%d/%d", &start, &end, &length); err != nil {
		log.Warn("parse content range error: %v, content-range: %s", err, contentRange)
		return -1, fmt.Errorf("parse content range error: %v", err)
	}
	return saveUploadChunkBase(st, ctx, artifact, contentSize, runID, start, end, length, true)
}
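A standalone check of the header format this function expects, e.g. for the second 1024-byte chunk of a 146515-byte file (values illustrative):

package main

import "fmt"

func main() {
	contentRange := "bytes 1024-2047/146515" // as sent by the uploader
	var start, end, length int64
	if _, err := fmt.Sscanf(contentRange, "bytes %d-%d/%d", &start, &end, &length); err != nil {
		panic(err)
	}
	fmt.Println(start, end, length) // 1024 2047 146515
}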
func appendUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
	artifact *actions.ActionArtifact,
	start, contentSize, runID int64,
) (int64, error) {
	end := start + contentSize - 1
	return saveUploadChunkBase(st, ctx, artifact, contentSize, runID, start, end, contentSize, false)
}

type chunkFileItem struct {
	RunID      int64
	ArtifactID int64
	Start      int64
	End        int64
	Path       string
}

func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chunkFileItem, error) {
	storageDir := fmt.Sprintf("tmp%d", runID)
	var chunks []*chunkFileItem
	if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
		baseName := filepath.Base(fpath)
		// when chunks are read from storage, the path only contains the storage dir and the basename,
		// regardless of the subdirectory setting in the storage config
		item := chunkFileItem{Path: storageDir + "/" + baseName}
		if _, err := fmt.Sscanf(baseName, "%d-%d-%d-%d.chunk", &item.RunID, &item.ArtifactID, &item.Start, &item.End); err != nil {
			return fmt.Errorf("parse content range error: %v", err)
		}
		chunks = append(chunks, &item)
		return nil
	}); err != nil {
		return nil, err
	}
	// group chunks by artifact id
	chunksMap := make(map[int64][]*chunkFileItem)
	for _, c := range chunks {
		chunksMap[c.ArtifactID] = append(chunksMap[c.ArtifactID], c)
	}
	return chunksMap, nil
}

func listChunksByRunIDV4(st storage.ObjectStorage, runID, artifactID int64, blist *BlockList) ([]*chunkFileItem, error) {
	storageDir := fmt.Sprintf("tmpv4%d", runID)
	var chunks []*chunkFileItem
	chunkMap := map[string]*chunkFileItem{}
	dummy := &chunkFileItem{}
	for _, name := range blist.Latest {
		chunkMap[name] = dummy
	}
	if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
		baseName := filepath.Base(fpath)
		if !strings.HasPrefix(baseName, "block-") {
			return nil
		}
		// when chunks are read from storage, the path only contains the storage dir and the basename,
		// regardless of the subdirectory setting in the storage config
		item := chunkFileItem{Path: storageDir + "/" + baseName, ArtifactID: artifactID}
		var size int64
		var b64chunkName string
		if _, err := fmt.Sscanf(baseName, "block-%d-%d-%s", &item.RunID, &size, &b64chunkName); err != nil {
			return fmt.Errorf("parse content range error: %v", err)
		}
		rchunkName, err := base64.URLEncoding.DecodeString(b64chunkName)
		if err != nil {
			return fmt.Errorf("failed to parse chunkName: %v", err)
		}
		chunkName := string(rchunkName)
		item.End = item.Start + size - 1
		if _, ok := chunkMap[chunkName]; ok {
			chunkMap[chunkName] = &item
		}
		return nil
	}); err != nil {
		return nil, err
	}
	for i, name := range blist.Latest {
		chunk, ok := chunkMap[name]
		if !ok || chunk.Path == "" {
			return nil, fmt.Errorf("missing Chunk (%d/%d): %s", i, len(blist.Latest), name)
		}
		chunks = append(chunks, chunk)
		if i > 0 {
			chunk.Start = chunkMap[blist.Latest[i-1]].End + 1
			chunk.End += chunk.Start
		}
	}
	return chunks, nil
}

func mergeChunksForRun(ctx *ArtifactContext, st storage.ObjectStorage, runID int64, artifactName string) error {
	// read all db artifacts by name
	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
		RunID:        runID,
		ArtifactName: artifactName,
	})
	if err != nil {
		return err
	}
	// read all uploading chunks from storage
	chunksMap, err := listChunksByRunID(st, runID)
	if err != nil {
		return err
	}
	// iterate over db artifacts to merge their chunks
	for _, art := range artifacts {
		chunks, ok := chunksMap[art.ID]
		if !ok {
			log.Debug("artifact %d chunks not found", art.ID)
			continue
		}
		if err := mergeChunksForArtifact(ctx, chunks, st, art, ""); err != nil {
			return err
		}
	}
	return nil
}

func mergeChunksForArtifact(ctx *ArtifactContext, chunks []*chunkFileItem, st storage.ObjectStorage, artifact *actions.ActionArtifact, checksum string) error {
	sort.Slice(chunks, func(i, j int) bool {
		return chunks[i].Start < chunks[j].Start
	})
	allChunks := make([]*chunkFileItem, 0)
	startAt := int64(-1)
	// check that all chunks are uploaded and in order, and drop repeated chunks
	for _, c := range chunks {
		// startAt == -1 means this is the first chunk;
		// previous c.End + 1 == c.Start means this chunk is in order;
		// startAt != -1 and c.Start != startAt + 1 means a chunk is missing
		if c.Start == (startAt + 1) {
			allChunks = append(allChunks, c)
			startAt = c.End
		}
	}
	// if the last chunk's End + 1 is not equal to the expected length, the chunks were not uploaded completely
	if startAt+1 != artifact.FileCompressedSize {
		log.Debug("[artifact] chunks are not uploaded completely, artifact_id: %d", artifact.ID)
		return nil
	}
	// use a multiReader
	readers := make([]io.Reader, 0, len(allChunks))
	closeReaders := func() {
		for _, r := range readers {
			_ = r.(io.Closer).Close() // guaranteed to be an io.Closer by the following loop's Open function
		}
		readers = nil
	}
	defer closeReaders()
	for _, c := range allChunks {
		var readCloser io.ReadCloser
		var err error
		if readCloser, err = st.Open(c.Path); err != nil {
			return fmt.Errorf("open chunk error: %v, %s", err, c.Path)
		}
		readers = append(readers, readCloser)
	}
	mergedReader := io.MultiReader(readers...)
	shaPrefix := "sha256:"
	var hash hash.Hash
	if strings.HasPrefix(checksum, shaPrefix) {
		hash = sha256.New()
	}
	if hash != nil {
		mergedReader = io.TeeReader(mergedReader, hash)
	}

	// if the chunks are gzipped, use gz as the extension;
	// the download-artifact action uses the content-encoding header to decide whether it should decompress the file
	extension := "chunk"
	if artifact.ContentEncoding == "gzip" {
		extension = "chunk.gz"
	}

	// save merged file
	storagePath := fmt.Sprintf("%d/%d/%d.%s", artifact.RunID%255, artifact.ID%255, time.Now().UnixNano(), extension)
	written, err := st.Save(storagePath, mergedReader, artifact.FileCompressedSize)
	if err != nil {
		return fmt.Errorf("save merged file error: %v", err)
	}
	if written != artifact.FileCompressedSize {
		return errors.New("merged file size is not equal to chunk length")
	}

	defer func() {
		closeReaders() // close before delete
		// drop chunks
		for _, c := range chunks {
			if err := st.Delete(c.Path); err != nil {
				log.Warn("Error deleting chunk: %s, %v", c.Path, err)
			}
		}
	}()

	if hash != nil {
		rawChecksum := hash.Sum(nil)
		actualChecksum := hex.EncodeToString(rawChecksum)
		if !strings.HasSuffix(checksum, actualChecksum) {
			return fmt.Errorf("update artifact error checksum is invalid %v vs %v", checksum, actualChecksum)
		}
	}

	// save the storage path to the artifact
	log.Debug("[artifact] merge chunks to artifact: %d, %s, old:%s", artifact.ID, storagePath, artifact.StoragePath)
	// if the artifact was already uploaded, delete the old file
	if artifact.StoragePath != "" {
		if err := st.Delete(artifact.StoragePath); err != nil {
			log.Warn("Error deleting old artifact: %s, %v", artifact.StoragePath, err)
		}
	}

	artifact.StoragePath = storagePath
	artifact.Status = actions.ArtifactStatusUploadConfirmed
	if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
		return fmt.Errorf("update artifact error: %v", err)
	}

	return nil
}
routers/api/actions/artifacts_utils.go | 94 (new file)
@@ -0,0 +1,94 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package actions

import (
	"crypto/md5"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models/actions"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/util"
)

const (
	artifactXTfsFileLengthHeader     = "x-tfs-filelength"
	artifactXActionsResultsMD5Header = "x-actions-results-md5"
)

// The rules are from https://github.com/actions/toolkit/blob/main/packages/artifact/src/internal/path-and-artifact-name-validation.ts#L32
var invalidArtifactNameChars = strings.Join([]string{"\\", "/", "\"", ":", "<", ">", "|", "*", "?", "\r", "\n"}, "")

func validateArtifactName(ctx *ArtifactContext, artifactName string) bool {
	if strings.ContainsAny(artifactName, invalidArtifactNameChars) {
		log.Error("Error checking artifact name: it contains an invalid character")
		ctx.HTTPError(http.StatusBadRequest, "Error checking artifact name: it contains an invalid character")
		return false
	}
	return true
}

func validateRunID(ctx *ArtifactContext) (*actions.ActionTask, int64, bool) {
	task := ctx.ActionTask
	runID := ctx.PathParamInt64("run_id")
	if task.Job.RunID != runID {
		log.Error("Error runID does not match")
		ctx.HTTPError(http.StatusBadRequest, "run-id does not match")
		return nil, 0, false
	}
	return task, runID, true
}

func validateRunIDV4(ctx *ArtifactContext, rawRunID string) (*actions.ActionTask, int64, bool) { //nolint:unparam // ActionTask is never used
	task := ctx.ActionTask
	runID, err := strconv.ParseInt(rawRunID, 10, 64)
	if err != nil || task.Job.RunID != runID {
		log.Error("Error runID does not match")
		ctx.HTTPError(http.StatusBadRequest, "run-id does not match")
		return nil, 0, false
	}
	return task, runID, true
}

func validateArtifactHash(ctx *ArtifactContext, artifactName string) bool {
	paramHash := ctx.PathParam("artifact_hash")
	// use the artifact name to create the upload url
	artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(artifactName)))
	if paramHash == artifactHash {
		return true
	}
	log.Error("Invalid artifact hash: %s", paramHash)
	ctx.HTTPError(http.StatusBadRequest, "Invalid artifact hash")
	return false
}

func parseArtifactItemPath(ctx *ArtifactContext) (string, string, bool) {
	// itemPath is generated by the upload-artifact action;
	// it is formatted as {artifact_name}/{artifact_path_in_runner};
	// when act_runner runs in host mode on Windows, itemPath is joined with the Windows separator '\'
	itemPath := util.PathJoinRelX(ctx.Req.URL.Query().Get("itemPath"))
	artifactName := strings.Split(itemPath, "/")[0]
	artifactPath := strings.TrimPrefix(itemPath, artifactName+"/")
	if !validateArtifactHash(ctx, artifactName) {
		return "", "", false
	}
	if !validateArtifactName(ctx, artifactName) {
		return "", "", false
	}
	return artifactName, artifactPath, true
}
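The split performed above, on a concrete value (PathJoinRelX normalization is elided; plain strings suffice to show the shape):

package main

import (
	"fmt"
	"strings"
)

func main() {
	itemPath := "artifact/dir/filename.txt" // {artifact_name}/{artifact_path_in_runner}
	artifactName := strings.Split(itemPath, "/")[0]
	artifactPath := strings.TrimPrefix(itemPath, artifactName+"/")
	fmt.Println(artifactName, artifactPath) // artifact dir/filename.txt
}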
// getUploadFileSize returns the size of the file to be uploaded.
// The raw size is the size of the file as reported by the header X-TFS-FileLength.
func getUploadFileSize(ctx *ArtifactContext) (int64, int64) {
	contentLength := ctx.Req.ContentLength
	xTfsLength, _ := strconv.ParseInt(ctx.Req.Header.Get(artifactXTfsFileLengthHeader), 10, 64)
	if xTfsLength > 0 {
		return xTfsLength, contentLength
	}
	return contentLength, contentLength
}
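For a gzip-compressed upload the two returned values differ: Content-Length is the size of the compressed chunk on the wire, while X-TFS-FileLength carries the raw file size. A sketch of the same header arithmetic with made-up sizes:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

func main() {
	// a request whose body is a 1024-byte gzip chunk of a 4096-byte file (sizes illustrative)
	req, _ := http.NewRequest("PUT", "http://example.invalid/upload", strings.NewReader(strings.Repeat("x", 1024)))
	req.Header.Set("x-tfs-filelength", "4096")

	contentLength := req.ContentLength
	xTfsLength, _ := strconv.ParseInt(req.Header.Get("x-tfs-filelength"), 10, 64)
	fileRealTotalSize := contentLength
	if xTfsLength > 0 {
		fileRealTotalSize = xTfsLength
	}
	fmt.Println(fileRealTotalSize, contentLength) // 4096 1024
}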
routers/api/actions/artifactsv4.go | 586 (new file)
@@ -0,0 +1,586 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package actions

// GitHub Actions Artifacts V4 API Simple Description
//
// 1. Upload artifact
// 1.1. CreateArtifact
// Post: /twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact
// Request:
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name": "test",
// "version": 4
// }
// Response:
// {
// "ok": true,
// "signedUploadUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75"
// }
// 1.2. Upload Zip Content to Blobstorage (unauthenticated request)
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=block
// 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock
// 1.4. BlockList xml payload to Blobstorage (unauthenticated request)
// Files of about 800MB are uploaded in parallel and/or out of order; this file is needed to ensure the correct order
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList
// Request
// <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
// <BlockList>
// <Latest>blockId1</Latest>
// <Latest>blockId2</Latest>
// </BlockList>
// 1.5. FinalizeArtifact
// Post: /twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact
// Request
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name": "test",
// "size": "2097",
// "hash": "sha256:b6325614d5649338b87215d9536b3c0477729b8638994c74cdefacb020a2cad4"
// }
// Response
// {
// "ok": true,
// "artifactId": "4"
// }
// 2. Download artifact
// 2.1. ListArtifacts and optionally filter by artifact exact name or id
// Post: /twirp/github.actions.results.api.v1.ArtifactService/ListArtifacts
// Request
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name_filter": "test"
// }
// Response
// {
// "artifacts": [
// {
// "workflowRunBackendId": "21",
// "workflowJobRunBackendId": "49",
// "databaseId": "4",
// "name": "test",
// "size": "2093",
// "createdAt": "2024-01-23T00:13:28Z"
// }
// ]
// }
// 2.2. GetSignedArtifactURL get the URL to download the artifact zip file of a specific artifact
// Post: /twirp/github.actions.results.api.v1.ArtifactService/GetSignedArtifactURL
// Request
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name": "test"
// }
// Response
// {
// "signedUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76"
// }
// 2.3. Download Zip from Blobstorage (unauthenticated request)
// GET: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76

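The sig/expires query parameters in the URLs above carry an HMAC-SHA256 over the endpoint, expiry string, artifact name, task id and artifact id, mirroring buildSignature/verifySignature below. A standalone sketch of that scheme (the secret is a placeholder standing in for setting.GetGeneralTokenSigningSecret(); values are taken from the sample URLs):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func sign(secret []byte, endp, expires, artifactName string, taskID, artifactID int64) []byte {
	mac := hmac.New(sha256.New, secret)
	mac.Write([]byte(endp))
	mac.Write([]byte(expires))
	mac.Write([]byte(artifactName))
	fmt.Fprint(mac, taskID)
	fmt.Fprint(mac, artifactID)
	return mac.Sum(nil)
}

func main() {
	secret := []byte("placeholder-signing-secret")
	expires := "2024-01-23 21:48:37.20833956 +0100 CET"
	sig := sign(secret, "UploadArtifact", expires, "test", 75, 4)
	encoded := base64.URLEncoding.EncodeToString(sig) // what goes into the sig= query parameter

	// verification recomputes the signature from the query parameters
	// and compares in constant time
	decoded, _ := base64.URLEncoding.DecodeString(encoded)
	fmt.Println(hmac.Equal(decoded, sign(secret, "UploadArtifact", expires, "test", 75, 4))) // true
}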
import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"code.gitea.io/gitea/models/actions"
	"code.gitea.io/gitea/models/db"
	"code.gitea.io/gitea/modules/httplib"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/util"
	"code.gitea.io/gitea/modules/web"
	"code.gitea.io/gitea/services/context"

	"google.golang.org/protobuf/encoding/protojson"
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/timestamppb"
)

const (
	ArtifactV4RouteBase       = "/twirp/github.actions.results.api.v1.ArtifactService"
	ArtifactV4ContentEncoding = "application/zip"
)

type artifactV4Routes struct {
	prefix string
	fs     storage.ObjectStorage
}

func ArtifactV4Contexter() func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
			base := context.NewBaseContext(resp, req)
			ctx := &ArtifactContext{Base: base}
			ctx.SetContextValue(artifactContextKey, ctx)
			next.ServeHTTP(ctx.Resp, ctx.Req)
		})
	}
}

func ArtifactsV4Routes(prefix string) *web.Router {
	m := web.NewRouter()

	r := artifactV4Routes{
		prefix: prefix,
		fs:     storage.ActionsArtifacts,
	}

	m.Group("", func() {
		m.Post("CreateArtifact", r.createArtifact)
		m.Post("FinalizeArtifact", r.finalizeArtifact)
		m.Post("ListArtifacts", r.listArtifacts)
		m.Post("GetSignedArtifactURL", r.getSignedArtifactURL)
		m.Post("DeleteArtifact", r.deleteArtifact)
	}, ArtifactContexter())
	m.Group("", func() {
		m.Put("UploadArtifact", r.uploadArtifact)
		m.Get("DownloadArtifact", r.downloadArtifact)
	}, ArtifactV4Contexter())

	return m
}

func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID, artifactID int64) []byte {
	mac := hmac.New(sha256.New, setting.GetGeneralTokenSigningSecret())
	mac.Write([]byte(endp))
	mac.Write([]byte(expires))
	mac.Write([]byte(artifactName))
	fmt.Fprint(mac, taskID)
	fmt.Fprint(mac, artifactID)
	return mac.Sum(nil)
}

func (r artifactV4Routes) buildArtifactURL(ctx *ArtifactContext, endp, artifactName string, taskID, artifactID int64) string {
	expires := time.Now().Add(60 * time.Minute).Format("2006-01-02 15:04:05.999999999 -0700 MST")
	uploadURL := strings.TrimSuffix(httplib.GuessCurrentAppURL(ctx), "/") + strings.TrimSuffix(r.prefix, "/") +
		"/" + endp + "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID, artifactID)) + "&expires=" + url.QueryEscape(expires) + "&artifactName=" + url.QueryEscape(artifactName) + "&taskID=" + strconv.FormatInt(taskID, 10) + "&artifactID=" + strconv.FormatInt(artifactID, 10)
	return uploadURL
}

func (r artifactV4Routes) verifySignature(ctx *ArtifactContext, endp string) (*actions.ActionTask, string, bool) {
	rawTaskID := ctx.Req.URL.Query().Get("taskID")
	rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
	sig := ctx.Req.URL.Query().Get("sig")
	expires := ctx.Req.URL.Query().Get("expires")
	artifactName := ctx.Req.URL.Query().Get("artifactName")
	dsig, _ := base64.URLEncoding.DecodeString(sig)
	taskID, _ := strconv.ParseInt(rawTaskID, 10, 64)
	artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)

	expectedSig := r.buildSignature(endp, expires, artifactName, taskID, artifactID)
	if !hmac.Equal(dsig, expectedSig) {
		log.Error("Error unauthorized")
		ctx.HTTPError(http.StatusUnauthorized, "Error unauthorized")
		return nil, "", false
	}
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", expires)
	if err != nil || t.Before(time.Now()) {
		log.Error("Error link expired")
		ctx.HTTPError(http.StatusUnauthorized, "Error link expired")
		return nil, "", false
	}
	task, err := actions.GetTaskByID(ctx, taskID)
	if err != nil {
		log.Error("Error runner api getting task by ID: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task by ID")
		return nil, "", false
	}
	if task.Status != actions.StatusRunning {
		log.Error("Error runner api getting task: task is not running")
		ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task: task is not running")
		return nil, "", false
	}
	if err := task.LoadJob(ctx); err != nil {
		log.Error("Error runner api getting job: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting job")
		return nil, "", false
	}
	return task, artifactName, true
}

func (r *artifactV4Routes) getArtifactByName(ctx *ArtifactContext, runID int64, name string) (*actions.ActionArtifact, error) {
	var art actions.ActionArtifact
	has, err := db.GetEngine(ctx).Where("run_id = ? AND artifact_name = ? AND artifact_path = ? AND content_encoding = ?", runID, name, name+".zip", ArtifactV4ContentEncoding).Get(&art)
	if err != nil {
		return nil, err
	} else if !has {
		return nil, util.ErrNotExist
	}
	return &art, nil
}

func (r *artifactV4Routes) parseProtobufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) bool {
	body, err := io.ReadAll(ctx.Req.Body)
	if err != nil {
		log.Error("Error decoding request body: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error decoding request body")
		return false
	}
	err = protojson.Unmarshal(body, req)
	if err != nil {
		log.Error("Error decoding request body: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error decoding request body")
		return false
	}
	return true
}

func (r *artifactV4Routes) sendProtobufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) {
	resp, err := protojson.Marshal(req)
	if err != nil {
		log.Error("Error encoding response body: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error encoding response body")
		return
	}
	ctx.Resp.Header().Set("Content-Type", "application/json;charset=utf-8")
	ctx.Resp.WriteHeader(http.StatusOK)
	_, _ = ctx.Resp.Write(resp)
}

func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
	var req CreateArtifactRequest

	if ok := r.parseProtobufBody(ctx, &req); !ok {
		return
	}
	_, _, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
	if !ok {
		return
	}

	artifactName := req.Name

	retentionDays := setting.Actions.ArtifactRetentionDays
	if req.ExpiresAt != nil {
		retentionDays = int64(time.Until(req.ExpiresAt.AsTime()).Hours() / 24)
	}
	// create or get artifact with name and path
	artifact, err := actions.CreateArtifact(ctx, ctx.ActionTask, artifactName, artifactName+".zip", retentionDays)
	if err != nil {
		log.Error("Error creating or getting artifact: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error creating or getting artifact")
		return
	}
	artifact.ContentEncoding = ArtifactV4ContentEncoding
	artifact.FileSize = 0
	artifact.FileCompressedSize = 0
	if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
		log.Error("Error UpdateArtifactByID: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, "Error UpdateArtifactByID")
		return
	}

	respData := CreateArtifactResponse{
		Ok:              true,
		SignedUploadUrl: r.buildArtifactURL(ctx, "UploadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID),
	}
	r.sendProtobufBody(ctx, &respData)
}

func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) {
	task, artifactName, ok := r.verifySignature(ctx, "UploadArtifact")
	if !ok {
		return
	}

	comp := ctx.Req.URL.Query().Get("comp")
	switch comp {
	case "block", "appendBlock":
		blockid := ctx.Req.URL.Query().Get("blockid")
		if blockid == "" {
			// get artifact by name
			artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
			if err != nil {
				log.Error("Error artifact not found: %v", err)
				ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
				return
			}

			_, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID)
			if err != nil {
				log.Error("Error appending chunk: %v", err)
				ctx.HTTPError(http.StatusInternalServerError, "Error appending chunk")
				return
			}
			artifact.FileCompressedSize += ctx.Req.ContentLength
			artifact.FileSize += ctx.Req.ContentLength
			if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
				log.Error("Error UpdateArtifactByID: %v", err)
				ctx.HTTPError(http.StatusInternalServerError, "Error UpdateArtifactByID")
				return
			}
		} else {
			_, err := r.fs.Save(fmt.Sprintf("tmpv4%d/block-%d-%d-%s", task.Job.RunID, task.Job.RunID, ctx.Req.ContentLength, base64.URLEncoding.EncodeToString([]byte(blockid))), ctx.Req.Body, -1)
			if err != nil {
				log.Error("Error saving block: %v", err)
				ctx.HTTPError(http.StatusInternalServerError, "Error saving block")
				return
			}
		}
		ctx.JSON(http.StatusCreated, "appended")
	case "blocklist":
		rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
		artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)
		_, err := r.fs.Save(fmt.Sprintf("tmpv4%d/%d-%d-blocklist", task.Job.RunID, task.Job.RunID, artifactID), ctx.Req.Body, -1)
		if err != nil {
			log.Error("Error saving blocklist: %v", err)
			ctx.HTTPError(http.StatusInternalServerError, "Error saving blocklist")
			return
		}
		ctx.JSON(http.StatusCreated, "created")
	}
}

type BlockList struct {
	Latest []string `xml:"Latest"`
}

type Latest struct {
	Value string `xml:",chardata"`
}

func (r *artifactV4Routes) readBlockList(runID, artifactID int64) (*BlockList, error) {
	blockListName := fmt.Sprintf("tmpv4%d/%d-%d-blocklist", runID, runID, artifactID)
	s, err := r.fs.Open(blockListName)
	if err != nil {
		return nil, err
	}

	xdec := xml.NewDecoder(s)
	blockList := &BlockList{}
	err = xdec.Decode(blockList)

	delerr := r.fs.Delete(blockListName)
	if delerr != nil {
		log.Warn("Failed to delete blockList %s: %v", blockListName, delerr)
	}
	return blockList, err
}
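readBlockList decodes the XML payload shown in the header comment; the same decoding on that sample, standalone:

package main

import (
	"encoding/xml"
	"fmt"
)

type BlockList struct {
	Latest []string `xml:"Latest"`
}

func main() {
	payload := `<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<BlockList>
<Latest>blockId1</Latest>
<Latest>blockId2</Latest>
</BlockList>`
	var bl BlockList
	if err := xml.Unmarshal([]byte(payload), &bl); err != nil {
		panic(err)
	}
	fmt.Println(bl.Latest) // [blockId1 blockId2]
}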
func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
|
||||
var req FinalizeArtifactRequest
|
||||
|
||||
if ok := r.parseProtbufBody(ctx, &req); !ok {
|
||||
return
|
||||
}
|
||||
_, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// get artifact by name
|
||||
artifact, err := r.getArtifactByName(ctx, runID, req.Name)
|
||||
if err != nil {
|
||||
log.Error("Error artifact not found: %v", err)
|
||||
ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
|
||||
return
|
||||
}
|
||||
|
||||
var chunks []*chunkFileItem
|
||||
blockList, err := r.readBlockList(runID, artifact.ID)
|
||||
if err != nil {
|
||||
log.Warn("Failed to read BlockList, fallback to old behavior: %v", err)
|
||||
chunkMap, err := listChunksByRunID(r.fs, runID)
|
||||
if err != nil {
|
||||
log.Error("Error merge chunks: %v", err)
|
||||
ctx.HTTPError(http.StatusInternalServerError, "Error merge chunks")
|
||||
return
|
||||
}
|
||||
chunks, ok = chunkMap[artifact.ID]
|
||||
if !ok {
|
||||
log.Error("Error merge chunks")
|
||||
ctx.HTTPError(http.StatusInternalServerError, "Error merge chunks")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
chunks, err = listChunksByRunIDV4(r.fs, runID, artifact.ID, blockList)
|
||||
if err != nil {
|
||||
log.Error("Error merge chunks: %v", err)
|
||||
ctx.HTTPError(http.StatusInternalServerError, "Error merge chunks")
|
||||
return
|
||||
}
|
||||
artifact.FileSize = chunks[len(chunks)-1].End + 1
|
||||
artifact.FileCompressedSize = chunks[len(chunks)-1].End + 1
|
||||
}
|
||||
|
||||
checksum := ""
|
||||
if req.Hash != nil {
|
||||
checksum = req.Hash.Value
|
||||
}
|
||||
if err := mergeChunksForArtifact(ctx, chunks, r.fs, artifact, checksum); err != nil {
|
||||
log.Error("Error merge chunks: %v", err)
|
||||
ctx.HTTPError(http.StatusInternalServerError, "Error merge chunks")
|
||||
return
|
||||
}
|
||||
|
||||
respData := FinalizeArtifactResponse{
|
||||
Ok: true,
|
||||
ArtifactId: artifact.ID,
|
||||
}
|
||||
r.sendProtbufBody(ctx, &respData)
|
||||
}

func (r *artifactV4Routes) listArtifacts(ctx *ArtifactContext) {
	var req ListArtifactsRequest

	if ok := r.parseProtbufBody(ctx, &req); !ok {
		return
	}
	_, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
	if !ok {
		return
	}

	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
		RunID:  runID,
		Status: int(actions.ArtifactStatusUploadConfirmed),
	})
	if err != nil {
		log.Error("Error getting artifacts: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}

	list := []*ListArtifactsResponse_MonolithArtifact{}

	table := map[string]*ListArtifactsResponse_MonolithArtifact{}
	for _, artifact := range artifacts {
		if _, ok := table[artifact.ArtifactName]; ok ||
			req.IdFilter != nil && artifact.ID != req.IdFilter.Value ||
			req.NameFilter != nil && artifact.ArtifactName != req.NameFilter.Value ||
			artifact.ArtifactName+".zip" != artifact.ArtifactPath ||
			artifact.ContentEncoding != ArtifactV4ContentEncoding {
			table[artifact.ArtifactName] = nil
			continue
		}

		table[artifact.ArtifactName] = &ListArtifactsResponse_MonolithArtifact{
			Name:                    artifact.ArtifactName,
			CreatedAt:               timestamppb.New(artifact.CreatedUnix.AsTime()),
			DatabaseId:              artifact.ID,
			WorkflowRunBackendId:    req.WorkflowRunBackendId,
			WorkflowJobRunBackendId: req.WorkflowJobRunBackendId,
			Size:                    artifact.FileSize,
		}
	}
	for _, artifact := range table {
		if artifact != nil {
			list = append(list, artifact)
		}
	}

	respData := ListArtifactsResponse{
		Artifacts: list,
	}
	r.sendProtbufBody(ctx, &respData)
}
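The optional filters above are protobuf wrapper types, so "unset" stays distinguishable from an empty value. A hedged sketch of how a caller might build the request (wrapperspb is google.golang.org/protobuf/types/known/wrapperspb; the ids are placeholders):

req := &ListArtifactsRequest{
	WorkflowRunBackendId:    runBackendID,
	WorkflowJobRunBackendId: jobBackendID,
	NameFilter:              wrapperspb.String("my-artifact"), // nil lists all names
	IdFilter:                wrapperspb.Int64(42),             // nil skips the id check
}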

func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) {
	var req GetSignedArtifactURLRequest

	if ok := r.parseProtbufBody(ctx, &req); !ok {
		return
	}
	_, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
	if !ok {
		return
	}

	artifactName := req.Name

	// get artifact by name
	artifact, err := r.getArtifactByName(ctx, runID, artifactName)
	if err != nil {
		log.Error("Error artifact not found: %v", err)
		ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
		return
	}
	if artifact.Status != actions.ArtifactStatusUploadConfirmed {
		log.Error("Error artifact not found: %s", artifact.Status.ToString())
		ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
		return
	}

	respData := GetSignedArtifactURLResponse{}

	if setting.Actions.ArtifactStorage.ServeDirect() {
		u, err := storage.ActionsArtifacts.URL(artifact.StoragePath, artifact.ArtifactPath, nil)
		if u != nil && err == nil {
			respData.SignedUrl = u.String()
		}
	}
	if respData.SignedUrl == "" {
		respData.SignedUrl = r.buildArtifactURL(ctx, "DownloadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID)
	}
	r.sendProtbufBody(ctx, &respData)
}

func (r *artifactV4Routes) downloadArtifact(ctx *ArtifactContext) {
	task, artifactName, ok := r.verifySignature(ctx, "DownloadArtifact")
	if !ok {
		return
	}

	// get artifact by name
	artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
	if err != nil {
		log.Error("Error artifact not found: %v", err)
		ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
		return
	}
	if artifact.Status != actions.ArtifactStatusUploadConfirmed {
		log.Error("Error artifact not found: %s", artifact.Status.ToString())
		ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
		return
	}

	// open the stored artifact and check the error instead of discarding it
	file, err := r.fs.Open(artifact.StoragePath)
	if err != nil {
		log.Error("Error opening artifact file %s: %v", artifact.StoragePath, err)
		ctx.HTTPError(http.StatusInternalServerError, "Error opening artifact file")
		return
	}
	defer file.Close()

	_, _ = io.Copy(ctx.Resp, file)
}

func (r *artifactV4Routes) deleteArtifact(ctx *ArtifactContext) {
	var req DeleteArtifactRequest

	if ok := r.parseProtbufBody(ctx, &req); !ok {
		return
	}
	_, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
	if !ok {
		return
	}

	// get artifact by name
	artifact, err := r.getArtifactByName(ctx, runID, req.Name)
	if err != nil {
		log.Error("Error artifact not found: %v", err)
		ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
		return
	}

	err = actions.SetArtifactNeedDelete(ctx, runID, req.Name)
	if err != nil {
		log.Error("Error deleting artifacts: %v", err)
		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}

	respData := DeleteArtifactResponse{
		Ok:         true,
		ArtifactId: artifact.ID,
	}
	r.sendProtbufBody(ctx, &respData)
}
36
routers/api/actions/ping/ping.go
Normal file
@@ -0,0 +1,36 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package ping

import (
	"context"
	"fmt"
	"net/http"

	"code.gitea.io/gitea/modules/log"

	pingv1 "code.gitea.io/actions-proto-go/ping/v1"
	"code.gitea.io/actions-proto-go/ping/v1/pingv1connect"
	"connectrpc.com/connect"
)

func NewPingServiceHandler() (string, http.Handler) {
	return pingv1connect.NewPingServiceHandler(&Service{})
}

var _ pingv1connect.PingServiceHandler = (*Service)(nil)

type Service struct{}

func (s *Service) Ping(
	ctx context.Context,
	req *connect.Request[pingv1.PingRequest],
) (*connect.Response[pingv1.PingResponse], error) {
	log.Trace("Content-Type: %s", req.Header().Get("Content-Type"))
	log.Trace("User-Agent: %s", req.Header().Get("User-Agent"))
	res := connect.NewResponse(&pingv1.PingResponse{
		Data: fmt.Sprintf("Hello, %s!", req.Msg.Data),
	})
	return res, nil
}
60
routers/api/actions/ping/ping_test.go
Normal file
@@ -0,0 +1,60 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package ping

import (
	"net/http"
	"net/http/httptest"
	"testing"

	pingv1 "code.gitea.io/actions-proto-go/ping/v1"
	"code.gitea.io/actions-proto-go/ping/v1/pingv1connect"
	"connectrpc.com/connect"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestService(t *testing.T) {
	mux := http.NewServeMux()
	mux.Handle(pingv1connect.NewPingServiceHandler(
		&Service{},
	))
	MainServiceTest(t, mux)
}

func MainServiceTest(t *testing.T, h http.Handler) {
	t.Parallel()
	server := httptest.NewUnstartedServer(h)
	server.EnableHTTP2 = true
	server.StartTLS()
	defer server.Close()

	connectClient := pingv1connect.NewPingServiceClient(
		server.Client(),
		server.URL,
	)

	grpcClient := pingv1connect.NewPingServiceClient(
		server.Client(),
		server.URL,
		connect.WithGRPC(),
	)

	grpcWebClient := pingv1connect.NewPingServiceClient(
		server.Client(),
		server.URL,
		connect.WithGRPCWeb(),
	)

	clients := []pingv1connect.PingServiceClient{connectClient, grpcClient, grpcWebClient}
	t.Run("ping request", func(t *testing.T) {
		for _, client := range clients {
			result, err := client.Ping(t.Context(), connect.NewRequest(&pingv1.PingRequest{
				Data: "foobar",
			}))
			require.NoError(t, err)
			assert.Equal(t, "Hello, foobar!", result.Msg.Data)
		}
	})
}
80
routers/api/actions/runner/interceptor.go
Normal file
@@ -0,0 +1,80 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package runner

import (
	"context"
	"crypto/subtle"
	"errors"
	"strings"

	actions_model "code.gitea.io/gitea/models/actions"
	auth_model "code.gitea.io/gitea/models/auth"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/timeutil"
	"code.gitea.io/gitea/modules/util"

	"connectrpc.com/connect"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

const (
	uuidHeaderKey  = "x-runner-uuid"
	tokenHeaderKey = "x-runner-token"
)

var withRunner = connect.WithInterceptors(connect.UnaryInterceptorFunc(func(unaryFunc connect.UnaryFunc) connect.UnaryFunc {
	return func(ctx context.Context, request connect.AnyRequest) (connect.AnyResponse, error) {
		methodName := getMethodName(request)
		if methodName == "Register" {
			return unaryFunc(ctx, request)
		}
		uuid := request.Header().Get(uuidHeaderKey)
		token := request.Header().Get(tokenHeaderKey)

		runner, err := actions_model.GetRunnerByUUID(ctx, uuid)
		if err != nil {
			if errors.Is(err, util.ErrNotExist) {
				return nil, status.Error(codes.Unauthenticated, "unregistered runner")
			}
			return nil, status.Error(codes.Internal, err.Error())
		}
		if subtle.ConstantTimeCompare([]byte(runner.TokenHash), []byte(auth_model.HashToken(token, runner.TokenSalt))) != 1 {
			return nil, status.Error(codes.Unauthenticated, "unregistered runner")
		}

		cols := []string{"last_online"}
		runner.LastOnline = timeutil.TimeStampNow()
		if methodName == "UpdateTask" || methodName == "UpdateLog" {
			runner.LastActive = timeutil.TimeStampNow()
			cols = append(cols, "last_active")
		}
		if err := actions_model.UpdateRunner(ctx, runner, cols...); err != nil {
			log.Error("can't update runner status: %v", err)
		}

		ctx = context.WithValue(ctx, runnerCtxKey{}, runner)
		return unaryFunc(ctx, request)
	}
}))

func getMethodName(req connect.AnyRequest) string {
	splits := strings.Split(req.Spec().Procedure, "/")
	if len(splits) > 0 {
		return splits[len(splits)-1]
	}
	return ""
}

type runnerCtxKey struct{}

func GetRunner(ctx context.Context) *actions_model.ActionRunner {
	if v := ctx.Value(runnerCtxKey{}); v != nil {
		if r, ok := v.(*actions_model.ActionRunner); ok {
			return r
		}
	}
	return nil
}
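For context, a minimal client-side counterpart (not part of this commit) showing how a runner could attach the two headers this interceptor verifies. The header keys are the constants above; the wiring is illustrative only:

// Hedged sketch: a connect client option that sets the runner auth headers
// on every outgoing request, mirroring what the server interceptor checks.
func withRunnerAuth(uuid, token string) connect.Option {
	return connect.WithInterceptors(connect.UnaryInterceptorFunc(func(next connect.UnaryFunc) connect.UnaryFunc {
		return func(ctx context.Context, req connect.AnyRequest) (connect.AnyResponse, error) {
			req.Header().Set(uuidHeaderKey, uuid)   // "x-runner-uuid"
			req.Header().Set(tokenHeaderKey, token) // "x-runner-token"
			return next(ctx, req)
		}
	}))
}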
300
routers/api/actions/runner/runner.go
Normal file
@@ -0,0 +1,300 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package runner

import (
	"context"
	"errors"
	"net/http"

	actions_model "code.gitea.io/gitea/models/actions"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/actions"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/util"
	actions_service "code.gitea.io/gitea/services/actions"
	notify_service "code.gitea.io/gitea/services/notify"

	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
	"code.gitea.io/actions-proto-go/runner/v1/runnerv1connect"
	"connectrpc.com/connect"
	gouuid "github.com/google/uuid"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func NewRunnerServiceHandler() (string, http.Handler) {
	return runnerv1connect.NewRunnerServiceHandler(
		&Service{},
		connect.WithCompressMinBytes(1024),
		withRunner,
	)
}

var _ runnerv1connect.RunnerServiceClient = (*Service)(nil)

type Service struct{}

// Register registers a new runner.
func (s *Service) Register(
	ctx context.Context,
	req *connect.Request[runnerv1.RegisterRequest],
) (*connect.Response[runnerv1.RegisterResponse], error) {
	if req.Msg.Token == "" || req.Msg.Name == "" {
		return nil, errors.New("missing runner token or name")
	}

	runnerToken, err := actions_model.GetRunnerToken(ctx, req.Msg.Token)
	if err != nil {
		return nil, errors.New("runner registration token not found")
	}

	if !runnerToken.IsActive {
		return nil, errors.New("runner registration token has been invalidated, please use the latest one")
	}

	if runnerToken.OwnerID > 0 {
		if _, err := user_model.GetUserByID(ctx, runnerToken.OwnerID); err != nil {
			return nil, errors.New("owner of the token not found")
		}
	}

	if runnerToken.RepoID > 0 {
		if _, err := repo_model.GetRepositoryByID(ctx, runnerToken.RepoID); err != nil {
			return nil, errors.New("repository of the token not found")
		}
	}

	labels := req.Msg.Labels

	// build the new runner
	name := util.EllipsisDisplayString(req.Msg.Name, 255)
	runner := &actions_model.ActionRunner{
		UUID:        gouuid.New().String(),
		Name:        name,
		OwnerID:     runnerToken.OwnerID,
		RepoID:      runnerToken.RepoID,
		Version:     req.Msg.Version,
		AgentLabels: labels,
		Ephemeral:   req.Msg.Ephemeral,
	}
	if err := runner.GenerateToken(); err != nil {
		return nil, errors.New("can't generate token")
	}

	// insert the new runner
	if err := actions_model.CreateRunner(ctx, runner); err != nil {
		return nil, errors.New("can't create new runner")
	}

	// update token status
	runnerToken.IsActive = true
	if err := actions_model.UpdateRunnerToken(ctx, runnerToken, "is_active"); err != nil {
		return nil, errors.New("can't update runner token status")
	}

	res := connect.NewResponse(&runnerv1.RegisterResponse{
		Runner: &runnerv1.Runner{
			Id:        runner.ID,
			Uuid:      runner.UUID,
			Token:     runner.Token,
			Name:      runner.Name,
			Version:   runner.Version,
			Labels:    runner.AgentLabels,
			Ephemeral: runner.Ephemeral,
		},
	})

	return res, nil
}
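A hedged sketch of this handshake from the runner's side, assuming a client built with runnerv1connect.NewRunnerServiceClient; the token value is a placeholder. Register is the only method the interceptor lets through unauthenticated, so the returned credentials are what every later call presents in the headers:

// Illustrative only; client, ctx and regToken are placeholders.
res, err := client.Register(ctx, connect.NewRequest(&runnerv1.RegisterRequest{
	Token:   regToken, // one-time registration token issued by Gitea
	Name:    "my-runner",
	Version: "0.0.1",
	Labels:  []string{"ubuntu-latest"},
}))
if err != nil {
	return err
}
// Persist these: later calls authenticate via x-runner-uuid / x-runner-token.
uuid, token := res.Msg.Runner.Uuid, res.Msg.Runner.Token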

// Declare updates the runner's labels and version.
func (s *Service) Declare(
	ctx context.Context,
	req *connect.Request[runnerv1.DeclareRequest],
) (*connect.Response[runnerv1.DeclareResponse], error) {
	runner := GetRunner(ctx)
	runner.AgentLabels = req.Msg.Labels
	runner.Version = req.Msg.Version
	if err := actions_model.UpdateRunner(ctx, runner, "agent_labels", "version"); err != nil {
		return nil, status.Errorf(codes.Internal, "update runner: %v", err)
	}

	return connect.NewResponse(&runnerv1.DeclareResponse{
		Runner: &runnerv1.Runner{
			Id:      runner.ID,
			Uuid:    runner.UUID,
			Token:   runner.Token,
			Name:    runner.Name,
			Version: runner.Version,
			Labels:  runner.AgentLabels,
		},
	}), nil
}

// FetchTask assigns a task to the runner
func (s *Service) FetchTask(
	ctx context.Context,
	req *connect.Request[runnerv1.FetchTaskRequest],
) (*connect.Response[runnerv1.FetchTaskResponse], error) {
	runner := GetRunner(ctx)

	var task *runnerv1.Task
	tasksVersion := req.Msg.TasksVersion // task version from runner
	latestVersion, err := actions_model.GetTasksVersionByScope(ctx, runner.OwnerID, runner.RepoID)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "query tasks version failed: %v", err)
	} else if latestVersion == 0 {
		if err := actions_model.IncreaseTaskVersion(ctx, runner.OwnerID, runner.RepoID); err != nil {
			return nil, status.Errorf(codes.Internal, "fail to increase task version: %v", err)
		}
		// If we didn't increase `latestVersion` here, the response of FetchTask
		// would return tasksVersion as zero, and the runner would treat it as
		// an old version of Gitea.
		latestVersion++
	}

	if tasksVersion != latestVersion {
		// If the task version in the request is not equal to the version in the db,
		// there may still be tasks that have not yet been assigned.
		// Try to pick one for the runner that sent the request.
		if t, ok, err := actions_service.PickTask(ctx, runner); err != nil {
			log.Error("pick task failed: %v", err)
			return nil, status.Errorf(codes.Internal, "pick task: %v", err)
		} else if ok {
			task = t
		}
	}
	res := connect.NewResponse(&runnerv1.FetchTaskResponse{
		Task:         task,
		TasksVersion: latestVersion,
	})
	return res, nil
}
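A hedged sketch of the version-gated polling this enables on the runner: echo the last TasksVersion seen and only expect a task when the versions diverge. client, ctx and runTask are placeholders, and the poll interval is an assumption:

// Illustrative polling loop against FetchTask.
var tasksVersion int64
for {
	res, err := client.FetchTask(ctx, connect.NewRequest(&runnerv1.FetchTaskRequest{
		TasksVersion: tasksVersion, // last version this runner has seen
	}))
	if err != nil {
		time.Sleep(2 * time.Second) // back off and retry (error handling elided)
		continue
	}
	tasksVersion = res.Msg.TasksVersion // resync with the server's version
	if t := res.Msg.Task; t != nil {
		runTask(t) // execute the assigned task
	}
	time.Sleep(2 * time.Second) // assumed poll interval
}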

// UpdateTask updates the task status.
func (s *Service) UpdateTask(
	ctx context.Context,
	req *connect.Request[runnerv1.UpdateTaskRequest],
) (*connect.Response[runnerv1.UpdateTaskResponse], error) {
	runner := GetRunner(ctx)

	task, err := actions_model.UpdateTaskByState(ctx, runner.ID, req.Msg.State)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "update task: %v", err)
	}

	for k, v := range req.Msg.Outputs {
		if len(k) > 255 {
			log.Warn("Ignore the output of task %d because the key is too long: %q", task.ID, k)
			continue
		}
		// The value can be a maximum of 1 MB
		if l := len(v); l > 1024*1024 {
			log.Warn("Ignore the output %q of task %d because the value is too long: %v", k, task.ID, l)
			continue
		}
		// There's another limitation on GitHub that the total of all outputs in a workflow run can be a maximum of 50 MB.
		// We don't check the total size here because it's not easy to do, and it isn't really worth it.
		// See https://docs.github.com/en/actions/using-jobs/defining-outputs-for-jobs

		if err := actions_model.InsertTaskOutputIfNotExist(ctx, task.ID, k, v); err != nil {
			log.Warn("Failed to insert the output %q of task %d: %v", k, task.ID, err)
			// It's ok not to return errors, the runner will resend the outputs.
		}
	}
	sentOutputs, err := actions_model.FindTaskOutputKeyByTaskID(ctx, task.ID)
	if err != nil {
		log.Warn("Failed to find the sent outputs of task %d: %v", task.ID, err)
		// It's ok not to return an error here; it can be handled when the runner resends the outputs.
	}

	if err := task.LoadJob(ctx); err != nil {
		return nil, status.Errorf(codes.Internal, "load job: %v", err)
	}
	if err := task.Job.LoadAttributes(ctx); err != nil {
		return nil, status.Errorf(codes.Internal, "load run: %v", err)
	}

	// don't create commit status for cron job
	if task.Job.Run.ScheduleID == 0 {
		actions_service.CreateCommitStatus(ctx, task.Job)
	}

	if task.Status.IsDone() {
		notify_service.WorkflowJobStatusUpdate(ctx, task.Job.Run.Repo, task.Job.Run.TriggerUser, task.Job, task)
	}

	if req.Msg.State.Result != runnerv1.Result_RESULT_UNSPECIFIED {
		if err := actions_service.EmitJobsIfReady(task.Job.RunID); err != nil {
			log.Error("Emit ready jobs of run %d: %v", task.Job.RunID, err)
		}
	}

	return connect.NewResponse(&runnerv1.UpdateTaskResponse{
		State: &runnerv1.TaskState{
			Id:     req.Msg.State.Id,
			Result: task.Status.AsResult(),
		},
		SentOutputs: sentOutputs,
	}), nil
}

// UpdateLog uploads the log of the task.
func (s *Service) UpdateLog(
	ctx context.Context,
	req *connect.Request[runnerv1.UpdateLogRequest],
) (*connect.Response[runnerv1.UpdateLogResponse], error) {
	runner := GetRunner(ctx)

	res := connect.NewResponse(&runnerv1.UpdateLogResponse{})

	task, err := actions_model.GetTaskByID(ctx, req.Msg.TaskId)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "get task: %v", err)
	} else if runner.ID != task.RunnerID {
		return nil, status.Errorf(codes.Internal, "invalid runner for task")
	}
	ack := task.LogLength

	if len(req.Msg.Rows) == 0 || req.Msg.Index > ack || int64(len(req.Msg.Rows))+req.Msg.Index <= ack {
		res.Msg.AckIndex = ack
		return res, nil
	}

	if task.LogInStorage {
		return nil, status.Errorf(codes.AlreadyExists, "log file has been archived")
	}

	rows := req.Msg.Rows[ack-req.Msg.Index:]
	ns, err := actions.WriteLogs(ctx, task.LogFilename, task.LogSize, rows)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "write logs: %v", err)
	}
	task.LogLength += int64(len(rows))
	for _, n := range ns {
		task.LogIndexes = append(task.LogIndexes, task.LogSize)
		task.LogSize += int64(n)
	}

	res.Msg.AckIndex = task.LogLength

	var remove func()
	if req.Msg.NoMore {
		task.LogInStorage = true
		remove, err = actions.TransferLogs(ctx, task.LogFilename)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "transfer logs: %v", err)
		}
	}

	if err := actions_model.UpdateTask(ctx, task, "log_indexes", "log_length", "log_size", "log_in_storage"); err != nil {
		return nil, status.Errorf(codes.Internal, "update task: %v", err)
	}
	if remove != nil {
		remove()
	}

	return res, nil
}
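The ack arithmetic above is easiest to see with concrete numbers; a small worked sketch with illustrative values:

// 10 rows persisted (ack), and the runner resends a batch starting at
// index 8 with 5 rows: rows 8 and 9 are duplicates, so only
// rows[ack-index:] = rows[2:] (3 rows) are appended.
ack, index, n := int64(10), int64(8), int64(5)
skip := ack - index      // 2 rows already stored
appended := n - skip     // 3 new rows written
newAck := ack + appended // 13, returned as AckIndex
fmt.Println(skip, appended, newAck) // 2 3 13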