mirror of https://codeberg.org/forgejo/forgejo.git
synced 2024-11-23 11:01:49 +01:00
Merge pull request '[gitea] week 2024-40 cherry pick (gitea/main -> forgejo)' (#5416) from earl-warren/wcp/2024-40 into forgejo
Some checks are pending
Integration tests for the release process / release-simulation (push) Waiting to run
/ release (push) Waiting to run
testing / backend-checks (push) Waiting to run
testing / frontend-checks (push) Waiting to run
testing / test-unit (push) Blocked by required conditions
testing / test-remote-cacher (map[image:docker.io/bitnami/redis:7.2 port:6379]) (push) Blocked by required conditions
testing / test-remote-cacher (map[image:docker.io/bitnami/valkey:7.2 port:6379]) (push) Blocked by required conditions
testing / test-remote-cacher (map[image:ghcr.io/microsoft/garnet-alpine:1.0.14 port:6379]) (push) Blocked by required conditions
testing / test-remote-cacher (map[image:registry.redict.io/redict:7.3.0-scratch port:6379]) (push) Blocked by required conditions
testing / test-mysql (push) Blocked by required conditions
testing / test-pgsql (push) Blocked by required conditions
testing / test-sqlite (push) Blocked by required conditions
testing / security-check (push) Blocked by required conditions
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/5416
Reviewed-by: Shiny Nematoda <snematoda@noreply.codeberg.org>
commit aec55ac1b6
16 changed files with 392 additions and 78 deletions
cmd/serv.go: 11 lines changed
@@ -147,6 +147,12 @@ func runServ(c *cli.Context) error {
return nil
}

defer func() {
if err := recover(); err != nil {
_ = fail(ctx, "Internal Server Error", "Panic: %v\n%s", err, log.Stack(2))
}
}()

keys := strings.Split(c.Args().First(), "-")
if len(keys) != 2 || keys[0] != "key" {
return fail(ctx, "Key ID format error", "Invalid key argument: %s", c.Args().First())

@@ -193,10 +199,7 @@ func runServ(c *cli.Context) error {
}

verb := words[0]
repoPath := words[1]
if repoPath[0] == '/' {
repoPath = repoPath[1:]
}
repoPath := strings.TrimPrefix(words[1], "/")

var lfsVerb string
if verb == lfsAuthenticateVerb {
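The second hunk replaces the manual leading-slash check with strings.TrimPrefix, which removes the prefix only when it is present. A minimal, self-contained illustration of that equivalence (not part of the diff; the sample paths are made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// With a leading slash the prefix is stripped ...
	fmt.Println(strings.TrimPrefix("/owner/repo.git", "/")) // owner/repo.git
	// ... and without one the input is returned unchanged, so no explicit check is needed.
	fmt.Println(strings.TrimPrefix("owner/repo.git", "/")) // owner/repo.git
}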
@@ -18,8 +18,32 @@ func FullSteps(task *actions_model.ActionTask) []*actions_model.ActionTaskStep {
return fullStepsOfEmptySteps(task)
}

firstStep := task.Steps[0]
// firstStep is the first step that has run or running, not include preStep.
// For example,
// 1. preStep(Success) -> step1(Success) -> step2(Running) -> step3(Waiting) -> postStep(Waiting): firstStep is step1.
// 2. preStep(Success) -> step1(Skipped) -> step2(Success) -> postStep(Success): firstStep is step2.
// 3. preStep(Success) -> step1(Running) -> step2(Waiting) -> postStep(Waiting): firstStep is step1.
// 4. preStep(Success) -> step1(Skipped) -> step2(Skipped) -> postStep(Skipped): firstStep is nil.
// 5. preStep(Success) -> step1(Cancelled) -> step2(Cancelled) -> postStep(Cancelled): firstStep is nil.
var firstStep *actions_model.ActionTaskStep
// lastHasRunStep is the last step that has run.
// For example,
// 1. preStep(Success) -> step1(Success) -> step2(Running) -> step3(Waiting) -> postStep(Waiting): lastHasRunStep is step1.
// 2. preStep(Success) -> step1(Success) -> step2(Success) -> step3(Success) -> postStep(Success): lastHasRunStep is step3.
// 3. preStep(Success) -> step1(Success) -> step2(Failure) -> step3 -> postStep(Waiting): lastHasRunStep is step2.
// So its Stopped is the Started of postStep when there are no more steps to run.
var lastHasRunStep *actions_model.ActionTaskStep

var logIndex int64
for _, step := range task.Steps {
if firstStep == nil && (step.Status.HasRun() || step.Status.IsRunning()) {
firstStep = step
}
if step.Status.HasRun() {
lastHasRunStep = step
}
logIndex += step.LogLength
}

preStep := &actions_model.ActionTaskStep{
Name: preStepName,

@@ -28,32 +52,17 @@ func FullSteps(task *actions_model.ActionTask) []*actions_model.ActionTaskStep {
Status: actions_model.StatusRunning,
}

if firstStep.Status.HasRun() || firstStep.Status.IsRunning() {
// No step has run or is running, so preStep is equal to the task
if firstStep == nil {
preStep.Stopped = task.Stopped
preStep.Status = task.Status
} else {
preStep.LogLength = firstStep.LogIndex
preStep.Stopped = firstStep.Started
preStep.Status = actions_model.StatusSuccess
} else if task.Status.IsDone() {
preStep.Stopped = task.Stopped
preStep.Status = actions_model.StatusFailure
if task.Status.IsSkipped() {
preStep.Status = actions_model.StatusSkipped
}
}
logIndex += preStep.LogLength

// lastHasRunStep is the last step that has run.
// For example,
// 1. preStep(Success) -> step1(Success) -> step2(Running) -> step3(Waiting) -> postStep(Waiting): lastHasRunStep is step1.
// 2. preStep(Success) -> step1(Success) -> step2(Success) -> step3(Success) -> postStep(Success): lastHasRunStep is step3.
// 3. preStep(Success) -> step1(Success) -> step2(Failure) -> step3 -> postStep(Waiting): lastHasRunStep is step2.
// So its Stopped is the Started of postStep when there are no more steps to run.
var lastHasRunStep *actions_model.ActionTaskStep
for _, step := range task.Steps {
if step.Status.HasRun() {
lastHasRunStep = step
}
logIndex += step.LogLength
}
if lastHasRunStep == nil {
lastHasRunStep = preStep
}
@@ -137,6 +137,25 @@ func TestFullSteps(t *testing.T) {
{Name: postStepName, Status: actions_model.StatusSkipped, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
},
},
{
name: "first step is skipped",
task: &actions_model.ActionTask{
Steps: []*actions_model.ActionTaskStep{
{Status: actions_model.StatusSkipped, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
{Status: actions_model.StatusSuccess, LogIndex: 10, LogLength: 80, Started: 10010, Stopped: 10090},
},
Status: actions_model.StatusSuccess,
Started: 10000,
Stopped: 10100,
LogLength: 100,
},
want: []*actions_model.ActionTaskStep{
{Name: preStepName, Status: actions_model.StatusSuccess, LogIndex: 0, LogLength: 10, Started: 10000, Stopped: 10010},
{Status: actions_model.StatusSkipped, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
{Status: actions_model.StatusSuccess, LogIndex: 10, LogLength: 80, Started: 10010, Stopped: 10090},
{Name: postStepName, Status: actions_model.StatusSuccess, LogIndex: 90, LogLength: 10, Started: 10090, Stopped: 10100},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -288,6 +288,8 @@ func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int
searchRequest.AddFacet("languages", bleve.NewFacetRequest("Language", 10))
}

searchRequest.SortBy([]string{"-_score", "UpdatedAt"})

result, err := b.inner.Indexer.SearchInContext(ctx, searchRequest)
if err != nil {
return 0, nil, nil, err

@@ -318,7 +318,8 @@ func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int
NumOfFragments(0). // return all highting content on fragments
HighlighterType("fvh"),
).
Sort("repo_id", true).
Sort("_score", false).
Sort("updated_at", true).
From(start).Size(pageSize).
Do(ctx)
if err != nil {

@@ -349,7 +350,8 @@ func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int
NumOfFragments(0). // return all highting content on fragments
HighlighterType("fvh"),
).
Sort("repo_id", true).
Sort("_score", false).
Sort("updated_at", true).
From(start).Size(pageSize).
Do(ctx)
if err != nil {
release-notes/5416.md: 3 lines (new file)

@@ -0,0 +1,3 @@
feat: [commit](https://codeberg.org/forgejo/forgejo/commit/8178d6eaba64d05799fd3b62fa889bd13bee07c7) Code search results when using the bleve indexer are sorted by relevance.
fix: [commit](https://codeberg.org/forgejo/forgejo/commit/b496317b5a2aea970bc94ccf6fcde35cd417ec20) After migrating a repository that contains merged pull requests, the branch is missing and cannot be deleted.
fix: [commit](https://codeberg.org/forgejo/forgejo/commit/a226064711899da07d6b1455a68ef758f2f3e7e0) Forgejo Actions artifact v4 upload above 8MB.
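The relevance sorting mentioned in the first release note corresponds to the SortBy call added in the bleve indexer hunk above. A minimal standalone sketch of the same idea against an in-memory bleve index (the documents and the "UpdatedAt" field here are illustrative assumptions, not Forgejo's index schema):

package main

import (
	"fmt"

	"github.com/blevesearch/bleve/v2"
)

func main() {
	mapping := bleve.NewIndexMapping()
	idx, err := bleve.NewMemOnly(mapping)
	if err != nil {
		panic(err)
	}
	// Two toy documents; "UpdatedAt" stands in for the repo field used by the indexer.
	_ = idx.Index("doc1", map[string]interface{}{"Content": "search relevance example", "UpdatedAt": 1})
	_ = idx.Index("doc2", map[string]interface{}{"Content": "another example", "UpdatedAt": 2})

	req := bleve.NewSearchRequest(bleve.NewMatchQuery("example"))
	// Best score first, then most recently updated, mirroring the diff above.
	req.SortBy([]string{"-_score", "UpdatedAt"})

	res, err := idx.Search(req)
	if err != nil {
		panic(err)
	}
	for _, hit := range res.Hits {
		fmt.Println(hit.ID, hit.Score)
	}
}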
@@ -123,6 +123,54 @@ func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chun
return chunksMap, nil
}

func listChunksByRunIDV4(st storage.ObjectStorage, runID, artifactID int64, blist *BlockList) ([]*chunkFileItem, error) {
storageDir := fmt.Sprintf("tmpv4%d", runID)
var chunks []*chunkFileItem
chunkMap := map[string]*chunkFileItem{}
dummy := &chunkFileItem{}
for _, name := range blist.Latest {
chunkMap[name] = dummy
}
if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
baseName := filepath.Base(fpath)
if !strings.HasPrefix(baseName, "block-") {
return nil
}
// when read chunks from storage, it only contains storage dir and basename,
// no matter the subdirectory setting in storage config
item := chunkFileItem{Path: storageDir + "/" + baseName, ArtifactID: artifactID}
var size int64
var b64chunkName string
if _, err := fmt.Sscanf(baseName, "block-%d-%d-%s", &item.RunID, &size, &b64chunkName); err != nil {
return fmt.Errorf("parse content range error: %v", err)
}
rchunkName, err := base64.URLEncoding.DecodeString(b64chunkName)
if err != nil {
return fmt.Errorf("failed to parse chunkName: %v", err)
}
chunkName := string(rchunkName)
item.End = item.Start + size - 1
if _, ok := chunkMap[chunkName]; ok {
chunkMap[chunkName] = &item
}
return nil
}); err != nil {
return nil, err
}
for i, name := range blist.Latest {
chunk, ok := chunkMap[name]
if !ok || chunk.Path == "" {
return nil, fmt.Errorf("missing Chunk (%d/%d): %s", i, len(blist.Latest), name)
}
chunks = append(chunks, chunk)
if i > 0 {
chunk.Start = chunkMap[blist.Latest[i-1]].End + 1
chunk.End += chunk.Start
}
}
return chunks, nil
}

func mergeChunksForRun(ctx *ArtifactContext, st storage.ObjectStorage, runID int64, artifactName string) error {
// read all db artifacts by name
artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{

@@ -230,7 +278,7 @@ func mergeChunksForArtifact(ctx *ArtifactContext, chunks []*chunkFileItem, st st
rawChecksum := hash.Sum(nil)
actualChecksum := hex.EncodeToString(rawChecksum)
if !strings.HasSuffix(checksum, actualChecksum) {
return fmt.Errorf("update artifact error checksum is invalid")
return fmt.Errorf("update artifact error checksum is invalid %v vs %v", checksum, actualChecksum)
}
}
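The new listChunksByRunIDV4 recovers each block's run ID, size, and base64-encoded block name from its storage file name with fmt.Sscanf. A small, self-contained sketch of just that parsing step (the example file name is made up, but follows the block-<runID>-<size>-<base64 block id> shape written by the upload handler):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	baseName := "block-792-1024-" + base64.URLEncoding.EncodeToString([]byte("block1"))

	var runID, size int64
	var b64chunkName string
	if _, err := fmt.Sscanf(baseName, "block-%d-%d-%s", &runID, &size, &b64chunkName); err != nil {
		panic(err)
	}
	rawName, err := base64.URLEncoding.DecodeString(b64chunkName)
	if err != nil {
		panic(err)
	}
	fmt.Println(runID, size, string(rawName)) // 792 1024 block1
}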
@@ -24,8 +24,15 @@ package actions
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=block
// 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock
// 1.4. Unknown xml payload to Blobstorage (unauthenticated request), ignored for now
// 1.4. BlockList xml payload to Blobstorage (unauthenticated request)
// Files of about 800MB are parallel in parallel and / or out of order, this file is needed to enshure the correct order
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList
// Request
// <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
// <BlockList>
// <Latest>blockId1</Latest>
// <Latest>blockId2</Latest>
// </BlockList>
// 1.5. FinalizeArtifact
// Post: /twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact
// Request
@@ -82,6 +89,7 @@ import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"net/http"

@@ -153,31 +161,34 @@ func ArtifactsV4Routes(prefix string) *web.Route {
return m
}

func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID int64) []byte {
func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID, artifactID int64) []byte {
mac := hmac.New(sha256.New, setting.GetGeneralTokenSigningSecret())
mac.Write([]byte(endp))
mac.Write([]byte(expires))
mac.Write([]byte(artifactName))
mac.Write([]byte(fmt.Sprint(taskID)))
mac.Write([]byte(fmt.Sprint(artifactID)))
return mac.Sum(nil)
}

func (r artifactV4Routes) buildArtifactURL(endp, artifactName string, taskID int64) string {
func (r artifactV4Routes) buildArtifactURL(endp, artifactName string, taskID, artifactID int64) string {
expires := time.Now().Add(60 * time.Minute).Format("2006-01-02 15:04:05.999999999 -0700 MST")
uploadURL := strings.TrimSuffix(setting.AppURL, "/") + strings.TrimSuffix(r.prefix, "/") +
"/" + endp + "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID)) + "&expires=" + url.QueryEscape(expires) + "&artifactName=" + url.QueryEscape(artifactName) + "&taskID=" + fmt.Sprint(taskID)
"/" + endp + "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID, artifactID)) + "&expires=" + url.QueryEscape(expires) + "&artifactName=" + url.QueryEscape(artifactName) + "&taskID=" + fmt.Sprint(taskID) + "&artifactID=" + fmt.Sprint(artifactID)
return uploadURL
}

func (r artifactV4Routes) verifySignature(ctx *ArtifactContext, endp string) (*actions.ActionTask, string, bool) {
rawTaskID := ctx.Req.URL.Query().Get("taskID")
rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
sig := ctx.Req.URL.Query().Get("sig")
expires := ctx.Req.URL.Query().Get("expires")
artifactName := ctx.Req.URL.Query().Get("artifactName")
dsig, _ := base64.URLEncoding.DecodeString(sig)
taskID, _ := strconv.ParseInt(rawTaskID, 10, 64)
artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)

expecedsig := r.buildSignature(endp, expires, artifactName, taskID)
expecedsig := r.buildSignature(endp, expires, artifactName, taskID, artifactID)
if !hmac.Equal(dsig, expecedsig) {
log.Error("Error unauthorized")
ctx.Error(http.StatusUnauthorized, "Error unauthorized")
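buildSignature now also mixes the artifact ID into the HMAC, so a signed upload URL is only valid for the artifact it was issued for. A minimal sketch of the same construction with a stand-in secret (the secret and field values below are assumptions, not Forgejo's real signing secret):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func buildSignature(secret []byte, endp, expires, artifactName string, taskID, artifactID int64) []byte {
	mac := hmac.New(sha256.New, secret)
	mac.Write([]byte(endp))
	mac.Write([]byte(expires))
	mac.Write([]byte(artifactName))
	mac.Write([]byte(fmt.Sprint(taskID)))
	mac.Write([]byte(fmt.Sprint(artifactID)))
	return mac.Sum(nil)
}

func main() {
	secret := []byte("stand-in signing secret")
	sig := buildSignature(secret, "UploadArtifact", "2024-01-23 21:48:37", "test", 75, 42)
	// The URL carries the base64 form; verification recomputes it and compares with hmac.Equal.
	fmt.Println(base64.URLEncoding.EncodeToString(sig))

	other := buildSignature(secret, "UploadArtifact", "2024-01-23 21:48:37", "test", 75, 43)
	fmt.Println(hmac.Equal(sig, other)) // false: a different artifact ID invalidates the signature
}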
@@ -272,6 +283,8 @@ func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
return
}
artifact.ContentEncoding = ArtifactV4ContentEncoding
artifact.FileSize = 0
artifact.FileCompressedSize = 0
if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
log.Error("Error UpdateArtifactByID: %v", err)
ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")

@@ -280,7 +293,7 @@ func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {

respData := CreateArtifactResponse{
Ok: true,
SignedUploadUrl: r.buildArtifactURL("UploadArtifact", artifactName, ctx.ActionTask.ID),
SignedUploadUrl: r.buildArtifactURL("UploadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID),
}
r.sendProtbufBody(ctx, &respData)
}

@@ -306,38 +319,77 @@ func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) {
comp := ctx.Req.URL.Query().Get("comp")
switch comp {
case "block", "appendBlock":
// get artifact by name
artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
if err != nil {
log.Error("Error artifact not found: %v", err)
ctx.Error(http.StatusNotFound, "Error artifact not found")
return
}
blockid := ctx.Req.URL.Query().Get("blockid")
if blockid == "" {
// get artifact by name
artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
if err != nil {
log.Error("Error artifact not found: %v", err)
ctx.Error(http.StatusNotFound, "Error artifact not found")
return
}

if comp == "block" {
artifact.FileSize = 0
artifact.FileCompressedSize = 0
_, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID)
if err != nil {
log.Error("Error runner api getting task: task is not running")
ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
return
}
artifact.FileCompressedSize += ctx.Req.ContentLength
artifact.FileSize += ctx.Req.ContentLength
if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
log.Error("Error UpdateArtifactByID: %v", err)
ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
return
}
} else {
_, err := r.fs.Save(fmt.Sprintf("tmpv4%d/block-%d-%d-%s", task.Job.RunID, task.Job.RunID, ctx.Req.ContentLength, base64.URLEncoding.EncodeToString([]byte(blockid))), ctx.Req.Body, -1)
if err != nil {
log.Error("Error runner api getting task: task is not running")
ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
return
}
}

_, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID)
ctx.JSON(http.StatusCreated, "appended")
case "blocklist":
rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)
_, err := r.fs.Save(fmt.Sprintf("tmpv4%d/%d-%d-blocklist", task.Job.RunID, task.Job.RunID, artifactID), ctx.Req.Body, -1)
if err != nil {
log.Error("Error runner api getting task: task is not running")
ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
return
}
artifact.FileCompressedSize += ctx.Req.ContentLength
artifact.FileSize += ctx.Req.ContentLength
if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
log.Error("Error UpdateArtifactByID: %v", err)
ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
return
}
ctx.JSON(http.StatusCreated, "appended")
case "blocklist":
ctx.JSON(http.StatusCreated, "created")
}
}

type BlockList struct {
Latest []string `xml:"Latest"`
}

type Latest struct {
Value string `xml:",chardata"`
}

func (r *artifactV4Routes) readBlockList(runID, artifactID int64) (*BlockList, error) {
blockListName := fmt.Sprintf("tmpv4%d/%d-%d-blocklist", runID, runID, artifactID)
s, err := r.fs.Open(blockListName)
if err != nil {
return nil, err
}

xdec := xml.NewDecoder(s)
blockList := &BlockList{}
err = xdec.Decode(blockList)

delerr := r.fs.Delete(blockListName)
if delerr != nil {
log.Warn("Failed to delete blockList %s: %v", blockListName, delerr)
}
return blockList, err
}

func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
var req FinalizeArtifactRequest

@@ -356,18 +408,34 @@ func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
ctx.Error(http.StatusNotFound, "Error artifact not found")
return
}
chunkMap, err := listChunksByRunID(r.fs, runID)

var chunks []*chunkFileItem
blockList, err := r.readBlockList(runID, artifact.ID)
if err != nil {
log.Error("Error merge chunks: %v", err)
ctx.Error(http.StatusInternalServerError, "Error merge chunks")
return
}
chunks, ok := chunkMap[artifact.ID]
if !ok {
log.Error("Error merge chunks")
ctx.Error(http.StatusInternalServerError, "Error merge chunks")
return
log.Warn("Failed to read BlockList, fallback to old behavior: %v", err)
chunkMap, err := listChunksByRunID(r.fs, runID)
if err != nil {
log.Error("Error merge chunks: %v", err)
ctx.Error(http.StatusInternalServerError, "Error merge chunks")
return
}
chunks, ok = chunkMap[artifact.ID]
if !ok {
log.Error("Error merge chunks")
ctx.Error(http.StatusInternalServerError, "Error merge chunks")
return
}
} else {
chunks, err = listChunksByRunIDV4(r.fs, runID, artifact.ID, blockList)
if err != nil {
log.Error("Error merge chunks: %v", err)
ctx.Error(http.StatusInternalServerError, "Error merge chunks")
return
}
artifact.FileSize = chunks[len(chunks)-1].End + 1
artifact.FileCompressedSize = chunks[len(chunks)-1].End + 1
}

checksum := ""
if req.Hash != nil {
checksum = req.Hash.Value

@@ -468,7 +536,7 @@ func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) {
}
}
if respData.SignedUrl == "" {
respData.SignedUrl = r.buildArtifactURL("DownloadArtifact", artifactName, ctx.ActionTask.ID)
respData.SignedUrl = r.buildArtifactURL("DownloadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID)
}
r.sendProtbufBody(ctx, &respData)
}
@@ -476,6 +476,7 @@ func issues(ctx *context.Context, milestoneID, projectID int64, isPullOption opt
ctx.Data["PosterID"] = posterID
ctx.Data["IsFuzzy"] = isFuzzy
ctx.Data["Keyword"] = keyword
ctx.Data["IsShowClosed"] = isShowClosed
switch {
case isShowClosed.Value():
ctx.Data["State"] = "closed"

@@ -430,13 +430,12 @@ func DeleteBranch(ctx context.Context, doer *user_model.User, repo *repo_model.R
}

rawBranch, err := git_model.GetBranch(ctx, repo.ID, branchName)
if err != nil {
if err != nil && !git_model.IsErrBranchNotExist(err) {
return fmt.Errorf("GetBranch: %v", err)
}

if rawBranch.IsDeleted {
return nil
}
// database branch record not exist or it's a deleted branch
notExist := git_model.IsErrBranchNotExist(err) || rawBranch.IsDeleted

commit, err := gitRepo.GetBranchCommit(branchName)
if err != nil {

@@ -444,8 +443,10 @@ func DeleteBranch(ctx context.Context, doer *user_model.User, repo *repo_model.R
}

if err := db.WithTx(ctx, func(ctx context.Context) error {
if err := git_model.AddDeletedBranch(ctx, repo.ID, branchName, doer.ID); err != nil {
return err
if !notExist {
if err := git_model.AddDeletedBranch(ctx, repo.ID, branchName, doer.ID); err != nil {
return err
}
}

return gitRepo.DeleteBranch(branchName, git.DeleteBranchOptions{
@@ -12,6 +12,7 @@ import (
"net/url"
"strconv"
"strings"
"unicode/utf8"

webhook_model "code.gitea.io/gitea/models/webhook"
"code.gitea.io/gitea/modules/git"

@@ -179,8 +180,14 @@ func (d discordConvertor) Push(p *api.PushPayload) (DiscordPayload, error) {
var text string
// for each commit, generate attachment text
for i, commit := range p.Commits {
text += fmt.Sprintf("[%s](%s) %s - %s", commit.ID[:7], commit.URL,
strings.TrimRight(commit.Message, "\r\n"), commit.Author.Name)
// limit the commit message display to just the summary, otherwise it would be hard to read
message := strings.TrimRight(strings.SplitN(commit.Message, "\n", 1)[0], "\r")

// a limit of 50 is set because GitHub does the same
if utf8.RuneCountInString(message) > 50 {
message = fmt.Sprintf("%.47s...", message)
}
text += fmt.Sprintf("[%s](%s) %s - %s", commit.ID[:7], commit.URL, message, commit.Author.Name)
// add linebreak to each commit but the last
if i < len(p.Commits)-1 {
text += "\n"
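The Discord payload change above caps each commit summary at 50 runes before it is added to the attachment text. A small standalone sketch of a cap like that, following the same 47-character-plus-ellipsis pattern as the hunk (the helper name is an assumption; the diff does this inline):

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// capSummary keeps summaries of up to 50 runes unchanged and shortens longer
// ones to their first 47 characters followed by "...".
func capSummary(message string) string {
	if utf8.RuneCountInString(message) > 50 {
		message = fmt.Sprintf("%.47s...", message)
	}
	return message
}

func main() {
	fmt.Println(capSummary("short summary"))
	fmt.Println(capSummary(strings.Repeat("abcde ", 12))) // 72 chars -> truncated to 47 + "..."
}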
@@ -80,6 +80,20 @@ func TestDiscordPayload(t *testing.T) {
assert.Equal(t, p.Sender.AvatarURL, pl.Embeds[0].Author.IconURL)
})

t.Run("PushWithLongCommitMessage", func(t *testing.T) {
p := pushTestMultilineCommitMessagePayload()

pl, err := dc.Push(p)
require.NoError(t, err)

assert.Len(t, pl.Embeds, 1)
assert.Equal(t, "[test/repo:test] 2 new commits", pl.Embeds[0].Title)
assert.Equal(t, "[2020558](http://localhost:3000/test/repo/commit/2020558fe2e34debb818a514715839cabd25e778) This is a commit summary ⚠️⚠️⚠️⚠️ containing 你好... - user1\n[2020558](http://localhost:3000/test/repo/commit/2020558fe2e34debb818a514715839cabd25e778) This is a commit summary ⚠️⚠️⚠️⚠️ containing 你好... - user1", pl.Embeds[0].Description)
assert.Equal(t, p.Sender.UserName, pl.Embeds[0].Author.Name)
assert.Equal(t, setting.AppURL+p.Sender.UserName, pl.Embeds[0].Author.URL)
assert.Equal(t, p.Sender.AvatarURL, pl.Embeds[0].Author.IconURL)
})

t.Run("Issue", func(t *testing.T) {
p := issueTestPayload()

@@ -64,9 +64,17 @@ func forkTestPayload() *api.ForkPayload {
}

func pushTestPayload() *api.PushPayload {
return pushTestPayloadWithCommitMessage("commit message")
}

func pushTestMultilineCommitMessagePayload() *api.PushPayload {
return pushTestPayloadWithCommitMessage("This is a commit summary ⚠️⚠️⚠️⚠️ containing 你好 ⚠️⚠️️\n\nThis is the message body.")
}

func pushTestPayloadWithCommitMessage(message string) *api.PushPayload {
commit := &api.PayloadCommit{
ID: "2020558fe2e34debb818a514715839cabd25e778",
Message: "commit message",
Message: message,
URL: "http://localhost:3000/test/repo/commit/2020558fe2e34debb818a514715839cabd25e778",
Author: &api.PayloadUser{
Name: "user1",

@@ -1,9 +1,9 @@
<div class="ui secondary filter menu">
{{if not .Repository.IsArchived}}
<!-- Action Button -->
{{if .IsShowClosed}}
{{if and .IsShowClosed.Has .IsShowClosed.Value}}
<button class="ui primary basic button issue-action" data-action="open" data-url="{{$.RepoLink}}/issues/status">{{ctx.Locale.Tr "repo.issues.action_open"}}</button>
{{else}}
{{else if and .IsShowClosed.Has (not .IsShowClosed.Value)}}
<button class="ui red basic button issue-action" data-action="close" data-url="{{$.RepoLink}}/issues/status">{{ctx.Locale.Tr "repo.issues.action_close"}}</button>
{{end}}
{{if $.IsRepoAdmin}}
@@ -7,12 +7,14 @@ import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"io"
"net/http"
"strings"
"testing"
"time"

"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/routers/api/actions"
actions_service "code.gitea.io/gitea/services/actions"
"code.gitea.io/gitea/tests"

@@ -175,6 +177,134 @@ func TestActionsArtifactV4UploadSingleFileWithRetentionDays(t *testing.T) {
assert.True(t, finalizeResp.Ok)
}

func TestActionsArtifactV4UploadSingleFileWithPotentialHarmfulBlockID(t *testing.T) {
defer tests.PrepareTestEnv(t)()

token, err := actions_service.CreateAuthorizationToken(48, 792, 193)
require.NoError(t, err)

// acquire artifact upload url
req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact", toProtoJSON(&actions.CreateArtifactRequest{
Version: 4,
Name: "artifactWithPotentialHarmfulBlockID",
WorkflowRunBackendId: "792",
WorkflowJobRunBackendId: "193",
})).AddTokenAuth(token)
resp := MakeRequest(t, req, http.StatusOK)
var uploadResp actions.CreateArtifactResponse
protojson.Unmarshal(resp.Body.Bytes(), &uploadResp)
assert.True(t, uploadResp.Ok)
assert.Contains(t, uploadResp.SignedUploadUrl, "/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact")

// get upload urls
idx := strings.Index(uploadResp.SignedUploadUrl, "/twirp/")
url := uploadResp.SignedUploadUrl[idx:] + "&comp=block&blockid=%2f..%2fmyfile"
blockListURL := uploadResp.SignedUploadUrl[idx:] + "&comp=blocklist"

// upload artifact chunk
body := strings.Repeat("A", 1024)
req = NewRequestWithBody(t, "PUT", url, strings.NewReader(body))
MakeRequest(t, req, http.StatusCreated)

// verify that the exploit didn't work
_, err = storage.Actions.Stat("myfile")
require.Error(t, err)

// upload artifact blockList
blockList := &actions.BlockList{
Latest: []string{
"/../myfile",
},
}
rawBlockList, err := xml.Marshal(blockList)
require.NoError(t, err)
req = NewRequestWithBody(t, "PUT", blockListURL, bytes.NewReader(rawBlockList))
MakeRequest(t, req, http.StatusCreated)

t.Logf("Create artifact confirm")

sha := sha256.Sum256([]byte(body))

// confirm artifact upload
req = NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact", toProtoJSON(&actions.FinalizeArtifactRequest{
Name: "artifactWithPotentialHarmfulBlockID",
Size: 1024,
Hash: wrapperspb.String("sha256:" + hex.EncodeToString(sha[:])),
WorkflowRunBackendId: "792",
WorkflowJobRunBackendId: "193",
})).
AddTokenAuth(token)
resp = MakeRequest(t, req, http.StatusOK)
var finalizeResp actions.FinalizeArtifactResponse
protojson.Unmarshal(resp.Body.Bytes(), &finalizeResp)
assert.True(t, finalizeResp.Ok)
}

func TestActionsArtifactV4UploadSingleFileWithChunksOutOfOrder(t *testing.T) {
defer tests.PrepareTestEnv(t)()

token, err := actions_service.CreateAuthorizationToken(48, 792, 193)
require.NoError(t, err)

// acquire artifact upload url
req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact", toProtoJSON(&actions.CreateArtifactRequest{
Version: 4,
Name: "artifactWithChunksOutOfOrder",
WorkflowRunBackendId: "792",
WorkflowJobRunBackendId: "193",
})).AddTokenAuth(token)
resp := MakeRequest(t, req, http.StatusOK)
var uploadResp actions.CreateArtifactResponse
protojson.Unmarshal(resp.Body.Bytes(), &uploadResp)
assert.True(t, uploadResp.Ok)
assert.Contains(t, uploadResp.SignedUploadUrl, "/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact")

// get upload urls
idx := strings.Index(uploadResp.SignedUploadUrl, "/twirp/")
block1URL := uploadResp.SignedUploadUrl[idx:] + "&comp=block&blockid=block1"
block2URL := uploadResp.SignedUploadUrl[idx:] + "&comp=block&blockid=block2"
blockListURL := uploadResp.SignedUploadUrl[idx:] + "&comp=blocklist"

// upload artifact chunks
bodyb := strings.Repeat("B", 1024)
req = NewRequestWithBody(t, "PUT", block2URL, strings.NewReader(bodyb))
MakeRequest(t, req, http.StatusCreated)

bodya := strings.Repeat("A", 1024)
req = NewRequestWithBody(t, "PUT", block1URL, strings.NewReader(bodya))
MakeRequest(t, req, http.StatusCreated)

// upload artifact blockList
blockList := &actions.BlockList{
Latest: []string{
"block1",
"block2",
},
}
rawBlockList, err := xml.Marshal(blockList)
require.NoError(t, err)
req = NewRequestWithBody(t, "PUT", blockListURL, bytes.NewReader(rawBlockList))
MakeRequest(t, req, http.StatusCreated)

t.Logf("Create artifact confirm")

sha := sha256.Sum256([]byte(bodya + bodyb))

// confirm artifact upload
req = NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact", toProtoJSON(&actions.FinalizeArtifactRequest{
Name: "artifactWithChunksOutOfOrder",
Size: 2048,
Hash: wrapperspb.String("sha256:" + hex.EncodeToString(sha[:])),
WorkflowRunBackendId: "792",
WorkflowJobRunBackendId: "193",
})).
AddTokenAuth(token)
resp = MakeRequest(t, req, http.StatusOK)
var finalizeResp actions.FinalizeArtifactResponse
protojson.Unmarshal(resp.Body.Bytes(), &finalizeResp)
assert.True(t, finalizeResp.Ok)
}

func TestActionsArtifactV4DownloadSingle(t *testing.T) {
defer tests.PrepareTestEnv(t)()
@@ -78,7 +78,6 @@ const sfc = {
searchURL() {
return `${this.subUrl}/repo/search?sort=updated&order=desc&uid=${this.uid}&team_id=${this.teamId}&q=${this.searchQuery
}&page=${this.page}&limit=${this.searchLimit}&mode=${this.repoTypes[this.reposFilter].searchMode
}${this.reposFilter !== 'all' ? '&exclusive=1' : ''
}${this.archivedFilter === 'archived' ? '&archived=true' : ''}${this.archivedFilter === 'unarchived' ? '&archived=false' : ''
}${this.privateFilter === 'private' ? '&is_private=true' : ''}${this.privateFilter === 'public' ? '&is_private=false' : ''
}`;