mirror of https://codeberg.org/forgejo/forgejo (synced 2024-11-24 10:46:10 +01:00)

Commit 6f9c278559
# ⚠️ Breaking

Many deprecated queue config options are removed (actually, they should have been removed in 1.18/1.19). If you see the fatal message when starting Gitea: "Please update your app.ini to remove deprecated config options", please follow the error messages to remove these options from your app.ini. Example:

```
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].ISSUE_INDEXER_QUEUE_TYPE`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [F] Please update your app.ini to remove deprecated config options
```

Many options in `[queue]` are dropped, including: `WRAP_IF_NECESSARY`, `MAX_ATTEMPTS`, `TIMEOUT`, `WORKERS`, `BLOCK_TIMEOUT`, `BOOST_TIMEOUT`, `BOOST_WORKERS`; they can simply be removed from app.ini. A sketch of such a cleanup appears after the design notes below.

# The problem

The old queue package has some legacy problems:

* Complexity: I doubt that many people could tell how it works.
* Maintainability: too many channels and mutexes/conds are mixed together, and too many different structs/interfaces depend on each other.
* Stability: because of the complexity and maintainability issues, there are sometimes strange bugs that are hard to debug, and some code has no tests (indeed, some code is hard to test because so many things are mixed together).
* General applicability: although it is called a "queue", its behavior is not that of a well-known queue.
* Scalability: it doesn't seem easy to make it work with a cluster without breaking its behavior.

It came from some very old code written to "avoid breaking" things, but its technical debt is too heavy now. It's a good time to introduce a better "queue" package.

# The new queue package

It keeps the old config options and concepts as much as possible.

* It only contains two major kinds of concepts:
  * The "base queue": channel, levelqueue, redis.
    * They share the same abstraction and the same interface, and they are tested by the same testing code.
  * The "WorkerPoolQueue": it uses a "base queue" to provide the "worker pool" function and calls the "handler" to process the data in the base queue.
* The new code doesn't do "PushBack".
  * Think about a queue with many workers: "PushBack" can't guarantee the order of re-queued unhandled items, so the new code just does a normal push.
* The new code doesn't do "pause/resume".
  * "Pause/resume" was designed to handle handler failures, e.g. the document indexer (elasticsearch) being down.
  * If a queue is paused for a long time, either the producers block or the new items are dropped.
  * The new code drops this "pause/resume" trick: it is not a common queue behavior and it doesn't help much.
  * If there are unhandled items, the "push" function just blocks for a few seconds, then re-queues them and retries.
* The new code doesn't do "worker boosting".
  * Gitea's queue handlers are lightweight functions whose only cost is a goroutine, so it doesn't make sense to "boost" them.
  * The new code only uses a "max worker number" to limit the number of concurrent workers.
* The new "Push" never blocks forever.
  * Instead of creating more and more blocked goroutines, returning an error is friendlier to the server and to the end user.

There are more details in the code comments, e.g. the "Flush" problem, the strange "code.index" hanging problem, and the "immediate" queue problem.

Almost ready for review.
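To make the "WorkerPoolQueue" idea more concrete, here is a minimal, self-contained sketch of the concept described above. It is not the actual `modules/queue` API, and all type and function names are made up for illustration: a bounded channel plays the role of the base queue, a semaphore enforces the "max worker number", unhandled items returned by the handler are re-queued with a normal push, and `Push` gives up with an error instead of blocking forever.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// handlerFunc processes a batch and returns the items it could not handle,
// so the queue can push them again (a plain push, not the old "PushBack").
type handlerFunc[T any] func(items ...T) (unhandled []T)

// workerPoolQueue is a toy version of the idea: a bounded channel as the
// "base queue", a semaphore capping concurrent workers, and a Push that
// gives up with an error instead of blocking forever.
type workerPoolQueue[T any] struct {
	ch      chan T
	handler handlerFunc[T]
	workers chan struct{} // acquired by each running worker
}

var errQueueFull = errors.New("queue is full")

func newWorkerPoolQueue[T any](capacity, maxWorkers int, h handlerFunc[T]) *workerPoolQueue[T] {
	return &workerPoolQueue[T]{
		ch:      make(chan T, capacity),
		handler: h,
		workers: make(chan struct{}, maxWorkers),
	}
}

// Push waits briefly for space; if the queue stays full it returns an error
// rather than piling up blocked goroutines.
func (q *workerPoolQueue[T]) Push(item T) error {
	select {
	case q.ch <- item:
		return nil
	case <-time.After(3 * time.Second):
		return errQueueFull
	}
}

// run dispatches items to workers and re-pushes anything left unhandled.
func (q *workerPoolQueue[T]) run() {
	for item := range q.ch {
		q.workers <- struct{}{} // blocks once the "max worker number" is reached
		go func(item T) {
			defer func() { <-q.workers }()
			for _, u := range q.handler(item) {
				_ = q.Push(u) // retry with a normal push; ordering is not guaranteed
			}
		}(item)
	}
}

func main() {
	q := newWorkerPoolQueue[string](100, 5, func(items ...string) (unhandled []string) {
		for _, it := range items {
			fmt.Println("handled:", it)
		}
		return nil
	})
	go q.run()
	_ = q.Push("hello")
	time.Sleep(100 * time.Millisecond)
}
```

The real package also has persistent base queues (levelqueue, redis) and the flush handling mentioned above, which this sketch leaves out.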
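And for the breaking config change at the top, an app.ini cleanup might look roughly like the following. The removed keys are the ones listed in the error messages; the surviving options and the concrete values (`TYPE`, `MAX_WORKERS`, `LENGTH`) are illustrative assumptions, so check the error messages and the config documentation for your release rather than copying this verbatim.

```ini
; Before: deprecated options that now cause the fatal startup error
;[indexer]
;ISSUE_INDEXER_QUEUE_TYPE = levelqueue
;UPDATE_BUFFER_LEN = 20
;
;[queue]
;WRAP_IF_NECESSARY = true
;MAX_ATTEMPTS = 10
;TIMEOUT = 15m
;WORKERS = 1
;BLOCK_TIMEOUT = 1s
;BOOST_TIMEOUT = 5m
;BOOST_WORKERS = 5

; After: drop the keys above; if a queue needs tuning, use the remaining
; [queue] options or a per-queue section such as [queue.issue_indexer]
; (values below are placeholders, not recommendations)
[queue]
TYPE = level
MAX_WORKERS = 10

[queue.issue_indexer]
LENGTH = 20
```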
TODO:

* [x] add some necessary comments during review
* [x] add some more tests if necessary
* [x] update documents and config options
* [x] test max worker / active worker
* [x] re-run the CI tasks to see whether any test is flaky
* [x] improve the `handleOldLengthConfiguration` to provide more friendly messages
* [x] fine tune default config values (eg: length?)

## Code coverage:

![image](https://user-images.githubusercontent.com/2114189/236620635-55576955-f95d-4810-b12f-879026a3afdf.png)
modules/testlogger/testlogger.go · 213 lines · 5.2 KiB · Go
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package testlogger

import (
	"context"
	"fmt"
	"os"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"code.gitea.io/gitea/modules/json"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/queue"
)

var (
	prefix    string
	SlowTest  = 10 * time.Second
	SlowFlush = 5 * time.Second
)

// TestLogger is a logger which will write to the testing log
type TestLogger struct {
	log.WriterLogger
}

var WriterCloser = &testLoggerWriterCloser{}

type testLoggerWriterCloser struct {
	sync.RWMutex
	t []*testing.TB
}

func (w *testLoggerWriterCloser) pushT(t *testing.TB) {
	w.Lock()
	w.t = append(w.t, t)
	w.Unlock()
}

func (w *testLoggerWriterCloser) Write(p []byte) (int, error) {
	// There was a data race problem: the logger system could still try to output logs after the runner is finished.
	// So we must ensure that the "t" in stack is still valid.
	w.RLock()
	defer w.RUnlock()

	var t *testing.TB
	if len(w.t) > 0 {
		t = w.t[len(w.t)-1]
	}

	if len(p) > 0 && p[len(p)-1] == '\n' {
		p = p[:len(p)-1]
	}

	if t == nil || *t == nil {
		return fmt.Fprintf(os.Stdout, "??? [Unknown Test] %s\n", p)
	}

	defer func() {
		err := recover()
		if err == nil {
			return
		}
		var errString string
		errErr, ok := err.(error)
		if ok {
			errString = errErr.Error()
		} else {
			errString, ok = err.(string)
		}
		if !ok {
			panic(err)
		}
		if !strings.HasPrefix(errString, "Log in goroutine after ") {
			panic(err)
		}
	}()

	(*t).Log(string(p))
	return len(p), nil
}

func (w *testLoggerWriterCloser) popT() {
	w.Lock()
	if len(w.t) > 0 {
		w.t = w.t[:len(w.t)-1]
	}
	w.Unlock()
}

func (w *testLoggerWriterCloser) Close() error {
	return nil
}

func (w *testLoggerWriterCloser) Reset() {
	w.Lock()
	if len(w.t) > 0 {
		for _, t := range w.t {
			if t == nil {
				continue
			}
			fmt.Fprintf(os.Stdout, "Unclosed logger writer in test: %s", (*t).Name())
			(*t).Errorf("Unclosed logger writer in test: %s", (*t).Name())
		}
		w.t = nil
	}
	w.Unlock()
}

// PrintCurrentTest prints the current test to os.Stdout
func PrintCurrentTest(t testing.TB, skip ...int) func() {
	start := time.Now()
	actualSkip := 1
	if len(skip) > 0 {
		actualSkip = skip[0]
	}
	_, filename, line, _ := runtime.Caller(actualSkip)

	if log.CanColorStdout {
		fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", fmt.Formatter(log.NewColoredValue(t.Name())), strings.TrimPrefix(filename, prefix), line)
	} else {
		fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", t.Name(), strings.TrimPrefix(filename, prefix), line)
	}
	WriterCloser.pushT(&t)
	return func() {
		took := time.Since(start)
		if took > SlowTest {
			if log.CanColorStdout {
				fmt.Fprintf(os.Stdout, "+++ %s is a slow test (took %v)\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgYellow)), fmt.Formatter(log.NewColoredValue(took, log.Bold, log.FgYellow)))
			} else {
				fmt.Fprintf(os.Stdout, "+++ %s is a slow test (took %v)\n", t.Name(), took)
			}
		}
		timer := time.AfterFunc(SlowFlush, func() {
			if log.CanColorStdout {
				fmt.Fprintf(os.Stdout, "+++ %s ... still flushing after %v ...\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgRed)), SlowFlush)
			} else {
				fmt.Fprintf(os.Stdout, "+++ %s ... still flushing after %v ...\n", t.Name(), SlowFlush)
			}
		})
		if err := queue.GetManager().FlushAll(context.Background(), time.Minute); err != nil {
			t.Errorf("Flushing queues failed with error %v", err)
		}
		timer.Stop()
		flushTook := time.Since(start) - took
		if flushTook > SlowFlush {
			if log.CanColorStdout {
				fmt.Fprintf(os.Stdout, "+++ %s had a slow clean-up flush (took %v)\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgRed)), fmt.Formatter(log.NewColoredValue(flushTook, log.Bold, log.FgRed)))
			} else {
				fmt.Fprintf(os.Stdout, "+++ %s had a slow clean-up flush (took %v)\n", t.Name(), flushTook)
			}
		}
		WriterCloser.popT()
	}
}

// Printf takes a format and args and prints the string to os.Stdout
func Printf(format string, args ...interface{}) {
	if log.CanColorStdout {
		for i := 0; i < len(args); i++ {
			args[i] = log.NewColoredValue(args[i])
		}
	}
	fmt.Fprintf(os.Stdout, "\t"+format, args...)
}

// NewTestLogger creates a TestLogger as a log.LoggerProvider
func NewTestLogger() log.LoggerProvider {
	logger := &TestLogger{}
	logger.Colorize = log.CanColorStdout
	logger.Level = log.TRACE
	return logger
}

// Init inits connection writer with json config.
// json config only need key "level".
func (log *TestLogger) Init(config string) error {
	err := json.Unmarshal([]byte(config), log)
	if err != nil {
		return err
	}
	log.NewWriterLogger(WriterCloser)
	return nil
}

// Flush when log should be flushed
func (log *TestLogger) Flush() {
}

// ReleaseReopen does nothing
func (log *TestLogger) ReleaseReopen() error {
	return nil
}

// GetName returns the default name for this implementation
func (log *TestLogger) GetName() string {
	return "test"
}

func init() {
	const relFilePath = "modules/testlogger/testlogger.go"
	_, filename, _, _ := runtime.Caller(0)
	if !strings.HasSuffix(filename, relFilePath) {
		panic("source code file path doesn't match expected: " + relFilePath)
	}
	prefix = strings.TrimSuffix(filename, relFilePath)
}