forgejo/services/convert/repository.go


// Copyright 2020 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package convert

import (
"context"
"time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/perm"
access_model "code.gitea.io/gitea/models/perm/access"
repo_model "code.gitea.io/gitea/models/repo"
unit_model "code.gitea.io/gitea/models/unit"
"code.gitea.io/gitea/modules/log"
api "code.gitea.io/gitea/modules/structs"
)

// ToRepo converts a Repository to api.Repository
func ToRepo(ctx context.Context, repo *repo_model.Repository, permissionInRepo access_model.Permission) *api.Repository {
return innerToRepo(ctx, repo, permissionInRepo, false)
}

func innerToRepo(ctx context.Context, repo *repo_model.Repository, permissionInRepo access_model.Permission, isParent bool) *api.Repository {
var parent *api.Repository
if permissionInRepo.Units == nil && permissionInRepo.UnitsMode == nil {
// If Units and UnitsMode are both nil, it means that it's a hard coded permission,
// like access_model.Permission{AccessMode: perm.AccessModeAdmin}.
// So we need to load units for the repo, or UnitAccessMode will always return perm.AccessModeNone.
_ = repo.LoadUnits(ctx) // the error is not important, so ignore it
permissionInRepo.Units = repo.Units
}
cloneLink := repo.CloneLink()
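// Map the repository access mode and the code unit's access mode onto the coarse admin/push/pull flags.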
permission := &api.Permission{
Admin: permissionInRepo.AccessMode >= perm.AccessModeAdmin,
Push: permissionInRepo.UnitAccessMode(unit_model.TypeCode) >= perm.AccessModeWrite,
Pull: permissionInRepo.UnitAccessMode(unit_model.TypeCode) >= perm.AccessModeRead,
}
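// Resolve the base repository only for the top-level call; the parent's own parent is not expanded.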
if !isParent {
err := repo.GetBaseRepo(ctx)
if err != nil {
return nil
}
if repo.BaseRepo != nil {
// FIXME: The permission of the parent repo is not correct.
// It's the permission of the current repo, so it's probably different from the parent repo.
// But there isn't a good way to get the permission of the parent repo, because the doer is not passed in.
// Use the permission of the current repo to keep the behavior consistent with the old API.
// Maybe the right way is setting the permission of the parent repo to nil, empty is better than wrong.
parent = innerToRepo(ctx, repo.BaseRepo, permissionInRepo, true)
}
}
// check enabled/disabled units
hasIssues := false
var externalTracker *api.ExternalTracker
var internalTracker *api.InternalTracker
if unit, err := repo.GetUnit(ctx, unit_model.TypeIssues); err == nil {
config := unit.IssuesConfig()
hasIssues = true
internalTracker = &api.InternalTracker{
EnableTimeTracker: config.EnableTimetracker,
AllowOnlyContributorsToTrackTime: config.AllowOnlyContributorsToTrackTime,
EnableIssueDependencies: config.EnableDependencies,
}
} else if unit, err := repo.GetUnit(ctx, unit_model.TypeExternalTracker); err == nil {
config := unit.ExternalTrackerConfig()
hasIssues = true
externalTracker = &api.ExternalTracker{
ExternalTrackerURL: config.ExternalTrackerURL,
ExternalTrackerFormat: config.ExternalTrackerFormat,
ExternalTrackerStyle: config.ExternalTrackerStyle,
ExternalTrackerRegexpPattern: config.ExternalTrackerRegexpPattern,
}
}
hasWiki := false
var externalWiki *api.ExternalWiki
if _, err := repo.GetUnit(ctx, unit_model.TypeWiki); err == nil {
hasWiki = true
} else if unit, err := repo.GetUnit(ctx, unit_model.TypeExternalWiki); err == nil {
hasWiki = true
config := unit.ExternalWikiConfig()
externalWiki = &api.ExternalWiki{
ExternalWikiURL: config.ExternalWikiURL,
}
}
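// Pull request settings fall back to these defaults when the pull requests unit is disabled.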
hasPullRequests := false
ignoreWhitespaceConflicts := false
allowMerge := false
allowRebase := false
allowRebaseMerge := false
allowSquash := false
allowFastForwardOnly := false
allowRebaseUpdate := false
defaultDeleteBranchAfterMerge := false
defaultMergeStyle := repo_model.MergeStyleMerge
defaultAllowMaintainerEdit := false
if unit, err := repo.GetUnit(ctx, unit_model.TypePullRequests); err == nil {
config := unit.PullRequestsConfig()
hasPullRequests = true
ignoreWhitespaceConflicts = config.IgnoreWhitespaceConflicts
allowMerge = config.AllowMerge
allowRebase = config.AllowRebase
allowRebaseMerge = config.AllowRebaseMerge
allowSquash = config.AllowSquash
allowFastForwardOnly = config.AllowFastForwardOnly
allowRebaseUpdate = config.AllowRebaseUpdate
defaultDeleteBranchAfterMerge = config.DefaultDeleteBranchAfterMerge
defaultMergeStyle = config.GetDefaultMergeStyle()
defaultAllowMaintainerEdit = config.DefaultAllowMaintainerEdit
}
hasProjects := false
if _, err := repo.GetUnit(ctx, unit_model.TypeProjects); err == nil {
hasProjects = true
}
hasReleases := false
if _, err := repo.GetUnit(ctx, unit_model.TypeReleases); err == nil {
hasReleases = true
}
hasPackages := false
if _, err := repo.GetUnit(ctx, unit_model.TypePackages); err == nil {
hasPackages = true
}
hasActions := false
if _, err := repo.GetUnit(ctx, unit_model.TypeActions); err == nil {
hasActions = true
}
if err := repo.LoadOwner(ctx); err != nil {
return nil
}
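// Count only published releases; drafts and tags are excluded.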
numReleases, _ := db.Count[repo_model.Release](ctx, repo_model.FindReleasesOptions{
IncludeDrafts: false,
IncludeTags: false,
RepoID: repo.ID,
})
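// For mirror repositories, expose the configured sync interval and the time of the last update.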
mirrorInterval := ""
var mirrorUpdated time.Time
if repo.IsMirror {
pullMirror, err := repo_model.GetMirrorByRepoID(ctx, repo.ID)
if err == nil {
mirrorInterval = pullMirror.Interval.String()
mirrorUpdated = pullMirror.UpdatedUnix.AsTime()
}
}
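// Attach pending transfer details when the repository is awaiting a transfer.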
var transfer *api.RepoTransfer
if repo.Status == repo_model.RepositoryPendingTransfer {
t, err := models.GetPendingRepositoryTransfer(ctx, repo)
if err != nil && !models.IsErrNoPendingTransfer(err) {
log.Warn("GetPendingRepositoryTransfer: %v", err)
} else {
if err := t.LoadAttributes(ctx); err != nil {
log.Warn("LoadAttributes of RepoTransfer: %v", err)
} else {
transfer = ToRepoTransfer(ctx, t)
}
}
}
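// Expose the primary language only when language statistics are available.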
var language string
if repo.PrimaryLanguage != nil {
language = repo.PrimaryLanguage.Language
}
repoAPIURL := repo.APIURL()
return &api.Repository{
ID: repo.ID,
Owner: ToUserWithAccessMode(ctx, repo.Owner, permissionInRepo.AccessMode),
Name: repo.Name,
FullName: repo.FullName(),
Description: repo.Description,
Private: repo.IsPrivate,
Template: repo.IsTemplate,
Empty: repo.IsEmpty,
Archived: repo.IsArchived,
Size: int(repo.Size / 1024),
Fork: repo.IsFork,
Parent: parent,
Mirror: repo.IsMirror,
HTMLURL: repo.HTMLURL(),
URL: repoAPIURL,
SSHURL: cloneLink.SSH,
CloneURL: cloneLink.HTTPS,
OriginalURL: repo.SanitizedOriginalURL(),
Website: repo.Website,
Language: language,
LanguagesURL: repoAPIURL + "/languages",
Stars: repo.NumStars,
Forks: repo.NumForks,
Watchers: repo.NumWatches,
OpenIssues: repo.NumOpenIssues,
OpenPulls: repo.NumOpenPulls,
Releases: int(numReleases),
DefaultBranch: repo.DefaultBranch,
Created: repo.CreatedUnix.AsTime(),
Updated: repo.UpdatedUnix.AsTime(),
ArchivedAt: repo.ArchivedUnix.AsTime(),
Permissions: permission,
HasIssues: hasIssues,
ExternalTracker: externalTracker,
InternalTracker: internalTracker,
HasWiki: hasWiki,
WikiBranch: repo.WikiBranch,
HasProjects: hasProjects,
HasReleases: hasReleases,
HasPackages: hasPackages,
HasActions: hasActions,
ExternalWiki: externalWiki,
HasPullRequests: hasPullRequests,
IgnoreWhitespaceConflicts: ignoreWhitespaceConflicts,
AllowMerge: allowMerge,
AllowRebase: allowRebase,
AllowRebaseMerge: allowRebaseMerge,
AllowSquash: allowSquash,
AllowFastForwardOnly: allowFastForwardOnly,
AllowRebaseUpdate: allowRebaseUpdate,
DefaultDeleteBranchAfterMerge: defaultDeleteBranchAfterMerge,
DefaultMergeStyle: string(defaultMergeStyle),
DefaultAllowMaintainerEdit: defaultAllowMaintainerEdit,
AvatarURL: repo.AvatarLink(ctx),
Internal: !repo.IsPrivate && repo.Owner.Visibility == api.VisibleTypePrivate,
MirrorInterval: mirrorInterval,
MirrorUpdated: mirrorUpdated,
RepoTransfer: transfer,
}
}

// ToRepoTransfer converts a models.RepoTransfer to a structs.RepoTransfer
func ToRepoTransfer(ctx context.Context, t *models.RepoTransfer) *api.RepoTransfer {
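// The error from ToTeams is ignored; the team list may be empty if conversion fails.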
teams, _ := ToTeams(ctx, t.Teams, false)
return &api.RepoTransfer{
Doer: ToUser(ctx, t.Doer, nil),
Recipient: ToUser(ctx, t.Recipient, nil),
Teams: teams,
}
}