forgejo/routers/api/v1/repo/file.go
Zettat123 6b74043b85
Fix missing signature key error when pulling Docker images with SERVE_DIRECT enabled (#32365)
Fix #28121

I did some tests and found that the `missing signature key` error is
caused by an incorrect `Content-Type` header. Gitea correctly sets the
`Content-Type` header when serving files.

348d1d0f32/routers/api/packages/container/container.go (L712-L717)
However, when `SERVE_DIRECT` is enabled, the `Content-Type` header may
be set to an incorrect value by the storage service. To fix this issue,
we can use query parameters to override response header values.

https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
(Screenshot: https://github.com/user-attachments/assets/f2ff90f0-f1df-46f9-9680-b8120222c555 — the AWS S3 GetObject documentation listing the request query parameters that override response header values, e.g. `response-content-type`.)
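
For context, this is roughly how such an override looks with the minio-go client: the `response-content-type` query parameter is signed into the URL, so the storage service answers with the requested `Content-Type`. This is a minimal sketch — endpoint, credentials, bucket, object name, and the media type are placeholders, not Forgejo's actual configuration or the code in this PR.

```go
package main

import (
	"context"
	"fmt"
	"net/url"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("s3.example.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		panic(err)
	}

	// The override travels as signed query parameters; the storage service
	// then responds with this Content-Type instead of whatever was stored.
	reqParams := make(url.Values)
	reqParams.Set("response-content-type", "application/vnd.docker.distribution.manifest.v2+json")

	u, err := client.PresignedGetObject(context.Background(),
		"my-bucket", "docker/manifests/sha256-abc", 5*time.Minute, reqParams)
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String())
}
```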

In this PR, I introduced a new parameter to the `URL` method so that
callers can pass additional request parameters.

```go
URL(path, name string, reqParams url.Values) (*url.URL, error)
```
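
With the extended signature, a caller can thread those parameters through the storage abstraction. The snippet below is a hypothetical illustration of the call shape only — the path, file name, and media type are invented placeholders, not the actual call site changed by this PR:

```go
package example

import (
	"net/url"

	"code.gitea.io/gitea/modules/storage"
)

// signedManifestURL sketches the new three-argument form of URL().
func signedManifestURL() (*url.URL, error) {
	reqParams := make(url.Values)
	// Ask the backend to override Content-Type on the signed response.
	reqParams.Set("response-content-type", "application/vnd.oci.image.manifest.v1+json")

	// Placeholder path and display name; passing nil keeps the old behaviour.
	return storage.Packages.URL("packages/ab/cd/abcd1234", "sha256_abcd1234", reqParams)
}
```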

---

Most S3-like services support specifying the content type when storing
objects. However, Gitea always uses `application/octet-stream`.
Therefore, I believe we also need to improve the `Save` method to
support storing objects with the correct content type.

b7fb20e73e/modules/storage/minio.go (L214-L221)
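
As an illustration of that direction, a content-type-aware save with minio-go could look roughly like this. It is a sketch under the assumption that the caller supplies the content type; it is not the proposed `Save` signature, and the helper name is invented:

```go
package example

import (
	"context"
	"io"

	"github.com/minio/minio-go/v7"
)

// saveWithContentType is a hypothetical helper: it stores an object and records
// the caller-supplied content type instead of defaulting to application/octet-stream.
func saveWithContentType(ctx context.Context, client *minio.Client, bucket, object string,
	r io.Reader, size int64, contentType string,
) error {
	_, err := client.PutObject(ctx, bucket, object, r, size, minio.PutObjectOptions{
		ContentType: contentType, // e.g. "application/gzip" for repo archives
	})
	return err
}
```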
(cherry picked from commit 0690cb076bf63f71988a709f62a9c04660b51a4f)

Conflicts:
	- modules/storage/azureblob.go
	  Dropped the change, as we do not support Azure blob storage.
	- modules/storage/helper.go
	  Resolved by adjusting their `discardStorage` to our
	  `DiscardStorage`
	- routers/api/actions/artifacts.go
	  routers/api/actions/artifactsv4.go
	  routers/web/repo/actions/view.go
	  routers/web/repo/download.go
	  Resolved the conflicts by manually adding the new `nil`
	  parameter to the `storage.Attachments.URL()` calls (see the
	  sketch after this list).

	  Originally conflicted due to differences in the if expression
	  above these calls.
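
For illustration, the mechanical shape of that resolution — only a trailing `nil` is added for the new request-parameters argument. The surrounding function, path, and name below are simplified placeholders, not the exact Forgejo code:

```go
package example

import (
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/services/context"
)

// redirectToAttachment shows the adjusted call: previously
// storage.Attachments.URL(relPath, name), now with a trailing nil.
func redirectToAttachment(ctx *context.Context, relPath, name string) {
	u, err := storage.Attachments.URL(relPath, name, nil)
	if u != nil && err == nil {
		ctx.Redirect(u.String())
	}
}
```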
2024-11-05 09:33:15 +01:00


// Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2018 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package repo
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"
"code.gitea.io/gitea/models"
git_model "code.gitea.io/gitea/models/git"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unit"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/httpcache"
"code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/web"
"code.gitea.io/gitea/routers/common"
"code.gitea.io/gitea/services/context"
archiver_service "code.gitea.io/gitea/services/repository/archiver"
files_service "code.gitea.io/gitea/services/repository/files"
)
const (
giteaObjectTypeHeader = "X-Gitea-Object-Type"
forgejoObjectTypeHeader = "X-Forgejo-Object-Type"
)
// GetRawFile get a file by path on a repository
func GetRawFile(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/raw/{filepath} repository repoGetRawFile
// ---
// summary: Get a file from a repository
// produces:
// - application/octet-stream
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: filepath
// in: path
// description: filepath of the file to get
// type: string
// required: true
// - name: ref
// in: query
// description: "The name of the commit/branch/tag. Default: the repository's default branch (usually master)"
// type: string
// required: false
// responses:
// 200:
// description: Returns raw file content.
// schema:
// type: file
// "404":
// "$ref": "#/responses/notFound"
if ctx.Repo.Repository.IsEmpty {
ctx.NotFound()
return
}
blob, entry, lastModified := getBlobForEntry(ctx)
if ctx.Written() {
return
}
ctx.RespHeader().Set(giteaObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))
ctx.RespHeader().Set(forgejoObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))
if err := common.ServeBlob(ctx.Base, ctx.Repo.TreePath, blob, lastModified); err != nil {
ctx.Error(http.StatusInternalServerError, "ServeBlob", err)
}
}
// GetRawFileOrLFS get a file by repo's path, redirecting to LFS if necessary.
func GetRawFileOrLFS(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/media/{filepath} repository repoGetRawFileOrLFS
// ---
// summary: Get a file or its LFS object from a repository
// produces:
// - application/octet-stream
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: filepath
// in: path
// description: filepath of the file to get
// type: string
// required: true
// - name: ref
// in: query
// description: "The name of the commit/branch/tag. Default: the repository's default branch (usually master)"
// type: string
// required: false
// responses:
// 200:
// description: Returns raw file content.
// schema:
// type: file
// "404":
// "$ref": "#/responses/notFound"
if ctx.Repo.Repository.IsEmpty {
ctx.NotFound()
return
}
blob, entry, lastModified := getBlobForEntry(ctx)
if ctx.Written() {
return
}
ctx.RespHeader().Set(giteaObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))
ctx.RespHeader().Set(forgejoObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))
// LFS Pointer files are at most 1024 bytes - so any blob greater than 1024 bytes cannot be an LFS file
if blob.Size() > 1024 {
// First handle caching for the blob
if httpcache.HandleGenericETagTimeCache(ctx.Req, ctx.Resp, `"`+blob.ID.String()+`"`, lastModified) {
return
}
// OK not cached - serve!
if err := common.ServeBlob(ctx.Base, ctx.Repo.TreePath, blob, lastModified); err != nil {
ctx.ServerError("ServeBlob", err)
}
return
}
// OK, the blob is now known to be at most 1024 bytes, so we can simply read it in one go (this saves reading it twice)
dataRc, err := blob.DataAsync()
if err != nil {
ctx.ServerError("DataAsync", err)
return
}
// FIXME: code from #19689, what if the file is large ... OOM ...
buf, err := io.ReadAll(dataRc)
if err != nil {
_ = dataRc.Close()
ctx.ServerError("DataAsync", err)
return
}
if err := dataRc.Close(); err != nil {
log.Error("Error whilst closing blob %s reader in %-v. Error: %v", blob.ID, ctx.Repo.Repository, err)
}
// Check if the blob represents a pointer
pointer, _ := lfs.ReadPointer(bytes.NewReader(buf))
// if it's not a pointer, just serve the data directly
if !pointer.IsValid() {
// First handle caching for the blob
if httpcache.HandleGenericETagTimeCache(ctx.Req, ctx.Resp, `"`+blob.ID.String()+`"`, lastModified) {
return
}
// OK not cached - serve!
common.ServeContentByReader(ctx.Base, ctx.Repo.TreePath, blob.Size(), bytes.NewReader(buf))
return
}
// Now check if there is a MetaObject for this pointer
meta, err := git_model.GetLFSMetaObjectByOid(ctx, ctx.Repo.Repository.ID, pointer.Oid)
// If there isn't one, just serve the data directly
if err == git_model.ErrLFSObjectNotExist {
// Handle caching for the blob SHA (not the LFS object OID)
if httpcache.HandleGenericETagTimeCache(ctx.Req, ctx.Resp, `"`+blob.ID.String()+`"`, lastModified) {
return
}
common.ServeContentByReader(ctx.Base, ctx.Repo.TreePath, blob.Size(), bytes.NewReader(buf))
return
} else if err != nil {
ctx.ServerError("GetLFSMetaObjectByOid", err)
return
}
// Handle caching for the LFS object OID
if httpcache.HandleGenericETagCache(ctx.Req, ctx.Resp, `"`+pointer.Oid+`"`) {
return
}
if setting.LFS.Storage.MinioConfig.ServeDirect {
// If we have a signed url (S3, object storage), redirect to this directly.
u, err := storage.LFS.URL(pointer.RelativePath(), blob.Name(), nil)
if u != nil && err == nil {
ctx.Redirect(u.String())
return
}
}
lfsDataRc, err := lfs.ReadMetaObject(meta.Pointer)
if err != nil {
ctx.ServerError("ReadMetaObject", err)
return
}
defer lfsDataRc.Close()
common.ServeContentByReadSeeker(ctx.Base, ctx.Repo.TreePath, lastModified, lfsDataRc)
}
func getBlobForEntry(ctx *context.APIContext) (blob *git.Blob, entry *git.TreeEntry, lastModified *time.Time) {
entry, err := ctx.Repo.Commit.GetTreeEntryByPath(ctx.Repo.TreePath)
if err != nil {
if git.IsErrNotExist(err) {
ctx.NotFound()
} else {
ctx.Error(http.StatusInternalServerError, "GetTreeEntryByPath", err)
}
return nil, nil, nil
}
if entry.IsDir() || entry.IsSubModule() {
ctx.NotFound("getBlobForEntry", nil)
return nil, nil, nil
}
info, _, err := git.Entries([]*git.TreeEntry{entry}).GetCommitsInfo(ctx, ctx.Repo.Commit, path.Dir("/" + ctx.Repo.TreePath)[1:])
if err != nil {
ctx.Error(http.StatusInternalServerError, "GetCommitsInfo", err)
return nil, nil, nil
}
if len(info) == 1 {
// Use the latest commit's committer time as the Last-Modified value
lastModified = &info[0].Commit.Committer.When
}
blob = entry.Blob()
return blob, entry, lastModified
}
// GetArchive get archive of a repository
func GetArchive(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/archive/{archive} repository repoGetArchive
// ---
// summary: Get an archive of a repository
// produces:
// - application/octet-stream
// - application/zip
// - application/gzip
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: archive
// in: path
// description: the git reference for download with attached archive format (e.g. master.zip)
// type: string
// required: true
// responses:
// 200:
// description: success
// "404":
// "$ref": "#/responses/notFound"
if ctx.Repo.GitRepo == nil {
gitRepo, err := gitrepo.OpenRepository(ctx, ctx.Repo.Repository)
if err != nil {
ctx.Error(http.StatusInternalServerError, "OpenRepository", err)
return
}
ctx.Repo.GitRepo = gitRepo
defer gitRepo.Close()
}
archiveDownload(ctx)
}
func archiveDownload(ctx *context.APIContext) {
uri := ctx.Params("*")
aReq, err := archiver_service.NewRequest(ctx, ctx.Repo.Repository.ID, ctx.Repo.GitRepo, uri)
if err != nil {
if errors.Is(err, archiver_service.ErrUnknownArchiveFormat{}) {
ctx.Error(http.StatusBadRequest, "unknown archive format", err)
} else if errors.Is(err, archiver_service.RepoRefNotFoundError{}) {
ctx.Error(http.StatusNotFound, "unrecognized reference", err)
} else {
ctx.ServerError("archiver_service.NewRequest", err)
}
return
}
archiver, err := aReq.Await(ctx)
if err != nil {
ctx.ServerError("archiver.Await", err)
return
}
download(ctx, aReq.GetArchiveName(), archiver)
}
func download(ctx *context.APIContext, archiveName string, archiver *repo_model.RepoArchiver) {
downloadName := ctx.Repo.Repository.Name + "-" + archiveName
// Add nix format link header so tarballs lock correctly:
// https://github.com/nixos/nix/blob/56763ff918eb308db23080e560ed2ea3e00c80a7/doc/manual/src/protocols/tarball-fetcher.md
ctx.Resp.Header().Add("Link", fmt.Sprintf("<%s/archive/%s.tar.gz?rev=%s>; rel=\"immutable\"",
ctx.Repo.Repository.APIURL(),
archiver.CommitID, archiver.CommitID))
rPath := archiver.RelativePath()
if setting.RepoArchive.Storage.MinioConfig.ServeDirect {
// If we have a signed url (S3, object storage), redirect to this directly.
u, err := storage.RepoArchives.URL(rPath, downloadName, nil)
if u != nil && err == nil {
ctx.Redirect(u.String())
return
}
}
// Otherwise, serve the archive from repository archive storage
fr, err := storage.RepoArchives.Open(rPath)
if err != nil {
ctx.ServerError("Open", err)
return
}
defer fr.Close()
contentType := ""
switch archiver.Type {
case git.ZIP:
contentType = "application/zip"
case git.TARGZ:
// Per RFC6713.
contentType = "application/gzip"
}
ctx.ServeContent(fr, &context.ServeHeaderOptions{
ContentType: contentType,
Filename: downloadName,
LastModified: archiver.CreatedUnix.AsLocalTime(),
})
}
// GetEditorconfig get editor config of a repository
func GetEditorconfig(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/editorconfig/{filepath} repository repoGetEditorConfig
// ---
// summary: Get the EditorConfig definitions of a file in a repository
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: filepath
// in: path
// description: filepath of file to get
// type: string
// required: true
// - name: ref
// in: query
// description: "The name of the commit/branch/tag. Default: the repository's default branch (usually master)"
// type: string
// required: false
// responses:
// 200:
// description: success
// "404":
// "$ref": "#/responses/notFound"
ec, _, err := ctx.Repo.GetEditorconfig(ctx.Repo.Commit)
if err != nil {
if git.IsErrNotExist(err) {
ctx.NotFound(err)
} else {
ctx.Error(http.StatusInternalServerError, "GetEditorconfig", err)
}
return
}
fileName := ctx.Params("filename")
def, err := ec.GetDefinitionForFilename(fileName)
if def == nil {
ctx.NotFound(err)
return
}
ctx.JSON(http.StatusOK, def)
}
// canWriteFiles returns true if repository is editable and user has proper access level.
func canWriteFiles(ctx *context.APIContext, branch string) bool {
return ctx.Repo.CanWriteToBranch(ctx, ctx.Doer, branch) &&
!ctx.Repo.Repository.IsMirror &&
!ctx.Repo.Repository.IsArchived
}
// canReadFiles returns true if repository is readable and user has proper access level.
func canReadFiles(r *context.Repository) bool {
return r.Permission.CanRead(unit.TypeCode)
}
func base64Reader(s string) (io.ReadSeeker, error) {
b, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return nil, err
}
return bytes.NewReader(b), nil
}
// ChangeFiles handles API call for modifying multiple files
func ChangeFiles(ctx *context.APIContext) {
// swagger:operation POST /repos/{owner}/{repo}/contents repository repoChangeFiles
// ---
// summary: Modify multiple files in a repository
// consumes:
// - application/json
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: body
// in: body
// required: true
// schema:
// "$ref": "#/definitions/ChangeFilesOptions"
// responses:
// "201":
// "$ref": "#/responses/FilesResponse"
// "403":
// "$ref": "#/responses/error"
// "404":
// "$ref": "#/responses/notFound"
// "413":
// "$ref": "#/responses/quotaExceeded"
// "422":
// "$ref": "#/responses/error"
// "423":
// "$ref": "#/responses/repoArchivedError"
apiOpts := web.GetForm(ctx).(*api.ChangeFilesOptions)
if apiOpts.BranchName == "" {
apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
}
var files []*files_service.ChangeRepoFile
for _, file := range apiOpts.Files {
contentReader, err := base64Reader(file.ContentBase64)
if err != nil {
ctx.Error(http.StatusUnprocessableEntity, "Invalid base64 content", err)
return
}
changeRepoFile := &files_service.ChangeRepoFile{
Operation: file.Operation,
TreePath: file.Path,
FromTreePath: file.FromPath,
ContentReader: contentReader,
SHA: file.SHA,
}
files = append(files, changeRepoFile)
}
opts := &files_service.ChangeRepoFilesOptions{
Files: files,
Message: apiOpts.Message,
OldBranch: apiOpts.BranchName,
NewBranch: apiOpts.NewBranchName,
Committer: &files_service.IdentityOptions{
Name: apiOpts.Committer.Name,
Email: apiOpts.Committer.Email,
},
Author: &files_service.IdentityOptions{
Name: apiOpts.Author.Name,
Email: apiOpts.Author.Email,
},
Dates: &files_service.CommitDateOptions{
Author: apiOpts.Dates.Author,
Committer: apiOpts.Dates.Committer,
},
Signoff: apiOpts.Signoff,
}
if opts.Dates.Author.IsZero() {
opts.Dates.Author = time.Now()
}
if opts.Dates.Committer.IsZero() {
opts.Dates.Committer = time.Now()
}
if opts.Message == "" {
opts.Message = changeFilesCommitMessage(ctx, files)
}
if filesResponse, err := createOrUpdateFiles(ctx, opts); err != nil {
handleCreateOrUpdateFileError(ctx, err)
} else {
ctx.JSON(http.StatusCreated, filesResponse)
}
}
// CreateFile handles API call for creating a file
func CreateFile(ctx *context.APIContext) {
// swagger:operation POST /repos/{owner}/{repo}/contents/{filepath} repository repoCreateFile
// ---
// summary: Create a file in a repository
// consumes:
// - application/json
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: filepath
// in: path
// description: path of the file to create
// type: string
// required: true
// - name: body
// in: body
// required: true
// schema:
// "$ref": "#/definitions/CreateFileOptions"
// responses:
// "201":
// "$ref": "#/responses/FileResponse"
// "403":
// "$ref": "#/responses/error"
// "404":
// "$ref": "#/responses/notFound"
// "413":
// "$ref": "#/responses/quotaExceeded"
// "422":
// "$ref": "#/responses/error"
// "423":
// "$ref": "#/responses/repoArchivedError"
apiOpts := web.GetForm(ctx).(*api.CreateFileOptions)
if apiOpts.BranchName == "" {
apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
}
contentReader, err := base64Reader(apiOpts.ContentBase64)
if err != nil {
ctx.Error(http.StatusUnprocessableEntity, "Invalid base64 content", err)
return
}
opts := &files_service.ChangeRepoFilesOptions{
Files: []*files_service.ChangeRepoFile{
{
Operation: "create",
TreePath: ctx.Params("*"),
ContentReader: contentReader,
},
},
Message: apiOpts.Message,
OldBranch: apiOpts.BranchName,
NewBranch: apiOpts.NewBranchName,
Committer: &files_service.IdentityOptions{
Name: apiOpts.Committer.Name,
Email: apiOpts.Committer.Email,
},
Author: &files_service.IdentityOptions{
Name: apiOpts.Author.Name,
Email: apiOpts.Author.Email,
},
Dates: &files_service.CommitDateOptions{
Author: apiOpts.Dates.Author,
Committer: apiOpts.Dates.Committer,
},
Signoff: apiOpts.Signoff,
}
if opts.Dates.Author.IsZero() {
opts.Dates.Author = time.Now()
}
if opts.Dates.Committer.IsZero() {
opts.Dates.Committer = time.Now()
}
if opts.Message == "" {
opts.Message = changeFilesCommitMessage(ctx, opts.Files)
}
if filesResponse, err := createOrUpdateFiles(ctx, opts); err != nil {
handleCreateOrUpdateFileError(ctx, err)
} else {
fileResponse := files_service.GetFileResponseFromFilesResponse(filesResponse, 0)
ctx.JSON(http.StatusCreated, fileResponse)
}
}
// UpdateFile handles API call for updating a file
func UpdateFile(ctx *context.APIContext) {
// swagger:operation PUT /repos/{owner}/{repo}/contents/{filepath} repository repoUpdateFile
// ---
// summary: Update a file in a repository
// consumes:
// - application/json
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: filepath
// in: path
// description: path of the file to update
// type: string
// required: true
// - name: body
// in: body
// required: true
// schema:
// "$ref": "#/definitions/UpdateFileOptions"
// responses:
// "200":
// "$ref": "#/responses/FileResponse"
// "403":
// "$ref": "#/responses/error"
// "404":
// "$ref": "#/responses/notFound"
// "413":
// "$ref": "#/responses/quotaExceeded"
// "422":
// "$ref": "#/responses/error"
// "423":
// "$ref": "#/responses/repoArchivedError"
apiOpts := web.GetForm(ctx).(*api.UpdateFileOptions)
if ctx.Repo.Repository.IsEmpty {
ctx.Error(http.StatusUnprocessableEntity, "RepoIsEmpty", fmt.Errorf("repo is empty"))
return
}
if apiOpts.BranchName == "" {
apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
}
contentReader, err := base64Reader(apiOpts.ContentBase64)
if err != nil {
ctx.Error(http.StatusUnprocessableEntity, "Invalid base64 content", err)
return
}
opts := &files_service.ChangeRepoFilesOptions{
Files: []*files_service.ChangeRepoFile{
{
Operation: "update",
ContentReader: contentReader,
SHA: apiOpts.SHA,
FromTreePath: apiOpts.FromPath,
TreePath: ctx.Params("*"),
},
},
Message: apiOpts.Message,
OldBranch: apiOpts.BranchName,
NewBranch: apiOpts.NewBranchName,
Committer: &files_service.IdentityOptions{
Name: apiOpts.Committer.Name,
Email: apiOpts.Committer.Email,
},
Author: &files_service.IdentityOptions{
Name: apiOpts.Author.Name,
Email: apiOpts.Author.Email,
},
Dates: &files_service.CommitDateOptions{
Author: apiOpts.Dates.Author,
Committer: apiOpts.Dates.Committer,
},
Signoff: apiOpts.Signoff,
}
if opts.Dates.Author.IsZero() {
opts.Dates.Author = time.Now()
}
if opts.Dates.Committer.IsZero() {
opts.Dates.Committer = time.Now()
}
if opts.Message == "" {
opts.Message = changeFilesCommitMessage(ctx, opts.Files)
}
if filesResponse, err := createOrUpdateFiles(ctx, opts); err != nil {
handleCreateOrUpdateFileError(ctx, err)
} else {
fileResponse := files_service.GetFileResponseFromFilesResponse(filesResponse, 0)
ctx.JSON(http.StatusOK, fileResponse)
}
}
func handleCreateOrUpdateFileError(ctx *context.APIContext, err error) {
if models.IsErrUserCannotCommit(err) || models.IsErrFilePathProtected(err) {
ctx.Error(http.StatusForbidden, "Access", err)
return
}
if git_model.IsErrBranchAlreadyExists(err) || models.IsErrFilenameInvalid(err) || models.IsErrSHADoesNotMatch(err) ||
models.IsErrFilePathInvalid(err) || models.IsErrRepoFileAlreadyExists(err) {
ctx.Error(http.StatusUnprocessableEntity, "Invalid", err)
return
}
if git_model.IsErrBranchNotExist(err) || git.IsErrBranchNotExist(err) {
ctx.Error(http.StatusNotFound, "BranchDoesNotExist", err)
return
}
ctx.Error(http.StatusInternalServerError, "UpdateFile", err)
}
// Called from both CreateFile and UpdateFile to handle both cases
func createOrUpdateFiles(ctx *context.APIContext, opts *files_service.ChangeRepoFilesOptions) (*api.FilesResponse, error) {
if !canWriteFiles(ctx, opts.OldBranch) {
return nil, repo_model.ErrUserDoesNotHaveAccessToRepo{
UserID: ctx.Doer.ID,
RepoName: ctx.Repo.Repository.LowerName,
}
}
return files_service.ChangeRepoFiles(ctx, ctx.Repo.Repository, ctx.Doer, opts)
}
// format commit message if empty
func changeFilesCommitMessage(ctx *context.APIContext, files []*files_service.ChangeRepoFile) string {
var (
createFiles []string
updateFiles []string
deleteFiles []string
)
for _, file := range files {
switch file.Operation {
case "create":
createFiles = append(createFiles, file.TreePath)
case "update":
updateFiles = append(updateFiles, file.TreePath)
case "delete":
deleteFiles = append(deleteFiles, file.TreePath)
}
}
message := ""
if len(createFiles) != 0 {
message += ctx.Locale.TrString("repo.editor.add", strings.Join(createFiles, ", ")+"\n")
}
if len(updateFiles) != 0 {
message += ctx.Locale.TrString("repo.editor.update", strings.Join(updateFiles, ", ")+"\n")
}
if len(deleteFiles) != 0 {
message += ctx.Locale.TrString("repo.editor.delete", strings.Join(deleteFiles, ", "))
}
return strings.Trim(message, "\n")
}
// DeleteFile Delete a file in a repository
func DeleteFile(ctx *context.APIContext) {
// swagger:operation DELETE /repos/{owner}/{repo}/contents/{filepath} repository repoDeleteFile
// ---
// summary: Delete a file in a repository
// consumes:
// - application/json
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: filepath
// in: path
// description: path of the file to delete
// type: string
// required: true
// - name: body
// in: body
// required: true
// schema:
// "$ref": "#/definitions/DeleteFileOptions"
// responses:
// "200":
// "$ref": "#/responses/FileDeleteResponse"
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/error"
// "404":
// "$ref": "#/responses/error"
// "413":
// "$ref": "#/responses/quotaExceeded"
// "423":
// "$ref": "#/responses/repoArchivedError"
apiOpts := web.GetForm(ctx).(*api.DeleteFileOptions)
if !canWriteFiles(ctx, apiOpts.BranchName) {
ctx.Error(http.StatusForbidden, "DeleteFile", repo_model.ErrUserDoesNotHaveAccessToRepo{
UserID: ctx.Doer.ID,
RepoName: ctx.Repo.Repository.LowerName,
})
return
}
if apiOpts.BranchName == "" {
apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
}
opts := &files_service.ChangeRepoFilesOptions{
Files: []*files_service.ChangeRepoFile{
{
Operation: "delete",
SHA: apiOpts.SHA,
TreePath: ctx.Params("*"),
},
},
Message: apiOpts.Message,
OldBranch: apiOpts.BranchName,
NewBranch: apiOpts.NewBranchName,
Committer: &files_service.IdentityOptions{
Name: apiOpts.Committer.Name,
Email: apiOpts.Committer.Email,
},
Author: &files_service.IdentityOptions{
Name: apiOpts.Author.Name,
Email: apiOpts.Author.Email,
},
Dates: &files_service.CommitDateOptions{
Author: apiOpts.Dates.Author,
Committer: apiOpts.Dates.Committer,
},
Signoff: apiOpts.Signoff,
}
if opts.Dates.Author.IsZero() {
opts.Dates.Author = time.Now()
}
if opts.Dates.Committer.IsZero() {
opts.Dates.Committer = time.Now()
}
if opts.Message == "" {
opts.Message = changeFilesCommitMessage(ctx, opts.Files)
}
if filesResponse, err := files_service.ChangeRepoFiles(ctx, ctx.Repo.Repository, ctx.Doer, opts); err != nil {
if git.IsErrBranchNotExist(err) || models.IsErrRepoFileDoesNotExist(err) || git.IsErrNotExist(err) {
ctx.Error(http.StatusNotFound, "DeleteFile", err)
return
} else if git_model.IsErrBranchAlreadyExists(err) ||
models.IsErrFilenameInvalid(err) ||
models.IsErrSHADoesNotMatch(err) ||
models.IsErrCommitIDDoesNotMatch(err) ||
models.IsErrSHAOrCommitIDNotProvided(err) {
ctx.Error(http.StatusBadRequest, "DeleteFile", err)
return
} else if models.IsErrUserCannotCommit(err) {
ctx.Error(http.StatusForbidden, "DeleteFile", err)
return
}
ctx.Error(http.StatusInternalServerError, "DeleteFile", err)
} else {
fileResponse := files_service.GetFileResponseFromFilesResponse(filesResponse, 0)
ctx.JSON(http.StatusOK, fileResponse) // FIXME on APIv2: return http.StatusNoContent
}
}
// GetContents Get the metadata and contents (if a file) of an entry in a repository, or a list of entries if a dir
func GetContents(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/contents/{filepath} repository repoGetContents
// ---
// summary: Gets the metadata and contents (if a file) of an entry in a repository, or a list of entries if a dir
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: filepath
// in: path
// description: path of the dir, file, symlink or submodule in the repo
// type: string
// required: true
// - name: ref
// in: query
// description: "The name of the commit/branch/tag. Default: the repository's default branch (usually master)"
// type: string
// required: false
// responses:
// "200":
// "$ref": "#/responses/ContentsResponse"
// "404":
// "$ref": "#/responses/notFound"
if !canReadFiles(ctx.Repo) {
ctx.Error(http.StatusInternalServerError, "GetContentsOrList", repo_model.ErrUserDoesNotHaveAccessToRepo{
UserID: ctx.Doer.ID,
RepoName: ctx.Repo.Repository.LowerName,
})
return
}
treePath := ctx.Params("*")
ref := ctx.FormTrim("ref")
if fileList, err := files_service.GetContentsOrList(ctx, ctx.Repo.Repository, treePath, ref); err != nil {
if git.IsErrNotExist(err) {
ctx.NotFound("GetContentsOrList", err)
return
}
ctx.Error(http.StatusInternalServerError, "GetContentsOrList", err)
} else {
ctx.JSON(http.StatusOK, fileList)
}
}
// GetContentsList Get the metadata of all the entries of the root dir
func GetContentsList(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/contents repository repoGetContentsList
// ---
// summary: Gets the metadata of all the entries of the root dir
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: ref
// in: query
// description: "The name of the commit/branch/tag. Default: the repository's default branch (usually master)"
// type: string
// required: false
// responses:
// "200":
// "$ref": "#/responses/ContentsListResponse"
// "404":
// "$ref": "#/responses/notFound"
// same as GetContents(), this function is here because swagger fails if path is empty in GetContents() interface
GetContents(ctx)
}