// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repofiles

import (
	"fmt"
	"os"
	"path"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/lfs"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
)

// UploadRepoFileOptions contains the uploaded repository file options
type UploadRepoFileOptions struct {
	LastCommitID string
	OldBranch    string
	NewBranch    string
	TreePath     string
	Message      string
	Files        []string // In UUID format.
	Signoff      bool
}

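// uploadInfo pairs a models.Upload record with the LFS meta object that gets
// created for it when the file is stored as an LFS pointer (nil otherwise).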
type uploadInfo struct {
	upload        *models.Upload
	lfsMetaObject *models.LFSMetaObject
}

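// cleanUpAfterFailure removes the LFS meta objects that were newly created for
// this upload and appends any removal errors to the original error.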
func cleanUpAfterFailure(infos *[]uploadInfo, t *TemporaryUploadRepository, original error) error {
	for _, info := range *infos {
		if info.lfsMetaObject == nil {
			continue
		}
		if !info.lfsMetaObject.Existing {
			if _, err := t.repo.RemoveLFSMetaObjectByOid(info.lfsMetaObject.Oid); err != nil {
				original = fmt.Errorf("%v, %v", original, err)
			}
		}
	}
	return original
}

// UploadRepoFiles uploads files to the given repository
func UploadRepoFiles(repo *models.Repository, doer *models.User, opts *UploadRepoFileOptions) error {
	if len(opts.Files) == 0 {
		return nil
	}

	uploads, err := models.GetUploadsByUUIDs(opts.Files)
	if err != nil {
		return fmt.Errorf("GetUploadsByUUIDs [uuids: %v]: %v", opts.Files, err)
	}

	names := make([]string, len(uploads))
	infos := make([]uploadInfo, len(uploads))
	for i, upload := range uploads {
		// Check that the file is not LFS locked by someone else.
		// GetTreePathLock returns nil if LFS file locking is not enabled.
		filepath := path.Join(opts.TreePath, upload.Name)
		lfsLock, err := repo.GetTreePathLock(filepath)
		if err != nil {
			return err
		}
		if lfsLock != nil && lfsLock.OwnerID != doer.ID {
			return models.ErrLFSFileLocked{RepoID: repo.ID, Path: filepath, UserName: lfsLock.Owner.Name}
		}

		names[i] = upload.Name
		infos[i] = uploadInfo{upload: upload}
	}

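	// Build the new commit in a temporary upload repository: clone OldBranch,
	// populate the index with the uploaded files, write a tree, commit it and
	// finally push the result to NewBranch.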
	t, err := NewTemporaryUploadRepository(repo)
	if err != nil {
		return err
	}
	defer t.Close()
	if err := t.Clone(opts.OldBranch); err != nil {
		return err
	}
	if err := t.SetDefaultIndex(); err != nil {
		return err
	}

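	// Ask git check-attr which of the uploaded filenames have the "filter"
	// attribute set to "lfs" (typically via .gitattributes), so those files can
	// be stored as LFS pointers instead of full blobs.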
	var filename2attribute2info map[string]map[string]string
	if setting.LFS.StartServer {
		filename2attribute2info, err = t.gitRepo.CheckAttribute(git.CheckAttributeOpts{
			Attributes: []string{"filter"},
			Filenames:  names,
		})
		if err != nil {
			return err
		}
	}

	// Copy uploaded files into repository.
	for i := range infos {
		if err := copyUploadedLFSFileIntoRepository(&infos[i], filename2attribute2info, t, opts.TreePath); err != nil {
			return err
		}
	}

	// Now write the tree
	treeHash, err := t.WriteTree()
	if err != nil {
		return err
	}

	// make author and committer the doer
	author := doer
	committer := doer

	// Now commit the tree
	commitHash, err := t.CommitTree(author, committer, treeHash, opts.Message, opts.Signoff)
	if err != nil {
		return err
	}

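	// LFS objects are handled in two phases: the meta objects are recorded in
	// the database first (these can be removed again on failure), and only then
	// is the content uploaded to the store (which cannot be cleaned up).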
	// Now deal with LFS objects
	for i := range infos {
		if infos[i].lfsMetaObject == nil {
			continue
		}
		infos[i].lfsMetaObject, err = models.NewLFSMetaObject(infos[i].lfsMetaObject)
		if err != nil {
			// OK, now we need to clean up
			return cleanUpAfterFailure(&infos, t, err)
		}
		// Don't move the files yet - we need to ensure that
		// everything can be inserted first
	}

	// OK, now we can insert the data into the content store. Note that there is
	// no way to clean up the store afterwards - once it's in there, it's in there.
	contentStore := &lfs.ContentStore{ObjectStorage: storage.LFS}
	for _, info := range infos {
		if err := uploadToLFSContentStore(info, contentStore); err != nil {
			return cleanUpAfterFailure(&infos, t, err)
		}
	}

	// Then push this tree to NewBranch
	if err := t.Push(doer, commitHash, opts.NewBranch); err != nil {
		return err
	}

	return models.DeleteUploads(uploads...)
}

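// copyUploadedLFSFileIntoRepository adds one uploaded file to the temporary
// repository's index. If the file is LFS-tracked, only an LFS pointer is
// hashed into the index and the LFS metadata is recorded on info; otherwise
// the full file content is hashed in.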
func copyUploadedLFSFileIntoRepository(info *uploadInfo, filename2attribute2info map[string]map[string]string, t *TemporaryUploadRepository, treePath string) error {
	file, err := os.Open(info.upload.LocalPath())
	if err != nil {
		return err
	}
	defer file.Close()

	var objectHash string
	if setting.LFS.StartServer && filename2attribute2info[info.upload.Name] != nil && filename2attribute2info[info.upload.Name]["filter"] == "lfs" {
		// Handle LFS
		// FIXME: Inefficient! This should probably happen in models.Upload
		oid, err := models.GenerateLFSOid(file)
		if err != nil {
			return err
		}
		fileInfo, err := file.Stat()
		if err != nil {
			return err
		}

		info.lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: fileInfo.Size(), RepositoryID: t.repo.ID}

		if objectHash, err = t.HashObject(strings.NewReader(info.lfsMetaObject.Pointer())); err != nil {
			return err
		}
	} else if objectHash, err = t.HashObject(file); err != nil {
		return err
	}

	// Add the object to the index
	return t.AddObjectToIndex("100644", objectHash, path.Join(treePath, info.upload.Name))
}

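// uploadToLFSContentStore copies the uploaded file into the LFS content store
// if it is LFS-tracked and its content is not already present there.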
func uploadToLFSContentStore(info uploadInfo, contentStore *lfs.ContentStore) error {
	if info.lfsMetaObject == nil {
		return nil
	}
	exist, err := contentStore.Exists(info.lfsMetaObject)
	if err != nil {
		return err
	}
	if !exist {
		file, err := os.Open(info.upload.LocalPath())
		if err != nil {
			return err
		}

		defer file.Close()
		// FIXME: Put regenerates the hash and copies the file over.
		// I guess this strictly ensures the soundness of the store, but it is inefficient.
		if err := contentStore.Put(info.lfsMetaObject, file); err != nil {
			// OK, now we need to clean up - but we can't clean up the store;
			// once objects have been uploaded there, they're there.
			return err
		}
	}
	return nil
}