Mirror of https://github.com/1Panel-dev/1Panel.git (synced 2024-11-24 02:59:16 +08:00)
feat: change the way backup accounts are synchronized (#6108)
Parent: d100edb75b
Commit: f0274701cf
@@ -10,20 +10,6 @@ import (
     "github.com/gin-gonic/gin"
 )
 
-func (b *BaseApi) OperateBackup(c *gin.Context) {
-    var req dto.BackupOperate
-    if err := helper.CheckBindAndValidate(&req, c); err != nil {
-        helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInvalidParams, err)
-        return
-    }
-
-    if err := backupService.Operate(req); err != nil {
-        helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInternalServer, err)
-        return
-    }
-    helper.SuccessWithData(c, nil)
-}
-
 // @Tags Backup Account
 // @Summary Page backup records
 // @Description 获取备份记录列表分页
@@ -125,12 +111,12 @@ func (b *BaseApi) DeleteBackupRecord(c *gin.Context) {
 // @Summary List files from backup accounts
 // @Description 获取备份账号内文件列表
 // @Accept json
-// @Param request body dto.BackupSearchFile true "request"
+// @Param request body dto.OperateByID true "request"
 // @Success 200 {array} string
 // @Security ApiKeyAuth
 // @Router /settings/backup/search/files [post]
 func (b *BaseApi) LoadFilesFromBackup(c *gin.Context) {
-    var req dto.BackupSearchFile
+    var req dto.OperateByID
     if err := helper.CheckBindAndValidate(&req, c); err != nil {
         return
     }
@@ -199,7 +185,11 @@ func (b *BaseApi) Recover(c *gin.Context) {
         return
     }
 
-    downloadPath, err := backupService.DownloadRecord(dto.DownloadRecord{Source: req.Source, FileDir: path.Dir(req.File), FileName: path.Base(req.File)})
+    downloadPath, err := backupService.DownloadRecord(dto.DownloadRecord{
+        DownloadAccountID: req.BackupAccountID,
+        FileDir:           path.Dir(req.File),
+        FileName:          path.Base(req.File),
+    })
     if err != nil {
         helper.ErrorWithDetail(c, constant.CodeErrBadRequest, constant.ErrTypeInvalidParams, fmt.Errorf("download file failed, err: %v", err))
         return
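Not part of the commit: a minimal sketch of how a recover request is now expressed, assuming the dto definitions shown in the hunks that follow. The caller passes a backup account ID instead of a source-type string such as "S3".

    // Illustrative only; all values are hypothetical.
    req := dto.CommonRecover{
        BackupAccountID: 3, // ID of the backup account, replacing the old Source field
        Type:            "mysql",
        Name:            "local-mysql",
        DetailName:      "app_db",
        File:            "database/mysql/local-mysql/app_db/app_db_20240901.sql.gz",
    }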
@@ -2,26 +2,17 @@ package dto
 
 import (
     "time"
 
-    "github.com/1Panel-dev/1Panel/agent/app/model"
 )
 
-type BackupOperate struct {
-    Operate string                `json:"operate" validate:"required,oneof=add remove update"`
-    Data    []model.BackupAccount `json:"data" validate:"required"`
-}
-
 type BackupInfo struct {
     ID         uint `json:"id"`
-    CreatedAt  time.Time `json:"createdAt"`
-    Type       string    `json:"type"`
+    Name       string `json:"name"`
+    Type       string `json:"type" validate:"required"`
     Bucket     string `json:"bucket"`
-    BackupPath string `json:"backupPath"`
-    Vars       string `json:"vars"`
-}
-
-type BackupSearchFile struct {
-    Type string `json:"type" validate:"required"`
+    AccessKey  string `json:"accessKey"`
+    Credential string `json:"credential"`
+    BackupPath string `json:"backupPath"`
+    Vars       string `json:"vars" validate:"required"`
 }
 
 type CommonBackup struct {
@@ -31,12 +22,12 @@ type CommonBackup struct {
     Secret     string `json:"secret"`
 }
 type CommonRecover struct {
-    Source     string `json:"source" validate:"required,oneof=OSS S3 SFTP MINIO LOCAL COS KODO OneDrive WebDAV"`
+    BackupAccountID uint `json:"backupAccountID" validate:"required"`
     Type       string `json:"type" validate:"required,oneof=app mysql mariadb redis website postgresql"`
     Name       string `json:"name"`
     DetailName string `json:"detailName"`
     File       string `json:"file"`
     Secret     string `json:"secret"`
 }
 
 type RecordSearch struct {
@@ -62,7 +53,7 @@ type BackupRecords struct {
 }
 
 type DownloadRecord struct {
-    Source   string `json:"source" validate:"required,oneof=OSS S3 SFTP MINIO LOCAL COS KODO OneDrive WebDAV"`
+    DownloadAccountID uint `json:"downloadAccountID" validate:"required"`
     FileDir  string `json:"fileDir" validate:"required"`
     FileName string `json:"fileName" validate:"required"`
 }
@@ -48,11 +48,11 @@ type SnapshotStatus struct {
 }
 
 type SnapshotCreate struct {
     ID              uint   `json:"id"`
-    From            string `json:"from" validate:"required"`
-    DefaultDownload string `json:"defaultDownload" validate:"required"`
+    SourceAccountIDs  string `json:"sourceAccountsIDs" validate:"required"`
+    DownloadAccountID uint   `json:"downloadAccountID" validate:"required"`
     Description     string `json:"description" validate:"max=256"`
     Secret          string `json:"secret"`
 }
 type SnapshotRecover struct {
     IsNew bool `json:"isNew"`
@@ -66,9 +66,9 @@ type SnapshotBatchDelete struct {
 }
 
 type SnapshotImport struct {
-    From        string   `json:"from"`
+    BackupAccountID uint     `json:"backupAccountID"`
     Names       []string `json:"names"`
     Description string   `json:"description" validate:"max=256"`
 }
 
 type SnapshotInfo struct {
@@ -1,25 +1,15 @@
 package model
 
-type BackupAccount struct {
-    BaseModel
-    Name       string `gorm:"type:varchar(64);unique;not null" json:"name"`
-    Type       string `gorm:"type:varchar(64);unique;not null" json:"type"`
-    Bucket     string `gorm:"type:varchar(256)" json:"bucket"`
-    AccessKey  string `gorm:"type:varchar(256)" json:"accessKey"`
-    Credential string `gorm:"type:varchar(256)" json:"credential"`
-    BackupPath string `gorm:"type:varchar(256)" json:"backupPath"`
-    Vars       string `gorm:"type:longText" json:"vars"`
-}
-
 type BackupRecord struct {
     BaseModel
-    From       string `gorm:"type:varchar(64)" json:"from"`
-    CronjobID  uint   `gorm:"type:decimal" json:"cronjobID"`
-    Type       string `gorm:"type:varchar(64);not null" json:"type"`
-    Name       string `gorm:"type:varchar(64);not null" json:"name"`
-    DetailName string `gorm:"type:varchar(256)" json:"detailName"`
-    Source     string `gorm:"type:varchar(256)" json:"source"`
-    BackupType string `gorm:"type:varchar(256)" json:"backupType"`
-    FileDir    string `gorm:"type:varchar(256)" json:"fileDir"`
-    FileName   string `gorm:"type:varchar(256)" json:"fileName"`
+    From              string `json:"from"`
+    CronjobID         uint   `json:"cronjobID"`
+    SourceAccountIDs  string `json:"sourceAccountsIDs"`
+    DownloadAccountID uint   `json:"downloadAccountID"`
+    Type              string `gorm:"not null" json:"type"`
+    Name              string `gorm:"not null" json:"name"`
+    DetailName        string `json:"detailName"`
+    FileDir           string `json:"fileDir"`
+    FileName          string `json:"fileName"`
 }
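Not part of the commit: a short sketch of how the new record fields are meant to be consumed, assuming the helpers added later in this diff. SourceAccountIDs is stored as a comma-separated list of account IDs, so callers split it before resolving storage clients; the cronjob service hunk near the end of this diff does exactly this.

    // Fragment assuming the agent service package context.
    ids := strings.Split(record.SourceAccountIDs, ",")
    clients, err := NewBackupClientMap(ids) // entries are keyed by account name
    if err != nil {
        return err
    }
    // record.DownloadAccountID identifies the single account used to fetch the file back.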
@@ -7,44 +7,40 @@ import (
 type Cronjob struct {
     BaseModel
 
-    Name string `gorm:"type:varchar(64);not null" json:"name"`
-    Type string `gorm:"type:varchar(64);not null" json:"type"`
-    Spec string `gorm:"type:varchar(64);not null" json:"spec"`
+    Name string `gorm:"not null" json:"name"`
+    Type string `gorm:"not null" json:"type"`
+    Spec string `gorm:"not null" json:"spec"`
 
-    Command        string `gorm:"type:varchar(64)" json:"command"`
-    ContainerName  string `gorm:"type:varchar(64)" json:"containerName"`
-    Script         string `gorm:"longtext" json:"script"`
-    Website        string `gorm:"type:varchar(64)" json:"website"`
-    AppID          string `gorm:"type:varchar(64)" json:"appID"`
-    DBType         string `gorm:"type:varchar(64)" json:"dbType"`
-    DBName         string `gorm:"type:varchar(64)" json:"dbName"`
-    URL            string `gorm:"type:varchar(256)" json:"url"`
-    SourceDir      string `gorm:"type:varchar(256)" json:"sourceDir"`
-    ExclusionRules string `gorm:"longtext" json:"exclusionRules"`
+    Command        string `json:"command"`
+    ContainerName  string `json:"containerName"`
+    Script         string `json:"script"`
+    Website        string `json:"website"`
+    AppID          string `json:"appID"`
+    DBType         string `json:"dbType"`
+    DBName         string `json:"dbName"`
+    URL            string `json:"url"`
+    SourceDir      string `json:"sourceDir"`
+    ExclusionRules string `json:"exclusionRules"`
 
-    // 已废弃
-    KeepLocal   bool   `gorm:"type:varchar(64)" json:"keepLocal"`
-    TargetDirID uint64 `gorm:"type:decimal" json:"targetDirID"`
-
-    BackupAccounts  string `gorm:"type:varchar(64)" json:"backupAccounts"`
-    DefaultDownload string `gorm:"type:varchar(64)" json:"defaultDownload"`
-    RetainCopies    uint64 `gorm:"type:decimal" json:"retainCopies"`
-
-    Status   string `gorm:"type:varchar(64)" json:"status"`
-    EntryIDs string `gorm:"type:varchar(64)" json:"entryIDs"`
+    SourceAccountIDs  string `json:"sourceAccountsIDs"`
+    DownloadAccountID uint   `json:"downloadAccountID"`
+    RetainCopies      uint64 `json:"retainCopies"`
+    Status            string `json:"status"`
+    EntryIDs          string `json:"entryIDs"`
     Records  []JobRecords `json:"records"`
-    Secret   string       `gorm:"type:varchar(64)" json:"secret"`
+    Secret   string       `json:"secret"`
 }
 
 type JobRecords struct {
     BaseModel
 
-    CronjobID uint      `gorm:"type:decimal" json:"cronjobID"`
-    StartTime time.Time `gorm:"type:datetime" json:"startTime"`
-    Interval  float64   `gorm:"type:float" json:"interval"`
-    Records   string    `gorm:"longtext" json:"records"`
-    FromLocal bool      `gorm:"type:varchar(64)" json:"source"`
-    File      string    `gorm:"type:varchar(256)" json:"file"`
-    Status    string    `gorm:"type:varchar(64)" json:"status"`
-    Message   string    `gorm:"longtext" json:"message"`
+    CronjobID uint      `json:"cronjobID"`
+    StartTime time.Time `json:"startTime"`
+    Interval  float64   `json:"interval"`
+    Records   string    `json:"records"`
+    FromLocal bool      `json:"source"`
+    File      string    `json:"file"`
+    Status    string    `json:"status"`
+    Message   string    `json:"message"`
 }
@@ -2,34 +2,34 @@ package model
 
 type Snapshot struct {
     BaseModel
-    Name            string `json:"name" gorm:"type:varchar(64);not null;unique"`
-    Description     string `json:"description" gorm:"type:varchar(256)"`
-    From            string `json:"from"`
-    DefaultDownload string `json:"defaultDownload" gorm:"type:varchar(64)"`
-    Status          string `json:"status" gorm:"type:varchar(64)"`
-    Message         string `json:"message" gorm:"type:varchar(256)"`
-    Version         string `json:"version" gorm:"type:varchar(256)"`
+    Name              string `json:"name" gorm:"not null;unique"`
+    Description       string `json:"description"`
+    SourceAccountIDs  string `json:"sourceAccountsIDs"`
+    DownloadAccountID uint   `json:"downloadAccountID"`
+    Status            string `json:"status"`
+    Message           string `json:"message"`
+    Version           string `json:"version"`
 
-    InterruptStep    string `json:"interruptStep" gorm:"type:varchar(64)"`
-    RecoverStatus    string `json:"recoverStatus" gorm:"type:varchar(64)"`
-    RecoverMessage   string `json:"recoverMessage" gorm:"type:varchar(256)"`
-    LastRecoveredAt  string `json:"lastRecoveredAt" gorm:"type:varchar(64)"`
-    RollbackStatus   string `json:"rollbackStatus" gorm:"type:varchar(64)"`
-    RollbackMessage  string `json:"rollbackMessage" gorm:"type:varchar(256)"`
-    LastRollbackedAt string `json:"lastRollbackedAt" gorm:"type:varchar(64)"`
+    InterruptStep   string `json:"interruptStep"`
+    RecoverStatus   string `json:"recoverStatus"`
+    RecoverMessage  string `json:"recoverMessage"`
+    LastRecoveredAt string `json:"lastRecoveredAt"`
+    RollbackStatus  string `json:"rollbackStatus"`
+    RollbackMessage string `json:"rollbackMessage"`
+    LastRollbackAt  string `json:"lastRollbackAt"`
 }
 
 type SnapshotStatus struct {
     BaseModel
-    SnapID     uint   `gorm:"type:decimal" json:"snapID"`
-    Panel      string `json:"panel" gorm:"type:varchar(64);default:Running"`
-    PanelInfo  string `json:"panelInfo" gorm:"type:varchar(64);default:Running"`
-    DaemonJson string `json:"daemonJson" gorm:"type:varchar(64);default:Running"`
-    AppData    string `json:"appData" gorm:"type:varchar(64);default:Running"`
-    PanelData  string `json:"panelData" gorm:"type:varchar(64);default:Running"`
-    BackupData string `json:"backupData" gorm:"type:varchar(64);default:Running"`
+    SnapID     uint   `json:"snapID"`
+    Panel      string `json:"panel" gorm:"default:Running"`
+    PanelInfo  string `json:"panelInfo" gorm:"default:Running"`
+    DaemonJson string `json:"daemonJson" gorm:"default:Running"`
+    AppData    string `json:"appData" gorm:"default:Running"`
+    PanelData  string `json:"panelData" gorm:"default:Running"`
+    BackupData string `json:"backupData" gorm:"default:Running"`
 
-    Compress string `json:"compress" gorm:"type:varchar(64);default:Waiting"`
-    Size     string `json:"size" gorm:"type:varchar(64)"`
-    Upload   string `json:"upload" gorm:"type:varchar(64);default:Waiting"`
+    Compress string `json:"compress" gorm:"default:Waiting"`
+    Size     string `json:"size" `
+    Upload   string `json:"upload" gorm:"default:Waiting"`
 }
@@ -11,12 +11,6 @@ import (
 type BackupRepo struct{}
 
 type IBackupRepo interface {
-    Get(opts ...DBOption) (model.BackupAccount, error)
-    List(opts ...DBOption) ([]model.BackupAccount, error)
-    Create(backup []model.BackupAccount) error
-    Save(backup *model.BackupAccount) error
-    Delete(opts ...DBOption) error
-
     ListRecord(opts ...DBOption) ([]model.BackupRecord, error)
     PageRecord(page, size int, opts ...DBOption) (int64, []model.BackupRecord, error)
     CreateRecord(record *model.BackupRecord) error
@@ -24,7 +18,6 @@ type IBackupRepo interface {
     UpdateRecord(record *model.BackupRecord) error
     WithByDetailName(detailName string) DBOption
     WithByFileName(fileName string) DBOption
-    WithByType(backupType string) DBOption
     WithByCronID(cronjobID uint) DBOption
 }
 
@@ -32,20 +25,6 @@ func NewIBackupRepo() IBackupRepo {
     return &BackupRepo{}
 }
 
-func (u *BackupRepo) Get(opts ...DBOption) (model.BackupAccount, error) {
-    var backup model.BackupAccount
-    db := global.DB
-    for _, opt := range opts {
-        db = opt(db)
-    }
-    err := db.First(&backup).Error
-    return backup, err
-}
-
-func (u *BackupRepo) Save(backup *model.BackupAccount) error {
-    return global.DB.Save(backup).Error
-}
-
 func (u *BackupRepo) ListRecord(opts ...DBOption) ([]model.BackupRecord, error) {
     var users []model.BackupRecord
     db := global.DB.Model(&model.BackupRecord{})
@@ -86,29 +65,6 @@ func (u *BackupRepo) WithByFileName(fileName string) DBOption {
     }
 }
 
-func (u *BackupRepo) WithByType(backupType string) DBOption {
-    return func(g *gorm.DB) *gorm.DB {
-        if len(backupType) == 0 {
-            return g
-        }
-        return g.Where("type = ?", backupType)
-    }
-}
-
-func (u *BackupRepo) List(opts ...DBOption) ([]model.BackupAccount, error) {
-    var ops []model.BackupAccount
-    db := global.DB.Model(&model.BackupAccount{})
-    for _, opt := range opts {
-        db = opt(db)
-    }
-    err := db.Find(&ops).Error
-    return ops, err
-}
-
-func (u *BackupRepo) Create(backup []model.BackupAccount) error {
-    return global.DB.Create(backup).Error
-}
-
 func (u *BackupRepo) CreateRecord(record *model.BackupRecord) error {
     return global.DB.Create(record).Error
 }
@@ -117,14 +73,6 @@ func (u *BackupRepo) UpdateRecord(record *model.BackupRecord) error {
     return global.DB.Save(record).Error
 }
 
-func (u *BackupRepo) Delete(opts ...DBOption) error {
-    db := global.DB
-    for _, opt := range opts {
-        db = opt(db)
-    }
-    return db.Delete(&model.BackupAccount{}).Error
-}
-
 func (u *BackupRepo) DeleteRecord(ctx context.Context, opts ...DBOption) error {
     return getTx(ctx, opts...).Delete(&model.BackupRecord{}).Error
 }
@@ -28,7 +28,7 @@ type ICronjobRepo interface {
     Update(id uint, vars map[string]interface{}) error
     Delete(opts ...DBOption) error
     DeleteRecord(opts ...DBOption) error
-    StartRecords(cronjobID uint, fromLocal bool, targetPath string) model.JobRecords
+    StartRecords(cronjobID uint, targetPath string) model.JobRecords
     UpdateRecords(id uint, vars map[string]interface{}) error
     EndRecords(record model.JobRecords, status, message, records string)
     PageRecords(page, size int, opts ...DBOption) (int64, []model.JobRecords, error)
@@ -142,11 +142,10 @@ func (c *CronjobRepo) WithByRecordDropID(id int) DBOption {
     }
 }
 
-func (u *CronjobRepo) StartRecords(cronjobID uint, fromLocal bool, targetPath string) model.JobRecords {
+func (u *CronjobRepo) StartRecords(cronjobID uint, targetPath string) model.JobRecords {
     var record model.JobRecords
     record.StartTime = time.Now()
     record.CronjobID = cronjobID
-    record.FromLocal = fromLocal
     record.Status = constant.StatusWaiting
     if err := global.DB.Create(&record).Error; err != nil {
         global.LOG.Errorf("create record status failed, err: %v", err)
@@ -5,6 +5,13 @@ import (
     "encoding/base64"
     "encoding/json"
     "fmt"
+    "net/http"
+    "os"
+    "path/filepath"
+    "reflect"
+    "strconv"
+    "strings"
+
     "github.com/1Panel-dev/1Panel/agent/app/dto"
     "github.com/1Panel-dev/1Panel/agent/app/dto/request"
     "github.com/1Panel-dev/1Panel/agent/app/dto/response"
@@ -22,12 +29,6 @@ import (
     httpUtil "github.com/1Panel-dev/1Panel/agent/utils/http"
     "github.com/1Panel-dev/1Panel/agent/utils/xpack"
     "gopkg.in/yaml.v3"
-    "net/http"
-    "os"
-    "path/filepath"
-    "reflect"
-    "strconv"
-    "strings"
 )
 
 type AppService struct {
@@ -91,10 +92,6 @@ func (a AppService) PageApp(req request.AppSearch) (interface{}, error) {
     }
     var appDTOs []*response.AppDto
     for _, ap := range apps {
-        ap.ReadMe = ""
-        ap.Website = ""
-        ap.Document = ""
-        ap.Github = ""
         appDTO := &response.AppDto{
             ID:   ap.ID,
             Name: ap.Name,
@@ -5,7 +5,6 @@ import (
     "encoding/base64"
     "encoding/json"
     "fmt"
-    "github.com/1Panel-dev/1Panel/agent/app/task"
     "log"
     "math"
     "net/http"
@@ -18,6 +17,8 @@ import (
     "strings"
     "time"
 
+    "github.com/1Panel-dev/1Panel/agent/app/task"
+
     "github.com/docker/docker/api/types"
 
     httpUtil "github.com/1Panel-dev/1Panel/agent/utils/http"
@@ -437,8 +438,7 @@ func deleteAppInstall(deleteReq request.AppInstallDelete) error {
         _ = os.RemoveAll(uploadDir)
     }
     if deleteReq.DeleteBackup {
-        localDir, _ := loadLocalDir()
-        backupDir := path.Join(localDir, fmt.Sprintf("app/%s/%s", install.App.Key, install.Name))
+        backupDir := path.Join(global.CONF.System.Backup, fmt.Sprintf("app/%s/%s", install.App.Key, install.Name))
         if _, err = os.Stat(backupDir); err == nil {
             t.LogWithOps(task.TaskDelete, i18n.GetMsgByKey("TaskBackup"))
             _ = os.RemoveAll(backupDir)
@@ -584,11 +584,7 @@ func upgradeInstall(req request.AppInstallUpgrade) error {
         if err != nil {
             return buserr.WithNameAndErr("ErrAppBackup", install.Name, err)
         }
-        localDir, err := loadLocalDir()
-        if err != nil {
-            return buserr.WithNameAndErr("ErrAppBackup", install.Name, err)
-        }
-        backupFile = path.Join(localDir, backupRecord.FileDir, backupRecord.FileName)
+        backupFile = path.Join(global.CONF.System.Backup, backupRecord.FileDir, backupRecord.FileName)
     }
     return nil
 }
@@ -709,7 +705,7 @@ func upgradeInstall(req request.AppInstallUpgrade) error {
     rollBackApp := func(t *task.Task) {
         if req.Backup {
             t.Log(i18n.GetWithName("AppRecover", install.Name))
-            if err := NewIBackupService().AppRecover(dto.CommonRecover{Name: install.App.Key, DetailName: install.Name, Type: "app", Source: constant.ResourceLocal, File: backupFile}); err != nil {
+            if err := NewIBackupService().AppRecover(dto.CommonRecover{Name: install.App.Key, DetailName: install.Name, Type: "app", BackupAccountID: 1, File: backupFile}); err != nil {
                 t.LogFailedWithErr(i18n.GetWithName("AppRecover", install.Name), err)
                 return
             }
@@ -1,9 +1,11 @@
 package service
 
 import (
+    "bytes"
     "context"
     "encoding/json"
     "fmt"
+    "net/http"
     "os"
     "path"
     "sort"
@@ -15,8 +17,7 @@ import (
     "github.com/1Panel-dev/1Panel/agent/constant"
     "github.com/1Panel-dev/1Panel/agent/global"
     "github.com/1Panel-dev/1Panel/agent/utils/cloud_storage"
-    "github.com/1Panel-dev/1Panel/agent/utils/encrypt"
-    fileUtils "github.com/1Panel-dev/1Panel/agent/utils/files"
+    httpUtils "github.com/1Panel-dev/1Panel/agent/utils/http"
     "github.com/jinzhu/copier"
     "github.com/pkg/errors"
 )
@@ -24,16 +25,13 @@ import (
 type BackupService struct{}
 
 type IBackupService interface {
-    Operate(req dto.BackupOperate) error
 
     SearchRecordsWithPage(search dto.RecordSearch) (int64, []dto.BackupRecords, error)
     SearchRecordsByCronjobWithPage(search dto.RecordSearchByCronjob) (int64, []dto.BackupRecords, error)
     DownloadRecord(info dto.DownloadRecord) (string, error)
     DeleteRecordByName(backupType, name, detailName string, withDeleteFile bool) error
     BatchDeleteRecord(ids []uint) error
-    NewClient(backup *model.BackupAccount) (cloud_storage.CloudStorageClient, error)
-
-    ListFiles(req dto.BackupSearchFile) []string
+    ListFiles(req dto.OperateByID) []string
 
     MysqlBackup(db dto.CommonBackup) error
     PostgresqlBackup(db dto.CommonBackup) error
@@ -99,36 +97,13 @@ type loadSizeHelper struct {
 }
 
 func (u *BackupService) DownloadRecord(info dto.DownloadRecord) (string, error) {
-    backup, _ := backupRepo.Get(commonRepo.WithByType(info.Source))
-    if backup.ID == 0 {
-        return "", constant.ErrRecordNotFound
-    }
-    if info.Source == "LOCAL" {
-        localDir, err := loadLocalDir()
-        if err != nil {
-            return "", err
-        }
-        return path.Join(localDir, info.FileDir, info.FileName), nil
-    }
-    varMap := make(map[string]interface{})
-    if err := json.Unmarshal([]byte(backup.Vars), &varMap); err != nil {
-        return "", err
-    }
-    varMap["bucket"] = backup.Bucket
-    switch backup.Type {
-    case constant.Sftp, constant.WebDAV:
-        varMap["username"] = backup.AccessKey
-        varMap["password"] = backup.Credential
-    case constant.OSS, constant.S3, constant.MinIo, constant.Cos, constant.Kodo:
-        varMap["accessKey"] = backup.AccessKey
-        varMap["secretKey"] = backup.Credential
-    case constant.OneDrive:
-        varMap["accessToken"] = backup.Credential
-    }
-    backClient, err := cloud_storage.NewCloudStorageClient(backup.Type, varMap)
+    account, client, err := NewBackupClientWithID(info.DownloadAccountID)
     if err != nil {
         return "", fmt.Errorf("new cloud storage client failed, err: %v", err)
     }
+    if account.Type == "LOCAL" {
+        return path.Join(global.CONF.System.Backup, info.FileDir, info.FileName), nil
+    }
     targetPath := fmt.Sprintf("%s/download/%s/%s", constant.DataDir, info.FileDir, info.FileName)
     if _, err := os.Stat(path.Dir(targetPath)); err != nil && os.IsNotExist(err) {
         if err = os.MkdirAll(path.Dir(targetPath), os.ModePerm); err != nil {
@@ -136,11 +111,11 @@ func (u *BackupService) DownloadRecord(info dto.DownloadRecord) (string, error)
         }
     }
     srcPath := fmt.Sprintf("%s/%s", info.FileDir, info.FileName)
-    if len(backup.BackupPath) != 0 {
-        srcPath = path.Join(strings.TrimPrefix(backup.BackupPath, "/"), srcPath)
+    if len(account.BackupPath) != 0 {
+        srcPath = path.Join(strings.TrimPrefix(account.BackupPath, "/"), srcPath)
     }
-    if exist, _ := backClient.Exist(srcPath); exist {
-        isOK, err := backClient.Download(srcPath, targetPath)
+    if exist, _ := client.Exist(srcPath); exist {
+        isOK, err := client.Download(srcPath, targetPath)
         if !isOK {
             return "", fmt.Errorf("cloud storage download failed, err: %v", err)
         }
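For readability, a condensed restatement of the new download flow (illustrative sketch only; it uses the identifiers from the hunk above and omits the BackupPath prefix handling):

    // Fragment assuming the agent service package context.
    account, client, err := NewBackupClientWithID(info.DownloadAccountID)
    if err != nil {
        return "", err
    }
    if account.Type == "LOCAL" {
        // local backups already live under the configured backup dir
        return path.Join(global.CONF.System.Backup, info.FileDir, info.FileName), nil
    }
    // remote backups are pulled into the panel data dir before being served
    srcPath := path.Join(info.FileDir, info.FileName)
    targetPath := path.Join(constant.DataDir, "download", srcPath)
    if exist, _ := client.Exist(srcPath); exist {
        if _, err := client.Download(srcPath, targetPath); err != nil {
            return "", err
        }
    }
    return targetPath, nil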
@@ -148,67 +123,6 @@ func (u *BackupService) DownloadRecord(info dto.DownloadRecord) (string, error)
     return targetPath, nil
 }
 
-func (u *BackupService) Operate(req dto.BackupOperate) error {
-    for i := 0; i < len(req.Data); i++ {
-        encryptKeyItem, err := encrypt.StringEncryptWithBase64(req.Data[i].AccessKey)
-        if err != nil {
-            return err
-        }
-        req.Data[i].AccessKey = encryptKeyItem
-        encryptCredentialItem, err := encrypt.StringEncryptWithBase64(req.Data[i].Credential)
-        if err != nil {
-            return err
-        }
-        req.Data[i].Credential = encryptCredentialItem
-    }
-    if req.Operate == "add" {
-        return backupRepo.Create(req.Data)
-    }
-    if req.Operate == "remove" {
-        var names []string
-        for _, item := range req.Data {
-            names = append(names, item.Name)
-        }
-        return backupRepo.Delete(commonRepo.WithNamesIn(names))
-    }
-    global.LOG.Debug("走到了这里")
-    for _, item := range req.Data {
-        local, _ := backupRepo.Get(commonRepo.WithByName(item.Name))
-        if local.ID == 0 {
-            if err := backupRepo.Create([]model.BackupAccount{item}); err != nil {
-                return err
-            }
-            continue
-        }
-        if item.Type == constant.Local {
-            if local.ID != 0 && item.Vars != local.Vars {
-                oldPath, err := loadLocalDirByStr(local.Vars)
-                if err != nil {
-                    return err
-                }
-                newPath, err := loadLocalDirByStr(item.Vars)
-                if err != nil {
-                    return err
-                }
-                if strings.HasSuffix(newPath, "/") && newPath != "/" {
-                    newPath = newPath[:strings.LastIndex(newPath, "/")]
-                }
-                if err := copyDir(oldPath, newPath); err != nil {
-                    return err
-                }
-                global.CONF.System.Backup = newPath
-            }
-        }
-        item.ID = local.ID
-
-        global.LOG.Debug("走到了这里111")
-        if err := backupRepo.Save(&item); err != nil {
-            return err
-        }
-    }
-    return nil
-}
-
 func (u *BackupService) DeleteRecordByName(backupType, name, detailName string, withDeleteFile bool) error {
     if !withDeleteFile {
         return backupRepo.DeleteRecord(context.Background(), commonRepo.WithByType(backupType), commonRepo.WithByName(name), backupRepo.WithByDetailName(detailName))
@@ -220,18 +134,13 @@ func (u *BackupService) DeleteRecordByName(backupType, name, detailName string,
     }
 
     for _, record := range records {
-        backupAccount, err := backupRepo.Get(commonRepo.WithByType(record.Source))
+        _, client, err := NewBackupClientWithID(record.DownloadAccountID)
         if err != nil {
-            global.LOG.Errorf("load backup account %s info from db failed, err: %v", record.Source, err)
-            continue
-        }
-        client, err := u.NewClient(&backupAccount)
-        if err != nil {
-            global.LOG.Errorf("new client for backup account %s failed, err: %v", record.Source, err)
+            global.LOG.Errorf("new client for backup account failed, err: %v", err)
             continue
         }
         if _, err = client.Delete(path.Join(record.FileDir, record.FileName)); err != nil {
-            global.LOG.Errorf("remove file %s from %s failed, err: %v", path.Join(record.FileDir, record.FileName), record.Source, err)
+            global.LOG.Errorf("remove file %s failed, err: %v", path.Join(record.FileDir, record.FileName), err)
         }
         _ = backupRepo.DeleteRecord(context.Background(), commonRepo.WithByID(record.ID))
     }
@@ -244,40 +153,31 @@ func (u *BackupService) BatchDeleteRecord(ids []uint) error {
         return err
     }
     for _, record := range records {
-        backupAccount, err := backupRepo.Get(commonRepo.WithByType(record.Source))
+        _, client, err := NewBackupClientWithID(record.DownloadAccountID)
         if err != nil {
-            global.LOG.Errorf("load backup account %s info from db failed, err: %v", record.Source, err)
-            continue
-        }
-        client, err := u.NewClient(&backupAccount)
-        if err != nil {
-            global.LOG.Errorf("new client for backup account %s failed, err: %v", record.Source, err)
+            global.LOG.Errorf("new client for backup account failed, err: %v", err)
             continue
         }
         if _, err = client.Delete(path.Join(record.FileDir, record.FileName)); err != nil {
-            global.LOG.Errorf("remove file %s from %s failed, err: %v", path.Join(record.FileDir, record.FileName), record.Source, err)
+            global.LOG.Errorf("remove file %s failed, err: %v", path.Join(record.FileDir, record.FileName), err)
         }
     }
     return backupRepo.DeleteRecord(context.Background(), commonRepo.WithIdsIn(ids))
 }
 
-func (u *BackupService) ListFiles(req dto.BackupSearchFile) []string {
+func (u *BackupService) ListFiles(req dto.OperateByID) []string {
     var datas []string
-    backup, err := backupRepo.Get(backupRepo.WithByType(req.Type))
-    if err != nil {
-        return datas
-    }
-    client, err := u.NewClient(&backup)
+    account, client, err := NewBackupClientWithID(req.ID)
     if err != nil {
         return datas
     }
     prefix := "system_snapshot"
-    if len(backup.BackupPath) != 0 {
-        prefix = path.Join(strings.TrimPrefix(backup.BackupPath, "/"), prefix)
+    if len(account.BackupPath) != 0 {
+        prefix = path.Join(strings.TrimPrefix(account.BackupPath, "/"), prefix)
     }
     files, err := client.ListObjects(prefix)
     if err != nil {
-        global.LOG.Debugf("load files from %s failed, err: %v", req.Type, err)
+        global.LOG.Debugf("load files failed, err: %v", err)
         return datas
     }
     for _, file := range files {
@@ -288,32 +188,9 @@ func (u *BackupService) ListFiles(req dto.BackupSearchFile) []string {
     return datas
 }
 
-func (u *BackupService) NewClient(backup *model.BackupAccount) (cloud_storage.CloudStorageClient, error) {
-    varMap := make(map[string]interface{})
-    if err := json.Unmarshal([]byte(backup.Vars), &varMap); err != nil {
-        return nil, err
-    }
-    varMap["bucket"] = backup.Bucket
-    switch backup.Type {
-    case constant.Sftp, constant.WebDAV:
-        varMap["username"] = backup.AccessKey
-        varMap["password"] = backup.Credential
-    case constant.OSS, constant.S3, constant.MinIo, constant.Cos, constant.Kodo:
-        varMap["accessKey"] = backup.AccessKey
-        varMap["secretKey"] = backup.Credential
-    }
-
-    backClient, err := cloud_storage.NewCloudStorageClient(backup.Type, varMap)
-    if err != nil {
-        return nil, err
-    }
-
-    return backClient, nil
-}
-
 func (u *BackupService) loadRecordSize(records []model.BackupRecord) ([]dto.BackupRecords, error) {
     var datas []dto.BackupRecords
-    clientMap := make(map[string]loadSizeHelper)
+    clientMap := make(map[uint]loadSizeHelper)
     var wg sync.WaitGroup
     for i := 0; i < len(records); i++ {
         var item dto.BackupRecords
@@ -321,30 +198,23 @@ func (u *BackupService) loadRecordSize(records []model.BackupRecord) ([]dto.BackupRecords, error) {
             return nil, errors.WithMessage(constant.ErrStructTransform, err.Error())
         }
         itemPath := path.Join(records[i].FileDir, records[i].FileName)
-        if _, ok := clientMap[records[i].Source]; !ok {
-            backup, err := backupRepo.Get(commonRepo.WithByType(records[i].Source))
+        if _, ok := clientMap[records[i].DownloadAccountID]; !ok {
+            account, client, err := NewBackupClientWithID(records[i].DownloadAccountID)
             if err != nil {
-                global.LOG.Errorf("load backup model %s from db failed, err: %v", records[i].Source, err)
-                clientMap[records[i].Source] = loadSizeHelper{}
+                global.LOG.Errorf("load backup client from db failed, err: %v", err)
+                clientMap[records[i].DownloadAccountID] = loadSizeHelper{}
                 datas = append(datas, item)
                 continue
             }
-            client, err := u.NewClient(&backup)
-            if err != nil {
-                global.LOG.Errorf("load backup client %s from db failed, err: %v", records[i].Source, err)
-                clientMap[records[i].Source] = loadSizeHelper{}
-                datas = append(datas, item)
-                continue
-            }
-            item.Size, _ = client.Size(path.Join(strings.TrimLeft(backup.BackupPath, "/"), itemPath))
+            item.Size, _ = client.Size(path.Join(strings.TrimLeft(account.BackupPath, "/"), itemPath))
             datas = append(datas, item)
-            clientMap[records[i].Source] = loadSizeHelper{backupPath: strings.TrimLeft(backup.BackupPath, "/"), client: client, isOk: true}
+            clientMap[records[i].DownloadAccountID] = loadSizeHelper{backupPath: strings.TrimLeft(account.BackupPath, "/"), client: client, isOk: true}
             continue
         }
-        if clientMap[records[i].Source].isOk {
+        if clientMap[records[i].DownloadAccountID].isOk {
            wg.Add(1)
            go func(index int) {
-                item.Size, _ = clientMap[records[index].Source].client.Size(path.Join(clientMap[records[index].Source].backupPath, itemPath))
+                item.Size, _ = clientMap[records[index].DownloadAccountID].client.Size(path.Join(clientMap[records[index].DownloadAccountID].backupPath, itemPath))
                 datas = append(datas, item)
                 wg.Done()
             }(i)
@@ -356,15 +226,90 @@ func (u *BackupService) loadRecordSize(records []model.BackupRecord) ([]dto.BackupRecords, error) {
     return datas, nil
 }
 
-func loadLocalDir() (string, error) {
-    backup, err := backupRepo.Get(commonRepo.WithByType("LOCAL"))
+func NewBackupClientWithID(id uint) (*dto.BackupInfo, cloud_storage.CloudStorageClient, error) {
+    data, err := httpUtils.RequestToMaster(fmt.Sprintf("/api/v2/backup/%v", id), http.MethodGet, nil)
     if err != nil {
-        return "", err
+        return nil, nil, err
     }
-    return loadLocalDirByStr(backup.Vars)
+    global.LOG.Debug("我走到了这里11")
+    account, ok := data.(dto.BackupInfo)
+    if !ok {
+        return nil, nil, fmt.Errorf("err response from master: %v", data)
+    }
+    global.LOG.Debug("我走到了这里22")
+    if account.Type == constant.Local {
+        localDir, err := LoadLocalDirByStr(account.Vars)
+        if err != nil {
+            return nil, nil, err
+        }
+        global.CONF.System.Backup = localDir
+    }
+    global.LOG.Debug("我走到了这里33")
+    backClient, err := newClient(&account)
+    if err != nil {
+        return nil, nil, err
+    }
+    return &account, backClient, nil
 }
 
-func loadLocalDirByStr(vars string) (string, error) {
+type backupClientHelper struct {
+    id         uint
+    name       string
+    backupPath string
+    client     cloud_storage.CloudStorageClient
+}
+
+func NewBackupClientMap(ids []string) (map[string]backupClientHelper, error) {
+    bodyItem, err := json.Marshal(ids)
+    if err != nil {
+        return nil, err
+    }
+    data, err := httpUtils.RequestToMaster("/api/v2/backup/list", http.MethodPost, bytes.NewReader(bodyItem))
+    if err != nil {
+        return nil, err
+    }
+    accounts, ok := data.([]dto.BackupInfo)
+    if !ok {
+        return nil, fmt.Errorf("err response from master: %v", data)
+    }
+    clientMap := make(map[string]backupClientHelper)
+    for _, item := range accounts {
+        backClient, err := newClient(&item)
+        if err != nil {
+            return nil, err
+        }
+        pathItem := item.BackupPath
+        if item.BackupPath != "/" {
+            pathItem = strings.TrimPrefix(item.BackupPath, "/")
+        }
+        clientMap[item.Name] = backupClientHelper{client: backClient, backupPath: pathItem, name: item.Name}
+    }
+    return clientMap, nil
+}
+
+func newClient(account *dto.BackupInfo) (cloud_storage.CloudStorageClient, error) {
+    varMap := make(map[string]interface{})
+    if err := json.Unmarshal([]byte(account.Vars), &varMap); err != nil {
+        return nil, err
+    }
+    varMap["bucket"] = account.Bucket
+    switch account.Type {
+    case constant.Sftp, constant.WebDAV:
+        varMap["username"] = account.AccessKey
+        varMap["password"] = account.Credential
+    case constant.OSS, constant.S3, constant.MinIo, constant.Cos, constant.Kodo:
+        varMap["accessKey"] = account.AccessKey
+        varMap["secretKey"] = account.Credential
+    }
+
+    client, err := cloud_storage.NewCloudStorageClient(account.Type, varMap)
+    if err != nil {
+        return nil, err
+    }
+    return client, nil
+}
+
+func LoadLocalDirByStr(vars string) (string, error) {
     varMap := make(map[string]interface{})
     if err := json.Unmarshal([]byte(vars), &varMap); err != nil {
         return "", err
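Not part of the commit: a hedged usage sketch of the helpers added above, assuming the agent service package context and using only client methods that appear elsewhere in this diff (Exist, Download).

    // Resolve a single account and a ready-to-use storage client by ID.
    account, client, err := NewBackupClientWithID(7) // 7 is a hypothetical account ID
    if err != nil {
        return err
    }
    if ok, _ := client.Exist("system_snapshot/snap.tar.gz"); ok {
        _, _ = client.Download("system_snapshot/snap.tar.gz", "/tmp/snap.tar.gz")
    }
    _ = account.BackupPath // remote prefix callers are expected to prepend

    // Resolve several accounts at once; entries are keyed by account name.
    clients, err := NewBackupClientMap([]string{"1", "3"})
    if err != nil {
        return err
    }
    for name, helper := range clients {
        global.LOG.Debugf("account %s uses backup path %s", name, helper.backupPath)
    }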
@@ -382,34 +327,3 @@ func loadLocalDirByStr(vars string) (string, error) {
     }
     return "", fmt.Errorf("error type dir: %T", varMap["dir"])
 }
-
-func copyDir(src, dst string) error {
-    srcInfo, err := os.Stat(src)
-    if err != nil {
-        return err
-    }
-    if err = os.MkdirAll(dst, srcInfo.Mode()); err != nil {
-        return err
-    }
-    files, err := os.ReadDir(src)
-    if err != nil {
-        return err
-    }
-
-    fileOP := fileUtils.NewFileOp()
-    for _, file := range files {
-        srcPath := fmt.Sprintf("%s/%s", src, file.Name())
-        dstPath := fmt.Sprintf("%s/%s", dst, file.Name())
-        if file.IsDir() {
-            if err = copyDir(srcPath, dstPath); err != nil {
-                global.LOG.Errorf("copy dir %s to %s failed, err: %v", srcPath, dstPath, err)
-            }
-        } else {
-            if err := fileOP.CopyFile(srcPath, dst); err != nil {
-                global.LOG.Errorf("copy file %s to %s failed, err: %v", srcPath, dstPath, err)
-            }
-        }
-    }
-
-    return nil
-}
@@ -23,10 +23,6 @@ import (
 )
 
 func (u *BackupService) AppBackup(req dto.CommonBackup) (*model.BackupRecord, error) {
-    localDir, err := loadLocalDir()
-    if err != nil {
-        return nil, err
-    }
     app, err := appRepo.GetFirst(appRepo.WithKey(req.Name))
     if err != nil {
         return nil, err
@@ -37,7 +33,7 @@ func (u *BackupService) AppBackup(req dto.CommonBackup) (*model.BackupRecord, error) {
     }
     timeNow := time.Now().Format(constant.DateTimeSlimLayout)
     itemDir := fmt.Sprintf("app/%s/%s", req.Name, req.DetailName)
-    backupDir := path.Join(localDir, itemDir)
+    backupDir := path.Join(global.CONF.System.Backup, itemDir)
 
     fileName := fmt.Sprintf("%s_%s.tar.gz", req.DetailName, timeNow+common.RandStrAndNum(5))
     if err := handleAppBackup(&install, backupDir, fileName, "", req.Secret); err != nil {
@@ -45,13 +41,13 @@ func (u *BackupService) AppBackup(req dto.CommonBackup) (*model.BackupRecord, error) {
     }
 
     record := &model.BackupRecord{
         Type:       "app",
         Name:       req.Name,
         DetailName: req.DetailName,
-        Source:     "LOCAL",
-        BackupType: "LOCAL",
+        SourceAccountIDs:  "1",
+        DownloadAccountID: 1,
         FileDir:    itemDir,
         FileName:   fileName,
     }
 
     if err := backupRepo.CreateRecord(record); err != nil {
@@ -21,14 +21,9 @@ import (
 )
 
 func (u *BackupService) MysqlBackup(req dto.CommonBackup) error {
-    localDir, err := loadLocalDir()
-    if err != nil {
-        return err
-    }
-
     timeNow := time.Now().Format(constant.DateTimeSlimLayout)
     itemDir := fmt.Sprintf("database/%s/%s/%s", req.Type, req.Name, req.DetailName)
-    targetDir := path.Join(localDir, itemDir)
+    targetDir := path.Join(global.CONF.System.Backup, itemDir)
     fileName := fmt.Sprintf("%s_%s.sql.gz", req.DetailName, timeNow+common.RandStrAndNum(5))
 
     if err := handleMysqlBackup(req.Name, req.Type, req.DetailName, targetDir, fileName); err != nil {
@@ -36,13 +31,13 @@ func (u *BackupService) MysqlBackup(req dto.CommonBackup) error {
     }
 
     record := &model.BackupRecord{
         Type:       req.Type,
         Name:       req.Name,
         DetailName: req.DetailName,
-        Source:     "LOCAL",
-        BackupType: "LOCAL",
+        SourceAccountIDs:  "1",
+        DownloadAccountID: 1,
         FileDir:    itemDir,
         FileName:   fileName,
     }
     if err := backupRepo.CreateRecord(record); err != nil {
         global.LOG.Errorf("save backup record failed, err: %v", err)
@@ -22,14 +22,9 @@ import (
 )
 
 func (u *BackupService) PostgresqlBackup(req dto.CommonBackup) error {
-    localDir, err := loadLocalDir()
-    if err != nil {
-        return err
-    }
-
     timeNow := time.Now().Format(constant.DateTimeSlimLayout)
     itemDir := fmt.Sprintf("database/%s/%s/%s", req.Type, req.Name, req.DetailName)
-    targetDir := path.Join(localDir, itemDir)
+    targetDir := path.Join(global.CONF.System.Backup, itemDir)
     fileName := fmt.Sprintf("%s_%s.sql.gz", req.DetailName, timeNow+common.RandStrAndNum(5))
 
     if err := handlePostgresqlBackup(req.Name, req.DetailName, targetDir, fileName); err != nil {
@@ -37,13 +32,13 @@ func (u *BackupService) PostgresqlBackup(req dto.CommonBackup) error {
     }
 
     record := &model.BackupRecord{
         Type:       req.Type,
         Name:       req.Name,
         DetailName: req.DetailName,
-        Source:     "LOCAL",
-        BackupType: "LOCAL",
+        SourceAccountIDs:  "1",
+        DownloadAccountID: 1,
         FileDir:    itemDir,
         FileName:   fileName,
     }
     if err := backupRepo.CreateRecord(record); err != nil {
         global.LOG.Errorf("save backup record failed, err: %v", err)
@@ -21,10 +21,6 @@ import (
 )
 
 func (u *BackupService) RedisBackup(db dto.CommonBackup) error {
-    localDir, err := loadLocalDir()
-    if err != nil {
-        return err
-    }
     redisInfo, err := appInstallRepo.LoadBaseInfo("redis", db.Name)
     if err != nil {
         return err
@@ -45,17 +41,17 @@ func (u *BackupService) RedisBackup(db dto.CommonBackup) error {
         }
     }
     itemDir := fmt.Sprintf("database/redis/%s", redisInfo.Name)
-    backupDir := path.Join(localDir, itemDir)
+    backupDir := path.Join(global.CONF.System.Backup, itemDir)
     if err := handleRedisBackup(redisInfo, backupDir, fileName, db.Secret); err != nil {
         return err
     }
     record := &model.BackupRecord{
         Type:     "redis",
         Name:     db.Name,
-        Source:   "LOCAL",
-        BackupType: "LOCAL",
+        SourceAccountIDs:  "1",
+        DownloadAccountID: 1,
         FileDir:  itemDir,
         FileName: fileName,
     }
     if err := backupRepo.CreateRecord(record); err != nil {
         global.LOG.Errorf("save backup record failed, err: %v", err)
@@ -22,10 +22,6 @@ import (
 )
 
 func (u *BackupService) WebsiteBackup(req dto.CommonBackup) error {
-    localDir, err := loadLocalDir()
-    if err != nil {
-        return err
-    }
     website, err := websiteRepo.GetFirst(websiteRepo.WithAlias(req.DetailName))
     if err != nil {
         return err
@@ -33,20 +29,20 @@ func (u *BackupService) WebsiteBackup(req dto.CommonBackup) error {
 
     timeNow := time.Now().Format(constant.DateTimeSlimLayout)
     itemDir := fmt.Sprintf("website/%s", req.Name)
-    backupDir := path.Join(localDir, itemDir)
+    backupDir := path.Join(global.CONF.System.Backup, itemDir)
     fileName := fmt.Sprintf("%s_%s.tar.gz", website.PrimaryDomain, timeNow+common.RandStrAndNum(5))
     if err := handleWebsiteBackup(&website, backupDir, fileName, "", req.Secret); err != nil {
         return err
     }
 
     record := &model.BackupRecord{
         Type:       "website",
         Name:       website.PrimaryDomain,
         DetailName: req.DetailName,
-        Source:     "LOCAL",
-        BackupType: "LOCAL",
+        SourceAccountIDs:  "1",
+        DownloadAccountID: 1,
         FileDir:    itemDir,
         FileName:   fileName,
     }
     if err := backupRepo.CreateRecord(record); err != nil {
         global.LOG.Errorf("save backup record failed, err: %v", err)
@@ -99,7 +99,7 @@ func (u *CronjobService) CleanRecord(req dto.CronjobClean) error {
 	}
 	if req.CleanData {
 		if hasBackup(cronjob.Type) {
-			accountMap, err := loadClientMap(cronjob.BackupAccounts)
+			accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
 			if err != nil {
 				return err
 			}
@@ -129,16 +129,16 @@ func (u *CronjobService) CleanRecord(req dto.CronjobClean) error {
 	return nil
 }
 
-func (u *CronjobService) Download(down dto.CronjobDownload) (string, error) {
-	record, _ := cronjobRepo.GetRecord(commonRepo.WithByID(down.RecordID))
+func (u *CronjobService) Download(req dto.CronjobDownload) (string, error) {
+	record, _ := cronjobRepo.GetRecord(commonRepo.WithByID(req.RecordID))
 	if record.ID == 0 {
 		return "", constant.ErrRecordNotFound
 	}
-	backup, _ := backupRepo.Get(commonRepo.WithByID(down.BackupAccountID))
-	if backup.ID == 0 {
-		return "", constant.ErrRecordNotFound
+	account, client, err := NewBackupClientWithID(req.BackupAccountID)
+	if err != nil {
+		return "", err
 	}
-	if backup.Type == "LOCAL" || record.FromLocal {
+	if account.Type == "LOCAL" || record.FromLocal {
 		if _, err := os.Stat(record.File); err != nil && os.IsNotExist(err) {
 			return "", err
 		}
@@ -146,10 +146,6 @@ func (u *CronjobService) Download(down dto.CronjobDownload) (string, error) {
 	}
 	tempPath := fmt.Sprintf("%s/download/%s", constant.DataDir, record.File)
 	if _, err := os.Stat(tempPath); err != nil && os.IsNotExist(err) {
-		client, err := NewIBackupService().NewClient(&backup)
-		if err != nil {
-			return "", err
-		}
 		_ = os.MkdirAll(path.Dir(tempPath), os.ModePerm)
 		isOK, err := client.Download(record.File, tempPath)
 		if !isOK || err != nil {
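A minimal, self-contained sketch of the download pattern this hunk converges on: resolve the backup client once up front, then fetch the file only when it is not already cached locally. The storageClient interface below merely mirrors the Download(src, dst) (bool, error) shape used above; it is a stand-in, not the real cloud_storage client or NewBackupClientWithID.

package main

import (
	"fmt"
	"os"
	"path"
)

// storageClient mirrors the Download(src, dst) (bool, error) shape seen in the hunk (assumption).
type storageClient interface {
	Download(src, dst string) (bool, error)
}

type fakeClient struct{}

func (fakeClient) Download(src, dst string) (bool, error) {
	// Stand-in for a remote download: just write a placeholder file.
	return true, os.WriteFile(dst, []byte("stub for "+src), 0o644)
}

// fetchIfMissing mirrors the os.Stat / MkdirAll / client.Download sequence above.
func fetchIfMissing(client storageClient, src, dst string) error {
	if _, err := os.Stat(dst); err == nil {
		return nil // already present locally
	}
	if err := os.MkdirAll(path.Dir(dst), os.ModePerm); err != nil {
		return err
	}
	ok, err := client.Download(src, dst)
	if !ok || err != nil {
		return fmt.Errorf("download %s failed, err: %v", src, err)
	}
	return nil
}

func main() {
	if err := fetchIfMissing(fakeClient{}, "cronjob/app_demo.tar.gz", "/tmp/1panel-demo/app_demo.tar.gz"); err != nil {
		fmt.Println(err)
	}
}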
@@ -27,7 +27,7 @@ func (u *CronjobService) handleApp(cronjob model.Cronjob, startTime time.Time) e
 		}
 		apps = append(apps, app)
 	}
-	accountMap, err := loadClientMap(cronjob.BackupAccounts)
+	accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
 	if err != nil {
 		return err
 	}
@@ -38,7 +38,7 @@ func (u *CronjobService) handleApp(cronjob model.Cronjob, startTime time.Time) e
 		record.CronjobID = cronjob.ID
 		record.Name = app.App.Key
 		record.DetailName = app.Name
-		record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
+		record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
 		backupDir := path.Join(global.CONF.System.TmpDir, fmt.Sprintf("app/%s/%s", app.App.Key, app.Name))
 		record.FileName = fmt.Sprintf("app_%s_%s.tar.gz", app.Name, startTime.Format(constant.DateTimeSlimLayout)+common.RandStrAndNum(5))
 		if err := handleAppBackup(&app, backupDir, record.FileName, cronjob.ExclusionRules, cronjob.Secret); err != nil {
@@ -60,7 +60,7 @@ func (u *CronjobService) handleApp(cronjob model.Cronjob, startTime time.Time) e
 
 func (u *CronjobService) handleWebsite(cronjob model.Cronjob, startTime time.Time) error {
 	webs := loadWebsForJob(cronjob)
-	accountMap, err := loadClientMap(cronjob.BackupAccounts)
+	accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
 	if err != nil {
 		return err
 	}
@@ -71,7 +71,7 @@ func (u *CronjobService) handleWebsite(cronjob model.Cronjob, startTime time.Tim
 		record.CronjobID = cronjob.ID
 		record.Name = web.PrimaryDomain
 		record.DetailName = web.Alias
-		record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
+		record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
 		backupDir := path.Join(global.CONF.System.TmpDir, fmt.Sprintf("website/%s", web.PrimaryDomain))
 		record.FileName = fmt.Sprintf("website_%s_%s.tar.gz", web.PrimaryDomain, startTime.Format(constant.DateTimeSlimLayout)+common.RandStrAndNum(5))
 		if err := handleWebsiteBackup(&web, backupDir, record.FileName, cronjob.ExclusionRules, cronjob.Secret); err != nil {
@@ -93,7 +93,7 @@ func (u *CronjobService) handleWebsite(cronjob model.Cronjob, startTime time.Tim
 
 func (u *CronjobService) handleDatabase(cronjob model.Cronjob, startTime time.Time) error {
 	dbs := loadDbsForJob(cronjob)
-	accountMap, err := loadClientMap(cronjob.BackupAccounts)
+	accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
 	if err != nil {
 		return err
 	}
@@ -104,7 +104,7 @@ func (u *CronjobService) handleDatabase(cronjob model.Cronjob, startTime time.Ti
 		record.CronjobID = cronjob.ID
 		record.Name = dbInfo.Database
 		record.DetailName = dbInfo.Name
-		record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
+		record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
 
 		backupDir := path.Join(global.CONF.System.TmpDir, fmt.Sprintf("database/%s/%s/%s", dbInfo.DBType, record.Name, dbInfo.Name))
 		record.FileName = fmt.Sprintf("db_%s_%s.sql.gz", dbInfo.Name, startTime.Format(constant.DateTimeSlimLayout)+common.RandStrAndNum(5))
@@ -132,7 +132,7 @@ func (u *CronjobService) handleDatabase(cronjob model.Cronjob, startTime time.Ti
 	}
 
 func (u *CronjobService) handleDirectory(cronjob model.Cronjob, startTime time.Time) error {
-	accountMap, err := loadClientMap(cronjob.BackupAccounts)
+	accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
 	if err != nil {
 		return err
 	}
@@ -146,7 +146,7 @@ func (u *CronjobService) handleDirectory(cronjob model.Cronjob, startTime time.T
 	record.Type = "directory"
 	record.CronjobID = cronjob.ID
 	record.Name = cronjob.Name
-	record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
+	record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
 	downloadPath, err := u.uploadCronjobBackFile(cronjob, accountMap, path.Join(backupDir, fileName))
 	if err != nil {
 		return err
@@ -162,7 +162,7 @@ func (u *CronjobService) handleDirectory(cronjob model.Cronjob, startTime time.T
 	}
 
 func (u *CronjobService) handleSystemLog(cronjob model.Cronjob, startTime time.Time) error {
-	accountMap, err := loadClientMap(cronjob.BackupAccounts)
+	accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
 	if err != nil {
 		return err
 	}
@@ -177,7 +177,7 @@ func (u *CronjobService) handleSystemLog(cronjob model.Cronjob, startTime time.T
 	record.Type = "log"
 	record.CronjobID = cronjob.ID
 	record.Name = cronjob.Name
-	record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
+	record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
 	downloadPath, err := u.uploadCronjobBackFile(cronjob, accountMap, path.Join(path.Dir(backupDir), fileName))
 	if err != nil {
 		return err
@@ -193,7 +193,7 @@ func (u *CronjobService) handleSystemLog(cronjob model.Cronjob, startTime time.T
 	}
 
 func (u *CronjobService) handleSnapshot(cronjob model.Cronjob, startTime time.Time, logPath string) error {
-	accountMap, err := loadClientMap(cronjob.BackupAccounts)
+	accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
 	if err != nil {
 		return err
 	}
@@ -203,12 +203,12 @@ func (u *CronjobService) handleSnapshot(cronjob model.Cronjob, startTime time.Ti
 	record.Type = "directory"
 	record.CronjobID = cronjob.ID
 	record.Name = cronjob.Name
-	record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
+	record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
 	record.FileDir = "system_snapshot"
 
 	req := dto.SnapshotCreate{
-		From:            record.BackupType,
-		DefaultDownload: cronjob.DefaultDownload,
+		SourceAccountIDs:  record.SourceAccountIDs,
+		DownloadAccountID: cronjob.DownloadAccountID,
 	}
 	name, err := NewISnapshotService().HandleSnapshot(true, logPath, req, startTime.Format(constant.DateTimeSlimLayout)+common.RandStrAndNum(5), cronjob.Secret)
 	if err != nil {
@@ -287,20 +287,20 @@ func loadWebsForJob(cronjob model.Cronjob) []model.Website {
 	return weblist
 }
 
-func loadRecordPath(cronjob model.Cronjob, accountMap map[string]cronjobUploadHelper) (string, string) {
-	source := accountMap[fmt.Sprintf("%v", cronjob.DefaultDownload)].backType
-	targets := strings.Split(cronjob.BackupAccounts, ",")
+func loadRecordPath(cronjob model.Cronjob, accountMap map[string]backupClientHelper) (uint, string) {
+	download := accountMap[fmt.Sprintf("%v", cronjob.DownloadAccountID)].id
+	sources := strings.Split(cronjob.SourceAccountIDs, ",")
 	var itemAccounts []string
-	for _, target := range targets {
+	for _, target := range sources {
 		if len(target) == 0 {
 			continue
 		}
-		if len(accountMap[target].backType) != 0 {
-			itemAccounts = append(itemAccounts, accountMap[target].backType)
+		if accountMap[target].id != 0 {
+			itemAccounts = append(itemAccounts, fmt.Sprintf("%v", accountMap[target].id))
 		}
 	}
 	backupType := strings.Join(itemAccounts, ",")
-	return source, backupType
+	return download, backupType
 }
 
 func handleBackupLogs(targetDir, fileName string, secret string) error {
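An illustrative, self-contained sketch of what loadRecordPath now computes: the download account's ID plus a comma-joined list of source account IDs. The backupClientHelper type is not reproduced here; its id field is assumed from the usage in the hunk above.

package main

import (
	"fmt"
	"strings"
)

// accountHelper is a stand-in mirroring only the backupClientHelper.id field used above (assumption).
type accountHelper struct {
	id uint
}

// recordPath mirrors the new loadRecordPath logic: pick the download account's ID
// and keep only source IDs that resolve to a known account in the map.
func recordPath(downloadID uint, sourceIDs string, accounts map[string]accountHelper) (uint, string) {
	download := accounts[fmt.Sprintf("%v", downloadID)].id
	var kept []string
	for _, src := range strings.Split(sourceIDs, ",") {
		if len(src) == 0 {
			continue
		}
		if accounts[src].id != 0 {
			kept = append(kept, fmt.Sprintf("%v", accounts[src].id))
		}
	}
	return download, strings.Join(kept, ",")
}

func main() {
	accounts := map[string]accountHelper{"1": {id: 1}, "3": {id: 3}}
	download, sources := recordPath(1, "1,3", accounts)
	fmt.Println(download, sources) // 1 1,3
}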
@@ -15,7 +15,6 @@ import (
 	"github.com/1Panel-dev/1Panel/agent/app/repo"
 	"github.com/1Panel-dev/1Panel/agent/constant"
 	"github.com/1Panel-dev/1Panel/agent/global"
-	"github.com/1Panel-dev/1Panel/agent/utils/cloud_storage"
 	"github.com/1Panel-dev/1Panel/agent/utils/cmd"
 	"github.com/1Panel-dev/1Panel/agent/utils/files"
 	"github.com/1Panel-dev/1Panel/agent/utils/ntp"
@@ -27,7 +26,7 @@ func (u *CronjobService) HandleJob(cronjob *model.Cronjob) {
 		message []byte
 		err     error
 	)
-	record := cronjobRepo.StartRecords(cronjob.ID, cronjob.KeepLocal, "")
+	record := cronjobRepo.StartRecords(cronjob.ID, "")
 	go func() {
 		switch cronjob.Type {
 		case "shell":
@@ -269,49 +268,11 @@ func (u *CronjobService) handleSystemClean() (string, error) {
 	return NewIDeviceService().CleanForCronjob()
 }
 
-func loadClientMap(backupAccounts string) (map[string]cronjobUploadHelper, error) {
-	clients := make(map[string]cronjobUploadHelper)
-	accounts, err := backupRepo.List()
-	if err != nil {
-		return nil, err
-	}
-	targets := strings.Split(backupAccounts, ",")
-	for _, target := range targets {
-		if len(target) == 0 {
-			continue
-		}
-		for _, account := range accounts {
-			if target == account.Type {
-				client, err := NewIBackupService().NewClient(&account)
-				if err != nil {
-					return nil, err
-				}
-				pathItem := account.BackupPath
-				if account.BackupPath != "/" {
-					pathItem = strings.TrimPrefix(account.BackupPath, "/")
-				}
-				clients[target] = cronjobUploadHelper{
-					client:     client,
-					backupPath: pathItem,
-					backType:   account.Type,
-				}
-			}
-		}
-	}
-	return clients, nil
-}
-
-type cronjobUploadHelper struct {
-	backupPath string
-	backType   string
-	client     cloud_storage.CloudStorageClient
-}
-
-func (u *CronjobService) uploadCronjobBackFile(cronjob model.Cronjob, accountMap map[string]cronjobUploadHelper, file string) (string, error) {
+func (u *CronjobService) uploadCronjobBackFile(cronjob model.Cronjob, accountMap map[string]backupClientHelper, file string) (string, error) {
 	defer func() {
 		_ = os.Remove(file)
 	}()
-	accounts := strings.Split(cronjob.BackupAccounts, ",")
+	accounts := strings.Split(cronjob.SourceAccountIDs, ",")
 	cloudSrc := strings.TrimPrefix(file, global.CONF.System.TmpDir+"/")
 	for _, account := range accounts {
 		if len(account) != 0 {
@@ -325,14 +286,14 @@ func (u *CronjobService) uploadCronjobBackFile(cronjob model.Cronjob, accountMap
 	return cloudSrc, nil
 }
 
-func (u *CronjobService) removeExpiredBackup(cronjob model.Cronjob, accountMap map[string]cronjobUploadHelper, record model.BackupRecord) {
+func (u *CronjobService) removeExpiredBackup(cronjob model.Cronjob, accountMap map[string]backupClientHelper, record model.BackupRecord) {
 	global.LOG.Infof("start to handle remove expired, retain copies: %d", cronjob.RetainCopies)
 	var opts []repo.DBOption
 	opts = append(opts, commonRepo.WithByFrom("cronjob"))
 	opts = append(opts, backupRepo.WithByCronID(cronjob.ID))
 	opts = append(opts, commonRepo.WithOrderBy("created_at desc"))
 	if record.ID != 0 {
-		opts = append(opts, backupRepo.WithByType(record.Type))
+		opts = append(opts, commonRepo.WithByType(record.Type))
 		opts = append(opts, commonRepo.WithByName(record.Name))
 		opts = append(opts, backupRepo.WithByDetailName(record.DetailName))
 	}
@@ -341,7 +302,7 @@ func (u *CronjobService) removeExpiredBackup(cronjob model.Cronjob, accountMap m
 		return
 	}
 	for i := int(cronjob.RetainCopies); i < len(records); i++ {
-		accounts := strings.Split(cronjob.BackupAccounts, ",")
+		accounts := strings.Split(cronjob.SourceAccountIDs, ",")
 		if cronjob.Type == "snapshot" {
 			for _, account := range accounts {
 				if len(account) != 0 {
@@ -237,11 +237,7 @@ func (u *DatabaseService) Delete(req dto.DatabaseDelete) error {
 	if _, err := os.Stat(uploadDir); err == nil {
 		_ = os.RemoveAll(uploadDir)
 	}
-	localDir, err := loadLocalDir()
-	if err != nil && !req.ForceDelete {
-		return err
-	}
-	backupDir := path.Join(localDir, fmt.Sprintf("database/%s/%s", db.Type, db.Name))
+	backupDir := path.Join(global.CONF.System.Backup, fmt.Sprintf("database/%s/%s", db.Type, db.Name))
 	if _, err := os.Stat(backupDir); err == nil {
 		_ = os.RemoveAll(backupDir)
 	}
@@ -280,11 +280,7 @@ func (u *MysqlService) Delete(ctx context.Context, req dto.MysqlDBDelete) error
 	if _, err := os.Stat(uploadDir); err == nil {
 		_ = os.RemoveAll(uploadDir)
 	}
-	localDir, err := loadLocalDir()
-	if err != nil && !req.ForceDelete {
-		return err
-	}
-	backupDir := path.Join(localDir, fmt.Sprintf("database/%s/%s/%s", req.Type, db.MysqlName, db.Name))
+	backupDir := path.Join(global.CONF.System.Backup, fmt.Sprintf("database/%s/%s/%s", req.Type, db.MysqlName, db.Name))
 	if _, err := os.Stat(backupDir); err == nil {
 		_ = os.RemoveAll(backupDir)
 	}
@@ -305,11 +305,7 @@ func (u *PostgresqlService) Delete(ctx context.Context, req dto.PostgresqlDBDele
 	if _, err := os.Stat(uploadDir); err == nil {
 		_ = os.RemoveAll(uploadDir)
 	}
-	localDir, err := loadLocalDir()
-	if err != nil && !req.ForceDelete {
-		return err
-	}
-	backupDir := path.Join(localDir, fmt.Sprintf("database/%s/%s/%s", req.Type, db.PostgresqlName, db.Name))
+	backupDir := path.Join(global.CONF.System.Backup, fmt.Sprintf("database/%s/%s/%s", req.Type, db.PostgresqlName, db.Name))
 	if _, err := os.Stat(backupDir); err == nil {
 		_ = os.RemoveAll(backupDir)
 	}
@@ -78,12 +78,12 @@ func (u *SnapshotService) SnapshotImport(req dto.SnapshotImport) error {
 			snap = strings.ReplaceAll(snap, ".tar.gz", "")
 		}
 		itemSnap := model.Snapshot{
 			Name:            snap,
-			From:            req.From,
-			DefaultDownload: req.From,
+			SourceAccountIDs:  fmt.Sprintf("%v", req.BackupAccountID),
+			DownloadAccountID: req.BackupAccountID,
 			Version:         nameItems[1],
 			Description:     req.Description,
 			Status:          constant.StatusSuccess,
 		}
 		if err := snapshotRepo.Create(&itemSnap); err != nil {
 			return err
@@ -180,14 +180,11 @@ func (u *SnapshotService) readFromJson(path string) (SnapshotJson, error) {
 }
 
 func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto.SnapshotCreate, timeNow string, secret string) (string, error) {
-	localDir, err := loadLocalDir()
-	if err != nil {
-		return "", err
-	}
 	var (
 		rootDir    string
 		snap       model.Snapshot
 		snapStatus model.SnapshotStatus
+		err        error
 	)
 
 	if req.ID == 0 {
@@ -197,15 +194,15 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
 		if isCronjob {
 			name = fmt.Sprintf("snapshot_1panel_%s_%s_%s", versionItem.Value, loadOs(), timeNow)
 		}
-		rootDir = path.Join(localDir, "system", name)
+		rootDir = path.Join(global.CONF.System.Backup, "system", name)
 
 		snap = model.Snapshot{
 			Name:            name,
 			Description:     req.Description,
-			From:            req.From,
-			DefaultDownload: req.DefaultDownload,
+			SourceAccountIDs:  req.SourceAccountIDs,
+			DownloadAccountID: req.DownloadAccountID,
 			Version:         versionItem.Value,
 			Status:          constant.StatusWaiting,
 		}
 		_ = snapshotRepo.Create(&snap)
 		snapStatus.SnapID = snap.ID
@@ -221,7 +218,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
 			snapStatus.SnapID = snap.ID
 			_ = snapshotRepo.CreateStatus(&snapStatus)
 		}
-		rootDir = path.Join(localDir, fmt.Sprintf("system/%s", snap.Name))
+		rootDir = path.Join(global.CONF.System.Backup, fmt.Sprintf("system/%s", snap.Name))
 	}
 
 	var wg sync.WaitGroup
@@ -233,7 +230,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
 
 	jsonItem := SnapshotJson{
 		BaseDir:       global.CONF.System.BaseDir,
-		BackupDataDir: localDir,
+		BackupDataDir: global.CONF.System.Backup,
 		PanelDataDir:  path.Join(global.CONF.System.BaseDir, "1panel"),
 	}
 	loadLogByStatus(snapStatus, logPath)
@@ -255,7 +252,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
 	}
 	if snapStatus.BackupData != constant.StatusDone {
 		wg.Add(1)
-		go snapBackup(itemHelper, localDir, backupPanelDir)
+		go snapBackup(itemHelper, backupPanelDir)
 	}
 
 	if !isCronjob {
@@ -266,7 +263,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
 			return
 		}
 		if snapStatus.PanelData != constant.StatusDone {
-			snapPanelData(itemHelper, localDir, backupPanelDir)
+			snapPanelData(itemHelper, backupPanelDir)
 		}
 		if snapStatus.PanelData != constant.StatusDone {
 			_ = snapshotRepo.Update(snap.ID, map[string]interface{}{"status": constant.StatusFailed})
@@ -280,7 +277,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
 			return
 		}
 		if snapStatus.Upload != constant.StatusDone {
-			snapUpload(itemHelper, req.From, fmt.Sprintf("%s.tar.gz", rootDir))
+			snapUpload(itemHelper, req.SourceAccountIDs, fmt.Sprintf("%s.tar.gz", rootDir))
 		}
 		if snapStatus.Upload != constant.StatusDone {
 			_ = snapshotRepo.Update(snap.ID, map[string]interface{}{"status": constant.StatusFailed})
@@ -297,7 +294,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
 		return snap.Name, fmt.Errorf("snapshot %s backup failed", snap.Name)
 	}
 	loadLogByStatus(snapStatus, logPath)
-	snapPanelData(itemHelper, localDir, backupPanelDir)
+	snapPanelData(itemHelper, backupPanelDir)
 	if snapStatus.PanelData != constant.StatusDone {
 		_ = snapshotRepo.Update(snap.ID, map[string]interface{}{"status": constant.StatusFailed})
 		loadLogByStatus(snapStatus, logPath)
@@ -311,7 +308,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
 		return snap.Name, fmt.Errorf("snapshot %s compress failed", snap.Name)
 	}
 	loadLogByStatus(snapStatus, logPath)
-	snapUpload(itemHelper, req.From, fmt.Sprintf("%s.tar.gz", rootDir))
+	snapUpload(itemHelper, req.SourceAccountIDs, fmt.Sprintf("%s.tar.gz", rootDir))
 	if snapStatus.Upload != constant.StatusDone {
 		_ = snapshotRepo.Update(snap.ID, map[string]interface{}{"status": constant.StatusFailed})
 		loadLogByStatus(snapStatus, logPath)
@@ -326,12 +323,12 @@ func (u *SnapshotService) Delete(req dto.SnapshotBatchDelete) error {
 	snaps, _ := snapshotRepo.GetList(commonRepo.WithIdsIn(req.Ids))
 	for _, snap := range snaps {
 		if req.DeleteWithFile {
-			targetAccounts, err := loadClientMap(snap.From)
+			accounts, err := NewBackupClientMap(strings.Split(snap.SourceAccountIDs, ","))
 			if err != nil {
 				return err
 			}
-			for _, item := range targetAccounts {
-				global.LOG.Debugf("remove snapshot file %s.tar.gz from %s", snap.Name, item.backType)
+			for _, item := range accounts {
+				global.LOG.Debugf("remove snapshot file %s.tar.gz from %s", snap.Name, item.name)
 				_, _ = item.client.Delete(path.Join(item.backupPath, "system_snapshot", snap.Name+".tar.gz"))
 			}
 		}
@@ -512,7 +509,7 @@ func loadOs() string {
 
 func loadSnapSize(records []model.Snapshot) ([]dto.SnapshotInfo, error) {
 	var datas []dto.SnapshotInfo
-	clientMap := make(map[string]loadSizeHelper)
+	clientMap := make(map[uint]loadSizeHelper)
 	var wg sync.WaitGroup
 	for i := 0; i < len(records); i++ {
 		var item dto.SnapshotInfo
@@ -520,30 +517,23 @@ func loadSnapSize(records []model.Snapshot) ([]dto.SnapshotInfo, error) {
 			return nil, errors.WithMessage(constant.ErrStructTransform, err.Error())
 		}
 		itemPath := fmt.Sprintf("system_snapshot/%s.tar.gz", item.Name)
-		if _, ok := clientMap[records[i].DefaultDownload]; !ok {
-			backup, err := backupRepo.Get(commonRepo.WithByType(records[i].DefaultDownload))
+		if _, ok := clientMap[records[i].DownloadAccountID]; !ok {
+			backup, client, err := NewBackupClientWithID(records[i].DownloadAccountID)
 			if err != nil {
-				global.LOG.Errorf("load backup model %s from db failed, err: %v", records[i].DefaultDownload, err)
-				clientMap[records[i].DefaultDownload] = loadSizeHelper{}
-				datas = append(datas, item)
-				continue
-			}
-			client, err := NewIBackupService().NewClient(&backup)
-			if err != nil {
-				global.LOG.Errorf("load backup client %s from db failed, err: %v", records[i].DefaultDownload, err)
-				clientMap[records[i].DefaultDownload] = loadSizeHelper{}
+				global.LOG.Errorf("load backup client from db failed, err: %v", err)
+				clientMap[records[i].DownloadAccountID] = loadSizeHelper{}
 				datas = append(datas, item)
 				continue
 			}
 			item.Size, _ = client.Size(path.Join(strings.TrimLeft(backup.BackupPath, "/"), itemPath))
 			datas = append(datas, item)
-			clientMap[records[i].DefaultDownload] = loadSizeHelper{backupPath: strings.TrimLeft(backup.BackupPath, "/"), client: client, isOk: true}
+			clientMap[records[i].DownloadAccountID] = loadSizeHelper{backupPath: strings.TrimLeft(backup.BackupPath, "/"), client: client, isOk: true}
 			continue
 		}
-		if clientMap[records[i].DefaultDownload].isOk {
+		if clientMap[records[i].DownloadAccountID].isOk {
 			wg.Add(1)
 			go func(index int) {
-				item.Size, _ = clientMap[records[index].DefaultDownload].client.Size(path.Join(clientMap[records[index].DefaultDownload].backupPath, itemPath))
+				item.Size, _ = clientMap[records[index].DownloadAccountID].client.Size(path.Join(clientMap[records[index].DownloadAccountID].backupPath, itemPath))
 				datas = append(datas, item)
 				wg.Done()
 			}(i)
@@ -127,24 +127,24 @@ func snapAppData(snap snapHelper, targetDir string) {
 	_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"app_data": constant.StatusDone})
 }
 
-func snapBackup(snap snapHelper, localDir, targetDir string) {
+func snapBackup(snap snapHelper, targetDir string) {
 	defer snap.Wg.Done()
 	_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"backup_data": constant.Running})
 	status := constant.StatusDone
-	if err := handleSnapTar(localDir, targetDir, "1panel_backup.tar.gz", "./system;./system_snapshot;", ""); err != nil {
+	if err := handleSnapTar(global.CONF.System.Backup, targetDir, "1panel_backup.tar.gz", "./system;./system_snapshot;", ""); err != nil {
 		status = err.Error()
 	}
 	snap.Status.BackupData = status
 	_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"backup_data": status})
 }
 
-func snapPanelData(snap snapHelper, localDir, targetDir string) {
+func snapPanelData(snap snapHelper, targetDir string) {
 	_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"panel_data": constant.Running})
 	status := constant.StatusDone
 	dataDir := path.Join(global.CONF.System.BaseDir, "1panel")
 	exclusionRules := "./tmp;./log;./cache;./db/1Panel.db-*;"
-	if strings.Contains(localDir, dataDir) {
-		exclusionRules += ("." + strings.ReplaceAll(localDir, dataDir, "") + ";")
+	if strings.Contains(global.CONF.System.Backup, dataDir) {
+		exclusionRules += ("." + strings.ReplaceAll(global.CONF.System.Backup, dataDir, "") + ";")
 	}
 	ignoreVal, _ := settingRepo.Get(settingRepo.WithByKey("SnapshotIgnore"))
 	rules := strings.Split(ignoreVal.Value, ",")
@@ -197,7 +197,7 @@ func snapCompress(snap snapHelper, rootDir string, secret string) {
 func snapUpload(snap snapHelper, accounts string, file string) {
 	source := path.Join(global.CONF.System.TmpDir, "system", path.Base(file))
 	_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"upload": constant.StatusUploading})
-	accountMap, err := loadClientMap(accounts)
+	accountMap, err := NewBackupClientMap(strings.Split(accounts, ","))
 	if err != nil {
 		snap.Status.Upload = err.Error()
 		_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"upload": err.Error()})
@@ -168,7 +168,7 @@ func backupBeforeRecover(snap model.Snapshot) error {
 	go snapJson(itemHelper, jsonItem, baseDir)
 	go snapPanel(itemHelper, path.Join(baseDir, "1panel"))
 	go snapDaemonJson(itemHelper, path.Join(baseDir, "docker"))
-	go snapBackup(itemHelper, global.CONF.System.Backup, path.Join(baseDir, "1panel"))
+	go snapBackup(itemHelper, path.Join(baseDir, "1panel"))
 	wg.Wait()
 	itemHelper.Status.AppData = constant.StatusDone
 
@@ -176,7 +176,7 @@ func backupBeforeRecover(snap model.Snapshot) error {
 	if !allDone {
 		return errors.New(msg)
 	}
-	snapPanelData(itemHelper, global.CONF.System.BaseDir, path.Join(baseDir, "1panel"))
+	snapPanelData(itemHelper, path.Join(baseDir, "1panel"))
 	if status.PanelData != constant.StatusDone {
 		return errors.New(status.PanelData)
 	}
@@ -184,23 +184,19 @@ func backupBeforeRecover(snap model.Snapshot) error {
 }
 
 func handleDownloadSnapshot(snap model.Snapshot, targetDir string) error {
-	backup, err := backupRepo.Get(commonRepo.WithByType(snap.DefaultDownload))
+	account, client, err := NewBackupClientWithID(snap.DownloadAccountID)
 	if err != nil {
 		return err
 	}
-	client, err := NewIBackupService().NewClient(&backup)
-	if err != nil {
-		return err
-	}
-	pathItem := backup.BackupPath
-	if backup.BackupPath != "/" {
-		pathItem = strings.TrimPrefix(backup.BackupPath, "/")
+	pathItem := account.BackupPath
+	if account.BackupPath != "/" {
+		pathItem = strings.TrimPrefix(account.BackupPath, "/")
 	}
 	filePath := fmt.Sprintf("%s/%s.tar.gz", targetDir, snap.Name)
 	_ = os.RemoveAll(filePath)
 	ok, err := client.Download(path.Join(pathItem, fmt.Sprintf("system_snapshot/%s.tar.gz", snap.Name)), filePath)
 	if err != nil || !ok {
-		return fmt.Errorf("download file %s from %s failed, err: %v", snap.Name, backup.Type, err)
+		return fmt.Errorf("download file %s from %s failed, err: %v", snap.Name, account.Name, err)
 	}
 	return nil
 }
@@ -1,18 +1,20 @@
 package configs
 
 type System struct {
+	MasterRequestAddr  string `mapstructure:"master_request_addr"`
+	MasterRequestToken string `mapstructure:"master_request_token"`
 	DbFile     string `mapstructure:"db_agent_file"`
 	DbPath     string `mapstructure:"db_path"`
 	LogPath    string `mapstructure:"log_path"`
 	DataDir    string `mapstructure:"data_dir"`
 	TmpDir     string `mapstructure:"tmp_dir"`
 	Cache      string `mapstructure:"cache"`
 	Backup     string `mapstructure:"backup"`
 	EncryptKey string `mapstructure:"encrypt_key"`
 	BaseDir    string `mapstructure:"base_dir"`
 	Mode       string `mapstructure:"mode"`
 	RepoUrl    string `mapstructure:"repo_url"`
 	Version    string `mapstructure:"version"`
 	IsDemo     bool   `mapstructure:"is_demo"`
 	AppRepo    string `mapstructure:"app_repo"`
 }
@@ -21,7 +21,6 @@ require (
 	github.com/go-redis/redis v6.15.9+incompatible
 	github.com/go-sql-driver/mysql v1.8.1
 	github.com/goh-chunlin/go-onedrive v1.1.1
-	github.com/golang-jwt/jwt/v4 v4.5.0
 	github.com/google/uuid v1.6.0
 	github.com/gorilla/websocket v1.5.3
 	github.com/jackc/pgx/v5 v5.6.0
@@ -300,8 +300,6 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
 github.com/goh-chunlin/go-onedrive v1.1.1 h1:HGtHk5iG0MZ92zYUtaY04czfZPBIJUr12UuFc+PW8m4=
 github.com/goh-chunlin/go-onedrive v1.1.1/go.mod h1:N8qIGHD7tryO734epiBKk5oXcpGwxKET/u3LuBHciTs=
 github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
@@ -7,6 +7,7 @@ import (
 
 	"github.com/1Panel-dev/1Panel/agent/app/model"
 	"github.com/1Panel-dev/1Panel/agent/app/repo"
+	"github.com/1Panel-dev/1Panel/agent/app/service"
 	"github.com/1Panel-dev/1Panel/agent/constant"
 	"github.com/1Panel-dev/1Panel/agent/global"
 	"github.com/1Panel-dev/1Panel/agent/utils/encrypt"
@@ -100,33 +101,14 @@ func handleCronjobStatus() {
 }
 
 func loadLocalDir() {
-	var backup model.BackupAccount
-	_ = global.DB.Where("type = ?", "LOCAL").First(&backup).Error
-	if backup.ID == 0 {
-		global.LOG.Errorf("no such backup account `%s` in db", "LOCAL")
-		return
+	account, _, err := service.NewBackupClientWithID(1)
+	if err != nil {
+		global.LOG.Errorf("load local backup account info failed, err: %v", err)
 	}
-	varMap := make(map[string]interface{})
-	if err := json.Unmarshal([]byte(backup.Vars), &varMap); err != nil {
-		global.LOG.Errorf("json unmarshal backup.Vars: %v failed, err: %v", backup.Vars, err)
-		return
+	global.CONF.System.Backup, err = service.LoadLocalDirByStr(account.Vars)
+	if err != nil {
+		global.LOG.Errorf("load local backup dir failed, err: %v", err)
 	}
-	if _, ok := varMap["dir"]; !ok {
-		global.LOG.Error("load local backup dir failed")
-		return
-	}
-	baseDir, ok := varMap["dir"].(string)
-	if ok {
-		if _, err := os.Stat(baseDir); err != nil && os.IsNotExist(err) {
-			if err = os.MkdirAll(baseDir, os.ModePerm); err != nil {
-				global.LOG.Errorf("mkdir %s failed, err: %v", baseDir, err)
-				return
-			}
-		}
-		global.CONF.System.Backup = baseDir
-		return
-	}
-	global.LOG.Errorf("error type dir: %T", varMap["dir"])
 }
 
 func initDir() {
@@ -12,7 +12,6 @@ func Init() {
 		migrations.AddTable,
 		migrations.InitHost,
 		migrations.InitSetting,
-		migrations.InitBackupAccount,
 		migrations.InitImageRepo,
 		migrations.InitDefaultGroup,
 		migrations.InitDefaultCA,
@@ -2,7 +2,6 @@ package migrations
 
 import (
 	"encoding/json"
-	"fmt"
 	"os"
 
 	"github.com/1Panel-dev/1Panel/agent/app/dto/request"
@@ -27,7 +26,6 @@ var AddTable = &gormigrate.Migration{
 			&model.AppTag{},
 			&model.Tag{},
 			&model.App{},
-			&model.BackupAccount{},
 			&model.BackupRecord{},
 			&model.Clam{},
 			&model.Command{},
@@ -94,9 +92,11 @@ var InitSetting = &gormigrate.Migration{
 		}
 		if _, err := os.Stat("/opt/1panel/nodeJson"); err == nil {
 			type nodeInfo struct {
+				MasterRequestAddr string `json:"masterRequestAddr"`
+				Token             string `json:"token"`
 				ServerCrt         string `json:"serverCrt"`
 				ServerKey         string `json:"serverKey"`
 				CurrentNode       string `json:"currentNode"`
 			}
 			nodeJson, err := os.ReadFile("/opt/1panel/nodeJson")
 			if err != nil {
@@ -106,14 +106,20 @@ var InitSetting = &gormigrate.Migration{
 			if err := json.Unmarshal(nodeJson, &node); err != nil {
 				return err
 			}
-			itemKey, _ := encrypt.StringEncrypt(node.ServerKey)
+			itemKey, _ := encrypt.StringEncryptWithBase64(node.ServerKey)
 			if err := tx.Create(&model.Setting{Key: "ServerKey", Value: itemKey}).Error; err != nil {
 				return err
 			}
-			itemCrt, _ := encrypt.StringEncrypt(node.ServerCrt)
+			itemCrt, _ := encrypt.StringEncryptWithBase64(node.ServerCrt)
 			if err := tx.Create(&model.Setting{Key: "ServerCrt", Value: itemCrt}).Error; err != nil {
 				return err
 			}
+			itemToken, _ := encrypt.StringEncryptWithBase64(node.Token)
+			if err := tx.Create(&model.Setting{Key: "Token", Value: itemToken}).Error; err != nil {
+				return err
+			}
+			global.CONF.System.MasterRequestAddr = node.MasterRequestAddr
+			global.CONF.System.MasterRequestToken = itemToken
 			global.CurrentNode = node.CurrentNode
 		} else {
 			global.CurrentNode = "127.0.0.1"
@@ -190,20 +196,6 @@ var InitSetting = &gormigrate.Migration{
 	},
 }
 
-var InitBackupAccount = &gormigrate.Migration{
-	ID: "20240722-init-backup",
-	Migrate: func(tx *gorm.DB) error {
-		item := &model.BackupAccount{
-			Type: "LOCAL",
-			Vars: fmt.Sprintf("{\"dir\":\"%s\"}", global.CONF.System.Backup),
-		}
-		if err := tx.Create(item).Error; err != nil {
-			return err
-		}
-		return nil
-	},
-}
-
 var InitImageRepo = &gormigrate.Migration{
 	ID: "20240722-init-imagerepo",
 	Migrate: func(tx *gorm.DB) error {
@@ -24,7 +24,6 @@ func (s *SettingRouter) InitRouter(Router *gin.RouterGroup) {
 		settingRouter.POST("/snapshot/rollback", baseApi.RollbackSnapshot)
 		settingRouter.POST("/snapshot/description/update", baseApi.UpdateSnapDescription)
 
-		settingRouter.POST("/backup/operate", baseApi.OperateBackup)
 		settingRouter.POST("/backup/backup", baseApi.Backup)
 		settingRouter.POST("/backup/recover", baseApi.Recover)
 		settingRouter.POST("/backup/recover/byupload", baseApi.RecoverByUpload)
agent/utils/http/master.go (new file, 56 additions)
@@ -0,0 +1,56 @@
+package http
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/1Panel-dev/1Panel/agent/app/dto"
+	"github.com/1Panel-dev/1Panel/agent/constant"
+	"github.com/1Panel-dev/1Panel/agent/global"
+)
+
+func RequestToMaster(reqUrl, reqMethod string, reqBody io.Reader) (interface{}, error) {
+	client := &http.Client{
+		Timeout: time.Second * 5,
+	}
+	parsedURL, err := url.Parse(global.CONF.System.MasterRequestAddr)
+	if err != nil {
+		return nil, fmt.Errorf("handle url Parse failed, err: %v \n", err)
+	}
+	rURL := &url.URL{
+		Path: reqUrl,
+		Host: parsedURL.Host,
+	}
+	req, err := http.NewRequest(reqMethod, rURL.String(), reqBody)
+	if err != nil {
+		return nil, fmt.Errorf("handle request failed, err: %v \n", err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set(constant.JWTHeaderName, global.CONF.System.MasterRequestToken)
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("client do request failed, err: %v", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("do request failed, err: %v", resp.Status)
+	}
+	bodyByte, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("read resp body from request failed, err: %v", err)
+	}
+	var respJson dto.Response
+	if err := json.Unmarshal(bodyByte, &respJson); err != nil {
+		return nil, fmt.Errorf("json unmarshal resp data failed, err: %v", err)
+	}
+	if respJson.Code != http.StatusOK {
+		return nil, fmt.Errorf("do request success but handle failed, err: %v", respJson.Message)
+	}
+	return respJson.Data, nil
+}
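A minimal usage sketch for the new RequestToMaster helper, assuming the agent module is available on the import path; the "/api/v2/backups/sync" route and the payload shape are hypothetical placeholders, not endpoints defined in this commit.

package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"

	// alias avoids clashing with the standard net/http import
	agenthttp "github.com/1Panel-dev/1Panel/agent/utils/http"
)

func main() {
	// Hypothetical payload and route, shown only to illustrate the call shape.
	payload, _ := json.Marshal(map[string]interface{}{"ids": []uint{1}})
	data, err := agenthttp.RequestToMaster("/api/v2/backups/sync", http.MethodPost, bytes.NewReader(payload))
	if err != nil {
		log.Fatalf("request master failed: %v", err)
	}
	log.Printf("master returned: %v", data)
}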
@@ -1,69 +0,0 @@
-package jwt
-
-import (
-	"time"
-
-	"github.com/1Panel-dev/1Panel/agent/app/repo"
-	"github.com/1Panel-dev/1Panel/agent/constant"
-
-	"github.com/golang-jwt/jwt/v4"
-)
-
-type JWT struct {
-	SigningKey []byte
-}
-
-type JwtRequest struct {
-	BaseClaims
-	BufferTime int64
-	jwt.RegisteredClaims
-}
-
-type CustomClaims struct {
-	BaseClaims
-	BufferTime int64
-	jwt.RegisteredClaims
-}
-
-type BaseClaims struct {
-	ID   uint
-	Name string
-}
-
-func NewJWT() *JWT {
-	settingRepo := repo.NewISettingRepo()
-	jwtSign, _ := settingRepo.Get(settingRepo.WithByKey("JWTSigningKey"))
-	return &JWT{
-		[]byte(jwtSign.Value),
-	}
-}
-
-func (j *JWT) CreateClaims(baseClaims BaseClaims) CustomClaims {
-	claims := CustomClaims{
-		BaseClaims: baseClaims,
-		BufferTime: constant.JWTBufferTime,
-		RegisteredClaims: jwt.RegisteredClaims{
-			ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Second * time.Duration(constant.JWTBufferTime))),
-			Issuer:    constant.JWTIssuer,
-		},
-	}
-	return claims
-}
-
-func (j *JWT) CreateToken(request CustomClaims) (string, error) {
-	token := jwt.NewWithClaims(jwt.SigningMethodHS256, &request)
-	return token.SignedString(j.SigningKey)
-}
-
-func (j *JWT) ParseToken(tokenStr string) (*JwtRequest, error) {
-	token, err := jwt.ParseWithClaims(tokenStr, &JwtRequest{}, func(token *jwt.Token) (interface{}, error) {
-		return j.SigningKey, nil
-	})
-	if err != nil || token == nil {
-		return nil, constant.ErrTokenParse
-	}
-	if claims, ok := token.Claims.(*JwtRequest); ok && token.Valid {
-		return claims, nil
-	}
-	return nil, constant.ErrTokenParse
-}
@ -129,7 +129,7 @@ func (b *BaseApi) UpdateBackup(c *gin.Context) {
// @Param request body dto.SearchPageWithType true "request"
// @Success 200
// @Security ApiKeyAuth
-// @Router /core/backup/search [get]
+// @Router /core/backup/search [post]
func (b *BaseApi) SearchBackup(c *gin.Context) {
	var req dto.SearchPageWithType
	if err := helper.CheckBindAndValidate(&req, c); err != nil {
@ -147,3 +147,32 @@ func (b *BaseApi) SearchBackup(c *gin.Context) {
		Total: total,
	})
}
+
+func (b *BaseApi) GetBackup(c *gin.Context) {
+	var req dto.OperateByID
+	if err := helper.CheckBindAndValidate(&req, c); err != nil {
+		return
+	}
+
+	data, err := backupService.Get(req)
+	if err != nil {
+		helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInternalServer, err)
+		return
+	}
+
+	helper.SuccessWithData(c, data)
+}
+
+func (b *BaseApi) ListBackup(c *gin.Context) {
+	var req dto.OperateByIDs
+	if err := helper.CheckBindAndValidate(&req, c); err != nil {
+		return
+	}
+
+	list, err := backupService.List(req)
+	if err != nil {
+		helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInternalServer, err)
+		return
+	}
+
+	helper.SuccessWithData(c, list)
+}
@ -34,3 +34,6 @@ type Options struct {
type OperateByID struct {
	ID uint `json:"id"`
}
+
+type OperateByIDs struct {
+	IDs []uint `json:"ids"`
+}
@ -9,6 +9,7 @@ type DBOption func(*gorm.DB) *gorm.DB
type ICommonRepo interface {
	WithByID(id uint) DBOption
	WithByName(name string) DBOption
+	WithByIDs(ids []uint) DBOption
	WithByType(ty string) DBOption
	WithOrderBy(orderStr string) DBOption
}
@ -32,6 +33,11 @@ func (c *CommonRepo) WithByName(name string) DBOption {
		return g.Where("`name` = ?", name)
	}
}

+func (c *CommonRepo) WithByIDs(ids []uint) DBOption {
+	return func(g *gorm.DB) *gorm.DB {
+		return g.Where("id in (?)", ids)
+	}
+}
+
func (c *CommonRepo) WithByType(ty string) DBOption {
	return func(g *gorm.DB) *gorm.DB {
		if len(ty) == 0 {
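The repo layer here uses a small functional-options pattern over gorm: each DBOption narrows the query, and WithByIDs simply contributes an `id in (?)` filter. A self-contained sketch of that composition, using only local stand-in types rather than the 1Panel packages:

package main

import (
	"fmt"
	"time"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// DBOption mirrors the repo-layer pattern in this commit: a function that
// narrows a *gorm.DB query. Everything below is local to this sketch.
type DBOption func(*gorm.DB) *gorm.DB

func withByIDs(ids []uint) DBOption {
	return func(g *gorm.DB) *gorm.DB { return g.Where("id in (?)", ids) }
}

func withOrderBy(order string) DBOption {
	return func(g *gorm.DB) *gorm.DB { return g.Order(order) }
}

type BackupAccount struct {
	ID        uint
	Name      string
	CreatedAt time.Time
}

func main() {
	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	_ = db.AutoMigrate(&BackupAccount{})
	db.Create(&BackupAccount{ID: 1, Name: "local"})
	db.Create(&BackupAccount{ID: 2, Name: "s3"})

	// Compose the options the same way backupRepo.List does with
	// commonRepo.WithByIDs and commonRepo.WithOrderBy.
	query := db.Model(&BackupAccount{})
	for _, opt := range []DBOption{withByIDs([]uint{1, 2}), withOrderBy("created_at desc")} {
		query = opt(query)
	}

	var accounts []BackupAccount
	if err := query.Find(&accounts).Error; err != nil {
		panic(err)
	}
	fmt.Printf("accounts: %+v\n", accounts)
}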
@ -123,7 +123,8 @@ func (u *AuthService) generateSession(c *gin.Context, name, authMethod string) (
	if authMethod == constant.AuthMethodJWT {
		j := jwt.NewJWT()
		claims := j.CreateClaims(jwt.BaseClaims{
			Name: name,
+			IsAgent: false,
		})
		token, err := j.CreateToken(claims)
		if err != nil {
@ -19,7 +19,6 @@ import (
	"github.com/1Panel-dev/1Panel/core/utils/cloud_storage/client"
	"github.com/1Panel-dev/1Panel/core/utils/encrypt"
	fileUtils "github.com/1Panel-dev/1Panel/core/utils/files"
-	"github.com/1Panel-dev/1Panel/core/utils/xpack"
	"github.com/jinzhu/copier"
	"github.com/pkg/errors"
	"github.com/robfig/cron/v3"
@ -28,6 +27,9 @@ import (
type BackupService struct{}

type IBackupService interface {
+	Get(req dto.OperateByID) (dto.BackupInfo, error)
+	List(req dto.OperateByIDs) ([]dto.BackupInfo, error)
+
	SearchWithPage(search dto.SearchPageWithType) (int64, interface{}, error)
	LoadOneDriveInfo() (dto.OneDriveInfo, error)
	Create(backupDto dto.BackupOperate) error
@ -43,6 +45,50 @@ func NewIBackupService() IBackupService {
	return &BackupService{}
}

+func (u *BackupService) Get(req dto.OperateByID) (dto.BackupInfo, error) {
+	var data dto.BackupInfo
+	account, err := backupRepo.List(commonRepo.WithByID(req.ID))
+	if err != nil {
+		return data, err
+	}
+	if err := copier.Copy(&data, &account); err != nil {
+		global.LOG.Errorf("copy backup account to dto backup info failed, err: %v", err)
+	}
+	data.AccessKey, err = encrypt.StringDecryptWithBase64(data.AccessKey)
+	if err != nil {
+		return data, err
+	}
+	data.Credential, err = encrypt.StringDecryptWithBase64(data.Credential)
+	if err != nil {
+		return data, err
+	}
+	return data, nil
+}
+
+func (u *BackupService) List(req dto.OperateByIDs) ([]dto.BackupInfo, error) {
+	accounts, err := backupRepo.List(commonRepo.WithByIDs(req.IDs), commonRepo.WithOrderBy("created_at desc"))
+	if err != nil {
+		return nil, err
+	}
+	var data []dto.BackupInfo
+	for _, account := range accounts {
+		var item dto.BackupInfo
+		if err := copier.Copy(&item, &account); err != nil {
+			global.LOG.Errorf("copy backup account to dto backup info failed, err: %v", err)
+		}
+		item.AccessKey, err = encrypt.StringDecryptWithBase64(item.AccessKey)
+		if err != nil {
+			return nil, err
+		}
+		item.Credential, err = encrypt.StringDecryptWithBase64(item.Credential)
+		if err != nil {
+			return nil, err
+		}
+		data = append(data, item)
+	}
+	return data, nil
+}
+
func (u *BackupService) SearchWithPage(req dto.SearchPageWithType) (int64, interface{}, error) {
	count, accounts, err := backupRepo.Page(
		req.Page,
@ -141,9 +187,6 @@ func (u *BackupService) Create(req dto.BackupOperate) error {
			return err
		}
	}
-	if err := xpack.SyncBackupOperation("add", []model.BackupAccount{backup}); err != nil {
-		return err
-	}

	backup.AccessKey, err = encrypt.StringEncrypt(backup.AccessKey)
	if err != nil {
@ -205,9 +248,6 @@ func (u *BackupService) Delete(id uint) error {
		global.Cron.Remove(cron.EntryID(backup.EntryID))
	}

-	if err := xpack.SyncBackupOperation("remove", []model.BackupAccount{backup}); err != nil {
-		return err
-	}
	return backupRepo.Delete(commonRepo.WithByID(id))
}

@ -263,10 +303,6 @@ func (u *BackupService) Update(req dto.BackupOperate) error {
		}
	}
-
-	if err := xpack.SyncBackupOperation("update", []model.BackupAccount{newBackup}); err != nil {
-		return err
-	}

	newBackup.AccessKey, err = encrypt.StringEncrypt(newBackup.AccessKey)
	if err != nil {
		return err
@ -1,21 +1,22 @@
package configs

type System struct {
+	MasterRequestAddr string `mapstructure:"masterRequestAddr"`
	Port              string `mapstructure:"port"`
	Ipv6              string `mapstructure:"ipv6"`
	BindAddress       string `mapstructure:"bindAddress"`
	SSL               string `mapstructure:"ssl"`
	DbCoreFile        string `mapstructure:"db_core_file"`
	EncryptKey        string `mapstructure:"encrypt_key"`
	BaseDir           string `mapstructure:"base_dir"`
	BackupDir         string `mapstructure:"backup_dir"`
	Mode              string `mapstructure:"mode"`
	RepoUrl           string `mapstructure:"repo_url"`
	Version           string `mapstructure:"version"`
	Username          string `mapstructure:"username"`
	Password          string `mapstructure:"password"`
	Entrance          string `mapstructure:"entrance"`
	IsDemo            bool   `mapstructure:"is_demo"`
	ChangeUserInfo    string `mapstructure:"change_user_info"`
	DbPath            string `mapstructure:"db_path"`
}
@ -15,6 +15,11 @@ import (

func Init() {
	settingRepo := repo.NewISettingRepo()
+	masterSetting, err := settingRepo.Get(settingRepo.WithByKey("MasterRequestAddr"))
+	if err != nil {
+		global.LOG.Errorf("load master request addr from setting failed, err: %v", err)
+	}
+	global.CONF.System.MasterRequestAddr = masterSetting.Value
	portSetting, err := settingRepo.Get(settingRepo.WithByKey("ServerPort"))
	if err != nil {
		global.LOG.Errorf("load service port from setting failed, err: %v", err)
@ -122,6 +122,9 @@ var InitSetting = &gormigrate.Migration{
		if err := tx.Create(&model.Setting{Key: "SystemStatus", Value: "Free"}).Error; err != nil {
			return err
		}
+		if err := tx.Create(&model.Setting{Key: "MasterRequestAddr", Value: ""}).Error; err != nil {
+			return err
+		}
		if err := tx.Create(&model.Setting{Key: "BindAddress", Value: "0.0.0.0"}).Error; err != nil {
			return err
		}
@ -9,6 +9,7 @@ import (
	"github.com/1Panel-dev/1Panel/core/global"
	"github.com/1Panel-dev/1Panel/core/i18n"
	"github.com/1Panel-dev/1Panel/core/middleware"
+	"github.com/1Panel-dev/1Panel/core/router"
	rou "github.com/1Panel-dev/1Panel/core/router"
	"github.com/gin-contrib/gzip"
	"github.com/gin-gonic/gin"
@ -53,6 +54,11 @@ func Routers() *gin.Engine {
		setWebStatic(PublicGroup)
	}

+	agentRouter := Router.Group("agent")
+	agentRouter.Use(middleware.JwtAuth())
+	var agent router.AgentRouter
+	agent.InitRouter(agentRouter)
+
	Router.Use(middleware.OperationLog())
	if global.CONF.System.IsDemo {
		Router.Use(middleware.DemoHandle())
core/router/ro_agent.go (new file)
@ -0,0 +1,16 @@
package router

import (
	v2 "github.com/1Panel-dev/1Panel/core/app/api/v2"

	"github.com/gin-gonic/gin"
)

type AgentRouter struct{}

func (s *AgentRouter) InitRouter(Router *gin.RouterGroup) {
	baseApi := v2.ApiGroupApp.BaseApi
	{
		Router.GET("/backup/:id", baseApi.GetBackup)
		Router.POST("/backup/list", baseApi.ListBackup)
	}
}
@ -13,12 +13,6 @@ type JWT struct {
	SigningKey []byte
}

-type JwtRequest struct {
-	BaseClaims
-	BufferTime int64
-	jwt.RegisteredClaims
-}
-
type CustomClaims struct {
	BaseClaims
	BufferTime int64
@ -26,8 +20,9 @@ type CustomClaims struct {
}

type BaseClaims struct {
	ID   uint
	Name string
+	IsAgent bool
}

func NewJWT() *JWT {
@ -55,14 +50,14 @@ func (j *JWT) CreateToken(request CustomClaims) (string, error) {
	return token.SignedString(j.SigningKey)
}

-func (j *JWT) ParseToken(tokenStr string) (*JwtRequest, error) {
-	token, err := jwt.ParseWithClaims(tokenStr, &JwtRequest{}, func(token *jwt.Token) (interface{}, error) {
+func (j *JWT) ParseToken(tokenStr string) (*CustomClaims, error) {
+	token, err := jwt.ParseWithClaims(tokenStr, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {
		return j.SigningKey, nil
	})
	if err != nil || token == nil {
		return nil, constant.ErrTokenParse
	}
-	if claims, ok := token.Claims.(*JwtRequest); ok && token.Valid {
+	if claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {
		return claims, nil
	}
	return nil, constant.ErrTokenParse
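To make the claims change concrete, here is a small self-contained sketch of signing and parsing an HS256 token whose claims carry the new IsAgent flag, using the same golang-jwt/jwt/v4 calls as above. The signing key and issuer are placeholders, not how 1Panel derives them (it loads JWTSigningKey from its settings table).

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// Local copies of the claim shapes used above; the key below is a placeholder.
type BaseClaims struct {
	ID      uint
	Name    string
	IsAgent bool
}

type CustomClaims struct {
	BaseClaims
	BufferTime int64
	jwt.RegisteredClaims
}

func main() {
	key := []byte("example-signing-key")

	claims := CustomClaims{
		BaseClaims: BaseClaims{Name: "agent-node-1", IsAgent: true},
		BufferTime: 86400,
		RegisteredClaims: jwt.RegisteredClaims{
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(24 * time.Hour)),
			Issuer:    "1Panel",
		},
	}

	// Sign the token the same way CreateToken does.
	tokenStr, err := jwt.NewWithClaims(jwt.SigningMethodHS256, &claims).SignedString(key)
	if err != nil {
		panic(err)
	}

	// Parse it back the same way ParseToken now does with *CustomClaims.
	parsed, err := jwt.ParseWithClaims(tokenStr, &CustomClaims{}, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	if err != nil || !parsed.Valid {
		panic(fmt.Errorf("parse token failed: %v", err))
	}
	out := parsed.Claims.(*CustomClaims)
	fmt.Printf("name=%s isAgent=%v\n", out.Name, out.IsAgent)
}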
@ -137,6 +137,9 @@ const loadNodes = async () => {
            return;
        }
        nodes.value = res.data;
+       if (nodes.value.length === 1) {
+           globalStore.currentNode = nodes.value[0].name;
+       }
        return;
    }
    nodes.value = [];