feat: 修改备份账号同步方式 (#6108)

This commit is contained in:
ssongliu 2024-08-13 15:33:34 +08:00 committed by GitHub
parent d100edb75b
commit f0274701cf
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
46 changed files with 577 additions and 785 deletions

View File

@ -10,20 +10,6 @@ import (
"github.com/gin-gonic/gin"
)
// OperateBackup binds a dto.BackupOperate payload from the request body and
// forwards it to the backup service, which adds, updates, or removes backup
// accounts in bulk. Responds with an empty success body on completion.
func (b *BaseApi) OperateBackup(c *gin.Context) {
	var req dto.BackupOperate
	if err := helper.CheckBindAndValidate(&req, c); err != nil {
		// NOTE(review): bind/validate failures are reported with the
		// internal-server code but the invalid-params type — confirm this
		// mismatch is intentional.
		helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInvalidParams, err)
		return
	}
	if err := backupService.Operate(req); err != nil {
		helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInternalServer, err)
		return
	}
	helper.SuccessWithData(c, nil)
}
// @Tags Backup Account
// @Summary Page backup records
// @Description 获取备份记录列表分页
@ -125,12 +111,12 @@ func (b *BaseApi) DeleteBackupRecord(c *gin.Context) {
// @Summary List files from backup accounts
// @Description 获取备份账号内文件列表
// @Accept json
// @Param request body dto.BackupSearchFile true "request"
// @Param request body dto.OperateByID true "request"
// @Success 200 {array} string
// @Security ApiKeyAuth
// @Router /settings/backup/search/files [post]
func (b *BaseApi) LoadFilesFromBackup(c *gin.Context) {
var req dto.BackupSearchFile
var req dto.OperateByID
if err := helper.CheckBindAndValidate(&req, c); err != nil {
return
}
@ -199,7 +185,11 @@ func (b *BaseApi) Recover(c *gin.Context) {
return
}
downloadPath, err := backupService.DownloadRecord(dto.DownloadRecord{Source: req.Source, FileDir: path.Dir(req.File), FileName: path.Base(req.File)})
downloadPath, err := backupService.DownloadRecord(dto.DownloadRecord{
DownloadAccountID: req.BackupAccountID,
FileDir: path.Dir(req.File),
FileName: path.Base(req.File),
})
if err != nil {
helper.ErrorWithDetail(c, constant.CodeErrBadRequest, constant.ErrTypeInvalidParams, fmt.Errorf("download file failed, err: %v", err))
return

View File

@ -2,26 +2,17 @@ package dto
import (
"time"
"github.com/1Panel-dev/1Panel/agent/app/model"
)
// BackupOperate is the request payload for bulk maintenance of backup
// accounts; the same action in Operate is applied to every entry in Data.
type BackupOperate struct {
	// Operate selects the action: add, remove, or update.
	Operate string `json:"operate" validate:"required,oneof=add remove update"`
	// Data carries the full account models the action applies to.
	Data []model.BackupAccount `json:"data" validate:"required"`
}
// BackupInfo is the API-facing view of a stored backup account.
type BackupInfo struct {
	ID        uint      `json:"id"`
	CreatedAt time.Time `json:"createdAt"`
	// Type identifies the storage backend (e.g. LOCAL, S3, SFTP, WebDAV).
	Type       string `json:"type"`
	Bucket     string `json:"bucket"`
	BackupPath string `json:"backupPath"`
	// Vars holds backend-specific settings serialized as a JSON object.
	Vars string `json:"vars"`
}
type BackupSearchFile struct {
Type string `json:"type" validate:"required"`
ID uint `json:"id"`
Name string `json:"name"`
Type string `json:"type" validate:"required"`
Bucket string `json:"bucket"`
AccessKey string `json:"accessKey"`
Credential string `json:"credential"`
BackupPath string `json:"backupPath"`
Vars string `json:"vars" validate:"required"`
}
type CommonBackup struct {
@ -31,12 +22,12 @@ type CommonBackup struct {
Secret string `json:"secret"`
}
type CommonRecover struct {
Source string `json:"source" validate:"required,oneof=OSS S3 SFTP MINIO LOCAL COS KODO OneDrive WebDAV"`
Type string `json:"type" validate:"required,oneof=app mysql mariadb redis website postgresql"`
Name string `json:"name"`
DetailName string `json:"detailName"`
File string `json:"file"`
Secret string `json:"secret"`
BackupAccountID uint `json:"backupAccountID" validate:"required"`
Type string `json:"type" validate:"required,oneof=app mysql mariadb redis website postgresql"`
Name string `json:"name"`
DetailName string `json:"detailName"`
File string `json:"file"`
Secret string `json:"secret"`
}
type RecordSearch struct {
@ -62,7 +53,7 @@ type BackupRecords struct {
}
type DownloadRecord struct {
Source string `json:"source" validate:"required,oneof=OSS S3 SFTP MINIO LOCAL COS KODO OneDrive WebDAV"`
FileDir string `json:"fileDir" validate:"required"`
FileName string `json:"fileName" validate:"required"`
DownloadAccountID uint `json:"downloadAccountID" validate:"required"`
FileDir string `json:"fileDir" validate:"required"`
FileName string `json:"fileName" validate:"required"`
}

View File

@ -48,11 +48,11 @@ type SnapshotStatus struct {
}
type SnapshotCreate struct {
ID uint `json:"id"`
From string `json:"from" validate:"required"`
DefaultDownload string `json:"defaultDownload" validate:"required"`
Description string `json:"description" validate:"max=256"`
Secret string `json:"secret"`
ID uint `json:"id"`
SourceAccountIDs string `json:"sourceAccountsIDs" validate:"required"`
DownloadAccountID uint `json:"downloadAccountID" validate:"required"`
Description string `json:"description" validate:"max=256"`
Secret string `json:"secret"`
}
type SnapshotRecover struct {
IsNew bool `json:"isNew"`
@ -66,9 +66,9 @@ type SnapshotBatchDelete struct {
}
type SnapshotImport struct {
From string `json:"from"`
Names []string `json:"names"`
Description string `json:"description" validate:"max=256"`
BackupAccountID uint `json:"backupAccountID"`
Names []string `json:"names"`
Description string `json:"description" validate:"max=256"`
}
type SnapshotInfo struct {

View File

@ -1,25 +1,15 @@
package model
// BackupAccount is the persisted configuration of one backup storage target.
type BackupAccount struct {
	BaseModel
	Name string `gorm:"type:varchar(64);unique;not null" json:"name"`
	// Type is the storage backend kind; unique, so at most one account
	// exists per backend type.
	Type      string `gorm:"type:varchar(64);unique;not null" json:"type"`
	Bucket    string `gorm:"type:varchar(256)" json:"bucket"`
	AccessKey string `gorm:"type:varchar(256)" json:"accessKey"`
	// Credential stores the account secret; the service layer encrypts it
	// before persisting.
	Credential string `gorm:"type:varchar(256)" json:"credential"`
	BackupPath string `gorm:"type:varchar(256)" json:"backupPath"`
	// Vars holds backend-specific settings serialized as a JSON object.
	Vars string `gorm:"type:longText" json:"vars"`
}
type BackupRecord struct {
BaseModel
From string `gorm:"type:varchar(64)" json:"from"`
CronjobID uint `gorm:"type:decimal" json:"cronjobID"`
Type string `gorm:"type:varchar(64);not null" json:"type"`
Name string `gorm:"type:varchar(64);not null" json:"name"`
DetailName string `gorm:"type:varchar(256)" json:"detailName"`
Source string `gorm:"type:varchar(256)" json:"source"`
BackupType string `gorm:"type:varchar(256)" json:"backupType"`
FileDir string `gorm:"type:varchar(256)" json:"fileDir"`
FileName string `gorm:"type:varchar(256)" json:"fileName"`
From string `json:"from"`
CronjobID uint `json:"cronjobID"`
SourceAccountIDs string `json:"sourceAccountsIDs"`
DownloadAccountID uint `json:"downloadAccountID"`
Type string `gorm:"not null" json:"type"`
Name string `gorm:"not null" json:"name"`
DetailName string `json:"detailName"`
FileDir string `json:"fileDir"`
FileName string `json:"fileName"`
}

View File

@ -7,44 +7,40 @@ import (
type Cronjob struct {
BaseModel
Name string `gorm:"type:varchar(64);not null" json:"name"`
Type string `gorm:"type:varchar(64);not null" json:"type"`
Spec string `gorm:"type:varchar(64);not null" json:"spec"`
Name string `gorm:"not null" json:"name"`
Type string `gorm:"not null" json:"type"`
Spec string `gorm:"not null" json:"spec"`
Command string `gorm:"type:varchar(64)" json:"command"`
ContainerName string `gorm:"type:varchar(64)" json:"containerName"`
Script string `gorm:"longtext" json:"script"`
Website string `gorm:"type:varchar(64)" json:"website"`
AppID string `gorm:"type:varchar(64)" json:"appID"`
DBType string `gorm:"type:varchar(64)" json:"dbType"`
DBName string `gorm:"type:varchar(64)" json:"dbName"`
URL string `gorm:"type:varchar(256)" json:"url"`
SourceDir string `gorm:"type:varchar(256)" json:"sourceDir"`
ExclusionRules string `gorm:"longtext" json:"exclusionRules"`
Command string `json:"command"`
ContainerName string `json:"containerName"`
Script string `json:"script"`
Website string `json:"website"`
AppID string `json:"appID"`
DBType string `json:"dbType"`
DBName string `json:"dbName"`
URL string `json:"url"`
SourceDir string `json:"sourceDir"`
ExclusionRules string `json:"exclusionRules"`
// 已废弃
KeepLocal bool `gorm:"type:varchar(64)" json:"keepLocal"`
TargetDirID uint64 `gorm:"type:decimal" json:"targetDirID"`
SourceAccountIDs string `json:"sourceAccountsIDs"`
DownloadAccountID uint `json:"downloadAccountID"`
RetainCopies uint64 `json:"retainCopies"`
BackupAccounts string `gorm:"type:varchar(64)" json:"backupAccounts"`
DefaultDownload string `gorm:"type:varchar(64)" json:"defaultDownload"`
RetainCopies uint64 `gorm:"type:decimal" json:"retainCopies"`
Status string `gorm:"type:varchar(64)" json:"status"`
EntryIDs string `gorm:"type:varchar(64)" json:"entryIDs"`
Status string `json:"status"`
EntryIDs string `json:"entryIDs"`
Records []JobRecords `json:"records"`
Secret string `gorm:"type:varchar(64)" json:"secret"`
Secret string `json:"secret"`
}
type JobRecords struct {
BaseModel
CronjobID uint `gorm:"type:decimal" json:"cronjobID"`
StartTime time.Time `gorm:"type:datetime" json:"startTime"`
Interval float64 `gorm:"type:float" json:"interval"`
Records string `gorm:"longtext" json:"records"`
FromLocal bool `gorm:"type:varchar(64)" json:"source"`
File string `gorm:"type:varchar(256)" json:"file"`
Status string `gorm:"type:varchar(64)" json:"status"`
Message string `gorm:"longtext" json:"message"`
CronjobID uint `json:"cronjobID"`
StartTime time.Time `json:"startTime"`
Interval float64 `json:"interval"`
Records string `json:"records"`
FromLocal bool `json:"source"`
File string `json:"file"`
Status string `json:"status"`
Message string `json:"message"`
}

View File

@ -2,34 +2,34 @@ package model
type Snapshot struct {
BaseModel
Name string `json:"name" gorm:"type:varchar(64);not null;unique"`
Description string `json:"description" gorm:"type:varchar(256)"`
From string `json:"from"`
DefaultDownload string `json:"defaultDownload" gorm:"type:varchar(64)"`
Status string `json:"status" gorm:"type:varchar(64)"`
Message string `json:"message" gorm:"type:varchar(256)"`
Version string `json:"version" gorm:"type:varchar(256)"`
Name string `json:"name" gorm:"not null;unique"`
Description string `json:"description"`
SourceAccountIDs string `json:"sourceAccountsIDs"`
DownloadAccountID uint `json:"downloadAccountID"`
Status string `json:"status"`
Message string `json:"message"`
Version string `json:"version"`
InterruptStep string `json:"interruptStep" gorm:"type:varchar(64)"`
RecoverStatus string `json:"recoverStatus" gorm:"type:varchar(64)"`
RecoverMessage string `json:"recoverMessage" gorm:"type:varchar(256)"`
LastRecoveredAt string `json:"lastRecoveredAt" gorm:"type:varchar(64)"`
RollbackStatus string `json:"rollbackStatus" gorm:"type:varchar(64)"`
RollbackMessage string `json:"rollbackMessage" gorm:"type:varchar(256)"`
LastRollbackedAt string `json:"lastRollbackedAt" gorm:"type:varchar(64)"`
InterruptStep string `json:"interruptStep"`
RecoverStatus string `json:"recoverStatus"`
RecoverMessage string `json:"recoverMessage"`
LastRecoveredAt string `json:"lastRecoveredAt"`
RollbackStatus string `json:"rollbackStatus"`
RollbackMessage string `json:"rollbackMessage"`
LastRollbackAt string `json:"lastRollbackAt"`
}
type SnapshotStatus struct {
BaseModel
SnapID uint `gorm:"type:decimal" json:"snapID"`
Panel string `json:"panel" gorm:"type:varchar(64);default:Running"`
PanelInfo string `json:"panelInfo" gorm:"type:varchar(64);default:Running"`
DaemonJson string `json:"daemonJson" gorm:"type:varchar(64);default:Running"`
AppData string `json:"appData" gorm:"type:varchar(64);default:Running"`
PanelData string `json:"panelData" gorm:"type:varchar(64);default:Running"`
BackupData string `json:"backupData" gorm:"type:varchar(64);default:Running"`
SnapID uint `json:"snapID"`
Panel string `json:"panel" gorm:"default:Running"`
PanelInfo string `json:"panelInfo" gorm:"default:Running"`
DaemonJson string `json:"daemonJson" gorm:"default:Running"`
AppData string `json:"appData" gorm:"default:Running"`
PanelData string `json:"panelData" gorm:"default:Running"`
BackupData string `json:"backupData" gorm:"default:Running"`
Compress string `json:"compress" gorm:"type:varchar(64);default:Waiting"`
Size string `json:"size" gorm:"type:varchar(64)"`
Upload string `json:"upload" gorm:"type:varchar(64);default:Waiting"`
Compress string `json:"compress" gorm:"default:Waiting"`
Size string `json:"size" `
Upload string `json:"upload" gorm:"default:Waiting"`
}

View File

@ -11,12 +11,6 @@ import (
type BackupRepo struct{}
type IBackupRepo interface {
Get(opts ...DBOption) (model.BackupAccount, error)
List(opts ...DBOption) ([]model.BackupAccount, error)
Create(backup []model.BackupAccount) error
Save(backup *model.BackupAccount) error
Delete(opts ...DBOption) error
ListRecord(opts ...DBOption) ([]model.BackupRecord, error)
PageRecord(page, size int, opts ...DBOption) (int64, []model.BackupRecord, error)
CreateRecord(record *model.BackupRecord) error
@ -24,7 +18,6 @@ type IBackupRepo interface {
UpdateRecord(record *model.BackupRecord) error
WithByDetailName(detailName string) DBOption
WithByFileName(fileName string) DBOption
WithByType(backupType string) DBOption
WithByCronID(cronjobID uint) DBOption
}
@ -32,20 +25,6 @@ func NewIBackupRepo() IBackupRepo {
return &BackupRepo{}
}
// Get returns the first BackupAccount matching the supplied query options.
func (u *BackupRepo) Get(opts ...DBOption) (model.BackupAccount, error) {
	var account model.BackupAccount
	query := global.DB
	for _, apply := range opts {
		query = apply(query)
	}
	err := query.First(&account).Error
	return account, err
}
// Save upserts the given backup account via gorm's Save (insert when the
// primary key is zero, full-row update otherwise).
func (u *BackupRepo) Save(backup *model.BackupAccount) error {
	return global.DB.Save(backup).Error
}
func (u *BackupRepo) ListRecord(opts ...DBOption) ([]model.BackupRecord, error) {
var users []model.BackupRecord
db := global.DB.Model(&model.BackupRecord{})
@ -86,29 +65,6 @@ func (u *BackupRepo) WithByFileName(fileName string) DBOption {
}
}
// WithByType restricts a query to rows whose type column equals backupType;
// an empty string leaves the query unfiltered.
func (u *BackupRepo) WithByType(backupType string) DBOption {
	return func(g *gorm.DB) *gorm.DB {
		if backupType != "" {
			return g.Where("type = ?", backupType)
		}
		return g
	}
}
// List returns every BackupAccount matching the supplied query options.
func (u *BackupRepo) List(opts ...DBOption) ([]model.BackupAccount, error) {
	var accounts []model.BackupAccount
	query := global.DB.Model(&model.BackupAccount{})
	for _, apply := range opts {
		query = apply(query)
	}
	err := query.Find(&accounts).Error
	return accounts, err
}
// Create inserts the given backup accounts in a single batch.
func (u *BackupRepo) Create(backup []model.BackupAccount) error {
	return global.DB.Create(backup).Error
}
// CreateRecord persists a new backup record row.
func (u *BackupRepo) CreateRecord(record *model.BackupRecord) error {
	return global.DB.Create(record).Error
}
@ -117,14 +73,6 @@ func (u *BackupRepo) UpdateRecord(record *model.BackupRecord) error {
return global.DB.Save(record).Error
}
// Delete removes every BackupAccount matching the supplied query options.
func (u *BackupRepo) Delete(opts ...DBOption) error {
	query := global.DB
	for _, apply := range opts {
		query = apply(query)
	}
	return query.Delete(&model.BackupAccount{}).Error
}
// DeleteRecord removes matching backup records using the DB handle resolved
// from ctx by getTx (presumably a transaction when one is attached — confirm
// getTx's fallback behavior).
func (u *BackupRepo) DeleteRecord(ctx context.Context, opts ...DBOption) error {
	return getTx(ctx, opts...).Delete(&model.BackupRecord{}).Error
}

View File

@ -28,7 +28,7 @@ type ICronjobRepo interface {
Update(id uint, vars map[string]interface{}) error
Delete(opts ...DBOption) error
DeleteRecord(opts ...DBOption) error
StartRecords(cronjobID uint, fromLocal bool, targetPath string) model.JobRecords
StartRecords(cronjobID uint, targetPath string) model.JobRecords
UpdateRecords(id uint, vars map[string]interface{}) error
EndRecords(record model.JobRecords, status, message, records string)
PageRecords(page, size int, opts ...DBOption) (int64, []model.JobRecords, error)
@ -142,11 +142,10 @@ func (c *CronjobRepo) WithByRecordDropID(id int) DBOption {
}
}
func (u *CronjobRepo) StartRecords(cronjobID uint, fromLocal bool, targetPath string) model.JobRecords {
func (u *CronjobRepo) StartRecords(cronjobID uint, targetPath string) model.JobRecords {
var record model.JobRecords
record.StartTime = time.Now()
record.CronjobID = cronjobID
record.FromLocal = fromLocal
record.Status = constant.StatusWaiting
if err := global.DB.Create(&record).Error; err != nil {
global.LOG.Errorf("create record status failed, err: %v", err)

View File

@ -5,6 +5,13 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"github.com/1Panel-dev/1Panel/agent/app/dto"
"github.com/1Panel-dev/1Panel/agent/app/dto/request"
"github.com/1Panel-dev/1Panel/agent/app/dto/response"
@ -22,12 +29,6 @@ import (
httpUtil "github.com/1Panel-dev/1Panel/agent/utils/http"
"github.com/1Panel-dev/1Panel/agent/utils/xpack"
"gopkg.in/yaml.v3"
"net/http"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
)
type AppService struct {
@ -91,10 +92,6 @@ func (a AppService) PageApp(req request.AppSearch) (interface{}, error) {
}
var appDTOs []*response.AppDto
for _, ap := range apps {
ap.ReadMe = ""
ap.Website = ""
ap.Document = ""
ap.Github = ""
appDTO := &response.AppDto{
ID: ap.ID,
Name: ap.Name,

View File

@ -5,7 +5,6 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/1Panel-dev/1Panel/agent/app/task"
"log"
"math"
"net/http"
@ -18,6 +17,8 @@ import (
"strings"
"time"
"github.com/1Panel-dev/1Panel/agent/app/task"
"github.com/docker/docker/api/types"
httpUtil "github.com/1Panel-dev/1Panel/agent/utils/http"
@ -437,8 +438,7 @@ func deleteAppInstall(deleteReq request.AppInstallDelete) error {
_ = os.RemoveAll(uploadDir)
}
if deleteReq.DeleteBackup {
localDir, _ := loadLocalDir()
backupDir := path.Join(localDir, fmt.Sprintf("app/%s/%s", install.App.Key, install.Name))
backupDir := path.Join(global.CONF.System.Backup, fmt.Sprintf("app/%s/%s", install.App.Key, install.Name))
if _, err = os.Stat(backupDir); err == nil {
t.LogWithOps(task.TaskDelete, i18n.GetMsgByKey("TaskBackup"))
_ = os.RemoveAll(backupDir)
@ -584,11 +584,7 @@ func upgradeInstall(req request.AppInstallUpgrade) error {
if err != nil {
return buserr.WithNameAndErr("ErrAppBackup", install.Name, err)
}
localDir, err := loadLocalDir()
if err != nil {
return buserr.WithNameAndErr("ErrAppBackup", install.Name, err)
}
backupFile = path.Join(localDir, backupRecord.FileDir, backupRecord.FileName)
backupFile = path.Join(global.CONF.System.Backup, backupRecord.FileDir, backupRecord.FileName)
}
return nil
}
@ -709,7 +705,7 @@ func upgradeInstall(req request.AppInstallUpgrade) error {
rollBackApp := func(t *task.Task) {
if req.Backup {
t.Log(i18n.GetWithName("AppRecover", install.Name))
if err := NewIBackupService().AppRecover(dto.CommonRecover{Name: install.App.Key, DetailName: install.Name, Type: "app", Source: constant.ResourceLocal, File: backupFile}); err != nil {
if err := NewIBackupService().AppRecover(dto.CommonRecover{Name: install.App.Key, DetailName: install.Name, Type: "app", BackupAccountID: 1, File: backupFile}); err != nil {
t.LogFailedWithErr(i18n.GetWithName("AppRecover", install.Name), err)
return
}

View File

@ -1,9 +1,11 @@
package service
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"path"
"sort"
@ -15,8 +17,7 @@ import (
"github.com/1Panel-dev/1Panel/agent/constant"
"github.com/1Panel-dev/1Panel/agent/global"
"github.com/1Panel-dev/1Panel/agent/utils/cloud_storage"
"github.com/1Panel-dev/1Panel/agent/utils/encrypt"
fileUtils "github.com/1Panel-dev/1Panel/agent/utils/files"
httpUtils "github.com/1Panel-dev/1Panel/agent/utils/http"
"github.com/jinzhu/copier"
"github.com/pkg/errors"
)
@ -24,16 +25,13 @@ import (
type BackupService struct{}
type IBackupService interface {
Operate(req dto.BackupOperate) error
SearchRecordsWithPage(search dto.RecordSearch) (int64, []dto.BackupRecords, error)
SearchRecordsByCronjobWithPage(search dto.RecordSearchByCronjob) (int64, []dto.BackupRecords, error)
DownloadRecord(info dto.DownloadRecord) (string, error)
DeleteRecordByName(backupType, name, detailName string, withDeleteFile bool) error
BatchDeleteRecord(ids []uint) error
NewClient(backup *model.BackupAccount) (cloud_storage.CloudStorageClient, error)
ListFiles(req dto.BackupSearchFile) []string
ListFiles(req dto.OperateByID) []string
MysqlBackup(db dto.CommonBackup) error
PostgresqlBackup(db dto.CommonBackup) error
@ -99,36 +97,13 @@ type loadSizeHelper struct {
}
func (u *BackupService) DownloadRecord(info dto.DownloadRecord) (string, error) {
backup, _ := backupRepo.Get(commonRepo.WithByType(info.Source))
if backup.ID == 0 {
return "", constant.ErrRecordNotFound
}
if info.Source == "LOCAL" {
localDir, err := loadLocalDir()
if err != nil {
return "", err
}
return path.Join(localDir, info.FileDir, info.FileName), nil
}
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(backup.Vars), &varMap); err != nil {
return "", err
}
varMap["bucket"] = backup.Bucket
switch backup.Type {
case constant.Sftp, constant.WebDAV:
varMap["username"] = backup.AccessKey
varMap["password"] = backup.Credential
case constant.OSS, constant.S3, constant.MinIo, constant.Cos, constant.Kodo:
varMap["accessKey"] = backup.AccessKey
varMap["secretKey"] = backup.Credential
case constant.OneDrive:
varMap["accessToken"] = backup.Credential
}
backClient, err := cloud_storage.NewCloudStorageClient(backup.Type, varMap)
account, client, err := NewBackupClientWithID(info.DownloadAccountID)
if err != nil {
return "", fmt.Errorf("new cloud storage client failed, err: %v", err)
}
if account.Type == "LOCAL" {
return path.Join(global.CONF.System.Backup, info.FileDir, info.FileName), nil
}
targetPath := fmt.Sprintf("%s/download/%s/%s", constant.DataDir, info.FileDir, info.FileName)
if _, err := os.Stat(path.Dir(targetPath)); err != nil && os.IsNotExist(err) {
if err = os.MkdirAll(path.Dir(targetPath), os.ModePerm); err != nil {
@ -136,11 +111,11 @@ func (u *BackupService) DownloadRecord(info dto.DownloadRecord) (string, error)
}
}
srcPath := fmt.Sprintf("%s/%s", info.FileDir, info.FileName)
if len(backup.BackupPath) != 0 {
srcPath = path.Join(strings.TrimPrefix(backup.BackupPath, "/"), srcPath)
if len(account.BackupPath) != 0 {
srcPath = path.Join(strings.TrimPrefix(account.BackupPath, "/"), srcPath)
}
if exist, _ := backClient.Exist(srcPath); exist {
isOK, err := backClient.Download(srcPath, targetPath)
if exist, _ := client.Exist(srcPath); exist {
isOK, err := client.Download(srcPath, targetPath)
if !isOK {
return "", fmt.Errorf("cloud storage download failed, err: %v", err)
}
@ -148,67 +123,6 @@ func (u *BackupService) DownloadRecord(info dto.DownloadRecord) (string, error)
return targetPath, nil
}
// Operate applies a bulk add / remove / update action to backup accounts.
// Credentials are encrypted before anything is persisted. For updates to the
// LOCAL account whose directory changed, existing backup data is copied from
// the old directory to the new one and the in-memory config is refreshed.
//
// Fix: removed two leftover debug statements (`global.LOG.Debug("走到了这里")`
// and `…111`) that had been committed to production code.
func (u *BackupService) Operate(req dto.BackupOperate) error {
	// Never store secrets in plain text: encrypt both fields up front.
	for i := 0; i < len(req.Data); i++ {
		encryptKeyItem, err := encrypt.StringEncryptWithBase64(req.Data[i].AccessKey)
		if err != nil {
			return err
		}
		req.Data[i].AccessKey = encryptKeyItem
		encryptCredentialItem, err := encrypt.StringEncryptWithBase64(req.Data[i].Credential)
		if err != nil {
			return err
		}
		req.Data[i].Credential = encryptCredentialItem
	}
	if req.Operate == "add" {
		return backupRepo.Create(req.Data)
	}
	if req.Operate == "remove" {
		var names []string
		for _, item := range req.Data {
			names = append(names, item.Name)
		}
		return backupRepo.Delete(commonRepo.WithNamesIn(names))
	}
	// Update path: create accounts that do not exist yet, save the rest.
	for _, item := range req.Data {
		local, _ := backupRepo.Get(commonRepo.WithByName(item.Name))
		if local.ID == 0 {
			if err := backupRepo.Create([]model.BackupAccount{item}); err != nil {
				return err
			}
			continue
		}
		if item.Type == constant.Local {
			// The LOCAL account's directory changed: migrate existing
			// backup data into the new directory before saving.
			if local.ID != 0 && item.Vars != local.Vars {
				oldPath, err := loadLocalDirByStr(local.Vars)
				if err != nil {
					return err
				}
				newPath, err := loadLocalDirByStr(item.Vars)
				if err != nil {
					return err
				}
				// Strip a trailing "/" (but keep the bare root path).
				if strings.HasSuffix(newPath, "/") && newPath != "/" {
					newPath = newPath[:strings.LastIndex(newPath, "/")]
				}
				if err := copyDir(oldPath, newPath); err != nil {
					return err
				}
				global.CONF.System.Backup = newPath
			}
		}
		item.ID = local.ID
		if err := backupRepo.Save(&item); err != nil {
			return err
		}
	}
	return nil
}
func (u *BackupService) DeleteRecordByName(backupType, name, detailName string, withDeleteFile bool) error {
if !withDeleteFile {
return backupRepo.DeleteRecord(context.Background(), commonRepo.WithByType(backupType), commonRepo.WithByName(name), backupRepo.WithByDetailName(detailName))
@ -220,18 +134,13 @@ func (u *BackupService) DeleteRecordByName(backupType, name, detailName string,
}
for _, record := range records {
backupAccount, err := backupRepo.Get(commonRepo.WithByType(record.Source))
_, client, err := NewBackupClientWithID(record.DownloadAccountID)
if err != nil {
global.LOG.Errorf("load backup account %s info from db failed, err: %v", record.Source, err)
continue
}
client, err := u.NewClient(&backupAccount)
if err != nil {
global.LOG.Errorf("new client for backup account %s failed, err: %v", record.Source, err)
global.LOG.Errorf("new client for backup account failed, err: %v", err)
continue
}
if _, err = client.Delete(path.Join(record.FileDir, record.FileName)); err != nil {
global.LOG.Errorf("remove file %s from %s failed, err: %v", path.Join(record.FileDir, record.FileName), record.Source, err)
global.LOG.Errorf("remove file %s failed, err: %v", path.Join(record.FileDir, record.FileName), err)
}
_ = backupRepo.DeleteRecord(context.Background(), commonRepo.WithByID(record.ID))
}
@ -244,40 +153,31 @@ func (u *BackupService) BatchDeleteRecord(ids []uint) error {
return err
}
for _, record := range records {
backupAccount, err := backupRepo.Get(commonRepo.WithByType(record.Source))
_, client, err := NewBackupClientWithID(record.DownloadAccountID)
if err != nil {
global.LOG.Errorf("load backup account %s info from db failed, err: %v", record.Source, err)
continue
}
client, err := u.NewClient(&backupAccount)
if err != nil {
global.LOG.Errorf("new client for backup account %s failed, err: %v", record.Source, err)
global.LOG.Errorf("new client for backup account failed, err: %v", err)
continue
}
if _, err = client.Delete(path.Join(record.FileDir, record.FileName)); err != nil {
global.LOG.Errorf("remove file %s from %s failed, err: %v", path.Join(record.FileDir, record.FileName), record.Source, err)
global.LOG.Errorf("remove file %s failed, err: %v", path.Join(record.FileDir, record.FileName), err)
}
}
return backupRepo.DeleteRecord(context.Background(), commonRepo.WithIdsIn(ids))
}
func (u *BackupService) ListFiles(req dto.BackupSearchFile) []string {
func (u *BackupService) ListFiles(req dto.OperateByID) []string {
var datas []string
backup, err := backupRepo.Get(backupRepo.WithByType(req.Type))
if err != nil {
return datas
}
client, err := u.NewClient(&backup)
account, client, err := NewBackupClientWithID(req.ID)
if err != nil {
return datas
}
prefix := "system_snapshot"
if len(backup.BackupPath) != 0 {
prefix = path.Join(strings.TrimPrefix(backup.BackupPath, "/"), prefix)
if len(account.BackupPath) != 0 {
prefix = path.Join(strings.TrimPrefix(account.BackupPath, "/"), prefix)
}
files, err := client.ListObjects(prefix)
if err != nil {
global.LOG.Debugf("load files from %s failed, err: %v", req.Type, err)
global.LOG.Debugf("load files failed, err: %v", err)
return datas
}
for _, file := range files {
@ -288,32 +188,9 @@ func (u *BackupService) ListFiles(req dto.BackupSearchFile) []string {
return datas
}
func (u *BackupService) NewClient(backup *model.BackupAccount) (cloud_storage.CloudStorageClient, error) {
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(backup.Vars), &varMap); err != nil {
return nil, err
}
varMap["bucket"] = backup.Bucket
switch backup.Type {
case constant.Sftp, constant.WebDAV:
varMap["username"] = backup.AccessKey
varMap["password"] = backup.Credential
case constant.OSS, constant.S3, constant.MinIo, constant.Cos, constant.Kodo:
varMap["accessKey"] = backup.AccessKey
varMap["secretKey"] = backup.Credential
}
backClient, err := cloud_storage.NewCloudStorageClient(backup.Type, varMap)
if err != nil {
return nil, err
}
return backClient, nil
}
func (u *BackupService) loadRecordSize(records []model.BackupRecord) ([]dto.BackupRecords, error) {
var datas []dto.BackupRecords
clientMap := make(map[string]loadSizeHelper)
clientMap := make(map[uint]loadSizeHelper)
var wg sync.WaitGroup
for i := 0; i < len(records); i++ {
var item dto.BackupRecords
@ -321,30 +198,23 @@ func (u *BackupService) loadRecordSize(records []model.BackupRecord) ([]dto.Back
return nil, errors.WithMessage(constant.ErrStructTransform, err.Error())
}
itemPath := path.Join(records[i].FileDir, records[i].FileName)
if _, ok := clientMap[records[i].Source]; !ok {
backup, err := backupRepo.Get(commonRepo.WithByType(records[i].Source))
if _, ok := clientMap[records[i].DownloadAccountID]; !ok {
account, client, err := NewBackupClientWithID(records[i].DownloadAccountID)
if err != nil {
global.LOG.Errorf("load backup model %s from db failed, err: %v", records[i].Source, err)
clientMap[records[i].Source] = loadSizeHelper{}
global.LOG.Errorf("load backup client from db failed, err: %v", err)
clientMap[records[i].DownloadAccountID] = loadSizeHelper{}
datas = append(datas, item)
continue
}
client, err := u.NewClient(&backup)
if err != nil {
global.LOG.Errorf("load backup client %s from db failed, err: %v", records[i].Source, err)
clientMap[records[i].Source] = loadSizeHelper{}
datas = append(datas, item)
continue
}
item.Size, _ = client.Size(path.Join(strings.TrimLeft(backup.BackupPath, "/"), itemPath))
item.Size, _ = client.Size(path.Join(strings.TrimLeft(account.BackupPath, "/"), itemPath))
datas = append(datas, item)
clientMap[records[i].Source] = loadSizeHelper{backupPath: strings.TrimLeft(backup.BackupPath, "/"), client: client, isOk: true}
clientMap[records[i].DownloadAccountID] = loadSizeHelper{backupPath: strings.TrimLeft(account.BackupPath, "/"), client: client, isOk: true}
continue
}
if clientMap[records[i].Source].isOk {
if clientMap[records[i].DownloadAccountID].isOk {
wg.Add(1)
go func(index int) {
item.Size, _ = clientMap[records[index].Source].client.Size(path.Join(clientMap[records[index].Source].backupPath, itemPath))
item.Size, _ = clientMap[records[index].DownloadAccountID].client.Size(path.Join(clientMap[records[index].DownloadAccountID].backupPath, itemPath))
datas = append(datas, item)
wg.Done()
}(i)
@ -356,15 +226,90 @@ func (u *BackupService) loadRecordSize(records []model.BackupRecord) ([]dto.Back
return datas, nil
}
func loadLocalDir() (string, error) {
backup, err := backupRepo.Get(commonRepo.WithByType("LOCAL"))
func NewBackupClientWithID(id uint) (*dto.BackupInfo, cloud_storage.CloudStorageClient, error) {
data, err := httpUtils.RequestToMaster(fmt.Sprintf("/api/v2/backup/%v", id), http.MethodGet, nil)
if err != nil {
return "", err
return nil, nil, err
}
return loadLocalDirByStr(backup.Vars)
global.LOG.Debug("我走到了这里11")
account, ok := data.(dto.BackupInfo)
if !ok {
return nil, nil, fmt.Errorf("err response from master: %v", data)
}
global.LOG.Debug("我走到了这里22")
if account.Type == constant.Local {
localDir, err := LoadLocalDirByStr(account.Vars)
if err != nil {
return nil, nil, err
}
global.CONF.System.Backup = localDir
}
global.LOG.Debug("我走到了这里33")
backClient, err := newClient(&account)
if err != nil {
return nil, nil, err
}
return &account, backClient, nil
}
func loadLocalDirByStr(vars string) (string, error) {
// backupClientHelper bundles an initialized storage client with the account
// metadata needed to resolve object paths inside that account.
type backupClientHelper struct {
	id   uint
	name string
	// backupPath is the account's base path, normalized by the constructor
	// (leading "/" trimmed unless the path is the bare root).
	backupPath string
	client     cloud_storage.CloudStorageClient
}
// NewBackupClientMap fetches the backup accounts identified by ids from the
// master node and returns a map keyed by account name, each entry holding a
// ready-to-use storage client and the account's normalized backup path.
// Fails fast: the first account whose client cannot be built aborts the call.
func NewBackupClientMap(ids []string) (map[string]backupClientHelper, error) {
	bodyItem, err := json.Marshal(ids)
	if err != nil {
		return nil, err
	}
	data, err := httpUtils.RequestToMaster("/api/v2/backup/list", http.MethodPost, bytes.NewReader(bodyItem))
	if err != nil {
		return nil, err
	}
	accounts, ok := data.([]dto.BackupInfo)
	if !ok {
		return nil, fmt.Errorf("err response from master: %v", data)
	}
	clientMap := make(map[string]backupClientHelper)
	for _, item := range accounts {
		backClient, err := newClient(&item)
		if err != nil {
			return nil, err
		}
		// Normalize "/foo/bar" -> "foo/bar"; keep a bare root "/" as-is.
		pathItem := item.BackupPath
		if item.BackupPath != "/" {
			pathItem = strings.TrimPrefix(item.BackupPath, "/")
		}
		// NOTE(review): the helper's id field is left at zero here —
		// confirm no caller relies on it being populated.
		clientMap[item.Name] = backupClientHelper{client: backClient, backupPath: pathItem, name: item.Name}
	}
	return clientMap, nil
}
// newClient builds a cloud-storage client for the given account by decoding
// its JSON vars and injecting the bucket plus backend-specific credential
// fields expected by each storage driver.
func newClient(account *dto.BackupInfo) (cloud_storage.CloudStorageClient, error) {
	settings := make(map[string]interface{})
	if err := json.Unmarshal([]byte(account.Vars), &settings); err != nil {
		return nil, err
	}
	settings["bucket"] = account.Bucket
	switch account.Type {
	case constant.Sftp, constant.WebDAV:
		// SFTP/WebDAV authenticate with username + password.
		settings["username"] = account.AccessKey
		settings["password"] = account.Credential
	case constant.OSS, constant.S3, constant.MinIo, constant.Cos, constant.Kodo:
		// Object stores authenticate with an access/secret key pair.
		settings["accessKey"] = account.AccessKey
		settings["secretKey"] = account.Credential
	}
	return cloud_storage.NewCloudStorageClient(account.Type, settings)
}
func LoadLocalDirByStr(vars string) (string, error) {
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(vars), &varMap); err != nil {
return "", err
@ -382,34 +327,3 @@ func loadLocalDirByStr(vars string) (string, error) {
}
return "", fmt.Errorf("error type dir: %T", varMap["dir"])
}
// copyDir recursively mirrors the directory tree rooted at src into dst.
// dst is created with src's permission bits. Per-entry copy failures are
// logged and skipped so one bad entry does not abort the whole copy; the
// function only returns an error when src cannot be read or dst created.
func copyDir(src, dst string) error {
	info, err := os.Stat(src)
	if err != nil {
		return err
	}
	if err := os.MkdirAll(dst, info.Mode()); err != nil {
		return err
	}
	entries, err := os.ReadDir(src)
	if err != nil {
		return err
	}
	op := fileUtils.NewFileOp()
	for _, entry := range entries {
		srcPath := fmt.Sprintf("%s/%s", src, entry.Name())
		dstPath := fmt.Sprintf("%s/%s", dst, entry.Name())
		if !entry.IsDir() {
			// Regular file: CopyFile places srcPath into the dst directory.
			if err := op.CopyFile(srcPath, dst); err != nil {
				global.LOG.Errorf("copy file %s to %s failed, err: %v", srcPath, dstPath, err)
			}
			continue
		}
		if err := copyDir(srcPath, dstPath); err != nil {
			global.LOG.Errorf("copy dir %s to %s failed, err: %v", srcPath, dstPath, err)
		}
	}
	return nil
}

View File

@ -23,10 +23,6 @@ import (
)
func (u *BackupService) AppBackup(req dto.CommonBackup) (*model.BackupRecord, error) {
localDir, err := loadLocalDir()
if err != nil {
return nil, err
}
app, err := appRepo.GetFirst(appRepo.WithKey(req.Name))
if err != nil {
return nil, err
@ -37,7 +33,7 @@ func (u *BackupService) AppBackup(req dto.CommonBackup) (*model.BackupRecord, er
}
timeNow := time.Now().Format(constant.DateTimeSlimLayout)
itemDir := fmt.Sprintf("app/%s/%s", req.Name, req.DetailName)
backupDir := path.Join(localDir, itemDir)
backupDir := path.Join(global.CONF.System.Backup, itemDir)
fileName := fmt.Sprintf("%s_%s.tar.gz", req.DetailName, timeNow+common.RandStrAndNum(5))
if err := handleAppBackup(&install, backupDir, fileName, "", req.Secret); err != nil {
@ -45,13 +41,13 @@ func (u *BackupService) AppBackup(req dto.CommonBackup) (*model.BackupRecord, er
}
record := &model.BackupRecord{
Type: "app",
Name: req.Name,
DetailName: req.DetailName,
Source: "LOCAL",
BackupType: "LOCAL",
FileDir: itemDir,
FileName: fileName,
Type: "app",
Name: req.Name,
DetailName: req.DetailName,
SourceAccountIDs: "1",
DownloadAccountID: 1,
FileDir: itemDir,
FileName: fileName,
}
if err := backupRepo.CreateRecord(record); err != nil {

View File

@ -21,14 +21,9 @@ import (
)
func (u *BackupService) MysqlBackup(req dto.CommonBackup) error {
localDir, err := loadLocalDir()
if err != nil {
return err
}
timeNow := time.Now().Format(constant.DateTimeSlimLayout)
itemDir := fmt.Sprintf("database/%s/%s/%s", req.Type, req.Name, req.DetailName)
targetDir := path.Join(localDir, itemDir)
targetDir := path.Join(global.CONF.System.Backup, itemDir)
fileName := fmt.Sprintf("%s_%s.sql.gz", req.DetailName, timeNow+common.RandStrAndNum(5))
if err := handleMysqlBackup(req.Name, req.Type, req.DetailName, targetDir, fileName); err != nil {
@ -36,13 +31,13 @@ func (u *BackupService) MysqlBackup(req dto.CommonBackup) error {
}
record := &model.BackupRecord{
Type: req.Type,
Name: req.Name,
DetailName: req.DetailName,
Source: "LOCAL",
BackupType: "LOCAL",
FileDir: itemDir,
FileName: fileName,
Type: req.Type,
Name: req.Name,
DetailName: req.DetailName,
SourceAccountIDs: "1",
DownloadAccountID: 1,
FileDir: itemDir,
FileName: fileName,
}
if err := backupRepo.CreateRecord(record); err != nil {
global.LOG.Errorf("save backup record failed, err: %v", err)

View File

@ -22,14 +22,9 @@ import (
)
func (u *BackupService) PostgresqlBackup(req dto.CommonBackup) error {
localDir, err := loadLocalDir()
if err != nil {
return err
}
timeNow := time.Now().Format(constant.DateTimeSlimLayout)
itemDir := fmt.Sprintf("database/%s/%s/%s", req.Type, req.Name, req.DetailName)
targetDir := path.Join(localDir, itemDir)
targetDir := path.Join(global.CONF.System.Backup, itemDir)
fileName := fmt.Sprintf("%s_%s.sql.gz", req.DetailName, timeNow+common.RandStrAndNum(5))
if err := handlePostgresqlBackup(req.Name, req.DetailName, targetDir, fileName); err != nil {
@ -37,13 +32,13 @@ func (u *BackupService) PostgresqlBackup(req dto.CommonBackup) error {
}
record := &model.BackupRecord{
Type: req.Type,
Name: req.Name,
DetailName: req.DetailName,
Source: "LOCAL",
BackupType: "LOCAL",
FileDir: itemDir,
FileName: fileName,
Type: req.Type,
Name: req.Name,
DetailName: req.DetailName,
SourceAccountIDs: "1",
DownloadAccountID: 1,
FileDir: itemDir,
FileName: fileName,
}
if err := backupRepo.CreateRecord(record); err != nil {
global.LOG.Errorf("save backup record failed, err: %v", err)

View File

@ -21,10 +21,6 @@ import (
)
func (u *BackupService) RedisBackup(db dto.CommonBackup) error {
localDir, err := loadLocalDir()
if err != nil {
return err
}
redisInfo, err := appInstallRepo.LoadBaseInfo("redis", db.Name)
if err != nil {
return err
@ -45,17 +41,17 @@ func (u *BackupService) RedisBackup(db dto.CommonBackup) error {
}
}
itemDir := fmt.Sprintf("database/redis/%s", redisInfo.Name)
backupDir := path.Join(localDir, itemDir)
backupDir := path.Join(global.CONF.System.Backup, itemDir)
if err := handleRedisBackup(redisInfo, backupDir, fileName, db.Secret); err != nil {
return err
}
record := &model.BackupRecord{
Type: "redis",
Name: db.Name,
Source: "LOCAL",
BackupType: "LOCAL",
FileDir: itemDir,
FileName: fileName,
Type: "redis",
Name: db.Name,
SourceAccountIDs: "1",
DownloadAccountID: 1,
FileDir: itemDir,
FileName: fileName,
}
if err := backupRepo.CreateRecord(record); err != nil {
global.LOG.Errorf("save backup record failed, err: %v", err)

View File

@ -22,10 +22,6 @@ import (
)
func (u *BackupService) WebsiteBackup(req dto.CommonBackup) error {
localDir, err := loadLocalDir()
if err != nil {
return err
}
website, err := websiteRepo.GetFirst(websiteRepo.WithAlias(req.DetailName))
if err != nil {
return err
@ -33,20 +29,20 @@ func (u *BackupService) WebsiteBackup(req dto.CommonBackup) error {
timeNow := time.Now().Format(constant.DateTimeSlimLayout)
itemDir := fmt.Sprintf("website/%s", req.Name)
backupDir := path.Join(localDir, itemDir)
backupDir := path.Join(global.CONF.System.Backup, itemDir)
fileName := fmt.Sprintf("%s_%s.tar.gz", website.PrimaryDomain, timeNow+common.RandStrAndNum(5))
if err := handleWebsiteBackup(&website, backupDir, fileName, "", req.Secret); err != nil {
return err
}
record := &model.BackupRecord{
Type: "website",
Name: website.PrimaryDomain,
DetailName: req.DetailName,
Source: "LOCAL",
BackupType: "LOCAL",
FileDir: itemDir,
FileName: fileName,
Type: "website",
Name: website.PrimaryDomain,
DetailName: req.DetailName,
SourceAccountIDs: "1",
DownloadAccountID: 1,
FileDir: itemDir,
FileName: fileName,
}
if err := backupRepo.CreateRecord(record); err != nil {
global.LOG.Errorf("save backup record failed, err: %v", err)

View File

@ -99,7 +99,7 @@ func (u *CronjobService) CleanRecord(req dto.CronjobClean) error {
}
if req.CleanData {
if hasBackup(cronjob.Type) {
accountMap, err := loadClientMap(cronjob.BackupAccounts)
accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
if err != nil {
return err
}
@ -129,16 +129,16 @@ func (u *CronjobService) CleanRecord(req dto.CronjobClean) error {
return nil
}
func (u *CronjobService) Download(down dto.CronjobDownload) (string, error) {
record, _ := cronjobRepo.GetRecord(commonRepo.WithByID(down.RecordID))
func (u *CronjobService) Download(req dto.CronjobDownload) (string, error) {
record, _ := cronjobRepo.GetRecord(commonRepo.WithByID(req.RecordID))
if record.ID == 0 {
return "", constant.ErrRecordNotFound
}
backup, _ := backupRepo.Get(commonRepo.WithByID(down.BackupAccountID))
if backup.ID == 0 {
return "", constant.ErrRecordNotFound
account, client, err := NewBackupClientWithID(req.BackupAccountID)
if err != nil {
return "", err
}
if backup.Type == "LOCAL" || record.FromLocal {
if account.Type == "LOCAL" || record.FromLocal {
if _, err := os.Stat(record.File); err != nil && os.IsNotExist(err) {
return "", err
}
@ -146,10 +146,6 @@ func (u *CronjobService) Download(down dto.CronjobDownload) (string, error) {
}
tempPath := fmt.Sprintf("%s/download/%s", constant.DataDir, record.File)
if _, err := os.Stat(tempPath); err != nil && os.IsNotExist(err) {
client, err := NewIBackupService().NewClient(&backup)
if err != nil {
return "", err
}
_ = os.MkdirAll(path.Dir(tempPath), os.ModePerm)
isOK, err := client.Download(record.File, tempPath)
if !isOK || err != nil {

View File

@ -27,7 +27,7 @@ func (u *CronjobService) handleApp(cronjob model.Cronjob, startTime time.Time) e
}
apps = append(apps, app)
}
accountMap, err := loadClientMap(cronjob.BackupAccounts)
accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
if err != nil {
return err
}
@ -38,7 +38,7 @@ func (u *CronjobService) handleApp(cronjob model.Cronjob, startTime time.Time) e
record.CronjobID = cronjob.ID
record.Name = app.App.Key
record.DetailName = app.Name
record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
backupDir := path.Join(global.CONF.System.TmpDir, fmt.Sprintf("app/%s/%s", app.App.Key, app.Name))
record.FileName = fmt.Sprintf("app_%s_%s.tar.gz", app.Name, startTime.Format(constant.DateTimeSlimLayout)+common.RandStrAndNum(5))
if err := handleAppBackup(&app, backupDir, record.FileName, cronjob.ExclusionRules, cronjob.Secret); err != nil {
@ -60,7 +60,7 @@ func (u *CronjobService) handleApp(cronjob model.Cronjob, startTime time.Time) e
func (u *CronjobService) handleWebsite(cronjob model.Cronjob, startTime time.Time) error {
webs := loadWebsForJob(cronjob)
accountMap, err := loadClientMap(cronjob.BackupAccounts)
accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
if err != nil {
return err
}
@ -71,7 +71,7 @@ func (u *CronjobService) handleWebsite(cronjob model.Cronjob, startTime time.Tim
record.CronjobID = cronjob.ID
record.Name = web.PrimaryDomain
record.DetailName = web.Alias
record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
backupDir := path.Join(global.CONF.System.TmpDir, fmt.Sprintf("website/%s", web.PrimaryDomain))
record.FileName = fmt.Sprintf("website_%s_%s.tar.gz", web.PrimaryDomain, startTime.Format(constant.DateTimeSlimLayout)+common.RandStrAndNum(5))
if err := handleWebsiteBackup(&web, backupDir, record.FileName, cronjob.ExclusionRules, cronjob.Secret); err != nil {
@ -93,7 +93,7 @@ func (u *CronjobService) handleWebsite(cronjob model.Cronjob, startTime time.Tim
func (u *CronjobService) handleDatabase(cronjob model.Cronjob, startTime time.Time) error {
dbs := loadDbsForJob(cronjob)
accountMap, err := loadClientMap(cronjob.BackupAccounts)
accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
if err != nil {
return err
}
@ -104,7 +104,7 @@ func (u *CronjobService) handleDatabase(cronjob model.Cronjob, startTime time.Ti
record.CronjobID = cronjob.ID
record.Name = dbInfo.Database
record.DetailName = dbInfo.Name
record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
backupDir := path.Join(global.CONF.System.TmpDir, fmt.Sprintf("database/%s/%s/%s", dbInfo.DBType, record.Name, dbInfo.Name))
record.FileName = fmt.Sprintf("db_%s_%s.sql.gz", dbInfo.Name, startTime.Format(constant.DateTimeSlimLayout)+common.RandStrAndNum(5))
@ -132,7 +132,7 @@ func (u *CronjobService) handleDatabase(cronjob model.Cronjob, startTime time.Ti
}
func (u *CronjobService) handleDirectory(cronjob model.Cronjob, startTime time.Time) error {
accountMap, err := loadClientMap(cronjob.BackupAccounts)
accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
if err != nil {
return err
}
@ -146,7 +146,7 @@ func (u *CronjobService) handleDirectory(cronjob model.Cronjob, startTime time.T
record.Type = "directory"
record.CronjobID = cronjob.ID
record.Name = cronjob.Name
record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
downloadPath, err := u.uploadCronjobBackFile(cronjob, accountMap, path.Join(backupDir, fileName))
if err != nil {
return err
@ -162,7 +162,7 @@ func (u *CronjobService) handleDirectory(cronjob model.Cronjob, startTime time.T
}
func (u *CronjobService) handleSystemLog(cronjob model.Cronjob, startTime time.Time) error {
accountMap, err := loadClientMap(cronjob.BackupAccounts)
accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
if err != nil {
return err
}
@ -177,7 +177,7 @@ func (u *CronjobService) handleSystemLog(cronjob model.Cronjob, startTime time.T
record.Type = "log"
record.CronjobID = cronjob.ID
record.Name = cronjob.Name
record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
downloadPath, err := u.uploadCronjobBackFile(cronjob, accountMap, path.Join(path.Dir(backupDir), fileName))
if err != nil {
return err
@ -193,7 +193,7 @@ func (u *CronjobService) handleSystemLog(cronjob model.Cronjob, startTime time.T
}
func (u *CronjobService) handleSnapshot(cronjob model.Cronjob, startTime time.Time, logPath string) error {
accountMap, err := loadClientMap(cronjob.BackupAccounts)
accountMap, err := NewBackupClientMap(strings.Split(cronjob.SourceAccountIDs, ","))
if err != nil {
return err
}
@ -203,12 +203,12 @@ func (u *CronjobService) handleSnapshot(cronjob model.Cronjob, startTime time.Ti
record.Type = "directory"
record.CronjobID = cronjob.ID
record.Name = cronjob.Name
record.Source, record.BackupType = loadRecordPath(cronjob, accountMap)
record.DownloadAccountID, record.SourceAccountIDs = loadRecordPath(cronjob, accountMap)
record.FileDir = "system_snapshot"
req := dto.SnapshotCreate{
From: record.BackupType,
DefaultDownload: cronjob.DefaultDownload,
SourceAccountIDs: record.SourceAccountIDs,
DownloadAccountID: cronjob.DownloadAccountID,
}
name, err := NewISnapshotService().HandleSnapshot(true, logPath, req, startTime.Format(constant.DateTimeSlimLayout)+common.RandStrAndNum(5), cronjob.Secret)
if err != nil {
@ -287,20 +287,20 @@ func loadWebsForJob(cronjob model.Cronjob) []model.Website {
return weblist
}
func loadRecordPath(cronjob model.Cronjob, accountMap map[string]cronjobUploadHelper) (string, string) {
source := accountMap[fmt.Sprintf("%v", cronjob.DefaultDownload)].backType
targets := strings.Split(cronjob.BackupAccounts, ",")
func loadRecordPath(cronjob model.Cronjob, accountMap map[string]backupClientHelper) (uint, string) {
download := accountMap[fmt.Sprintf("%v", cronjob.DownloadAccountID)].id
sources := strings.Split(cronjob.SourceAccountIDs, ",")
var itemAccounts []string
for _, target := range targets {
for _, target := range sources {
if len(target) == 0 {
continue
}
if len(accountMap[target].backType) != 0 {
itemAccounts = append(itemAccounts, accountMap[target].backType)
if accountMap[target].id != 0 {
itemAccounts = append(itemAccounts, fmt.Sprintf("%v", accountMap[target].id))
}
}
backupType := strings.Join(itemAccounts, ",")
return source, backupType
return download, backupType
}
func handleBackupLogs(targetDir, fileName string, secret string) error {

View File

@ -15,7 +15,6 @@ import (
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/1Panel-dev/1Panel/agent/constant"
"github.com/1Panel-dev/1Panel/agent/global"
"github.com/1Panel-dev/1Panel/agent/utils/cloud_storage"
"github.com/1Panel-dev/1Panel/agent/utils/cmd"
"github.com/1Panel-dev/1Panel/agent/utils/files"
"github.com/1Panel-dev/1Panel/agent/utils/ntp"
@ -27,7 +26,7 @@ func (u *CronjobService) HandleJob(cronjob *model.Cronjob) {
message []byte
err error
)
record := cronjobRepo.StartRecords(cronjob.ID, cronjob.KeepLocal, "")
record := cronjobRepo.StartRecords(cronjob.ID, "")
go func() {
switch cronjob.Type {
case "shell":
@ -269,49 +268,11 @@ func (u *CronjobService) handleSystemClean() (string, error) {
return NewIDeviceService().CleanForCronjob()
}
func loadClientMap(backupAccounts string) (map[string]cronjobUploadHelper, error) {
clients := make(map[string]cronjobUploadHelper)
accounts, err := backupRepo.List()
if err != nil {
return nil, err
}
targets := strings.Split(backupAccounts, ",")
for _, target := range targets {
if len(target) == 0 {
continue
}
for _, account := range accounts {
if target == account.Type {
client, err := NewIBackupService().NewClient(&account)
if err != nil {
return nil, err
}
pathItem := account.BackupPath
if account.BackupPath != "/" {
pathItem = strings.TrimPrefix(account.BackupPath, "/")
}
clients[target] = cronjobUploadHelper{
client: client,
backupPath: pathItem,
backType: account.Type,
}
}
}
}
return clients, nil
}
type cronjobUploadHelper struct {
backupPath string
backType string
client cloud_storage.CloudStorageClient
}
func (u *CronjobService) uploadCronjobBackFile(cronjob model.Cronjob, accountMap map[string]cronjobUploadHelper, file string) (string, error) {
func (u *CronjobService) uploadCronjobBackFile(cronjob model.Cronjob, accountMap map[string]backupClientHelper, file string) (string, error) {
defer func() {
_ = os.Remove(file)
}()
accounts := strings.Split(cronjob.BackupAccounts, ",")
accounts := strings.Split(cronjob.SourceAccountIDs, ",")
cloudSrc := strings.TrimPrefix(file, global.CONF.System.TmpDir+"/")
for _, account := range accounts {
if len(account) != 0 {
@ -325,14 +286,14 @@ func (u *CronjobService) uploadCronjobBackFile(cronjob model.Cronjob, accountMap
return cloudSrc, nil
}
func (u *CronjobService) removeExpiredBackup(cronjob model.Cronjob, accountMap map[string]cronjobUploadHelper, record model.BackupRecord) {
func (u *CronjobService) removeExpiredBackup(cronjob model.Cronjob, accountMap map[string]backupClientHelper, record model.BackupRecord) {
global.LOG.Infof("start to handle remove expired, retain copies: %d", cronjob.RetainCopies)
var opts []repo.DBOption
opts = append(opts, commonRepo.WithByFrom("cronjob"))
opts = append(opts, backupRepo.WithByCronID(cronjob.ID))
opts = append(opts, commonRepo.WithOrderBy("created_at desc"))
if record.ID != 0 {
opts = append(opts, backupRepo.WithByType(record.Type))
opts = append(opts, commonRepo.WithByType(record.Type))
opts = append(opts, commonRepo.WithByName(record.Name))
opts = append(opts, backupRepo.WithByDetailName(record.DetailName))
}
@ -341,7 +302,7 @@ func (u *CronjobService) removeExpiredBackup(cronjob model.Cronjob, accountMap m
return
}
for i := int(cronjob.RetainCopies); i < len(records); i++ {
accounts := strings.Split(cronjob.BackupAccounts, ",")
accounts := strings.Split(cronjob.SourceAccountIDs, ",")
if cronjob.Type == "snapshot" {
for _, account := range accounts {
if len(account) != 0 {

View File

@ -237,11 +237,7 @@ func (u *DatabaseService) Delete(req dto.DatabaseDelete) error {
if _, err := os.Stat(uploadDir); err == nil {
_ = os.RemoveAll(uploadDir)
}
localDir, err := loadLocalDir()
if err != nil && !req.ForceDelete {
return err
}
backupDir := path.Join(localDir, fmt.Sprintf("database/%s/%s", db.Type, db.Name))
backupDir := path.Join(global.CONF.System.Backup, fmt.Sprintf("database/%s/%s", db.Type, db.Name))
if _, err := os.Stat(backupDir); err == nil {
_ = os.RemoveAll(backupDir)
}

View File

@ -280,11 +280,7 @@ func (u *MysqlService) Delete(ctx context.Context, req dto.MysqlDBDelete) error
if _, err := os.Stat(uploadDir); err == nil {
_ = os.RemoveAll(uploadDir)
}
localDir, err := loadLocalDir()
if err != nil && !req.ForceDelete {
return err
}
backupDir := path.Join(localDir, fmt.Sprintf("database/%s/%s/%s", req.Type, db.MysqlName, db.Name))
backupDir := path.Join(global.CONF.System.Backup, fmt.Sprintf("database/%s/%s/%s", req.Type, db.MysqlName, db.Name))
if _, err := os.Stat(backupDir); err == nil {
_ = os.RemoveAll(backupDir)
}

View File

@ -305,11 +305,7 @@ func (u *PostgresqlService) Delete(ctx context.Context, req dto.PostgresqlDBDele
if _, err := os.Stat(uploadDir); err == nil {
_ = os.RemoveAll(uploadDir)
}
localDir, err := loadLocalDir()
if err != nil && !req.ForceDelete {
return err
}
backupDir := path.Join(localDir, fmt.Sprintf("database/%s/%s/%s", req.Type, db.PostgresqlName, db.Name))
backupDir := path.Join(global.CONF.System.Backup, fmt.Sprintf("database/%s/%s/%s", req.Type, db.PostgresqlName, db.Name))
if _, err := os.Stat(backupDir); err == nil {
_ = os.RemoveAll(backupDir)
}

View File

@ -78,12 +78,12 @@ func (u *SnapshotService) SnapshotImport(req dto.SnapshotImport) error {
snap = strings.ReplaceAll(snap, ".tar.gz", "")
}
itemSnap := model.Snapshot{
Name: snap,
From: req.From,
DefaultDownload: req.From,
Version: nameItems[1],
Description: req.Description,
Status: constant.StatusSuccess,
Name: snap,
SourceAccountIDs: fmt.Sprintf("%v", req.BackupAccountID),
DownloadAccountID: req.BackupAccountID,
Version: nameItems[1],
Description: req.Description,
Status: constant.StatusSuccess,
}
if err := snapshotRepo.Create(&itemSnap); err != nil {
return err
@ -180,14 +180,11 @@ func (u *SnapshotService) readFromJson(path string) (SnapshotJson, error) {
}
func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto.SnapshotCreate, timeNow string, secret string) (string, error) {
localDir, err := loadLocalDir()
if err != nil {
return "", err
}
var (
rootDir string
snap model.Snapshot
snapStatus model.SnapshotStatus
err error
)
if req.ID == 0 {
@ -197,15 +194,15 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
if isCronjob {
name = fmt.Sprintf("snapshot_1panel_%s_%s_%s", versionItem.Value, loadOs(), timeNow)
}
rootDir = path.Join(localDir, "system", name)
rootDir = path.Join(global.CONF.System.Backup, "system", name)
snap = model.Snapshot{
Name: name,
Description: req.Description,
From: req.From,
DefaultDownload: req.DefaultDownload,
Version: versionItem.Value,
Status: constant.StatusWaiting,
Name: name,
Description: req.Description,
SourceAccountIDs: req.SourceAccountIDs,
DownloadAccountID: req.DownloadAccountID,
Version: versionItem.Value,
Status: constant.StatusWaiting,
}
_ = snapshotRepo.Create(&snap)
snapStatus.SnapID = snap.ID
@ -221,7 +218,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
snapStatus.SnapID = snap.ID
_ = snapshotRepo.CreateStatus(&snapStatus)
}
rootDir = path.Join(localDir, fmt.Sprintf("system/%s", snap.Name))
rootDir = path.Join(global.CONF.System.Backup, fmt.Sprintf("system/%s", snap.Name))
}
var wg sync.WaitGroup
@ -233,7 +230,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
jsonItem := SnapshotJson{
BaseDir: global.CONF.System.BaseDir,
BackupDataDir: localDir,
BackupDataDir: global.CONF.System.Backup,
PanelDataDir: path.Join(global.CONF.System.BaseDir, "1panel"),
}
loadLogByStatus(snapStatus, logPath)
@ -255,7 +252,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
}
if snapStatus.BackupData != constant.StatusDone {
wg.Add(1)
go snapBackup(itemHelper, localDir, backupPanelDir)
go snapBackup(itemHelper, backupPanelDir)
}
if !isCronjob {
@ -266,7 +263,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
return
}
if snapStatus.PanelData != constant.StatusDone {
snapPanelData(itemHelper, localDir, backupPanelDir)
snapPanelData(itemHelper, backupPanelDir)
}
if snapStatus.PanelData != constant.StatusDone {
_ = snapshotRepo.Update(snap.ID, map[string]interface{}{"status": constant.StatusFailed})
@ -280,7 +277,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
return
}
if snapStatus.Upload != constant.StatusDone {
snapUpload(itemHelper, req.From, fmt.Sprintf("%s.tar.gz", rootDir))
snapUpload(itemHelper, req.SourceAccountIDs, fmt.Sprintf("%s.tar.gz", rootDir))
}
if snapStatus.Upload != constant.StatusDone {
_ = snapshotRepo.Update(snap.ID, map[string]interface{}{"status": constant.StatusFailed})
@ -297,7 +294,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
return snap.Name, fmt.Errorf("snapshot %s backup failed", snap.Name)
}
loadLogByStatus(snapStatus, logPath)
snapPanelData(itemHelper, localDir, backupPanelDir)
snapPanelData(itemHelper, backupPanelDir)
if snapStatus.PanelData != constant.StatusDone {
_ = snapshotRepo.Update(snap.ID, map[string]interface{}{"status": constant.StatusFailed})
loadLogByStatus(snapStatus, logPath)
@ -311,7 +308,7 @@ func (u *SnapshotService) HandleSnapshot(isCronjob bool, logPath string, req dto
return snap.Name, fmt.Errorf("snapshot %s compress failed", snap.Name)
}
loadLogByStatus(snapStatus, logPath)
snapUpload(itemHelper, req.From, fmt.Sprintf("%s.tar.gz", rootDir))
snapUpload(itemHelper, req.SourceAccountIDs, fmt.Sprintf("%s.tar.gz", rootDir))
if snapStatus.Upload != constant.StatusDone {
_ = snapshotRepo.Update(snap.ID, map[string]interface{}{"status": constant.StatusFailed})
loadLogByStatus(snapStatus, logPath)
@ -326,12 +323,12 @@ func (u *SnapshotService) Delete(req dto.SnapshotBatchDelete) error {
snaps, _ := snapshotRepo.GetList(commonRepo.WithIdsIn(req.Ids))
for _, snap := range snaps {
if req.DeleteWithFile {
targetAccounts, err := loadClientMap(snap.From)
accounts, err := NewBackupClientMap(strings.Split(snap.SourceAccountIDs, ","))
if err != nil {
return err
}
for _, item := range targetAccounts {
global.LOG.Debugf("remove snapshot file %s.tar.gz from %s", snap.Name, item.backType)
for _, item := range accounts {
global.LOG.Debugf("remove snapshot file %s.tar.gz from %s", snap.Name, item.name)
_, _ = item.client.Delete(path.Join(item.backupPath, "system_snapshot", snap.Name+".tar.gz"))
}
}
@ -512,7 +509,7 @@ func loadOs() string {
func loadSnapSize(records []model.Snapshot) ([]dto.SnapshotInfo, error) {
var datas []dto.SnapshotInfo
clientMap := make(map[string]loadSizeHelper)
clientMap := make(map[uint]loadSizeHelper)
var wg sync.WaitGroup
for i := 0; i < len(records); i++ {
var item dto.SnapshotInfo
@ -520,30 +517,23 @@ func loadSnapSize(records []model.Snapshot) ([]dto.SnapshotInfo, error) {
return nil, errors.WithMessage(constant.ErrStructTransform, err.Error())
}
itemPath := fmt.Sprintf("system_snapshot/%s.tar.gz", item.Name)
if _, ok := clientMap[records[i].DefaultDownload]; !ok {
backup, err := backupRepo.Get(commonRepo.WithByType(records[i].DefaultDownload))
if _, ok := clientMap[records[i].DownloadAccountID]; !ok {
backup, client, err := NewBackupClientWithID(records[i].DownloadAccountID)
if err != nil {
global.LOG.Errorf("load backup model %s from db failed, err: %v", records[i].DefaultDownload, err)
clientMap[records[i].DefaultDownload] = loadSizeHelper{}
datas = append(datas, item)
continue
}
client, err := NewIBackupService().NewClient(&backup)
if err != nil {
global.LOG.Errorf("load backup client %s from db failed, err: %v", records[i].DefaultDownload, err)
clientMap[records[i].DefaultDownload] = loadSizeHelper{}
global.LOG.Errorf("load backup client from db failed, err: %v", err)
clientMap[records[i].DownloadAccountID] = loadSizeHelper{}
datas = append(datas, item)
continue
}
item.Size, _ = client.Size(path.Join(strings.TrimLeft(backup.BackupPath, "/"), itemPath))
datas = append(datas, item)
clientMap[records[i].DefaultDownload] = loadSizeHelper{backupPath: strings.TrimLeft(backup.BackupPath, "/"), client: client, isOk: true}
clientMap[records[i].DownloadAccountID] = loadSizeHelper{backupPath: strings.TrimLeft(backup.BackupPath, "/"), client: client, isOk: true}
continue
}
if clientMap[records[i].DefaultDownload].isOk {
if clientMap[records[i].DownloadAccountID].isOk {
wg.Add(1)
go func(index int) {
item.Size, _ = clientMap[records[index].DefaultDownload].client.Size(path.Join(clientMap[records[index].DefaultDownload].backupPath, itemPath))
item.Size, _ = clientMap[records[index].DownloadAccountID].client.Size(path.Join(clientMap[records[index].DownloadAccountID].backupPath, itemPath))
datas = append(datas, item)
wg.Done()
}(i)

View File

@ -127,24 +127,24 @@ func snapAppData(snap snapHelper, targetDir string) {
_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"app_data": constant.StatusDone})
}
func snapBackup(snap snapHelper, localDir, targetDir string) {
func snapBackup(snap snapHelper, targetDir string) {
defer snap.Wg.Done()
_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"backup_data": constant.Running})
status := constant.StatusDone
if err := handleSnapTar(localDir, targetDir, "1panel_backup.tar.gz", "./system;./system_snapshot;", ""); err != nil {
if err := handleSnapTar(global.CONF.System.Backup, targetDir, "1panel_backup.tar.gz", "./system;./system_snapshot;", ""); err != nil {
status = err.Error()
}
snap.Status.BackupData = status
_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"backup_data": status})
}
func snapPanelData(snap snapHelper, localDir, targetDir string) {
func snapPanelData(snap snapHelper, targetDir string) {
_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"panel_data": constant.Running})
status := constant.StatusDone
dataDir := path.Join(global.CONF.System.BaseDir, "1panel")
exclusionRules := "./tmp;./log;./cache;./db/1Panel.db-*;"
if strings.Contains(localDir, dataDir) {
exclusionRules += ("." + strings.ReplaceAll(localDir, dataDir, "") + ";")
if strings.Contains(global.CONF.System.Backup, dataDir) {
exclusionRules += ("." + strings.ReplaceAll(global.CONF.System.Backup, dataDir, "") + ";")
}
ignoreVal, _ := settingRepo.Get(settingRepo.WithByKey("SnapshotIgnore"))
rules := strings.Split(ignoreVal.Value, ",")
@ -197,7 +197,7 @@ func snapCompress(snap snapHelper, rootDir string, secret string) {
func snapUpload(snap snapHelper, accounts string, file string) {
source := path.Join(global.CONF.System.TmpDir, "system", path.Base(file))
_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"upload": constant.StatusUploading})
accountMap, err := loadClientMap(accounts)
accountMap, err := NewBackupClientMap(strings.Split(accounts, ","))
if err != nil {
snap.Status.Upload = err.Error()
_ = snapshotRepo.UpdateStatus(snap.Status.ID, map[string]interface{}{"upload": err.Error()})

View File

@ -168,7 +168,7 @@ func backupBeforeRecover(snap model.Snapshot) error {
go snapJson(itemHelper, jsonItem, baseDir)
go snapPanel(itemHelper, path.Join(baseDir, "1panel"))
go snapDaemonJson(itemHelper, path.Join(baseDir, "docker"))
go snapBackup(itemHelper, global.CONF.System.Backup, path.Join(baseDir, "1panel"))
go snapBackup(itemHelper, path.Join(baseDir, "1panel"))
wg.Wait()
itemHelper.Status.AppData = constant.StatusDone
@ -176,7 +176,7 @@ func backupBeforeRecover(snap model.Snapshot) error {
if !allDone {
return errors.New(msg)
}
snapPanelData(itemHelper, global.CONF.System.BaseDir, path.Join(baseDir, "1panel"))
snapPanelData(itemHelper, path.Join(baseDir, "1panel"))
if status.PanelData != constant.StatusDone {
return errors.New(status.PanelData)
}
@ -184,23 +184,19 @@ func backupBeforeRecover(snap model.Snapshot) error {
}
func handleDownloadSnapshot(snap model.Snapshot, targetDir string) error {
backup, err := backupRepo.Get(commonRepo.WithByType(snap.DefaultDownload))
account, client, err := NewBackupClientWithID(snap.DownloadAccountID)
if err != nil {
return err
}
client, err := NewIBackupService().NewClient(&backup)
if err != nil {
return err
}
pathItem := backup.BackupPath
if backup.BackupPath != "/" {
pathItem = strings.TrimPrefix(backup.BackupPath, "/")
pathItem := account.BackupPath
if account.BackupPath != "/" {
pathItem = strings.TrimPrefix(account.BackupPath, "/")
}
filePath := fmt.Sprintf("%s/%s.tar.gz", targetDir, snap.Name)
_ = os.RemoveAll(filePath)
ok, err := client.Download(path.Join(pathItem, fmt.Sprintf("system_snapshot/%s.tar.gz", snap.Name)), filePath)
if err != nil || !ok {
return fmt.Errorf("download file %s from %s failed, err: %v", snap.Name, backup.Type, err)
return fmt.Errorf("download file %s from %s failed, err: %v", snap.Name, account.Name, err)
}
return nil
}

View File

@ -1,18 +1,20 @@
package configs
type System struct {
DbFile string `mapstructure:"db_agent_file"`
DbPath string `mapstructure:"db_path"`
LogPath string `mapstructure:"log_path"`
DataDir string `mapstructure:"data_dir"`
TmpDir string `mapstructure:"tmp_dir"`
Cache string `mapstructure:"cache"`
Backup string `mapstructure:"backup"`
EncryptKey string `mapstructure:"encrypt_key"`
BaseDir string `mapstructure:"base_dir"`
Mode string `mapstructure:"mode"`
RepoUrl string `mapstructure:"repo_url"`
Version string `mapstructure:"version"`
IsDemo bool `mapstructure:"is_demo"`
AppRepo string `mapstructure:"app_repo"`
MasterRequestAddr string `mapstructure:"master_request_addr"`
MasterRequestToken string `mapstructure:"master_request_token"`
DbFile string `mapstructure:"db_agent_file"`
DbPath string `mapstructure:"db_path"`
LogPath string `mapstructure:"log_path"`
DataDir string `mapstructure:"data_dir"`
TmpDir string `mapstructure:"tmp_dir"`
Cache string `mapstructure:"cache"`
Backup string `mapstructure:"backup"`
EncryptKey string `mapstructure:"encrypt_key"`
BaseDir string `mapstructure:"base_dir"`
Mode string `mapstructure:"mode"`
RepoUrl string `mapstructure:"repo_url"`
Version string `mapstructure:"version"`
IsDemo bool `mapstructure:"is_demo"`
AppRepo string `mapstructure:"app_repo"`
}

View File

@ -21,7 +21,6 @@ require (
github.com/go-redis/redis v6.15.9+incompatible
github.com/go-sql-driver/mysql v1.8.1
github.com/goh-chunlin/go-onedrive v1.1.1
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
github.com/jackc/pgx/v5 v5.6.0

View File

@ -300,8 +300,6 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/goh-chunlin/go-onedrive v1.1.1 h1:HGtHk5iG0MZ92zYUtaY04czfZPBIJUr12UuFc+PW8m4=
github.com/goh-chunlin/go-onedrive v1.1.1/go.mod h1:N8qIGHD7tryO734epiBKk5oXcpGwxKET/u3LuBHciTs=
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=

View File

@ -7,6 +7,7 @@ import (
"github.com/1Panel-dev/1Panel/agent/app/model"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/1Panel-dev/1Panel/agent/app/service"
"github.com/1Panel-dev/1Panel/agent/constant"
"github.com/1Panel-dev/1Panel/agent/global"
"github.com/1Panel-dev/1Panel/agent/utils/encrypt"
@ -100,33 +101,14 @@ func handleCronjobStatus() {
}
func loadLocalDir() {
var backup model.BackupAccount
_ = global.DB.Where("type = ?", "LOCAL").First(&backup).Error
if backup.ID == 0 {
global.LOG.Errorf("no such backup account `%s` in db", "LOCAL")
return
account, _, err := service.NewBackupClientWithID(1)
if err != nil {
global.LOG.Errorf("load local backup account info failed, err: %v", err)
}
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(backup.Vars), &varMap); err != nil {
global.LOG.Errorf("json unmarshal backup.Vars: %v failed, err: %v", backup.Vars, err)
return
global.CONF.System.Backup, err = service.LoadLocalDirByStr(account.Vars)
if err != nil {
global.LOG.Errorf("load local backup dir failed, err: %v", err)
}
if _, ok := varMap["dir"]; !ok {
global.LOG.Error("load local backup dir failed")
return
}
baseDir, ok := varMap["dir"].(string)
if ok {
if _, err := os.Stat(baseDir); err != nil && os.IsNotExist(err) {
if err = os.MkdirAll(baseDir, os.ModePerm); err != nil {
global.LOG.Errorf("mkdir %s failed, err: %v", baseDir, err)
return
}
}
global.CONF.System.Backup = baseDir
return
}
global.LOG.Errorf("error type dir: %T", varMap["dir"])
}
func initDir() {

View File

@ -12,7 +12,6 @@ func Init() {
migrations.AddTable,
migrations.InitHost,
migrations.InitSetting,
migrations.InitBackupAccount,
migrations.InitImageRepo,
migrations.InitDefaultGroup,
migrations.InitDefaultCA,

View File

@ -2,7 +2,6 @@ package migrations
import (
"encoding/json"
"fmt"
"os"
"github.com/1Panel-dev/1Panel/agent/app/dto/request"
@ -27,7 +26,6 @@ var AddTable = &gormigrate.Migration{
&model.AppTag{},
&model.Tag{},
&model.App{},
&model.BackupAccount{},
&model.BackupRecord{},
&model.Clam{},
&model.Command{},
@ -94,9 +92,11 @@ var InitSetting = &gormigrate.Migration{
}
if _, err := os.Stat("/opt/1panel/nodeJson"); err == nil {
type nodeInfo struct {
ServerCrt string `json:"serverCrt"`
ServerKey string `json:"serverKey"`
CurrentNode string `json:"currentNode"`
MasterRequestAddr string `json:"masterRequestAddr"`
Token string `json:"token"`
ServerCrt string `json:"serverCrt"`
ServerKey string `json:"serverKey"`
CurrentNode string `json:"currentNode"`
}
nodeJson, err := os.ReadFile("/opt/1panel/nodeJson")
if err != nil {
@ -106,14 +106,20 @@ var InitSetting = &gormigrate.Migration{
if err := json.Unmarshal(nodeJson, &node); err != nil {
return err
}
itemKey, _ := encrypt.StringEncrypt(node.ServerKey)
itemKey, _ := encrypt.StringEncryptWithBase64(node.ServerKey)
if err := tx.Create(&model.Setting{Key: "ServerKey", Value: itemKey}).Error; err != nil {
return err
}
itemCrt, _ := encrypt.StringEncrypt(node.ServerCrt)
itemCrt, _ := encrypt.StringEncryptWithBase64(node.ServerCrt)
if err := tx.Create(&model.Setting{Key: "ServerCrt", Value: itemCrt}).Error; err != nil {
return err
}
itemToken, _ := encrypt.StringEncryptWithBase64(node.Token)
if err := tx.Create(&model.Setting{Key: "Token", Value: itemToken}).Error; err != nil {
return err
}
global.CONF.System.MasterRequestAddr = node.MasterRequestAddr
global.CONF.System.MasterRequestToken = itemToken
global.CurrentNode = node.CurrentNode
} else {
global.CurrentNode = "127.0.0.1"
@ -190,20 +196,6 @@ var InitSetting = &gormigrate.Migration{
},
}
var InitBackupAccount = &gormigrate.Migration{
ID: "20240722-init-backup",
Migrate: func(tx *gorm.DB) error {
item := &model.BackupAccount{
Type: "LOCAL",
Vars: fmt.Sprintf("{\"dir\":\"%s\"}", global.CONF.System.Backup),
}
if err := tx.Create(item).Error; err != nil {
return err
}
return nil
},
}
var InitImageRepo = &gormigrate.Migration{
ID: "20240722-init-imagerepo",
Migrate: func(tx *gorm.DB) error {

View File

@ -24,7 +24,6 @@ func (s *SettingRouter) InitRouter(Router *gin.RouterGroup) {
settingRouter.POST("/snapshot/rollback", baseApi.RollbackSnapshot)
settingRouter.POST("/snapshot/description/update", baseApi.UpdateSnapDescription)
settingRouter.POST("/backup/operate", baseApi.OperateBackup)
settingRouter.POST("/backup/backup", baseApi.Backup)
settingRouter.POST("/backup/recover", baseApi.Recover)
settingRouter.POST("/backup/recover/byupload", baseApi.RecoverByUpload)

View File

@ -0,0 +1,56 @@
package http
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"time"
"github.com/1Panel-dev/1Panel/agent/app/dto"
"github.com/1Panel-dev/1Panel/agent/constant"
"github.com/1Panel-dev/1Panel/agent/global"
)
// RequestToMaster sends an HTTP request with the given method and body to the
// configured master node and returns the decoded business payload.
//
// The target URL is derived from global.CONF.System.MasterRequestAddr joined
// with reqUrl, and the master token is attached via the JWT header so the
// master can authenticate this agent. An error is returned when the URL
// cannot be parsed, the request cannot be built or sent, the HTTP status is
// not 200, or the wrapped response carries a non-200 business code.
func RequestToMaster(reqUrl, reqMethod string, reqBody io.Reader) (interface{}, error) {
	client := &http.Client{
		Timeout: time.Second * 5,
	}
	parsedURL, err := url.Parse(global.CONF.System.MasterRequestAddr)
	if err != nil {
		return nil, fmt.Errorf("handle url Parse failed, err: %v", err)
	}
	// Carry the scheme over from the master address: the previous
	// url.URL{Path, Host} construction dropped it, yielding a scheme-less
	// "//host/path" URL that the HTTP client cannot send.
	rURL := &url.URL{
		Scheme: parsedURL.Scheme,
		Host:   parsedURL.Host,
		Path:   reqUrl,
	}
	req, err := http.NewRequest(reqMethod, rURL.String(), reqBody)
	if err != nil {
		return nil, fmt.Errorf("handle request failed, err: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set(constant.JWTHeaderName, global.CONF.System.MasterRequestToken)
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("client do request failed, err: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("do request failed, err: %v", resp.Status)
	}
	bodyByte, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read resp body from request failed, err: %v", err)
	}
	var respJson dto.Response
	if err := json.Unmarshal(bodyByte, &respJson); err != nil {
		return nil, fmt.Errorf("json unmarshal resp data failed, err: %v", err)
	}
	// The transport succeeded; now check the application-level result code
	// wrapped inside the standard response envelope.
	if respJson.Code != http.StatusOK {
		return nil, fmt.Errorf("do request success but handle failed, err: %v", respJson.Message)
	}
	return respJson.Data, nil
}

View File

@ -1,69 +0,0 @@
package jwt
import (
"time"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/1Panel-dev/1Panel/agent/constant"
"github.com/golang-jwt/jwt/v4"
)
// JWT wraps the HMAC signing key used to issue and validate agent tokens.
type JWT struct {
	SigningKey []byte
}

// JwtRequest is the claim set decoded from an incoming token.
type JwtRequest struct {
	BaseClaims
	BufferTime int64
	jwt.RegisteredClaims
}

// CustomClaims is the claim set embedded into newly issued tokens.
// NOTE(review): it is structurally identical to JwtRequest — presumably kept
// separate to distinguish issue-side from parse-side claims; confirm before
// merging the two.
type CustomClaims struct {
	BaseClaims
	BufferTime int64
	jwt.RegisteredClaims
}

// BaseClaims carries the caller's identity inside a token.
type BaseClaims struct {
	ID   uint
	Name string
}
// NewJWT builds a JWT helper whose signing key is loaded from the
// "JWTSigningKey" entry of the settings repository.
func NewJWT() *JWT {
	settings := repo.NewISettingRepo()
	sign, _ := settings.Get(settings.WithByKey("JWTSigningKey"))
	return &JWT{SigningKey: []byte(sign.Value)}
}
// CreateClaims assembles the claim set for a new token: the caller's base
// identity plus an expiry of constant.JWTBufferTime seconds from now.
func (j *JWT) CreateClaims(baseClaims BaseClaims) CustomClaims {
	expiresAt := jwt.NewNumericDate(time.Now().Add(time.Duration(constant.JWTBufferTime) * time.Second))
	return CustomClaims{
		BaseClaims: baseClaims,
		BufferTime: constant.JWTBufferTime,
		RegisteredClaims: jwt.RegisteredClaims{
			ExpiresAt: expiresAt,
			Issuer:    constant.JWTIssuer,
		},
	}
}
// CreateToken signs the given claims with HS256 and returns the compact
// token string.
func (j *JWT) CreateToken(request CustomClaims) (string, error) {
	return jwt.NewWithClaims(jwt.SigningMethodHS256, &request).SignedString(j.SigningKey)
}
// ParseToken validates tokenStr against the signing key and returns its
// claims; any parse or validation failure is reported as ErrTokenParse.
func (j *JWT) ParseToken(tokenStr string) (*JwtRequest, error) {
	keyFunc := func(_ *jwt.Token) (interface{}, error) {
		return j.SigningKey, nil
	}
	token, err := jwt.ParseWithClaims(tokenStr, &JwtRequest{}, keyFunc)
	if err != nil || token == nil {
		return nil, constant.ErrTokenParse
	}
	claims, ok := token.Claims.(*JwtRequest)
	if !ok || !token.Valid {
		return nil, constant.ErrTokenParse
	}
	return claims, nil
}

View File

@ -129,7 +129,7 @@ func (b *BaseApi) UpdateBackup(c *gin.Context) {
// @Param request body dto.SearchPageWithType true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /core/backup/search [get]
// @Router /core/backup/search [post]
func (b *BaseApi) SearchBackup(c *gin.Context) {
var req dto.SearchPageWithType
if err := helper.CheckBindAndValidate(&req, c); err != nil {
@ -147,3 +147,32 @@ func (b *BaseApi) SearchBackup(c *gin.Context) {
Total: total,
})
}
// GetBackup returns a single backup account looked up by its ID.
// NOTE(review): the agent router exposes this as GET /backup/:id, but the
// handler binds dto.OperateByID via CheckBindAndValidate rather than reading
// the :id path parameter — confirm the helper consumes URI params for GET.
func (b *BaseApi) GetBackup(c *gin.Context) {
	var req dto.OperateByID
	// Bind/validation failures are written to the response inside the helper,
	// so a bare return is sufficient here.
	if err := helper.CheckBindAndValidate(&req, c); err != nil {
		return
	}
	data, err := backupService.Get(req)
	if err != nil {
		helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInternalServer, err)
		return
	}
	helper.SuccessWithData(c, data)
}
// ListBackup returns the backup accounts matching the IDs in the request body.
func (b *BaseApi) ListBackup(c *gin.Context) {
	var request dto.OperateByIDs
	if err := helper.CheckBindAndValidate(&request, c); err != nil {
		return
	}
	accounts, err := backupService.List(request)
	if err != nil {
		helper.ErrorWithDetail(c, constant.CodeErrInternalServer, constant.ErrTypeInternalServer, err)
		return
	}
	helper.SuccessWithData(c, accounts)
}

View File

@ -34,3 +34,6 @@ type Options struct {
// OperateByID identifies a single record by its primary key.
type OperateByID struct {
	ID uint `json:"id"`
}

// OperateByIDs identifies a batch of records by their primary keys.
type OperateByIDs struct {
	IDs []uint `json:"ids"`
}

View File

@ -9,6 +9,7 @@ type DBOption func(*gorm.DB) *gorm.DB
// ICommonRepo provides reusable gorm query scopes shared by the repositories.
type ICommonRepo interface {
	WithByID(id uint) DBOption
	WithByName(name string) DBOption
	WithByIDs(ids []uint) DBOption
	WithByType(ty string) DBOption
	WithOrderBy(orderStr string) DBOption
}
@ -32,6 +33,11 @@ func (c *CommonRepo) WithByName(name string) DBOption {
return g.Where("`name` = ?", name)
}
}
// WithByIDs scopes a query to records whose primary key is in ids.
func (c *CommonRepo) WithByIDs(ids []uint) DBOption {
	return func(db *gorm.DB) *gorm.DB {
		return db.Where("id in (?)", ids)
	}
}
func (c *CommonRepo) WithByType(ty string) DBOption {
return func(g *gorm.DB) *gorm.DB {
if len(ty) == 0 {

View File

@ -123,7 +123,8 @@ func (u *AuthService) generateSession(c *gin.Context, name, authMethod string) (
if authMethod == constant.AuthMethodJWT {
j := jwt.NewJWT()
claims := j.CreateClaims(jwt.BaseClaims{
Name: name,
Name: name,
IsAgent: false,
})
token, err := j.CreateToken(claims)
if err != nil {

View File

@ -19,7 +19,6 @@ import (
"github.com/1Panel-dev/1Panel/core/utils/cloud_storage/client"
"github.com/1Panel-dev/1Panel/core/utils/encrypt"
fileUtils "github.com/1Panel-dev/1Panel/core/utils/files"
"github.com/1Panel-dev/1Panel/core/utils/xpack"
"github.com/jinzhu/copier"
"github.com/pkg/errors"
"github.com/robfig/cron/v3"
@ -28,6 +27,9 @@ import (
// BackupService implements IBackupService on top of the backup account repository.
type BackupService struct{}
type IBackupService interface {
Get(req dto.OperateByID) (dto.BackupInfo, error)
List(req dto.OperateByIDs) ([]dto.BackupInfo, error)
SearchWithPage(search dto.SearchPageWithType) (int64, interface{}, error)
LoadOneDriveInfo() (dto.OneDriveInfo, error)
Create(backupDto dto.BackupOperate) error
@ -43,6 +45,50 @@ func NewIBackupService() IBackupService {
return &BackupService{}
}
// Get returns the backup account with the given ID, with its AccessKey and
// Credential decrypted for the caller.
func (u *BackupService) Get(req dto.OperateByID) (dto.BackupInfo, error) {
	var data dto.BackupInfo
	accounts, err := backupRepo.List(commonRepo.WithByID(req.ID))
	if err != nil {
		return data, err
	}
	// backupRepo.List returns a slice; the previous code copied the slice
	// itself into the single BackupInfo struct, which leaves data empty.
	// Copy the matched record instead, and fail loudly when none matched.
	if len(accounts) == 0 {
		return data, errors.New("no such backup account in db")
	}
	if err := copier.Copy(&data, &accounts[0]); err != nil {
		global.LOG.Errorf("copy backup account to dto backup info failed, err: %v", err)
	}
	data.AccessKey, err = encrypt.StringDecryptWithBase64(data.AccessKey)
	if err != nil {
		return data, err
	}
	data.Credential, err = encrypt.StringDecryptWithBase64(data.Credential)
	if err != nil {
		return data, err
	}
	return data, nil
}
// List returns the backup accounts matching the given IDs, newest first,
// with each account's AccessKey and Credential decrypted.
func (u *BackupService) List(req dto.OperateByIDs) ([]dto.BackupInfo, error) {
	accounts, err := backupRepo.List(commonRepo.WithByIDs(req.IDs), commonRepo.WithOrderBy("created_at desc"))
	if err != nil {
		return nil, err
	}
	var data []dto.BackupInfo
	for i := range accounts {
		var item dto.BackupInfo
		if err := copier.Copy(&item, &accounts[i]); err != nil {
			global.LOG.Errorf("copy backup account to dto backup info failed, err: %v", err)
		}
		if item.AccessKey, err = encrypt.StringDecryptWithBase64(item.AccessKey); err != nil {
			return nil, err
		}
		if item.Credential, err = encrypt.StringDecryptWithBase64(item.Credential); err != nil {
			return nil, err
		}
		data = append(data, item)
	}
	return data, nil
}
func (u *BackupService) SearchWithPage(req dto.SearchPageWithType) (int64, interface{}, error) {
count, accounts, err := backupRepo.Page(
req.Page,
@ -141,9 +187,6 @@ func (u *BackupService) Create(req dto.BackupOperate) error {
return err
}
}
if err := xpack.SyncBackupOperation("add", []model.BackupAccount{backup}); err != nil {
return err
}
backup.AccessKey, err = encrypt.StringEncrypt(backup.AccessKey)
if err != nil {
@ -205,9 +248,6 @@ func (u *BackupService) Delete(id uint) error {
global.Cron.Remove(cron.EntryID(backup.EntryID))
}
if err := xpack.SyncBackupOperation("remove", []model.BackupAccount{backup}); err != nil {
return err
}
return backupRepo.Delete(commonRepo.WithByID(id))
}
@ -263,10 +303,6 @@ func (u *BackupService) Update(req dto.BackupOperate) error {
}
}
if err := xpack.SyncBackupOperation("update", []model.BackupAccount{newBackup}); err != nil {
return err
}
newBackup.AccessKey, err = encrypt.StringEncrypt(newBackup.AccessKey)
if err != nil {
return err

View File

@ -1,21 +1,22 @@
package configs
type System struct {
Port string `mapstructure:"port"`
Ipv6 string `mapstructure:"ipv6"`
BindAddress string `mapstructure:"bindAddress"`
SSL string `mapstructure:"ssl"`
DbCoreFile string `mapstructure:"db_core_file"`
EncryptKey string `mapstructure:"encrypt_key"`
BaseDir string `mapstructure:"base_dir"`
BackupDir string `mapstructure:"backup_dir"`
Mode string `mapstructure:"mode"`
RepoUrl string `mapstructure:"repo_url"`
Version string `mapstructure:"version"`
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
Entrance string `mapstructure:"entrance"`
IsDemo bool `mapstructure:"is_demo"`
ChangeUserInfo string `mapstructure:"change_user_info"`
DbPath string `mapstructure:"db_path"`
MasterRequestAddr string `mapstructure:"masterRequestAddr"`
Port string `mapstructure:"port"`
Ipv6 string `mapstructure:"ipv6"`
BindAddress string `mapstructure:"bindAddress"`
SSL string `mapstructure:"ssl"`
DbCoreFile string `mapstructure:"db_core_file"`
EncryptKey string `mapstructure:"encrypt_key"`
BaseDir string `mapstructure:"base_dir"`
BackupDir string `mapstructure:"backup_dir"`
Mode string `mapstructure:"mode"`
RepoUrl string `mapstructure:"repo_url"`
Version string `mapstructure:"version"`
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
Entrance string `mapstructure:"entrance"`
IsDemo bool `mapstructure:"is_demo"`
ChangeUserInfo string `mapstructure:"change_user_info"`
DbPath string `mapstructure:"db_path"`
}

View File

@ -15,6 +15,11 @@ import (
func Init() {
settingRepo := repo.NewISettingRepo()
masterSetting, err := settingRepo.Get(settingRepo.WithByKey("MasterRequestAddr"))
if err != nil {
global.LOG.Errorf("load master request addr from setting failed, err: %v", err)
}
global.CONF.System.MasterRequestAddr = masterSetting.Value
portSetting, err := settingRepo.Get(settingRepo.WithByKey("ServerPort"))
if err != nil {
global.LOG.Errorf("load service port from setting failed, err: %v", err)

View File

@ -122,6 +122,9 @@ var InitSetting = &gormigrate.Migration{
if err := tx.Create(&model.Setting{Key: "SystemStatus", Value: "Free"}).Error; err != nil {
return err
}
if err := tx.Create(&model.Setting{Key: "MasterRequestAddr", Value: ""}).Error; err != nil {
return err
}
if err := tx.Create(&model.Setting{Key: "BindAddress", Value: "0.0.0.0"}).Error; err != nil {
return err
}

View File

@ -9,6 +9,7 @@ import (
"github.com/1Panel-dev/1Panel/core/global"
"github.com/1Panel-dev/1Panel/core/i18n"
"github.com/1Panel-dev/1Panel/core/middleware"
"github.com/1Panel-dev/1Panel/core/router"
rou "github.com/1Panel-dev/1Panel/core/router"
"github.com/gin-contrib/gzip"
"github.com/gin-gonic/gin"
@ -53,6 +54,11 @@ func Routers() *gin.Engine {
setWebStatic(PublicGroup)
}
agentRouter := Router.Group("agent")
agentRouter.Use(middleware.JwtAuth())
var agent router.AgentRouter
agent.InitRouter(agentRouter)
Router.Use(middleware.OperationLog())
if global.CONF.System.IsDemo {
Router.Use(middleware.DemoHandle())

16
core/router/ro_agent.go Normal file
View File

@ -0,0 +1,16 @@
package router
import (
v2 "github.com/1Panel-dev/1Panel/core/app/api/v2"
"github.com/gin-gonic/gin"
)
// AgentRouter registers the routes that agents use to query the core service.
type AgentRouter struct{}

// InitRouter wires the agent-facing backup endpoints onto the given group.
func (s *AgentRouter) InitRouter(Router *gin.RouterGroup) {
	baseApi := v2.ApiGroupApp.BaseApi
	Router.GET("/backup/:id", baseApi.GetBackup)
	Router.POST("/backup/list", baseApi.ListBackup)
}

View File

@ -13,12 +13,6 @@ type JWT struct {
SigningKey []byte
}
type JwtRequest struct {
BaseClaims
BufferTime int64
jwt.RegisteredClaims
}
type CustomClaims struct {
BaseClaims
BufferTime int64
@ -26,8 +20,9 @@ type CustomClaims struct {
}
type BaseClaims struct {
ID uint
Name string
ID uint
Name string
IsAgent bool
}
func NewJWT() *JWT {
@ -55,14 +50,14 @@ func (j *JWT) CreateToken(request CustomClaims) (string, error) {
return token.SignedString(j.SigningKey)
}
func (j *JWT) ParseToken(tokenStr string) (*JwtRequest, error) {
token, err := jwt.ParseWithClaims(tokenStr, &JwtRequest{}, func(token *jwt.Token) (interface{}, error) {
func (j *JWT) ParseToken(tokenStr string) (*CustomClaims, error) {
token, err := jwt.ParseWithClaims(tokenStr, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {
return j.SigningKey, nil
})
if err != nil || token == nil {
return nil, constant.ErrTokenParse
}
if claims, ok := token.Claims.(*JwtRequest); ok && token.Valid {
if claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {
return claims, nil
}
return nil, constant.ErrTokenParse

View File

@ -137,6 +137,9 @@ const loadNodes = async () => {
return;
}
nodes.value = res.data;
if (nodes.value.length === 1) {
globalStore.currentNode = nodes.value[0].name;
}
return;
}
nodes.value = [];