1
0
mirror of https://github.com/1Panel-dev/1Panel.git synced 2025-01-31 14:08:06 +08:00

feat: Add usage scope for backup accounts (#7716)

* feat: Add usage scope for backup accounts

* fix: 解决计划任务列表不显示备份账号的问题

* feat: 统一备份文件大小获取接口
This commit is contained in:
ssongliu 2025-01-14 14:27:51 +08:00 committed by GitHub
parent c4f9d29bcb
commit 78199a49ed
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
72 changed files with 1519 additions and 929 deletions

View File

@ -39,13 +39,121 @@ func (b *BaseApi) SyncBackupAccount(c *gin.Context) {
helper.SuccessWithOutData(c)
}
// @Tags Backup Account
// @Summary Create backup account
// @Description 创建备份账号
// @Accept json
// @Param request body dto.BackupOperate true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /backups [post]
// @x-panel-log {"bodyKeys":["type"],"paramKeys":[],"BeforeFunctions":[],"formatZH":"创建备份账号 [type]","formatEN":"create backup account [type]"}
// CreateBackup registers a new backup account from the request body.
func (b *BaseApi) CreateBackup(c *gin.Context) {
	var request dto.BackupOperate
	if bindErr := helper.CheckBindAndValidate(&request, c); bindErr != nil {
		return
	}
	createErr := backupService.Create(request)
	if createErr != nil {
		helper.InternalServer(c, createErr)
		return
	}
	helper.SuccessWithOutData(c)
}
// @Tags Backup Account
// @Summary Refresh token
// @Description 刷新 token
// @Accept json
// @Param request body dto.BackupOperate true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /backups/refresh/token [post]
// RefreshToken renews the stored token of the backup account named by ID.
func (b *BaseApi) RefreshToken(c *gin.Context) {
	var request dto.OperateByID
	if bindErr := helper.CheckBindAndValidate(&request, c); bindErr != nil {
		return
	}
	refreshErr := backupService.RefreshToken(request)
	if refreshErr != nil {
		helper.InternalServer(c, refreshErr)
		return
	}
	helper.SuccessWithOutData(c)
}
// @Tags Backup Account
// @Summary List buckets
// @Description 获取 bucket 列表
// @Accept json
// @Param request body dto.ForBuckets true "request"
// @Success 200 {array} string
// @Security ApiKeyAuth
// @Router /buckets [post]
// ListBuckets enumerates the storage buckets reachable with the submitted credentials.
func (b *BaseApi) ListBuckets(c *gin.Context) {
	var request dto.ForBuckets
	if bindErr := helper.CheckBindAndValidate(&request, c); bindErr != nil {
		return
	}
	bucketList, listErr := backupService.GetBuckets(request)
	if listErr != nil {
		helper.InternalServer(c, listErr)
		return
	}
	helper.SuccessWithData(c, bucketList)
}
// @Tags Backup Account
// @Summary Delete backup account
// @Description 删除备份账号
// @Accept json
// @Param request body dto.OperateByID true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /backups/del [post]
// @x-panel-log {"bodyKeys":["id"],"paramKeys":[],"BeforeFunctions":[{"input_column":"id","input_value":"id","isList":false,"db":"backup_accounts","output_column":"type","output_value":"types"}],"formatZH":"删除备份账号 [types]","formatEN":"delete backup account [types]"}
// DeleteBackup removes the backup account identified by the request ID.
func (b *BaseApi) DeleteBackup(c *gin.Context) {
	var request dto.OperateByID
	if bindErr := helper.CheckBindAndValidate(&request, c); bindErr != nil {
		return
	}
	deleteErr := backupService.Delete(request.ID)
	if deleteErr != nil {
		helper.InternalServer(c, deleteErr)
		return
	}
	helper.SuccessWithData(c, nil)
}
// @Tags Backup Account
// @Summary Update backup account
// @Description 更新备份账号信息
// @Accept json
// @Param request body dto.BackupOperate true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /backups/update [post]
// Fix: the log format referenced [types], but bodyKeys only provides "type"
// and there are no BeforeFunctions to populate "types" (compare CreateBackup),
// so the operation-log placeholder could never resolve. Use [type] instead.
// @x-panel-log {"bodyKeys":["type"],"paramKeys":[],"BeforeFunctions":[],"formatZH":"更新备份账号 [type]","formatEN":"update backup account [type]"}
// UpdateBackup updates an existing backup account from the request body.
func (b *BaseApi) UpdateBackup(c *gin.Context) {
	var req dto.BackupOperate
	if err := helper.CheckBindAndValidate(&req, c); err != nil {
		return
	}
	if err := backupService.Update(req); err != nil {
		helper.InternalServer(c, err)
		return
	}
	helper.SuccessWithData(c, nil)
}
// @Tags Backup Account
// @Summary Load backup account options
// @Description 获取备份账号选项
// @Accept json
// @Success 200 {array} dto.BackupOption
// @Security ApiKeyAuth
// @Router /backup/options [get]
// @Router /backups/options [get]
func (b *BaseApi) LoadBackupOptions(c *gin.Context) {
list, err := backupService.LoadBackupOptions()
if err != nil {
@ -55,6 +163,71 @@ func (b *BaseApi) LoadBackupOptions(c *gin.Context) {
helper.SuccessWithData(c, list)
}
// @Tags Backup Account
// @Summary Search backup accounts with page
// @Description 获取备份账号列表
// @Accept json
// @Param request body dto.SearchPageWithType true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /backups/search [post]
// SearchBackup returns a page of backup accounts matching the search request.
func (b *BaseApi) SearchBackup(c *gin.Context) {
	var request dto.SearchPageWithType
	if bindErr := helper.CheckBindAndValidate(&request, c); bindErr != nil {
		return
	}
	total, items, searchErr := backupService.SearchWithPage(request)
	if searchErr != nil {
		helper.InternalServer(c, searchErr)
		return
	}
	helper.SuccessWithData(c, dto.PageResult{Total: total, Items: items})
}
// @Tags Backup Account
// @Summary get local backup dir
// @Description 获取本地备份目录
// @Success 200
// @Security ApiKeyAuth
// @Router /backups/local [get]
// GetLocalDir responds with the directory configured for local backups.
func (b *BaseApi) GetLocalDir(c *gin.Context) {
	localDir, loadErr := backupService.GetLocalDir()
	if loadErr != nil {
		helper.InternalServer(c, loadErr)
		return
	}
	helper.SuccessWithData(c, localDir)
}
// @Tags Backup Account
// @Summary Load backup record size
// @Description 获取备份记录文件大小
// @Accept json
// @Param request body dto.SearchForSize true "request"
// @Success 200 {object} dto.RecordFileSize
// @Security ApiKeyAuth
// @Router /backups/record/size [post]
// LoadBackupRecordSize returns the file sizes of the backup records matched by
// the search request. (The previous Summary/Description said "Page backup
// records" — copied from the record-search handler — and did not match the
// behavior below.)
func (b *BaseApi) LoadBackupRecordSize(c *gin.Context) {
	var req dto.SearchForSize
	if err := helper.CheckBindAndValidate(&req, c); err != nil {
		return
	}
	list, err := backupRecordService.LoadRecordSize(req)
	if err != nil {
		helper.InternalServer(c, err)
		return
	}
	helper.SuccessWithData(c, list)
}
// @Tags Backup Account
// @Summary Page backup records
// @Description 获取备份记录列表分页
@ -62,14 +235,14 @@ func (b *BaseApi) LoadBackupOptions(c *gin.Context) {
// @Param request body dto.RecordSearch true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /backup/record/search [post]
// @Router /backups/record/search [post]
func (b *BaseApi) SearchBackupRecords(c *gin.Context) {
var req dto.RecordSearch
if err := helper.CheckBindAndValidate(&req, c); err != nil {
return
}
total, list, err := backupService.SearchRecordsWithPage(req)
total, list, err := backupRecordService.SearchRecordsWithPage(req)
if err != nil {
helper.InternalServer(c, err)
return
@ -88,14 +261,14 @@ func (b *BaseApi) SearchBackupRecords(c *gin.Context) {
// @Param request body dto.RecordSearchByCronjob true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /backup/record/search/bycronjob [post]
// @Router /backups/record/search/bycronjob [post]
func (b *BaseApi) SearchBackupRecordsByCronjob(c *gin.Context) {
var req dto.RecordSearchByCronjob
if err := helper.CheckBindAndValidate(&req, c); err != nil {
return
}
total, list, err := backupService.SearchRecordsByCronjobWithPage(req)
total, list, err := backupRecordService.SearchRecordsByCronjobWithPage(req)
if err != nil {
helper.InternalServer(c, err)
return
@ -122,7 +295,7 @@ func (b *BaseApi) DownloadRecord(c *gin.Context) {
return
}
filePath, err := backupService.DownloadRecord(req)
filePath, err := backupRecordService.DownloadRecord(req)
if err != nil {
helper.InternalServer(c, err)
return
@ -145,7 +318,7 @@ func (b *BaseApi) DeleteBackupRecord(c *gin.Context) {
return
}
if err := backupService.BatchDeleteRecord(req.Ids); err != nil {
if err := backupRecordService.BatchDeleteRecord(req.Ids); err != nil {
helper.InternalServer(c, err)
return
}
@ -159,14 +332,14 @@ func (b *BaseApi) DeleteBackupRecord(c *gin.Context) {
// @Param request body dto.OperateByID true "request"
// @Success 200 {array} string
// @Security ApiKeyAuth
// @Router /backup/search/files [post]
// @Router /backups/search/files [post]
func (b *BaseApi) LoadFilesFromBackup(c *gin.Context) {
var req dto.OperateByID
if err := helper.CheckBindAndValidate(&req, c); err != nil {
return
}
data := backupService.ListFiles(req)
data := backupRecordService.ListFiles(req)
helper.SuccessWithData(c, data)
}
@ -177,7 +350,7 @@ func (b *BaseApi) LoadFilesFromBackup(c *gin.Context) {
// @Param request body dto.CommonBackup true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /backup/backup [post]
// @Router /backups/backup [post]
// @x-panel-log {"bodyKeys":["type","name","detailName"],"paramKeys":[],"BeforeFunctions":[],"formatZH":"备份 [type] 数据 [name][detailName]","formatEN":"backup [type] data [name][detailName]"}
func (b *BaseApi) Backup(c *gin.Context) {
var req dto.CommonBackup
@ -222,7 +395,7 @@ func (b *BaseApi) Backup(c *gin.Context) {
// @Param request body dto.CommonRecover true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /backup/recover [post]
// @Router /backups/recover [post]
// @x-panel-log {"bodyKeys":["type","name","detailName","file"],"paramKeys":[],"BeforeFunctions":[],"formatZH":"从 [file] 恢复 [type] 数据 [name][detailName]","formatEN":"recover [type] data [name][detailName] from [file]"}
func (b *BaseApi) Recover(c *gin.Context) {
var req dto.CommonRecover
@ -230,7 +403,7 @@ func (b *BaseApi) Recover(c *gin.Context) {
return
}
downloadPath, err := backupService.DownloadRecord(dto.DownloadRecord{
downloadPath, err := backupRecordService.DownloadRecord(dto.DownloadRecord{
DownloadAccountID: req.DownloadAccountID,
FileDir: path.Dir(req.File),
FileName: path.Base(req.File),
@ -277,7 +450,7 @@ func (b *BaseApi) Recover(c *gin.Context) {
// @Param request body dto.CommonRecover true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /backup/recover/byupload [post]
// @Router /backups/recover/byupload [post]
// @x-panel-log {"bodyKeys":["type","name","detailName","file"],"paramKeys":[],"BeforeFunctions":[],"formatZH":"从 [file] 恢复 [type] 数据 [name][detailName]","formatEN":"recover [type] data [name][detailName] from [file]"}
func (b *BaseApi) RecoverByUpload(c *gin.Context) {
var req dto.CommonRecover

View File

@ -40,8 +40,9 @@ var (
ftpService = service.NewIFtpService()
clamService = service.NewIClamService()
settingService = service.NewISettingService()
backupService = service.NewIBackupService()
settingService = service.NewISettingService()
backupService = service.NewIBackupService()
backupRecordService = service.NewIBackupRecordService()
websiteService = service.NewIWebsiteService()
websiteDnsAccountService = service.NewIWebsiteDnsAccountService()

View File

@ -134,28 +134,6 @@ func (b *BaseApi) SearchSnapshot(c *gin.Context) {
})
}
// @Tags System Setting
// @Summary Load system snapshot size
// @Description 获取系统快照文件大小
// @Accept json
// @Param request body dto.SearchWithPage true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /settings/snapshot/size [post]
// LoadSnapshotSize reports the file size of each system snapshot matching the search.
func (b *BaseApi) LoadSnapshotSize(c *gin.Context) {
	var request dto.SearchWithPage
	if bindErr := helper.CheckBindAndValidate(&request, c); bindErr != nil {
		return
	}
	sizes, loadErr := snapshotService.LoadSize(request)
	if loadErr != nil {
		helper.InternalServer(c, loadErr)
		return
	}
	helper.SuccessWithData(c, sizes)
}
// @Tags System Setting
// @Summary Recover system backup
// @Description 从系统快照恢复

View File

@ -4,6 +4,42 @@ import (
"time"
)
// BackupOperate is the create/update request payload for a backup account.
// AccessKey and Credential arrive base64-encoded; the service layer decodes
// them before storing (see BackupService.Create).
type BackupOperate struct {
	ID         uint   `json:"id"`
	Name       string `json:"name"`
	Type       string `json:"type" validate:"required"` // storage provider type, e.g. sftp/s3/local
	IsPublic   bool   `json:"isPublic"`
	Bucket     string `json:"bucket"`
	AccessKey  string `json:"accessKey"`
	Credential string `json:"credential"`
	BackupPath string `json:"backupPath"`
	// Vars holds provider-specific settings as a JSON-encoded string.
	Vars         string `json:"vars" validate:"required"`
	RememberAuth bool   `json:"rememberAuth"`
}
// BackupInfo is the client-facing view of a stored backup account. Depending
// on RememberAuth, AccessKey/Credential are either blanked or re-encoded as
// base64 before being returned (see BackupService.SearchWithPage).
type BackupInfo struct {
	ID         uint   `json:"id"`
	Name       string `json:"name"`
	Type       string `json:"type"`
	IsPublic   bool   `json:"isPublic"`
	Bucket     string `json:"bucket"`
	AccessKey  string `json:"accessKey"`
	Credential string `json:"credential"`
	BackupPath string `json:"backupPath"`
	// Vars is the provider-specific settings JSON, with sensitive keys
	// (refresh_token, passPhrase, ...) stripped for some account types.
	Vars         string    `json:"vars"`
	CreatedAt    time.Time `json:"createdAt"`
	RememberAuth bool      `json:"rememberAuth"`
}
// ForBuckets is the request used to list buckets on a storage provider before
// an account is saved. AccessKey and Credential are base64-encoded by the
// caller and decoded in BackupService.GetBuckets.
type ForBuckets struct {
	Type       string `json:"type" validate:"required"`
	AccessKey  string `json:"accessKey"`
	Credential string `json:"credential" validate:"required"`
	Vars       string `json:"vars" validate:"required"` // provider-specific settings, JSON-encoded
}
type SyncFromMaster struct {
Name string `json:"name" validate:"required"`
Operation string `json:"operation" validate:"required,oneof=create delete update"`
@ -11,9 +47,10 @@ type SyncFromMaster struct {
}
type BackupOption struct {
ID uint `json:"id"`
Name string `json:"name"`
Type string `json:"type"`
ID uint `json:"id"`
Name string `json:"name"`
Type string `json:"type"`
IsPublic bool `json:"isPublic"`
}
type CommonBackup struct {
@ -55,7 +92,6 @@ type BackupRecords struct {
DownloadAccountID uint `json:"downloadAccountID"`
FileDir string `json:"fileDir"`
FileName string `json:"fileName"`
Size int64 `json:"size"`
}
type DownloadRecord struct {
@ -63,3 +99,17 @@ type DownloadRecord struct {
FileDir string `json:"fileDir" validate:"required"`
FileName string `json:"fileName" validate:"required"`
}
// SearchForSize is the paged request for loading backup-record file sizes,
// filtered by resource type plus optional name/detail-name/keyword or the
// owning cronjob ID.
type SearchForSize struct {
	PageInfo
	Type       string `json:"type" validate:"required"`
	Name       string `json:"name"`
	DetailName string `json:"detailName"`
	Info       string `json:"info"`
	CronjobID  uint   `json:"cronjobID"`
}
// RecordFileSize pairs a backup record with the size of its backing file.
type RecordFileSize struct {
	ID   uint   `json:"id"`
	Name string `json:"name"`
	Size int64  `json:"size"` // file size as reported by the storage backend — presumably bytes; confirm with LoadRecordSize
}

View File

@ -5,6 +5,12 @@ type SearchWithPage struct {
Info string `json:"info"`
}
// SearchPageWithType is a paged search request with an optional free-text
// keyword (Info) and an optional type filter.
type SearchPageWithType struct {
	PageInfo
	Info string `json:"info"`
	Type string `json:"type"`
}
type PageInfo struct {
Page int `json:"page" validate:"required,number"`
PageSize int `json:"pageSize" validate:"required,number"`

View File

@ -106,17 +106,17 @@ type CronjobInfo struct {
ContainerName string `json:"containerName"`
User string `json:"user"`
AppID string `json:"appID"`
Website string `json:"website"`
ExclusionRules string `json:"exclusionRules"`
DBType string `json:"dbType"`
DBName string `json:"dbName"`
URL string `json:"url"`
IsDir bool `json:"isDir"`
SourceDir string `json:"sourceDir"`
SourceAccountIDs string `json:"sourceAccountIDs"`
DownloadAccountID uint `json:"downloadAccountID"`
RetainCopies int `json:"retainCopies"`
AppID string `json:"appID"`
Website string `json:"website"`
ExclusionRules string `json:"exclusionRules"`
DBType string `json:"dbType"`
DBName string `json:"dbName"`
URL string `json:"url"`
IsDir bool `json:"isDir"`
SourceDir string `json:"sourceDir"`
SourceAccounts []string `json:"sourceAccounts"`
DownloadAccount string `json:"downloadAccount"`
RetainCopies int `json:"retainCopies"`
LastRecordStatus string `json:"lastRecordStatus"`
LastRecordTime string `json:"lastRecordTime"`

View File

@ -88,8 +88,8 @@ type SnapshotInfo struct {
ID uint `json:"id"`
Name string `json:"name"`
Description string `json:"description" validate:"max=256"`
From string `json:"from"`
DefaultDownload string `json:"defaultDownload"`
SourceAccounts []string `json:"sourceAccounts"`
DownloadAccount string `json:"downloadAccount"`
Status string `json:"status"`
Message string `json:"message"`
CreatedAt time.Time `json:"createdAt"`
@ -108,11 +108,3 @@ type SnapshotInfo struct {
RollbackMessage string `json:"rollbackMessage"`
LastRollbackedAt string `json:"lastRollbackedAt"`
}
// SnapshotFile describes a system snapshot entry together with its file size.
type SnapshotFile struct {
	ID              uint   `json:"id"`
	Name            string `json:"name"`
	From            string `json:"from"`
	DefaultDownload string `json:"defaultDownload"`
	Size            int64  `json:"size"` // snapshot file size — presumably bytes; confirm against the size loader
}

View File

@ -4,6 +4,7 @@ type BackupAccount struct {
BaseModel
Name string `gorm:"not null;default:''" json:"name"`
Type string `gorm:"not null;default:''" json:"type"`
IsPublic bool `json:"isPublic"`
Bucket string `json:"bucket"`
AccessKey string `json:"accessKey"`
Credential string `json:"credential"`
@ -13,8 +14,6 @@ type BackupAccount struct {
RememberAuth bool `json:"rememberAuth"`
}
// Source ---> SourceAccountIDs
// BackupType ---> DownloadAccountID
type BackupRecord struct {
BaseModel
From string `json:"from"`

View File

@ -13,6 +13,7 @@ type BackupRepo struct{}
type IBackupRepo interface {
Get(opts ...DBOption) (model.BackupAccount, error)
List(opts ...DBOption) ([]model.BackupAccount, error)
Page(limit, offset int, opts ...DBOption) (int64, []model.BackupAccount, error)
Create(backup *model.BackupAccount) error
Save(backup *model.BackupAccount) error
Delete(opts ...DBOption) error

View File

@ -29,7 +29,7 @@ type ICronjobRepo interface {
Update(id uint, vars map[string]interface{}) error
Delete(opts ...DBOption) error
DeleteRecord(opts ...DBOption) error
StartRecords(cronjobID uint, targetPath string) model.JobRecords
StartRecords(cronjobID uint, targetPath, cronjobType string) model.JobRecords
UpdateRecords(id uint, vars map[string]interface{}) error
EndRecords(record model.JobRecords, status, message, records string)
PageRecords(page, size int, opts ...DBOption) (int64, []model.JobRecords, error)
@ -143,11 +143,13 @@ func (c *CronjobRepo) WithByRecordDropID(id int) DBOption {
}
}
func (u *CronjobRepo) StartRecords(cronjobID uint, targetPath string) model.JobRecords {
func (u *CronjobRepo) StartRecords(cronjobID uint, targetPath, cronjobType string) model.JobRecords {
var record model.JobRecords
record.StartTime = time.Now()
record.CronjobID = cronjobID
record.TaskID = uuid.New().String()
if cronjobType != "directory" && cronjobType != "log" {
record.TaskID = uuid.New().String()
}
record.Status = constant.StatusWaiting
if err := global.DB.Create(&record).Error; err != nil {
global.LOG.Errorf("create record status failed, err: %v", err)

View File

@ -5,10 +5,6 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/1Panel-dev/1Panel/agent/utils/nginx"
"github.com/1Panel-dev/1Panel/agent/utils/nginx/parser"
"github.com/1Panel-dev/1Panel/agent/utils/xpack"
"log"
"math"
"net/http"
@ -21,6 +17,11 @@ import (
"strings"
"time"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/1Panel-dev/1Panel/agent/utils/nginx"
"github.com/1Panel-dev/1Panel/agent/utils/nginx/parser"
"github.com/1Panel-dev/1Panel/agent/utils/xpack"
"github.com/1Panel-dev/1Panel/agent/app/task"
"github.com/docker/docker/api/types"
@ -562,17 +563,18 @@ func upgradeInstall(req request.AppInstallUpgrade) error {
backUpApp := func(t *task.Task) error {
if req.Backup {
backupService := NewIBackupService()
backupRecordService := NewIBackupRecordService()
fileName := fmt.Sprintf("upgrade_backup_%s_%s.tar.gz", install.Name, time.Now().Format(constant.DateTimeSlimLayout)+common.RandStrAndNum(5))
backupRecord, err := backupService.AppBackup(dto.CommonBackup{Name: install.App.Key, DetailName: install.Name, FileName: fileName})
if err == nil {
backups, _ := backupService.ListAppRecords(install.App.Key, install.Name, "upgrade_backup")
backups, _ := backupRecordService.ListAppRecords(install.App.Key, install.Name, "upgrade_backup")
if len(backups) > 3 {
backupsToDelete := backups[:len(backups)-3]
var deleteIDs []uint
for _, backup := range backupsToDelete {
deleteIDs = append(deleteIDs, backup.ID)
}
_ = backupService.BatchDeleteRecord(deleteIDs)
_ = backupRecordService.BatchDeleteRecord(deleteIDs)
}
backupFile = path.Join(global.CONF.System.Backup, backupRecord.FileDir, backupRecord.FileName)
} else {

View File

@ -1,17 +1,17 @@
package service
import (
"context"
"bufio"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/1Panel-dev/1Panel/agent/app/dto"
"github.com/1Panel-dev/1Panel/agent/app/model"
@ -19,7 +19,9 @@ import (
"github.com/1Panel-dev/1Panel/agent/constant"
"github.com/1Panel-dev/1Panel/agent/global"
"github.com/1Panel-dev/1Panel/agent/utils/cloud_storage"
"github.com/1Panel-dev/1Panel/agent/utils/cloud_storage/client"
"github.com/1Panel-dev/1Panel/agent/utils/encrypt"
"github.com/1Panel-dev/1Panel/agent/utils/files"
"github.com/jinzhu/copier"
"github.com/pkg/errors"
)
@ -31,15 +33,13 @@ type IBackupService interface {
Sync(req dto.SyncFromMaster) error
LoadBackupOptions() ([]dto.BackupOption, error)
SearchRecordsWithPage(search dto.RecordSearch) (int64, []dto.BackupRecords, error)
SearchRecordsByCronjobWithPage(search dto.RecordSearchByCronjob) (int64, []dto.BackupRecords, error)
DownloadRecord(info dto.DownloadRecord) (string, error)
DeleteRecordByName(backupType, name, detailName string, withDeleteFile bool) error
BatchDeleteRecord(ids []uint) error
ListAppRecords(name, detailName, fileName string) ([]model.BackupRecord, error)
ListFiles(req dto.OperateByID) []string
SearchWithPage(search dto.SearchPageWithType) (int64, interface{}, error)
Create(backupDto dto.BackupOperate) error
GetBuckets(backupDto dto.ForBuckets) ([]interface{}, error)
Update(req dto.BackupOperate) error
Delete(id uint) error
RefreshToken(req dto.OperateByID) error
GetLocalDir() (string, error)
MysqlBackup(db dto.CommonBackup) error
PostgresqlBackup(db dto.CommonBackup) error
@ -62,6 +62,287 @@ func NewIBackupService() IBackupService {
return &BackupService{}
}
// GetLocalDir returns the backup directory configured on the local backup account.
func (u *BackupService) GetLocalDir() (string, error) {
	localAccount, getErr := backupRepo.Get(repo.WithByType(constant.Local))
	if getErr != nil {
		return "", getErr
	}
	return localAccount.BackupPath, nil
}
// SearchWithPage returns a page of backup accounts as dto.BackupInfo items,
// newest first. Credentials are blanked (or base64-re-encoded when
// RememberAuth is set) and sensitive Vars keys are stripped per account type.
func (u *BackupService) SearchWithPage(req dto.SearchPageWithType) (int64, interface{}, error) {
	options := []repo.DBOption{repo.WithOrderBy("created_at desc")}
	if len(req.Type) != 0 {
		options = append(options, repo.WithByType(req.Type))
	}
	if len(req.Info) != 0 {
		// Fix: the search keyword was passed to repo.WithByType, clobbering
		// the type filter and never matching on name. Filter by name instead.
		options = append(options, repo.WithByName(req.Info))
	}
	count, accounts, err := backupRepo.Page(req.Page, req.PageSize, options...)
	if err != nil {
		return 0, nil, err
	}
	var data []dto.BackupInfo
	for _, account := range accounts {
		var item dto.BackupInfo
		if err := copier.Copy(&item, &account); err != nil {
			global.LOG.Errorf("copy backup account to dto backup info failed, err: %v", err)
		}
		// Cloud accounts show a normalized absolute-looking path in the UI.
		if item.Type != constant.Sftp && item.Type != constant.Local {
			item.BackupPath = path.Join("/", strings.TrimPrefix(item.BackupPath, "/"))
		}
		if !item.RememberAuth {
			// Never echo credentials the user chose not to remember.
			item.AccessKey = ""
			item.Credential = ""
			if account.Type == constant.Sftp {
				varMap := make(map[string]interface{})
				if err := json.Unmarshal([]byte(item.Vars), &varMap); err != nil {
					continue
				}
				delete(varMap, "passPhrase")
				itemVars, _ := json.Marshal(varMap)
				item.Vars = string(itemVars)
			}
		} else {
			item.AccessKey = base64.StdEncoding.EncodeToString([]byte(item.AccessKey))
			item.Credential = base64.StdEncoding.EncodeToString([]byte(item.Credential))
		}
		// OAuth-style accounts must not leak their tokens in list responses.
		if account.Type == constant.OneDrive || account.Type == constant.ALIYUN || account.Type == constant.GoogleDrive {
			varMap := make(map[string]interface{})
			if err := json.Unmarshal([]byte(item.Vars), &varMap); err != nil {
				continue
			}
			delete(varMap, "refresh_token")
			delete(varMap, "drive_id")
			itemVars, _ := json.Marshal(varMap)
			item.Vars = string(itemVars)
		}
		data = append(data, item)
	}
	return count, data, nil
}
// Create registers a new backup account. Credentials arrive base64-encoded,
// are decoded, optionally exchanged for an OAuth refresh token, connectivity
// checked, then encrypted and persisted.
func (u *BackupService) Create(req dto.BackupOperate) error {
	if req.Type == constant.Local {
		return buserr.New(constant.ErrBackupLocalCreate)
	}
	if req.Type != constant.Sftp && req.BackupPath != "/" {
		req.BackupPath = strings.TrimPrefix(req.BackupPath, "/")
	}
	backup, _ := backupRepo.Get(repo.WithByName(req.Name))
	if backup.ID != 0 {
		return constant.ErrRecordExist
	}
	if err := copier.Copy(&backup, &req); err != nil {
		return errors.WithMessage(constant.ErrStructTransform, err.Error())
	}
	itemAccessKey, err := base64.StdEncoding.DecodeString(backup.AccessKey)
	if err != nil {
		return err
	}
	backup.AccessKey = string(itemAccessKey)
	itemCredential, err := base64.StdEncoding.DecodeString(backup.Credential)
	if err != nil {
		return err
	}
	backup.Credential = string(itemCredential)
	if req.Type == constant.OneDrive || req.Type == constant.GoogleDrive {
		if err := loadRefreshTokenByCode(&backup); err != nil {
			return err
		}
	}
	// Fix: use constant.Local instead of the raw "LOCAL" literal, and avoid
	// calling err.Error() when the check returns isOk=false with a nil error
	// (the original panicked on that path).
	if req.Type != constant.Local {
		isOk, err := u.checkBackupConn(&backup)
		if err != nil || !isOk {
			msg := "connection check failed"
			if err != nil {
				msg = err.Error()
			}
			return buserr.WithMap(constant.ErrBackupCheck, map[string]interface{}{"err": msg}, err)
		}
	}
	backup.AccessKey, err = encrypt.StringEncrypt(backup.AccessKey)
	if err != nil {
		return err
	}
	backup.Credential, err = encrypt.StringEncrypt(backup.Credential)
	if err != nil {
		return err
	}
	return backupRepo.Create(&backup)
}
// GetBuckets decodes the base64-encoded credentials in the request, builds a
// cloud-storage client for the account type, and lists the reachable buckets.
func (u *BackupService) GetBuckets(req dto.ForBuckets) ([]interface{}, error) {
	rawAccessKey, decodeErr := base64.StdEncoding.DecodeString(req.AccessKey)
	if decodeErr != nil {
		return nil, decodeErr
	}
	req.AccessKey = string(rawAccessKey)
	rawCredential, decodeErr := base64.StdEncoding.DecodeString(req.Credential)
	if decodeErr != nil {
		return nil, decodeErr
	}
	req.Credential = string(rawCredential)

	varMap := make(map[string]interface{})
	if unmarshalErr := json.Unmarshal([]byte(req.Vars), &varMap); unmarshalErr != nil {
		return nil, unmarshalErr
	}
	// Map the generic key/credential pair onto the provider-specific var names.
	switch req.Type {
	case constant.Sftp, constant.WebDAV:
		varMap["username"] = req.AccessKey
		varMap["password"] = req.Credential
	case constant.OSS, constant.S3, constant.MinIo, constant.Cos, constant.Kodo:
		varMap["accessKey"] = req.AccessKey
		varMap["secretKey"] = req.Credential
	}
	storageClient, newErr := cloud_storage.NewCloudStorageClient(req.Type, varMap)
	if newErr != nil {
		return nil, newErr
	}
	return storageClient.ListBuckets()
}
// Delete removes the backup account with the given ID. The built-in local
// account is protected and cannot be deleted.
func (u *BackupService) Delete(id uint) error {
	account, _ := backupRepo.Get(repo.WithByID(id))
	if account.ID == 0 {
		return constant.ErrRecordNotFound
	}
	if account.Type == constant.Local {
		return buserr.New(constant.ErrBackupLocalDelete)
	}
	return backupRepo.Delete(repo.WithByID(id))
}
// Update modifies an existing backup account: decode the base64 credentials,
// migrate local backup data if the local path changed, re-run the OAuth code
// exchange and the connectivity check, then save encrypted.
func (u *BackupService) Update(req dto.BackupOperate) error {
	backup, _ := backupRepo.Get(repo.WithByID(req.ID))
	if backup.ID == 0 {
		return constant.ErrRecordNotFound
	}
	if req.Type != constant.Sftp && req.Type != constant.Local && req.BackupPath != "/" {
		req.BackupPath = strings.TrimPrefix(req.BackupPath, "/")
	}
	var newBackup model.BackupAccount
	if err := copier.Copy(&newBackup, &req); err != nil {
		return errors.WithMessage(constant.ErrStructTransform, err.Error())
	}
	itemAccessKey, err := base64.StdEncoding.DecodeString(newBackup.AccessKey)
	if err != nil {
		return err
	}
	newBackup.AccessKey = string(itemAccessKey)
	itemCredential, err := base64.StdEncoding.DecodeString(newBackup.Credential)
	if err != nil {
		return err
	}
	newBackup.Credential = string(itemCredential)
	if backup.Type == constant.Local {
		// NOTE(review): this compares Vars but migrates BackupPath — looks
		// like the local dir is stored inside Vars; confirm before changing.
		if newBackup.Vars != backup.Vars {
			oldPath := backup.BackupPath
			newPath := newBackup.BackupPath
			if strings.HasSuffix(newPath, "/") && newPath != "/" {
				newPath = newPath[:strings.LastIndex(newPath, "/")]
			}
			if err := files.NewFileOp().CopyDir(oldPath, newPath); err != nil {
				return err
			}
		}
	}
	if newBackup.Type == constant.OneDrive || newBackup.Type == constant.GoogleDrive {
		// Fix: exchange the auth code on the record that gets saved. The
		// original passed the old `backup`, so the refreshed token was lost.
		if err := loadRefreshTokenByCode(&newBackup); err != nil {
			return err
		}
	}
	// Fix: use constant.Local (not the raw "LOCAL" literal) and the
	// constant.ErrBackupCheck key (not a raw string, matching Create); also
	// avoid err.Error() when the check fails with a nil error.
	if backup.Type != constant.Local {
		isOk, err := u.checkBackupConn(&newBackup)
		if err != nil || !isOk {
			msg := "connection check failed"
			if err != nil {
				msg = err.Error()
			}
			return buserr.WithMap(constant.ErrBackupCheck, map[string]interface{}{"err": msg}, err)
		}
	}
	newBackup.AccessKey, err = encrypt.StringEncrypt(newBackup.AccessKey)
	if err != nil {
		return err
	}
	newBackup.Credential, err = encrypt.StringEncrypt(newBackup.Credential)
	if err != nil {
		return err
	}
	newBackup.ID = backup.ID
	return backupRepo.Save(&newBackup)
}
// RefreshToken renews the OAuth refresh token stored in the account's Vars
// (OneDrive / Google Drive / Aliyun) and records the refresh outcome.
func (u *BackupService) RefreshToken(req dto.OperateByID) error {
	backup, _ := backupRepo.Get(repo.WithByID(req.ID))
	if backup.ID == 0 {
		return constant.ErrRecordNotFound
	}
	varMap := make(map[string]interface{})
	if err := json.Unmarshal([]byte(backup.Vars), &varMap); err != nil {
		return fmt.Errorf("Failed to refresh %s - %s token, please retry, err: %v", backup.Type, backup.Name, err)
	}
	var (
		refreshToken string
		err          error
	)
	switch backup.Type {
	case constant.OneDrive:
		refreshToken, err = client.RefreshToken("refresh_token", "refreshToken", varMap)
	case constant.GoogleDrive:
		refreshToken, err = client.RefreshGoogleToken("refresh_token", "refreshToken", varMap)
	case constant.ALIYUN:
		refreshToken, err = client.RefreshALIToken(varMap)
	}
	if err != nil {
		varMap["refresh_status"] = constant.StatusFailed
		varMap["refresh_msg"] = err.Error()
		// Fix: persist the failed status. The original mutated varMap here
		// but returned before saving, so the failure was never recorded.
		if varsItem, marshalErr := json.Marshal(varMap); marshalErr == nil {
			backup.Vars = string(varsItem)
			_ = backupRepo.Save(&backup)
		}
		return fmt.Errorf("Failed to refresh %s-%s token, please retry, err: %v", backup.Type, backup.Name, err)
	}
	varMap["refresh_status"] = constant.StatusSuccess
	varMap["refresh_time"] = time.Now().Format(constant.DateTimeLayout)
	varMap["refresh_token"] = refreshToken
	varsItem, _ := json.Marshal(varMap)
	backup.Vars = string(varsItem)
	return backupRepo.Save(&backup)
}
// checkBackupConn verifies a backup account by writing a small marker file
// locally and uploading it to <backupPath>/test/1panel on the target storage.
func (u *BackupService) checkBackupConn(backup *model.BackupAccount) (bool, error) {
	storageClient, err := newClient(backup)
	if err != nil {
		return false, err
	}
	fileItem := path.Join(global.CONF.System.BaseDir, "1panel/tmp/test/1panel")
	if _, err := os.Stat(path.Dir(fileItem)); err != nil && os.IsNotExist(err) {
		if err = os.MkdirAll(path.Dir(fileItem), os.ModePerm); err != nil {
			return false, err
		}
	}
	file, err := os.OpenFile(fileItem, os.O_WRONLY|os.O_CREATE, constant.FilePerm)
	if err != nil {
		return false, err
	}
	defer file.Close()
	write := bufio.NewWriter(file)
	_, _ = write.WriteString("1Panel 备份账号测试文件。\n")
	_, _ = write.WriteString("1Panel 備份賬號測試文件。\n")
	_, _ = write.WriteString("1Panel Backs up account test files.\n")
	_, _ = write.WriteString("1Panelアカウントのテストファイルをバックアップします。\n")
	// Fix: surface buffered-write failures instead of silently uploading a
	// possibly truncated marker file (the original ignored the Flush error).
	if err := write.Flush(); err != nil {
		return false, err
	}
	targetPath := path.Join(backup.BackupPath, "test/1panel")
	if backup.Type != constant.Sftp && backup.Type != constant.Local && targetPath != "/" {
		targetPath = strings.TrimPrefix(targetPath, "/")
	}
	return storageClient.Upload(fileItem, targetPath)
}
func (u *BackupService) Sync(req dto.SyncFromMaster) error {
var accountItem model.BackupAccount
if err := json.Unmarshal([]byte(req.Data), &accountItem); err != nil {
@ -109,48 +390,6 @@ func (u *BackupService) LoadBackupOptions() ([]dto.BackupOption, error) {
return data, nil
}
// SearchRecordsWithPage pages backup records filtered by type, name, and
// detail name, resolves their file sizes, and returns them newest-first.
func (u *BackupService) SearchRecordsWithPage(search dto.RecordSearch) (int64, []dto.BackupRecords, error) {
	total, records, pageErr := backupRepo.PageRecord(
		search.Page, search.PageSize,
		repo.WithOrderBy("created_at desc"),
		repo.WithByName(search.Name),
		repo.WithByType(search.Type),
		repo.WithByDetailName(search.DetailName),
	)
	if pageErr != nil {
		return 0, nil, pageErr
	}
	if total == 0 {
		return 0, nil, nil
	}
	items, sizeErr := u.loadRecordSize(records)
	sort.Slice(items, func(i, j int) bool {
		return items[i].CreatedAt.After(items[j].CreatedAt)
	})
	return total, items, sizeErr
}
// SearchRecordsByCronjobWithPage pages the backup records produced by one
// cronjob, resolves their file sizes, and returns them newest-first.
func (u *BackupService) SearchRecordsByCronjobWithPage(search dto.RecordSearchByCronjob) (int64, []dto.BackupRecords, error) {
	total, records, pageErr := backupRepo.PageRecord(
		search.Page, search.PageSize,
		repo.WithOrderBy("created_at desc"),
		backupRepo.WithByCronID(search.CronjobID),
	)
	if pageErr != nil {
		return 0, nil, pageErr
	}
	if total == 0 {
		return 0, nil, nil
	}
	items, sizeErr := u.loadRecordSize(records)
	sort.Slice(items, func(i, j int) bool {
		return items[i].CreatedAt.After(items[j].CreatedAt)
	})
	return total, items, sizeErr
}
func (u *BackupService) CheckUsed(id uint) error {
cronjobs, _ := cronjobRepo.List()
for _, job := range cronjobs {
@ -167,178 +406,8 @@ func (u *BackupService) CheckUsed(id uint) error {
return nil
}
// loadSizeHelper caches per-account state while resolving backup record file
// sizes: the account's name and path plus a ready cloud-storage client.
type loadSizeHelper struct {
	isOk       bool // whether the client below was initialized successfully
	backupName string
	backupPath string
	client     cloud_storage.CloudStorageClient
}
// DownloadRecord ensures the backup record's file is available on local disk
// and returns its path. Local accounts already hold the file; for cloud
// accounts the file is fetched into the panel's download directory.
// NOTE(review): when the remote file does not exist, the (possibly missing)
// targetPath is still returned without error — confirm callers tolerate this.
func (u *BackupService) DownloadRecord(info dto.DownloadRecord) (string, error) {
	account, client, err := NewBackupClientWithID(info.DownloadAccountID)
	if err != nil {
		return "", fmt.Errorf("new cloud storage client failed, err: %v", err)
	}
	// Fix: compare against constant.Local like the rest of this file instead
	// of the raw "LOCAL" literal.
	if account.Type == constant.Local {
		return path.Join(global.CONF.System.Backup, info.FileDir, info.FileName), nil
	}
	targetPath := fmt.Sprintf("%s/download/%s/%s", constant.DataDir, info.FileDir, info.FileName)
	if _, err := os.Stat(path.Dir(targetPath)); err != nil && os.IsNotExist(err) {
		if err = os.MkdirAll(path.Dir(targetPath), os.ModePerm); err != nil {
			global.LOG.Errorf("mkdir %s failed, err: %v", path.Dir(targetPath), err)
		}
	}
	srcPath := fmt.Sprintf("%s/%s", info.FileDir, info.FileName)
	if len(account.BackupPath) != 0 {
		srcPath = path.Join(strings.TrimPrefix(account.BackupPath, "/"), srcPath)
	}
	if exist, _ := client.Exist(srcPath); exist {
		isOK, err := client.Download(srcPath, targetPath)
		if !isOK {
			return "", fmt.Errorf("cloud storage download failed, err: %v", err)
		}
	}
	return targetPath, nil
}
// DeleteRecordByName removes every backup record matching the given type,
// name and detail name. When withDeleteFile is set, the remote file of each
// record is deleted first (best effort) and records are removed one by one.
func (u *BackupService) DeleteRecordByName(backupType, name, detailName string, withDeleteFile bool) error {
	if !withDeleteFile {
		return backupRepo.DeleteRecord(context.Background(), repo.WithByType(backupType), repo.WithByName(name), repo.WithByDetailName(detailName))
	}
	records, err := backupRepo.ListRecord(repo.WithByType(backupType), repo.WithByName(name), repo.WithByDetailName(detailName))
	if err != nil {
		return err
	}
	for _, record := range records {
		_, client, clientErr := NewBackupClientWithID(record.DownloadAccountID)
		if clientErr != nil {
			global.LOG.Errorf("new client for backup account failed, err: %v", clientErr)
			continue
		}
		remoteFile := path.Join(record.FileDir, record.FileName)
		if _, delErr := client.Delete(remoteFile); delErr != nil {
			global.LOG.Errorf("remove file %s failed, err: %v", remoteFile, delErr)
		}
		_ = backupRepo.DeleteRecord(context.Background(), repo.WithByID(record.ID))
	}
	return nil
}
// BatchDeleteRecord deletes the given backup records by ID, first removing
// their remote files on a best-effort basis (client/delete failures are
// logged, never fatal), then deleting all records in one repo call.
func (u *BackupService) BatchDeleteRecord(ids []uint) error {
	records, err := backupRepo.ListRecord(repo.WithByIDs(ids))
	if err != nil {
		return err
	}
	for _, record := range records {
		_, client, clientErr := NewBackupClientWithID(record.DownloadAccountID)
		if clientErr != nil {
			global.LOG.Errorf("new client for backup account failed, err: %v", clientErr)
			continue
		}
		remoteFile := path.Join(record.FileDir, record.FileName)
		if _, delErr := client.Delete(remoteFile); delErr != nil {
			global.LOG.Errorf("remove file %s failed, err: %v", remoteFile, delErr)
		}
	}
	return backupRepo.DeleteRecord(context.Background(), repo.WithByIDs(ids))
}
// ListAppRecords returns the "app" backup records for the given app name and
// detail name whose file names start with fileName, oldest first.
func (u *BackupService) ListAppRecords(name, detailName, fileName string) ([]model.BackupRecord, error) {
	records, err := backupRepo.ListRecord(
		repo.WithOrderBy("created_at asc"),
		repo.WithByName(name),
		repo.WithByType("app"),
		backupRepo.WithFileNameStartWith(fileName),
		backupRepo.WithByDetailName(detailName),
	)
	if err != nil {
		return nil, err
	}
	return records, nil
}
// ListFiles lists the snapshot archives stored under the given backup
// account (under the account's backup path when one is configured) and
// returns their base names. Any error yields an empty list.
func (u *BackupService) ListFiles(req dto.OperateByID) []string {
	var names []string
	account, client, err := NewBackupClientWithID(req.ID)
	if err != nil {
		return names
	}
	prefix := "system_snapshot"
	if len(account.BackupPath) != 0 {
		prefix = path.Join(strings.TrimPrefix(account.BackupPath, "/"), prefix)
	}
	objects, err := client.ListObjects(prefix)
	if err != nil {
		global.LOG.Debugf("load files failed, err: %v", err)
		return names
	}
	for _, object := range objects {
		if len(object) == 0 {
			continue
		}
		names = append(names, path.Base(object))
	}
	return names
}
// loadRecordSize copies backup records into DTOs and fills in each DTO's
// remote file size. Sizes are looked up concurrently, one goroutine per
// record that has a usable storage client; records whose account has no
// client are returned with Size left at its zero value.
func (u *BackupService) loadRecordSize(records []model.BackupRecord) ([]dto.BackupRecords, error) {
	// Collect the distinct download-account IDs so each client is built once.
	seen := make(map[uint]struct{})
	var accountIDs []string
	for _, record := range records {
		if _, ok := seen[record.DownloadAccountID]; !ok {
			seen[record.DownloadAccountID] = struct{}{}
			accountIDs = append(accountIDs, fmt.Sprintf("%v", record.DownloadAccountID))
		}
	}
	clientMap, err := NewBackupClientMap(accountIDs)
	if err != nil {
		return nil, err
	}

	var (
		datas []dto.BackupRecords
		mu    sync.Mutex // guards datas: the size goroutines below append concurrently
		wg    sync.WaitGroup
	)
	for i := 0; i < len(records); i++ {
		var item dto.BackupRecords
		if err := copier.Copy(&item, &records[i]); err != nil {
			return nil, errors.WithMessage(constant.ErrStructTransform, err.Error())
		}
		itemPath := path.Join(records[i].FileDir, records[i].FileName)
		val, ok := clientMap[fmt.Sprintf("%v", records[i].DownloadAccountID)]
		if !ok {
			datas = append(datas, item)
			continue
		}
		item.AccountName = val.name
		item.AccountType = val.accountType
		item.DownloadAccountID = val.id
		wg.Add(1)
		// item, val and itemPath are declared inside the loop body, so each
		// goroutine captures its own per-iteration copies.
		go func() {
			defer wg.Done()
			item.Size, _ = val.client.Size(path.Join(strings.TrimLeft(val.backupPath, "/"), itemPath))
			// Fix: the original appended to datas from multiple goroutines
			// without synchronization, a data race on the shared slice.
			mu.Lock()
			datas = append(datas, item)
			mu.Unlock()
		}()
	}
	wg.Wait()
	return datas, nil
}
func NewBackupClientWithID(id uint) (*model.BackupAccount, cloud_storage.CloudStorageClient, error) {
var account model.BackupAccount
if global.IsMaster {
var setting model.Setting
if err := global.CoreDB.Where("key = ?", "EncryptKey").First(&setting).Error; err != nil {
return nil, nil, err
}
if err := global.CoreDB.Where("id = ?", id).First(&account).Error; err != nil {
return nil, nil, err
}
if account.ID == 0 {
return nil, nil, constant.ErrRecordNotFound
}
account.AccessKey, _ = encrypt.StringDecryptWithKey(account.AccessKey, setting.Value)
account.Credential, _ = encrypt.StringDecryptWithKey(account.Credential, setting.Value)
} else {
account, _ = backupRepo.Get(repo.WithByID(id))
}
account, _ := backupRepo.Get(repo.WithByID(id))
backClient, err := newClient(&account)
if err != nil {
return nil, nil, err
@ -356,55 +425,22 @@ type backupClientHelper struct {
func NewBackupClientMap(ids []string) (map[string]backupClientHelper, error) {
var accounts []model.BackupAccount
if global.IsMaster {
var setting model.Setting
if err := global.CoreDB.Where("key = ?", "EncryptKey").First(&setting).Error; err != nil {
return nil, err
}
if err := global.CoreDB.Where("id in (?)", ids).Find(&accounts).Error; err != nil {
return nil, err
}
if len(accounts) == 0 {
return nil, constant.ErrRecordNotFound
}
for i := 0; i < len(accounts); i++ {
accounts[i].AccessKey, _ = encrypt.StringDecryptWithKey(accounts[i].AccessKey, setting.Value)
accounts[i].Credential, _ = encrypt.StringDecryptWithKey(accounts[i].Credential, setting.Value)
}
} else {
var idItems []uint
for i := 0; i < len(ids); i++ {
item, _ := strconv.Atoi(ids[i])
idItems = append(idItems, uint(item))
}
accounts, _ = backupRepo.List(repo.WithByIDs(idItems))
var idItems []uint
for i := 0; i < len(ids); i++ {
item, _ := strconv.Atoi(ids[i])
idItems = append(idItems, uint(item))
}
accounts, _ = backupRepo.List(repo.WithByIDs(idItems))
clientMap := make(map[string]backupClientHelper)
for _, item := range accounts {
if !global.IsMaster {
accessItem, err := base64.StdEncoding.DecodeString(item.AccessKey)
if err != nil {
return nil, err
}
item.AccessKey = string(accessItem)
secretItem, err := base64.StdEncoding.DecodeString(item.Credential)
if err != nil {
return nil, err
}
item.Credential = string(secretItem)
}
backClient, err := newClient(&item)
if err != nil {
return nil, err
}
pathItem := item.BackupPath
if item.BackupPath != "/" {
pathItem = strings.TrimPrefix(item.BackupPath, "/")
}
clientMap[fmt.Sprintf("%v", item.ID)] = backupClientHelper{
client: backClient,
backupPath: pathItem,
name: item.Name,
backupPath: item.BackupPath,
accountType: item.Type,
id: item.ID,
}
@ -414,10 +450,15 @@ func NewBackupClientMap(ids []string) (map[string]backupClientHelper, error) {
func newClient(account *model.BackupAccount) (cloud_storage.CloudStorageClient, error) {
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(account.Vars), &varMap); err != nil {
return nil, err
if len(account.Vars) != 0 {
if err := json.Unmarshal([]byte(account.Vars), &varMap); err != nil {
return nil, err
}
}
varMap["bucket"] = account.Bucket
varMap["backupPath"] = account.BackupPath
account.AccessKey, _ = encrypt.StringDecrypt(account.AccessKey)
account.Credential, _ = encrypt.StringDecrypt(account.Credential)
switch account.Type {
case constant.Sftp, constant.WebDAV:
varMap["username"] = account.AccessKey
@ -437,23 +478,57 @@ func newClient(account *model.BackupAccount) (cloud_storage.CloudStorageClient,
return client, nil
}
func LoadLocalDirByStr(vars string) (string, error) {
func loadRefreshTokenByCode(backup *model.BackupAccount) error {
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(vars), &varMap); err != nil {
return "", err
if err := json.Unmarshal([]byte(backup.Vars), &varMap); err != nil {
return fmt.Errorf("unmarshal backup vars failed, err: %v", err)
}
if _, ok := varMap["dir"]; !ok {
return "", errors.New("load local backup dir failed")
}
baseDir, ok := varMap["dir"].(string)
if ok {
if _, err := os.Stat(baseDir); err != nil && os.IsNotExist(err) {
if err = os.MkdirAll(baseDir, os.ModePerm); err != nil {
return "", fmt.Errorf("mkdir %s failed, err: %v", baseDir, err)
}
return baseDir, nil
refreshToken := ""
var err error
if backup.Type == constant.GoogleDrive {
refreshToken, err = client.RefreshGoogleToken("authorization_code", "refreshToken", varMap)
if err != nil {
return err
}
} else {
refreshToken, err = client.RefreshToken("authorization_code", "refreshToken", varMap)
if err != nil {
return err
}
return baseDir, nil
}
return "", fmt.Errorf("error type dir: %T", varMap["dir"])
delete(varMap, "code")
varMap["refresh_status"] = constant.StatusSuccess
varMap["refresh_time"] = time.Now().Format(constant.DateTimeLayout)
varMap["refresh_token"] = refreshToken
itemVars, err := json.Marshal(varMap)
if err != nil {
return fmt.Errorf("json marshal var map failed, err: %v", err)
}
backup.Vars = string(itemVars)
return nil
}
// loadBackupNamesByID turns a comma-separated list of backup account IDs into
// human-readable "<type> - <name>" labels, and also returns the label of the
// account matching downloadID (empty when that ID is not in the list).
func loadBackupNamesByID(accountIDs string, downloadID uint) ([]string, string, error) {
	var ids []uint
	for _, raw := range strings.Split(accountIDs, ",") {
		if raw == "" {
			continue
		}
		parsed, _ := strconv.Atoi(raw)
		ids = append(ids, uint(parsed))
	}
	list, err := backupRepo.List(repo.WithByIDs(ids))
	if err != nil {
		return nil, "", err
	}
	var (
		accounts        []string
		downloadAccount string
	)
	for _, item := range list {
		label := fmt.Sprintf("%s - %s", item.Type, item.Name)
		accounts = append(accounts, label)
		if item.ID == downloadID {
			downloadAccount = label
		}
	}
	return accounts, downloadAccount, nil
}

View File

@ -0,0 +1,251 @@
package service
import (
"context"
"fmt"
"os"
"path"
"sync"
"github.com/1Panel-dev/1Panel/agent/app/dto"
"github.com/1Panel-dev/1Panel/agent/app/model"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/1Panel-dev/1Panel/agent/constant"
"github.com/1Panel-dev/1Panel/agent/global"
"github.com/jinzhu/copier"
)
// BackupRecordService is the default implementation of IBackupRecordService,
// backed by backupRepo/snapshotRepo and the backup-account storage clients.
type BackupRecordService struct{}

// IBackupRecordService describes operations on backup records: paged search
// (optionally scoped to one cronjob), download, deletion (by name or by IDs),
// app record listing, snapshot file listing, and remote size lookup.
type IBackupRecordService interface {
	SearchRecordsWithPage(search dto.RecordSearch) (int64, []dto.BackupRecords, error)
	SearchRecordsByCronjobWithPage(search dto.RecordSearchByCronjob) (int64, []dto.BackupRecords, error)
	DownloadRecord(info dto.DownloadRecord) (string, error)
	DeleteRecordByName(backupType, name, detailName string, withDeleteFile bool) error
	BatchDeleteRecord(ids []uint) error
	ListAppRecords(name, detailName, fileName string) ([]model.BackupRecord, error)
	ListFiles(req dto.OperateByID) []string
	LoadRecordSize(req dto.SearchForSize) ([]dto.RecordFileSize, error)
}

// NewIBackupRecordService returns the default IBackupRecordService implementation.
func NewIBackupRecordService() IBackupRecordService {
	return &BackupRecordService{}
}
// SearchRecordsWithPage returns one page of backup records filtered by name,
// type and detail name, newest first, copied into DTOs. Copy failures are
// logged and the (possibly zero-valued) item is still included.
func (u *BackupRecordService) SearchRecordsWithPage(search dto.RecordSearch) (int64, []dto.BackupRecords, error) {
	total, records, err := backupRepo.PageRecord(
		search.Page, search.PageSize,
		repo.WithOrderBy("created_at desc"),
		repo.WithByName(search.Name),
		repo.WithByType(search.Type),
		repo.WithByDetailName(search.DetailName),
	)
	if err != nil {
		return 0, nil, err
	}
	var data []dto.BackupRecords
	for i := range records {
		var item dto.BackupRecords
		if copyErr := copier.Copy(&item, &records[i]); copyErr != nil {
			global.LOG.Errorf("copy backup account to dto backup info failed, err: %v", copyErr)
		}
		data = append(data, item)
	}
	return total, data, err
}
// SearchRecordsByCronjobWithPage returns one page of the backup records
// produced by the given cronjob, newest first, copied into DTOs. Copy
// failures are logged and the item is still included.
func (u *BackupRecordService) SearchRecordsByCronjobWithPage(search dto.RecordSearchByCronjob) (int64, []dto.BackupRecords, error) {
	total, records, err := backupRepo.PageRecord(
		search.Page, search.PageSize,
		repo.WithOrderBy("created_at desc"),
		backupRepo.WithByCronID(search.CronjobID),
	)
	if err != nil {
		return 0, nil, err
	}
	var data []dto.BackupRecords
	for i := range records {
		var item dto.BackupRecords
		if copyErr := copier.Copy(&item, &records[i]); copyErr != nil {
			global.LOG.Errorf("copy backup account to dto backup info failed, err: %v", copyErr)
		}
		data = append(data, item)
	}
	return total, data, err
}
// DownloadRecord returns a local path for the given record, downloading it
// from the remote backup account first when the account is not LOCAL.
func (u *BackupRecordService) DownloadRecord(info dto.DownloadRecord) (string, error) {
	account, client, err := NewBackupClientWithID(info.DownloadAccountID)
	if err != nil {
		return "", fmt.Errorf("new cloud storage client failed, err: %v", err)
	}
	// Local backups are already on disk; just point at them.
	if account.Type == "LOCAL" {
		return path.Join(global.CONF.System.Backup, info.FileDir, info.FileName), nil
	}
	targetPath := fmt.Sprintf("%s/download/%s/%s", constant.DataDir, info.FileDir, info.FileName)
	targetDir := path.Dir(targetPath)
	if _, statErr := os.Stat(targetDir); statErr != nil && os.IsNotExist(statErr) {
		if mkErr := os.MkdirAll(targetDir, os.ModePerm); mkErr != nil {
			global.LOG.Errorf("mkdir %s failed, err: %v", targetDir, mkErr)
		}
	}
	srcPath := fmt.Sprintf("%s/%s", info.FileDir, info.FileName)
	if len(account.BackupPath) != 0 {
		srcPath = path.Join(account.BackupPath, srcPath)
	}
	// NOTE(review): Exist errors are ignored; when the remote object is
	// missing, targetPath is returned without any download having happened.
	if exist, _ := client.Exist(srcPath); exist {
		if ok, dlErr := client.Download(srcPath, targetPath); !ok {
			return "", fmt.Errorf("cloud storage download failed, err: %v", dlErr)
		}
	}
	return targetPath, nil
}
// DeleteRecordByName removes all backup records matching type/name/detail
// name. When withDeleteFile is true the remote files are deleted first (best
// effort, failures are only logged) and records are removed individually.
func (u *BackupRecordService) DeleteRecordByName(backupType, name, detailName string, withDeleteFile bool) error {
	if !withDeleteFile {
		return backupRepo.DeleteRecord(context.Background(), repo.WithByType(backupType), repo.WithByName(name), repo.WithByDetailName(detailName))
	}
	records, err := backupRepo.ListRecord(repo.WithByType(backupType), repo.WithByName(name), repo.WithByDetailName(detailName))
	if err != nil {
		return err
	}
	for _, record := range records {
		_, client, clientErr := NewBackupClientWithID(record.DownloadAccountID)
		if clientErr != nil {
			global.LOG.Errorf("new client for backup account failed, err: %v", clientErr)
			continue
		}
		remoteFile := path.Join(record.FileDir, record.FileName)
		if _, delErr := client.Delete(remoteFile); delErr != nil {
			global.LOG.Errorf("remove file %s failed, err: %v", remoteFile, delErr)
		}
		_ = backupRepo.DeleteRecord(context.Background(), repo.WithByID(record.ID))
	}
	return nil
}
// BatchDeleteRecord deletes the given backup records by ID, first removing
// their remote files on a best-effort basis, then deleting all records in a
// single repo call.
func (u *BackupRecordService) BatchDeleteRecord(ids []uint) error {
	records, err := backupRepo.ListRecord(repo.WithByIDs(ids))
	if err != nil {
		return err
	}
	for _, record := range records {
		_, client, clientErr := NewBackupClientWithID(record.DownloadAccountID)
		if clientErr != nil {
			global.LOG.Errorf("new client for backup account failed, err: %v", clientErr)
			continue
		}
		remoteFile := path.Join(record.FileDir, record.FileName)
		if _, delErr := client.Delete(remoteFile); delErr != nil {
			global.LOG.Errorf("remove file %s failed, err: %v", remoteFile, delErr)
		}
	}
	return backupRepo.DeleteRecord(context.Background(), repo.WithByIDs(ids))
}
// ListAppRecords returns the "app" backup records for the given app name and
// detail name whose file names start with fileName, oldest first.
func (u *BackupRecordService) ListAppRecords(name, detailName, fileName string) ([]model.BackupRecord, error) {
	records, err := backupRepo.ListRecord(
		repo.WithOrderBy("created_at asc"),
		repo.WithByName(name),
		repo.WithByType("app"),
		backupRepo.WithFileNameStartWith(fileName),
		backupRepo.WithByDetailName(detailName),
	)
	if err != nil {
		return nil, err
	}
	return records, nil
}
// ListFiles lists the snapshot archive names stored by the given backup
// account under the "system_snapshot" prefix; any error yields an empty list.
func (u *BackupRecordService) ListFiles(req dto.OperateByID) []string {
	var names []string
	_, client, err := NewBackupClientWithID(req.ID)
	if err != nil {
		return names
	}
	objects, err := client.ListObjects("system_snapshot")
	if err != nil {
		global.LOG.Debugf("load files failed, err: %v", err)
		return names
	}
	for _, object := range objects {
		if len(object) == 0 {
			continue
		}
		names = append(names, path.Base(object))
	}
	return names
}
// backupSizeHelper carries the data needed to look up one backup file's
// remote size: the owning entry's ID, the account to query, and the path of
// the stored file relative to the account's backup path.
type backupSizeHelper struct {
	ID         uint   `json:"id"`
	DownloadID uint   `json:"downloadID"`
	FilePath   string `json:"filePath"`
	Size       uint   `json:"size"`
}
// LoadRecordSize looks up the remote file size for one page of backup
// entries. Which table the page comes from depends on req.Type: "snapshot"
// pages snapshots, "cronjob" pages one cronjob's backup records, anything
// else pages plain backup records. Sizes are fetched concurrently; entries
// whose account has no usable client keep Size at its zero value.
func (u *BackupRecordService) LoadRecordSize(req dto.SearchForSize) ([]dto.RecordFileSize, error) {
	var list []backupSizeHelper
	switch req.Type {
	case "snapshot":
		_, records, err := snapshotRepo.Page(req.Page, req.PageSize, repo.WithByLikeName(req.Info))
		if err != nil {
			return nil, err
		}
		for _, item := range records {
			list = append(list, backupSizeHelper{ID: item.ID, DownloadID: item.DownloadAccountID, FilePath: fmt.Sprintf("system_snapshot/%s.tar.gz", item.Name)})
		}
	case "cronjob":
		_, records, err := backupRepo.PageRecord(req.Page, req.PageSize, backupRepo.WithByCronID(req.CronjobID))
		if err != nil {
			return nil, err
		}
		for _, item := range records {
			list = append(list, backupSizeHelper{ID: item.ID, DownloadID: item.DownloadAccountID, FilePath: path.Join(item.FileDir, item.FileName)})
		}
	default:
		_, records, err := backupRepo.PageRecord(
			req.Page, req.PageSize,
			repo.WithByName(req.Name),
			repo.WithByType(req.Type),
			repo.WithByDetailName(req.DetailName),
		)
		if err != nil {
			return nil, err
		}
		for _, item := range records {
			list = append(list, backupSizeHelper{ID: item.ID, DownloadID: item.DownloadAccountID, FilePath: path.Join(item.FileDir, item.FileName)})
		}
	}

	// Build one storage client per distinct download account.
	seen := make(map[uint]struct{})
	var accountIDs []string
	for _, record := range list {
		if _, ok := seen[record.DownloadID]; !ok {
			seen[record.DownloadID] = struct{}{}
			accountIDs = append(accountIDs, fmt.Sprintf("%v", record.DownloadID))
		}
	}
	clientMap, err := NewBackupClientMap(accountIDs)
	if err != nil {
		return nil, err
	}

	var (
		datas []dto.RecordFileSize
		mu    sync.Mutex // guards datas: the size goroutines below append concurrently
		wg    sync.WaitGroup
	)
	for i := 0; i < len(list); i++ {
		item := dto.RecordFileSize{ID: list[i].ID}
		val, ok := clientMap[fmt.Sprintf("%v", list[i].DownloadID)]
		if !ok {
			datas = append(datas, item)
			continue
		}
		// Capture the file path per iteration so the goroutine never reads
		// list through the shared loop index (the original passed an index
		// parameter but still dereferenced list[i] inside the closure).
		filePath := list[i].FilePath
		wg.Add(1)
		go func() {
			defer wg.Done()
			item.Size, _ = val.client.Size(path.Join(val.backupPath, filePath))
			// Fix: the original appended to datas from multiple goroutines
			// without synchronization, a data race on the shared slice.
			mu.Lock()
			datas = append(datas, item)
			mu.Unlock()
		}()
	}
	wg.Wait()
	return datas, nil
}

View File

@ -6,8 +6,6 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/gin-gonic/gin"
"io"
"net/http"
"net/url"
@ -22,6 +20,9 @@ import (
"syscall"
"time"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
"github.com/1Panel-dev/1Panel/agent/app/dto"

View File

@ -3,13 +3,14 @@ package service
import (
"bufio"
"fmt"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/1Panel-dev/1Panel/agent/app/dto"
"github.com/1Panel-dev/1Panel/agent/app/model"
"github.com/1Panel-dev/1Panel/agent/constant"
@ -56,6 +57,7 @@ func (u *CronjobService) SearchWithPage(search dto.PageCronjob) (int64, interfac
} else {
item.LastRecordTime = "-"
}
item.SourceAccounts, item.DownloadAccount, _ = loadBackupNamesByID(cronjob.SourceAccountIDs, cronjob.DownloadAccountID)
dtoCronjobs = append(dtoCronjobs, item)
}
return total, dtoCronjobs, err

View File

@ -25,7 +25,7 @@ func (u *CronjobService) HandleJob(cronjob *model.Cronjob) {
message []byte
err error
)
record := cronjobRepo.StartRecords(cronjob.ID, "")
record := cronjobRepo.StartRecords(cronjob.ID, "", cronjob.Type)
go func() {
switch cronjob.Type {
case "shell":

View File

@ -3,12 +3,11 @@ package service
import (
"context"
"fmt"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"os"
"path"
"strconv"
"strings"
"sync"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/1Panel-dev/1Panel/agent/app/dto"
"github.com/1Panel-dev/1Panel/agent/app/model"
@ -29,7 +28,6 @@ type SnapshotService struct {
type ISnapshotService interface {
SearchWithPage(req dto.PageSnapshot) (int64, interface{}, error)
LoadSize(req dto.SearchWithPage) ([]dto.SnapshotFile, error)
LoadSnapshotData() (dto.SnapshotData, error)
SnapshotCreate(req dto.SnapshotCreate, isCron bool) error
SnapshotReCreate(id uint) error
@ -56,58 +54,12 @@ func (u *SnapshotService) SearchWithPage(req dto.PageSnapshot) (int64, interface
if err := copier.Copy(&item, &records[i]); err != nil {
return 0, nil, err
}
item.SourceAccounts, item.DownloadAccount, _ = loadBackupNamesByID(records[i].SourceAccountIDs, records[i].DownloadAccountID)
datas = append(datas, item)
}
return total, datas, err
}
func (u *SnapshotService) LoadSize(req dto.SearchWithPage) ([]dto.SnapshotFile, error) {
_, records, err := snapshotRepo.Page(req.Page, req.PageSize, repo.WithByLikeName(req.Info))
if err != nil {
return nil, err
}
var datas []dto.SnapshotFile
var wg sync.WaitGroup
clientMap := make(map[uint]loadSizeHelper)
for i := 0; i < len(records); i++ {
itemPath := fmt.Sprintf("system_snapshot/%s.tar.gz", records[i].Name)
data := dto.SnapshotFile{ID: records[i].ID, Name: records[i].Name}
accounts := strings.Split(records[i].SourceAccountIDs, ",")
var accountNames []string
for _, account := range accounts {
itemVal, _ := strconv.Atoi(account)
if _, ok := clientMap[uint(itemVal)]; !ok {
backup, client, err := NewBackupClientWithID(uint(itemVal))
if err != nil {
global.LOG.Errorf("load backup client from db failed, err: %v", err)
clientMap[records[i].DownloadAccountID] = loadSizeHelper{}
continue
}
backupName := fmt.Sprintf("%s - %s", backup.Type, backup.Name)
clientMap[uint(itemVal)] = loadSizeHelper{backupPath: strings.TrimLeft(backup.BackupPath, "/"), client: client, isOk: true, backupName: backupName}
accountNames = append(accountNames, backupName)
} else {
accountNames = append(accountNames, clientMap[uint(itemVal)].backupName)
}
}
data.DefaultDownload = clientMap[records[i].DownloadAccountID].backupName
data.From = strings.Join(accountNames, ",")
if clientMap[records[i].DownloadAccountID].isOk {
wg.Add(1)
go func(index int) {
data.Size, _ = clientMap[records[index].DownloadAccountID].client.Size(path.Join(clientMap[records[index].DownloadAccountID].backupPath, itemPath))
datas = append(datas, data)
wg.Done()
}(i)
} else {
datas = append(datas, data)
}
}
wg.Wait()
return datas, nil
}
func (u *SnapshotService) SnapshotImport(req dto.SnapshotImport) error {
if len(req.Names) == 0 {
return fmt.Errorf("incorrect snapshot request body: %v", req.Names)
@ -195,7 +147,7 @@ func (u *SnapshotService) Delete(req dto.SnapshotBatchDelete) error {
}
for _, item := range accounts {
global.LOG.Debugf("remove snapshot file %s.tar.gz from %s", snap.Name, item.name)
_, _ = item.client.Delete(path.Join(item.backupPath, "system_snapshot", snap.Name+".tar.gz"))
_, _ = item.client.Delete(path.Join("system_snapshot", snap.Name+".tar.gz"))
}
}

View File

@ -4,13 +4,14 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"os"
"path"
"strings"
"sync"
"time"
"github.com/1Panel-dev/1Panel/agent/app/repo"
"github.com/1Panel-dev/1Panel/agent/app/dto"
"github.com/1Panel-dev/1Panel/agent/app/model"
"github.com/1Panel-dev/1Panel/agent/app/task"
@ -490,8 +491,8 @@ func snapUpload(snap snapHelper, accounts string, file string) error {
targetAccounts := strings.Split(accounts, ",")
for _, item := range targetAccounts {
snap.Task.LogStart(i18n.GetWithName("SnapUploadTo", fmt.Sprintf("[%s] %s", accountMap[item].name, path.Join(accountMap[item].backupPath, "system_snapshot", path.Base(file)))))
_, err := accountMap[item].client.Upload(source, path.Join(accountMap[item].backupPath, "system_snapshot", path.Base(file)))
snap.Task.LogStart(i18n.GetWithName("SnapUploadTo", fmt.Sprintf("[%s] %s", accountMap[item].name, path.Join("system_snapshot", path.Base(file)))))
_, err := accountMap[item].client.Upload(source, path.Join("system_snapshot", path.Base(file)))
snap.Task.LogWithStatus(i18n.GetWithName("SnapUploadRes", accountMap[item].name), err)
if err != nil {
return err

View File

@ -10,7 +10,6 @@ import (
"encoding/pem"
"errors"
"fmt"
"github.com/1Panel-dev/1Panel/agent/utils/docker"
"net"
"os"
"path"
@ -21,6 +20,8 @@ import (
"syscall"
"time"
"github.com/1Panel-dev/1Panel/agent/utils/docker"
"github.com/1Panel-dev/1Panel/agent/app/task"
"github.com/1Panel-dev/1Panel/agent/utils/common"
@ -604,7 +605,7 @@ func (w WebsiteService) DeleteWebsite(req request.WebsiteDelete) error {
defer tx.Rollback()
go func() {
_ = NewIBackupService().DeleteRecordByName("website", website.PrimaryDomain, website.Alias, req.DeleteBackup)
_ = NewIBackupRecordService().DeleteRecordByName("website", website.PrimaryDomain, website.Alias, req.DeleteBackup)
}()
if err := websiteRepo.DeleteBy(ctx, repo.WithByID(req.ID)); err != nil {

View File

@ -122,9 +122,8 @@ var (
)
var (
ErrBackupInUsed = "ErrBackupInUsed"
ErrOSSConn = "ErrOSSConn"
ErrEntrance = "ErrEntrance"
ErrOSSConn = "ErrOSSConn"
ErrEntrance = "ErrEntrance"
)
var (
@ -132,6 +131,14 @@ var (
ErrFirewallBoth = "ErrFirewallBoth"
)
// backup
var (
ErrBackupInUsed = "ErrBackupInUsed"
ErrBackupCheck = "ErrBackupCheck"
ErrBackupLocalDelete = "ErrBackupLocalDelete"
ErrBackupLocalCreate = "ErrBackupLocalCreate"
)
var (
ErrNotExistUser = "ErrNotExistUser"
)

View File

@ -46,6 +46,9 @@ func Run() {
if _, err := global.Cron.AddJob(fmt.Sprintf("%v %v * * *", mathRand.Intn(60), mathRand.Intn(3)), job.NewAppStoreJob()); err != nil {
global.LOG.Errorf("can not add appstore corn job: %s", err.Error())
}
if _, err := global.Cron.AddJob("0 3 */31 * *", job.NewBackupJob()); err != nil {
global.LOG.Errorf("can not add backup token refresh corn job: %s", err.Error())
}
var cronJobs []model.Cronjob
if err := global.DB.Where("status = ?", constant.StatusEnable).Find(&cronJobs).Error; err != nil {

61
agent/cron/job/backup.go Normal file
View File

@ -0,0 +1,61 @@
package job
import (
"encoding/json"
"time"
"github.com/1Panel-dev/1Panel/agent/app/model"
"github.com/1Panel-dev/1Panel/agent/constant"
"github.com/1Panel-dev/1Panel/agent/global"
"github.com/1Panel-dev/1Panel/agent/utils/cloud_storage/client"
)
// backup is the cron job that keeps cloud backup-account OAuth tokens fresh.
type backup struct{}

// NewBackupJob returns the cron job that periodically refreshes the access
// tokens of OneDrive, Aliyun drive and Google Drive backup accounts.
func NewBackupJob() *backup {
	return &backup{}
}

// Run refreshes the stored refresh_token of every non-public OneDrive,
// Aliyun and Google Drive backup account and persists the refresh outcome
// (status, message/time, new token) back into the account's vars column.
func (b *backup) Run() {
	var backups []model.BackupAccount
	_ = global.DB.Where("`type` in (?) AND is_public = 0", []string{constant.OneDrive, constant.ALIYUN, constant.GoogleDrive}).Find(&backups)
	if len(backups) == 0 {
		return
	}
	for _, backupItem := range backups {
		if backupItem.ID == 0 {
			continue
		}
		global.LOG.Infof("Start to refresh %s-%s access_token ...", backupItem.Type, backupItem.Name)
		varMap := make(map[string]interface{})
		if err := json.Unmarshal([]byte(backupItem.Vars), &varMap); err != nil {
			global.LOG.Errorf("Failed to refresh %s - %s token, please retry, err: %v", backupItem.Type, backupItem.Name, err)
			continue
		}
		var (
			refreshToken string
			err          error
		)
		switch backupItem.Type {
		case constant.OneDrive:
			refreshToken, err = client.RefreshToken("refresh_token", "refreshToken", varMap)
		case constant.GoogleDrive:
			refreshToken, err = client.RefreshGoogleToken("refresh_token", "refreshToken", varMap)
		case constant.ALIYUN:
			refreshToken, err = client.RefreshALIToken(varMap)
		}
		if err != nil {
			varMap["refresh_status"] = constant.StatusFailed
			varMap["refresh_msg"] = err.Error()
			// Persist the failure so it is visible to users; previously the
			// status/message were written into varMap and then discarded.
			saveBackupAccountVars(backupItem.ID, varMap)
			// Fix: the message hardcoded "OneDrive" for every account type.
			global.LOG.Errorf("Failed to refresh %s-%s token, please retry, err: %v", backupItem.Type, backupItem.Name, err)
			continue
		}
		varMap["refresh_status"] = constant.StatusSuccess
		varMap["refresh_time"] = time.Now().Format(constant.DateTimeLayout)
		varMap["refresh_token"] = refreshToken
		saveBackupAccountVars(backupItem.ID, varMap)
		global.LOG.Infof("Refresh %s-%s access_token successful!", backupItem.Type, backupItem.Name)
	}
}

// saveBackupAccountVars marshals varMap and stores it into the vars column of
// the backup account with the given id; marshal and update errors are
// ignored to keep the job best-effort, matching the rest of Run.
func saveBackupAccountVars(id uint, varMap map[string]interface{}) {
	varsItem, _ := json.Marshal(varMap)
	_ = global.DB.Model(&model.BackupAccount{}).Where("id = ?", id).Updates(map[string]interface{}{"vars": string(varsItem)}).Error
}

View File

@ -19,6 +19,12 @@ Success: "Success"
Failed: "Failed"
SystemRestart: "System restart causes task interruption"
#backup
ErrBackupInUsed: "The backup account is currently in use in a scheduled task and cannot be deleted."
ErrBackupCheck: "Backup account test connection failed {{.err}}"
ErrBackupLocalDelete: "Deleting local server backup accounts is not currently supported."
ErrBackupLocalCreate: "Creating local server backup accounts is not currently supported."
#app
ErrPortInUsed: "{{ .detail }} port already in use"
ErrAppLimit: "App exceeds install limit"

View File

@ -20,6 +20,12 @@ Failed: "失敗"
SystemRestart: "系統重啟導致任務中斷"
ErrInvalidChar: "禁止使用非法字元"
#backup
ErrBackupInUsed: "該備份帳號已在計劃任務中使用,無法刪除"
ErrBackupCheck: "備份帳號測試連接失敗 {{.err}}"
ErrBackupLocalDelete: "暫不支持刪除本地伺服器備份帳號"
ErrBackupLocalCreate: "暫不支持創建本地伺服器備份帳號"
#app
ErrPortInUsed: "{{ .detail }} 端口已被佔用!"
ErrAppLimit: "應用超出安裝數量限制"

View File

@ -19,6 +19,12 @@ Success: "成功"
Failed: "失败"
SystemRestart: "系统重启导致任务中断"
#backup
ErrBackupInUsed: "该备份账号已在计划任务中使用,无法删除"
ErrBackupCheck: "备份账号测试连接失败 {{ .err}}"
ErrBackupLocalDelete: "暂不支持删除本地服务器备份账号"
ErrBackupLocalCreate: "暂不支持创建本地服务器备份账号"
#app
ErrPortInUsed: "{{ .detail }} 端口已被占用!"
ErrAppLimit: "应用超出安装数量限制"

View File

@ -66,26 +66,10 @@ func handleCronjobStatus() {
}
func loadLocalDir() {
var vars string
if global.IsMaster {
var account model.BackupAccount
if err := global.CoreDB.Where("id = 1").First(&account).Error; err != nil {
global.LOG.Errorf("load local backup account info failed, err: %v", err)
return
}
vars = account.Vars
} else {
account, _, err := service.NewBackupClientWithID(1)
if err != nil {
global.LOG.Errorf("load local backup account info failed, err: %v", err)
return
}
vars = account.Vars
}
localDir, err := service.LoadLocalDirByStr(vars)
if err != nil {
global.LOG.Errorf("load local backup dir failed, err: %v", err)
var account model.BackupAccount
if err := global.DB.Where("`type` = ?", constant.Local).First(&account).Error; err != nil {
global.LOG.Errorf("load local backup account info failed, err: %v", err)
return
}
global.CONF.System.Backup = localDir
global.CONF.System.Backup = account.BackupPath
}

View File

@ -22,6 +22,7 @@ func InitAgentDB() {
migrations.InitDefaultCA,
migrations.InitPHPExtensions,
migrations.InitNodePort,
migrations.InitBackup,
})
if err := m.Migrate(); err != nil {
global.LOG.Error(err)

View File

@ -2,6 +2,7 @@ package migrations
import (
"fmt"
"path"
"github.com/1Panel-dev/1Panel/agent/app/dto/request"
"github.com/1Panel-dev/1Panel/agent/app/model"
@ -17,7 +18,7 @@ import (
)
var AddTable = &gormigrate.Migration{
ID: "20241231-add-table",
ID: "20240108-add-table",
Migrate: func(tx *gorm.DB) error {
return tx.AutoMigrate(
&model.AppDetail{},
@ -244,3 +245,17 @@ var InitNodePort = &gormigrate.Migration{
return nil
},
}
// InitBackup seeds the backup_accounts table with the default local-disk
// backup account pointing at <baseDir>/1panel/backup.
var InitBackup = &gormigrate.Migration{
	ID: "20241226-init-backup",
	Migrate: func(tx *gorm.DB) error {
		localAccount := model.BackupAccount{
			Name:       "localhost",
			Type:       "LOCAL",
			BackupPath: path.Join(global.CONF.System.BaseDir, "1panel/backup"),
		}
		return tx.Create(&localAccount).Error
	},
}

View File

@ -14,12 +14,21 @@ func (s *BackupRouter) InitRouter(Router *gin.RouterGroup) {
backupRouter.GET("/check/:id", baseApi.CheckBackupUsed)
backupRouter.POST("/sync", baseApi.SyncBackupAccount)
backupRouter.GET("/options", baseApi.LoadBackupOptions)
backupRouter.POST("/search", baseApi.SearchBackup)
backupRouter.GET("/local", baseApi.GetLocalDir)
backupRouter.POST("/refresh/token", baseApi.RefreshToken)
backupRouter.POST("/buckets", baseApi.ListBuckets)
backupRouter.POST("", baseApi.CreateBackup)
backupRouter.POST("/del", baseApi.DeleteBackup)
backupRouter.POST("/update", baseApi.UpdateBackup)
backupRouter.POST("/backup", baseApi.Backup)
backupRouter.POST("/recover", baseApi.Recover)
backupRouter.POST("/recover/byupload", baseApi.RecoverByUpload)
backupRouter.POST("/search/files", baseApi.LoadFilesFromBackup)
backupRouter.POST("/record/search", baseApi.SearchBackupRecords)
backupRouter.POST("/record/size", baseApi.LoadBackupRecordSize)
backupRouter.POST("/record/search/bycronjob", baseApi.SearchBackupRecordsByCronjob)
backupRouter.POST("/record/download", baseApi.DownloadRecord)
backupRouter.POST("/record/del", baseApi.DeleteBackupRecord)

View File

@ -20,7 +20,6 @@ func (s *SettingRouter) InitRouter(Router *gin.RouterGroup) {
settingRouter.POST("/snapshot", baseApi.CreateSnapshot)
settingRouter.POST("/snapshot/recreate", baseApi.RecreateSnapshot)
settingRouter.POST("/snapshot/search", baseApi.SearchSnapshot)
settingRouter.POST("/snapshot/size", baseApi.LoadSnapshotSize)
settingRouter.POST("/snapshot/import", baseApi.ImportSnapshot)
settingRouter.POST("/snapshot/del", baseApi.DeleteSnapshot)
settingRouter.POST("/snapshot/recover", baseApi.RecoverSnapshot)

View File

@ -502,3 +502,32 @@ func loadToken(refresh_token string) (string, error) {
}
return respItem.AccessToken, nil
}
// RefreshALIToken exchanges the stored Aliyun drive refresh_token (read from
// varMap) for a new access token via the aliyundrive token-refresh endpoint.
func RefreshALIToken(varMap map[string]interface{}) (string, error) {
	refreshToken := loadParamFromVars("refresh_token", varMap)
	if len(refreshToken) == 0 {
		return "", errors.New("no such refresh token find in db")
	}
	resp, err := resty.New().R().
		SetBody(map[string]interface{}{
			"grant_type":    "refresh_token",
			"refresh_token": refreshToken,
		}).
		Post("https://api.aliyundrive.com/token/refresh")
	if err != nil {
		return "", fmt.Errorf("load account token failed, err: %v", err)
	}
	if resp.StatusCode() != 200 {
		return "", fmt.Errorf("load account token failed, code: %v", resp.StatusCode())
	}
	var respItem tokenResp
	if err := json.Unmarshal(resp.Body(), &respItem); err != nil {
		return "", err
	}
	return respItem.AccessToken, nil
}

View File

@ -8,7 +8,7 @@ import (
func loadParamFromVars(key string, vars map[string]interface{}) string {
if _, ok := vars[key]; !ok {
if key != "bucket" && key != "port" {
if key != "bucket" && key != "port" && key != "authMode" && key != "passPhrase" {
global.LOG.Errorf("load param %s from vars failed, err: not exist!", key)
}
return ""

View File

@ -9,13 +9,10 @@ import (
"github.com/1Panel-dev/1Panel/agent/utils/files"
)
type localClient struct {
dir string
}
type localClient struct{}
func NewLocalClient(vars map[string]interface{}) (*localClient, error) {
dir := loadParamFromVars("dir", vars)
return &localClient{dir: dir}, nil
return &localClient{}, nil
}
func (c localClient) ListBuckets() ([]interface{}, error) {
@ -23,12 +20,12 @@ func (c localClient) ListBuckets() ([]interface{}, error) {
}
func (c localClient) Exist(file string) (bool, error) {
_, err := os.Stat(path.Join(c.dir, file))
_, err := os.Stat(file)
return err == nil, err
}
func (c localClient) Size(file string) (int64, error) {
fileInfo, err := os.Stat(path.Join(c.dir, file))
fileInfo, err := os.Stat(file)
if err != nil {
return 0, err
}
@ -36,7 +33,7 @@ func (c localClient) Size(file string) (int64, error) {
}
func (c localClient) Delete(file string) (bool, error) {
if err := os.RemoveAll(path.Join(c.dir, file)); err != nil {
if err := os.RemoveAll(file); err != nil {
return false, err
}
return true, nil
@ -44,26 +41,6 @@ func (c localClient) Delete(file string) (bool, error) {
func (c localClient) Upload(src, target string) (bool, error) {
fileOp := files.NewFileOp()
targetFilePath := path.Join(c.dir, target)
if _, err := os.Stat(path.Dir(targetFilePath)); err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(path.Dir(targetFilePath), os.ModePerm); err != nil {
return false, err
}
} else {
return false, err
}
}
if err := fileOp.CopyAndReName(src, targetFilePath, "", true); err != nil {
return false, fmt.Errorf("cp file failed, err: %v", err)
}
return true, nil
}
func (c localClient) Download(src, target string) (bool, error) {
fileOp := files.NewFileOp()
localPath := path.Join(c.dir, src)
if _, err := os.Stat(path.Dir(target)); err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(path.Dir(target), os.ModePerm); err != nil {
@ -74,7 +51,25 @@ func (c localClient) Download(src, target string) (bool, error) {
}
}
if err := fileOp.CopyAndReName(localPath, target, "", true); err != nil {
if err := fileOp.CopyAndReName(src, target, "", true); err != nil {
return false, fmt.Errorf("cp file failed, err: %v", err)
}
return true, nil
}
func (c localClient) Download(src, target string) (bool, error) {
fileOp := files.NewFileOp()
if _, err := os.Stat(path.Dir(target)); err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(path.Dir(target), os.ModePerm); err != nil {
return false, err
}
} else {
return false, err
}
}
if err := fileOp.CopyAndReName(src, target, "", true); err != nil {
return false, fmt.Errorf("cp file failed, err: %v", err)
}
return true, nil
@ -82,11 +77,10 @@ func (c localClient) Download(src, target string) (bool, error) {
func (c localClient) ListObjects(prefix string) ([]string, error) {
var files []string
itemPath := path.Join(c.dir, prefix)
if _, err := os.Stat(itemPath); err != nil {
if _, err := os.Stat(prefix); err != nil {
return files, nil
}
if err := filepath.Walk(itemPath, func(path string, info os.FileInfo, err error) error {
if err := filepath.Walk(prefix, func(path string, info os.FileInfo, err error) error {
if !info.IsDir() {
files = append(files, info.Name())
}

View File

@ -14,7 +14,6 @@ import (
)
type sftpClient struct {
bucket string
connInfo string
config *ssh.ClientConfig
}
@ -29,7 +28,6 @@ func NewSftpClient(vars map[string]interface{}) (*sftpClient, error) {
passPhrase := loadParamFromVars("passPhrase", vars)
username := loadParamFromVars("username", vars)
password := loadParamFromVars("password", vars)
bucket := loadParamFromVars("bucket", vars)
var auth []ssh.AuthMethod
if authMode == "key" {
@ -59,7 +57,7 @@ func NewSftpClient(vars map[string]interface{}) (*sftpClient, error) {
return nil, err
}
return &sftpClient{bucket: bucket, connInfo: fmt.Sprintf("%s:%s", address, port), config: clientConfig}, nil
return &sftpClient{connInfo: fmt.Sprintf("%s:%s", address, port), config: clientConfig}, nil
}
func (s sftpClient) Upload(src, target string) (bool, error) {
@ -80,8 +78,7 @@ func (s sftpClient) Upload(src, target string) (bool, error) {
}
defer srcFile.Close()
targetFilePath := path.Join(s.bucket, target)
targetDir, _ := path.Split(targetFilePath)
targetDir, _ := path.Split(target)
if _, err = client.Stat(targetDir); err != nil {
if os.IsNotExist(err) {
if err = client.MkdirAll(targetDir); err != nil {
@ -91,7 +88,7 @@ func (s sftpClient) Upload(src, target string) (bool, error) {
return false, err
}
}
dstFile, err := client.Create(path.Join(s.bucket, target))
dstFile, err := client.Create(target)
if err != nil {
return false, err
}
@ -120,7 +117,7 @@ func (s sftpClient) Download(src, target string) (bool, error) {
defer client.Close()
defer sshClient.Close()
srcFile, err := client.Open(s.bucket + "/" + src)
srcFile, err := client.Open(src)
if err != nil {
return false, err
}
@ -150,7 +147,7 @@ func (s sftpClient) Exist(filePath string) (bool, error) {
defer client.Close()
defer sshClient.Close()
srcFile, err := client.Open(path.Join(s.bucket, filePath))
srcFile, err := client.Open(filePath)
if err != nil {
if os.IsNotExist(err) {
return false, nil
@ -174,7 +171,7 @@ func (s sftpClient) Size(filePath string) (int64, error) {
defer client.Close()
defer sshClient.Close()
files, err := client.Stat(path.Join(s.bucket, filePath))
files, err := client.Stat(filePath)
if err != nil {
return 0, err
}
@ -193,7 +190,7 @@ func (s sftpClient) Delete(filePath string) (bool, error) {
defer client.Close()
defer sshClient.Close()
if err := client.Remove(path.Join(s.bucket, filePath)); err != nil {
if err := client.Remove(filePath); err != nil {
return false, err
}
return true, nil
@ -211,7 +208,7 @@ func (s sftpClient) ListObjects(prefix string) ([]string, error) {
defer client.Close()
defer sshClient.Close()
files, err := client.ReadDir(path.Join(s.bucket, prefix))
files, err := client.ReadDir(prefix)
if err != nil {
return nil, err
}

View File

@ -46,6 +46,14 @@ func StringEncrypt(text string) (string, error) {
return "", err
}
func StringDecryptWithBase64(text string) (string, error) {
decryptItem, err := StringDecrypt(text)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString([]byte(decryptItem)), nil
}
func StringDecryptWithKey(text, key string) (string, error) {
if len(text) == 0 {
return "", nil

View File

@ -730,6 +730,11 @@ func ZipFile(files []archiver.File, dst afero.File) error {
}
func (f FileOp) TarGzCompressPro(withDir bool, src, dst, secret, exclusionRules string) error {
if !f.Stat(path.Dir(dst)) {
if err := f.Fs.MkdirAll(path.Dir(dst), constant.FilePerm); err != nil {
return err
}
}
workdir := src
srcItem := "."
if withDir {
@ -758,10 +763,10 @@ func (f FileOp) TarGzCompressPro(withDir bool, src, dst, secret, exclusionRules
itemPrefix = ""
}
if len(secret) != 0 {
commands = fmt.Sprintf("tar --warning=no-file-changed --ignore-failed-read --exclude-from=<(find %s -type s -print-printf '%s' | sed 's|^|%s/|') -zcf - %s | openssl enc -aes-256-cbc -salt -pbkdf2 -k '%s' -out %s", src, "%P\n", itemPrefix, srcItem, secret, dst)
commands = fmt.Sprintf("tar --warning=no-file-changed --ignore-failed-read --exclude-from=<(find %s -type s -print '%s' | sed 's|^|%s/|') -zcf - %s | openssl enc -aes-256-cbc -salt -pbkdf2 -k '%s' -out %s", src, "%P\n", itemPrefix, srcItem, secret, dst)
global.LOG.Debug(strings.ReplaceAll(commands, fmt.Sprintf(" %s ", secret), "******"))
} else {
commands = fmt.Sprintf("tar --warning=no-file-changed --ignore-failed-read --exclude-from=<(find %s -type s -print-printf '%s' | sed 's|^|%s/|') -zcf %s %s %s", src, "%P\n", itemPrefix, dst, exStr, srcItem)
commands = fmt.Sprintf("tar --warning=no-file-changed --ignore-failed-read --exclude-from=<(find %s -type s -printf '%s' | sed 's|^|%s/|') -zcf %s %s %s", src, "%P\n", itemPrefix, dst, exStr, srcItem)
global.LOG.Debug(commands)
}
return cmd.ExecCmdWithDir(commands, workdir)

View File

@ -15,7 +15,7 @@ import (
// @Param request body dto.BackupOperate true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /core/backup [post]
// @Router /core/backups [post]
// @x-panel-log {"bodyKeys":["type"],"paramKeys":[],"BeforeFunctions":[],"formatZH":"创建备份账号 [type]","formatEN":"create backup account [type]"}
func (b *BaseApi) CreateBackup(c *gin.Context) {
var req dto.BackupOperate
@ -27,18 +27,27 @@ func (b *BaseApi) CreateBackup(c *gin.Context) {
helper.InternalServer(c, err)
return
}
helper.SuccessWithData(c, nil)
helper.SuccessWithOutData(c)
}
// @Tags Backup Account
// @Summary Refresh token
// @Description 刷新 token
// @Accept json
// @Param request body dto.BackupOperate true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /core/backup/refresh/token [post]
// @Router /core/backups/refresh/token [post]
func (b *BaseApi) RefreshToken(c *gin.Context) {
backupService.Run()
helper.SuccessWithData(c, nil)
var req dto.OperateByID
if err := helper.CheckBindAndValidate(&req, c); err != nil {
return
}
if err := backupService.RefreshToken(req); err != nil {
helper.InternalServer(c, err)
return
}
helper.SuccessWithOutData(c)
}
// @Tags Backup Account
@ -48,7 +57,7 @@ func (b *BaseApi) RefreshToken(c *gin.Context) {
// @Param request body dto.ForBuckets true "request"
// @Success 200 {array} string
// @Security ApiKeyAuth
// @Router /core/backup/search [post]
// @Router /core/backups/buckets [post]
func (b *BaseApi) ListBuckets(c *gin.Context) {
var req dto.ForBuckets
if err := helper.CheckBindAndValidate(&req, c); err != nil {
@ -69,7 +78,7 @@ func (b *BaseApi) ListBuckets(c *gin.Context) {
// @Accept json
// @Success 200 {object} dto.OneDriveInfo
// @Security ApiKeyAuth
// @Router /core/backup/client/:clientType [get]
// @Router /core/backups/client/:clientType [get]
func (b *BaseApi) LoadBackupClientInfo(c *gin.Context) {
clientType, ok := c.Params.Get("clientType")
if !ok {
@ -91,7 +100,7 @@ func (b *BaseApi) LoadBackupClientInfo(c *gin.Context) {
// @Param request body dto.OperateByID true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /core/backup/del [post]
// @Router /core/backups/del [post]
// @x-panel-log {"bodyKeys":["id"],"paramKeys":[],"BeforeFunctions":[{"input_column":"id","input_value":"id","isList":false,"db":"backup_accounts","output_column":"type","output_value":"types"}],"formatZH":"删除备份账号 [types]","formatEN":"delete backup account [types]"}
func (b *BaseApi) DeleteBackup(c *gin.Context) {
var req dto.OperateByID
@ -113,7 +122,7 @@ func (b *BaseApi) DeleteBackup(c *gin.Context) {
// @Param request body dto.BackupOperate true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /core/backup/update [post]
// @Router /core/backups/update [post]
// @x-panel-log {"bodyKeys":["type"],"paramKeys":[],"BeforeFunctions":[],"formatZH":"更新备份账号 [types]","formatEN":"update backup account [types]"}
func (b *BaseApi) UpdateBackup(c *gin.Context) {
var req dto.BackupOperate
@ -127,45 +136,3 @@ func (b *BaseApi) UpdateBackup(c *gin.Context) {
}
helper.SuccessWithData(c, nil)
}
// @Tags Backup Account
// @Summary Search backup accounts with page
// @Description 获取备份账号列表
// @Accept json
// @Param request body dto.SearchPageWithType true "request"
// @Success 200
// @Security ApiKeyAuth
// @Router /core/backup/search [post]
func (b *BaseApi) SearchBackup(c *gin.Context) {
var req dto.SearchPageWithType
if err := helper.CheckBindAndValidate(&req, c); err != nil {
return
}
total, list, err := backupService.SearchWithPage(req)
if err != nil {
helper.InternalServer(c, err)
return
}
helper.SuccessWithData(c, dto.PageResult{
Items: list,
Total: total,
})
}
// @Tags Backup Account
// @Summary get local backup dir
// @Description 获取本地备份目录
// @Success 200
// @Security ApiKeyAuth
// @Router /core/backup/local [get]
func (b *BaseApi) GetLocalDir(c *gin.Context) {
dir, err := backupService.GetLocalDir()
if err != nil {
helper.InternalServer(c, err)
return
}
helper.SuccessWithData(c, dir)
}

View File

@ -12,6 +12,7 @@ type BackupOperate struct {
ID uint `json:"id"`
Name string `json:"name"`
Type string `json:"type" validate:"required"`
IsPublic bool `json:"isPublic"`
Bucket string `json:"bucket"`
AccessKey string `json:"accessKey"`
Credential string `json:"credential"`
@ -25,6 +26,7 @@ type BackupInfo struct {
ID uint `json:"id"`
Name string `json:"name"`
Type string `json:"type"`
IsPublic bool `json:"isPublic"`
Bucket string `json:"bucket"`
AccessKey string `json:"accessKey"`
Credential string `json:"credential"`

View File

@ -52,7 +52,7 @@ type SettingUpdate struct {
type SSLUpdate struct {
SSLType string `json:"sslType" validate:"required,oneof=self select import import-paste import-local"`
Domain string `json:"domain"`
SSL string `json:"ssl" validate:"required,oneof=enable disable"`
SSL string `json:"ssl" validate:"required,oneof=Enable Disable"`
Cert string `json:"cert"`
Key string `json:"key"`
SSLID uint `json:"sslID"`
@ -143,7 +143,7 @@ type SyncTime struct {
}
type BindInfo struct {
Ipv6 string `json:"ipv6" validate:"required,oneof=enable disable"`
Ipv6 string `json:"ipv6" validate:"required,oneof=Enable Disable"`
BindAddress string `json:"bindAddress" validate:"required"`
}

View File

@ -2,8 +2,9 @@ package model
type BackupAccount struct {
BaseModel
Name string `gorm:"not null" json:"name"`
Type string `gorm:"not null" json:"type"`
Name string `gorm:"not null;default:''" json:"name"`
Type string `gorm:"not null;default:''" json:"type"`
IsPublic bool `json:"isPublic"`
Bucket string `json:"bucket"`
AccessKey string `json:"accessKey"`
Credential string `json:"credential"`

View File

@ -62,5 +62,5 @@ func syncLauncherToAgent(launcher model.AppLauncher, operation string) {
itemData, _ := json.Marshal(launcher)
itemJson := dto.SyncToAgent{Name: launcher.Key, Operation: operation, Data: string(itemData)}
bodyItem, _ := json.Marshal(itemJson)
_ = xpack.RequestToAgent("/api/v2/backups/sync", http.MethodPost, bytes.NewReader((bodyItem)))
_ = xpack.RequestToAllAgent("/api/v2/backups/sync", http.MethodPost, bytes.NewReader((bodyItem)))
}

View File

@ -21,7 +21,6 @@ import (
"github.com/1Panel-dev/1Panel/core/utils/cloud_storage"
"github.com/1Panel-dev/1Panel/core/utils/cloud_storage/client"
"github.com/1Panel-dev/1Panel/core/utils/encrypt"
fileUtils "github.com/1Panel-dev/1Panel/core/utils/files"
httpUtils "github.com/1Panel-dev/1Panel/core/utils/http"
"github.com/1Panel-dev/1Panel/core/utils/xpack"
"github.com/jinzhu/copier"
@ -31,131 +30,18 @@ import (
type BackupService struct{}
type IBackupService interface {
Get(req dto.OperateByID) (dto.BackupInfo, error)
List(req dto.OperateByIDs) ([]dto.BackupInfo, error)
GetLocalDir() (string, error)
SearchWithPage(search dto.SearchPageWithType) (int64, interface{}, error)
LoadBackupClientInfo(clientType string) (dto.BackupClientInfo, error)
Create(backupDto dto.BackupOperate) error
GetBuckets(backupDto dto.ForBuckets) ([]interface{}, error)
Update(req dto.BackupOperate) error
Delete(id uint) error
NewClient(backup *model.BackupAccount) (cloud_storage.CloudStorageClient, error)
Run()
RefreshToken(req dto.OperateByID) error
}
func NewIBackupService() IBackupService {
return &BackupService{}
}
func (u *BackupService) Get(req dto.OperateByID) (dto.BackupInfo, error) {
var data dto.BackupInfo
account, err := backupRepo.Get(repo.WithByID(req.ID))
if err != nil {
return data, err
}
if err := copier.Copy(&data, &account); err != nil {
global.LOG.Errorf("copy backup account to dto backup info failed, err: %v", err)
}
data.AccessKey, err = encrypt.StringDecryptWithBase64(data.AccessKey)
if err != nil {
return data, err
}
data.Credential, err = encrypt.StringDecryptWithBase64(data.Credential)
if err != nil {
return data, err
}
return data, nil
}
func (u *BackupService) List(req dto.OperateByIDs) ([]dto.BackupInfo, error) {
accounts, err := backupRepo.List(repo.WithByIDs(req.IDs), repo.WithOrderBy("created_at desc"))
if err != nil {
return nil, err
}
var data []dto.BackupInfo
for _, account := range accounts {
var item dto.BackupInfo
if err := copier.Copy(&item, &account); err != nil {
global.LOG.Errorf("copy backup account to dto backup info failed, err: %v", err)
}
item.AccessKey, err = encrypt.StringDecryptWithBase64(item.AccessKey)
if err != nil {
return nil, err
}
item.Credential, err = encrypt.StringDecryptWithBase64(item.Credential)
if err != nil {
return nil, err
}
data = append(data, item)
}
return data, nil
}
func (u *BackupService) GetLocalDir() (string, error) {
account, err := backupRepo.Get(repo.WithByType(constant.Local))
if err != nil {
return "", err
}
dir, err := LoadLocalDirByStr(account.Vars)
if err != nil {
return "", err
}
return dir, nil
}
func (u *BackupService) SearchWithPage(req dto.SearchPageWithType) (int64, interface{}, error) {
options := []global.DBOption{repo.WithOrderBy("created_at desc")}
if len(req.Type) != 0 {
options = append(options, repo.WithByType(req.Type))
}
if len(req.Info) != 0 {
options = append(options, repo.WithByType(req.Info))
}
count, accounts, err := backupRepo.Page(req.Page, req.PageSize, options...)
if err != nil {
return 0, nil, err
}
var data []dto.BackupInfo
for _, account := range accounts {
var item dto.BackupInfo
if err := copier.Copy(&item, &account); err != nil {
global.LOG.Errorf("copy backup account to dto backup info failed, err: %v", err)
}
if !item.RememberAuth {
item.AccessKey = ""
item.Credential = ""
if account.Type == constant.Sftp {
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(item.Vars), &varMap); err != nil {
continue
}
delete(varMap, "passPhrase")
itemVars, _ := json.Marshal(varMap)
item.Vars = string(itemVars)
}
} else {
item.AccessKey = base64.StdEncoding.EncodeToString([]byte(item.AccessKey))
item.Credential = base64.StdEncoding.EncodeToString([]byte(item.Credential))
}
if account.Type == constant.OneDrive || account.Type == constant.ALIYUN || account.Type == constant.GoogleDrive {
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(item.Vars), &varMap); err != nil {
continue
}
delete(varMap, "refresh_token")
delete(varMap, "drive_id")
itemVars, _ := json.Marshal(varMap)
item.Vars = string(itemVars)
}
data = append(data, item)
}
return count, data, nil
}
func (u *BackupService) LoadBackupClientInfo(clientType string) (dto.BackupClientInfo, error) {
var data dto.BackupClientInfo
clientIDKey := "OneDriveID"
@ -190,10 +76,16 @@ func (u *BackupService) LoadBackupClientInfo(clientType string) (dto.BackupClien
}
func (u *BackupService) Create(req dto.BackupOperate) error {
if !req.IsPublic {
return buserr.New(constant.ErrBackupPublic)
}
backup, _ := backupRepo.Get(repo.WithByName(req.Name))
if backup.ID != 0 {
return constant.ErrRecordExist
}
if req.Type != constant.Sftp && req.BackupPath != "/" {
req.BackupPath = strings.TrimPrefix(req.BackupPath, "/")
}
if err := copier.Copy(&backup, &req); err != nil {
return errors.WithMessage(constant.ErrStructTransform, err.Error())
}
@ -270,8 +162,11 @@ func (u *BackupService) Delete(id uint) error {
if backup.ID == 0 {
return constant.ErrRecordNotFound
}
if !backup.IsPublic {
return buserr.New(constant.ErrBackupPublic)
}
if backup.Type == constant.Local {
return buserr.New(constant.ErrBackupLocalDelete)
return buserr.New(constant.ErrBackupLocal)
}
if _, err := httpUtils.NewLocalClient(fmt.Sprintf("/api/v2/backups/check/%v", id), http.MethodGet, nil); err != nil {
global.LOG.Errorf("check used of local cronjob failed, err: %v", err)
@ -291,6 +186,15 @@ func (u *BackupService) Update(req dto.BackupOperate) error {
if backup.ID == 0 {
return constant.ErrRecordNotFound
}
if !backup.IsPublic {
return buserr.New(constant.ErrBackupPublic)
}
if backup.Type == constant.Local {
return buserr.New(constant.ErrBackupLocal)
}
if req.Type != constant.Sftp && req.BackupPath != "/" {
req.BackupPath = strings.TrimPrefix(req.BackupPath, "/")
}
var newBackup model.BackupAccount
if err := copier.Copy(&newBackup, &req); err != nil {
return errors.WithMessage(constant.ErrStructTransform, err.Error())
@ -305,36 +209,15 @@ func (u *BackupService) Update(req dto.BackupOperate) error {
return err
}
newBackup.Credential = string(itemCredential)
if backup.Type == constant.Local {
if newBackup.Vars != backup.Vars {
oldPath, err := LoadLocalDirByStr(backup.Vars)
if err != nil {
return err
}
newPath, err := LoadLocalDirByStr(newBackup.Vars)
if err != nil {
return err
}
if strings.HasSuffix(newPath, "/") && newPath != "/" {
newPath = newPath[:strings.LastIndex(newPath, "/")]
}
if err := copyDir(oldPath, newPath); err != nil {
return err
}
global.CONF.System.BackupDir = newPath
}
}
if newBackup.Type == constant.OneDrive || newBackup.Type == constant.GoogleDrive {
if err := u.loadRefreshTokenByCode(&backup); err != nil {
return err
}
}
if backup.Type != "LOCAL" {
isOk, err := u.checkBackupConn(&newBackup)
if err != nil || !isOk {
return buserr.WithMap("ErrBackupCheck", map[string]interface{}{"err": err.Error()}, err)
}
isOk, err := u.checkBackupConn(&newBackup)
if err != nil || !isOk {
return buserr.WithMap("ErrBackupCheck", map[string]interface{}{"err": err.Error()}, err)
}
newBackup.AccessKey, err = encrypt.StringEncrypt(newBackup.AccessKey)
@ -353,6 +236,44 @@ func (u *BackupService) Update(req dto.BackupOperate) error {
return nil
}
func (u *BackupService) RefreshToken(req dto.OperateByID) error {
backup, _ := backupRepo.Get(repo.WithByID(req.ID))
if backup.ID == 0 {
return constant.ErrRecordNotFound
}
if !backup.IsPublic {
return buserr.New(constant.ErrBackupPublic)
}
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(backup.Vars), &varMap); err != nil {
return fmt.Errorf("Failed to refresh %s - %s token, please retry, err: %v", backup.Type, backup.Name, err)
}
var (
refreshToken string
err error
)
switch backup.Type {
case constant.OneDrive:
refreshToken, err = client.RefreshToken("refresh_token", "refreshToken", varMap)
case constant.GoogleDrive:
refreshToken, err = client.RefreshGoogleToken("refresh_token", "refreshToken", varMap)
case constant.ALIYUN:
refreshToken, err = client.RefreshALIToken(varMap)
}
if err != nil {
varMap["refresh_status"] = constant.StatusFailed
varMap["refresh_msg"] = err.Error()
return fmt.Errorf("Failed to refresh %s-%s token, please retry, err: %v", backup.Type, backup.Name, err)
}
varMap["refresh_status"] = constant.StatusSuccess
varMap["refresh_time"] = time.Now().Format(constant.DateTimeLayout)
varMap["refresh_token"] = refreshToken
varsItem, _ := json.Marshal(varMap)
backup.Vars = string(varsItem)
return backupRepo.Save(&backup)
}
func (u *BackupService) NewClient(backup *model.BackupAccount) (cloud_storage.CloudStorageClient, error) {
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(backup.Vars), &varMap); err != nil {
@ -409,55 +330,6 @@ func (u *BackupService) loadRefreshTokenByCode(backup *model.BackupAccount) erro
return nil
}
func LoadLocalDirByStr(vars string) (string, error) {
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(vars), &varMap); err != nil {
return "", err
}
if _, ok := varMap["dir"]; !ok {
return "", errors.New("load local backup dir failed")
}
baseDir, ok := varMap["dir"].(string)
if ok {
if _, err := os.Stat(baseDir); err != nil && os.IsNotExist(err) {
if err = os.MkdirAll(baseDir, os.ModePerm); err != nil {
return "", fmt.Errorf("mkdir %s failed, err: %v", baseDir, err)
}
}
return baseDir, nil
}
return "", fmt.Errorf("error type dir: %T", varMap["dir"])
}
func copyDir(src, dst string) error {
srcInfo, err := os.Stat(src)
if err != nil {
return err
}
if err = os.MkdirAll(dst, srcInfo.Mode()); err != nil {
return err
}
files, err := os.ReadDir(src)
if err != nil {
return err
}
for _, file := range files {
srcPath := fmt.Sprintf("%s/%s", src, file.Name())
dstPath := fmt.Sprintf("%s/%s", dst, file.Name())
if file.IsDir() {
if err = copyDir(srcPath, dstPath); err != nil {
global.LOG.Errorf("copy dir %s to %s failed, err: %v", srcPath, dstPath, err)
}
} else {
if err := fileUtils.CopyFile(srcPath, dst, false); err != nil {
global.LOG.Errorf("copy file %s to %s failed, err: %v", srcPath, dstPath, err)
}
}
}
return nil
}
func (u *BackupService) checkBackupConn(backup *model.BackupAccount) (bool, error) {
client, err := u.NewClient(backup)
if err != nil {
@ -481,74 +353,22 @@ func (u *BackupService) checkBackupConn(backup *model.BackupAccount) (bool, erro
_, _ = write.WriteString("1Panelアカウントのテストファイルをバックアップします。\n")
write.Flush()
targetPath := strings.TrimPrefix(path.Join(backup.BackupPath, "test/1panel"), "/")
targetPath := path.Join(backup.BackupPath, "test/1panel")
if backup.Type != constant.Sftp && backup.Type != constant.Local && targetPath != "/" {
targetPath = strings.TrimPrefix(targetPath, "/")
}
return client.Upload(fileItem, targetPath)
}
func StartRefreshForToken() error {
service := NewIBackupService()
refreshID, err := global.Cron.AddJob("0 3 */31 * *", service)
if err != nil {
global.LOG.Errorf("add cron job of refresh backup account token failed, err: %s", err.Error())
return err
}
global.BackupAccountTokenEntryID = refreshID
return nil
}
func (u *BackupService) Run() {
refreshToken()
}
func refreshToken() {
var backups []model.BackupAccount
_ = global.DB.Where("`type` in (?)", []string{constant.OneDrive, constant.ALIYUN, constant.GoogleDrive}).Find(&backups)
if len(backups) == 0 {
func syncAccountToAgent(backup model.BackupAccount, operation string) {
if !backup.IsPublic {
return
}
for _, backupItem := range backups {
if backupItem.ID == 0 {
continue
}
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(backupItem.Vars), &varMap); err != nil {
global.LOG.Errorf("Failed to refresh %s - %s token, please retry, err: %v", backupItem.Type, backupItem.Name, err)
continue
}
var (
refreshToken string
err error
)
switch backupItem.Type {
case constant.OneDrive:
refreshToken, err = client.RefreshToken("refresh_token", "refreshToken", varMap)
case constant.GoogleDrive:
refreshToken, err = client.RefreshGoogleToken("refresh_token", "refreshToken", varMap)
case constant.ALIYUN:
refreshToken, err = client.RefreshALIToken(varMap)
}
if err != nil {
varMap["refresh_status"] = constant.StatusFailed
varMap["refresh_msg"] = err.Error()
global.LOG.Errorf("Failed to refresh OneDrive token, please retry, err: %v", err)
continue
}
varMap["refresh_status"] = constant.StatusSuccess
varMap["refresh_time"] = time.Now().Format(constant.DateTimeLayout)
varMap["refresh_token"] = refreshToken
varsItem, _ := json.Marshal(varMap)
_ = global.DB.Model(&model.BackupAccount{}).Where("id = ?", backupItem.ID).Updates(map[string]interface{}{"vars": varsItem}).Error
go syncAccountToAgent(backupItem, "update")
}
}
func syncAccountToAgent(backup model.BackupAccount, operation string) {
backup.AccessKey, _ = encrypt.StringDecryptWithBase64(backup.AccessKey)
backup.Credential, _ = encrypt.StringDecryptWithBase64(backup.Credential)
itemData, _ := json.Marshal(backup)
itemJson := dto.SyncToAgent{Name: backup.Name, Operation: operation, Data: string(itemData)}
bodyItem, _ := json.Marshal(itemJson)
_ = xpack.RequestToAgent("/api/v2/backups/sync", http.MethodPost, bytes.NewReader((bodyItem)))
_ = xpack.RequestToAllAgent("/api/v2/backups/sync", http.MethodPost, bytes.NewReader((bodyItem)))
_, _ = httpUtils.NewLocalClient("/api/v2/backups/sync", http.MethodPost, bytes.NewReader((bodyItem)))
}

View File

@ -62,8 +62,9 @@ var (
// backup
var (
ErrBackupInUsed = "ErrBackupInUsed"
ErrBackupLocalDelete = "ErrBackupLocalDelete"
ErrBackupInUsed = "ErrBackupInUsed"
ErrBackupLocal = "ErrBackupLocal"
ErrBackupPublic = "ErrBackupPublic"
)
var (

View File

@ -23,8 +23,6 @@ var (
I18n *i18n.Localizer
Cron *cron.Cron
BackupAccountTokenEntryID cron.EntryID
)
type DBOption func(*gorm.DB) *gorm.DB

View File

@ -28,7 +28,8 @@ ErrNoSuchHost: "Network connection failed"
#backup
ErrBackupInUsed: "The backup account is currently in use in a scheduled task and cannot be deleted."
ErrBackupCheck: "Backup account test connection failed {{.err}}"
ErrBackupLocalDelete: "Deleting local server backup accounts is not currently supported."
ErrBackupLocal: "The local server backup account does not support this operation at the moment!"
ErrBackupPublic: "Detected that the backup account is non-public, please check and try again!"
#license
ErrLicense: "License format error, please check and try again!"

View File

@ -28,7 +28,8 @@ ErrNoSuchHost: "網路連接失敗"
#backup
ErrBackupInUsed: "該備份帳號已在計劃任務中使用,無法刪除"
ErrBackupCheck: "備份帳號測試連接失敗 {{.err}}"
ErrBackupLocalDelete: "暫不支持刪除本地伺服器備份帳號"
ErrBackupLocal: "本地伺服器備份帳號暫不支持該操作!"
ErrBackupPublic: "檢測到該備份帳號為非公用,請檢查後重試!"
#license
ErrLicense: "許可證格式錯誤,請檢查後重試!"

View File

@ -29,7 +29,8 @@ ErrNoSuchHost: "网络连接失败"
#backup
ErrBackupInUsed: "该备份账号已在计划任务中使用,无法删除"
ErrBackupCheck: "备份账号测试连接失败 {{ .err}}"
ErrBackupLocalDelete: "暂不支持删除本地服务器备份账号"
ErrBackupLocal: "本地服务器备份账号暂不支持该操作!"
ErrBackupPublic: "检测到该备份账号为非公用,请检查后重试!"
#license
ErrLicense: "许可证格式错误,请检查后重试!"

View File

@ -3,8 +3,8 @@ package cron
import (
"time"
"github.com/1Panel-dev/1Panel/core/app/service"
"github.com/1Panel-dev/1Panel/core/global"
"github.com/1Panel-dev/1Panel/core/init/cron/job"
"github.com/1Panel-dev/1Panel/core/utils/common"
"github.com/robfig/cron/v3"
)
@ -13,6 +13,8 @@ func Init() {
nyc, _ := time.LoadLocation(common.LoadTimeZoneByCmd())
global.Cron = cron.New(cron.WithLocation(nyc), cron.WithChain(cron.Recover(cron.DefaultLogger)), cron.WithChain(cron.DelayIfStillRunning(cron.DefaultLogger)))
_ = service.StartRefreshForToken()
if _, err := global.Cron.AddJob("0 3 */31 * *", job.NewBackupJob()); err != nil {
global.LOG.Errorf("[core] can not add backup token refresh corn job: %s", err.Error())
}
global.Cron.Start()
}

View File

@ -0,0 +1,61 @@
package job
import (
"encoding/json"
"time"
"github.com/1Panel-dev/1Panel/core/app/model"
"github.com/1Panel-dev/1Panel/core/constant"
"github.com/1Panel-dev/1Panel/core/global"
"github.com/1Panel-dev/1Panel/core/utils/cloud_storage/client"
)
type backup struct{}
func NewBackupJob() *backup {
return &backup{}
}
func (b *backup) Run() {
var backups []model.BackupAccount
_ = global.DB.Where("`type` in (?) AND is_public = 0", []string{constant.OneDrive, constant.ALIYUN, constant.GoogleDrive}).Find(&backups)
if len(backups) == 0 {
return
}
for _, backupItem := range backups {
if backupItem.ID == 0 {
continue
}
global.LOG.Infof("Start to refresh %s-%s access_token ...", backupItem.Type, backupItem.Name)
varMap := make(map[string]interface{})
if err := json.Unmarshal([]byte(backupItem.Vars), &varMap); err != nil {
global.LOG.Errorf("Failed to refresh %s - %s token, please retry, err: %v", backupItem.Type, backupItem.Name, err)
continue
}
var (
refreshToken string
err error
)
switch backupItem.Type {
case constant.OneDrive:
refreshToken, err = client.RefreshToken("refresh_token", "refreshToken", varMap)
case constant.GoogleDrive:
refreshToken, err = client.RefreshGoogleToken("refresh_token", "refreshToken", varMap)
case constant.ALIYUN:
refreshToken, err = client.RefreshALIToken(varMap)
}
if err != nil {
varMap["refresh_status"] = constant.StatusFailed
varMap["refresh_msg"] = err.Error()
global.LOG.Errorf("Failed to refresh %s-%s token, please retry, err: %v", backupItem.Type, backupItem.Name, err)
continue
}
varMap["refresh_status"] = constant.StatusSuccess
varMap["refresh_time"] = time.Now().Format(constant.DateTimeLayout)
varMap["refresh_token"] = refreshToken
varsItem, _ := json.Marshal(varMap)
_ = global.DB.Model(&model.BackupAccount{}).Where("id = ?", backupItem.ID).Updates(map[string]interface{}{"vars": string(varsItem)}).Error
global.LOG.Infof("Refresh %s-%s access_token successful!", backupItem.Type, backupItem.Name)
}
}

View File

@ -3,9 +3,7 @@ package hook
import (
"strings"
"github.com/1Panel-dev/1Panel/core/app/model"
"github.com/1Panel-dev/1Panel/core/app/repo"
"github.com/1Panel-dev/1Panel/core/app/service"
"github.com/1Panel-dev/1Panel/core/global"
"github.com/1Panel-dev/1Panel/core/utils/cmd"
"github.com/1Panel-dev/1Panel/core/utils/common"
@ -48,7 +46,6 @@ func Init() {
}
handleUserInfo(global.CONF.System.ChangeUserInfo, settingRepo)
loadLocalDir()
}
func handleUserInfo(tags string, settingRepo repo.ISettingRepo) {
@ -88,18 +85,3 @@ func handleUserInfo(tags string, settingRepo repo.ISettingRepo) {
sudo := cmd.SudoHandleCmd()
_, _ = cmd.Execf("%s sed -i '/CHANGE_USER_INFO=%v/d' /usr/local/bin/1pctl", sudo, global.CONF.System.ChangeUserInfo)
}
// loadLocalDir looks up the backup account of type "LOCAL" in the database
// and caches its configured backup directory in global.CONF.System.BackupDir.
// Lookup or parse failures are logged and the global setting is left unchanged.
func loadLocalDir() {
	var backup model.BackupAccount
	_ = global.DB.Where("type = ?", "LOCAL").First(&backup).Error
	// A zero ID means First matched no row: the LOCAL account is missing.
	if backup.ID == 0 {
		global.LOG.Errorf("no such backup account `%s` in db", "LOCAL")
		return
	}
	// backup.Vars is a JSON blob; the service layer extracts the directory
	// path from it (presumably a "dir" key — confirm in LoadLocalDirByStr).
	dir, err := service.LoadLocalDirByStr(backup.Vars)
	if err != nil {
		global.LOG.Errorf("load local backup dir failed,err: %v", err)
		return
	}
	global.CONF.System.BackupDir = dir
}

View File

@ -19,6 +19,7 @@ func Init() {
migrations.InitGoogle,
migrations.AddTaskDB,
migrations.UpdateSettingStatus,
migrations.RemoveLocalBackup,
})
if err := m.Migrate(); err != nil {
global.LOG.Error(err)

View File

@ -15,7 +15,7 @@ import (
)
var AddTable = &gormigrate.Migration{
ID: "20241224-add-table",
ID: "20240109-add-table",
Migrate: func(tx *gorm.DB) error {
return tx.AutoMigrate(
&model.OperationLog{},
@ -273,3 +273,13 @@ var UpdateSettingStatus = &gormigrate.Migration{
return nil
},
}
// RemoveLocalBackup is a one-shot schema migration that deletes every backup
// account row whose type is LOCAL. Registered in the migration list under the
// ID below so it runs exactly once per installation.
var RemoveLocalBackup = &gormigrate.Migration{
	ID: "20250109-remove-local-backup",
	Migrate: func(tx *gorm.DB) error {
		// Hard-delete all LOCAL-type accounts; other account types are untouched.
		if err := tx.Where("`type` = ?", constant.Local).Delete(&model.BackupAccount{}).Error; err != nil {
			return err
		}
		return nil
	},
}

View File

@ -211,7 +211,7 @@ func newDB(pathItem string) (*gorm.DB, error) {
case strings.HasPrefix(pathItem, "/core"):
dbFile = path.Join(global.CONF.System.BaseDir, "1panel/db/core.db")
case strings.HasPrefix(pathItem, "/xpack"):
dbFile = path.Join(global.CONF.System.BaseDir, "1panel/db/xpack/xpack.db")
dbFile = path.Join(global.CONF.System.BaseDir, "1panel/db/xpack.db")
default:
dbFile = path.Join(global.CONF.System.BaseDir, "1panel/db/agent.db")
}

View File

@ -15,9 +15,7 @@ func (s *BackupRouter) InitRouter(Router *gin.RouterGroup) {
Use(middleware.PasswordExpired())
baseApi := v2.ApiGroupApp.BaseApi
{
backupRouter.GET("/local", baseApi.GetLocalDir)
backupRouter.GET("/client/:clientType", baseApi.LoadBackupClientInfo)
backupRouter.POST("/search", baseApi.SearchBackup)
backupRouter.POST("/refresh/token", baseApi.RefreshToken)
backupRouter.POST("/buckets", baseApi.ListBuckets)
backupRouter.POST("", baseApi.CreateBackup)

View File

@ -8,10 +8,10 @@ import (
"github.com/gin-gonic/gin"
)
func Proxy(c *gin.Context, currentNode string) { return }
func Proxy(c *gin.Context, currentNode string) {}
func UpdateGroup(name string, group, newGroup uint) error { return nil }
func CheckBackupUsed(id uint) error { return nil }
func RequestToAgent(reqUrl, reqMethod string, reqBody io.Reader) error { return nil }
func RequestToAllAgent(reqUrl, reqMethod string, reqBody io.Reader) error { return nil }

View File

@ -14,6 +14,7 @@ export namespace Backup {
id: number;
name: string;
type: string;
isPublic: boolean;
accessKey: string;
bucket: string;
credential: string;
@ -32,6 +33,8 @@ export namespace Backup {
export interface BackupOperate {
id: number;
type: string;
name: string;
isPublic: boolean;
accessKey: string;
bucket: string;
credential: string;
@ -55,6 +58,7 @@ export namespace Backup {
}
export interface ForBucket {
type: string;
isPublic: boolean;
accessKey: string;
credential: string;
vars: string;
@ -64,6 +68,17 @@ export namespace Backup {
name: string;
detailName: string;
}
export interface SearchForSize extends ReqPage {
type: string;
name: string;
detailName: string;
info: string;
cronjobID: number;
}
export interface RecordFileSize extends ReqPage {
id: number;
size: number;
}
export interface SearchBackupRecordByCronjob extends ReqPage {
cronjobID: number;
}

View File

@ -29,9 +29,8 @@ export namespace Cronjob {
files: Array<Item>;
sourceDir: string;
sourceAccountIDs: string;
downloadAccountID: number;
sourceAccounts: Array<number>;
sourceAccounts: Array<string>;
downloadAccount: string;
retainCopies: number;
status: string;
secret: string;

View File

@ -146,8 +146,8 @@ export namespace Setting {
export interface SnapshotInfo {
id: number;
name: string;
from: string;
defaultDownload: string;
sourceAccounts: Array<string>;
downloadAccount: string;
description: string;
status: string;
message: string;
@ -165,13 +165,6 @@ export namespace Setting {
rollbackStatus: string;
rollbackMessage: string;
}
export interface SnapshotFile {
id: number;
name: string;
from: string;
defaultDownload: string;
size: number;
}
export interface SnapshotData {
appData: Array<DataTree>;
panelData: Array<DataTree>;

View File

@ -4,8 +4,16 @@ import { Base64 } from 'js-base64';
import { ResPage } from '../interface';
import { Backup } from '../interface/backup';
import { TimeoutEnum } from '@/enums/http-enum';
import { GlobalStore } from '@/store';
const globalStore = GlobalStore();
// backup-agent
export const getLocalBackupDir = () => {
return http.get<string>(`/backups/local`);
};
export const searchBackup = (params: Backup.SearchWithType) => {
return http.post<ResPage<Backup.BackupInfo>>(`/backups/search`, params);
};
export const handleBackup = (params: Backup.Backup) => {
return http.post(`/backups/backup`, params, TimeoutEnum.T_1H);
};
@ -27,6 +35,9 @@ export const deleteBackupRecord = (params: { ids: number[] }) => {
export const searchBackupRecords = (params: Backup.SearchBackupRecord) => {
return http.post<ResPage<Backup.RecordInfo>>(`/backups/record/search`, params, TimeoutEnum.T_5M);
};
export const loadRecordSize = (param: Backup.SearchForSize) => {
return http.post<Array<Backup.RecordFileSize>>(`/backups/record/size`, param);
};
export const searchBackupRecordsByCronjob = (params: Backup.SearchBackupRecordByCronjob) => {
return http.post<ResPage<Backup.RecordInfo>>(`/backups/record/search/bycronjob`, params, TimeoutEnum.T_5M);
};
@ -35,14 +46,12 @@ export const getFilesFromBackup = (id: number) => {
};
// backup-core
export const refreshToken = () => {
return http.post(`/core/backups/refresh/token`, {});
};
export const getLocalBackupDir = () => {
return http.get<string>(`/core/backups/local`);
};
export const searchBackup = (params: Backup.SearchWithType) => {
return http.post<ResPage<Backup.BackupInfo>>(`/core/backups/search`, params);
export const refreshToken = (params: { id: number; isPublic: boolean }) => {
let urlItem = '/core/backups/refresh/token';
if (!params.isPublic || !globalStore.isProductPro) {
urlItem = '/backups/refresh/token';
}
return http.post(urlItem, { id: params.id });
};
export const getClientInfo = (clientType: string) => {
return http.get<Backup.ClientInfo>(`/core/backups/client/${clientType}`);
@ -55,7 +64,11 @@ export const addBackup = (params: Backup.BackupOperate) => {
if (request.credential) {
request.credential = Base64.encode(request.credential);
}
return http.post<Backup.BackupOperate>(`/core/backups`, request, TimeoutEnum.T_60S);
let urlItem = '/core/backups';
if (!params.isPublic || !globalStore.isProductPro) {
urlItem = '/backups';
}
return http.post<Backup.BackupOperate>(urlItem, request, TimeoutEnum.T_60S);
};
export const editBackup = (params: Backup.BackupOperate) => {
let request = deepCopy(params) as Backup.BackupOperate;
@ -65,10 +78,18 @@ export const editBackup = (params: Backup.BackupOperate) => {
if (request.credential) {
request.credential = Base64.encode(request.credential);
}
return http.post(`/core/backups/update`, request);
let urlItem = '/core/backups/update';
if (!params.isPublic || !globalStore.isProductPro) {
urlItem = '/backups/update';
}
return http.post(urlItem, request);
};
export const deleteBackup = (params: { id: number }) => {
return http.post(`/core/backups/del`, params);
export const deleteBackup = (params: { id: number; isPublic: boolean }) => {
let urlItem = '/core/backups/del';
if (!params.isPublic || !globalStore.isProductPro) {
urlItem = '/backups/del';
}
return http.post(urlItem, { id: params.id });
};
export const listBucket = (params: Backup.ForBucket) => {
let request = deepCopy(params) as Backup.BackupOperate;
@ -78,5 +99,9 @@ export const listBucket = (params: Backup.ForBucket) => {
if (request.credential) {
request.credential = Base64.encode(request.credential);
}
return http.post(`/core/backups/buckets`, request);
let urlItem = '/core/backups/buckets';
if (!params.isPublic || !globalStore.isProductPro) {
urlItem = '/backups/buckets';
}
return http.post(urlItem, request);
};

View File

@ -139,9 +139,6 @@ export const snapshotRollback = (param: Setting.SnapshotRecover) => {
export const searchSnapshotPage = (param: SearchWithPage) => {
return http.post<ResPage<Setting.SnapshotInfo>>(`/settings/snapshot/search`, param);
};
export const loadSnapshotSize = (param: SearchWithPage) => {
return http.post<Array<Setting.SnapshotFile>>(`/settings/snapshot/size`, param);
};
// upgrade
export const loadUpgradeInfo = () => {

View File

@ -38,10 +38,15 @@
<el-table-column :label="$t('commons.table.name')" prop="fileName" show-overflow-tooltip />
<el-table-column :label="$t('file.size')" prop="size" show-overflow-tooltip>
<template #default="{ row }">
<span v-if="row.size">
{{ computeSize(row.size) }}
</span>
<span v-else>-</span>
<div v-if="row.hasLoad">
<span v-if="row.size">
{{ computeSize(row.size) }}
</span>
<span v-else>-</span>
</div>
<div v-if="!row.hasLoad">
<el-button link loading></el-button>
</div>
</template>
</el-table-column>
<el-table-column :label="$t('database.source')" prop="backupType">
@ -108,6 +113,7 @@ import {
deleteBackupRecord,
downloadBackupRecord,
searchBackupRecords,
loadRecordSize,
} from '@/api/modules/backup';
import i18n from '@/lang';
import { Backup } from '@/api/interface/backup';
@ -188,6 +194,7 @@ const search = async () => {
await searchBackupRecords(params)
.then((res) => {
loading.value = false;
loadSize(params);
data.value = res.data.items || [];
paginationConfig.total = res.data.total;
})
@ -196,6 +203,28 @@ const search = async () => {
});
};
const loadSize = async (params: any) => {
await loadRecordSize(params)
.then((res) => {
let stats = res.data || [];
if (stats.length === 0) {
return;
}
for (const backup of data.value) {
for (const item of stats) {
if (backup.id === item.id) {
backup.hasLoad = true;
backup.size = item.size;
break;
}
}
}
})
.catch(() => {
loading.value = false;
});
};
const openTaskLog = (taskID: string) => {
taskLogRef.value.openWithTaskID(taskID);
};

View File

@ -1479,6 +1479,13 @@ const message = {
developerModeHelper: 'Get a preview version of 1Panel to provide feedback on new features and updates',
thirdParty: 'Third-party Account',
scope: 'Scope',
public: 'Public',
publicHelper:
'Public type backup accounts will be synchronized to each sub-node, and sub-nodes can use them together',
private: 'Private',
privateHelper:
'Private type backup accounts are only created on the current node and are for the use of the current node only',
createBackupAccount: 'Add {0}',
noTypeForCreate: 'No backup type is currently created',
LOCAL: 'Server Disks',

View File

@ -1390,6 +1390,11 @@ const message = {
developerModeHelper: '獲取 1Panel 的預覽版本以分享有關新功能和更新的反饋',
thirdParty: '第三方賬號',
scope: '使用範圍',
public: '公有',
publicHelper: '公有類型的備份帳號會同步到各個子節點子節點可以一起使用',
private: '私有',
privateHelper: '私有類型的備份帳號只創建在當前節點上僅供當前節點使用',
createBackupAccount: '添加 {0}',
noTypeForCreate: '當前無可創建備份類型',
LOCAL: '服務器磁盤',

View File

@ -1391,6 +1391,11 @@ const message = {
developerModeHelper: '获取 1Panel 的预览版本以分享有关新功能和更新的反馈',
thirdParty: '第三方账号',
scope: '使用范围',
public: '公有',
publicHelper: '公有类型的备份账号会同步到各个子节点子节点可以一起使用',
private: '私有',
privateHelper: '私有类型的备份账号只创建在当前节点上仅供当前节点使用',
createBackupAccount: '添加 {0}',
noTypeForCreate: '当前无可创建备份类型',
LOCAL: '服务器磁盘',

View File

@ -170,7 +170,7 @@ const loadNodes = async () => {
nodes.value = [];
return;
}
nodes.value = res.data;
nodes.value = res.data || [];
if (nodes.value.length === 0) {
globalStore.currentNode = 'local';
}

View File

@ -17,10 +17,15 @@
<el-table-column :label="$t('commons.table.name')" prop="fileName" show-overflow-tooltip />
<el-table-column :label="$t('file.size')" prop="size" show-overflow-tooltip>
<template #default="{ row }">
<span v-if="row.size">
{{ computeSize(row.size) }}
</span>
<span v-else>-</span>
<div v-if="row.hasLoad">
<span v-if="row.size">
{{ computeSize(row.size) }}
</span>
<span v-else>-</span>
</div>
<div v-if="!row.hasLoad">
<el-button link loading></el-button>
</div>
</template>
</el-table-column>
<el-table-column :label="$t('database.source')" prop="accountType" show-overflow-tooltip>
@ -51,7 +56,7 @@
import { reactive, ref } from 'vue';
import { computeSize, dateFormat, downloadFile } from '@/utils/util';
import i18n from '@/lang';
import { downloadBackupRecord, searchBackupRecordsByCronjob } from '@/api/modules/backup';
import { downloadBackupRecord, loadRecordSize, searchBackupRecordsByCronjob } from '@/api/modules/backup';
import { Backup } from '@/api/interface/backup';
import { MsgError } from '@/utils/message';
import { GlobalStore } from '@/store';
@ -96,6 +101,7 @@ const search = async () => {
await searchBackupRecordsByCronjob(params)
.then((res) => {
loading.value = false;
loadSize(params);
data.value = res.data.items || [];
paginationConfig.total = res.data.total;
})
@ -104,6 +110,29 @@ const search = async () => {
});
};
const loadSize = async (params: any) => {
params.type = 'cronjob';
await loadRecordSize(params)
.then((res) => {
let stats = res.data || [];
if (stats.length === 0) {
return;
}
for (const backup of data.value) {
for (const item of stats) {
if (backup.id === item.id) {
backup.hasLoad = true;
backup.size = item.size;
break;
}
}
}
})
.catch(() => {
loading.value = false;
});
};
const onDownload = async (row: Backup.RecordInfo) => {
if (row.accountType === 'ALIYUN' && row.size < 100 * 1024 * 1024) {
MsgError(i18n.global.t('setting.ALIYUNHelper'));

View File

@ -99,33 +99,34 @@
{{ row.lastRecordTime }}
</template>
</el-table-column>
<el-table-column :min-width="80" :label="$t('setting.backupAccount')" prop="defaultDownload">
<el-table-column :min-width="80" :label="$t('setting.backupAccount')">
<template #default="{ row }">
<span v-if="!hasBackup(row.type)">-</span>
<div v-else>
<div v-for="(item, index) of row.backupAccounts?.split(',')" :key="index">
<div v-for="(item, index) of row.sourceAccounts" :key="index">
<div v-if="row.accountExpand || (!row.accountExpand && index < 3)">
<span v-if="row.backupAccounts">
<span>
{{ $t('setting.' + item) }}
<div v-if="row.expand || (!row.expand && index < 3)">
<span type="info">
<span>
{{ loadName(item) }}
</span>
<el-icon
v-if="item === row.downloadAccount"
size="12"
class="relative top-px left-1"
>
<Star />
</el-icon>
</span>
<el-icon
size="12"
v-if="item === row.defaultDownload"
class="relative top-px left-1"
>
<Star />
</el-icon>
</span>
<span v-else>-</span>
</div>
</div>
</div>
<div v-if="!row.accountExpand && row.backupAccounts?.split(',').length > 3">
<div v-if="!row.accountExpand && row.sourceAccounts?.length > 3">
<el-button type="primary" link @click="row.accountExpand = true">
{{ $t('commons.button.expand') }}...
</el-button>
</div>
<div v-if="row.accountExpand && row.backupAccounts?.split(',').length > 3">
<div v-if="row.accountExpand && row.sourceAccounts?.length > 3">
<el-button type="primary" link @click="row.accountExpand = false">
{{ $t('commons.button.collapse') }}
</el-button>
@ -209,18 +210,6 @@ const search = async (column?: any) => {
.then((res) => {
loading.value = false;
data.value = res.data.items || [];
for (const item of data.value) {
let itemAccounts = item.backupAccounts.split(',') || [];
let accounts = [];
for (const account of itemAccounts) {
if (account == item.defaultDownload) {
accounts.unshift(account);
} else {
accounts.push(account);
}
}
item.itemAccounts = accounts.join(',');
}
paginationConfig.total = res.data.total;
})
.catch(() => {
@ -364,6 +353,11 @@ const loadDetail = (row: any) => {
dialogRecordRef.value!.acceptParams(params);
};
const loadName = (from: any) => {
let items = from.split(' - ');
return i18n.global.t('setting.' + items[0]) + ' ' + items[1];
};
const buttons = [
{
label: i18n.global.t('commons.button.handle'),

View File

@ -409,7 +409,7 @@
</div>
<div v-if="isBackup()">
<el-form-item :label="$t('setting.backupAccount')" prop="backupAccountList">
<el-form-item :label="$t('setting.backupAccount')" prop="sourceAccounts">
<el-select
multiple
class="selectClass"
@ -561,7 +561,6 @@ const acceptParams = (params: DialogProps): void => {
changeType();
dialogData.value.rowData.scriptMode = 'input';
dialogData.value.rowData.dbType = 'mysql';
dialogData.value.rowData.downloadAccountID = 1;
dialogData.value.rowData.isDir = true;
}
if (dialogData.value.rowData.sourceAccountIDs) {
@ -748,8 +747,8 @@ const rules = reactive({
url: [Rules.requiredInput],
files: [{ validator: verifyFiles, trigger: 'blur', required: true }],
sourceDir: [Rules.requiredInput],
backupAccounts: [Rules.requiredSelect],
defaultDownload: [Rules.requiredSelect],
sourceAccounts: [Rules.requiredSelect],
downloadAccountID: [Rules.requiredSelect],
retainCopies: [Rules.number],
});
@ -867,15 +866,19 @@ const loadBackups = async () => {
const res = await listBackupOptions();
let options = res.data || [];
backupOptions.value = [];
if (!dialogData.value.rowData!.sourceAccounts) {
dialogData.value.rowData!.sourceAccounts = [1];
}
let local = 0;
for (const item of options) {
if (item.id === 0) {
continue;
}
if (item.type == 'LOCAL') {
local = item.id;
}
backupOptions.value.push({ id: item.id, type: i18n.global.t('setting.' + item.type), name: item.name });
}
if (!dialogData.value.rowData!.sourceAccounts) {
dialogData.value.rowData!.sourceAccounts = local === 0 ? [local] : [];
}
changeAccount();
};

View File

@ -34,10 +34,22 @@
prop="name"
show-overflow-tooltip
/>
<el-table-column
v-if="globalStore.isProductPro"
:label="$t('setting.scope')"
:min-width="80"
prop="isPublic"
>
<template #default="{ row }">
<el-button plain size="small">
{{ row.isPublic ? $t('setting.public') : $t('setting.private') }}
</el-button>
</template>
</el-table-column>
<el-table-column :label="$t('commons.table.type')" :min-width="80" prop="type">
<template #default="{ row }">
<el-tag>{{ $t('setting.' + row.type) }}</el-tag>
<el-tooltip>
<el-tooltip v-if="row.type === 'OneDrive'">
<template #content>
{{ $t('setting.clickToRefresh') }}
<br />
@ -56,7 +68,7 @@
<br />
{{ $t('setting.refreshTime') + ':' + row.varsJson['refresh_time'] }}
</template>
<el-tag @click="refreshItemToken" v-if="row.type === 'OneDrive'" class="ml-1">
<el-tag @click="refreshItemToken(row)" class="ml-1">
{{ 'Token ' + $t('commons.button.refresh') }}
</el-tag>
</el-tooltip>
@ -102,6 +114,8 @@ import Operate from '@/views/setting/backup-account/operate/index.vue';
import { Backup } from '@/api/interface/backup';
import i18n from '@/lang';
import { MsgSuccess } from '@/utils/message';
import { GlobalStore } from '@/store';
const globalStore = GlobalStore();
const loading = ref();
const data = ref();
@ -159,7 +173,7 @@ const onDelete = async (row: Backup.BackupInfo) => {
i18n.global.t('commons.button.delete'),
]),
api: deleteBackup,
params: { id: row.id },
params: { id: row.id, isPublic: row.isPublic },
});
};
@ -167,6 +181,7 @@ const onOpenDialog = async (
title: string,
rowData: Partial<Backup.BackupInfo> = {
id: 0,
isPublic: false,
varsJson: {},
},
) => {
@ -177,8 +192,8 @@ const onOpenDialog = async (
dialogRef.value!.acceptParams(params);
};
const refreshItemToken = async () => {
await refreshToken();
const refreshItemToken = async (row: any) => {
await refreshToken({ id: row.id, isPublic: row.isPublic });
MsgSuccess(i18n.global.t('commons.msg.operationSuccess'));
search();
};

View File

@ -5,6 +5,20 @@
<el-tag v-if="dialogData.title === 'edit'">{{ dialogData.rowData!.name }}</el-tag>
<el-input v-else v-model="dialogData.rowData!.name" />
</el-form-item>
<el-form-item
v-if="globalStore.isProductPro"
:label="$t('setting.scope')"
prop="isPublic"
:rules="Rules.requiredSelect"
>
<el-radio-group v-model="dialogData.rowData!.isPublic">
<el-radio :value="true" size="large">{{ $t('setting.public') }}</el-radio>
<el-radio :value="false" size="large">{{ $t('setting.private') }}</el-radio>
<span class="input-help">
{{ dialogData.rowData!.isPublic ? $t('setting.publicHelper') : $t('setting.privateHelper') }}
</span>
</el-radio-group>
</el-form-item>
<el-form-item :label="$t('commons.table.type')" prop="type" :rules="Rules.requiredSelect">
<el-tag v-if="dialogData.title === 'edit'">{{ $t('setting.' + dialogData.rowData!.type) }}</el-tag>
<el-select v-else v-model="dialogData.rowData!.type" @change="changeType">
@ -147,7 +161,7 @@
>
<el-input v-model.trim="dialogData.rowData!.varsJson['endpointItem']">
<template #prepend>
<el-select v-model.trim="domainProto" class="p-w-120">
<el-select v-model.trim="domainProto" class="p-w-100">
<el-option label="http" value="http" />
<el-option label="https" value="https" />
</el-select>
@ -348,18 +362,18 @@
<el-form-item
v-if="dialogData.rowData!.type === 'SFTP'"
:label="$t('setting.backupDir')"
prop="bucket"
prop="backupPath"
:rules="[Rules.requiredInput]"
>
<el-input v-model.trim="dialogData.rowData!.bucket" />
<el-input v-model.trim="dialogData.rowData!.backupPath" />
</el-form-item>
<el-form-item
v-if="dialogData.rowData!.type === 'LOCAL'"
:label="$t('setting.backupDir')"
prop="varsJson['dir']"
prop="backupPath"
:rules="Rules.requiredInput"
>
<el-input v-model="dialogData.rowData!.varsJson['dir']">
<el-input v-model="dialogData.rowData!.backupPath">
<template #prepend>
<FileList @choose="loadDir" :dir="true"></FileList>
</template>
@ -388,6 +402,8 @@ import { cities } from './../helper';
import { deepCopy, spliceHttp, splitHttp } from '@/utils/util';
import { MsgSuccess } from '@/utils/message';
import { Base64 } from 'js-base64';
import { GlobalStore } from '@/store';
const globalStore = GlobalStore();
const loading = ref(false);
type FormInstance = InstanceType<typeof ElForm>;
@ -527,13 +543,14 @@ const hasPassword = () => {
let itemType = dialogData.value.rowData!.type;
return itemType === 'SFTP' || itemType === 'WebDAV';
};
const hasBackDir = () => {
let itemType = dialogData.value.rowData!.type;
return itemType !== 'LOCAL' && itemType !== 'SFTP';
};
const loadDir = async (path: string) => {
dialogData.value.rowData!.varsJson['dir'] = path;
dialogData.value.rowData!.backupPath = path;
};
const changeType = async () => {
@ -610,6 +627,7 @@ const getBuckets = async () => {
}
item['endpointItem'] = undefined;
listBucket({
isPublic: dialogData.value.rowData!.isPublic,
type: dialogData.value.rowData!.type,
vars: JSON.stringify(item),
accessKey: dialogData.value.rowData!.accessKey,

View File

@ -41,30 +41,29 @@
<el-table-column prop="version" :label="$t('app.version')" />
<el-table-column :label="$t('setting.backupAccount')" min-width="80" prop="from">
<template #default="{ row }">
<div v-if="row.hasLoad">
<div v-for="(item, index) of row.from.split(',')" :key="index" class="mt-1">
<div>
<div v-for="(item, index) of row.sourceAccounts" :key="index" class="mt-1">
<div v-if="row.expand || (!row.expand && index < 3)">
<span v-if="row.from" type="info">
<span type="info">
<span>
{{ loadName(item) }}
</span>
<el-icon
v-if="item === row.defaultDownload"
v-if="item === row.downloadAccount"
size="12"
class="relative top-px left-1"
>
<Star />
</el-icon>
</span>
<span v-else>-</span>
</div>
</div>
<div v-if="!row.expand && row.from.split(',').length > 3">
<div v-if="!row.expand && row.sourceAccounts.length > 3">
<el-button type="primary" link @click="row.expand = true">
{{ $t('commons.button.expand') }}...
</el-button>
</div>
<div v-if="row.expand && row.from.split(',').length > 3">
<div v-if="row.expand && row.sourceAccounts.length > 3">
<el-button type="primary" link @click="row.expand = false">
{{ $t('commons.button.collapse') }}
</el-button>
@ -182,7 +181,6 @@
<script setup lang="ts">
import {
searchSnapshotPage,
loadSnapshotSize,
snapshotDelete,
snapshotRecreate,
snapshotRollback,
@ -201,6 +199,7 @@ import SnapshotCreate from '@/views/setting/snapshot/create/index.vue';
import SnapRecover from '@/views/setting/snapshot/recover/index.vue';
import { MsgError, MsgSuccess } from '@/utils/message';
import { loadOsInfo } from '@/api/modules/dashboard';
import { loadRecordSize } from '@/api/modules/backup';
const loading = ref(false);
const data = ref();
@ -296,7 +295,7 @@ const onChange = async (info: any) => {
};
const onRecover = async (row: any) => {
if (row.defaultDownload.indexOf('ALIYUN') !== -1 && row.size > 100 * 1024 * 1024) {
if (row.downloadAccount.indexOf('ALIYUN') !== -1 && row.size > 100 * 1024 * 1024) {
MsgError(i18n.global.t('setting.ALIYUNRecover'));
return;
}
@ -396,7 +395,7 @@ const search = async (column?: any) => {
await searchSnapshotPage(params)
.then((res) => {
loading.value = false;
loadSize();
loadSize(params);
cleanData.value = false;
data.value = res.data.items || [];
paginationConfig.total = res.data.total;
@ -406,13 +405,9 @@ const search = async (column?: any) => {
});
};
const loadSize = async () => {
let params = {
info: searchName.value,
page: paginationConfig.currentPage,
pageSize: paginationConfig.pageSize,
};
await loadSnapshotSize(params)
const loadSize = async (params: any) => {
params.type = 'snapshot';
await loadRecordSize(params)
.then((res) => {
let stats = res.data || [];
if (stats.length === 0) {
@ -422,8 +417,6 @@ const loadSize = async () => {
for (const item of stats) {
if (snap.id === item.id) {
snap.hasLoad = true;
snap.from = item.from;
snap.defaultDownload = item.defaultDownload;
snap.size = item.size;
break;
}

View File

@ -95,7 +95,7 @@ export default defineConfig(({ mode }: ConfigEnv): UserConfig => {
}),
],
esbuild: {
pure: viteEnv.VITE_DROP_CONSOLE ? ['console.log', 'debugger'] : [],
pure: viteEnv.VITE_DROP_CONSOLE ? ['console.log'] : [],
},
build: {
outDir: '../core/cmd/server/web',