From a3748af772ef3f01e9d9821c0034d91602ee4bff Mon Sep 17 00:00:00 2001
From: Sean <866155+SeanHeuc@users.noreply.github.com>
Date: Sun, 27 Aug 2023 21:14:23 +0800
Subject: [PATCH] feat: misc improvements to upload/copy/hash (#5045)

general: add createTime/updateTime support in webdav and some drivers
general: add hash support in some drivers
general: cross-storage rapid-upload support
general: enhance upload to avoid local temp files where possible
general: replace ReadSeekCloser with the File interface to speed up stream operations
feat(aliyun_open): apply the same improvements as above
feat(crypt): add hack for 139cloud

Close #4934
Close #4819

Note: baidu_netdisk's upload code still needs improvement to support rapid-upload.

---
 drivers/115/driver.go | 74 +++++-
 drivers/115/types.go | 16 +-
 drivers/115/util.go | 371 ++++++++++++++++++++++++++++-
 drivers/123/driver.go | 11 +-
 drivers/123/types.go | 9 +
 drivers/123_share/types.go | 8 +
 drivers/189pc/types.go | 17 ++
 drivers/189pc/utils.go | 7 +-
 drivers/alist_v3/driver.go | 3 +-
 drivers/aliyundrive/driver.go | 27 ++-
 drivers/aliyundrive_open/types.go | 3 +
 drivers/aliyundrive_open/upload.go | 129 +++++-----
 drivers/baidu_netdisk/driver.go | 21 +-
 drivers/baidu_netdisk/types.go | 20 +-
 drivers/baidu_netdisk/util.go | 10 +-
 drivers/baidu_photo/driver.go | 4 +-
 drivers/baidu_photo/help.go | 12 +-
 drivers/baidu_photo/types.go | 28 ++-
 drivers/cloudreve/driver.go | 2 +-
 drivers/crypt/driver.go | 103 ++++----
 drivers/crypt/util.go | 11 -
 drivers/ftp/driver.go | 2 +-
 drivers/ftp/util.go | 41 ++--
 drivers/google_drive/driver.go | 4 +-
 drivers/google_drive/util.go | 20 +-
 drivers/google_photo/driver.go | 2 +-
 drivers/lanzou/types.go | 21 ++
 drivers/local/driver.go | 40 ++--
 drivers/mediatrack/driver.go | 4 +-
 drivers/mega/driver.go | 36 +--
 drivers/mega/types.go | 34 ++-
 drivers/mopan/driver.go | 4 +-
 drivers/pikpak/driver.go | 4 +-
 drivers/quark_uc/driver.go | 6 +-
 drivers/s3/driver.go | 7 +-
 drivers/sftp/driver.go | 2 +-
 drivers/smb/driver.go | 3 +-
 drivers/terabox/driver.go | 4 +-
 drivers/thunder/driver.go | 4 +-
 drivers/thunder/types.go | 12 +
 drivers/virtual/driver.go | 19 +-
 drivers/weiyun/driver.go | 4 +-
 drivers/weiyun/types.go | 16 ++
 go.mod | 4 +
 go.sum | 14 +-
 internal/aria2/monitor.go | 14 +-
 internal/errs/errors.go | 1 +
 internal/fs/copy.go | 10 +-
 internal/fs/fs.go | 5 +-
 internal/fs/put.go | 12 +-
 internal/fs/util.go | 73 ------
 internal/model/args.go | 29 ++-
 internal/model/file.go | 25 ++
 internal/model/obj.go | 22 +-
 internal/model/object.go | 19 +-
 internal/model/stream.go | 33 ---
 internal/model/user.go | 4 +-
 internal/net/request.go | 12 +-
 internal/net/request_test.go | 2 +-
 internal/net/serve.go | 11 +-
 internal/op/fs.go | 15 +-
 internal/qbittorrent/monitor.go | 15 +-
 internal/stream/stream.go | 278 +++++++++++++++++++++
 internal/stream/util.go | 84 +++++++
 pkg/utils/file.go | 9 +-
 pkg/utils/hash.go | 180 ++++++++++++--
 pkg/utils/hash_test.go | 64 +++++
 pkg/utils/io.go | 63 +++--
 pkg/utils/str.go | 18 ++
 server/common/proxy.go | 49 +---
 server/handles/down.go | 4 +-
 server/handles/fsmanage.go | 4 +-
 server/handles/fsread.go | 4 +
 server/handles/fsup.go | 24 +-
 server/webdav/prop.go | 7 +-
 server/webdav/util.go | 29 +++
 server/webdav/webdav.go | 14 +-
 77 files changed, 1721 insertions(+), 605 deletions(-)
 delete mode 100644 internal/fs/util.go
 create mode 100644 internal/model/file.go
 delete mode 100644 internal/model/stream.go
 create mode 100644 internal/stream/stream.go
 create mode 100644 internal/stream/util.go
 create mode 100644 pkg/utils/hash_test.go
 create mode 100644 server/webdav/util.go
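Before the per-file diffs, a sketch of the upload pattern this patch standardizes across drivers: reuse a hash the source storage already exposes (this is what makes cross-storage rapid-upload work), and only fall back to caching the stream in a temp file when no hash is available. The helper below is illustrative, not code from this PR; it only uses the interfaces the PR itself introduces (`model.FileStreamer`, `utils.HashFile`):

```go
import (
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
)

// sha1ForUpload is a hypothetical helper showing the order drivers now follow:
// prefer a hash the stream already carries; cache to disk only as a last resort.
func sha1ForUpload(s model.FileStreamer) (string, error) {
	// 1) hash propagated from the source storage -- this is what enables
	//    cross-storage rapid-upload without re-reading the data
	if h := s.GetHash().GetHash(utils.SHA1); len(h) > 0 {
		return h, nil
	}
	// 2) no usable hash: spool the stream to a temp file once and hash that
	tmpF, err := s.CacheFullInTempFile()
	if err != nil {
		return "", err
	}
	return utils.HashFile(utils.SHA1, tmpF)
}
```

The 115 driver's `Put` below implements exactly this sequence, plus a 128 KB SHA-1 pre-hash read via `RangeRead` so mismatches fail fast.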
diff --git a/drivers/115/driver.go b/drivers/115/driver.go
index b9554c89e12..a7c52f29c50 100644
--- a/drivers/115/driver.go
+++ b/drivers/115/driver.go
@@ -2,13 +2,13 @@ package _115
 
 import (
 	"context"
-	"os"
-
 	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/pkg/errors"
+	"strings"
 )
 
 type Pan115 struct {
@@ -38,15 +38,15 @@ func (d *Pan115) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
 	if err != nil && !errors.Is(err, driver115.ErrNotExist) {
 		return nil, err
 	}
-	return utils.SliceConvert(files, func(src driver115.File) (model.Obj, error) {
-		return src, nil
+	return utils.SliceConvert(files, func(src FileObj) (model.Obj, error) {
+		return &src, nil
 	})
 }
 
 func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	downloadInfo, err := d.client.
 		SetUserAgent(driver115.UA115Browser).
-		Download(file.(driver115.File).PickCode)
+		Download(file.(*FileObj).PickCode)
 	// recover for upload
 	d.client.SetUserAgent(driver115.UA115Desktop)
 	if err != nil {
@@ -83,15 +83,67 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	var (
+		fastInfo *driver115.UploadInitResp
+		dirID    = dstDir.GetID()
+	)
+
+	if ok, err := d.client.UploadAvailable(); err != nil || !ok {
+		return err
+	}
+	if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
+		return driver115.ErrUploadTooLarge
+	}
+	//if digest, err = d.client.GetDigestResult(stream); err != nil {
+	//	return err
+	//}
+
+	const PreHashSize int64 = 128 * utils.KB
+	hashSize := PreHashSize
+	if stream.GetSize() < PreHashSize {
+		hashSize = stream.GetSize()
+	}
+	reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
+	if err != nil {
+		return err
+	}
+	preHash, err := utils.HashReader(utils.SHA1, reader)
 	if err != nil {
 		return err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
-	return d.client.UploadFastOrByMultipart(dstDir.GetID(), stream.GetName(), stream.GetSize(), tempFile)
+	preHash = strings.ToUpper(preHash)
+	fullHash := stream.GetHash().GetHash(utils.SHA1)
+	if len(fullHash) <= 0 {
+		tmpF, err := stream.CacheFullInTempFile()
+		if err != nil {
+			return err
+		}
+		fullHash, err = utils.HashFile(utils.SHA1, tmpF)
+		if err != nil {
+			return err
+		}
+	}
+	fullHash = strings.ToUpper(fullHash)
+
+	// rapid-upload
+	// note that 115 adds a timeout to rapid-upload;
+	// after it expires, a "sig invalid" error is thrown even when the hash is correct.
+	if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
+		return err
+	}
+	if matched, err := fastInfo.Ok(); err != nil {
+		return err
+	} else if matched {
+		return nil
+	}
+
+	// rapid-upload missed; fall back to a real upload
+	if stream.GetSize() <= utils.KB { // files no larger than 1KB are uploaded in normal (non-multipart) mode
+		return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
+	}
+	// multipart upload
+	return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
+
 }
 
 var _ driver.Driver = (*Pan115)(nil)

diff --git a/drivers/115/types.go b/drivers/115/types.go
index 25492bdc3ba..830e347b44e 100644
--- a/drivers/115/types.go
+++ b/drivers/115/types.go
@@ -3,6 +3,20 @@ package _115
 import (
 	"github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"time"
 )
 
-var _ model.Obj = (*driver.File)(nil)
+var _ model.Obj = (*FileObj)(nil)
+
+type FileObj struct {
+	driver.File
+}
+
+func (f *FileObj) CreateTime() time.Time {
+	return f.File.CreateTime
+}
+
+func (f *FileObj) GetHash() utils.HashInfo {
+	return utils.NewHashInfo(utils.SHA1, f.Sha1)
+}

diff --git a/drivers/115/util.go b/drivers/115/util.go
index cc04baafe44..35d1fbda759 100644
--- a/drivers/115/util.go
+++ b/drivers/115/util.go
@@ -1,10 +1,25 @@
 package _115
 
 import (
+	"bytes"
 	"crypto/tls"
+	"encoding/json"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+	"github.com/orzogc/fake115uploader/cipher"
+	"io"
+	"net/url"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
 
 	"github.com/SheltonZhu/115driver/pkg/driver"
+	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/pkg/errors"
 )
@@ -41,8 +56,8 @@ func (d *Pan115) login() error {
 	return d.client.LoginCheck()
 }
 
-func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
-	res := make([]driver.File, 0)
+func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
+	res := make([]FileObj, 0)
 	if d.PageSize <= 0 {
 		d.PageSize = driver.FileListLimit
 	}
@@ -51,7 +66,357 @@
 		return nil, err
 	}
 	for _, file := range *files {
-		res = append(res, file)
+		res = append(res, FileObj{file})
 	}
 	return res, nil
 }
+
+const (
+	appVer = "2.0.3.6"
+)
+
+func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
+	var (
+		ecdhCipher   *cipher.EcdhCipher
+		encrypted    []byte
+		decrypted    []byte
+		encodedToken string
+		err          error
+		target       = "U_1_" + dirID
+		bodyBytes    []byte
+		result       = driver115.UploadInitResp{}
+		fileSizeStr  = strconv.FormatInt(fileSize, 10)
+	)
+	if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
+		return nil, err
+	}
+
+	userID := strconv.FormatInt(d.client.UserID, 10)
+	form := url.Values{}
+	form.Set("appid", "0")
+	form.Set("appversion", appVer)
+	form.Set("userid", userID)
+	form.Set("filename", fileName)
+	form.Set("filesize", fileSizeStr)
+	form.Set("fileid", fileID)
+	form.Set("target", target)
+	form.Set("sig", d.client.GenerateSignature(fileID, target))
+
+	signKey, signVal := "", ""
+	for retry := true; retry; {
+		t := driver115.Now()
+
+		if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
+			return nil, err
+		}
+
+		params := map[string]string{
+			"k_ec": encodedToken,
+		}
+
+		form.Set("t", t.String())
form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal)) + if signKey != "" && signVal != "" { + form.Set("sign_key", signKey) + form.Set("sign_val", signVal) + } + if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil { + return nil, err + } + + req := d.client.NewRequest(). + SetQueryParams(params). + SetBody(encrypted). + SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded"). + SetDoNotParseResponse(true) + resp, err := req.Post(driver115.ApiUploadInit) + if err != nil { + return nil, err + } + data := resp.RawBody() + defer data.Close() + if bodyBytes, err = io.ReadAll(data); err != nil { + return nil, err + } + if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil { + return nil, err + } + if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil { + return nil, err + } + if result.Status == 7 { + // Update signKey & signVal + signKey = result.SignKey + signVal, err = UploadDigestRange(stream, result.SignCheck) + if err != nil { + return nil, err + } + } else { + retry = false + } + result.SHA1 = fileID + } + + return &result, nil +} + +func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) { + var start, end int64 + if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil { + return + } + + length := end - start + 1 + reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length}) + hashStr, err := utils.HashReader(utils.SHA1, reader) + if err != nil { + return "", err + } + result = strings.ToUpper(hashStr) + return +} + +// UploadByMultipart upload by mutipart blocks +func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error { + var ( + chunks []oss.FileChunk + parts []oss.UploadPart + imur oss.InitiateMultipartUploadResult + ossClient *oss.Client + bucket *oss.Bucket + ossToken *driver115.UploadOSSTokenResp + err error + ) + + tmpF, err := stream.CacheFullInTempFile() + if err != nil { + return err + } + + options := driver115.DefalutUploadMultipartOptions() + if len(opts) > 0 { + for _, f := range opts { + f(options) + } + } + + if ossToken, err = d.client.GetOSSToken(); err != nil { + return err + } + + if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil { + return err + } + + if bucket, err = ossClient.Bucket(params.Bucket); err != nil { + return err + } + + // ossToken一小时后就会失效,所以每50分钟重新获取一次 + ticker := time.NewTicker(options.TokenRefreshTime) + defer ticker.Stop() + // 设置超时 + timeout := time.NewTimer(options.Timeout) + + if chunks, err = SplitFile(fileSize); err != nil { + return err + } + + if imur, err = bucket.InitiateMultipartUpload(params.Object, + oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken), + oss.UserAgentHeader(driver115.OSSUserAgent), + ); err != nil { + return err + } + + wg := sync.WaitGroup{} + wg.Add(len(chunks)) + + chunksCh := make(chan oss.FileChunk) + errCh := make(chan error) + UploadedPartsCh := make(chan oss.UploadPart) + quit := make(chan struct{}) + + // producer + go chunksProducer(chunksCh, chunks) + go func() { + wg.Wait() + quit <- struct{}{} + }() + + // consumers + for i := 0; i < options.ThreadsNum; i++ { + go func(threadId int) { + defer func() { + if r := recover(); r != nil { + errCh <- fmt.Errorf("Recovered in %v", r) + } + }() + for chunk := range 
+
+// UploadByMultipart uploads a file to OSS in multipart chunks
+func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
+	var (
+		chunks    []oss.FileChunk
+		parts     []oss.UploadPart
+		imur      oss.InitiateMultipartUploadResult
+		ossClient *oss.Client
+		bucket    *oss.Bucket
+		ossToken  *driver115.UploadOSSTokenResp
+		err       error
+	)
+
+	tmpF, err := stream.CacheFullInTempFile()
+	if err != nil {
+		return err
+	}
+
+	options := driver115.DefalutUploadMultipartOptions()
+	if len(opts) > 0 {
+		for _, f := range opts {
+			f(options)
+		}
+	}
+
+	if ossToken, err = d.client.GetOSSToken(); err != nil {
+		return err
+	}
+
+	if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
+		return err
+	}
+
+	if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
+		return err
+	}
+
+	// the ossToken expires after an hour, so refresh it every 50 minutes
+	ticker := time.NewTicker(options.TokenRefreshTime)
+	defer ticker.Stop()
+	// set the overall timeout
+	timeout := time.NewTimer(options.Timeout)
+
+	if chunks, err = SplitFile(fileSize); err != nil {
+		return err
+	}
+
+	if imur, err = bucket.InitiateMultipartUpload(params.Object,
+		oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
+		oss.UserAgentHeader(driver115.OSSUserAgent),
+	); err != nil {
+		return err
+	}
+
+	wg := sync.WaitGroup{}
+	wg.Add(len(chunks))
+
+	chunksCh := make(chan oss.FileChunk)
+	errCh := make(chan error)
+	UploadedPartsCh := make(chan oss.UploadPart)
+	quit := make(chan struct{})
+
+	// producer
+	go chunksProducer(chunksCh, chunks)
+	go func() {
+		wg.Wait()
+		quit <- struct{}{}
+	}()
+
+	// consumers
+	for i := 0; i < options.ThreadsNum; i++ {
+		go func(threadId int) {
+			defer func() {
+				if r := recover(); r != nil {
+					errCh <- fmt.Errorf("Recovered in %v", r)
+				}
+			}()
+			for chunk := range chunksCh {
+				var part oss.UploadPart // keep retrying on error, up to 3 attempts
+				for retry := 0; retry < 3; retry++ {
+					select {
+					case <-ticker.C:
+						if ossToken, err = d.client.GetOSSToken(); err != nil { // refresh the ossToken when the ticker fires
+							errCh <- errors.Wrap(err, "刷新token时出现错误")
+						}
+					default:
+					}
+
+					buf := make([]byte, chunk.Size)
+					if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
+						continue
+					}
+
+					b := bytes.NewBuffer(buf)
+					if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
+						break
+					}
+				}
+				if err != nil {
+					errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
+				}
+				UploadedPartsCh <- part
+			}
+		}(i)
+	}
+
+	go func() {
+		for part := range UploadedPartsCh {
+			parts = append(parts, part)
+			wg.Done()
+		}
+	}()
+LOOP:
+	for {
+		select {
+		case <-ticker.C:
+			// refresh the ossToken when the ticker fires
+			if ossToken, err = d.client.GetOSSToken(); err != nil {
+				return err
+			}
+		case <-quit:
+			break LOOP
+		case <-errCh:
+			return err
+		case <-timeout.C:
+			return fmt.Errorf("time out")
+		}
+	}
+
+	// the EOF error comes from xml Unmarshal: the response is actually JSON, so the upload did succeed
+	if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
+		// when the filename contains & or <, parsing the response as xml fails, but the upload itself succeeded
+		if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
+			return err
+		}
+	}
+	return d.checkUploadStatus(dirID, params.SHA1)
+}
+func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
+	for _, chunk := range chunks {
+		ch <- chunk
+	}
+}
+func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
+	// verify that the upload succeeded
+	req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
+	opts := []driver115.GetFileOptions{
+		driver115.WithOrder(driver115.FileOrderByTime),
+		driver115.WithShowDirEnable(false),
+		driver115.WithAsc(false),
+		driver115.WithLimit(500),
+	}
+	fResp, err := driver115.GetFiles(req, dirID, opts...)
+	if err != nil {
+		return err
+	}
+	for _, fileInfo := range fResp.Files {
+		if fileInfo.Sha1 == sha1 {
+			return nil
+		}
+	}
+	return driver115.ErrUploadFailed
+}
+
+func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
+	for i := int64(1); i < 10; i++ {
+		if fileSize < i*utils.GB { // files smaller than i GB are split into i*1000 parts
+			if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
+				return
+			}
+			break
+		}
+	}
+	if fileSize > 9*utils.GB { // files larger than 9 GB are split into 10000 parts
+		if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
+			return
+		}
+	}
+	// a single part must be at least 100KB
+	if chunks[0].Size < 100*utils.KB {
+		if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
+			return
+		}
+	}
+	return
+}
+
+// SplitFileByPartNum splits a big file into the given number of parts.
+// Returns the split result when error is nil.
+func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) { + if chunkNum <= 0 || chunkNum > 10000 { + return nil, errors.New("chunkNum invalid") + } + + if int64(chunkNum) > fileSize { + return nil, errors.New("oss: chunkNum invalid") + } + + var chunks []oss.FileChunk + var chunk = oss.FileChunk{} + var chunkN = (int64)(chunkNum) + for i := int64(0); i < chunkN; i++ { + chunk.Number = int(i + 1) + chunk.Offset = i * (fileSize / chunkN) + if i == chunkN-1 { + chunk.Size = fileSize/chunkN + fileSize%chunkN + } else { + chunk.Size = fileSize / chunkN + } + chunks = append(chunks, chunk) + } + + return chunks, nil +} + +// SplitFileByPartSize splits big file into parts by the size of parts. +// Splits the file by the part size. Returns the FileChunk when error is nil. +func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) { + if chunkSize <= 0 { + return nil, errors.New("chunkSize invalid") + } + + var chunkN = fileSize / chunkSize + if chunkN >= 10000 { + return nil, errors.New("Too many parts, please increase part size") + } + + var chunks []oss.FileChunk + var chunk = oss.FileChunk{} + for i := int64(0); i < chunkN; i++ { + chunk.Number = int(i + 1) + chunk.Offset = i * chunkSize + chunk.Size = chunkSize + chunks = append(chunks, chunk) + } + + if fileSize%chunkSize > 0 { + chunk.Number = len(chunks) + 1 + chunk.Offset = int64(len(chunks)) * chunkSize + chunk.Size = fileSize % chunkSize + chunks = append(chunks, chunk) + } + + return chunks, nil +} diff --git a/drivers/123/driver.go b/drivers/123/driver.go index bc1758a176b..6f7fec1bd43 100644 --- a/drivers/123/driver.go +++ b/drivers/123/driver.go @@ -6,11 +6,6 @@ import ( "encoding/base64" "encoding/hex" "fmt" - "io" - "net/http" - "net/url" - "os" - "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -22,6 +17,9 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/go-resty/resty/v2" log "github.com/sirupsen/logrus" + "io" + "net/http" + "net/url" ) type Pan123 struct { @@ -184,13 +182,12 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr // const DEFAULT int64 = 10485760 h := md5.New() // need to calculate md5 of the full content - tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize()) + tempFile, err := stream.CacheFullInTempFile() if err != nil { return err } defer func() { _ = tempFile.Close() - _ = os.Remove(tempFile.Name()) }() if _, err = io.Copy(h, tempFile); err != nil { return err diff --git a/drivers/123/types.go b/drivers/123/types.go index bc7cbf3d5b2..b79be12e201 100644 --- a/drivers/123/types.go +++ b/drivers/123/types.go @@ -1,6 +1,7 @@ package _123 import ( + "github.com/alist-org/alist/v3/pkg/utils" "net/url" "path" "strconv" @@ -21,6 +22,14 @@ type File struct { DownloadUrl string `json:"DownloadUrl"` } +func (f File) CreateTime() time.Time { + return f.UpdateAt +} + +func (f File) GetHash() utils.HashInfo { + return utils.HashInfo{} +} + func (f File) GetPath() string { return "" } diff --git a/drivers/123_share/types.go b/drivers/123_share/types.go index cd96b7556b6..e8ca9e77440 100644 --- a/drivers/123_share/types.go +++ b/drivers/123_share/types.go @@ -1,6 +1,7 @@ package _123Share import ( + "github.com/alist-org/alist/v3/pkg/utils" "net/url" "path" "strconv" @@ -21,6 +22,10 @@ type File struct { DownloadUrl string `json:"DownloadUrl"` } +func (f File) GetHash() utils.HashInfo { + return 
utils.HashInfo{} +} + func (f File) GetPath() string { return "" } @@ -36,6 +41,9 @@ func (f File) GetName() string { func (f File) ModTime() time.Time { return f.UpdateAt } +func (f File) CreateTime() time.Time { + return f.UpdateAt +} func (f File) IsDir() bool { return f.Type == 1 diff --git a/drivers/189pc/types.go b/drivers/189pc/types.go index 1087d33b568..d779659ed16 100644 --- a/drivers/189pc/types.go +++ b/drivers/189pc/types.go @@ -3,6 +3,7 @@ package _189pc import ( "encoding/xml" "fmt" + "github.com/alist-org/alist/v3/pkg/utils" "sort" "strings" "time" @@ -175,6 +176,14 @@ type Cloud189File struct { // StarLabel int64 `json:"starLabel"` } +func (c *Cloud189File) CreateTime() time.Time { + return time.Time(c.CreateDate) +} + +func (c *Cloud189File) GetHash() utils.HashInfo { + return utils.NewHashInfo(utils.MD5, c.Md5) +} + func (c *Cloud189File) GetSize() int64 { return c.Size } func (c *Cloud189File) GetName() string { return c.Name } func (c *Cloud189File) ModTime() time.Time { return time.Time(c.LastOpTime) } @@ -199,6 +208,14 @@ type Cloud189Folder struct { // StarLabel int64 `json:"starLabel"` } +func (c *Cloud189Folder) CreateTime() time.Time { + return time.Time(c.CreateDate) +} + +func (c *Cloud189Folder) GetHash() utils.HashInfo { + return utils.HashInfo{} +} + func (c *Cloud189Folder) GetSize() int64 { return 0 } func (c *Cloud189Folder) GetName() string { return c.Name } func (c *Cloud189Folder) ModTime() time.Time { return time.Time(c.LastOpTime) } diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index a35a0efdc0d..386dbc59a57 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -13,7 +13,6 @@ import ( "net/http" "net/http/cookiejar" "net/url" - "os" "regexp" "sort" "strconv" @@ -550,13 +549,12 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo // 快传 func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { // 需要获取完整文件md5,必须支持 io.Seek - tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize()) + tempFile, err := file.CacheFullInTempFile() if err != nil { return nil, err } defer func() { _ = tempFile.Close() - _ = os.Remove(tempFile.Name()) }() var sliceSize = partSize(file.GetSize()) @@ -742,13 +740,12 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string // 旧版本上传,家庭云不支持覆盖 func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { // 需要获取完整文件md5,必须支持 io.Seek - tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize()) + tempFile, err := file.CacheFullInTempFile() if err != nil { return nil, err } defer func() { _ = tempFile.Close() - _ = os.Remove(tempFile.Name()) }() // 计算md5 diff --git a/drivers/alist_v3/driver.go b/drivers/alist_v3/driver.go index 30141d546f7..75208408a68 100644 --- a/drivers/alist_v3/driver.go +++ b/drivers/alist_v3/driver.go @@ -3,6 +3,7 @@ package alist_v3 import ( "context" "fmt" + "io" "net/http" "path" "strconv" @@ -176,7 +177,7 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt SetHeader("Password", d.MetaPassword). SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)). SetContentLength(true). 
- SetBody(stream.GetReadCloser()) + SetBody(io.ReadCloser(stream)) }) return err } diff --git a/drivers/aliyundrive/driver.go b/drivers/aliyundrive/driver.go index ce8487aec5c..f11452629a4 100644 --- a/drivers/aliyundrive/driver.go +++ b/drivers/aliyundrive/driver.go @@ -7,6 +7,7 @@ import ( "encoding/base64" "encoding/hex" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "io" "math" "math/big" @@ -67,7 +68,7 @@ func (d *AliDrive) Init(ctx context.Context) error { return nil } // init deviceID - deviceID := utils.GetSHA256Encode([]byte(d.UserID)) + deviceID := utils.HashData(utils.SHA256, []byte(d.UserID)) // init privateKey privateKey, _ := NewPrivateKeyFromHex(deviceID) state := State{ @@ -163,14 +164,14 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - file := model.FileStream{ - Obj: stream, - ReadCloser: stream, - Mimetype: stream.GetMimetype(), +func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error { + file := stream.FileStream{ + Obj: streamer, + Reader: streamer, + Mimetype: streamer.GetMimetype(), } const DEFAULT int64 = 10485760 - var count = int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT))) + var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT))) partInfoList := make([]base.Json, 0, count) for i := 1; i <= count; i++ { @@ -187,25 +188,25 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS } var localFile *os.File - if fileStream, ok := file.ReadCloser.(*model.FileStream); ok { - localFile, _ = fileStream.ReadCloser.(*os.File) + if fileStream, ok := file.Reader.(*stream.FileStream); ok { + localFile, _ = fileStream.Reader.(*os.File) } if d.RapidUpload { buf := bytes.NewBuffer(make([]byte, 0, 1024)) io.CopyN(buf, file, 1024) - reqBody["pre_hash"] = utils.GetSHA1Encode(buf.Bytes()) + reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes()) if localFile != nil { if _, err := localFile.Seek(0, io.SeekStart); err != nil { return err } } else { // 把头部拼接回去 - file.ReadCloser = struct { + file.Reader = struct { io.Reader io.Closer }{ Reader: io.MultiReader(buf, file), - Closer: file, + Closer: &file, } } } else { @@ -281,7 +282,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS if _, err = localFile.Seek(0, io.SeekStart); err != nil { return err } - file.ReadCloser = localFile + file.Reader = localFile } for i, partInfo := range resp.PartInfoList { diff --git a/drivers/aliyundrive_open/types.go b/drivers/aliyundrive_open/types.go index 3ae5961cc29..46830a51336 100644 --- a/drivers/aliyundrive_open/types.go +++ b/drivers/aliyundrive_open/types.go @@ -1,6 +1,7 @@ package aliyundrive_open import ( + "github.com/alist-org/alist/v3/pkg/utils" "time" "github.com/alist-org/alist/v3/internal/model" @@ -46,6 +47,8 @@ func fileToObj(f File) *model.ObjThumb { Size: f.Size, Modified: f.UpdatedAt, IsFolder: f.Type == "folder", + Ctime: f.CreatedAt, + HashInfo: utils.NewHashInfo(utils.SHA1, f.ContentHash), }, Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail}, } diff --git a/drivers/aliyundrive_open/upload.go b/drivers/aliyundrive_open/upload.go index 9a69c7ca260..1f96a9d573b 100644 --- a/drivers/aliyundrive_open/upload.go +++ b/drivers/aliyundrive_open/upload.go @@ -3,14 +3,12 @@ package aliyundrive_open import ( "bytes" "context" - "crypto/sha1" "encoding/base64" - 
"encoding/hex" "fmt" + "github.com/alist-org/alist/v3/pkg/http_range" "io" "math" "net/http" - "os" "strconv" "strings" "time" @@ -33,19 +31,19 @@ func makePartInfos(size int) []base.Json { } func calPartSize(fileSize int64) int64 { - var partSize int64 = 20 * 1024 * 1024 + var partSize int64 = 20 * utils.MB if fileSize > partSize { - if fileSize > 1*1024*1024*1024*1024 { // file Size over 1TB - partSize = 5 * 1024 * 1024 * 1024 // file part size 5GB - } else if fileSize > 768*1024*1024*1024 { // over 768GB + if fileSize > 1*utils.TB { // file Size over 1TB + partSize = 5 * utils.GB // file part size 5GB + } else if fileSize > 768*utils.GB { // over 768GB partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part - } else if fileSize > 512*1024*1024*1024 { // over 512GB + } else if fileSize > 512*utils.GB { // over 512GB partSize = 82463373 // ≈ 78.6432MB - } else if fileSize > 384*1024*1024*1024 { // over 384GB + } else if fileSize > 384*utils.GB { // over 384GB partSize = 54975582 // ≈ 52.4288MB - } else if fileSize > 256*1024*1024*1024 { // over 256GB + } else if fileSize > 256*utils.GB { // over 256GB partSize = 41231687 // ≈ 39.3216MB - } else if fileSize > 128*1024*1024*1024 { // over 128GB + } else if fileSize > 128*utils.GB { // over 128GB partSize = 27487791 // ≈ 26.2144MB } } @@ -127,17 +125,22 @@ func getProofRange(input string, size int64) (*ProofRange, error) { return pr, nil } -func (d *AliyundriveOpen) calProofCode(file *os.File, fileSize int64) (string, error) { - proofRange, err := getProofRange(d.AccessToken, fileSize) +func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) { + proofRange, err := getProofRange(d.AccessToken, stream.GetSize()) if err != nil { return "", err } - buf := make([]byte, proofRange.End-proofRange.Start) - _, err = file.ReadAt(buf, proofRange.Start) + length := proofRange.End - proofRange.Start + buf := bytes.NewBuffer(make([]byte, 0, length)) + reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length}) if err != nil { return "", err } - return base64.StdEncoding.EncodeToString(buf), nil + _, err = io.CopyN(buf, reader, length) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(buf.Bytes()), nil } func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { @@ -145,70 +148,68 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m // Part Size Unit: Bytes, Default: 20MB, // Maximum number of slices 10,000, ≈195.3125GB var partSize = calPartSize(stream.GetSize()) + const dateFormat = "2006-01-02T15:04:05.88Z" + mtime := stream.ModTime() + mtimeStr := mtime.UTC().Format(dateFormat) + ctimeStr := stream.CreateTime().UTC().Format(dateFormat) + createData := base.Json{ - "drive_id": d.DriveId, - "parent_file_id": dstDir.GetID(), - "name": stream.GetName(), - "type": "file", - "check_name_mode": "ignore", + "drive_id": d.DriveId, + "parent_file_id": dstDir.GetID(), + "name": stream.GetName(), + "type": "file", + "check_name_mode": "ignore", + "local_modified_at": mtimeStr, + "local_created_at": ctimeStr, } count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize))) createData["part_info_list"] = makePartInfos(count) // rapid upload - rapidUpload := stream.GetSize() > 100*1024 && d.RapidUpload + rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload if rapidUpload { log.Debugf("[aliyundrive_open] start cal pre_hash") // read 1024 
bytes to calculate pre hash - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - _, err := io.CopyN(buf, stream, 1024) + reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024}) if err != nil { return nil, err } - createData["size"] = stream.GetSize() - createData["pre_hash"] = utils.GetSHA1Encode(buf.Bytes()) - // if support seek, seek to start - if localFile, ok := stream.(io.Seeker); ok { - if _, err := localFile.Seek(0, io.SeekStart); err != nil { - return nil, err - } - } else { - // Put spliced head back to stream - stream.SetReadCloser(struct { - io.Reader - io.Closer - }{ - Reader: io.MultiReader(buf, stream.GetReadCloser()), - Closer: stream.GetReadCloser(), - }) + hash, err := utils.HashReader(utils.SHA1, reader) + if err != nil { + return nil, err } + createData["size"] = stream.GetSize() + createData["pre_hash"] = hash } var createResp CreateResp _, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) { req.SetBody(createData).SetResult(&createResp) }) + var tmpF model.File if err != nil { if e.Code != "PreHashMatched" || !rapidUpload { return nil, err } log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload") - // convert to local file - file, err := utils.CreateTempFile(stream, stream.GetSize()) - if err != nil { - return nil, err - } - _ = stream.GetReadCloser().Close() - stream.SetReadCloser(file) - // calculate full hash - h := sha1.New() - _, err = io.Copy(h, file) - if err != nil { - return nil, err + + hi := stream.GetHash() + hash := hi.GetHash(utils.SHA1) + if len(hash) <= 0 { + tmpF, err = stream.CacheFullInTempFile() + if err != nil { + return nil, err + } + hash, err = utils.HashFile(utils.SHA1, tmpF) + if err != nil { + return nil, err + } + } + delete(createData, "pre_hash") createData["proof_version"] = "v1" createData["content_hash_name"] = "sha1" - createData["content_hash"] = hex.EncodeToString(h.Sum(nil)) - createData["proof_code"], err = d.calProofCode(file, stream.GetSize()) + createData["content_hash"] = hash + createData["proof_code"], err = d.calProofCode(stream) if err != nil { return nil, fmt.Errorf("cal proof code error: %s", err.Error()) } @@ -218,17 +219,15 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m if err != nil { return nil, err } - // seek to start - if _, err = file.Seek(0, io.SeekStart); err != nil { - return nil, err - } } if !createResp.RapidUpload { - // 2. upload + // 2. 
normal upload log.Debugf("[aliyundive_open] normal upload") preTime := time.Now() + var offset, length int64 = 0, partSize + //var length for i := 0; i < len(createResp.PartInfoList); i++ { if utils.IsCanceled(ctx) { return nil, ctx.Err() @@ -241,9 +240,16 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m } preTime = time.Now() } - rd := utils.NewMultiReadable(io.LimitReader(stream, partSize)) + if remain := stream.GetSize() - offset; length > remain { + length = remain + } + //rd := utils.NewMultiReadable(io.LimitReader(stream, partSize)) + rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length}) + if err != nil { + return nil, err + } err = retry.Do(func() error { - rd.Reset() + //rd.Reset() return d.uploadPart(ctx, rd, createResp.PartInfoList[i]) }, retry.Attempts(3), @@ -252,6 +258,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m if err != nil { return nil, err } + offset += partSize } } else { log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId) diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 3066843e84a..2bd59978ead 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -9,7 +9,6 @@ import ( "io" "math" "net/url" - "os" stdpath "path" "strconv" "time" @@ -31,7 +30,7 @@ type BaiduNetdisk struct { uploadThread int } -const DefaultSliceSize int64 = 4 * 1024 * 1024 +const DefaultSliceSize int64 = 4 * utils.MB func (d *BaiduNetdisk) Config() driver.Config { return config @@ -81,7 +80,7 @@ func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.Link func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { var newDir File - _, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir) + _, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir, 0, 0) if err != nil { return nil, err } @@ -148,14 +147,10 @@ func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error { } func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { - tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize()) + tempFile, err := stream.CacheFullInTempFile() if err != nil { return nil, err } - defer func() { - _ = tempFile.Close() - _ = os.Remove(tempFile.Name()) - }() streamSize := stream.GetSize() count := int(math.Max(math.Ceil(float64(streamSize)/float64(DefaultSliceSize)), 1)) @@ -194,15 +189,15 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName()) path := encodeURIComponent(rawPath) + mtime := stream.ModTime().Unix() + ctime := stream.CreateTime().Unix() // step.1 预上传 // 尝试获取之前的进度 precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5) if !ok { - data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&rtype=3&block_list=%s&content-md5=%s&slice-md5=%s", - path, streamSize, - blockListStr, - contentMd5, sliceMd5) + data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&rtype=3&block_list=%s&content-md5=%s&slice-md5=%s&local_mtime=%d&local_ctime=%d", + path, streamSize, blockListStr, contentMd5, sliceMd5, mtime, ctime) params := map[string]string{ "method": "precreate", } @@ -263,7 +258,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream 
model.F // step.3 创建文件 var newFile File - _, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile) + _, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime) if err != nil { return nil, err } diff --git a/drivers/baidu_netdisk/types.go b/drivers/baidu_netdisk/types.go index 4effe70f853..81a491129f0 100644 --- a/drivers/baidu_netdisk/types.go +++ b/drivers/baidu_netdisk/types.go @@ -1,6 +1,7 @@ package baidu_netdisk import ( + "github.com/alist-org/alist/v3/pkg/utils" "path" "strconv" "time" @@ -40,11 +41,11 @@ type File struct { Isdir int `json:"isdir"` // list resp - //ServerCtime int64 `json:"server_ctime"` + ServerCtime int64 `json:"server_ctime"` ServerMtime int64 `json:"server_mtime"` - //ServerAtime int64 `json:"server_atime"` - //LocalCtime int64 `json:"local_ctime"` - //LocalMtime int64 `json:"local_mtime"` + LocalMtime int64 `json:"local_mtime"` + LocalCtime int64 `json:"local_ctime"` + //ServerAtime int64 `json:"server_atime"` ` // only create and precreate resp Ctime int64 `json:"ctime"` @@ -55,8 +56,11 @@ func fileToObj(f File) *model.ObjThumb { if f.ServerFilename == "" { f.ServerFilename = path.Base(f.Path) } - if f.ServerMtime == 0 { - f.ServerMtime = int64(f.Mtime) + if f.LocalCtime == 0 { + f.LocalCtime = f.Ctime + } + if f.LocalMtime == 0 { + f.LocalMtime = f.Mtime } return &model.ObjThumb{ Object: model.Object{ @@ -64,8 +68,10 @@ func fileToObj(f File) *model.ObjThumb { Path: f.Path, Name: f.ServerFilename, Size: f.Size, - Modified: time.Unix(f.ServerMtime, 0), + Modified: time.Unix(f.LocalMtime, 0), + Ctime: time.Unix(f.LocalCtime, 0), IsFolder: f.Isdir == 1, + HashInfo: utils.NewHashInfo(utils.MD5, f.Md5), }, Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3}, } diff --git a/drivers/baidu_netdisk/util.go b/drivers/baidu_netdisk/util.go index 81b798e561d..e876e819aca 100644 --- a/drivers/baidu_netdisk/util.go +++ b/drivers/baidu_netdisk/util.go @@ -198,11 +198,17 @@ func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) { return d.post("/xpan/file", params, data, nil) } -func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any) ([]byte, error) { +func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any, mtime, ctime int64) ([]byte, error) { params := map[string]string{ "method": "create", } - data := fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir) + data := "" + if mtime == 0 || ctime == 0 { + data = fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir) + } else { + data = fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3&local_mtime=%d&local_ctime=%d", encodeURIComponent(path), size, isdir, mtime, ctime) + } + if uploadid != "" { data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list) } diff --git a/drivers/baidu_photo/driver.go b/drivers/baidu_photo/driver.go index 3ff3bc6ec09..0b622872b8f 100644 --- a/drivers/baidu_photo/driver.go +++ b/drivers/baidu_photo/driver.go @@ -8,7 +8,6 @@ import ( "fmt" "io" "math" - "os" "regexp" "strconv" "strings" @@ -229,13 +228,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil } // 需要获取完整文件md5,必须支持 io.Seek - tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize()) + tempFile, err := stream.CacheFullInTempFile() if err != nil { return nil, err } defer func() { _ = tempFile.Close() - _ = 
os.Remove(tempFile.Name()) }() const DEFAULT int64 = 1 << 22 diff --git a/drivers/baidu_photo/help.go b/drivers/baidu_photo/help.go index d689f81d05f..40588ee99ee 100644 --- a/drivers/baidu_photo/help.go +++ b/drivers/baidu_photo/help.go @@ -61,12 +61,12 @@ func moveFileToAlbumFile(file *File, album *Album, uk int64) *AlbumFile { func renameAlbum(album *Album, newName string) *Album { return &Album{ - AlbumID: album.AlbumID, - Tid: album.Tid, - JoinTime: album.JoinTime, - CreateTime: album.CreateTime, - Title: newName, - Mtime: time.Now().Unix(), + AlbumID: album.AlbumID, + Tid: album.Tid, + JoinTime: album.JoinTime, + CreationTime: album.CreationTime, + Title: newName, + Mtime: time.Now().Unix(), } } diff --git a/drivers/baidu_photo/types.go b/drivers/baidu_photo/types.go index 7ac66570d46..f89aedae84f 100644 --- a/drivers/baidu_photo/types.go +++ b/drivers/baidu_photo/types.go @@ -2,6 +2,7 @@ package baiduphoto import ( "fmt" + "github.com/alist-org/alist/v3/pkg/utils" "time" "github.com/alist-org/alist/v3/internal/model" @@ -73,6 +74,13 @@ func (c *File) Thumb() string { } return "" } +func (c *File) CreateTime() time.Time { + return time.Unix(c.Ctime, 0) +} + +func (c *File) GetHash() utils.HashInfo { + return utils.HashInfo{} +} /*相册部分*/ type ( @@ -84,12 +92,12 @@ type ( } Album struct { - AlbumID string `json:"album_id"` - Tid int64 `json:"tid"` - Title string `json:"title"` - JoinTime int64 `json:"join_time"` - CreateTime int64 `json:"create_time"` - Mtime int64 `json:"mtime"` + AlbumID string `json:"album_id"` + Tid int64 `json:"tid"` + Title string `json:"title"` + JoinTime int64 `json:"join_time"` + CreationTime int64 `json:"create_time"` + Mtime int64 `json:"mtime"` parseTime *time.Time } @@ -109,6 +117,14 @@ type ( } ) +func (a *Album) CreateTime() time.Time { + return time.Unix(a.CreationTime, 0) +} + +func (a *Album) GetHash() utils.HashInfo { + return utils.HashInfo{} +} + func (a *Album) GetSize() int64 { return 0 } func (a *Album) GetName() string { return a.Title } func (a *Album) ModTime() time.Time { diff --git a/drivers/cloudreve/driver.go b/drivers/cloudreve/driver.go index 22407d0c3e6..030de7c279d 100644 --- a/drivers/cloudreve/driver.go +++ b/drivers/cloudreve/driver.go @@ -115,7 +115,7 @@ func (d *Cloudreve) Remove(ctx context.Context, obj model.Obj) error { } func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - if stream.GetReadCloser() == http.NoBody { + if io.ReadCloser(stream) == http.NoBody { return d.create(ctx, dstDir, stream) } var r DirectoryResp diff --git a/drivers/crypt/driver.go b/drivers/crypt/driver.go index a2d5a17c3e9..cf33c357f83 100644 --- a/drivers/crypt/driver.go +++ b/drivers/crypt/driver.go @@ -3,8 +3,8 @@ package crypt import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "io" - "net/http" stdpath "path" "regexp" "strings" @@ -13,7 +13,6 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/net" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" @@ -82,7 +81,6 @@ func (d *Crypt) Init(ctx context.Context) error { } d.cipher = c - //c, err := rcCrypt.newCipher(rcCrypt.NameEncryptionStandard, "", "", true, nil) return nil } @@ -128,6 +126,8 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([ Size: 0, 
Modified: obj.ModTime(), IsFolder: obj.IsDir(), + Ctime: obj.CreateTime(), + // discarding hash as it's encrypted } result = append(result, &objRes) } else { @@ -147,6 +147,8 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([ Size: size, Modified: obj.ModTime(), IsFolder: obj.IsDir(), + Ctime: obj.CreateTime(), + // discarding hash as it's encrypted } if !ok { result = append(result, &objRes) @@ -232,70 +234,53 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( return nil, err } - if remoteLink.RangeReadCloser.RangeReader == nil && remoteLink.ReadSeekCloser == nil && len(remoteLink.URL) == 0 { + if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 { return nil, fmt.Errorf("the remote storage driver need to be enhanced to support encrytion") } remoteFileSize := remoteFile.GetSize() - remoteClosers := utils.NewClosers() + remoteClosers := utils.EmptyClosers() rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) { length := underlyingLength if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize { length = -1 } - if remoteLink.RangeReadCloser.RangeReader != nil { - //remoteRangeReader, err := - remoteReader, err := remoteLink.RangeReadCloser.RangeReader(http_range.Range{Start: underlyingOffset, Length: length}) - remoteClosers.Add(remoteLink.RangeReadCloser.Closers) + rrc := remoteLink.RangeReadCloser + if len(remoteLink.URL) > 0 { + + rangedRemoteLink := &model.Link{ + URL: remoteLink.URL, + Header: remoteLink.Header, + } + var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink) if err != nil { return nil, err } - return remoteReader, nil + rrc = converted } - if remoteLink.ReadSeekCloser != nil { - _, err := remoteLink.ReadSeekCloser.Seek(underlyingOffset, io.SeekStart) + if rrc != nil { + //remoteRangeReader, err := + remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length}) + remoteClosers.AddClosers(rrc.GetClosers()) if err != nil { return nil, err } - //remoteClosers.Add(remoteLink.ReadSeekCloser) - //keep reuse same ReadSeekCloser and close at last. - return io.NopCloser(remoteLink.ReadSeekCloser), nil + return remoteReader, nil } - if len(remoteLink.URL) > 0 { - rangedRemoteLink := &model.Link{ - URL: remoteLink.URL, - Header: remoteLink.Header, - } - response, err := RequestRangedHttp(args.HttpReq, rangedRemoteLink, underlyingOffset, length) - //remoteClosers.Add(response.Body) + if remoteLink.MFile != nil { + _, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart) if err != nil { - return nil, fmt.Errorf("remote storage http request failure,status: %d err:%s", response.StatusCode, err) - } - if underlyingOffset == 0 && length == -1 || response.StatusCode == http.StatusPartialContent { - return response.Body, nil - } else if response.StatusCode == http.StatusOK { - log.Warnf("remote http server not supporting range request, expect low perfromace!") - readCloser, err := net.GetRangedHttpReader(response.Body, underlyingOffset, length) - if err != nil { - return nil, err - } - return readCloser, nil + return nil, err } - - return response.Body, nil + //remoteClosers.Add(remoteLink.MFile) + //keep reuse same MFile and close at last. 
+ remoteClosers.Add(remoteLink.MFile) + return io.NopCloser(remoteLink.MFile), nil } - //if remoteLink.Data != nil { - // log.Warnf("remote storage not supporting range request, expect low perfromace!") - // readCloser, err := net.GetRangedHttpReader(remoteLink.Data, underlyingOffset, length) - // remoteCloser = remoteLink.Data - // if err != nil { - // return nil, err - // } - // return readCloser, nil - //} + return nil, errs.NotSupport } - resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) { + resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length) if err != nil { return nil, err @@ -306,7 +291,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers} resultLink := &model.Link{ Header: remoteLink.Header, - RangeReadCloser: *resultRangeReadCloser, + RangeReadCloser: resultRangeReadCloser, Expiration: remoteLink.Expiration, } @@ -370,32 +355,32 @@ func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error { return op.Remove(ctx, d.remoteStorage, remoteActualPath) } -func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error { dstDirActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true) if err != nil { return fmt.Errorf("failed to convert path to remote path: %w", err) } - in := stream.GetReadCloser() // Encrypt the data into wrappedIn - wrappedIn, err := d.cipher.EncryptData(in) + wrappedIn, err := d.cipher.EncryptData(streamer) if err != nil { return fmt.Errorf("failed to EncryptData: %w", err) } - streamOut := &model.FileStream{ + // doesn't support seekableStream, since rapid-upload is not working for encrypted data + streamOut := &stream.FileStream{ Obj: &model.Object{ - ID: stream.GetID(), - Path: stream.GetPath(), - Name: d.cipher.EncryptFileName(stream.GetName()), - Size: d.cipher.EncryptedSize(stream.GetSize()), - Modified: stream.ModTime(), - IsFolder: stream.IsDir(), + ID: streamer.GetID(), + Path: streamer.GetPath(), + Name: d.cipher.EncryptFileName(streamer.GetName()), + Size: d.cipher.EncryptedSize(streamer.GetSize()), + Modified: streamer.ModTime(), + IsFolder: streamer.IsDir(), }, - ReadCloser: io.NopCloser(wrappedIn), + Reader: wrappedIn, Mimetype: "application/octet-stream", - WebPutAsTask: stream.NeedStore(), - Old: stream.GetOld(), + WebPutAsTask: streamer.NeedStore(), + Exist: streamer.GetExist(), } err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false) if err != nil { diff --git a/drivers/crypt/util.go b/drivers/crypt/util.go index f4246756792..3e55fb37ac1 100644 --- a/drivers/crypt/util.go +++ b/drivers/crypt/util.go @@ -1,24 +1,13 @@ package crypt import ( - "net/http" stdpath "path" "path/filepath" "strings" - "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/net" "github.com/alist-org/alist/v3/internal/op" - "github.com/alist-org/alist/v3/pkg/http_range" ) -func RequestRangedHttp(r *http.Request, link *model.Link, offset, length int64) (*http.Response, error) { - header := net.ProcessHeader(http.Header{}, link.Header) - header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, 
header)
-
-	return net.RequestHttp("GET", header, link.URL)
-}
-
 // will give the best guessing based on the path
 func guessPath(path string) (isFolder, secondTry bool) {
 	if strings.HasSuffix(path, "/") {

diff --git a/drivers/ftp/driver.go b/drivers/ftp/driver.go
index 03606bc931d..b8deae8afa1 100644
--- a/drivers/ftp/driver.go
+++ b/drivers/ftp/driver.go
@@ -66,7 +66,7 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
 	r := NewFTPFileReader(d.conn, file.GetPath())
 	link := &model.Link{
-		ReadSeekCloser: r,
+		MFile: r,
 	}
 	return link, nil
 }

diff --git a/drivers/ftp/util.go b/drivers/ftp/util.go
index 3abffd932e7..d90d2cd7f9d 100644
--- a/drivers/ftp/util.go
+++ b/drivers/ftp/util.go
@@ -30,13 +30,14 @@ func (d *FTP) login() error {
 	return nil
 }
 
-// An FTP file reader that implements io.ReadSeekCloser for seeking.
+// An FTP file reader that implements model.File for seeking.
 type FTPFileReader struct {
-	conn   *ftp.ServerConn
-	resp   *ftp.Response
-	offset int64
-	mu     sync.Mutex
-	path   string
+	conn         *ftp.ServerConn
+	resp         *ftp.Response
+	offset       int64
+	readAtOffset int64
+	mu           sync.Mutex
+	path         string
 }
 
 func NewFTPFileReader(conn *ftp.ServerConn, path string) *FTPFileReader {
@@ -50,15 +51,33 @@ func (r *FTPFileReader) Read(buf []byte) (n int, err error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 
+	n, err = r.ReadAt(buf, r.offset)
+	r.offset += int64(n)
+	return
+}
+func (r *FTPFileReader) ReadAt(buf []byte, off int64) (n int, err error) {
+	if off < 0 {
+		return -1, os.ErrInvalid
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if off != r.readAtOffset {
+		// have to restart the connection to correct the offset
+		_ = r.resp.Close()
+		r.resp = nil
+	}
+
 	if r.resp == nil {
-		r.resp, err = r.conn.RetrFrom(r.path, uint64(r.offset))
+		r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
+		r.readAtOffset = off
 		if err != nil {
 			return 0, err
 		}
 	}
 
 	n, err = r.resp.Read(buf)
-	r.offset += int64(n)
+	r.readAtOffset += int64(n)
 	return
 }
 
@@ -92,12 +111,6 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
 		return oldOffset, nil
 	}
 	r.offset = newOffset
-
-	if r.resp != nil {
-		// close the existing ftp data connection, otherwise the next read will be blocked
-		_ = r.resp.Close() // we do not care about whether it returns an error
-		r.resp = nil
-	}
 	return newOffset, nil
 }

diff --git a/drivers/google_drive/driver.go b/drivers/google_drive/driver.go
index cf573d9366b..dccdcea902f 100644
--- a/drivers/google_drive/driver.go
+++ b/drivers/google_drive/driver.go
@@ -112,7 +112,7 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	obj := stream.GetOld()
+	obj := stream.GetExist()
 	var (
 		e   Error
 		url string
@@ -158,7 +158,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	putUrl := res.Header().Get("location")
 	if stream.GetSize() < d.ChunkSize*1024*1024 {
 		_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
-			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
+			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
 		}, nil)
 	} else {
 		err = d.chunkUpload(ctx, stream, putUrl)

diff --git a/drivers/google_drive/util.go b/drivers/google_drive/util.go
index 0168b21c329..5637d00e71d 100644
--- a/drivers/google_drive/util.go
+++ b/drivers/google_drive/util.go
@@ -5,7 +5,7 @@ import (
 	"crypto/x509"
"encoding/pem" "fmt" - "io" + "github.com/alist-org/alist/v3/pkg/http_range" "io/ioutil" "net/http" "os" @@ -216,25 +216,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) { func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error { var defaultChunkSize = d.ChunkSize * 1024 * 1024 - var finish int64 = 0 - for finish < stream.GetSize() { + var offset int64 = 0 + for offset < stream.GetSize() { if utils.IsCanceled(ctx) { return ctx.Err() } - chunkSize := stream.GetSize() - finish + chunkSize := stream.GetSize() - offset if chunkSize > defaultChunkSize { chunkSize = defaultChunkSize } - _, err := d.request(url, http.MethodPut, func(req *resty.Request) { + reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize}) + if err != nil { + return err + } + _, err = d.request(url, http.MethodPut, func(req *resty.Request) { req.SetHeaders(map[string]string{ "Content-Length": strconv.FormatInt(chunkSize, 10), - "Content-Range": fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()), - }).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx) + "Content-Range": fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()), + }).SetBody(reader).SetContext(ctx) }, nil) if err != nil { return err } - finish += chunkSize + offset += chunkSize } return nil } diff --git a/drivers/google_photo/driver.go b/drivers/google_photo/driver.go index aab3b5d96d2..b54132ef9ed 100644 --- a/drivers/google_photo/driver.go +++ b/drivers/google_photo/driver.go @@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi } resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) { - req.SetBody(stream.GetReadCloser()).SetContext(ctx) + req.SetBody(stream).SetContext(ctx) }, nil, postHeaders) if err != nil { diff --git a/drivers/lanzou/types.go b/drivers/lanzou/types.go index 2e2daf461a5..d03838ddf7a 100644 --- a/drivers/lanzou/types.go +++ b/drivers/lanzou/types.go @@ -3,6 +3,8 @@ package lanzou import ( "errors" "fmt" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" "time" ) @@ -18,6 +20,9 @@ type RespInfo[T any] struct { Info T `json:"info"` } +var _ model.Obj = (*FileOrFolder)(nil) +var _ model.Obj = (*FileOrFolderByShareUrl)(nil) + type FileOrFolder struct { Name string `json:"name"` //Onof string `json:"onof"` // 是否存在提取码 @@ -49,6 +54,14 @@ type FileOrFolder struct { shareInfo *FileShare `json:"-"` } +func (f *FileOrFolder) CreateTime() time.Time { + return f.ModTime() +} + +func (f *FileOrFolder) GetHash() utils.HashInfo { + return utils.HashInfo{} +} + func (f *FileOrFolder) GetID() string { if f.IsDir() { return f.FolID @@ -130,6 +143,14 @@ type FileOrFolderByShareUrl struct { repairFlag bool `json:"-"` } +func (f *FileOrFolderByShareUrl) CreateTime() time.Time { + return f.ModTime() +} + +func (f *FileOrFolderByShareUrl) GetHash() utils.HashInfo { + return utils.HashInfo{} +} + func (f *FileOrFolderByShareUrl) GetID() string { return f.ID } func (f *FileOrFolderByShareUrl) GetName() string { return f.NameAll } func (f *FileOrFolderByShareUrl) GetPath() string { return "" } diff --git a/drivers/local/driver.go b/drivers/local/driver.go index 7bd54638be2..bf63168cb66 100644 --- a/drivers/local/driver.go +++ b/drivers/local/driver.go @@ -5,15 +5,6 @@ import ( "context" "errors" "fmt" - "io" - "io/fs" - "net/http" - "os" - stdpath "path" - "path/filepath" - "strconv" - "strings" - 
"github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -21,7 +12,15 @@ import ( "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" + "github.com/djherbis/times" _ "golang.org/x/image/webp" + "io/fs" + "net/http" + "os" + stdpath "path" + "path/filepath" + "strconv" + "strings" ) type Local struct { @@ -102,6 +101,14 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo if !isFolder { size = f.Size() } + ctime := f.ModTime() + t, err := times.Stat(stdpath.Join(fullPath, f.Name())) + if err == nil { + if t.HasBirthTime() { + ctime = t.BirthTime() + } + } + file := model.ObjThumb{ Object: model.Object{ Path: filepath.Join(fullPath, f.Name()), @@ -109,6 +116,7 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo Modified: f.ModTime(), Size: size, IsFolder: isFolder, + Ctime: ctime, }, Thumbnail: model.Thumbnail{ Thumbnail: thumb, @@ -171,9 +179,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( if err != nil { return nil, err } - link.ReadSeekCloser = open + link.MFile = open } else { - link.ReadSeekCloser = utils.ReadSeekerNopCloser(bytes.NewReader(buf.Bytes())) + link.MFile = model.NewNopMFile(bytes.NewReader(buf.Bytes())) //link.Header.Set("Content-Length", strconv.Itoa(buf.Len())) } } else { @@ -181,15 +189,7 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( if err != nil { return nil, err } - link.ReadSeekCloser = struct { - io.Reader - io.Seeker - io.Closer - }{ - Reader: open, - Seeker: open, - Closer: open, - } + link.MFile = open } return &link, nil } diff --git a/drivers/mediatrack/driver.go b/drivers/mediatrack/driver.go index eeed29ad1fc..90e66ae0e34 100644 --- a/drivers/mediatrack/driver.go +++ b/drivers/mediatrack/driver.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "net/http" - "os" "strconv" "time" @@ -181,13 +180,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil if err != nil { return err } - tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize()) + tempFile, err := stream.CacheFullInTempFile() if err != nil { return err } defer func() { _ = tempFile.Close() - _ = os.Remove(tempFile.Name()) }() uploader := s3manager.NewUploader(s) input := &s3manager.UploadInput{ diff --git a/drivers/mega/driver.go b/drivers/mega/driver.go index b329d4873b4..c1ae9f7f6c9 100644 --- a/drivers/mega/driver.go +++ b/drivers/mega/driver.go @@ -42,7 +42,7 @@ func (d *Mega) Drop(ctx context.Context) error { func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { if node, ok := dir.(*MegaNode); ok { - nodes, err := d.c.FS.GetChildren(node.Node) + nodes, err := d.c.FS.GetChildren(node.n) if err != nil { return nil, err } @@ -56,7 +56,7 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([] return res, nil } log.Errorf("can't convert: %+v", dir) - return nil, fmt.Errorf("unable to convert dir to mega node") + return nil, fmt.Errorf("unable to convert dir to mega n") } func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) { @@ -68,21 +68,21 @@ func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) { func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { if node, ok := file.(*MegaNode); ok { - //down, err := 
d.c.NewDownload(node.Node) + //down, err := d.c.NewDownload(node.n) //if err != nil { // return nil, fmt.Errorf("open download file failed: %w", err) //} size := file.GetSize() var finalClosers utils.Closers - resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) { + resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { length := httpRange.Length if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size { length = -1 } var down *mega.Download err := utils.Retry(3, time.Second, func() (err error) { - down, err = d.c.NewDownload(node.Node) + down, err = d.c.NewDownload(node.n) return err }) if err != nil { @@ -97,37 +97,37 @@ func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (* return readers.NewLimitedReadCloser(oo, length), nil } - resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: &finalClosers} + resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers} resultLink := &model.Link{ - RangeReadCloser: *resultRangeReadCloser, + RangeReadCloser: resultRangeReadCloser, } return resultLink, nil } return nil, fmt.Errorf("unable to convert dir to mega node") } func (d *Mega) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { if parentNode, ok := parentDir.(*MegaNode); ok { - _, err := d.c.CreateDir(dirName, parentNode.Node) + _, err := d.c.CreateDir(dirName, parentNode.n) return err } return fmt.Errorf("unable to convert dir to mega node") } func (d *Mega) Move(ctx context.Context, srcObj, dstDir model.Obj) error { if srcNode, ok := srcObj.(*MegaNode); ok { if dstNode, ok := dstDir.(*MegaNode); ok { - return d.c.Move(srcNode.Node, dstNode.Node) + return d.c.Move(srcNode.n, dstNode.n) } } return fmt.Errorf("unable to convert dir to mega node") } func (d *Mega) Rename(ctx context.Context, srcObj model.Obj, newName string) error { if srcNode, ok := srcObj.(*MegaNode); ok { - return d.c.Rename(srcNode.Node, newName) + return d.c.Rename(srcNode.n, newName) } return fmt.Errorf("unable to convert dir to mega node") } func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { @@ -136,14 +136,14 @@ func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { func (d *Mega) Remove(ctx context.Context, obj model.Obj) error { if node, ok := obj.(*MegaNode); ok { - return d.c.Delete(node.Node, false) + return d.c.Delete(node.n, false) } return fmt.Errorf("unable to convert dir to mega node") } func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { if dstNode, ok := dstDir.(*MegaNode); ok { - u, err := d.c.NewUpload(dstNode.Node, stream.GetName(), stream.GetSize()) + u, err := d.c.NewUpload(dstNode.n, stream.GetName(), stream.GetSize()) if err != nil { return err } @@ -175,7 +175,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea _, err = u.Finish() return err } return fmt.Errorf("unable to convert dir to mega node") } //func (d *Mega) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { diff --git 
a/drivers/mega/types.go b/drivers/mega/types.go index 4e7b3a9ad4c..3046d449c34 100644 --- a/drivers/mega/types.go +++ b/drivers/mega/types.go @@ -1,6 +1,7 @@ package mega import ( + "github.com/alist-org/alist/v3/pkg/utils" "time" "github.com/alist-org/alist/v3/internal/model" @@ -8,29 +9,36 @@ import ( ) type MegaNode struct { - *mega.Node + n *mega.Node } -//func (m *MegaNode) GetSize() int64 { -// //TODO implement me -// panic("implement me") -//} -// -//func (m *MegaNode) GetName() string { -// //TODO implement me -// panic("implement me") -//} +func (m *MegaNode) GetSize() int64 { + return m.n.GetSize() +} + +func (m *MegaNode) GetName() string { + return m.n.GetName() +} + +func (m *MegaNode) CreateTime() time.Time { + return m.n.GetTimeStamp() +} + +func (m *MegaNode) GetHash() utils.HashInfo { + //Mega uses MD5, but we can't get the original file hash because the file is encrypted in the cloud + return utils.HashInfo{} +} func (m *MegaNode) ModTime() time.Time { - return m.GetTimeStamp() + return m.n.GetTimeStamp() } func (m *MegaNode) IsDir() bool { - return m.GetType() == mega.FOLDER || m.GetType() == mega.ROOT + return m.n.GetType() == mega.FOLDER || m.n.GetType() == mega.ROOT } func (m *MegaNode) GetID() string { - return m.GetHash() + return m.n.GetHash() } func (m *MegaNode) GetPath() string { diff --git a/drivers/mopan/driver.go b/drivers/mopan/driver.go index 796a80cb41d..30852ebf13f 100644 --- a/drivers/mopan/driver.go +++ b/drivers/mopan/driver.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "net/http" - "os" "strconv" "time" @@ -231,13 +230,12 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error { } func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { - file, err := utils.CreateTempFile(stream, stream.GetSize()) + file, err := stream.CacheFullInTempFile() if err != nil { return nil, err } defer func() { _ = file.Close() - _ = os.Remove(file.Name()) }() // step.1 diff --git a/drivers/pikpak/driver.go b/drivers/pikpak/driver.go index a86a75390da..e23ad6bd1e6 100644 --- a/drivers/pikpak/driver.go +++ b/drivers/pikpak/driver.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "net/http" - "os" "strings" "github.com/alist-org/alist/v3/drivers/base" @@ -124,13 +123,12 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error { } func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize()) + tempFile, err := stream.CacheFullInTempFile() if err != nil { return err } defer func() { _ = tempFile.Close() - _ = os.Remove(tempFile.Name()) }() // cal gcid sha1Str, err := getGcid(tempFile, stream.GetSize()) diff --git a/drivers/quark_uc/driver.go b/drivers/quark_uc/driver.go index 4969af5a70e..7c254022a92 100644 --- a/drivers/quark_uc/driver.go +++ b/drivers/quark_uc/driver.go @@ -7,7 +7,6 @@ import ( "encoding/hex" "io" "net/http" - "os" "time" "github.com/alist-org/alist/v3/drivers/base" @@ -75,7 +74,7 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg "User-Agent": []string{ua}, }, Concurrency: 2, - PartSize: 10 * 1024 * 1024, + PartSize: 10 * utils.MB, }, nil } @@ -136,13 +135,12 @@ func (d *QuarkOrUC) Remove(ctx context.Context, obj model.Obj) error { } func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - tempFile, err := 
utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize()) + tempFile, err := stream.CacheFullInTempFile() if err != nil { return err } defer func() { _ = tempFile.Close() - _ = os.Remove(tempFile.Name()) }() m := md5.New() _, err = io.Copy(m, tempFile) diff --git a/drivers/s3/driver.go b/drivers/s3/driver.go index e888ecf8969..dd643f5d76e 100644 --- a/drivers/s3/driver.go +++ b/drivers/s3/driver.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "io" "net/url" stdpath "path" @@ -96,13 +97,13 @@ func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*mo func (d *S3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { return d.Put(ctx, &model.Object{ Path: stdpath.Join(parentDir.GetPath(), dirName), - }, &model.FileStream{ + }, &stream.FileStream{ Obj: &model.Object{ Name: getPlaceholderName(d.Placeholder), Modified: time.Now(), }, - ReadCloser: io.NopCloser(bytes.NewReader([]byte{})), - Mimetype: "application/octet-stream", + Reader: io.NopCloser(bytes.NewReader([]byte{})), + Mimetype: "application/octet-stream", }, func(int) {}) } diff --git a/drivers/sftp/driver.go b/drivers/sftp/driver.go index cf67ae0e792..77f5198457c 100644 --- a/drivers/sftp/driver.go +++ b/drivers/sftp/driver.go @@ -56,7 +56,7 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (* return nil, err } link := &model.Link{ - ReadSeekCloser: remoteFile, + MFile: remoteFile, } return link, nil } diff --git a/drivers/smb/driver.go b/drivers/smb/driver.go index 7e1be57df15..9632f24e0eb 100644 --- a/drivers/smb/driver.go +++ b/drivers/smb/driver.go @@ -61,6 +61,7 @@ func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m Modified: f.ModTime(), Size: f.Size(), IsFolder: f.IsDir(), + Ctime: f.(*smb2.FileStat).CreationTime, }, } files = append(files, &file) @@ -79,7 +80,7 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m return nil, err } link := &model.Link{ - ReadSeekCloser: remoteFile, + MFile: remoteFile, } d.updateLastConnTime() return link, nil diff --git a/drivers/terabox/driver.go b/drivers/terabox/driver.go index 4c4ad8b58cf..67eeed755dd 100644 --- a/drivers/terabox/driver.go +++ b/drivers/terabox/driver.go @@ -11,7 +11,6 @@ import ( log "github.com/sirupsen/logrus" "io" "math" - "os" stdpath "path" "strconv" "strings" @@ -116,13 +115,12 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error { } func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize()) + tempFile, err := stream.CacheFullInTempFile() if err != nil { return err } defer func() { _ = tempFile.Close() - _ = os.Remove(tempFile.Name()) }() var Default int64 = 4 * 1024 * 1024 defaultByteData := make([]byte, Default) diff --git a/drivers/thunder/driver.go b/drivers/thunder/driver.go index 8b91b5a954a..081290dcd23 100644 --- a/drivers/thunder/driver.go +++ b/drivers/thunder/driver.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "net/http" - "os" "strings" "github.com/alist-org/alist/v3/drivers/base" @@ -333,13 +332,12 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error { } func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize()) + tempFile, err := 
stream.CacheFullInTempFile() if err != nil { return err } defer func() { _ = tempFile.Close() - _ = os.Remove(tempFile.Name()) }() gcid, err := getGcid(tempFile, stream.GetSize()) diff --git a/drivers/thunder/types.go b/drivers/thunder/types.go index 0c60dc5642b..db589746acd 100644 --- a/drivers/thunder/types.go +++ b/drivers/thunder/types.go @@ -2,6 +2,8 @@ package thunder import ( "fmt" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" "strconv" "time" ) @@ -84,6 +86,8 @@ type Link struct { Type string `json:"type"` } +var _ model.Obj = (*Files)(nil) + type Files struct { Kind string `json:"kind"` ID string `json:"id"` @@ -146,6 +150,14 @@ type Files struct { //Collection interface{} `json:"collection"` } +func (c *Files) CreateTime() time.Time { + return c.CreatedTime +} + +func (c *Files) GetHash() utils.HashInfo { + return utils.HashInfo{} +} + func (c *Files) GetSize() int64 { size, _ := strconv.ParseInt(c.Size, 10, 64); return size } func (c *Files) GetName() string { return c.Name } func (c *Files) ModTime() time.Time { return c.ModifiedTime } diff --git a/drivers/virtual/driver.go b/drivers/virtual/driver.go index e76f95362f6..8775f06d5da 100644 --- a/drivers/virtual/driver.go +++ b/drivers/virtual/driver.go @@ -52,18 +52,29 @@ func (d *Virtual) List(ctx context.Context, dir model.Obj, args model.ListArgs) return res, nil } -type nopReadSeekCloser struct { +type DummyMFile struct { io.Reader } -func (nopReadSeekCloser) Seek(offset int64, whence int) (int64, error) { +func (f DummyMFile) Read(p []byte) (n int, err error) { + return f.Reader.Read(p) +} + +func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) { + return f.Reader.Read(p) +} + +func (f DummyMFile) Close() error { + return nil +} + +func (DummyMFile) Seek(offset int64, whence int) (int64, error) { return offset, nil } -func (nopReadSeekCloser) Close() error { return nil } func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { return &model.Link{ - ReadSeekCloser: nopReadSeekCloser{io.LimitReader(random.Rand, file.GetSize())}, + MFile: DummyMFile{Reader: random.Rand}, }, nil } diff --git a/drivers/weiyun/driver.go b/drivers/weiyun/driver.go index 628536f0cf4..8c3e6a85012 100644 --- a/drivers/weiyun/driver.go +++ b/drivers/weiyun/driver.go @@ -6,7 +6,6 @@ import ( "io" "math" "net/http" - "os" "strconv" "time" @@ -310,13 +309,12 @@ func (d *WeiYun) Remove(ctx context.Context, obj model.Obj) error { func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { if folder, ok := dstDir.(*Folder); ok { - file, err := utils.CreateTempFile(stream, stream.GetSize()) + file, err := stream.CacheFullInTempFile() if err != nil { return nil, err } defer func() { _ = file.Close() - _ = os.Remove(file.Name()) }() // step 1. 
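Note: the recurring driver-side change above (mediatrack, mopan, pikpak, quark_uc, terabox, thunder, weiyun) replaces utils.CreateTempFile with the stream's own CacheFullInTempFile. A minimal sketch of the resulting Put pattern, with a hypothetical SomeDriver standing in for any of them (the hashing step is illustrative, not part of this patch):

	func (d *SomeDriver) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
		// Cache the stream once; the returned model.File is seekable,
		// so it can be hashed and then re-read for the actual upload.
		tempFile, err := s.CacheFullInTempFile()
		if err != nil {
			return err
		}
		// Close only: FileStream.Close is now responsible for deleting the
		// temp file, which is why the per-driver os.Remove calls are dropped.
		defer func() { _ = tempFile.Close() }()
		// ... hash tempFile, seek back to 0, then upload ...
		return nil
	}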
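Likewise, the var _ model.Obj = (*Files)(nil) line added in thunder/types.go above (and its twins in lanzou/types.go) is the standard Go compile-time interface assertion; it has no runtime cost and exists only so the build breaks if model.Obj grows, as it does in this patch with CreateTime and GetHash:

	// Fails to compile as soon as *Files stops satisfying model.Obj,
	// e.g. when a newly required method such as CreateTime() is missing.
	var _ model.Obj = (*Files)(nil)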
diff --git a/drivers/weiyun/types.go b/drivers/weiyun/types.go index 355583275ca..664693c80ed 100644 --- a/drivers/weiyun/types.go +++ b/drivers/weiyun/types.go @@ -1,6 +1,7 @@ package weiyun import ( + "github.com/alist-org/alist/v3/pkg/utils" "time" weiyunsdkgo "github.com/foxxorcat/weiyun-sdk-go" @@ -21,12 +22,27 @@ func (f *File) GetPath() string { return "" } func (f *File) GetPKey() string { return f.PFolder.DirKey } +func (f *File) CreateTime() time.Time { + return time.Time(f.FileCtime) +} + +func (f *File) GetHash() utils.HashInfo { + return utils.NewHashInfo(utils.SHA1, f.FileSha) +} type Folder struct { PFolder *Folder weiyunsdkgo.Folder } +func (f *Folder) CreateTime() time.Time { + return time.Time(f.DirCtime) +} + +func (f *Folder) GetHash() utils.HashInfo { + return utils.HashInfo{} +} + func (f *Folder) GetID() string { return f.DirKey } func (f *Folder) GetSize() int64 { return 0 } func (f *Folder) GetName() string { return f.DirName } diff --git a/go.mod b/go.mod index 115593029c7..9268c727a83 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/coreos/go-oidc v2.2.1+incompatible github.com/deckarep/golang-set/v2 v2.3.1 github.com/disintegration/imaging v1.6.2 + github.com/djherbis/times v1.5.0 github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 github.com/foxxorcat/mopan-sdk-go v0.1.4 github.com/foxxorcat/weiyun-sdk-go v0.1.2 @@ -39,6 +40,7 @@ require ( github.com/rclone/rclone v1.63.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.8.4 github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca github.com/u2takey/ffmpeg-go v0.5.0 github.com/upyun/go-sdk/v3 v3.0.4 @@ -90,6 +92,7 @@ require ( github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/fxamacker/cbor/v2 v2.4.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect @@ -158,6 +161,7 @@ require ( github.com/orzogc/fake115uploader v0.3.3-0.20221009101310-08b764073b77 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect github.com/prometheus/client_golang v1.16.0 // indirect diff --git a/go.sum b/go.sum index 6f5be907fe1..5abaf8dede2 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,6 @@ github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY= github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= -github.com/SheltonZhu/115driver v1.0.14 h1:uW3dl8J9KDMw+3gPxQdhTysoGhw0/uI1484GT9xhfU4= -github.com/SheltonZhu/115driver v1.0.14/go.mod h1:00ixivHH5HqDj4S7kAWbkuUrjtsJTxc7cGv5RMw3RVs= github.com/SheltonZhu/115driver v1.0.15 h1:RRvgXvXEzvrPwkRno0CUIg7ucEphbsfwct2mQxfNOdQ= github.com/SheltonZhu/115driver v1.0.15/go.mod h1:e3fPOBANbH/FsTya8FquJwOR3ErhCQgEab3q6CVY2k4= github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A= @@ -30,8 +28,6 @@ 
github.com/andreburgaud/crypt2go v1.1.0/go.mod h1:4qhZPzarj1dCIRmCkpdgCklwp+hBq9 github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.316 h1:UC3alCEyzj2XU13ZFGIOHW3yjCNLGTIGVauyetl9fwE= -github.com/aws/aws-sdk-go v1.44.316/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.44.327 h1:ZS8oO4+7MOBLhkdwIhgtVeDzCeWOlTfKJS7EgggbIEY= github.com/aws/aws-sdk-go v1.44.327/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= @@ -83,7 +79,6 @@ github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc= github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/bubbles v0.16.1 h1:6uzpAAaT9ZqKssntbvZMlksWHruQLNxg49H5WdeuYSY= @@ -109,8 +104,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g= -github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A= github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= @@ -118,6 +111,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= +github.com/djherbis/times v1.5.0 h1:79myA211VwPhFTqUk8xehWrsEO+zcIZj0zT8mXPVARU= +github.com/djherbis/times v1.5.0/go.mod h1:5q7FDLvbNg1L/KaBmPcWlVR9NmoKo3+ucqUA3ijQhA0= github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJLNCEi7YHVMkwwtfSr2k9splgdSM= github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8= github.com/foxxorcat/mopan-sdk-go v0.1.3 h1:6ww0ulyLDh6neXZBqUM2PDbxQ6lfdkQbr0FCh9BTY0Y= @@ -144,7 +139,6 @@ github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk= github.com/go-chi/chi/v5 v5.0.10/go.mod 
h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -290,7 +284,6 @@ github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOj github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -439,8 +432,6 @@ github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZ github.com/upyun/go-sdk/v3 v3.0.4 h1:2DCJa/Yi7/3ZybT9UCPATSzvU3wpPPxhXinNlb1Hi8Q= github.com/upyun/go-sdk/v3 v3.0.4/go.mod h1:P/SnuuwhrIgAVRd/ZpzDWqCsBAf/oHg7UggbAxyZa0E= github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc= -github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0 h1:j3un8DqYvvAOqKI5OPz+/RRVhDFipbPKI4t2Uk5RBJw= -github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I= github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXorZG0KzTxbp0Cr1n3FEegfmyd9br1k= github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -524,7 +515,6 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/internal/aria2/monitor.go b/internal/aria2/monitor.go index 583cd72d43c..77265b372b1 100644 --- a/internal/aria2/monitor.go +++ b/internal/aria2/monitor.go @@ -2,6 +2,7 @@ package aria2 import ( "fmt" + "github.com/alist-org/alist/v3/internal/stream" "os" "path" "path/filepath" @@ -162,22 +163,27 @@ func (m *Monitor) Complete() error { if err != nil { return errors.Wrapf(err, "failed to open file %s", file.Path) } - stream := &model.FileStream{ + s := stream.FileStream{ Obj: &model.Object{ Name: path.Base(file.Path), Size: size, Modified: time.Now(), IsFolder: false, }, - ReadCloser: f, - Mimetype: mimetype, + Reader: f, + Closers: utils.NewClosers(f), + Mimetype: mimetype, + } + ss, err := stream.NewSeekableStream(s, nil) + if err != nil { + return err } relDir, err := filepath.Rel(m.tempDir, 
filepath.Dir(file.Path)) if err != nil { log.Errorf("find relation directory error: %v", err) } newDistDir := filepath.Join(dstDirActualPath, relDir) - return op.Put(tsk.Ctx, storage, newDistDir, stream, tsk.SetProgress) + return op.Put(tsk.Ctx, storage, newDistDir, ss, tsk.SetProgress) }, })) } diff --git a/internal/errs/errors.go b/internal/errs/errors.go index 0cab41356f8..b48718778a6 100644 --- a/internal/errs/errors.go +++ b/internal/errs/errors.go @@ -17,6 +17,7 @@ var ( MetaNotFound = errors.New("meta not found") StorageNotFound = errors.New("storage not found") StreamIncomplete = errors.New("upload/download stream incomplete, possible network issue") + StreamPeekFail = errors.New("failed to peek data from stream") ) // NewErr wrap constant error with an extra message diff --git a/internal/fs/copy.go b/internal/fs/copy.go index b8f92599dc8..caf858826ae 100644 --- a/internal/fs/copy.go +++ b/internal/fs/copy.go @@ -10,6 +10,7 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/task" "github.com/alist-org/alist/v3/pkg/utils" "github.com/pkg/errors" @@ -94,9 +95,14 @@ func copyFileBetween2Storages(tsk *task.Task[uint64], srcStorage, dstStorage dri if err != nil { return errors.WithMessagef(err, "failed get [%s] link", srcFilePath) } - stream, err := getFileStreamFromLink(tsk.Ctx, srcFile, link) + fs := stream.FileStream{ + Obj: srcFile, + Ctx: tsk.Ctx, + } + // any link provided is seekable + ss, err := stream.NewSeekableStream(fs, link) if err != nil { return errors.WithMessagef(err, "failed get [%s] stream", srcFilePath) } - return op.Put(tsk.Ctx, dstStorage, dstDirPath, stream, tsk.SetProgress, true) + return op.Put(tsk.Ctx, dstStorage, dstDirPath, ss, tsk.SetProgress, true) } diff --git a/internal/fs/fs.go b/internal/fs/fs.go index ce922bc79b9..2b23142a662 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -2,7 +2,6 @@ package fs import ( "context" - "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" @@ -93,7 +92,7 @@ func Remove(ctx context.Context, path string) error { return err } -func PutDirectly(ctx context.Context, dstDirPath string, file *model.FileStream, lazyCache ...bool) error { +func PutDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer, lazyCache ...bool) error { err := putDirectly(ctx, dstDirPath, file, lazyCache...) 
if err != nil { log.Errorf("failed put %s: %+v", dstDirPath, err) @@ -101,7 +100,7 @@ func PutDirectly(ctx context.Context, dstDirPath string, file *model.FileStream, return err } -func PutAsTask(dstDirPath string, file *model.FileStream) error { +func PutAsTask(dstDirPath string, file model.FileStreamer) error { err := putAsTask(dstDirPath, file) if err != nil { log.Errorf("failed put %s: %+v", dstDirPath, err) diff --git a/internal/fs/put.go b/internal/fs/put.go index 41f6b8db5b1..ab6d24bf571 100644 --- a/internal/fs/put.go +++ b/internal/fs/put.go @@ -3,13 +3,12 @@ package fs import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/model" "sync/atomic" "github.com/alist-org/alist/v3/internal/errs" - "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/task" - "github.com/alist-org/alist/v3/pkg/utils" "github.com/pkg/errors" ) @@ -18,7 +17,7 @@ var UploadTaskManager = task.NewTaskManager(3, func(tid *uint64) { }) // putAsTask add as a put task and return immediately -func putAsTask(dstDirPath string, file *model.FileStream) error { +func putAsTask(dstDirPath string, file model.FileStreamer) error { storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) if err != nil { return errors.WithMessage(err, "failed get storage") @@ -27,11 +26,12 @@ func putAsTask(dstDirPath string, file *model.FileStream) error { return errors.WithStack(errs.UploadNotSupported) } if file.NeedStore() { - tempFile, err := utils.CreateTempFile(file, file.GetSize()) + _, err := file.CacheFullInTempFile() if err != nil { return errors.Wrapf(err, "failed to create temp file") } - file.SetReadCloser(tempFile) + //file.SetReader(tempFile) + //file.SetTmpFile(tempFile) } UploadTaskManager.Submit(task.WithCancelCtx(&task.Task[uint64]{ Name: fmt.Sprintf("upload %s to [%s](%s)", file.GetName(), storage.GetStorage().MountPath, dstDirActualPath), @@ -43,7 +43,7 @@ func putAsTask(dstDirPath string, file *model.FileStream) error { } // putDirect put the file and return after finish -func putDirectly(ctx context.Context, dstDirPath string, file *model.FileStream, lazyCache ...bool) error { +func putDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer, lazyCache ...bool) error { storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) if err != nil { return errors.WithMessage(err, "failed get storage") diff --git a/internal/fs/util.go b/internal/fs/util.go deleted file mode 100644 index 5eca5fcee16..00000000000 --- a/internal/fs/util.go +++ /dev/null @@ -1,73 +0,0 @@ -package fs - -import ( - "context" - "io" - "net/http" - "strings" - - "github.com/alist-org/alist/v3/internal/net" - "github.com/alist-org/alist/v3/pkg/http_range" - - "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/pkg/utils" - "github.com/alist-org/alist/v3/server/common" - "github.com/pkg/errors" -) - -func getFileStreamFromLink(ctx context.Context, file model.Obj, link *model.Link) (*model.FileStream, error) { - var rc io.ReadCloser - var err error - mimetype := utils.GetMimeType(file.GetName()) - if link.RangeReadCloser.RangeReader != nil { - rc, err = link.RangeReadCloser.RangeReader(http_range.Range{Length: -1}) - if err != nil { - return nil, err - } - } else if link.ReadSeekCloser != nil { - rc = link.ReadSeekCloser - } else if link.Concurrency != 0 || link.PartSize != 0 { - down := net.NewDownloader(func(d *net.Downloader) { - d.Concurrency = link.Concurrency - d.PartSize = link.PartSize - 
}) - req := &net.HttpRequestParams{ - URL: link.URL, - Range: http_range.Range{Length: -1}, - Size: file.GetSize(), - HeaderRef: link.Header, - } - rc, err = down.Download(ctx, req) - if err != nil { - return nil, err - } - } else { - //TODO: add accelerator - req, err := http.NewRequest(http.MethodGet, link.URL, nil) - if err != nil { - return nil, errors.Wrapf(err, "failed to create request for %s", link.URL) - } - for h, val := range link.Header { - req.Header[h] = val - } - res, err := common.HttpClient().Do(req) - if err != nil { - return nil, errors.Wrapf(err, "failed to get response for %s", link.URL) - } - mt := res.Header.Get("Content-Type") - if mt != "" && strings.ToLower(mt) != "application/octet-stream" { - mimetype = mt - } - rc = res.Body - } - // if can't get mimetype, use default application/octet-stream - if mimetype == "" { - mimetype = "application/octet-stream" - } - stream := &model.FileStream{ - Obj: file, - ReadCloser: rc, - Mimetype: mimetype, - } - return stream, nil -} diff --git a/internal/model/args.go b/internal/model/args.go index bb12ae07165..ac3c1875bfa 100644 --- a/internal/model/args.go +++ b/internal/model/args.go @@ -1,6 +1,7 @@ package model import ( + "context" "io" "net/http" "time" @@ -22,13 +23,14 @@ type LinkArgs struct { } type Link struct { - URL string `json:"url"` - Header http.Header `json:"header"` // needed header (for url) or response header(for data or writer) - RangeReadCloser RangeReadCloser `json:"-"` // recommended way - ReadSeekCloser io.ReadSeekCloser `json:"-"` // best for local,smb... file system, which exposes ReadSeekCloser + URL string `json:"url"` // most common way + Header http.Header `json:"header"` // needed header (for url) + RangeReadCloser RangeReadCloserIF `json:"-"` // recommended way if can't use URL + MFile File `json:"-"` // best for local,smb... 
file system, which exposes MFile Expiration *time.Duration // local cache expire Duration IPCacheKey bool `json:"-"` // add ip to cache key + //for accelerating request, use multi-thread downloading Concurrency int `json:"concurrency"` PartSize int `json:"part_size"` @@ -45,10 +47,23 @@ type FsOtherArgs struct { Method string `json:"method" form:"method"` Data interface{} `json:"data" form:"data"` } +type RangeReadCloserIF interface { + RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) + utils.ClosersIF +} + +var _ RangeReadCloserIF = (*RangeReadCloser)(nil) + type RangeReadCloser struct { RangeReader RangeReaderFunc - Closers *utils.Closers + utils.Closers +} + +func (r RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { + rc, err := r.RangeReader(ctx, httpRange) + r.Closers.Add(rc) + return rc, err } -type WriterFunc func(w io.Writer) error -type RangeReaderFunc func(httpRange http_range.Range) (io.ReadCloser, error) +// type WriterFunc func(w io.Writer) error +type RangeReaderFunc func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) diff --git a/internal/model/file.go b/internal/model/file.go new file mode 100644 index 00000000000..ba65ef938db --- /dev/null +++ b/internal/model/file.go @@ -0,0 +1,25 @@ +package model + +import "io" + +// File is basic file level accessing interface +type File interface { + io.Reader + io.ReaderAt + io.Seeker + io.Closer +} + +type NopMFileIF interface { + io.Reader + io.ReaderAt + io.Seeker +} +type NopMFile struct { + NopMFileIF +} + +func (NopMFile) Close() error { return nil } +func NewNopMFile(r NopMFileIF) File { + return NopMFile{r} +} diff --git a/internal/model/obj.go b/internal/model/obj.go index 09e0a13e563..cb46201fe70 100644 --- a/internal/model/obj.go +++ b/internal/model/obj.go @@ -1,6 +1,8 @@ package model import ( + "github.com/alist-org/alist/v3/pkg/http_range" + "github.com/alist-org/alist/v3/pkg/utils" "io" "regexp" "sort" @@ -20,8 +22,9 @@ type Obj interface { GetSize() int64 GetName() string ModTime() time.Time + CreateTime() time.Time IsDir() bool - //GetHash() (string, string) + GetHash() utils.HashInfo // The internal information of the driver. 
// If you want to use it, please understand what it means @@ -29,14 +32,20 @@ type Obj interface { GetPath() string } +// FileStreamer ->check FileStream for more comments type FileStreamer interface { - io.ReadCloser + io.Reader + io.Closer Obj GetMimetype() string - SetReadCloser(io.ReadCloser) + //SetReader(io.Reader) NeedStore() bool - GetReadCloser() io.ReadCloser - GetOld() Obj + GetExist() Obj + SetExist(Obj) + //for a non-seekable Stream, RangeRead supports peeking some data, and CacheFullInTempFile still works + RangeRead(http_range.Range) (io.Reader, error) + //for a non-seekable Stream, if Read is called, this function won't work + CacheFullInTempFile() (File, error) } type URL interface { @@ -50,9 +59,6 @@ type Thumb interface { type SetPath interface { SetPath(path string) } -type SetHash interface { - SetHash(hash string, hashType string) -} func SortFiles(objs []Obj, orderBy, orderDirection string) { if orderBy == "" { diff --git a/internal/model/object.go b/internal/model/object.go index b1ef1b4a9ad..93f2c307a03 100644 --- a/internal/model/object.go +++ b/internal/model/object.go @@ -28,9 +28,9 @@ type Object struct { Name string Size int64 Modified time.Time + Ctime time.Time // file create time IsFolder bool - Hash string - HashType string + HashInfo utils.HashInfo } func (o *Object) GetName() string { @@ -44,6 +44,12 @@ func (o *Object) GetSize() int64 { func (o *Object) ModTime() time.Time { return o.Modified } +func (o *Object) CreateTime() time.Time { + if o.Ctime.IsZero() { + return o.ModTime() + } + return o.Ctime +} func (o *Object) IsDir() bool { return o.IsFolder @@ -61,13 +67,8 @@ func (o *Object) SetPath(path string) { o.Path = path } -func (o *Object) SetHash(hash string, hashType string) { - o.Hash = hash - o.HashType = hashType -} - -func (o *Object) GetHash() (string, string) { - return o.Hash, o.HashType +func (o *Object) GetHash() utils.HashInfo { + return o.HashInfo } type Thumbnail struct { diff --git a/internal/model/stream.go b/internal/model/stream.go deleted file mode 100644 index cd7c3363c82..00000000000 --- a/internal/model/stream.go +++ /dev/null @@ -1,33 +0,0 @@ -package model - -import ( - "io" -) - -type FileStream struct { - Obj - io.ReadCloser - Mimetype string - WebPutAsTask bool - Old Obj -} - -func (f *FileStream) GetMimetype() string { - return f.Mimetype -} - -func (f *FileStream) NeedStore() bool { - return f.WebPutAsTask -} - -func (f *FileStream) GetReadCloser() io.ReadCloser { - return f.ReadCloser -} - -func (f *FileStream) SetReadCloser(rc io.ReadCloser) { - f.ReadCloser = rc -} - -func (f *FileStream) GetOld() Obj { - return f.Old -} diff --git a/internal/model/user.go b/internal/model/user.go index c768f3130f5..d7b2863cebe 100644 --- a/internal/model/user.go +++ b/internal/model/user.go @@ -124,11 +124,11 @@ func (u *User) JoinPath(reqPath string) (string, error) { } func StaticHash(password string) string { - return utils.GetSHA256Encode([]byte(fmt.Sprintf("%s-%s", password, StaticHashSalt))) + return utils.HashData(utils.SHA256, []byte(fmt.Sprintf("%s-%s", password, StaticHashSalt))) } func HashPwd(static string, salt string) string { - return utils.GetSHA256Encode([]byte(fmt.Sprintf("%s-%s", static, salt))) + return utils.HashData(utils.SHA256, []byte(fmt.Sprintf("%s-%s", static, salt))) } func TwoHashPwd(password string, salt string) string { diff --git a/internal/net/request.go b/internal/net/request.go index 0bcd966d2af..b450ede5a02 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -43,7 +43,7 @@ type 
Downloader struct { //RequestParam HttpRequestParams HttpClient HttpRequestFunc } -type HttpRequestFunc func(params *HttpRequestParams) (*http.Response, error) +type HttpRequestFunc func(ctx context.Context, params *HttpRequestParams) (*http.Response, error) func NewDownloader(options ...func(*Downloader)) *Downloader { d := &Downloader{ @@ -131,7 +131,7 @@ func (d *downloader) download() (io.ReadCloser, error) { } if d.cfg.Concurrency == 1 { - resp, err := d.cfg.HttpClient(d.params) + resp, err := d.cfg.HttpClient(d.ctx, d.params) if err != nil { return nil, err } @@ -258,7 +258,7 @@ func (d *downloader) downloadChunk(ch *chunk) error { func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) { - resp, err := d.cfg.HttpClient(params) + resp, err := d.cfg.HttpClient(d.ctx, params) if err != nil { return 0, err } @@ -371,10 +371,10 @@ type chunk struct { //boundary http_range.Range } -func DefaultHttpRequestFunc(params *HttpRequestParams) (*http.Response, error) { +func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*http.Response, error) { header := http_range.ApplyRangeToHttpHeader(params.Range, params.HeaderRef) - res, err := RequestHttp("GET", header, params.URL) + res, err := RequestHttp(ctx, "GET", header, params.URL) if err != nil { return nil, err } @@ -456,7 +456,7 @@ type Buf struct { // NewBuf is a buffer that can have 1 read & 1 write at the same time. // when read is faster write, immediately feed data to read after written func NewBuf(ctx context.Context, maxSize int, id int) *Buf { - d := make([]byte, maxSize) + d := make([]byte, 0, maxSize) return &Buf{ctx: ctx, buffer: bytes.NewBuffer(d), size: maxSize, notify: make(chan struct{})} } diff --git a/internal/net/request_test.go b/internal/net/request_test.go index edc38c1ce5a..e41971fbf61 100644 --- a/internal/net/request_test.go +++ b/internal/net/request_test.go @@ -143,7 +143,7 @@ type downloadCaptureClient struct { lock sync.Mutex } -func (c *downloadCaptureClient) HttpRequest(params *HttpRequestParams) (*http.Response, error) { +func (c *downloadCaptureClient) HttpRequest(ctx context.Context, params *HttpRequestParams) (*http.Response, error) { c.lock.Lock() defer c.lock.Unlock() diff --git a/internal/net/serve.go b/internal/net/serve.go index 688882b98c5..eb1ba32309b 100644 --- a/internal/net/serve.go +++ b/internal/net/serve.go @@ -1,6 +1,7 @@ package net import ( + "context" "fmt" "io" "mime" @@ -110,7 +111,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time } switch { case len(ranges) == 0: - reader, err := RangeReaderFunc(http_range.Range{Length: -1}) + reader, err := RangeReaderFunc(context.Background(), http_range.Range{Length: -1}) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -129,7 +130,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time // does not request multiple parts might not support // multipart responses." 
ra := ranges[0] - sendContent, err = RangeReaderFunc(ra) + sendContent, err = RangeReaderFunc(context.Background(), ra) if err != nil { http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) return @@ -156,7 +157,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time pw.CloseWithError(err) return } - reader, err := RangeReaderFunc(ra) + reader, err := RangeReaderFunc(context.Background(), ra) if err != nil { pw.CloseWithError(err) return @@ -209,8 +210,8 @@ func ProcessHeader(origin, override http.Header) http.Header { } // RequestHttp deal with Header properly then send the request -func RequestHttp(httpMethod string, headerOverride http.Header, URL string) (*http.Response, error) { - req, err := http.NewRequest(httpMethod, URL, nil) +func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Header, URL string) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, httpMethod, URL, nil) if err != nil { return nil, err } diff --git a/internal/op/fs.go b/internal/op/fs.go index 15384405f50..5a22362e738 100644 --- a/internal/op/fs.go +++ b/internal/op/fs.go @@ -2,7 +2,6 @@ package op import ( "context" - "os" stdpath "path" "time" @@ -481,18 +480,10 @@ func Remove(ctx context.Context, storage driver.Driver, path string) error { return errors.WithStack(err) } -func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file *model.FileStream, up driver.UpdateProgress, lazyCache ...bool) error { +func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file model.FileStreamer, up driver.UpdateProgress, lazyCache ...bool) error { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { return errors.Errorf("storage not init: %s", storage.GetStorage().Status) } - defer func() { - if f, ok := file.GetReadCloser().(*os.File); ok { - err := os.RemoveAll(f.Name()) - if err != nil { - log.Errorf("failed to remove file [%s]", f.Name()) - } - } - }() defer func() { if err := file.Close(); err != nil { log.Errorf("failed to close file streamer, %v", err) @@ -508,7 +499,7 @@ if fi.GetSize() == 0 { err = Remove(ctx, storage, dstPath) if err != nil { - return errors.WithMessagef(err, "failed remove file that exist and have size 0") + return errors.WithMessagef(err, "while uploading, failed to remove existing file with size 0") } } else if storage.Config().NoOverwriteUpload { // try to rename old obj @@ -517,7 +508,7 @@ return err } } else { - file.Old = fi + file.SetExist(fi) } } err = MakeDir(ctx, storage, dstDirPath) diff --git a/internal/qbittorrent/monitor.go b/internal/qbittorrent/monitor.go index f1b01efae21..12bb4ad21c5 100644 --- a/internal/qbittorrent/monitor.go +++ b/internal/qbittorrent/monitor.go @@ -2,7 +2,7 @@ package qbittorrent import ( "fmt" - "io" + "github.com/alist-org/alist/v3/internal/stream" "os" "path/filepath" "sync" @@ -157,17 +157,22 @@ func (m *Monitor) complete() error { if err != nil { return errors.Wrapf(err, "failed to open file %s", tempPath) } - stream := &model.FileStream{ + s := stream.FileStream{ Obj: &model.Object{ Name: fileName, Size: size, Modified: time.Now(), IsFolder: false, }, - ReadCloser: struct{ io.ReadSeekCloser }{f}, - Mimetype: mimetype, + Reader: f, + Closers: utils.NewClosers(f), + Mimetype: mimetype, } - return op.Put(tsk.Ctx, storage, dstDir, stream, tsk.SetProgress) + ss, 
err := stream.NewSeekableStream(s, nil) + if err != nil { + return err + } + return op.Put(tsk.Ctx, storage, dstDir, ss, tsk.SetProgress) }, })) } diff --git a/internal/stream/stream.go b/internal/stream/stream.go new file mode 100644 index 00000000000..bc2cf9eade0 --- /dev/null +++ b/internal/stream/stream.go @@ -0,0 +1,278 @@ +package stream + +import ( + "bytes" + "context" + "errors" + "fmt" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/http_range" + "github.com/alist-org/alist/v3/pkg/utils" + "io" + "os" +) + +type FileStream struct { + Ctx context.Context + model.Obj + io.Reader + Mimetype string + WebPutAsTask bool + Exist model.Obj //the file that already exists in the destination; we can reuse some info since we will overwrite it + utils.Closers + tmpFile *os.File //if present, tmpFile has full content, it will be deleted at last + peekBuff *bytes.Reader +} + +func (f *FileStream) GetMimetype() string { + return f.Mimetype +} + +func (f *FileStream) NeedStore() bool { + return f.WebPutAsTask +} +func (f *FileStream) Close() error { + var err1, err2 error + err1 = f.Closers.Close() + if f.tmpFile != nil { + err2 = os.RemoveAll(f.tmpFile.Name()) + if err2 != nil { + err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", f.tmpFile.Name()) + } + } + + return errors.Join(err1, err2) +} + +func (f *FileStream) GetExist() model.Obj { + return f.Exist +} +func (f *FileStream) SetExist(obj model.Obj) { + f.Exist = obj +} + +// CacheFullInTempFile saves all data into tmpFile. Not recommended since it wears the disk, +// and the upload can't start until the file is fully written. It's not thread-safe! +// It won't check whether the Reader has already been partially consumed. +func (f *FileStream) CacheFullInTempFile() (model.File, error) { + if f.tmpFile != nil { + return f.tmpFile, nil + } + if file, ok := f.Reader.(model.File); ok { + return file, nil + } + tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize()) + if err != nil { + return nil, err + } + f.tmpFile = tmpF + f.Reader = tmpF + return f.tmpFile, nil +} + +const InMemoryBufMaxSize = 10 // Megabytes +const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024 + +// RangeRead has to cache all data first since only a Reader is provided. 
+// It also supports a peeking RangeRead at the very start, but won't buffer more than 10MB of data in memory +func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { + if httpRange.Length == -1 { + httpRange.Length = f.GetSize() + } + + if f.peekBuff != nil && httpRange.Start < int64(f.peekBuff.Len()) && httpRange.Start+httpRange.Length-1 < int64(f.peekBuff.Len()) { + return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil + } + if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil { + bufSize := utils.Min(httpRange.Length, f.GetSize()) + newBuf := bytes.NewBuffer(make([]byte, 0, bufSize)) + n, err := io.CopyN(newBuf, f.Reader, bufSize) + if err != nil { + return nil, err + } + if n != bufSize { + return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect=%d, actual=%d", bufSize, n) + } + f.peekBuff = bytes.NewReader(newBuf.Bytes()) + f.Reader = io.MultiReader(f.peekBuff, f.Reader) + return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil + } + if f.tmpFile == nil { + _, err := f.CacheFullInTempFile() + if err != nil { + return nil, err + } + } + return io.NewSectionReader(f.tmpFile, httpRange.Start, httpRange.Length), nil +} + +var _ model.FileStreamer = (*SeekableStream)(nil) +var _ model.FileStreamer = (*FileStream)(nil) + +//var _ seekableStream = (*FileStream)(nil) + +// for most internal streams, which are backed by either a RangeReadCloser or an MFile +type SeekableStream struct { + FileStream + Link *model.Link + // should have one of the below to support rangeRead + rangeReadCloser model.RangeReadCloserIF + mFile model.File +} + +func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) { + if len(fs.Mimetype) == 0 { + fs.Mimetype = utils.GetMimeType(fs.Obj.GetName()) + } + ss := SeekableStream{FileStream: fs, Link: link} + if ss.Reader != nil { + result, ok := ss.Reader.(model.File) + if ok { + ss.mFile = result + ss.Closers.Add(result) + return &ss, nil + } + } + if ss.Link != nil { + if ss.Link.MFile != nil { + ss.mFile = ss.Link.MFile + ss.Reader = ss.Link.MFile + ss.Closers.Add(ss.Link.MFile) + return &ss, nil + } + + if ss.Link.RangeReadCloser != nil { + ss.rangeReadCloser = ss.Link.RangeReadCloser + return &ss, nil + } + if len(ss.Link.URL) > 0 { + rrc, err := GetRangeReadCloserFromLink(ss.GetSize(), link) + if err != nil { + return nil, err + } + ss.rangeReadCloser = rrc + return &ss, nil + } + } + + return nil, fmt.Errorf("illegal seekableStream") +} + +//func (ss *SeekableStream) Peek(length int) { +// +//} + +// RangeRead is not thread-safe, please use it in a single thread only. +func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { + if httpRange.Length == -1 { + httpRange.Length = ss.GetSize() + } + if ss.mFile != nil { + return io.NewSectionReader(ss.mFile, httpRange.Start, httpRange.Length), nil + } + if ss.tmpFile != nil { + return io.NewSectionReader(ss.tmpFile, httpRange.Start, httpRange.Length), nil + } + if ss.rangeReadCloser != nil { + rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange) + if err != nil { + return nil, err + } + return rc, nil + } + return nil, fmt.Errorf("can't find mFile or rangeReadCloser") +} + +//func (f *FileStream) GetReader() io.Reader { +// return f.Reader +//} + +// only provide Reader as a full stream when it's demanded; 
in rapid-upload, we can skip this to save memory +func (ss *SeekableStream) Read(p []byte) (n int, err error) { + //f.mu.Lock() + + //f.peekedOnce = true + //defer f.mu.Unlock() + if ss.Reader == nil { + if ss.rangeReadCloser == nil { + return 0, fmt.Errorf("illegal seekableStream") + } + rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1}) + if err != nil { + return 0, err + } + ss.Reader = io.NopCloser(rc) + ss.Closers.Add(rc) + + } + return ss.Reader.Read(p) +} + +func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) { + if ss.tmpFile != nil { + return ss.tmpFile, nil + } + if ss.mFile != nil { + return ss.mFile, nil + } + tmpF, err := utils.CreateTempFile(ss, ss.GetSize()) + if err != nil { + return nil, err + } + ss.tmpFile = tmpF + ss.Reader = tmpF + return ss.tmpFile, nil +} + +//func (f *FileStream) SetReader(r io.Reader) { +// f.Reader = r +//} + +/* +// RangePeek allow once peek at start of the data, since most drives check first XX bytes for rapid-upload +func (f *FileStream) RangePeek(length int64) (*bytes.Buffer, error) { + if length > InMemoryBufMaxSize*1024*1024 { + return nil, errs.NewErr(errs.StreamPeekFail, "can't peek size > %d MB", InMemoryBufMaxSize) + } + httpRange := &http_range.Range{Length: length} + bufSize := utils.Min(httpRange.Length, f.GetSize()) + buf := bytes.NewBuffer(make([]byte, 0, bufSize)) + if f.link == nil && f.tmpFile == nil { + if !f.peekedOnce { + f.mu.Lock() + f.peekedOnce = true + _, err := io.CopyN(buf, f.Reader, bufSize) + + if err != nil { + f.mu.Unlock() + return nil, errs.NewErr(errs.StreamPeekFail, "failed to copyN %d bytes data", bufSize) + } + f.Reader = io.MultiReader(buf, f.Reader) + f.mu.Unlock() + return buf, nil + + } + return nil, errs.NewErr(errs.StreamPeekFail, "link and tmpFile both are null") + } + f.mu.Lock() + defer f.mu.Unlock() + rc, _, err := GetReadCloserFromLink(f.Obj, f.link, httpRange) + + if err != nil { + return nil, err + } + _, err = io.CopyN(buf, rc, bufSize) + if err != nil { + return nil, err + } + return buf, nil +}*/ + +//func (f *FileStream) SetTmpFile(r *os.File) { +// f.mu.Lock() +// //f.readDisabled = true +// f.tmpFile = r +// f.Reader = r +// f.mu.Unlock() +//} diff --git a/internal/stream/util.go b/internal/stream/util.go new file mode 100644 index 00000000000..eda611f7c83 --- /dev/null +++ b/internal/stream/util.go @@ -0,0 +1,84 @@ +package stream + +import ( + "context" + "fmt" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/net" + "github.com/alist-org/alist/v3/pkg/http_range" + log "github.com/sirupsen/logrus" + "io" + "net/http" +) + +func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCloserIF, error) { + if len(link.URL) == 0 { + return nil, fmt.Errorf("can't create RangeReadCloser since URL is empty in link") + } + //remoteClosers := utils.EmptyClosers() + rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) { + if link.Concurrency != 0 || link.PartSize != 0 { + header := net.ProcessHeader(http.Header{}, link.Header) + down := net.NewDownloader(func(d *net.Downloader) { + d.Concurrency = link.Concurrency + d.PartSize = link.PartSize + }) + req := &net.HttpRequestParams{ + URL: link.URL, + Range: r, + Size: size, + HeaderRef: header, + } + rc, err := down.Download(ctx, req) + if err != nil { + return nil, errs.NewErr(err, "GetRangeReadCloserFromLink failed") + } + return rc, nil + + } + if len(link.URL) > 0 { + 
+package stream
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/net"
+	"github.com/alist-org/alist/v3/pkg/http_range"
+	log "github.com/sirupsen/logrus"
+)
+
+func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCloserIF, error) {
+	if len(link.URL) == 0 {
+		return nil, fmt.Errorf("can't create RangeReadCloser since URL is empty in link")
+	}
+	rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
+		if link.Concurrency != 0 || link.PartSize != 0 {
+			header := net.ProcessHeader(http.Header{}, link.Header)
+			down := net.NewDownloader(func(d *net.Downloader) {
+				d.Concurrency = link.Concurrency
+				d.PartSize = link.PartSize
+			})
+			req := &net.HttpRequestParams{
+				URL:       link.URL,
+				Range:     r,
+				Size:      size,
+				HeaderRef: header,
+			}
+			rc, err := down.Download(ctx, req)
+			if err != nil {
+				return nil, errs.NewErr(err, "GetRangeReadCloserFromLink failed")
+			}
+			return rc, nil
+		}
+		if len(link.URL) > 0 {
+			response, err := RequestRangedHttp(ctx, link, r.Start, r.Length)
+			if err != nil {
+				if response == nil {
+					return nil, fmt.Errorf("http request failure, err: %s", err)
+				}
+				return nil, fmt.Errorf("http request failure, status: %d err: %s", response.StatusCode, err)
+			}
+			if r.Start == 0 && (r.Length == -1 || r.Length == size) || response.StatusCode == http.StatusPartialContent ||
+				checkContentRange(&response.Header, size, r.Start) {
+				return response.Body, nil
+			} else if response.StatusCode == http.StatusOK {
+				log.Warnf("remote http server does not support range requests, expect low performance!")
+				readCloser, err := net.GetRangedHttpReader(response.Body, r.Start, r.Length)
+				if err != nil {
+					return nil, err
+				}
+				return readCloser, nil
+			}
+
+			return response.Body, nil
+		}
+
+		return nil, errs.NotSupport
+	}
+	resultRangeReadCloser := model.RangeReadCloser{RangeReader: rangeReaderFunc}
+	return &resultRangeReadCloser, nil
+}
+
+func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) {
+	header := net.ProcessHeader(http.Header{}, link.Header)
+	header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)
+
+	return net.RequestHttp(ctx, "GET", header, link.URL)
+}
+
+// 139 cloud does not properly return the 206 http status code, add a hack here
+func checkContentRange(header *http.Header, size, offset int64) bool {
+	r, err := http_range.ParseRange(header.Get("Content-Range"), size)
+	if err != nil {
+		log.Warnf("failed to parse Content-Range, ignoring it, err=%s", err)
+	}
+	if len(r) == 1 && r[0].Start == offset {
+		return true
+	}
+	return false
+}
diff --git a/pkg/utils/file.go b/pkg/utils/file.go
index 6dd78164ee6..ada1d5e2a13 100644
--- a/pkg/utils/file.go
+++ b/pkg/utils/file.go
@@ -113,7 +113,7 @@ func CreateNestedFile(path string) (*os.File, error) {
 }
 
-// CreateTempFile create temp file from io.ReadCloser, and seek to 0
-func CreateTempFile(r io.ReadCloser, size int64) (*os.File, error) {
+// CreateTempFile creates a temp file from an io.Reader, and seeks it back to 0
+func CreateTempFile(r io.Reader, size int64) (*os.File, error) {
 	if f, ok := r.(*os.File); ok {
 		return f, nil
 	}
@@ -171,3 +171,10 @@ func GetMimeType(name string) string {
 	}
 	return "application/octet-stream"
 }
+
+const (
+	KB = 1 << (10 * (iota + 1))
+	MB
+	GB
+	TB
+)
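The shifted iota yields KB=1<<10, MB=1<<20, GB=1<<30, and TB=1<<40, so byte limits read naturally. For instance, the 10MB in-memory peek cap used by FileStream.RangeRead could be expressed as follows (a sketch; the actual definition of InMemoryBufMaxSizeBytes lives elsewhere in this patch and its exact form is assumed here):

	const InMemoryBufMaxSizeBytes = 10 * MB

diff --git a/pkg/utils/hash.go b/pkg/utils/hash.go
index bf161a4ceb7..cd1a4b0a5e5 100644
--- a/pkg/utils/hash.go
+++ b/pkg/utils/hash.go
@@ -4,46 +4,178 @@ import (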
 	"crypto/md5"
 	"crypto/sha1"
 	"crypto/sha256"
-	"encoding/base64"
 	"encoding/hex"
+	"errors"
+	"github.com/alist-org/alist/v3/internal/errs"
+	"hash"
+	"io"
 	"strings"
 )
 
-func GetSHA1Encode(data []byte) string {
-	h := sha1.New()
-	h.Write(data)
-	return hex.EncodeToString(h.Sum(nil))
+func GetMD5EncodeStr(data string) string {
+	return HashData(MD5, []byte(data))
 }
 
-func GetSHA256Encode(data []byte) string {
-	h := sha256.New()
-	h.Write(data)
-	return hex.EncodeToString(h.Sum(nil))
+// inspired by "github.com/rclone/rclone/fs/hash"
+
+// ErrUnsupported should be returned by a filesystem
+// if it is requested to deliver an unsupported hash type.
+var ErrUnsupported = errors.New("hash type not supported")
+
+// HashType indicates a standard hashing algorithm
+type HashType struct {
+	Width   int
+	Name    string
+	Alias   string
+	NewFunc func() hash.Hash
+}
+
+var (
+	name2hash  = map[string]*HashType{}
+	alias2hash = map[string]*HashType{}
+	Supported  []*HashType
+)
+
+// RegisterHash adds a new Hash to the list and returns its Type
+func RegisterHash(name, alias string, width int, newFunc func() hash.Hash) *HashType {
+	newType := &HashType{
+		Name:    name,
+		Alias:   alias,
+		Width:   width,
+		NewFunc: newFunc,
+	}
+
+	name2hash[name] = newType
+	alias2hash[alias] = newType
+	Supported = append(Supported, newType)
+	return newType
 }
 
-func GetMD5Encode(data []byte) string {
-	h := md5.New()
+var (
+	// MD5 indicates MD5 support
+	MD5 = RegisterHash("md5", "MD5", 32, md5.New)
+
+	// SHA1 indicates SHA-1 support
+	SHA1 = RegisterHash("sha1", "SHA-1", 40, sha1.New)
+
+	// SHA256 indicates SHA-256 support
+	SHA256 = RegisterHash("sha256", "SHA-256", 64, sha256.New)
+)
+
+// HashData returns the hash string of data for the given hashType
+func HashData(hashType *HashType, data []byte) string {
+	h := hashType.NewFunc()
 	h.Write(data)
 	return hex.EncodeToString(h.Sum(nil))
 }
 
-func GetMD5EncodeStr(data string) string {
-	return GetMD5Encode([]byte(data))
+// HashReader returns the hash string of the given hashType from a reader
+func HashReader(hashType *HashType, reader io.Reader) (string, error) {
+	h := hashType.NewFunc()
+	_, err := io.Copy(h, reader)
+	if err != nil {
+		return "", errs.NewErr(err, "HashReader error")
+	}
+	return hex.EncodeToString(h.Sum(nil)), nil
 }
 
-var DEC = map[string]string{
-	"-": "+",
-	"_": "/",
-	".": "=",
+// HashFile returns the hash string of the given hashType from a seekable file,
+// and seeks it back to the start
+func HashFile(hashType *HashType, file io.ReadSeeker) (string, error) {
+	str, err := HashReader(hashType, file)
+	if err != nil {
+		return "", err
+	}
+	if _, err = file.Seek(0, io.SeekStart); err != nil {
+		return str, err
+	}
+	return str, nil
 }
 
-func SafeAtob(data string) (string, error) {
-	for k, v := range DEC {
-		data = strings.ReplaceAll(data, k, v)
+// fromTypes will return hashers for all the requested types.
+func fromTypes(types []*HashType) map[*HashType]hash.Hash {
+	hashers := map[*HashType]hash.Hash{}
+	for _, t := range types {
+		hashers[t] = t.NewFunc()
 	}
-	bytes, err := base64.StdEncoding.DecodeString(data)
-	if err != nil {
-		return "", err
+	return hashers
+}
+
+// toMultiWriter combines a set of hashers into a single
+// multiwriter, so that one write updates all the hashers.
+func toMultiWriter(h map[*HashType]hash.Hash) io.Writer {
+	// Convert the map values to a slice
+	var w = make([]io.Writer, 0, len(h))
+	for _, v := range h {
+		w = append(w, v)
 	}
-	return string(bytes), err
+	return io.MultiWriter(w...)
+}
+
+// A MultiHasher will construct various hashes on all incoming writes.
+type MultiHasher struct {
+	w    io.Writer
+	size int64
+	h    map[*HashType]hash.Hash // Hashes
+}
+
+// NewMultiHasher will return a hash writer that will write
+// the requested hash types.
+func NewMultiHasher(types []*HashType) *MultiHasher {
+	hashers := fromTypes(types)
+	m := MultiHasher{h: hashers, w: toMultiWriter(hashers)}
+	return &m
+}
+
+func (m *MultiHasher) Write(p []byte) (n int, err error) {
+	n, err = m.w.Write(p)
+	m.size += int64(n)
+	return n, err
+}
+
+func (m *MultiHasher) GetHashInfo() *HashInfo {
+	dst := make(map[*HashType]string)
+	for k, v := range m.h {
+		dst[k] = hex.EncodeToString(v.Sum(nil))
+	}
+	return &HashInfo{h: dst}
+}
+
+// Sum returns the specified hash from the multihasher
+func (m *MultiHasher) Sum(hashType *HashType) ([]byte, error) {
+	h, ok := m.h[hashType]
+	if !ok {
+		return nil, ErrUnsupported
+	}
+	return h.Sum(nil), nil
+}
+
+// Size returns the number of bytes written
+func (m *MultiHasher) Size() int64 {
+	return m.size
+}
+
+// A HashInfo contains hash strings for one or more hashTypes
+type HashInfo struct {
+	h map[*HashType]string
+}
+
+func NewHashInfo(ht *HashType, str string) HashInfo {
+	m := make(map[*HashType]string)
+	m[ht] = str
+	return HashInfo{h: m}
+}
+
+func (hi HashInfo) String() string {
+	var tmp []string
+	for ht, str := range hi.h {
+		if len(str) > 0 {
+			tmp = append(tmp, ht.Name+":"+str)
+		}
+	}
+	return strings.Join(tmp, "\n")
+}
+
+func (hi HashInfo) GetHash(ht *HashType) string {
+	return hi.h[ht]
 }
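The MultiHasher lets upload paths compute several digests in a single pass over the stream. A minimal usage sketch, grounded in the API above (reader is a placeholder for any io.Reader):

	mh := utils.NewMultiHasher([]*utils.HashType{utils.MD5, utils.SHA1})
	if _, err := io.Copy(mh, reader); err != nil {
		return err
	}
	info := mh.GetHashInfo()
	md5sum := info.GetHash(utils.MD5)   // hex-encoded MD5
	sha1sum := info.GetHash(utils.SHA1) // hex-encoded SHA-1
	_, _ = md5sum, sha1sum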
diff --git a/pkg/utils/hash_test.go b/pkg/utils/hash_test.go
new file mode 100644
index 00000000000..64ff9cfdd66
--- /dev/null
+++ b/pkg/utils/hash_test.go
@@ -0,0 +1,64 @@
+package utils
+
+import (
+	"bytes"
+	"io"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+type hashTest struct {
+	input  []byte
+	output map[*HashType]string
+}
+
+var hashTestSet = []hashTest{
+	{
+		input: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
+		output: map[*HashType]string{
+			MD5:    "bf13fc19e5151ac57d4252e0e0f87abe",
+			SHA1:   "3ab6543c08a75f292a5ecedac87ec41642d12166",
+			SHA256: "c839e57675862af5c21bd0a15413c3ec579e0d5522dab600bc6c3489b05b8f54",
+		},
+	},
+	// Empty data set
+	{
+		input: []byte{},
+		output: map[*HashType]string{
+			MD5:    "d41d8cd98f00b204e9800998ecf8427e",
+			SHA1:   "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+			SHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+		},
+	},
+}
+
+func TestMultiHasher(t *testing.T) {
+	for _, test := range hashTestSet {
+		mh := NewMultiHasher([]*HashType{MD5, SHA1, SHA256})
+		n, err := io.Copy(mh, bytes.NewBuffer(test.input))
+		require.NoError(t, err)
+		assert.Len(t, test.input, int(n))
+		hashInfo := mh.GetHashInfo()
+		for k, v := range hashInfo.h {
+			expect, ok := test.output[k]
+			require.True(t, ok, "test output for hash not found")
+			assert.Equal(t, expect, v)
+		}
+		// Test that all expected hashes are present
+		for k, v := range test.output {
+			got, ok := hashInfo.h[k]
+			require.True(t, ok, "test output for hash not found")
+			assert.Equal(t, v, got)
+		}
+		for k, v := range test.output {
+			got := hashInfo.GetHash(k)
+			require.True(t, len(got) > 0, "test output for hash not found")
+			assert.Equal(t, v, got)
+		}
+		got := hashInfo.GetHash(nil)
+		require.True(t, len(got) == 0, "unknown type should return empty string")
+		Log.Info(hashInfo.String())
+	}
+}
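The io.go changes that follow rework Closers to hold io.Closer values directly and to surface every Close error via errors.Join instead of discarding them. A usage sketch (respBody and tmpFile are placeholder closers, and the logger is illustrative):

	cs := utils.NewClosers(respBody, tmpFile)
	defer func() {
		if err := cs.Close(); err != nil {
			log.Errorf("close error: %v", err)
		}
	}()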
diff --git a/pkg/utils/io.go b/pkg/utils/io.go
index 936461a7281..d106531bd3d 100644
--- a/pkg/utils/io.go
+++ b/pkg/utils/io.go
@@ -3,7 +3,9 @@ package utils
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
+	"golang.org/x/exp/constraints"
 	"io"
 	"time"
 
@@ -17,7 +19,7 @@ type readerFunc func(p []byte) (n int, err error)
 func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) }
 
 // CopyWithCtx slightly modified function signature:
-// - context has been added in order to propagate cancelation
+// - context has been added in order to propagate cancellation
 // - I do not return the number of bytes written, as it is not useful in my use case
 func CopyWithCtx(ctx context.Context, out io.Writer, in io.Reader, size int64, progress func(percentage int)) error {
 	// Copy will call the Reader and Writer interface multiple time, in order
@@ -132,16 +134,6 @@ func (mr *MultiReadable) Close() error {
 	return nil
 }
 
-type nopCloser struct {
-	io.ReadSeeker
-}
-
-func (nopCloser) Close() error { return nil }
-
-func ReadSeekerNopCloser(r io.ReadSeeker) io.ReadSeekCloser {
-	return nopCloser{r}
-}
-
 func Retry(attempts int, sleep time.Duration, f func() error) (err error) {
 	for i := 0; i < attempts; i++ {
 		fmt.Println("This is attempt number", i)
@@ -158,23 +150,56 @@ func Retry(attempts int, sleep time.Duration, f func() error) (err error) {
 	return fmt.Errorf("after %d attempts, last error: %s", attempts, err)
 }
 
+type ClosersIF interface {
+	io.Closer
+	Add(closer io.Closer)
+	AddClosers(closers Closers)
+	GetClosers() Closers
+}
+
 type Closers struct {
-	closers []*io.Closer
+	closers []io.Closer
+}
+
+func (c *Closers) GetClosers() Closers {
+	return *c
 }
 
-func (c *Closers) Close() (err error) {
+var _ ClosersIF = (*Closers)(nil)
+
+func (c *Closers) Close() error {
+	var errs []error
 	for _, closer := range c.closers {
 		if closer != nil {
-			_ = (*closer).Close()
+			errs = append(errs, closer.Close())
 		}
 	}
-	return nil
+	return errors.Join(errs...)
 }
 
 func (c *Closers) Add(closer io.Closer) {
-	if closer != nil {
-		c.closers = append(c.closers, &closer)
+	c.closers = append(c.closers, closer)
+}
+
+func (c *Closers) AddClosers(closers Closers) {
+	c.closers = append(c.closers, closers.closers...)
+} + +func EmptyClosers() Closers { + return Closers{[]io.Closer{}} +} +func NewClosers(c ...io.Closer) Closers { + return Closers{c} +} + +func Min[T constraints.Ordered](a, b T) T { + if a < b { + return a } + return b } -func NewClosers() *Closers { - return &Closers{[]*io.Closer{}} +func Max[T constraints.Ordered](a, b T) T { + if a < b { + return b + } + return a } diff --git a/pkg/utils/str.go b/pkg/utils/str.go index 9b2d71d8eb8..509bb82809c 100644 --- a/pkg/utils/str.go +++ b/pkg/utils/str.go @@ -1,6 +1,7 @@ package utils import ( + "encoding/base64" "strings" "github.com/alist-org/alist/v3/internal/conf" @@ -12,3 +13,20 @@ func MappingName(name string) string { } return name } + +var DEC = map[string]string{ + "-": "+", + "_": "/", + ".": "=", +} + +func SafeAtob(data string) (string, error) { + for k, v := range DEC { + data = strings.ReplaceAll(data, k, v) + } + bytes, err := base64.StdEncoding.DecodeString(data) + if err != nil { + return "", err + } + return string(bytes), err +} diff --git a/server/common/proxy.go b/server/common/proxy.go index 45c2b82030e..65d8d334130 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -3,58 +3,35 @@ package common import ( "context" "fmt" - "io" - "net/http" - "net/url" - "sync" - - "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/net" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" - "github.com/pkg/errors" + "io" + "net/http" + "net/url" ) -func HttpClient() *http.Client { - once.Do(func() { - httpClient = base.NewHttpClient() - httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - req.Header.Del("Referer") - return nil - } - }) - return httpClient -} - -var once sync.Once -var httpClient *http.Client - func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error { - if link.ReadSeekCloser != nil { + if link.MFile != nil { attachFileName(w, file) - http.ServeContent(w, r, file.GetName(), file.ModTime(), link.ReadSeekCloser) - defer link.ReadSeekCloser.Close() + http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile) + defer link.MFile.Close() return nil - } else if link.RangeReadCloser.RangeReader != nil { + } else if link.RangeReadCloser != nil { attachFileName(w, file) - net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeReader) + net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeRead) defer func() { - if link.RangeReadCloser.Closers != nil { - link.RangeReadCloser.Closers.Close() - } + _ = link.RangeReadCloser.Close() }() return nil } else if link.Concurrency != 0 || link.PartSize != 0 { attachFileName(w, file) size := file.GetSize() //var finalClosers model.Closers - finalClosers := utils.NewClosers() + finalClosers := utils.EmptyClosers() header := net.ProcessHeader(r.Header, link.Header) - rangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) { + rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { down := net.NewDownloader(func(d *net.Downloader) { d.Concurrency = link.Concurrency d.PartSize = link.PartSize @@ -65,7 +42,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. 
Size: size, HeaderRef: header, } - rc, err := down.Download(context.Background(), req) + rc, err := down.Download(ctx, req) finalClosers.Add(rc) return rc, err } @@ -75,7 +52,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. } else { //transparent proxy header := net.ProcessHeader(r.Header, link.Header) - res, err := net.RequestHttp(r.Method, header, link.URL) + res, err := net.RequestHttp(context.Background(), r.Method, header, link.URL) if err != nil { return err } diff --git a/server/handles/down.go b/server/handles/down.go index e3528784d74..e4aec494243 100644 --- a/server/handles/down.go +++ b/server/handles/down.go @@ -40,13 +40,13 @@ func Down(c *gin.Context) { common.ErrorResp(c, err, 500) return } - if link.ReadSeekCloser != nil { + if link.MFile != nil { defer func(ReadSeekCloser io.ReadCloser) { err := ReadSeekCloser.Close() if err != nil { log.Errorf("close data error: %s", err) } - }(link.ReadSeekCloser) + }(link.MFile) } c.Header("Referrer-Policy", "no-referrer") c.Header("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate") diff --git a/server/handles/fsmanage.go b/server/handles/fsmanage.go index 29e46665068..2733509e9a5 100644 --- a/server/handles/fsmanage.go +++ b/server/handles/fsmanage.go @@ -331,13 +331,13 @@ func Link(c *gin.Context) { common.ErrorResp(c, err, 500) return } - if link.ReadSeekCloser != nil { + if link.MFile != nil { defer func(ReadSeekCloser io.ReadCloser) { err := ReadSeekCloser.Close() if err != nil { log.Errorf("close link data error: %v", err) } - }(link.ReadSeekCloser) + }(link.MFile) } common.SuccessResp(c, link) return diff --git a/server/handles/fsread.go b/server/handles/fsread.go index c21e294b2b3..cd376723245 100644 --- a/server/handles/fsread.go +++ b/server/handles/fsread.go @@ -37,9 +37,11 @@ type ObjResp struct { Size int64 `json:"size"` IsDir bool `json:"is_dir"` Modified time.Time `json:"modified"` + Created time.Time `json:"created"` Sign string `json:"sign"` Thumb string `json:"thumb"` Type int `json:"type"` + HashInfo string `json:"hashinfo"` } type FsListResp struct { @@ -313,6 +315,8 @@ func FsGet(c *gin.Context) { Size: obj.GetSize(), IsDir: obj.IsDir(), Modified: obj.ModTime(), + Created: obj.CreateTime(), + HashInfo: obj.GetHash().String(), Sign: common.Sign(obj, parentPath, isEncrypt(meta, reqPath)), Type: utils.GetFileType(obj.GetName()), Thumb: thumb, diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 5cde6fb8ccb..237e0691664 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -1,6 +1,7 @@ package handles import ( + "github.com/alist-org/alist/v3/internal/stream" "net/url" stdpath "path" "strconv" @@ -33,21 +34,22 @@ func FsStream(c *gin.Context) { common.ErrorResp(c, err, 400) return } - stream := &model.FileStream{ + s := &stream.FileStream{ Obj: &model.Object{ Name: name, Size: size, Modified: time.Now(), }, - ReadCloser: c.Request.Body, + Reader: c.Request.Body, Mimetype: c.GetHeader("Content-Type"), WebPutAsTask: asTask, } if asTask { - err = fs.PutAsTask(dir, stream) + err = fs.PutAsTask(dir, s) } else { - err = fs.PutDirectly(c, dir, stream, true) + err = fs.PutDirectly(c, dir, s, true) } + defer c.Request.Body.Close() if err != nil { common.ErrorResp(c, err, 500) return @@ -89,21 +91,27 @@ func FsForm(c *gin.Context) { return } dir, name := stdpath.Split(path) - stream := &model.FileStream{ + s := stream.FileStream{ Obj: &model.Object{ Name: name, Size: file.Size, Modified: time.Now(), }, - ReadCloser: f, + Reader: f, Mimetype: 
file.Header.Get("Content-Type"),
 		WebPutAsTask: false,
 	}
+	ss, err := stream.NewSeekableStream(s, nil)
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
 	if asTask {
-		err = fs.PutAsTask(dir, stream)
+		err = fs.PutAsTask(dir, ss)
 	} else {
-		err = fs.PutDirectly(c, dir, stream, true)
+		err = fs.PutDirectly(c, dir, ss, true)
 	}
+	defer f.Close()
 	if err != nil {
 		common.ErrorResp(c, err, 500)
 		return
diff --git a/server/webdav/prop.go b/server/webdav/prop.go
index 73f92a2f2da..df2665a0e74 100644
--- a/server/webdav/prop.go
+++ b/server/webdav/prop.go
@@ -131,8 +131,8 @@ var liveProps = map[xml.Name]struct {
 		dir: true,
 	},
 	{Space: "DAV:", Local: "creationdate"}: {
-		findFn: nil,
-		dir:    false,
+		findFn: findCreationDate,
+		dir:    true,
 	},
 	{Space: "DAV:", Local: "getcontentlanguage"}: {
 		findFn: nil,
@@ -383,6 +383,9 @@ func findContentLength(ctx context.Context, ls LockSystem, name string, fi model
 func findLastModified(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) {
 	return fi.ModTime().UTC().Format(http.TimeFormat), nil
 }
+func findCreationDate(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) {
+	return fi.CreateTime().UTC().Format(http.TimeFormat), nil
+}
 
 // ErrNotImplemented should be returned by optional interfaces if they
 // want the original implementation to be used.
diff --git a/server/webdav/util.go b/server/webdav/util.go
new file mode 100644
index 00000000000..15d9e07cc56
--- /dev/null
+++ b/server/webdav/util.go
@@ -0,0 +1,29 @@
+package webdav
+
+import (
+	"net/http"
+	"strconv"
+	"time"
+
+	log "github.com/sirupsen/logrus"
+)
+
+func (h *Handler) getModTime(r *http.Request) time.Time {
+	return h.getHeaderTime(r, "X-OC-Mtime")
+}
+
+// ownCloud/Nextcloud haven't implemented this, but we can add support since rclone may support it soon
+func (h *Handler) getCreateTime(r *http.Request) time.Time {
+	return h.getHeaderTime(r, "X-OC-Ctime")
+}
+
+func (h *Handler) getHeaderTime(r *http.Request, header string) time.Time {
+	hVal := r.Header.Get(header)
+	if hVal != "" {
+		ts, err := strconv.ParseInt(hVal, 10, 64)
+		if err == nil {
+			return time.Unix(ts, 0)
+		}
+		log.Warnf("getHeaderTime in WebDAV: failed to parse %s, %s", header, err)
+	}
+	return time.Now()
+}
diff --git a/server/webdav/webdav.go b/server/webdav/webdav.go
index 09c4ea71da2..3da13ddd0f4 100644
--- a/server/webdav/webdav.go
+++ b/server/webdav/webdav.go
@@ -8,6 +8,7 @@ package webdav // import "golang.org/x/net/webdav"
 import (
 	"errors"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"net/url"
 	"os"
@@ -321,12 +322,13 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
 	obj := model.Object{
 		Name:     path.Base(reqPath),
 		Size:     r.ContentLength,
-		Modified: time.Now(),
+		Modified: h.getModTime(r),
+		Ctime:    h.getCreateTime(r),
 	}
-	stream := &model.FileStream{
-		Obj:        &obj,
-		ReadCloser: r.Body,
-		Mimetype:   r.Header.Get("Content-Type"),
+	stream := &stream.FileStream{
+		Obj:      &obj,
+		Reader:   r.Body,
+		Mimetype: r.Header.Get("Content-Type"),
 	}
 	if stream.Mimetype == "" {
 		stream.Mimetype = utils.GetMimeType(reqPath)
@@ -336,6 +338,8 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
 		return http.StatusNotFound, err
 	}
 
+	_ = r.Body.Close()
+	_ = stream.Close()
 	// TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
 	if err != nil {
 		return http.StatusMethodNotAllowed, err
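With these handlers in place, a WebDAV client can pass timestamps on upload via the X-OC-Mtime/X-OC-Ctime headers (Unix seconds). A hypothetical client-side sketch; the URL, credentials, and time variables are placeholders:

	req, err := http.NewRequest("PUT", "http://localhost:5244/dav/dir/file.txt", body)
	if err != nil {
		return err
	}
	req.SetBasicAuth("admin", "password")
	req.Header.Set("X-OC-Mtime", strconv.FormatInt(modTime.Unix(), 10))
	req.Header.Set("X-OC-Ctime", strconv.FormatInt(createTime.Unix(), 10))
	resp, err := http.DefaultClient.Do(req)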