feat(cloudreve_v4): add Cloudreve V4 driver (#8470 closes #8328 #8467) · AlistGo/alist@ffa03bf · GitHub

Commit ffa03bf

feat(cloudreve_v4): add Cloudreve V4 driver (#8470 closes #8328 #8467)
* feat(cloudreve_v4): add Cloudreve V4 driver implementation
* fix(cloudreve_v4): update request handling to prevent token refresh loop
* feat(onedrive): implement retry logic for upload failures
* feat(cloudreve): implement retry logic for upload failures
* feat(cloudreve_v4): support cloud sorting
* fix(cloudreve_v4): improve token handling in Init method
* feat(cloudreve_v4): support share
* feat(cloudreve): support reference
* feat(cloudreve_v4): support version upload
* fix(cloudreve_v4): add SetBody in upLocal
* fix(cloudreve_v4): update URL structure in Link and FileUrlResp
1 parent 630cf30 commit ffa03bf

File tree

9 files changed: +1158, -66 lines changed


drivers/all.go

Lines changed: 1 addition & 0 deletions
@@ -22,6 +22,7 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/baidu_share"
 	_ "github.com/alist-org/alist/v3/drivers/chaoxing"
 	_ "github.com/alist-org/alist/v3/drivers/cloudreve"
+	_ "github.com/alist-org/alist/v3/drivers/cloudreve_v4"
 	_ "github.com/alist-org/alist/v3/drivers/crypt"
 	_ "github.com/alist-org/alist/v3/drivers/doubao"
 	_ "github.com/alist-org/alist/v3/drivers/doubao_share"

drivers/cloudreve/driver.go

Lines changed: 11 additions & 0 deletions
@@ -18,6 +18,7 @@ import (
 type Cloudreve struct {
 	model.Storage
 	Addition
+	ref *Cloudreve
 }

 func (d *Cloudreve) Config() driver.Config {
@@ -37,8 +38,18 @@ func (d *Cloudreve) Init(ctx context.Context) error {
 	return d.login()
 }

+func (d *Cloudreve) InitReference(storage driver.Driver) error {
+	refStorage, ok := storage.(*Cloudreve)
+	if ok {
+		d.ref = refStorage
+		return nil
+	}
+	return errs.NotSupport
+}
+
 func (d *Cloudreve) Drop(ctx context.Context) error {
 	d.Cookie = ""
+	d.ref = nil
 	return nil
 }
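InitReference lets one Cloudreve storage borrow another instance's session: the new ref field is set via a type assertion, cleared again in Drop, and (as the util.go hunk further down shows) request calls are forwarded to the referenced instance whenever ref is non-nil. A compact sketch of that delegation pattern with hypothetical types, not the driver's real interfaces:

// Sketch of the "reference another storage" pattern: a client with a ref
// forwards calls to it, so two mounts can share one login session.
package main

import (
	"errors"
	"fmt"
)

type Client struct {
	name string
	ref  *Client
}

// InitReference mirrors the driver method: accept only the same concrete type.
func (c *Client) InitReference(other any) error {
	refClient, ok := other.(*Client)
	if !ok {
		return errors.New("not supported") // stands in for errs.NotSupport
	}
	c.ref = refClient
	return nil
}

func (c *Client) Request(path string) string {
	if c.ref != nil { // delegate to the referenced client, as util.go's request does
		return c.ref.Request(path)
	}
	return c.name + " handled " + path
}

func main() {
	primary := &Client{name: "primary"}
	secondary := &Client{name: "secondary"}
	_ = secondary.InitReference(primary)
	fmt.Println(secondary.Request("/directory")) // "primary handled /directory"
}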

drivers/cloudreve/util.go

Lines changed: 113 additions & 44 deletions
@@ -4,12 +4,14 @@ import (
 	"bytes"
 	"context"
 	"encoding/base64"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
 	"net/http"
 	"strconv"
 	"strings"
+	"time"

 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/conf"
@@ -19,7 +21,6 @@ import (
 	"github.com/alist-org/alist/v3/pkg/cookie"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
-	json "github.com/json-iterator/go"
 	jsoniter "github.com/json-iterator/go"
 )

@@ -35,6 +36,9 @@ func (d *Cloudreve) getUA() string {
 }

 func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
+	if d.ref != nil {
+		return d.ref.request(method, path, callback, out)
+	}
 	u := d.Address + "/api/v3" + path
 	req := base.RestyClient.R()
 	req.SetHeaders(map[string]string{
@@ -79,11 +83,11 @@ func (d *Cloudreve) request(method string, path string, callback base.ReqCallbac
 	}
 	if out != nil && r.Data != nil {
 		var marshal []byte
-		marshal, err = json.Marshal(r.Data)
+		marshal, err = jsoniter.Marshal(r.Data)
 		if err != nil {
 			return err
 		}
-		err = json.Unmarshal(marshal, out)
+		err = jsoniter.Unmarshal(marshal, out)
 		if err != nil {
 			return err
 		}
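The Marshal-then-Unmarshal round-trip above is how the driver copies the untyped Data field of the API envelope into a caller-supplied struct; the hunk only swaps the aliased json import for the explicit jsoniter one (the alias collided with the newly imported encoding/json). A standalone sketch of the same round-trip using only the standard library; the envelope and target types here are made up for illustration:

// Sketch: decode an interface{} payload into a typed struct by re-marshalling it.
// Uses encoding/json; the driver does the same thing with jsoniter.
package main

import (
	"encoding/json"
	"fmt"
)

type envelope struct {
	Code int         `json:"code"`
	Msg  string      `json:"msg"`
	Data interface{} `json:"data"`
}

type fileInfo struct { // hypothetical target type
	Name string `json:"name"`
	Size int64  `json:"size"`
}

func decodeData(data interface{}, out interface{}) error {
	raw, err := json.Marshal(data) // Data is a generic map after the first decode
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, out)
}

func main() {
	var r envelope
	_ = json.Unmarshal([]byte(`{"code":0,"data":{"name":"a.txt","size":42}}`), &r)
	var f fileInfo
	if err := decodeData(r.Data, &f); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", f) // {Name:a.txt Size:42}
}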
@@ -187,12 +191,9 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish)
-		var byteSize = DEFAULT
 		left := stream.GetSize() - finish
-		if left < DEFAULT {
-			byteSize = left
-		}
+		byteSize := min(left, DEFAULT)
+		utils.Log.Debugf("[Cloudreve-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
 		byteData := make([]byte, byteSize)
 		n, err := io.ReadFull(stream, byteData)
 		utils.Log.Debug(err, n)
@@ -205,9 +206,26 @@
 			req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
 			req.SetHeader("User-Agent", d.getUA())
 			req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+			req.AddRetryCondition(func(r *resty.Response, err error) bool {
+				if err != nil {
+					return true
+				}
+				if r.IsError() {
+					return true
+				}
+				var retryResp Resp
+				jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
+				if jErr != nil {
+					return true
+				}
+				if retryResp.Code != 0 {
+					return true
+				}
+				return false
+			})
 		}, nil)
 		if err != nil {
-			break
+			return err
 		}
 		finish += byteSize
 		up(float64(finish) * 100 / float64(stream.GetSize()))
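The retry condition registered above makes resty re-send the chunk whenever the transport fails, the HTTP status is an error, or the JSON body carries a non-zero application code; the change also turns a silent break on error into a real error return. A self-contained sketch of that resty pattern; the client setup, URL, and the code field are illustrative assumptions, not the driver's actual configuration:

// Sketch: per-request retry condition with resty v2. Retries on transport
// errors, HTTP error statuses, or an application-level error code in the body.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-resty/resty/v2"
)

type apiResp struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
}

func main() {
	client := resty.New().SetRetryCount(3) // retries fire only when a condition matches

	resp, err := client.R().
		AddRetryCondition(func(r *resty.Response, err error) bool {
			if err != nil || r.IsError() {
				return true // network failure or 4xx/5xx: try again
			}
			var body apiResp
			if jErr := json.Unmarshal(r.Body(), &body); jErr != nil {
				return true // unparsable body: treat as retryable
			}
			return body.Code != 0 // application-level failure
		}).
		Get("https://httpbin.org/status/200") // placeholder URL for illustration

	fmt.Println(resp.StatusCode(), err)
}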
@@ -222,16 +240,15 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
 	var finish int64 = 0
 	var chunk int = 0
 	DEFAULT := int64(u.ChunkSize)
+	retryCount := 0
+	maxRetries := 3
 	for finish < stream.GetSize() {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish)
-		var byteSize = DEFAULT
 		left := stream.GetSize() - finish
-		if left < DEFAULT {
-			byteSize = left
-		}
+		byteSize := min(left, DEFAULT)
+		utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
 		byteData := make([]byte, byteSize)
 		n, err := io.ReadFull(stream, byteData)
 		utils.Log.Debug(err, n)
@@ -248,14 +265,43 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
 		// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
 		req.Header.Set("Authorization", fmt.Sprint(credential))
 		req.Header.Set("User-Agent", d.getUA())
-		finish += byteSize
-		res, err := base.HttpClient.Do(req)
-		if err != nil {
-			return err
+		err = func() error {
+			res, err := base.HttpClient.Do(req)
+			if err != nil {
+				return err
+			}
+			defer res.Body.Close()
+			if res.StatusCode != 200 {
+				return errors.New(res.Status)
+			}
+			body, err := io.ReadAll(res.Body)
+			if err != nil {
+				return err
+			}
+			var up Resp
+			err = json.Unmarshal(body, &up)
+			if err != nil {
+				return err
+			}
+			if up.Code != 0 {
+				return errors.New(up.Msg)
+			}
+			return nil
+		}()
+		if err == nil {
+			retryCount = 0
+			finish += byteSize
+			up(float64(finish) * 100 / float64(stream.GetSize()))
+			chunk++
+		} else {
+			retryCount++
+			if retryCount > maxRetries {
+				return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
+			}
+			backoff := time.Duration(1<<retryCount) * time.Second
+			utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
+			time.Sleep(backoff)
 		}
-		_ = res.Body.Close()
-		up(float64(finish) * 100 / float64(stream.GetSize()))
-		chunk++
 	}
 	return nil
 }
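Unlike the resty-based upLocal path, upRemote drives net/http directly, so the commit wraps each chunk request in a closure and retries it by hand: up to three extra attempts with a 2, 4, 8 second backoff (1<<retryCount seconds), and the counter resets after every successful chunk. A minimal standalone sketch of that loop; doChunk and its simulated failures are invented for illustration:

// Sketch: retry one chunk up to maxRetries times with exponential backoff,
// resetting the retry budget after every successful chunk, as upRemote now does.
package main

import (
	"errors"
	"fmt"
	"time"
)

var simulatedFailures = 2 // hypothetical: the first two attempts return a server error

func doChunk() error { // stands in for sending one chunk with an HTTP client
	if simulatedFailures > 0 {
		simulatedFailures--
		return errors.New("simulated 5xx from server")
	}
	return nil
}

func main() {
	const maxRetries = 3
	const totalChunks = 3
	retryCount := 0

	for chunk := 0; chunk < totalChunks; {
		err := doChunk()
		if err == nil {
			retryCount = 0 // a successful chunk resets the budget for the next one
			chunk++
			continue
		}
		retryCount++
		if retryCount > maxRetries {
			fmt.Printf("upload failed after %d retries: %v\n", maxRetries, err)
			return
		}
		backoff := time.Duration(1<<retryCount) * time.Second // 2s, 4s, 8s
		fmt.Printf("chunk %d failed (%v), retrying after %v\n", chunk, err, backoff)
		time.Sleep(backoff)
	}
	fmt.Println("all chunks uploaded")
}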
@@ -264,16 +310,15 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
 	uploadUrl := u.UploadURLs[0]
 	var finish int64 = 0
 	DEFAULT := int64(u.ChunkSize)
+	retryCount := 0
+	maxRetries := 3
 	for finish < stream.GetSize() {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		utils.Log.Debugf("[Cloudreve-OneDrive] upload: %d", finish)
-		var byteSize = DEFAULT
 		left := stream.GetSize() - finish
-		if left < DEFAULT {
-			byteSize = left
-		}
+		byteSize := min(left, DEFAULT)
+		utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
 		byteData := make([]byte, byteSize)
 		n, err := io.ReadFull(stream, byteData)
 		utils.Log.Debug(err, n)
@@ -295,39 +340,47 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
 			return err
 		}
 		// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
-		if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
+		switch {
+		case res.StatusCode >= 500 && res.StatusCode <= 504:
+			retryCount++
+			if retryCount > maxRetries {
+				res.Body.Close()
+				return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
+			}
+			backoff := time.Duration(1<<retryCount) * time.Second
+			utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
+			time.Sleep(backoff)
+		case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
 			data, _ := io.ReadAll(res.Body)
-			_ = res.Body.Close()
+			res.Body.Close()
 			return errors.New(string(data))
+		default:
+			res.Body.Close()
+			retryCount = 0
+			finish += byteSize
+			up(float64(finish) * 100 / float64(stream.GetSize()))
 		}
-		_ = res.Body.Close()
-		up(float64(finish) * 100 / float64(stream.GetSize()))
 	}
 	// Send the callback request after a successful upload
-	err := d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
+	return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
 		req.SetBody("{}")
 	}, nil)
-	if err != nil {
-		return err
-	}
-	return nil
 }

 func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
 	var finish int64 = 0
 	var chunk int = 0
 	var etags []string
 	DEFAULT := int64(u.ChunkSize)
+	retryCount := 0
+	maxRetries := 3
 	for finish < stream.GetSize() {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		utils.Log.Debugf("[Cloudreve-S3] upload: %d", finish)
-		var byteSize = DEFAULT
 		left := stream.GetSize() - finish
-		if left < DEFAULT {
-			byteSize = left
-		}
+		byteSize := min(left, DEFAULT)
+		utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
 		byteData := make([]byte, byteSize)
 		n, err := io.ReadFull(stream, byteData)
 		utils.Log.Debug(err, n)
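All four upload helpers now size each chunk the same way, byteSize := min(left, DEFAULT) (using Go 1.21's built-in min), and log the byte range being sent instead of only the offset. A tiny standalone sketch of that chunking loop over an in-memory reader; the sizes and log output are illustrative:

// Sketch: fixed-size chunking with Go 1.21's built-in min and io.ReadFull,
// logging the inclusive byte range of each chunk as the updated helpers do.
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	payload := bytes.Repeat([]byte("x"), 10) // pretend this is the file stream
	total := int64(len(payload))
	const chunkSize = int64(4) // stands in for DEFAULT (the upload policy's chunk size)

	stream := bytes.NewReader(payload)
	var finish int64
	for finish < total {
		left := total - finish
		byteSize := min(left, chunkSize) // the last chunk may be shorter
		fmt.Printf("upload range: %d-%d/%d\n", finish, finish+byteSize-1, total)

		buf := make([]byte, byteSize)
		if _, err := io.ReadFull(stream, buf); err != nil {
			panic(err)
		}
		// ...send buf to the remote here...
		finish += byteSize
	}
}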
@@ -346,10 +399,26 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
 		if err != nil {
 			return err
 		}
-		_ = res.Body.Close()
-		etags = append(etags, res.Header.Get("ETag"))
-		up(float64(finish) * 100 / float64(stream.GetSize()))
-		chunk++
+		etag := res.Header.Get("ETag")
+		res.Body.Close()
+		switch {
+		case res.StatusCode != 200:
+			retryCount++
+			if retryCount > maxRetries {
+				return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
+			}
+			backoff := time.Duration(1<<retryCount) * time.Second
+			utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
+			time.Sleep(backoff)
+		case etag == "":
+			return errors.New("failed to get ETag from header")
+		default:
+			retryCount = 0
+			etags = append(etags, etag)
+			finish += byteSize
+			up(float64(finish) * 100 / float64(stream.GetSize()))
+			chunk++
+		}
 	}

 	// s3LikeFinishUpload
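Both the OneDrive and S3 paths now separate transient server errors, which are retried with backoff, from everything else, which aborts the upload; the S3 variant additionally retries any non-200 response and fails fast when a part comes back without an ETag, since the ETags are needed to finish the multipart upload. A small sketch of that classification step following the OneDrive rules; the helper name and the exact status sets are illustrative, not the driver's code:

// Sketch: classify a chunk-upload response the way the OneDrive path now does:
// 5xx is retried with backoff, other unexpected statuses abort, success moves on.
package main

import "fmt"

type outcome int

const (
	succeeded outcome = iota
	retryable // transient server error: back off and resend the chunk
	fatal     // anything else unexpected: abort the upload
)

// classify is a hypothetical helper; the driver inlines this logic in a switch.
func classify(statusCode int) outcome {
	switch {
	case statusCode >= 500 && statusCode <= 504:
		return retryable
	case statusCode != 200 && statusCode != 201 && statusCode != 202:
		return fatal
	default:
		return succeeded
	}
}

func main() {
	fmt.Println(classify(503) == retryable) // true: 5xx gets another attempt
	fmt.Println(classify(202) == succeeded) // true: OneDrive returns 202 for intermediate chunks
	fmt.Println(classify(409) == fatal)     // true: not retried
}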
