11import logging
2+ import math
23from collections import namedtuple
34from concurrent import futures
45from io import BytesIO
@@ -65,7 +66,11 @@ def _recover_from_record(
6566 record_context = [
6667 ctx
6768 for ctx in record_context
68- if ctx .get ('expired_at' , 0 ) > now
69+ if (
70+ ctx .get ('expired_at' , 0 ) > now and
71+ ctx .get ('part_no' , None ) and
72+ ctx .get ('ctx' , None )
73+ )
6974 ]
7075
7176 # assign to context
@@ -173,6 +178,7 @@ def initial_parts(
173178 data = None ,
174179 modify_time = None ,
175180 data_size = None ,
181+ file_name = None ,
176182 ** kwargs
177183 ):
178184 """
@@ -184,6 +190,7 @@ def initial_parts(
184190 data
185191 modify_time
186192 data_size
193+ file_name
187194
188195 kwargs
189196
@@ -222,7 +229,8 @@ def initial_parts(
222229 )
223230
224231 # try to recover from record
225- file_name = path .basename (file_path ) if file_path else None
232+ if not file_name and file_path :
233+ file_name = path .basename (file_path )
226234 context = self ._recover_from_record (
227235 file_name ,
228236 key ,
@@ -275,7 +283,10 @@ def upload_parts(
275283
276284 # initial upload state
277285 part , resp = None , None
278- uploaded_size = 0
286+ uploaded_size = context .part_size * len (context .parts )
287+ if math .ceil (data_size / context .part_size ) in [p .part_no for p in context .parts ]:
288+ # if last part uploaded, should correct the uploaded size
289+ uploaded_size += (data_size % context .part_size ) - context .part_size
279290 lock = Lock ()
280291
281292 if not self .concurrent_executor :
@@ -469,6 +480,7 @@ def upload(
469480 up_token ,
470481 key ,
471482 file_path = file_path ,
483+ file_name = file_name ,
472484 data = data ,
473485 data_size = data_size ,
474486 modify_time = modify_time ,