@@ -307,7 +307,7 @@ def __try_get_or_create_direct_blob(self, src_path: Path, src_path_md5: str, st:
307307 can_hash_once = exist is False
308308 if can_hash_once :
309309 # it's certain that this blob is unique, but notes: the following code
310- # cannot be interrupted (yield), or other generator could make a same blob
310+ # cannot be interrupted (yield), or another generator could create the same blob
311311 policy = _DirectBlobCreatePolicy .hash_once
312312 if policy is None :
313313 policy = _DirectBlobCreatePolicy .default
@@ -364,7 +364,7 @@ def check_changes(new_size: int, new_hash: Optional[str]):
364364 with self .__make_temp_file (src_path_md5 ) as temp_file_path , _FailureFileDeleter () as file_deleter :
365365 with self .__time_costs .measure_time_cost (CreateBackupTimeCostKey .kind_io_copy ):
366366 cr = compressor .copy_compressed (src_path , temp_file_path , calc_hash = True , estimate_read_size = st .st_size , open_r_func = SourceFileNotFoundWrapper .open_rb )
367- check_changes (cr .read_size , None ) # the size must be unchanged, to satisfy the uniqueness
367+ check_changes (cr .read_size , None ) # the size must be unchanged to satisfy the uniqueness
368368
369369 raw_size , blob_hash , stored_size = cr .read_size , cr .read_hash , cr .write_size
370370 blob_path = blob_utils .get_blob_path (blob_hash )
@@ -484,7 +484,7 @@ def __try_get_or_create_chunked_blob(self, src_path: Path, src_path_md5: str, st
484484 # The blob is specifically generated by the generator
485485 # if any yield is done, ensure to check __blob_by_hash_cache again
486486
487- # large files that need to be chunked are not common, and they already contains quite a few chunks
487+ # large files that need to be chunked are not common, and they already contain quite a few chunks,
488488 # so it's efficient enough to directly query for chunks from DB here
489489
490490 process_start_time = time .time ()
0 commit comments