from __future__ import annotations

import asyncio
import io
import json
import logging
import math
import os
import tempfile
import time
import uuid
from collections.abc import Awaitable
from concurrent import futures
from functools import partial
from pathlib import Path
from typing import Any, BinaryIO, Coroutine, Generator, Optional, Tuple, Union, cast

import aiofiles
import httpx
from httpx import AsyncClient
from pypdf import PdfReader, PdfWriter
import pypdfium2 as pdfium  # type: ignore[import-untyped]

from unstructured_client._hooks.custom import form_utils, pdf_utils, request_utils
from unstructured_client._hooks.custom.common import UNSTRUCTURED_CLIENT_LOGGER_NAME
from unstructured_client._hooks.custom.form_utils import (
    PARTITION_FORM_CONCURRENCY_LEVEL_KEY,
    PARTITION_FORM_FILES_KEY,
    PARTITION_FORM_PAGE_RANGE_KEY,
    PARTITION_FORM_SPLIT_CACHE_TMP_DATA_DIR_KEY,
    PARTITION_FORM_SPLIT_CACHE_TMP_DATA_KEY,
    PARTITION_FORM_SPLIT_PDF_ALLOW_FAILED_KEY,
    PARTITION_FORM_SPLIT_PDF_PAGE_KEY,
    PARTITION_FORM_STARTING_PAGE_NUMBER_KEY,
)
from unstructured_client._hooks.custom.request_utils import get_base_url
from unstructured_client._hooks.types import (
    AfterErrorContext,
    AfterErrorHook,
    AfterSuccessContext,
    AfterSuccessHook,
    BeforeRequestContext,
    BeforeRequestHook,
    SDKInitHook,
)
from unstructured_client.httpclient import HttpClient, AsyncHttpClient
from unstructured_client.utils import RetryConfig

logger = logging.getLogger(UNSTRUCTURED_CLIENT_LOGGER_NAME)
DEFAULT_STARTING_PAGE_NUMBER = 1
DEFAULT_ALLOW_FAILED = False
DEFAULT_CONCURRENCY_LEVEL = 10
DEFAULT_CACHE_TMP_DATA = False
DEFAULT_CACHE_TMP_DATA_DIR = tempfile.gettempdir()
MAX_CONCURRENCY_LEVEL = 50
MIN_PAGES_PER_SPLIT = 2
MAX_PAGES_PER_SPLIT = 20
HI_RES_STRATEGY = 'hi_res'
MAX_PAGE_LENGTH = 4000
TIMEOUT_BUFFER_SECONDS = 5
DEFAULT_FUTURE_TIMEOUT_MINUTES = 60
OPERATION_ID_EXTENSION_KEY = "split_pdf_operation_id"
SPLIT_PDF_HEADER_PREFIX = "X-Unstructured-Split-"
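
# How the split flow hangs together (summarized from the hook docstrings below):
#   1. sdk_init wraps the sync httpx transport so requests to the "no-op"
#      dummy host return an empty 200 without touching the network.
#   2. before_request splits an eligible PDF into chunks, schedules one
#      coroutine per chunk, and swaps the real request for a dummy one.
#   3. after_success awaits the chunk results via _await_elements and merges
#      them into a single response.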


class ChunkExecutionError(Exception):
    def __init__(self, index: int, inner: BaseException):
        super().__init__(str(inner))
        self.index = index
        self.inner = inner


def _get_request_timeout_seconds(request: httpx.Request) -> Optional[float]:
    timeout = request.extensions.get("timeout")
    if timeout is None:
        return None
    if isinstance(timeout, (int, float)):
        return float(timeout)
    if isinstance(timeout, dict):
        timeout_values = [
            float(value)
            for value in timeout.values()
            if isinstance(value, (int, float))
        ]
        if timeout_values:
            return max(timeout_values)
    return None


def _run_coroutines_in_separate_thread(
    coroutines_task: Coroutine[Any, Any, list[tuple[int, httpx.Response]]],
    loop_holder: dict[str, Optional[asyncio.AbstractEventLoop]],
) -> list[tuple[int, httpx.Response]]:
    async def runner() -> list[tuple[int, httpx.Response]]:
        loop_holder["loop"] = asyncio.get_running_loop()
        try:
            return await coroutines_task
        finally:
            loop_holder["loop"] = None

    return asyncio.run(runner())
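
# The Speakeasy hook API is synchronous, so the chunk coroutines run on a
# private event loop inside a single-worker ThreadPoolExecutor (see
# _await_elements). loop_holder exposes that loop to the main thread so
# _request_task_cancellation can cancel in-flight chunks thread-safely.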


async def _order_keeper(index: int, coro: Awaitable) -> Tuple[int, httpx.Response]:
    try:
        response = await coro
    except BaseException as exc:
        raise ChunkExecutionError(index, exc) from exc
    return index, response


async def run_tasks(
    coroutines: list[partial[Coroutine[Any, Any, httpx.Response]]],
    allow_failed: bool = False,
    concurrency_level: int = 10,
    client_timeout: Optional[httpx.Timeout] = None,
    operation_id: Optional[str] = None,
) -> list[tuple[int, httpx.Response]]:
    """Run a list of coroutines in parallel and return the results in order.

    Args:
        coroutines (list[partial[Coroutine]]): A list of functions,
            parametrized with async_client, that return Awaitable objects.
        allow_failed (bool, optional): If True, failed responses will be included
            in the results. Otherwise, the first failed request stops the
            process. Defaults to False.
    """
    limiter = asyncio.Semaphore(concurrency_level)
    if client_timeout is None:
        # Use a variable to adjust the httpx client timeout, or default to 60 minutes.
        # When we're able to reuse the SDK to make these calls, we can remove this var
        # and let the SDK timeout flow through directly.
        client_timeout_minutes = 60
        if timeout_var := os.getenv("UNSTRUCTURED_CLIENT_TIMEOUT_MINUTES"):
            client_timeout_minutes = int(timeout_var)
        client_timeout = httpx.Timeout(60 * client_timeout_minutes)
    logger.debug(
        "split_pdf event=batch_async_start operation_id=%s chunk_count=%d concurrency=%d client_timeout=%s allow_failed=%s",
        operation_id,
        len(coroutines),
        concurrency_level,
        client_timeout,
        allow_failed,
    )
    async with httpx.AsyncClient(timeout=client_timeout) as client:
        armed_coroutines = [coro(async_client=client, limiter=limiter) for coro in coroutines]  # type: ignore
        if allow_failed:
            responses = await asyncio.gather(*armed_coroutines, return_exceptions=True)
            normalized_responses: list[tuple[int, httpx.Response]] = []
            for index, result in enumerate(responses, 1):
                if isinstance(result, ChunkExecutionError):
                    logger.error(
                        "split_pdf event=chunk_transport_error operation_id=%s chunk_index=%d error_type=%s error=%s",
                        operation_id,
                        result.index,
                        type(result.inner).__name__,
                        result.inner,
                        exc_info=result.inner,
                    )
                    normalized_responses.append(
                        (
                            result.index,
                            _create_transport_error_response(result.inner),
                        )
                    )
                elif isinstance(result, BaseException):
                    logger.error(
                        "split_pdf event=chunk_transport_error operation_id=%s chunk_index=%d error_type=%s error=%s",
                        operation_id,
                        index,
                        type(result).__name__,
                        result,
                        exc_info=result,
                    )
                    normalized_responses.append((index, _create_transport_error_response(result)))
                else:
                    normalized_responses.append((index, cast(httpx.Response, result)))
            return normalized_responses
        # TODO: replace with asyncio.TaskGroup for python >3.11  # pylint: disable=fixme
        tasks = [
            asyncio.create_task(_order_keeper(index, coro))
            for index, coro in enumerate(armed_coroutines, 1)
        ]
        results = []
        remaining_tasks = dict(enumerate(tasks, 1))
        for future in asyncio.as_completed(tasks):
            try:
                index, response = await future
            except ChunkExecutionError as exc:
                logger.error(
                    "split_pdf event=chunk_transport_error operation_id=%s chunk_index=%d error_type=%s error=%s",
                    operation_id,
                    exc.index,
                    type(exc.inner).__name__,
                    exc.inner,
                    exc_info=exc.inner,
                )
                for remaining_task in remaining_tasks.values():
                    remaining_task.cancel()
                logger.warning(
                    "split_pdf event=batch_cancel_remaining operation_id=%s reason=transport_exception failed_chunk_index=%d remaining_tasks=%d",
                    operation_id,
                    exc.index,
                    len(remaining_tasks),
                )
                if isinstance(exc.inner, Exception):
                    raise exc.inner
                raise RuntimeError("Split PDF chunk cancelled") from exc.inner
            if response.status_code != 200:
                # cancel all remaining tasks
                for remaining_task in remaining_tasks.values():
                    remaining_task.cancel()
                logger.warning(
                    "split_pdf event=batch_cancel_remaining operation_id=%s reason=http_error failed_chunk_index=%d status_code=%d remaining_tasks=%d",
                    operation_id,
                    index,
                    response.status_code,
                    len(remaining_tasks),
                )
                results.append((index, response))
                break
            results.append((index, response))
            # drop the completed task so it is not cancelled if a later chunk fails
            del remaining_tasks[index]
        # return results in the original order
        return sorted(results, key=lambda x: x[0])
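
# Failure semantics, by example: with allow_failed=False and five chunks, a
# 500 on chunk 3 cancels chunks 4-5 and the results end at chunk 3's
# response; with allow_failed=True all five run to completion and any
# transport failure is normalized into a synthetic 500 response below.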


def _create_transport_error_response(error: BaseException) -> httpx.Response:
    request = getattr(error, "request", None)
    if not isinstance(request, httpx.Request):
        request = httpx.Request("GET", "http://split-pdf.invalid")
    return httpx.Response(
        status_code=500,
        request=request,
        content=str(error).encode(),
        extensions={"transport_exception": error},
    )


def _cancel_running_tasks() -> None:
    for task in asyncio.all_tasks():
        if not task.done():
            task.cancel()


def _request_task_cancellation(
    loop: Optional[asyncio.AbstractEventLoop],
    *,
    operation_id: str,
) -> bool:
    if loop is None:
        return False
    try:
        loop.call_soon_threadsafe(_cancel_running_tasks)
        return True
    except RuntimeError as exc:
        if "Event loop is closed" in str(exc):
            logger.warning(
                "split_pdf event=loop_closed_during_cancel operation_id=%s",
                operation_id,
            )
            return False
        raise


def get_optimal_split_size(num_pages: int, concurrency_level: int) -> int:
    """Distributes pages to workers evenly based on the number of pages and desired concurrency level."""
    if num_pages < MAX_PAGES_PER_SPLIT * concurrency_level:
        split_size = math.ceil(num_pages / concurrency_level)
    else:
        split_size = MAX_PAGES_PER_SPLIT
    return max(split_size, MIN_PAGES_PER_SPLIT)
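
# Worked examples (MIN_PAGES_PER_SPLIT=2, MAX_PAGES_PER_SPLIT=20):
#   get_optimal_split_size(100, 10) -> ceil(100 / 10) = 10 pages per chunk
#   get_optimal_split_size(500, 10) -> capped at 20 pages per chunk
#   get_optimal_split_size(3, 10)   -> ceil(3 / 10) = 1, clamped up to 2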


def load_elements_from_response(response: httpx.Response) -> list[dict]:
    """Loads elements from the response content. The response was modified
    earlier to hold the path of the json file that should be loaded and returned.

    Args:
        response (httpx.Response): The response object, whose content holds the
            path to the json file that should be loaded.

    Returns:
        list[dict]: The elements loaded from the json file cached on disk.
    """
    with open(response.text, mode="r", encoding="utf-8") as file:
        return json.load(file)


class SplitPdfHook(SDKInitHook, BeforeRequestHook, AfterSuccessHook, AfterErrorHook):
    """
    A hook class that splits a PDF file into multiple pages and sends each page as
    a separate request. This hook is designed to be used with a Speakeasy SDK.

    Usage:
    1. Create an instance of the `SplitPdfHook` class.
    2. Register SDK Init, Before Request, After Success and After Error hooks.
    """

    def __init__(self) -> None:
        self.client: Optional[HttpClient] = None
        self.partition_base_url: Optional[str] = None
        self.async_client: Optional[AsyncHttpClient] = None
        self.coroutines_to_execute: dict[
            str, list[partial[Coroutine[Any, Any, httpx.Response]]]
        ] = {}
        self.concurrency_level: dict[str, int] = {}
        self.api_successful_responses: dict[str, list[httpx.Response]] = {}
        self.api_failed_responses: dict[str, list[httpx.Response]] = {}
        self.executors: dict[str, futures.ThreadPoolExecutor] = {}
        self.operation_futures: dict[str, futures.Future[list[tuple[int, httpx.Response]]]] = {}
        self.tempdirs: dict[str, tempfile.TemporaryDirectory] = {}
        self.operation_timeouts: dict[str, Optional[float]] = {}
        self.operation_retry_configs: dict[str, Optional[RetryConfig]] = {}
        self.operation_loops: dict[str, dict[str, Optional[asyncio.AbstractEventLoop]]] = {}
        self.pending_operation_ids: dict[str, str] = {}
        self.allow_failed: dict[str, bool] = {}
        self.cache_tmp_data_feature: dict[str, bool] = {}
        self.cache_tmp_data_dir: dict[str, str] = {}
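
    # All of the per-operation dicts above are keyed by an operation_id (a
    # UUID minted in before_request), so one hook instance can track many
    # concurrent partition calls without their state colliding.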

    @staticmethod
    def _get_operation_id_from_request(request: Optional[httpx.Request]) -> Optional[str]:
        if request is None:
            return None
        extension_operation_id = request.extensions.get(OPERATION_ID_EXTENSION_KEY)
        if isinstance(extension_operation_id, str):
            return extension_operation_id
        header_operation_id = request.headers.get("operation_id")
        if header_operation_id:
            return header_operation_id
        return None

    def _get_operation_id(
        self,
        response: Optional[httpx.Response] = None,
        error: Optional[Exception] = None,
    ) -> Optional[str]:
        if response is not None:
            operation_id = self._get_operation_id_from_request(response.request)
            if operation_id is not None:
                return operation_id
        error_request = getattr(error, "request", None)
        if isinstance(error_request, httpx.Request):
            return self._get_operation_id_from_request(error_request)
        return None

    @staticmethod
    def _retry_config_observability_mode(retry_config: Optional[RetryConfig]) -> str:
        return "sdk_custom" if retry_config is not None else "sdk_default_or_unset"

    @staticmethod
    def _cache_mode_observability_value(
        cache_enabled: bool,
        cache_dir: str,
    ) -> str:
        if not cache_enabled:
            return "disabled"
        if Path(cache_dir).resolve() == Path(DEFAULT_CACHE_TMP_DATA_DIR).resolve():
            return "default"
        return "custom"

    @staticmethod
    def _is_transport_failure_response(response: httpx.Response) -> bool:
        return "transport_exception" in response.extensions

    def _build_split_failure_metadata(
        self,
        operation_id: str,
        *,
        failed_chunk_index: int,
        successful_count: int,
        failed_count: int,
        total_chunks: int,
        response: httpx.Response,
    ) -> dict[str, str]:
        metadata = {
            f"{SPLIT_PDF_HEADER_PREFIX}Operation-Id": operation_id,
            f"{SPLIT_PDF_HEADER_PREFIX}Chunk-Index": str(failed_chunk_index),
            f"{SPLIT_PDF_HEADER_PREFIX}Chunk-Count": str(total_chunks),
            f"{SPLIT_PDF_HEADER_PREFIX}Success-Count": str(successful_count),
            f"{SPLIT_PDF_HEADER_PREFIX}Failure-Count": str(failed_count),
            f"{SPLIT_PDF_HEADER_PREFIX}Transport-Failure": str(
                self._is_transport_failure_response(response)
            ).lower(),
        }
        return metadata

    def _annotate_failure_response(
        self,
        operation_id: str,
        *,
        failed_chunk_index: int,
        successful_count: int,
        failed_count: int,
        total_chunks: int,
        response: httpx.Response,
    ) -> httpx.Response:
        metadata = self._build_split_failure_metadata(
            operation_id,
            failed_chunk_index=failed_chunk_index,
            successful_count=successful_count,
            failed_count=failed_count,
            total_chunks=total_chunks,
            response=response,
        )
        response.headers.update(metadata)
        response.extensions["split_pdf_failure_metadata"] = metadata
        return response
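
    # Example of the headers on an annotated failure response (values are
    # illustrative; the names come from SPLIT_PDF_HEADER_PREFIX above):
    #   X-Unstructured-Split-Operation-Id: <uuid>
    #   X-Unstructured-Split-Chunk-Index: 3
    #   X-Unstructured-Split-Chunk-Count: 5
    #   X-Unstructured-Split-Success-Count: 2
    #   X-Unstructured-Split-Failure-Count: 1
    #   X-Unstructured-Split-Transport-Failure: false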

    def sdk_init(
        self, base_url: str, client: HttpClient
    ) -> Tuple[str, HttpClient]:
        """Initializes Split PDF Hook.

        Adds a mock transport layer to the httpx client. This will return an
        empty 200 response whenever the specified "dummy host" is used. The before_request
        hook returns this request so the SDK always succeeds and jumps straight to
        after_success, where we can await the split results.

        Args:
            base_url (str): URL of the API.
            client (HttpClient): HTTP Client.

        Returns:
            Tuple[str, HttpClient]: The initialized SDK options.
        """
        class DummyTransport(httpx.BaseTransport):
            def __init__(self, base_transport: httpx.BaseTransport):
                self.base_transport = base_transport

            def handle_request(self, request: httpx.Request) -> httpx.Response:
                # Return an empty 200 response if we send a request to this dummy host
                if request.method == "GET" and request.url.host == "no-op":
                    return httpx.Response(status_code=200, content=b'')
                # Otherwise, pass the request to the default transport
                return self.base_transport.handle_request(request)

        # Note(austin) - This hook doesn't have access to the async_client,
        # so we can't do the same no-op trick for partition_async:
        # class AsyncDummyTransport(httpx.AsyncBaseTransport):
        #     def __init__(self, base_transport: httpx.AsyncBaseTransport):
        #         self.base_transport = base_transport
        #
        #     async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
        #         # Return an empty 200 response if we send a request to this dummy host
        #         if request.method == "GET" and request.url.host == "no-op":
        #             return httpx.Response(status_code=200, content=b'')
        #         # Otherwise, pass the request to the default transport
        #         return await self.base_transport.handle_async_request(request)

        # Instead, save the base url so we can use it for our dummy request.
        # As this can be overwritten with the Platform API URL, we need to get it
        # again in the `before_request` hook from the request object, as the real
        # URL is not available here.
        self.partition_base_url = base_url

        # Explicit cast to httpx.Client to avoid a typing error
        httpx_client = cast(httpx.Client, client)
        # async_httpx_client = cast(httpx.AsyncClient, async_client)

        # pylint: disable=protected-access
        httpx_client._transport = DummyTransport(httpx_client._transport)
        # pylint: disable=protected-access
        # async_httpx_client._transport = AsyncDummyTransport(async_httpx_client._transport)

        self.client = httpx_client
        return base_url, self.client

    # pylint: disable=too-many-return-statements
    def before_request(
        self, hook_ctx: BeforeRequestContext, request: httpx.Request
    ) -> Union[httpx.Request, Exception]:
        """If `splitPdfPage` is set to `true` in the request, the PDF file is split into
        separate chunks. Each chunk is sent as a separate request in parallel, and a
        dummy request is returned by this method so the SDK proceeds straight to
        `after_success`. It will return the original request when: `splitPdfPage` is
        set to `false`, the file is not a PDF, or the HTTP client has not been
        initialized.

        Args:
            hook_ctx (BeforeRequestContext): The hook context containing information about
                the operation.
            request (httpx.Request): The request object.

        Returns:
            Union[httpx.Request, Exception]: If `splitPdfPage` is set to `true`,
            the dummy request; otherwise, the original request.
        """
        # The general.partition operation overwrites the default client's base url (as
        # the platform operations do), so we need to get the base url from the request object.
        if hook_ctx.operation_id != "partition":
            return request
        self.partition_base_url = get_base_url(request.url)

        if self.client is None:
            logger.warning("HTTP client not accessible! Continuing without splitting.")
            return request

        # This is our key into coroutines_to_execute.
        # We need to pass it on to after_success so
        # we know which results are ours.
        operation_id = str(uuid.uuid4())

        content_type = request.headers.get("Content-Type")
        if content_type is None:
            return request
        form_data = request_utils.get_multipart_stream_fields(request)
        if not form_data:
            return request

        split_pdf_page = form_data.get(PARTITION_FORM_SPLIT_PDF_PAGE_KEY)
        if split_pdf_page is None or split_pdf_page == "false":
            return request

        pdf_file_meta = form_data.get(PARTITION_FORM_FILES_KEY)
        if pdf_file_meta is None or not all(
            metadata in pdf_file_meta for metadata in ["filename", "content_type", "file"]
        ):
            return request

        pdf_file = pdf_file_meta.get("file")
        if pdf_file is None:
            return request

        pdf = pdf_utils.read_pdf(pdf_file)
        if pdf is None:
            return request

        pdf = pdf_utils.check_pdf(pdf)

        starting_page_number = form_utils.get_starting_page_number(
            form_data,
            key=PARTITION_FORM_STARTING_PAGE_NUMBER_KEY,
            fallback_value=DEFAULT_STARTING_PAGE_NUMBER,
        )
        allow_failed = form_utils.get_split_pdf_allow_failed_param(
            form_data,
            key=PARTITION_FORM_SPLIT_PDF_ALLOW_FAILED_KEY,
            fallback_value=DEFAULT_ALLOW_FAILED,
        )
        concurrency_level = form_utils.get_split_pdf_concurrency_level_param(
            form_data,
            key=PARTITION_FORM_CONCURRENCY_LEVEL_KEY,
            fallback_value=DEFAULT_CONCURRENCY_LEVEL,
            max_allowed=MAX_CONCURRENCY_LEVEL,
        )
        cache_tmp_data_feature = form_utils.get_split_pdf_cache_tmp_data(
            form_data,
            key=PARTITION_FORM_SPLIT_CACHE_TMP_DATA_KEY,
            fallback_value=DEFAULT_CACHE_TMP_DATA,
        )
        cache_tmp_data_dir = form_utils.get_split_pdf_cache_tmp_data_dir(
            form_data,
            key=PARTITION_FORM_SPLIT_CACHE_TMP_DATA_DIR_KEY,
            fallback_value=DEFAULT_CACHE_TMP_DATA_DIR,
        )
        page_range_start, page_range_end = form_utils.get_page_range(
            form_data,
            key=PARTITION_FORM_PAGE_RANGE_KEY.replace("[]", ""),
            max_pages=pdf.get_num_pages(),
        )

        page_count = page_range_end - page_range_start + 1
        split_size = get_optimal_split_size(
            num_pages=page_count, concurrency_level=concurrency_level
        )

        # If the doc is small enough, and we aren't slicing it with a page range:
        # do not split, just continue with the original request
        if split_size >= page_count and page_count == len(pdf.pages):
            return request
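
        # Example of the no-split path: a 2-page PDF at the default
        # concurrency of 10 yields split_size = 2 (ceil(2 / 10) clamped up to
        # MIN_PAGES_PER_SPLIT), so 2 >= 2 holds and the original request is
        # sent through unchanged.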

        self.allow_failed[operation_id] = allow_failed
        self.cache_tmp_data_feature[operation_id] = cache_tmp_data_feature
        self.cache_tmp_data_dir[operation_id] = cache_tmp_data_dir
        self.concurrency_level[operation_id] = concurrency_level
        self.executors[operation_id] = futures.ThreadPoolExecutor(max_workers=1)

        timeout_seconds = _get_request_timeout_seconds(request)
        if timeout_seconds is None and hook_ctx.config.timeout_ms is not None:
            timeout_seconds = hook_ctx.config.timeout_ms / 1000
        self.operation_timeouts[operation_id] = timeout_seconds
        self.operation_retry_configs[operation_id] = (
            hook_ctx.config.retry_config
            if isinstance(hook_ctx.config.retry_config, RetryConfig)
            else None
        )

        try:
            pdf = self._trim_large_pages(pdf, form_data)
            pdf.stream.seek(0)
            pdf_bytes = pdf.stream.read()

            temp_dir_path = None
            if cache_tmp_data_feature:
                pdf_chunk_paths = self._get_pdf_chunk_paths(
                    pdf_bytes,
                    operation_id=operation_id,
                    cache_tmp_data_dir=cache_tmp_data_dir,
                    split_size=split_size,
                    page_start=page_range_start,
                    page_end=page_range_end,
                )
                temp_dir = self.tempdirs.get(operation_id)
                temp_dir_path = temp_dir.name if temp_dir is not None else None
                # force free PDF object memory
                del pdf
                pdf_chunks = self._get_pdf_chunk_files(pdf_chunk_paths)
            else:
                pdf_chunks = self._get_pdf_chunks_in_memory(
                    pdf_bytes,
                    split_size=split_size,
                    page_start=page_range_start,
                    page_end=page_range_end,
                )
            self.coroutines_to_execute[operation_id] = []
            for pdf_chunk_file, page_index in pdf_chunks:
                chunk_index = len(self.coroutines_to_execute[operation_id]) + 1
                page_number = page_index + starting_page_number
                pdf_chunk_request = request_utils.create_pdf_chunk_request(
                    form_data=form_data,
                    pdf_chunk=(pdf_chunk_file, page_number),
                    filename=pdf_file_meta["filename"],
                    original_request=request,
                )
                # using partial as the shared client parameter must be passed in the
                # `run_tasks` function in `after_success`
                coroutine = partial(
                    self.call_api_partial,
                    _operation_id=operation_id,
                    chunk_index=chunk_index,
                    page_number=page_number,
                    pdf_chunk_request=pdf_chunk_request,
                    pdf_chunk_file=pdf_chunk_file,
                    retry_config=self.operation_retry_configs.get(operation_id),
                    cache_tmp_data_feature=cache_tmp_data_feature,
                    temp_dir_path=temp_dir_path,
                )
                self.coroutines_to_execute[operation_id].append(coroutine)

            logger.info(
                "split_pdf event=plan_created operation_id=%s filename=%s strategy=%s page_range=%s-%s page_count=%d split_size=%d chunk_count=%d concurrency=%d allow_failed=%s cache_mode=%s timeout_seconds=%s retry_config_mode=%s",
                operation_id,
                Path(pdf_file_meta["filename"]).name,
                form_data.get("strategy"),
                page_range_start,
                page_range_end,
                page_count,
                split_size,
                len(self.coroutines_to_execute[operation_id]),
                concurrency_level,
                allow_failed,
                self._cache_mode_observability_value(
                    cache_tmp_data_feature,
                    cache_tmp_data_dir,
                ),
                timeout_seconds,
                self._retry_config_observability_mode(
                    self.operation_retry_configs.get(operation_id),
                ),
            )
            self.pending_operation_ids[operation_id] = operation_id

            dummy_request_extensions = request.extensions.copy()
            dummy_request_extensions[OPERATION_ID_EXTENSION_KEY] = operation_id
            return httpx.Request(
                "GET",
                f"{self.partition_base_url}/general/docs",
                headers={"operation_id": operation_id},
                extensions=dummy_request_extensions,
            )
        except Exception:
            self._clear_operation(operation_id)
            raise

    async def call_api_partial(
        self,
        pdf_chunk_request: httpx.Request,
        pdf_chunk_file: BinaryIO,
        limiter: asyncio.Semaphore,
        _operation_id: str,
        chunk_index: int,
        page_number: int,
        async_client: AsyncClient,
        retry_config: Optional[RetryConfig],
        cache_tmp_data_feature: bool,
        temp_dir_path: Optional[str],
    ) -> httpx.Response:
        logger.debug(
            "split_pdf event=chunk_start operation_id=%s chunk_index=%d page_number=%d cache_mode=%s",
            _operation_id,
            chunk_index,
            page_number,
            "cached" if cache_tmp_data_feature else "memory",
        )
        response = await request_utils.call_api_async(
            client=async_client,
            limiter=limiter,
            pdf_chunk_request=pdf_chunk_request,
            pdf_chunk_file=pdf_chunk_file,
            retry_config=retry_config,
            operation_id=_operation_id,
            chunk_index=chunk_index,
            page_number=page_number,
        )

        # Immediately delete the request to save memory
        del response._request  # pylint: disable=protected-access
        response._request = None  # pylint: disable=protected-access

        if response.status_code == 200:
            if cache_tmp_data_feature:
                if temp_dir_path is None:
                    raise RuntimeError("Temp directory path not found for cached split PDF operation")
                # If we get 200, dump the contents to a file and return the path
                temp_file_name = f"{temp_dir_path}/{uuid.uuid4()}.json"
                async with aiofiles.open(temp_file_name, mode='wb') as temp_file:
                    # Avoid reading the entire response into memory
                    async for bytes_chunk in response.aiter_bytes():
                        await temp_file.write(bytes_chunk)
                # we save the path in the content attribute to be used in after_success
                response._content = temp_file_name.encode()  # pylint: disable=protected-access
                logger.debug(
                    "split_pdf event=chunk_cached operation_id=%s chunk_index=%d page_number=%d cache_file=%s",
                    _operation_id,
                    chunk_index,
                    page_number,
                    Path(temp_file_name).name,
                )
        logger.debug(
            "split_pdf event=chunk_complete operation_id=%s chunk_index=%d page_number=%d status_code=%d",
            _operation_id,
            chunk_index,
            page_number,
            response.status_code,
        )
        return response
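
    # When caching is on, a successful chunk's body is replaced by the path of
    # a JSON file on disk, and load_elements_from_response() reads that path
    # back later, so large response bodies never sit fully in memory.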

    def _trim_large_pages(self, pdf: PdfReader, form_data: dict[str, Any]) -> PdfReader:
        if form_data.get("strategy") != HI_RES_STRATEGY:
            return pdf

        max_page_length = MAX_PAGE_LENGTH
        any_page_over_maximum_length = False
        for page in pdf.pages:
            if page.mediabox.height >= max_page_length:
                any_page_over_maximum_length = True

        # early exit if all pages are safely under the max page length
        if not any_page_over_maximum_length:
            return pdf

        w = PdfWriter()
        # trim large pages that exceed the maximum supported height for processing
        for page in pdf.pages:
            if page.mediabox.height >= max_page_length:
                page.mediabox.top = page.mediabox.height
                page.mediabox.bottom = page.mediabox.top - max_page_length
            w.add_page(page)

        chunk_buffer = io.BytesIO()
        w.write(chunk_buffer)
        chunk_buffer.seek(0)
        return PdfReader(chunk_buffer)
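
    # Example: under the hi_res strategy with MAX_PAGE_LENGTH=4000, a page
    # whose mediabox is 6000 points tall is cropped to its top 4000 points;
    # pages under the limit are copied through unchanged.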

    def _get_pdf_chunks_in_memory(
        self,
        pdf_bytes: bytes,
        split_size: int = 1,
        page_start: int = 1,
        page_end: Optional[int] = None,
    ) -> Generator[Tuple[BinaryIO, int], None, None]:
        """Reads the given bytes of a pdf file and splits it into pdf-chunks, each
        with `split_size` pages. The chunks are kept as in-memory buffers rather
        than written to disk.

        Args:
            pdf_bytes: Content of the PDF file.
            split_size: Split size, e.g. if the given file has 10 pages
                and this value is set to 2 it will yield 5 documents, each containing
                2 pages of the original document. By default each page goes into a
                separate chunk.
            page_start: Begin splitting at this page number
            page_end: If provided, split up to and including this page number

        Yields:
            Tuple[BinaryIO, int]: The in-memory buffer of the chunk and its
            zero-based page offset.
        """
        with pdfium.PdfDocument(pdf_bytes) as pdf:
            offset = page_start - 1
            offset_end = page_end if page_end else len(pdf)
            while offset < offset_end:
                end = min(offset + split_size, offset_end)
                # Create a new PDF
                new_pdf = pdfium.PdfDocument.new()
                # Import the selected pages
                page_indices = list(range(offset, end))
                new_pdf.import_pages(pdf, pages=page_indices)
                # Save to an in-memory buffer
                chunk_buffer = io.BytesIO()
                new_pdf.save(chunk_buffer)
                chunk_buffer.seek(0)
                new_pdf.close()
                yield chunk_buffer, offset
                offset += split_size
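
    # Example: a 5-page document with split_size=2 yields three chunks with
    # page offsets 0, 2 and 4 (pages 1-2, 3-4 and 5).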

    def _get_pdf_chunk_paths(
        self,
        pdf_bytes: bytes,
        operation_id: str,
        cache_tmp_data_dir: str,
        split_size: int = 1,
        page_start: int = 1,
        page_end: Optional[int] = None,
    ) -> list[Tuple[Path, int]]:
        """Reads the given bytes of a pdf file and splits it into pdf-chunks, each
        with `split_size` pages. The chunks are written into temporary files in
        a temporary directory corresponding to the operation_id.

        Args:
            pdf_bytes: Content of the PDF file.
            split_size: Split size, e.g. if the given file has 10 pages
                and this value is set to 2 it will yield 5 documents, each containing
                2 pages of the original document. By default each page goes into a
                separate file.
            page_start: Begin splitting at this page number
            page_end: If provided, split up to and including this page number

        Returns:
            The list of temporary file paths and their page offsets.
        """
        with pdfium.PdfDocument(pdf_bytes) as pdf:
            offset = page_start - 1
            offset_end = page_end if page_end else len(pdf)

            # Create a temporary directory scoped to this operation
            tempdir = tempfile.TemporaryDirectory(  # pylint: disable=consider-using-with
                dir=cache_tmp_data_dir,
                prefix="unstructured_client_",
            )
            self.tempdirs[operation_id] = tempdir
            tempdir_path = Path(tempdir.name)

            pdf_chunk_paths: list[Tuple[Path, int]] = []
            chunk_no = 0
            while offset < offset_end:
                chunk_no += 1
                end = min(offset + split_size, offset_end)
                # Create a new PDF with the selected pages
                new_pdf = pdfium.PdfDocument.new()
                page_indices = list(range(offset, end))
                new_pdf.import_pages(pdf, pages=page_indices)
                # Save to a file
                chunk_path = tempdir_path / f"chunk_{chunk_no}.pdf"
                new_pdf.save(str(chunk_path))  # Convert Path to string
                new_pdf.close()
                pdf_chunk_paths.append((chunk_path, offset))
                offset += split_size
            return pdf_chunk_paths

    def _get_pdf_chunk_files(
        self, pdf_chunks: list[Tuple[Path, int]]
    ) -> Generator[Tuple[BinaryIO, int], None, None]:
        """Yields the file objects for the given pdf chunk paths.

        Args:
            pdf_chunks (list[Tuple[Path, int]]): The list of pdf chunk paths and
                their page offsets.

        Yields:
            Tuple[BinaryIO, int]: The file object and the page offset.

        Raises:
            Exception: If the file cannot be opened.
        """
        for pdf_chunk_filename, offset in pdf_chunks:
            pdf_chunk_file = None
            try:
                pdf_chunk_file = open(  # pylint: disable=consider-using-with
                    pdf_chunk_filename,
                    mode="rb",
                )
            except (FileNotFoundError, IOError):
                if pdf_chunk_file and not pdf_chunk_file.closed:
                    pdf_chunk_file.close()
                raise
            yield pdf_chunk_file, offset

    def _await_elements(self, operation_id: str) -> Optional[list]:
        """
        Waits for the partition requests to complete and returns the flattened
        elements.

        When `split_pdf_allow_failed=True`, chunk-level non-200 responses and
        transport failures are recorded in `api_failed_responses` and omitted
        from the returned element list. If every chunk fails, the combined
        result is an empty list.

        Args:
            operation_id (str): The ID of the operation.

        Returns:
            Optional[list]: The flattened elements if the partition requests are
            completed, otherwise None.
        """
        tasks = self.coroutines_to_execute.get(operation_id)
        if tasks is None:
            return None

        started_at = time.perf_counter()
        concurrency_level = self.concurrency_level.get(operation_id, DEFAULT_CONCURRENCY_LEVEL)
        timeout_seconds = self.operation_timeouts.get(operation_id)
        client_timeout = httpx.Timeout(timeout_seconds) if timeout_seconds is not None else None
        allow_failed = self.allow_failed.get(operation_id, DEFAULT_ALLOW_FAILED)
        coroutines = run_tasks(
            tasks,
            allow_failed=allow_failed,
            concurrency_level=concurrency_level,
            client_timeout=client_timeout,
            operation_id=operation_id,
        )

        # sending the coroutines to a separate thread to avoid blocking the current event loop
        # this operation should be removed when the SDK is updated to support async hooks
        executor = self.executors.get(operation_id)
        if executor is None:
            raise RuntimeError("Executor not found for operation_id")
        loop_holder: dict[str, Optional[asyncio.AbstractEventLoop]] = {"loop": None}
        self.operation_loops[operation_id] = loop_holder
        try:
            task_responses_future = executor.submit(
                _run_coroutines_in_separate_thread,
                coroutines,
                loop_holder,
            )
            self.operation_futures[operation_id] = task_responses_future
            # The per-chunk timeout bounds each HTTP call, but the batch may run in
            # multiple waves (ceil(chunks / concurrency)). Scale the outer future
            # timeout accordingly so healthy multi-wave batches aren't killed early.
            num_waves = max(1, math.ceil(len(tasks) / concurrency_level))
            per_chunk = timeout_seconds or DEFAULT_FUTURE_TIMEOUT_MINUTES * 60
            future_timeout = per_chunk * num_waves + TIMEOUT_BUFFER_SECONDS
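            # Example: 25 chunks at concurrency 10 run in ceil(25 / 10) = 3
            # waves; with a 300-second per-chunk timeout the outer future is
            # allowed 3 * 300 + 5 = 905 seconds before batch_timeout fires.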
            logger.info(
                "split_pdf event=batch_start operation_id=%s chunk_count=%d concurrency=%d allow_failed=%s client_timeout_seconds=%s future_timeout_seconds=%s num_waves=%d",
                operation_id,
                len(tasks),
                concurrency_level,
                allow_failed,
                timeout_seconds,
                future_timeout,
                num_waves,
            )
            task_responses = task_responses_future.result(timeout=future_timeout)
        except futures.TimeoutError:
            loop = loop_holder.get("loop")
            logger.error(
                "split_pdf event=batch_timeout operation_id=%s chunk_count=%d concurrency=%d allow_failed=%s client_timeout_seconds=%s future_timeout_seconds=%s",
                operation_id,
                len(tasks),
                concurrency_level,
                allow_failed,
                timeout_seconds,