import os
import json
import shutil
import argparse
import logging
import multiprocessing as mp
import time
import signal
import traceback
from concurrent.futures import ProcessPoolExecutor, as_completed

import torch
import psutil
import numpy as np
import fitz
from tqdm import tqdm

from magic_pdf.pipe.UNIPipe import UNIPipe
from magic_pdf.libs.commons import read_file
from magic_pdf.libs.config_reader import get_device
from magic_pdf.tools.common import do_parse
from magic_pdf.libs.pdf_image_tools import cut_image
from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter
from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
from magic_pdf.filter.pdf_classify_by_type import classify

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

MIN_BATCH_SIZE = 1


def parse_arguments():
    parser = argparse.ArgumentParser(description="Process multiple PDFs using Magic PDF")
    parser.add_argument("--input", default="input", help="Input folder containing PDF files")
    parser.add_argument("--output", default="output", help="Output folder for processed files")
    parser.add_argument("--config", default="magic-pdf.template.json", help="Path to configuration file")
    parser.add_argument("--timeout", type=int, default=240, help="Timeout for processing each PDF (in seconds)")
    parser.add_argument("--max-workers", type=int, default=None, help="Maximum number of worker processes")
    parser.add_argument("--use-bf16", action="store_true", help="Use bfloat16 precision for model inference")
    parser.add_argument("--initial-batch-size", type=int, default=1, help="Initial batch size for processing")
    return parser.parse_args()
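
# Example invocation (the script name and paths below are illustrative):
#   python process_pdfs.py --input ./pdfs --output ./out --timeout 300 --use-bf16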


def load_config(config_path):
    with open(config_path, 'r') as f:
        return json.load(f)


def get_available_memory(gpu_id):
    return torch.cuda.get_device_properties(gpu_id).total_memory - torch.cuda.memory_allocated(gpu_id)
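

# Extract every embedded image from a PDF into <output>/images/, named
# <pdf_name>_<page>_<index>.<ext> (page and index are 1-based, zero-padded).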
def extract_images(pdf_path, output_folder):
    doc = fitz.open(pdf_path)
    pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
    images_folder = os.path.join(output_folder, 'images')
    os.makedirs(images_folder, exist_ok=True)

    for page_num, page in enumerate(doc):
        for img_index, img in enumerate(page.get_images(full=True)):
            xref = img[0]
            base_image = doc.extract_image(xref)
            image_bytes = base_image["image"]
            image_ext = base_image["ext"]
            image_filename = f'{pdf_name}_{page_num + 1:03d}_{img_index + 1:03d}.{image_ext}'
            image_path = os.path.join(images_folder, image_filename)
            with open(image_path, "wb") as image_file:
                image_file.write(image_bytes)
    doc.close()
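

# Minimal model wrapper used by the workers: process_pdf walks the layout info page by page and
# logs its inputs verbosely; process_page currently returns a placeholder payload per page.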
class MagicModel:
    def __init__(self, config):
        self.config = config

    def process_pdf(self, pdf_data, parse_type, layout_info, log_file_path):
        processed_pages = []
        with open(log_file_path, 'a') as log_file:
            log_file.write("Entering process_pdf\n")
            log_file.write(f"  parse_type: {parse_type} (expected: str)\n")
            log_file.write(f"  layout_info (length: {len(layout_info)}) (expected: list of dicts): {layout_info}\n")
        for page_index, page_info in enumerate(layout_info):
            try:
                with open(log_file_path, 'a') as log_file:
                    log_file.write(f"Processing page {page_index}\n")
                    log_file.write(f"  Page info (expected: dict): {page_info}\n")
                processed_page = self.process_page(page_info, parse_type, log_file_path)
                processed_pages.append(processed_page)
            except Exception as e:
                with open(log_file_path, 'a') as log_file:
                    log_file.write(f"Error processing page {page_index} in process_pdf: {str(e)}\n")
                    log_file.write(f"Page info (expected: dict): {page_info}\n")
        with open(log_file_path, 'a') as log_file:
            log_file.write("Exiting process_pdf\n")
        return {
            "processed_pages": processed_pages,
            "parse_type": parse_type,
        }

    def process_page(self, page_info, parse_type, log_file_path):
        with open(log_file_path, 'a') as log_file:
            log_file.write("Entering process_page\n")
            log_file.write(f"  page_info (expected: dict): {page_info}\n")
            log_file.write(f"  parse_type (expected: str): {parse_type}\n")
        result = {
            "page_no": page_info.get("page_info", {}).get("page_no", "unknown"),
            "content": "Processed page content",
            "parse_type": parse_type
        }
        with open(log_file_path, 'a') as log_file:
            log_file.write("Exiting process_page\n")
        return result
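

# Process one PDF end to end: set up a SIGALRM timeout and the torch device/dtype, scan and
# classify the document, run the UNIPipe classify/analyze/markdown/uni-format steps, write all
# artifacts to <output>/<pdf_name>/auto/, and return a (file, status, result) tuple.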
def process_single_pdf(input_file, output_folder, gpu_id, config, timeout, use_bf16, model, log_file_path):
    start_time = time.time()
    pdf_name = os.path.splitext(os.path.basename(input_file))[0]
    output_subfolder = os.path.join(output_folder, pdf_name, 'auto')
    os.makedirs(output_subfolder, exist_ok=True)

    def timeout_handler(signum, frame):
        raise TimeoutError("PDF processing timed out")

    try:
        signal.signal(signal.SIGALRM, timeout_handler)
        signal.alarm(timeout)

        if gpu_id >= 0:
            torch.cuda.set_device(gpu_id)
            if use_bf16 and torch.cuda.is_bf16_supported():
                torch.set_default_dtype(torch.bfloat16)
            else:
                torch.set_default_dtype(torch.float32)
            torch.set_default_device(f'cuda:{gpu_id}')
        else:
            if use_bf16:
                torch.set_default_dtype(torch.bfloat16)
            else:
                torch.set_default_dtype(torch.float32)
            torch.set_default_device('cpu')
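
        # Read the raw PDF bytes and scan document-level metadata (page sizes, text/image stats per page).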
        pdf_data = read_file(input_file, 'rb')

        metadata = pdf_meta_scan(pdf_data)
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Processing PDF: {input_file}\n")
            log_file.write(f"Metadata (expected: dict): {json.dumps(metadata, indent=2)}\n")

        # Skip PDFs that the metadata scan marks as unusable.
        if metadata.get("_need_drop", False):
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"Dropping PDF {input_file}: {metadata.get('_drop_reason', 'Unknown reason')}\n")
            return input_file, "Dropped", None

        # The classifier below needs all of these metadata fields.
        required_fields = ['total_page', 'page_width_pts', 'page_height_pts', 'image_info_per_page',
                           'text_len_per_page', 'imgs_per_page', 'text_layout_per_page', 'invalid_chars']
        for field in required_fields:
            if field not in metadata:
                raise ValueError(f"Required field '{field}' not found in metadata for {input_file}")

        total_page = metadata['total_page']
        page_width = metadata['page_width_pts']
        page_height = metadata['page_height_pts']
        img_sz_list = metadata['image_info_per_page']
        text_len_list = metadata['text_len_per_page']
        img_num_list = metadata['imgs_per_page']
        text_layout_list = metadata['text_layout_per_page']
        invalid_chars = metadata['invalid_chars']

        with open(log_file_path, 'a') as log_file:
            log_file.write("Classify parameters:\n")
            log_file.write(f"  total_page (expected: int): {total_page}\n")
            log_file.write(f"  page_width (expected: int): {page_width}\n")
            log_file.write(f"  page_height (expected: int): {page_height}\n")
            log_file.write(f"  img_sz_list (expected: list of lists): {img_sz_list[:5]}...\n")
            log_file.write(f"  text_len_list (expected: list of ints): {text_len_list[:5]}...\n")
            log_file.write(f"  img_num_list (expected: list of ints): {img_num_list[:5]}...\n")
            log_file.write(f"  text_layout_list (expected: list of strs): {text_layout_list[:5]}...\n")
            log_file.write(f"  invalid_chars (expected: bool): {invalid_chars}\n")

        # Decide whether the PDF has a usable text layer or needs OCR.
        is_text_pdf, classification_results = classify(
            total_page, page_width, page_height, img_sz_list[:total_page],
            text_len_list[:total_page], img_num_list[:total_page],
            text_layout_list, invalid_chars
        )
        with open(log_file_path, 'a') as log_file:
            log_file.write("Classification Results:\n")
            log_file.write(f"  is_text_pdf (expected: bool): {is_text_pdf}\n")
            log_file.write(f"  classification_results (expected: dict): {classification_results}\n")
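
        # Set up the UNIPipe pipeline; DiskReaderWriter handles image output for the parsed document.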
        image_writer = DiskReaderWriter(output_subfolder)
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Image writer initialized: {image_writer}\n")

        # Empty model list: no pre-computed model output is supplied to UNIPipe.
        model_json = []
        jso_useful_key = {"_pdf_type": "", "model_list": model_json}

        unipipe = UNIPipe(pdf_data, jso_useful_key, image_writer)
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"UNIPipe initialized: {unipipe}\n")

        parse_type = unipipe.pipe_classify()
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"pipe_classify result (expected: str): {parse_type}\n")

        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Detailed pipe_analyze Inputs for {input_file}:\n")
            log_file.write(f"  parse_type (expected: str): {parse_type}\n")
        layout_info = unipipe.pipe_analyze()
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"pipe_analyze Results (expected: list of dicts, length: {len(layout_info)}): {layout_info}\n")
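
        # Scanned or image-only PDFs are forced down the OCR path regardless of pipe_classify's answer.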
        if not is_text_pdf:
            parse_type = 'ocr'
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"parse_type after OCR check (expected: str): {parse_type}\n")

        # Run the model over the analyzed layout, then render markdown and the uni-format output.
        parse_result = model.process_pdf(pdf_data, parse_type, layout_info, log_file_path)
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Model process_pdf result (expected: dict): {parse_result}\n")

        markdown_content = unipipe.pipe_mk_markdown(parse_result)
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"pipe_mk_markdown result (expected: str, length: {len(markdown_content)}): {markdown_content}\n")

        uni_format = unipipe.pipe_mk_uni_format(parse_result)
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"pipe_mk_uni_format result (expected: dict): {uni_format}\n")
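
        # Persist outputs under <output>/<pdf_name>/auto/: markdown, intermediate JSON, uni-format JSON,
        # a copy of the source PDF, do_parse's bbox-annotated output, and the extracted images.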
        with open(os.path.join(output_subfolder, f'{pdf_name}.md'), 'w', encoding='utf-8') as f:
            f.write(markdown_content)

        with open(os.path.join(output_subfolder, 'middle.json'), 'w', encoding='utf-8') as f:
            json.dump(parse_result, f, ensure_ascii=False, indent=2)

        with open(os.path.join(output_subfolder, 'model.json'), 'w', encoding='utf-8') as f:
            json.dump(uni_format, f, ensure_ascii=False, indent=2)

        shutil.copy(input_file, os.path.join(output_subfolder, f'{pdf_name}.pdf'))

        do_parse(input_file, parse_type, output_subfolder, draw_bbox=True)

        extract_images(input_file, output_subfolder)

        processing_time = time.time() - start_time
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Successfully processed {input_file} on GPU {gpu_id} in {processing_time:.2f} seconds\n")

        result = {
            "file_name": pdf_name,
            "processing_time": processing_time,
            "parse_type": parse_type,
            "metadata": metadata,
            "classification": classification_results,
            "is_text_pdf": is_text_pdf
        }

        return input_file, "Success", result

    except ValueError as ve:
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Metadata error: {str(ve)}\n")
        return input_file, f"Metadata Error: {str(ve)}", None

    except TimeoutError:
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Processing timed out after {timeout} seconds\n")
        return input_file, "Timeout", None
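
    # A CUDA OOM is re-raised so the parent process can shrink the batch size; any other exception is
    # saved with its full traceback and reported as a failure instead of killing the worker.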
    except torch.cuda.OutOfMemoryError:
        raise

    except Exception as e:
        traceback_file = os.path.join(output_folder, 'traceback.txt')
        with open(traceback_file, 'w') as f:
            f.write(traceback.format_exc())

        print(f"Error occurred: {e}")
        print(f"Full traceback saved to: {traceback_file}")
        return input_file, f"Error: {str(e)}", None

    finally:
        signal.alarm(0)
        if gpu_id >= 0:
            torch.cuda.empty_cache()
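

# Worker-side helpers: run one batch of PDFs sequentially on a single device, and append each
# successful result payload (the third element of the result tuple) to a JSONL file.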
def process_pdf_batch(batch, output_folder, gpu_id, config, timeout, use_bf16, model, log_file_path):
    results = []
    for pdf_file in batch:
        result = process_single_pdf(pdf_file, output_folder, gpu_id, config, timeout, use_bf16, model, log_file_path)
        results.append(result)
    return results


def write_to_jsonl(results, output_file):
    with open(output_file, 'a') as f:
        for result in results:
            if result[2]:
                json.dump(result[2], f)
                f.write('\n')


def get_gpu_memory_usage(gpu_id):
    if gpu_id < 0:
        return 0, 0
    total_memory = torch.cuda.get_device_properties(gpu_id).total_memory
    allocated_memory = torch.cuda.memory_allocated(gpu_id)
    return allocated_memory, total_memory
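

# Orchestration: spawn worker processes, split the PDF list across the available GPUs, adapt the
# batch size when CUDA runs out of memory, collect per-file results into a JSONL file, and write
# a summary report.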
def main():
    mp.set_start_method('spawn', force=True)

    args = parse_arguments()
    config = load_config(args.config)

    input_folder = args.input
    output_folder = args.output
    os.makedirs(output_folder, exist_ok=True)

    pdf_files = [os.path.join(input_folder, f) for f in os.listdir(input_folder) if f.endswith('.pdf')]

    num_gpus = torch.cuda.device_count()
    if num_gpus == 0:
        print("No GPUs available. Using CPU.")
        num_gpus = 1
        gpu_ids = [-1]
    else:
        gpu_ids = list(range(num_gpus))

    num_workers = args.max_workers or min(num_gpus, os.cpu_count())

    main_jsonl = os.path.join(output_folder, 'processing_results.jsonl')
    temp_jsonl = os.path.join(output_folder, 'temp_results.jsonl')
    log_file_path = os.path.join(output_folder, 'processing_log.txt')

    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    model = MagicModel(config)
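
    # Round-robin the PDFs across GPUs; grow the batch size after successful batches until the first
    # OOM, then shrink it (not below MIN_BATCH_SIZE) whenever a batch hits CUDA OOM.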
    results = []
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for gpu_index, gpu_id in enumerate(gpu_ids):
            # Each GPU works on its own slice of the input files so no PDF is processed twice.
            gpu_files = pdf_files[gpu_index::len(gpu_ids)]
            batch_size = args.initial_batch_size
            pdf_index = 0
            oom_occurred = False
            while pdf_index < len(gpu_files):
                batch = gpu_files[pdf_index:pdf_index + batch_size]
                try:
                    future = executor.submit(process_pdf_batch, batch, output_folder, gpu_id, config,
                                             args.timeout, args.use_bf16, model, log_file_path)
                    batch_results = future.result()
                    results.extend(batch_results)
                    for result in batch_results:
                        write_to_jsonl([result], temp_jsonl)

                    allocated, total = get_gpu_memory_usage(gpu_id)
                    with open(log_file_path, 'a') as log_file:
                        log_file.write(
                            f"GPU {gpu_id} - Batch size: {batch_size}, VRAM usage: "
                            f"{allocated / 1024 ** 3:.2f}GB / {total / 1024 ** 3:.2f}GB\n")

                    if not oom_occurred:
                        batch_size += 1
                    pdf_index += len(batch)
                except torch.cuda.OutOfMemoryError:
                    oom_occurred = True
                    batch_size = max(MIN_BATCH_SIZE, batch_size - 1)
                    with open(log_file_path, 'a') as log_file:
                        log_file.write(f"OOM error occurred. Reducing batch size to {batch_size}\n")
                    torch.cuda.empty_cache()
                    continue

            # Merge this GPU's temporary results into the main JSONL file.
            if os.path.exists(temp_jsonl):
                with open(temp_jsonl, 'r') as temp, open(main_jsonl, 'a') as main:
                    shutil.copyfileobj(temp, main)
                os.remove(temp_jsonl)

            if gpu_id >= 0:
                torch.cuda.empty_cache()

    success_count = sum(1 for _, status, _ in results if status == "Success")
    timeout_count = sum(1 for _, status, _ in results if status == "Timeout")
    error_count = len(results) - success_count - timeout_count

    with open(log_file_path, 'a') as log_file:
        log_file.write(
            f"Processed {len(results)} PDFs. {success_count} succeeded, {timeout_count} timed out, "
            f"{error_count} failed.\n")

    with open(os.path.join(output_folder, 'processing_summary.txt'), 'w') as summary:
        summary.write(f"Total PDFs processed: {len(results)}\n")
        summary.write(f"Successful: {success_count}\n")
        summary.write(f"Timed out: {timeout_count}\n")
        summary.write(f"Failed: {error_count}\n\n")
        summary.write("Failed PDFs:\n")
        for pdf, status, _ in (result for result in results if result[1] != "Success"):
            summary.write(f"  - {pdf}: {status}\n")


if __name__ == '__main__':
    main()