Improve algorithms for HTML and PDF processing

Josako
2024-07-08 15:20:45 +02:00
parent 318d23d8c6
commit ea0127b4b8
6 changed files with 176 additions and 194 deletions


@@ -14,11 +14,8 @@ from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from sqlalchemy.exc import SQLAlchemyError
# Unstructured commercial client imports
from unstructured_client import UnstructuredClient
from unstructured_client.models import shared
from unstructured_client.models.errors import SDKError
from pytube import YouTube
import PyPDF2
from common.extensions import db
from common.models.document import DocumentVersion, Embedding
@@ -105,22 +102,19 @@ def create_embeddings(tenant_id, document_version_id):
def process_pdf(tenant, model_variables, document_version):
base_path = os.path.join(current_app.config['UPLOAD_FOLDER'],
document_version.file_location)
file_path = os.path.join(current_app.config['UPLOAD_FOLDER'],
document_version.file_location,
document_version.file_name)
if os.path.exists(file_path):
with open(file_path, 'rb') as f:
files = shared.Files(content=f.read(), file_name=document_version.file_name)
req = shared.PartitionParameters(
files=files,
strategy='hi_res',
hi_res_model_name='yolox',
coordinates=True,
extract_image_block_types=['Image', 'Table'],
chunking_strategy='by_title',
combine_under_n_chars=model_variables['min_chunk_size'],
max_characters=model_variables['max_chunk_size'],
)
pdf_text = ''
# Extract the text of every page and concatenate it into one string
with open(file_path, 'rb') as file:
reader = PyPDF2.PdfReader(file)
for page_num in range(len(reader.pages)):
page = reader.pages[page_num]
pdf_text += page.extract_text()
else:
current_app.logger.error(f'The physical file for document version {document_version.id} '
f'for tenant {tenant.id} '
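For reference, the PyPDF2 extraction above can be exercised on its own. A minimal sketch, assuming PyPDF2 3.x; the helper name and sample usage are illustrative, not part of this codebase:
import PyPDF2
def extract_pdf_text(file_path):
    # Concatenate the text layer of every page; extract_text() can
    # return None for pages without one, hence the `or ''` guard.
    text = ''
    with open(file_path, 'rb') as f:
        reader = PyPDF2.PdfReader(f)
        for page in reader.pages:
            text += page.extract_text() or ''
    return text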
@@ -128,17 +122,22 @@ def process_pdf(tenant, model_variables, document_version):
create_embeddings.update_state(state=states.FAILURE)
raise
try:
chunks = partition_doc_unstructured(tenant, document_version, req)
except Exception as e:
current_app.logger.error(f'Unable to create Embeddings for tenant {tenant.id} '
f'while processing PDF on document version {document_version.id} '
f'error: {e}')
create_embeddings.update_state(state=states.FAILURE)
raise
markdown = generate_markdown_from_pdf(tenant, model_variables, document_version, pdf_text)
markdown_file_name = f'{document_version.id}.md'
output_file = os.path.join(base_path, markdown_file_name)
with open(output_file, 'w') as f:
f.write(markdown)
potential_chunks = create_potential_chunks_for_markdown(base_path, markdown_file_name, tenant)
chunks = combine_chunks_for_markdown(potential_chunks, model_variables['min_chunk_size'],
model_variables['max_chunk_size'])
if len(chunks) > 1:
summary = summarize_chunk(tenant, model_variables, document_version, chunks[0])
document_version.system_context = f'Summary: {summary}\n'
else:
document_version.system_context = ''
summary = summarize_chunk(tenant, model_variables, document_version, chunks[0])
document_version.system_context = f'Summary: {summary}\n'
enriched_chunks = enrich_chunks(tenant, document_version, chunks)
embeddings = embed_chunks(tenant, model_variables, document_version, enriched_chunks)
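create_potential_chunks_for_markdown and combine_chunks_for_markdown are defined outside this diff. A plausible sketch, assuming the splitter starts a new piece at each markdown heading and the combiner greedily packs pieces between min_chunk_size and max_chunk_size, mirroring the combine_chunks logic removed further down; both bodies are illustrative assumptions, not the real helpers:
def split_markdown_on_headings(markdown_text):
    # Hypothetical splitter: start a new potential chunk at every heading.
    pieces, current = [], []
    for line in markdown_text.splitlines(keepends=True):
        if line.startswith('#') and current:
            pieces.append(''.join(current))
            current = []
        current.append(line)
    if current:
        pieces.append(''.join(current))
    return pieces
def greedy_combine(pieces, min_chars, max_chars):
    # Hypothetical combiner: grow a chunk until the next piece would push it
    # past max_chars, but only cut once the chunk has reached min_chars.
    chunks, current = [], ''
    for piece in pieces:
        if len(current) + len(piece) > max_chars and len(current) >= min_chars:
            chunks.append(current)
            current = piece
        else:
            current += piece
    if current:
        chunks.append(current)
    return chunks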
@@ -150,10 +149,8 @@ def process_pdf(tenant, model_variables, document_version):
db.session.commit()
except SQLAlchemyError as e:
current_app.logger.error(f'Error saving embedding information for tenant {tenant.id} '
f'on PDF, document version {document_version.id} '
f'error: {e}')
db.session.rollback()
create_embeddings.update_state(state=states.FAILURE)
raise
current_app.logger.info(f'Embeddings created successfully for tenant {tenant.id} '
@@ -179,6 +176,9 @@ def process_html(tenant, model_variables, document_version):
html_included_elements = model_variables['html_included_elements']
html_excluded_elements = model_variables['html_excluded_elements']
base_path = os.path.join(current_app.config['UPLOAD_FOLDER'],
document_version.file_location)
file_path = os.path.join(current_app.config['UPLOAD_FOLDER'],
document_version.file_location,
document_version.file_name)
@@ -193,16 +193,22 @@ def process_html(tenant, model_variables, document_version):
create_embeddings.update_state(state=states.FAILURE)
raise
extracted_data, title = parse_html(html_content, html_tags, included_elements=html_included_elements,
extracted_html, title = parse_html(html_content, html_tags, included_elements=html_included_elements,
excluded_elements=html_excluded_elements)
potential_chunks = create_potential_chunks(extracted_data, html_end_tags)
current_app.embed_tuning_logger.debug(f'Nr of potential chunks: {len(potential_chunks)}')
extracted_file_name = f'{document_version.id}-extracted.html'
output_file = os.path.join(base_path, extracted_file_name)
with open(output_file, 'w') as f:
f.write(extracted_html)
chunks = combine_chunks(potential_chunks,
model_variables['min_chunk_size'],
model_variables['max_chunk_size']
)
current_app.logger.debug(f'Nr of chunks: {len(chunks)}')
markdown = generate_markdown_from_html(tenant, model_variables, document_version, extracted_html)
markdown_file_name = f'{document_version.id}.md'
output_file = os.path.join(base_path, markdown_file_name)
with open(output_file, 'w') as f:
f.write(markdown)
potential_chunks = create_potential_chunks_for_markdown(base_path, markdown_file_name, tenant)
chunks = combine_chunks_for_markdown(potential_chunks, model_variables['min_chunk_size'],
model_variables['max_chunk_size'])
if len(chunks) > 1:
summary = summarize_chunk(tenant, model_variables, document_version, chunks[0])
@@ -253,6 +259,40 @@ def enrich_chunks(tenant, document_version, chunks):
return enriched_chunks
def generate_markdown_from_html(tenant, model_variables, document_version, html_content):
current_app.logger.debug(f'Generating Markdown from HTML for tenant {tenant.id} '
f'on document version {document_version.id}')
llm = model_variables['llm']
template = model_variables['html_parse_template']
parse_prompt = ChatPromptTemplate.from_template(template)
setup = RunnablePassthrough()
output_parser = StrOutputParser()
chain = setup | parse_prompt | llm | output_parser
input_html = {"html": html_content}
markdown = chain.invoke(input_html)
return markdown
def generate_markdown_from_pdf(tenant, model_variables, document_version, pdf_content):
current_app.logger.debug(f'Generating Markdown from PDF for tenant {tenant.id} '
f'on document version {document_version.id}')
llm = model_variables['llm']
template = model_variables['pdf_parse_template']
parse_prompt = ChatPromptTemplate.from_template(template)
setup = RunnablePassthrough()
output_parser = StrOutputParser()
chain = setup | parse_prompt | llm | output_parser
input_pdf = {"pdf_content": pdf_content}
markdown = chain.invoke(input_pdf)
return markdown
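Both generators share one LangChain pattern: a RunnablePassthrough piped into a ChatPromptTemplate, the tenant's LLM, and a StrOutputParser. A standalone usage sketch; ChatOpenAI and the template text are stand-ins for whatever model_variables actually carries:
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI  # assumed model; any chat model works
llm = ChatOpenAI(model='gpt-4o-mini')
prompt = ChatPromptTemplate.from_template('Convert this HTML to Markdown:\n{html}')
chain = RunnablePassthrough() | prompt | llm | StrOutputParser()
markdown = chain.invoke({'html': '<h1>Title</h1><p>Body</p>'})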
def summarize_chunk(tenant, model_variables, document_version, chunk):
current_app.logger.debug(f'Summarizing chunk for tenant {tenant.id} '
f'on document version {document_version.id}')
@@ -277,33 +317,6 @@ def summarize_chunk(tenant, model_variables, document_version, chunk):
raise
def partition_doc_unstructured(tenant, document_version, unstructured_request):
current_app.logger.debug(f'Partitioning document version {document_version.id} for tenant {tenant.id}')
# Initiate the connection to unstructured.io
url = current_app.config.get('UNSTRUCTURED_FULL_URL')
api_key = current_app.config.get('UNSTRUCTURED_API_KEY')
unstructured_client = UnstructuredClient(server_url=url, api_key_auth=api_key)
try:
res = unstructured_client.general.partition(unstructured_request)
chunks = []
for el in res.elements:
match el['type']:
case 'CompositeElement':
chunks.append(el['text'])
case 'Image':
pass
case 'Table':
chunks.append(el['metadata']['text_as_html'])
current_app.logger.debug(f'Finished partitioning document version {document_version.id} for tenant {tenant.id}')
return chunks
except SDKError as e:
current_app.logger.error(f'Error creating embeddings for tenant {tenant.id} '
f'on document version {document_version.id} while chunking '
f'error: {e}')
raise
def embed_chunks(tenant, model_variables, document_version, chunks):
current_app.logger.debug(f'Embedding chunks for tenant {tenant.id} '
f'on document version {document_version.id}')
@@ -334,7 +347,7 @@ def embed_chunks(tenant, model_variables, document_version, chunks):
def parse_html(html_content, tags, included_elements=None, excluded_elements=None):
soup = BeautifulSoup(html_content, 'html.parser')
extracted_content = []
extracted_html = ''
if included_elements:
elements_to_parse = soup.find_all(included_elements)
@@ -353,82 +366,28 @@ def parse_html(html_content, tags, included_elements=None, excluded_elements=Non
if excluded_elements and sub_element.find_parent(excluded_elements):
continue # Skip this sub_element if it's within any of the excluded_elements
sub_content = html.unescape(sub_element.get_text(strip=False))
extracted_content.append((sub_element.name, sub_content))
extracted_html += f'<{sub_element.name}>{sub_element.get_text(strip=True)}</{sub_element.name}>\n'
title = soup.find('title').get_text(strip=True)
return extracted_content, title
def create_potential_chunks(extracted_data, end_tags):
potential_chunks = []
current_chunk = []
for tag, text in extracted_data:
formatted_text = f"- {text}" if tag == 'li' else f"{text}\n"
if current_chunk and tag in end_tags and current_chunk[-1][0] in end_tags:
# Consecutive li and p elements stay together
current_chunk.append((tag, formatted_text))
else:
# End the current chunk if the last element was an end tag
if current_chunk and current_chunk[-1][0] in end_tags:
potential_chunks.append(current_chunk)
current_chunk = []
current_chunk.append((tag, formatted_text))
# Add the last chunk
if current_chunk:
potential_chunks.append(current_chunk)
return potential_chunks
def combine_chunks(potential_chunks, min_chars, max_chars):
actual_chunks = []
current_chunk = ""
current_length = 0
for chunk in potential_chunks:
current_app.embed_tuning_logger.debug(f'chunk: {chunk}')
chunk_content = ''.join(text for _, text in chunk)
current_app.embed_tuning_logger.debug(f'chunk_content: {chunk_content}')
chunk_length = len(chunk_content)
if current_length + chunk_length > max_chars:
if current_length >= min_chars:
current_app.embed_tuning_logger.debug(f'Adding chunk to actual_chunks: {current_chunk}')
actual_chunks.append(current_chunk)
current_chunk = chunk_content
current_length = chunk_length
else:
# If the combined chunk is still less than max_chars, keep adding
current_chunk += chunk_content
current_length += chunk_length
else:
current_chunk += chunk_content
current_length += chunk_length
current_app.embed_tuning_logger.debug(f'Remaining Chunk: {current_chunk}')
current_app.embed_tuning_logger.debug(f'Remaining Length: {current_length}')
# Handle the last chunk
if current_chunk and current_length >= 0:
actual_chunks.append(current_chunk)
return actual_chunks
return extracted_html, title
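With this change parse_html returns a flattened HTML string instead of a list of (tag, text) tuples. A usage sketch on hypothetical markup, assuming the elided middle of the function iterates the sub-elements matching tags:
sample = ('<html><head><title>Docs</title></head>'
          '<body><h1>Intro</h1><p>Hello &amp; welcome</p></body></html>')
extracted_html, title = parse_html(sample, tags=['h1', 'p'],
                                   included_elements=['body'])
# Expected: title == 'Docs'
# Expected: extracted_html == '<h1>Intro</h1>\n<p>Hello & welcome</p>\n'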
def process_youtube(tenant, model_variables, document_version):
base_path = os.path.join(current_app.config['UPLOAD_FOLDER'],
document_version.file_location)
# clean old files if necessary
download_file_name = f'{document_version.id}.mp4'
compressed_file_name = f'{document_version.id}.mp3'
transcription_file_name = f'{document_version.id}.txt'
markdown_file_name = f'{document_version.id}.md'
of, title, description, author = download_youtube(document_version.url, base_path, 'downloaded.mp4', tenant)
of, title, description, author = download_youtube(document_version.url, base_path, download_file_name, tenant)
document_version.system_context = f'Title: {title}\nDescription: {description}\nAuthor: {author}'
compress_audio(base_path, 'downloaded.mp4', 'compressed.mp3', tenant)
transcribe_audio(base_path, 'compressed.mp3', 'transcription.txt', document_version.language, tenant, model_variables)
annotate_transcription(base_path, 'transcription.txt', 'transcription.md', tenant, model_variables)
compress_audio(base_path, download_file_name, compressed_file_name, tenant)
transcribe_audio(base_path, compressed_file_name, transcription_file_name, document_version.language, tenant, model_variables)
annotate_transcription(base_path, transcription_file_name, markdown_file_name, tenant, model_variables)
potential_chunks = create_potential_chunks_for_markdown(base_path, 'transcription.md', tenant)
potential_chunks = create_potential_chunks_for_markdown(base_path, markdown_file_name, tenant)
actual_chunks = combine_chunks_for_markdown(potential_chunks, model_variables['min_chunk_size'],
model_variables['max_chunk_size'])
enriched_chunks = enrich_chunks(tenant, document_version, actual_chunks)
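download_youtube itself is outside this diff. Given the new pytube import, a plausible sketch that returns the four values the call site unpacks; the stream selection and the unused tenant parameter are assumptions:
from pytube import YouTube
def download_youtube(url, base_path, file_name, tenant):
    # Illustrative only; the real helper lives elsewhere in the codebase.
    yt = YouTube(url)
    # Pick the highest-resolution progressive (audio+video) mp4 stream.
    stream = (yt.streams.filter(progressive=True, file_extension='mp4')
                        .order_by('resolution').desc().first())
    output_file = stream.download(output_path=base_path, filename=file_name)
    # tenant is accepted to match the call site; logging is omitted here.
    return output_file, yt.title, yt.description, yt.author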