upload all unstructured files to gcs #455

Merged (1 commit, Jun 20, 2024)
backend/score.py: 4 changes (2 additions, 2 deletions)
@@ -191,8 +191,8 @@ async def extract_knowledge_graph_from_file(
         error_message = str(e)
         graphDb_data_Access.update_exception_db(file_name,error_message)
         gcs_file_cache = os.environ.get('GCS_FILE_CACHE')
-        if source_type == 'local file' and gcs_file_cache == 'True' and (file_name.split('.')[-1]).upper()=='PDF':
-            folder_name = create_gcs_bucket_folder_name_hashed
+        if source_type == 'local file' and gcs_file_cache == 'True':
+            folder_name = create_gcs_bucket_folder_name_hashed(uri,file_name)
             delete_file_from_gcs(BUCKET_UPLOAD,folder_name,file_name)
         else:
             logging.info(f'Deleted File Path: {merged_file_path} and Deleted File Name : {file_name}')
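The folder passed to delete_file_from_gcs comes from create_gcs_bucket_folder_name_hashed, whose body is not shown in this diff. A minimal sketch of what such a helper might look like, assuming a SHA-1 digest (suggested only by the folder_name_sha1_hashed parameter name used elsewhere in this PR, not confirmed by it):

import hashlib

def create_gcs_bucket_folder_name_hashed(uri, file_name):
    # Hypothetical sketch, not the repository's actual implementation:
    # derive a stable, user-scoped GCS folder prefix from the Neo4j connection
    # URI and the file name, so uploads, reads and deletes for the same file
    # all resolve to the same folder.
    return hashlib.sha1(f"{uri}{file_name}".encode("utf-8")).hexdigest()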
backend/src/document_sources/gcs_bucket.py: 42 changes (21 additions, 21 deletions)
@@ -8,6 +8,7 @@
 import io
 from google.oauth2.credentials import Credentials
 import time
+from .local_file import load_document_content, get_pages_with_page_numbers

 def get_gcs_bucket_files_info(gcs_project_id, gcs_bucket_name, gcs_bucket_folder, creds):
     storage_client = storage.Client(project=gcs_project_id, credentials=creds)
@@ -42,7 +43,7 @@ def get_gcs_bucket_files_info(gcs_project_id, gcs_bucket_name, gcs_bucket_folder
 def load_pdf(file_path):
     return PyMuPDFLoader(file_path)

-def get_documents_from_gcs(gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token=None, folder_name_sha1_hashed = None):
+def get_documents_from_gcs(gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token=None):

     if gcs_bucket_folder is not None:
         if gcs_bucket_folder.endswith('/'):
@@ -51,33 +52,32 @@ def get_documents_from_gcs(gcs_project_id, gcs_bucket_name, gcs_bucket_folder, g
             blob_name = gcs_bucket_folder+'/'+gcs_blob_filename
     else:
         blob_name = gcs_blob_filename
-    #credentials, project_id = google.auth.default()

     logging.info(f"GCS project_id : {gcs_project_id}")
-    #loader = GCSFileLoader(project_name=gcs_project_id, bucket=gcs_bucket_name, blob=blob_name, loader_func=load_pdf)
-    # pages = loader.load()
-    # file_name = gcs_blob_filename
-    #creds= Credentials(access_token)

     if access_token is None:
         storage_client = storage.Client(project=gcs_project_id)
-        blob_name = folder_name_sha1_hashed +'/'+gcs_blob_filename
+        loader = GCSFileLoader(project_name=gcs_project_id, bucket=gcs_bucket_name, blob=blob_name, loader_func=load_document_content)
+        pages = loader.load()
+        if (gcs_blob_filename.split('.')[-1]).lower() != 'pdf':
+            pages = get_pages_with_page_numbers(pages)
     else:
         creds= Credentials(access_token)
         storage_client = storage.Client(project=gcs_project_id, credentials=creds)
-    print(f'BLOB Name: {blob_name}')
-    bucket = storage_client.bucket(gcs_bucket_name)
-    blob = bucket.blob(blob_name)
-    if blob.exists():
-        content = blob.download_as_bytes()
-        pdf_file = io.BytesIO(content)
-        pdf_reader = PdfReader(pdf_file)
-        # Extract text from all pages
-        text = ""
-        for page in pdf_reader.pages:
-            text += page.extract_text()
-        pages = [Document(page_content = text)]
-    else:
-        raise Exception('Blob Not Found')
+        print(f'BLOB Name: {blob_name}')
+        bucket = storage_client.bucket(gcs_bucket_name)
+        blob = bucket.blob(blob_name)
+        if blob.exists():
+            content = blob.download_as_bytes()
+            pdf_file = io.BytesIO(content)
+            pdf_reader = PdfReader(pdf_file)
+            # Extract text from all pages
+            text = ""
+            for page in pdf_reader.pages:
+                text += page.extract_text()
+            pages = [Document(page_content = text)]
+        else:
+            raise Exception('Blob Not Found')
     return gcs_blob_filename, pages

 def upload_file_to_gcs(file_chunk, chunk_number, original_file_name, bucket_name, folder_name_sha1_hashed):
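With the folder_name_sha1_hashed keyword removed, callers now pass the hashed folder through the regular gcs_bucket_folder argument, as backend/src/main.py does below. A minimal usage sketch; the constants and module paths here are placeholders and assumptions, not taken from this diff:

from src.document_sources.gcs_bucket import get_documents_from_gcs      # module path assumed
from src.shared.common_fn import create_gcs_bucket_folder_name_hashed   # module path assumed

PROJECT_ID = "my-gcp-project"                     # placeholder
BUCKET_UPLOAD = "llm-graph-builder-uploads"       # placeholder
uri = "neo4j+s://example.databases.neo4j.io"      # placeholder Neo4j URI

folder_name = create_gcs_bucket_folder_name_hashed(uri, "report.docx")
# Without an access token, the GCSFileLoader path is used; non-PDF content is
# regrouped into page-level documents via get_pages_with_page_numbers.
file_name, pages = get_documents_from_gcs(PROJECT_ID, BUCKET_UPLOAD, folder_name, "report.docx")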
backend/src/document_sources/local_file.py: 89 changes (50 additions, 39 deletions)
@@ -18,58 +18,69 @@
 # pages = loader.load_and_split()
 # return file_name, pages

+def load_document_content(file_path):
+    if Path(file_path).suffix.lower() == '.pdf':
+        print("in if")
+        return PyMuPDFLoader(file_path)
+    else:
+        print("in else")
+        return UnstructuredFileLoader(file_path, encoding="utf-8", mode="elements")
+
 def get_documents_from_file_by_path(file_path,file_name):
     file_path = Path(file_path)
     if file_path.exists():
         logging.info(f'file {file_name} processing')
         # loader = PyPDFLoader(str(file_path))
         file_extension = file_path.suffix.lower()
         try:
+            loader = load_document_content(file_path)
             if file_extension == ".pdf":
-                loader = PyMuPDFLoader(str(file_path))
                 pages = loader.load()
             else:
-                loader = UnstructuredFileLoader(str(file_path), encoding="utf-8", mode="elements")
                 unstructured_pages = loader.load()
-                pages = []
-                page_number = 1
-                page_content=''
-                metadata = {}
-                for page in unstructured_pages:
-                    if 'page_number' in page.metadata:
-                        if page.metadata['page_number']==page_number:
-                            page_content += page.page_content
-                            metadata = {'source':page.metadata['source'],'page_number':page_number, 'filename':page.metadata['filename'],
-                                        'filetype':page.metadata['filetype'], 'total_pages':unstructured_pages[-1].metadata['page_number']}
-
-                        if page.metadata['page_number']>page_number:
-                            page_number+=1
-                            if not metadata:
-                                metadata = {'total_pages':unstructured_pages[-1].metadata['page_number']}
-                            pages.append(Document(page_content = page_content, metadata=metadata))
-                            page_content=''
-
-                        if page == unstructured_pages[-1]:
-                            if not metadata:
-                                metadata = {'total_pages':unstructured_pages[-1].metadata['page_number']}
-                            pages.append(Document(page_content = page_content, metadata=metadata))
-
-                    elif page.metadata['category']=='PageBreak' and page!=unstructured_pages[0]:
-                        page_number+=1
-                        pages.append(Document(page_content = page_content, metadata=metadata))
-                        page_content=''
-                        metadata={}
-
-                    else:
-                        page_content += page.page_content
-                        metadata_with_custom_page_number = {'source':page.metadata['source'],
-                                                            'page_number':1, 'filename':page.metadata['filename'],
-                                                            'filetype':page.metadata['filetype'], 'total_pages':1}
-                        if page == unstructured_pages[-1]:
-                            pages.append(Document(page_content = page_content, metadata=metadata_with_custom_page_number))
+                pages= get_pages_with_page_numbers(unstructured_pages)
         except Exception as e:
             raise Exception('Error while reading the file content or metadata')
     else:
         logging.info(f'File {file_name} does not exist')
         raise Exception(f'File {file_name} does not exist')
     return file_name, pages , file_extension

+def get_pages_with_page_numbers(unstructured_pages):
+    pages = []
+    page_number = 1
+    page_content=''
+    metadata = {}
+    for page in unstructured_pages:
+        if 'page_number' in page.metadata:
+            if page.metadata['page_number']==page_number:
+                page_content += page.page_content
+                metadata = {'source':page.metadata['source'],'page_number':page_number, 'filename':page.metadata['filename'],
+                            'filetype':page.metadata['filetype'], 'total_pages':unstructured_pages[-1].metadata['page_number']}
+
+            if page.metadata['page_number']>page_number:
+                page_number+=1
+                if not metadata:
+                    metadata = {'total_pages':unstructured_pages[-1].metadata['page_number']}
+                pages.append(Document(page_content = page_content, metadata=metadata))
+                page_content=''
+
+            if page == unstructured_pages[-1]:
+                if not metadata:
+                    metadata = {'total_pages':unstructured_pages[-1].metadata['page_number']}
+                pages.append(Document(page_content = page_content, metadata=metadata))
+
+        elif page.metadata['category']=='PageBreak' and page!=unstructured_pages[0]:
+            page_number+=1
+            pages.append(Document(page_content = page_content, metadata=metadata))
+            page_content=''
+            metadata={}
+
+        else:
+            page_content += page.page_content
+            metadata_with_custom_page_number = {'source':page.metadata['source'],
+                                                'page_number':1, 'filename':page.metadata['filename'],
+                                                'filetype':page.metadata['filetype'], 'total_pages':1}
+            if page == unstructured_pages[-1]:
+                pages.append(Document(page_content = page_content, metadata=metadata_with_custom_page_number))
+    return pages
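To make the regrouping concrete, here is a small fabricated single-page example of what get_pages_with_page_numbers does with the element-level documents produced by UnstructuredFileLoader in mode="elements" (the Document import path and the module path are assumptions and may differ by version):

from langchain.docstore.document import Document                        # import path may vary by langchain version
from src.document_sources.local_file import get_pages_with_page_numbers  # module path assumed

elements = [
    Document(page_content="Intro paragraph. ",
             metadata={'source': '/tmp/report.docx', 'filename': 'report.docx',
                       'filetype': 'docx', 'page_number': 1}),
    Document(page_content="Second paragraph.",
             metadata={'source': '/tmp/report.docx', 'filename': 'report.docx',
                       'filetype': 'docx', 'page_number': 1}),
]

pages = get_pages_with_page_numbers(elements)
# For this single-page input the helper returns one Document whose page_content
# is the concatenation of the element texts, with page_number and total_pages set to 1.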
backend/src/graphDB_dataAccess.py: 2 changes (1 addition, 1 deletion)
@@ -175,7 +175,7 @@ def delete_file_from_graph(self, filenames, source_types, deleteEntities:str, me
         # source_types_list = source_types.split(',')
         for (file_name,source_type) in zip(filename_list, source_types_list):
             merged_file_path = os.path.join(merged_dir, file_name)
-            if source_type == 'local file' and gcs_file_cache == 'True' and (file_name.split('.')[-1]).upper()=='PDF':
+            if source_type == 'local file' and gcs_file_cache == 'True':
                 folder_name = create_gcs_bucket_folder_name_hashed(uri, file_name)
                 delete_file_from_gcs(BUCKET_UPLOAD,folder_name,file_name)
             else:
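delete_file_from_gcs is only called in these cleanup paths; its body is not part of this diff. A plausible sketch, assuming the standard google-cloud-storage client API and not the repository's actual implementation:

from google.cloud import storage

def delete_file_from_gcs(bucket_name, folder_name, file_name):
    # Hypothetical sketch: remove the cached object stored under the hashed
    # folder prefix for this file, if it exists.
    storage_client = storage.Client()
    blob = storage_client.bucket(bucket_name).blob(f"{folder_name}/{file_name}")
    if blob.exists():
        blob.delete()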
backend/src/main.py: 12 changes (6 additions, 6 deletions)
@@ -159,9 +159,9 @@ def extract_graph_from_file_local_file(graph, model, merged_file_path, fileName,

     logging.info(f'Process file name :{fileName}')
     gcs_file_cache = os.environ.get('GCS_FILE_CACHE')
-    if gcs_file_cache == 'True' and (fileName.split('.')[-1]).upper() =='PDF':
+    if gcs_file_cache == 'True':
         folder_name = create_gcs_bucket_folder_name_hashed(uri, fileName)
-        file_name, pages = get_documents_from_gcs( PROJECT_ID, BUCKET_UPLOAD, None, fileName, folder_name_sha1_hashed=folder_name)
+        file_name, pages = get_documents_from_gcs( PROJECT_ID, BUCKET_UPLOAD, folder_name, fileName)
     else:
         file_name, pages, file_extension = get_documents_from_file_by_path(merged_file_path,fileName)
     if pages==None or len(pages)==0:
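Note that GCS_FILE_CACHE is read from the environment as a plain string and compared against the literal 'True'; any other value falls back to reading the merged local file, so enabling the cache looks like this:

import os

# Enable the GCS-backed cache for locally uploaded files; values such as
# 'true' or '1' do not match the comparison and disable the cache path.
os.environ["GCS_FILE_CACHE"] = "True"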
@@ -308,7 +308,7 @@ def processing_source(graph, model, file_name, pages, allowedNodes, allowedRelat

     if is_uploaded_from_local:
         gcs_file_cache = os.environ.get('GCS_FILE_CACHE')
-        if gcs_file_cache == 'True' and (file_name.split('.')[-1]).upper()=='PDF':
+        if gcs_file_cache == 'True':
             folder_name = create_gcs_bucket_folder_name_hashed(uri, file_name)
             delete_file_from_gcs(BUCKET_UPLOAD,folder_name,file_name)
         else:
@@ -424,7 +424,7 @@ def upload_file(graph, model, chunk, chunk_number:int, total_chunks:int, origina
     gcs_file_cache = os.environ.get('GCS_FILE_CACHE')
     logging.info(f'gcs file cache: {gcs_file_cache}')

-    if gcs_file_cache == 'True' and (originalname.split('.')[-1]).upper() =='PDF':
+    if gcs_file_cache == 'True':
         folder_name = create_gcs_bucket_folder_name_hashed(uri,originalname)
         upload_file_to_gcs(chunk, chunk_number, originalname, BUCKET_UPLOAD, folder_name)
     else:

     if int(chunk_number) == int(total_chunks):
         # If this is the last chunk, merge all chunks into a single file
-        if gcs_file_cache == 'True' and (originalname.split('.')[-1]).upper()=='PDF':
+        if gcs_file_cache == 'True':
             file_size = merge_file_gcs(BUCKET_UPLOAD, originalname, folder_name)
             total_pages = 1
         else:
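For orientation, a rough sketch of the chunked-upload path that now applies to every file type when the cache is on. The chunk size, file name, placeholder constants, and the assumption that upload_file_to_gcs accepts raw bytes are illustrative only; the helper signatures match the calls visible in this diff:

BUCKET_UPLOAD = "llm-graph-builder-uploads"      # placeholder bucket
uri = "neo4j+s://example.databases.neo4j.io"     # placeholder Neo4j URI
CHUNK_SIZE = 1024 * 1024                         # assumed chunk size
originalname = "report.docx"                     # placeholder file name

folder_name = create_gcs_bucket_folder_name_hashed(uri, originalname)

with open(originalname, "rb") as f:
    data = f.read()
chunks = [data[i:i + CHUNK_SIZE] for i in range(0, len(data), CHUNK_SIZE)]

for chunk_number, chunk in enumerate(chunks, start=1):
    # Each chunk is cached under the hashed GCS folder for this file.
    upload_file_to_gcs(chunk, chunk_number, originalname, BUCKET_UPLOAD, folder_name)

# After the final chunk, the pieces are merged back into a single object.
file_size = merge_file_gcs(BUCKET_UPLOAD, originalname, folder_name)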
@@ -494,7 +494,7 @@ def manually_cancelled_job(graph, filenames, source_types, merged_dir, uri):
         graphDb_data_Access.update_source_node(obj_source_node)
         obj_source_node = None
         merged_file_path = os.path.join(merged_dir, file_name)
-        if source_type == 'local file' and gcs_file_cache == 'True' and (file_name.split('.')[-1]).upper()=='PDF':
+        if source_type == 'local file' and gcs_file_cache == 'True':
             folder_name = create_gcs_bucket_folder_name_hashed(uri, file_name)
             delete_file_from_gcs(BUCKET_UPLOAD,folder_name,file_name)
         else: