Skip to content

Commit

Permalink
fix table desc bugs, add positions to chunks (infiniflow#91)
Browse files Browse the repository at this point in the history
  • Loading branch information
KevinHuSh authored Mar 4, 2024
1 parent bcb7249 commit 64a0633
Show file tree
Hide file tree
Showing 13 changed files with 117 additions and 46 deletions.
7 changes: 6 additions & 1 deletion api/apps/chunk_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ def list():
if not e:
return get_data_error_result(retmsg="Document not found!")
query = {
"doc_ids": [doc_id], "page": page, "size": size, "question": question
"doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
}
if "available_int" in req:
query["available_int"] = int(req["available_int"])
Expand All @@ -66,7 +66,12 @@ def list():
"important_kwd": sres.field[id].get("important_kwd", []),
"img_id": sres.field[id].get("img_id", ""),
"available_int": sres.field[id].get("available_int", 1),
"positions": sres.field[id].get("position_int", "").split("\t")
}
poss = []
for i in range(0, len(d["positions"]), 5):
poss.append([float(d["positions"][i]), float(d["positions"][i+1]), float(d["positions"][i+2]), float(d["positions"][i+3]), float(d["positions"][i+4])])
d["positions"] = poss
res["chunks"].append(d)
return get_json_result(data=res)
except Exception as e:
Expand Down
19 changes: 9 additions & 10 deletions api/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,14 @@
from api.utils.file_utils import get_project_base_directory
from api.utils.log_utils import LoggerFactory, getLogger

from rag.nlp import search
from rag.utils import ELASTICSEARCH
# Logger
LoggerFactory.set_directory(os.path.join(get_project_base_directory(), "logs", "api"))
# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
LoggerFactory.LEVEL = 10

stat_logger = getLogger("stat")
access_logger = getLogger("access")
database_logger = getLogger("database")

API_VERSION = "v1"
RAG_FLOW_SERVICE_NAME = "ragflow"
Expand Down Expand Up @@ -133,16 +138,10 @@
PRIVILEGE_COMMAND_WHITELIST = []
CHECK_NODES_IDENTITY = False

from rag.nlp import search
from rag.utils import ELASTICSEARCH
retrievaler = search.Dealer(ELASTICSEARCH)

# Logger
LoggerFactory.set_directory(os.path.join(get_project_base_directory(), "logs", "api"))
# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
LoggerFactory.LEVEL = 10

stat_logger = getLogger("stat")
access_logger = getLogger("access")
database_logger = getLogger("database")

class CustomEnum(Enum):
@classmethod
Expand Down
33 changes: 26 additions & 7 deletions deepdoc/parser/pdf_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -545,7 +545,7 @@ def _merge_with_same_bullet(self):
b_["top"] = b["top"]
self.boxes.pop(i)

def _extract_table_figure(self, need_image, ZM, return_html):
def _extract_table_figure(self, need_image, ZM, return_html, need_position):
tables = {}
figures = {}
# extract figure and table boxes
Expand Down Expand Up @@ -658,8 +658,9 @@ def nearest(tbls):
self.boxes.pop(i)

res = []
positions = []

def cropout(bxs, ltype):
def cropout(bxs, ltype, poss):
nonlocal ZM
pn = set([b["page_number"] - 1 for b in bxs])
if len(pn) < 2:
Expand All @@ -682,6 +683,7 @@ def cropout(bxs, ltype):
"layoutno", "")))

left, top, right, bott = b["x0"], b["top"], b["x1"], b["bottom"]
poss.append((pn, left, right, top, bott))
return self.page_images[pn] \
.crop((left * ZM, top * ZM,
right * ZM, bott * ZM))
Expand All @@ -692,7 +694,7 @@ def cropout(bxs, ltype):
pn[p] = []
pn[p].append(b)
pn = sorted(pn.items(), key=lambda x: x[0])
imgs = [cropout(arr, ltype) for p, arr in pn]
imgs = [cropout(arr, ltype, poss) for p, arr in pn]
pic = Image.new("RGB",
(int(np.max([i.size[0] for i in imgs])),
int(np.sum([m.size[1] for m in imgs]))),
Expand All @@ -714,18 +716,26 @@ def cropout(bxs, ltype):
if not txt:
continue

poss = []
res.append(
(cropout(
bxs,
"figure"),
"figure", poss),
[txt] if not return_html else [f"<p>{txt}</p>"]))
positions.append(poss)

for k, bxs in tables.items():
if not bxs:
continue
res.append((cropout(bxs, "table"),
bxs = Recognizer.sort_Y_firstly(bxs, np.mean([(b["bottom"]-b["top"])/2 for b in bxs]))
poss = []
res.append((cropout(bxs, "table", poss),
self.tbl_det.construct_table(bxs, html=return_html, is_english=self.is_english)))
positions.append(poss)

assert len(positions) == len(res)

if need_position: return list(zip(res, positions))
return res

def proj_match(self, line):
Expand Down Expand Up @@ -922,13 +932,13 @@ def __call__(self, fnm, need_image=True, zoomin=3, return_html=False):
self._text_merge()
self._concat_downward()
self._filter_forpages()
tbls = self._extract_table_figure(need_image, zoomin, return_html)
tbls = self._extract_table_figure(need_image, zoomin, return_html, False)
return self.__filterout_scraps(deepcopy(self.boxes), zoomin), tbls

def remove_tag(self, txt):
    """Strip every embedded position tag (``@@...##``) from *txt*.

    Tags are the inline markers produced by ``_line_tag`` (page/coordinate
    payload of tabs, digits, dots and dashes between ``@@`` and ``##``);
    the surrounding text is returned unchanged.
    """
    tag_pattern = r"@@[\t0-9.-]+?##"  # non-greedy so adjacent tags don't merge
    return re.sub(tag_pattern, "", txt)

def crop(self, text, ZM=3):
def crop(self, text, ZM=3, need_position=False):
imgs = []
poss = []
for tag in re.findall(r"@@[0-9-]+\t[0-9.\t]+##", text):
Expand All @@ -946,6 +956,7 @@ def crop(self, text, ZM=3):
pos = poss[-1]
poss.append(([pos[0][-1]], pos[1], pos[2], min(self.page_images[pos[0][-1]].size[1]/ZM, pos[4]+GAP), min(self.page_images[pos[0][-1]].size[1]/ZM, pos[4]+120)))

positions = []
for ii, (pns, left, right, top, bottom) in enumerate(poss):
right = left + max_width
bottom *= ZM
Expand All @@ -958,6 +969,8 @@ def crop(self, text, ZM=3):
bottom, self.page_images[pns[0]].size[1])
))
)
positions.append((pns[0], left, right, top, min(
bottom, self.page_images[pns[0]].size[1])/ZM))
bottom -= self.page_images[pns[0]].size[1]
for pn in pns[1:]:
imgs.append(
Expand All @@ -967,9 +980,12 @@ def crop(self, text, ZM=3):
self.page_images[pn].size[1])
))
)
positions.append((pn, left, right, 0, min(
bottom, self.page_images[pn].size[1]) / ZM))
bottom -= self.page_images[pn].size[1]

if not imgs:
if need_position: return None, None
return
height = 0
for img in imgs:
Expand All @@ -988,6 +1004,9 @@ def crop(self, text, ZM=3):
img = Image.alpha_composite(img, overlay).convert("RGB")
pic.paste(img, (0, int(height)))
height += img.size[1] + GAP

if need_position:
return pic, positions
return pic


Expand Down
1 change: 1 addition & 0 deletions deepdoc/vision/recognizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -265,6 +265,7 @@ def find_horizontally_tightest_fit(box, boxes):
return
min_dis, min_i = 1000000, None
for i,b in enumerate(boxes):
if box.get("layoutno", "0") != b.get("layoutno", "0"): continue
dis = min(abs(box["x0"] - b["x0"]), abs(box["x1"] - b["x1"]), abs(box["x0"]+box["x1"] - b["x1"] - b["x0"])/2)
if dis < min_dis:
min_i = i
Expand Down
12 changes: 7 additions & 5 deletions rag/app/book.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,14 +13,15 @@
import copy
import re
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, \
hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table
hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions
from rag.nlp import huqie
from deepdoc.parser import PdfParser, DocxParser


class Pdf(PdfParser):
def __call__(self, filename, binary=None, from_page=0,
to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(
filename if not binary else binary,
zoomin,
Expand All @@ -40,11 +41,11 @@ def __call__(self, filename, binary=None, from_page=0,
self._filter_forpages()
self._merge_with_same_bullet()
callback(0.75, "Text merging finished.")
tbls = self._extract_table_figure(True, zoomin, False)
tbls = self._extract_table_figure(True, zoomin, False, True)

callback(0.8, "Text extraction finished")

return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno","")) for b in self.boxes], tbls
return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno","")) for b in self.boxes], tbls, tbl_poss


def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
Expand All @@ -69,7 +70,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
callback(0.8, "Finish parsing.")
elif re.search(r"\.pdf$", filename, re.IGNORECASE):
pdf_parser = Pdf()
sections,tbls = pdf_parser(filename if not binary else binary,
sections, tbls = pdf_parser(filename if not binary else binary,
from_page=from_page, to_page=to_page, callback=callback)
elif re.search(r"\.txt$", filename, re.IGNORECASE):
callback(0.1, "Start to parse.")
Expand Down Expand Up @@ -105,7 +106,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
d = copy.deepcopy(doc)
ck = "\n".join(ck)
if pdf_parser:
d["image"] = pdf_parser.crop(ck)
d["image"], poss = pdf_parser.crop(ck, need_position=True)
add_positions(d, poss)
ck = pdf_parser.remove_tag(ck)
tokenize(d, ck, eng)
res.append(d)
Expand Down
6 changes: 4 additions & 2 deletions rag/app/laws.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from io import BytesIO
from docx import Document
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
make_colon_as_title
make_colon_as_title, add_positions
from rag.nlp import huqie
from deepdoc.parser import PdfParser, DocxParser
from rag.settings import cron_logger
Expand Down Expand Up @@ -49,6 +49,7 @@ def __call__(self, filename, binary=None, from_page=0, to_page=100000):
class Pdf(PdfParser):
def __call__(self, filename, binary=None, from_page=0,
to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(
filename if not binary else binary,
zoomin,
Expand Down Expand Up @@ -122,7 +123,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
ck = "\n".join(ck)
d = copy.deepcopy(doc)
if pdf_parser:
d["image"] = pdf_parser.crop(ck)
d["image"], poss = pdf_parser.crop(ck, need_position=True)
add_positions(d, poss)
ck = pdf_parser.remove_tag(ck)
tokenize(d, ck, eng)
res.append(d)
Expand Down
8 changes: 5 additions & 3 deletions rag/app/manual.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import re

from api.db import ParserType
from rag.nlp import huqie, tokenize, tokenize_table
from rag.nlp import huqie, tokenize, tokenize_table, add_positions
from deepdoc.parser import PdfParser
from rag.utils import num_tokens_from_string

Expand All @@ -14,6 +14,7 @@ def __init__(self):

def __call__(self, filename, binary=None, from_page=0,
to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(
filename if not binary else binary,
zoomin,
Expand All @@ -32,7 +33,7 @@ def __call__(self, filename, binary=None, from_page=0,
self._concat_downward(concat_between_pages=False)
self._filter_forpages()
callback(0.77, "Text merging finished")
tbls = self._extract_table_figure(True, zoomin, False)
tbls = self._extract_table_figure(True, zoomin, False, True)

# clean mess
for b in self.boxes:
Expand Down Expand Up @@ -91,7 +92,8 @@ def add_chunk():
d = copy.deepcopy(doc)
ck = "\n".join(chunk)
tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
d["image"] = pdf_parser.crop(ck)
d["image"], poss = pdf_parser.crop(ck, need_position=True)
add_positions(d, poss)
res.append(d)
chunk = []
tk_cnt = 0
Expand Down
10 changes: 6 additions & 4 deletions rag/app/naive.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,14 +13,15 @@
import copy
import re
from rag.app import laws
from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table
from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table, add_positions
from deepdoc.parser import PdfParser
from rag.settings import cron_logger


class Pdf(PdfParser):
def __call__(self, filename, binary=None, from_page=0,
to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(
filename if not binary else binary,
zoomin,
Expand All @@ -39,7 +40,7 @@ def __call__(self, filename, binary=None, from_page=0,
self._concat_downward(concat_between_pages=False)
self._filter_forpages()
callback(0.77, "Text merging finished")
tbls = self._extract_table_figure(True, zoomin, False)
tbls = self._extract_table_figure(True, zoomin, False, True)

cron_logger.info("paddle layouts:".format((timer() - start) / (self.total_page + 0.1)))
#self._naive_vertical_merge()
Expand Down Expand Up @@ -95,11 +96,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca

# wrap up to es documents
for ck in cks:
if len(ck.strip()) == 0:continue
print("--", ck)
if not ck:continue
d = copy.deepcopy(doc)
if pdf_parser:
d["image"] = pdf_parser.crop(ck)
d["image"], poss = pdf_parser.crop(ck, need_position=True)
add_positions(d, poss)
ck = pdf_parser.remove_tag(ck)
tokenize(d, ck, eng)
res.append(d)
Expand Down
16 changes: 10 additions & 6 deletions rag/app/paper.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from collections import Counter

from api.db import ParserType
from rag.nlp import huqie, tokenize, tokenize_table
from rag.nlp import huqie, tokenize, tokenize_table, add_positions
from deepdoc.parser import PdfParser
import numpy as np
from rag.utils import num_tokens_from_string
Expand All @@ -28,6 +28,7 @@ def __init__(self):

def __call__(self, filename, binary=None, from_page=0,
to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(
filename if not binary else binary,
zoomin,
Expand All @@ -47,7 +48,7 @@ def __call__(self, filename, binary=None, from_page=0,
self._concat_downward(concat_between_pages=False)
self._filter_forpages()
callback(0.75, "Text merging finished.")
tbls = self._extract_table_figure(True, zoomin, False)
tbls = self._extract_table_figure(True, zoomin, False, True)

# clean mess
if column_width < self.page_images[0].size[0] / zoomin / 2:
Expand Down Expand Up @@ -165,7 +166,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
txt = pdf_parser.remove_tag(paper["abstract"])
d["important_kwd"] = ["abstract", "总结", "概括", "summary", "summarize"]
d["important_tks"] = " ".join(d["important_kwd"])
d["image"] = pdf_parser.crop(paper["abstract"])
d["image"], poss = pdf_parser.crop(paper["abstract"], need_position=True)
add_positions(d, poss)
tokenize(d, txt, eng)
res.append(d)

Expand Down Expand Up @@ -198,8 +200,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
for p in proj:
d = copy.deepcopy(doc)
txt += "\n" + pdf_parser.remove_tag(p)
d["image"] = pdf_parser.crop(p)
tokenize(d, txt)
d["image"], poss = pdf_parser.crop(p, need_position=True)
add_positions(d, poss)
tokenize(d, txt, eng)
res.append(d)

i = 0
Expand All @@ -210,7 +213,8 @@ def add_chunk():
d = copy.deepcopy(doc)
ck = "\n".join(chunk)
tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
d["image"] = pdf_parser.crop(ck)
d["image"], poss = pdf_parser.crop(ck, need_position=True)
add_positions(d, poss)
res.append(d)
chunk = []
tk_cnt = 0
Expand Down
Loading

0 comments on commit 64a0633

Please sign in to comment.