rename get_txt to get_text #2649

Merged (1 commit) on Sep 29, 2024
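The change is mechanical: the helper in deepdoc/parser/utils.py is renamed from get_txt to get_text, and the call sites under rag/app are updated to match, with a few unused imports dropped along the way. A minimal before/after view of a typical call site, taken from the diffs below:

# before this PR
from deepdoc.parser.utils import get_txt
txt = get_txt(filename, binary)

# after this PR
from deepdoc.parser.utils import get_text
txt = get_text(filename, binary)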
deepdoc/parser/utils.py: 1 addition & 1 deletion

@@ -14,7 +14,7 @@
 from rag.nlp import find_codec


-def get_txt(fnm: str, binary=None) -> str:
+def get_text(fnm: str, binary=None) -> str:
     txt = ""
     if binary:
         encoding = find_codec(binary)
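The diff shows only the top of the helper; the rest of the body is collapsed. A minimal sketch of what get_text does, assuming the elided part simply decodes the supplied bytes with the detected codec, or reads the file from disk when no bytes are passed (the decode call and the file-reading branch are assumptions, not shown in this diff):

from rag.nlp import find_codec  # codec-detection helper used in the visible lines


def get_text(fnm: str, binary=None) -> str:
    txt = ""
    if binary:
        # Detect the encoding of the raw bytes and decode them (decode step assumed).
        encoding = find_codec(binary)
        txt = binary.decode(encoding, errors="ignore")
    else:
        # Assumed fallback: read the file from disk when no bytes are supplied.
        with open(fnm, "r", encoding="utf-8", errors="ignore") as f:
            txt = f.read()
    return txt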
rag/app/book.py: 0 additions & 1 deletion

@@ -10,7 +10,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import copy
 from tika import parser
 import re
 from io import BytesIO
rag/app/laws.py: 2 additions & 2 deletions

@@ -17,7 +17,7 @@
 from docx import Document

 from api.db import ParserType
-from deepdoc.parser.utils import get_txt
+from deepdoc.parser.utils import get_text
 from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
     make_colon_as_title, add_positions, tokenize_chunks, find_codec, docx_question_level
 from rag.nlp import rag_tokenizer
@@ -166,7 +166,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,

     elif re.search(r"\.txt$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
-        txt = get_txt(filename, binary)
+        txt = get_text(filename, binary)
         sections = txt.split("\n")
         sections = [l for l in sections if l]
         callback(0.8, "Finish parsing.")
rag/app/one.py: 3 additions & 3 deletions

@@ -14,9 +14,9 @@
 from io import BytesIO
 import re

-from deepdoc.parser.utils import get_txt
+from deepdoc.parser.utils import get_text
 from rag.app import laws
-from rag.nlp import rag_tokenizer, tokenize, find_codec
+from rag.nlp import rag_tokenizer, tokenize
 from deepdoc.parser import PdfParser, ExcelParser, PlainParser, HtmlParser


@@ -84,7 +84,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,

     elif re.search(r"\.(txt|md|markdown)$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
-        txt = get_txt(filename, binary)
+        txt = get_text(filename, binary)
         sections = txt.split("\n")
         sections = [s for s in sections if s]
         callback(0.8, "Finish parsing.")
rag/app/qa.py: 6 additions & 4 deletions

@@ -17,14 +17,16 @@
 from nltk import word_tokenize
 from openpyxl import load_workbook

-from deepdoc.parser.utils import get_txt
-from rag.nlp import is_english, random_choices, find_codec, qbullets_category, add_positions, has_qbullet, docx_question_level
+from deepdoc.parser.utils import get_text
+from rag.nlp import is_english, random_choices, qbullets_category, add_positions, has_qbullet, docx_question_level
 from rag.nlp import rag_tokenizer, tokenize_table, concat_img
 from rag.settings import cron_logger
 from deepdoc.parser import PdfParser, ExcelParser, DocxParser
 from docx import Document
 from PIL import Image
 from markdown import markdown
+
+
 class Excel(ExcelParser):
     def __call__(self, fnm, binary=None, callback=None):
         if not binary:
@@ -307,7 +309,7 @@ def chunk(filename, binary=None, lang="Chinese", callback=None, **kwargs):
         return res
     elif re.search(r"\.(txt|csv)$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
-        txt = get_txt(filename, binary)
+        txt = get_text(filename, binary)
         lines = txt.split("\n")
         comma, tab = 0, 0
         for l in lines:
@@ -350,7 +352,7 @@ def chunk(filename, binary=None, lang="Chinese", callback=None, **kwargs):
         return res
     elif re.search(r"\.(md|markdown)$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
-        txt = get_txt(filename, binary)
+        txt = get_text(filename, binary)
         lines = txt.split("\n")
         last_question, last_answer = "", ""
         question_stack, level_stack = [], []
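In the txt/csv branch above, the lines that follow the get_text call start counting commas and tabs per line; the collapsed part of the hunk presumably uses those counts to decide whether the file is comma- or tab-delimited before splitting each line into a question/answer pair. A rough sketch of that idea, where the counting loop follows the diff and the threshold and split are assumptions:

txt = get_text(filename, binary)
lines = txt.split("\n")
comma, tab = 0, 0
for l in lines:
    # Count which candidate delimiter appears on more lines (from the diff context).
    if "," in l:
        comma += 1
    if "\t" in l:
        tab += 1
# Assumed continuation: pick the more frequent delimiter, then treat each
# non-empty line as "question<delimiter>answer".
delimiter = "\t" if tab >= comma else ","
qa_pairs = [l.split(delimiter, 1) for l in lines if l.count(delimiter) >= 1]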
rag/app/table.py: 1 addition & 1 deletion

@@ -21,7 +21,7 @@

 from api.db.services.knowledgebase_service import KnowledgebaseService
 from deepdoc.parser.utils import get_text
-from rag.nlp import rag_tokenizer, is_english, tokenize, find_codec
+from rag.nlp import rag_tokenizer, tokenize
 from deepdoc.parser import ExcelParser

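A quick usage check of the renamed helper, assuming a UTF-8 text payload (hypothetical snippet, not part of the PR): when binary is passed, the filename is only informational and the bytes are decoded with the detected codec.

from deepdoc.parser.utils import get_text

sample = "hello\nworld\n".encode("utf-8")
text = get_text("ignored.txt", binary=sample)
print(text.splitlines())  # expected: ['hello', 'world']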