|
import os |
|
|
|
import gradio as gr |
|
|
|
from Plan.AiLLM import llm_recognition |
|
from Plan.pytesseractOCR import ocr_recognition |
|
from Preprocess.preprocessImg import preprocess_image001 |
|
|
|
langs = []

# Discover the language packs installed for the local Tesseract binary.
# `tesseract --list-langs` prints a header line ("List of available
# languages ...") followed by one language code per line.  The original
# raw split kept trailing '\r' / whitespace on each entry (e.g. on
# Windows CRLF output), which breaks exact dropdown value matches such
# as "chi_tra" — strip each entry and drop empties.
choices = [
    lang.strip()
    for lang in os.popen('tesseract --list-langs').read().split('\n')[1:-1]
    if lang.strip()
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Reuse the language list already queried above instead of shelling out
# to `tesseract --list-langs` a second time.
languages = list(choices)

# Startup diagnostics banner.  The original lines crashed in several ways:
#  - `str + list` concatenation raised TypeError,
#  - `os.getenv(...)` can return None (TypeError on `+`),
#  - `os.environ['TESSDATA_PREFIX']` raises KeyError when unset,
#  - `pytesseract` was referenced without ever being imported (NameError).
# Use f-strings / `.get()` and a guarded lazy import so the banner can
# never take the whole app down.
print(' ======================================================== ')
print(f' ###### choices:{choices}')
print(f" ###### GET ENV - TESSDATA_PREFIX:{os.getenv('TESSDATA_PREFIX')}")
print(f" ###### OS - TESSDATA_PREFIX:{os.environ.get('TESSDATA_PREFIX')}")

try:
    import pytesseract
    print(f' ###### Tesseract_Cmd:{pytesseract.pytesseract.tesseract_cmd}')
except ImportError:
    print(' ###### Tesseract_Cmd: (pytesseract not installed)')

print(' ======================================================== ')
|
|
|
|
|
def preprocess_and_ocr(image, validation_type, language):
    """Preprocess *image*, then run Tesseract OCR recognition on it.

    Returns a ``(preprocessed_image, ocr_result)`` tuple so the UI can
    show both the cleaned-up image and the parsed result.
    """
    cleaned = preprocess_image001(image)
    return cleaned, ocr_recognition(cleaned, validation_type, language)
|
|
|
|
|
def preprocess_and_llm(image, validation_type, language):
    """Preprocess *image*, then run the AI LLM recognition on it.

    Returns a ``(preprocessed_image, llm_result)`` tuple so the UI can
    show both the cleaned-up image and the parsed result.
    """
    cleaned = preprocess_image001(image)
    return cleaned, llm_recognition(cleaned, validation_type, language)
|
|
|
|
|
with gr.Blocks() as demo:
    # Input row: image upload plus the two recognition settings.
    with gr.Row():
        image_input = gr.Image(type="pil", label="上傳圖片")
        validation_type = gr.Dropdown(choices=["身分證正面", "身分證反面"], label="驗證類別")
        language_dropdown = gr.Dropdown(choices=languages, value="chi_tra", label="語言")

    # Action row: one button per recognition backend.
    with gr.Row():
        ocr_button = gr.Button("使用 OCR")
        llm_button = gr.Button("使用 AI LLM")

    # Output rows: preprocessed-image preview, then the two JSON results.
    with gr.Row():
        preprocess_output = gr.Image(label="OCR 預處理圖片")
    with gr.Row():
        ocr_output = gr.JSON(label="OCR 解析結果")
        llm_output = gr.JSON(label="AI LLM 解析結果")

    # BUG FIX: both handlers return a 2-tuple (preprocessed_image, result),
    # but the original wired only the JSON component as output — an arity
    # mismatch that also left `preprocess_output` permanently blank.
    # Route both return values to their components.
    shared_inputs = [image_input, validation_type, language_dropdown]
    ocr_button.click(preprocess_and_ocr, inputs=shared_inputs,
                     outputs=[preprocess_output, ocr_output])
    llm_button.click(preprocess_and_llm, inputs=shared_inputs,
                     outputs=[preprocess_output, llm_output])

demo.launch(share=False)
|
|