# tanach_clock / cli_clock.py
# Author: neuralworm — "cli_clock error handling" (commit 44db853)
import json
import logging
import datetime
import time
import requests
import pytz
from deep_translator import GoogleTranslator
from deep_translator.exceptions import NotValidLength, RequestError
from utils import process_json_files, flatten_text_with_line_breaks, build_word_index
import sqlite3 # Import sqlite3 for database handling
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Load Tanach text
# Books 1-39 (the full Tanach); WORD_INDEX maps a position (presumably a
# seconds-since-midnight offset — defined in utils; confirm) to word metadata.
TANACH_DATA = process_json_files(1, 39)
WORD_INDEX = build_word_index(TANACH_DATA)
# --- Database Setup ---
# Module-level connection shared by translate_chapter(); fine for this
# single-threaded CLI, but note sqlite3 connections are not thread-safe
# across threads by default.
conn = sqlite3.connect('translation_cache.db') # Create or connect to database
cursor = conn.cursor()
# One cached English translation per (book, chapter); the composite
# primary key prevents duplicate cache rows.
cursor.execute('''
CREATE TABLE IF NOT EXISTS translations (
book_id INTEGER,
chapter_id INTEGER,
english_text TEXT,
PRIMARY KEY (book_id, chapter_id)
)
''')
conn.commit()
def translate_chapter(hebrew_chapter, book_id, chapter_id):
    """Translate a Hebrew chapter to English, caching the result in SQLite.

    Args:
        hebrew_chapter: Chapter text as one string, one verse per line
            ('\\n'-separated) — callers index the result by verse number,
            so line alignment must be preserved.
        book_id: Book index, part of the cache key.
        chapter_id: Chapter index, part of the cache key.

    Returns:
        List of English lines (one per Hebrew line on success), or a
        single-element fallback message when the service fails.
    """
    # Serve from the cache when a translation already exists.
    cursor.execute(
        "SELECT english_text FROM translations WHERE book_id=? AND chapter_id=?",
        (book_id, chapter_id)
    )
    result = cursor.fetchone()
    if result:
        return result[0].split('\n')  # Retrieve from database and split into lines
    try:
        translator = GoogleTranslator(source='iw', target='en')
        max_length = 2000  # Slightly below the service limit to be safe
        # Chunk on LINE boundaries rather than raw character offsets:
        # character slicing could split a verse across two requests and
        # the pieces were previously concatenated with no separator,
        # breaking the Hebrew<->English line alignment.
        chunks = []
        current = ""
        for line in hebrew_chapter.split('\n'):
            if current and len(current) + 1 + len(line) > max_length:
                chunks.append(current)
                current = line
            else:
                current = f"{current}\n{line}" if current else line
        if current:
            chunks.append(current)
        # translate() may return None for empty/untranslatable input;
        # coerce to "" so the join cannot raise.
        translated_text = '\n'.join(
            (translator.translate(chunk) or "") for chunk in chunks
        )
        # Store the translation in the database
        cursor.execute(
            "INSERT INTO translations (book_id, chapter_id, english_text) VALUES (?, ?, ?)",
            (book_id, chapter_id, translated_text)
        )
        conn.commit()
        return translated_text.split('\n')  # Return as list of lines
    except (RequestError, NotValidLength) as e:
        # NotValidLength was imported but never caught before: a single
        # line longer than the limit would have crashed the clock loop.
        logging.warning(f"Translation failed: Request Error - {e}")
        return ["Translation unavailable: Request Error"]
def display_current_verse():
    """Run the clock loop: once per second, show the verse for the current time.

    Clears the terminal, prints the current time, the verse reference,
    the Hebrew line, and its English translation (cached via
    translate_chapter). Loops forever; Ctrl-C to stop.
    """
    while True:
        now = datetime.datetime.now()
        current_time_str = now.strftime("%H:%M:%S")
        word_data, _ = get_current_word_data(current_time_str)
        if word_data is None:
            logging.error("Word data not found for current time.")
            time.sleep(1)
            continue
        book_id = word_data["book_id"]
        chapter_id = word_data["chapter_id"]
        verse_id = word_data["verse_id"]
        # One Hebrew verse per list element; verse_id is 1-based.
        hebrew_chapter = flatten_text_with_line_breaks(TANACH_DATA[book_id]["text"][chapter_id])
        english_chapter = translate_chapter('\n'.join(hebrew_chapter), book_id, chapter_id)
        print("\033c", end="")  # Clear the terminal
        print(f"Time: {current_time_str}")
        print(f"{TANACH_DATA[book_id]['title']}, Chapter {chapter_id + 1}, Verse {verse_id}")
        print("-" * 30)
        print(hebrew_chapter[verse_id - 1])
        try:
            # The translation may have fewer lines than the Hebrew text
            # (service collapsed newlines, or the fallback message was
            # returned) — only IndexError is expected here, so catch
            # narrowly instead of swallowing every Exception.
            print(english_chapter[verse_id - 1])
        except IndexError as e:
            logging.error(f"Error displaying translation: {e}")
        print("-" * 30)
        time.sleep(1)
# --- Utility Functions --- (Same as before)
def get_current_word_data(client_time_str):
    """Map an HH:MM:SS time string to the word scheduled for that second.

    Args:
        client_time_str: Time of day formatted as "%H:%M:%S".

    Returns:
        (word_data, word_position) on success, or (None, None) when the
        time string is malformed or the word index is empty.
    """
    try:
        client_time = datetime.datetime.strptime(client_time_str, "%H:%M:%S")
        # Seconds since midnight — read the parsed components directly
        # instead of round-tripping through strftime() and int().
        total_seconds = (client_time.hour * 3600
                         + client_time.minute * 60
                         + client_time.second)
        # Pick the index entry closest to the current second, so gaps in
        # WORD_INDEX still resolve to a word.
        word_position = min(WORD_INDEX.keys(), key=lambda k: abs(k - total_seconds))
        return WORD_INDEX[word_position], word_position
    except ValueError as e:
        # ValueError: raised by strptime on a bad time string, and by
        # min() on an empty index. Narrower than the previous bare
        # Exception, which also hid genuine bugs (KeyError, NameError).
        logging.error(f"Error processing client time: {e}")
        return None, None
if __name__ == "__main__":
    # Start the live clock loop when run as a script (Ctrl-C to exit).
    display_current_verse()