init python:
import re
import requests
import html
import random
import json
import time
import hashlib
import weakref
# --- Module-level translation state -------------------------------------
# On-disk persistent cache of {original: translated} strings.
TRANSLATION_CACHE_FILE = "translation_cache.json"
# Texts queued for the next batched translation request.
PENDING_TRANSLATIONS = []
# original text -> live Text displayable awaiting in-place redraw; weak
# values so dead displayables do not keep the entry alive.
TEXT_OBJECTS_TO_REDRAW = weakref.WeakValueDictionary()
# Timestamp of the last dispatched batch (rate limiting).
LAST_TRANSLATION_TIME = 0
# Cache size at the last disk save; used to throttle writes.
# (The original had a module-level `global` statement here, which is a
# no-op at module scope and has been dropped.)
LAST_SAVED_CACHE_SIZE = 0
# Fast membership indexes over the cache keys/values.
original_texts_set = set()
translated_texts_set = set()
# Texts whose translation came back unchanged once (retried at most once).
retry_texts_set = set()
# translated text -> adjusted font size, consumed by the render hook.
font_size_cache = {}
try:
    with open(TRANSLATION_CACHE_FILE, "r", encoding="utf-8") as f:
        translation_cache = json.load(f)
    for original, translated in translation_cache.items():
        original_texts_set.add(original)
        translated_texts_set.add(translated)
    LAST_SAVED_CACHE_SIZE = len(translation_cache)
except (OSError, ValueError):
    # Missing or corrupt cache file: start empty.  The original used a
    # bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
    translation_cache = {}
    LAST_SAVED_CACHE_SIZE = 0
def save_translation_cache():
    """Flush the in-memory translation cache to disk, throttled.

    A write only happens once at least 50 new entries have accumulated
    since the previous save, to limit disk churn.

    Returns:
        True if the cache was written, False if the save was skipped.
    """
    global LAST_SAVED_CACHE_SIZE
    size_now = len(translation_cache)
    if size_now - LAST_SAVED_CACHE_SIZE < 50:
        return False
    with open(TRANSLATION_CACHE_FILE, "w", encoding="utf-8") as cache_file:
        json.dump(translation_cache, cache_file, ensure_ascii=False, indent=2)
    LAST_SAVED_CACHE_SIZE = size_now
    return True
# Regex patterns for substrings that must survive translation untouched:
# engine interpolation and markup that would break if the translator
# altered them.  Matched spans are wrapped/protected before sending text
# to the translation service.
PROTECT_PATTERNS = [
    # printf-style substitutions, e.g. %s, %(name)d, %%
    r'%[^%]+?[bdiouxXeEfFgGcrsaHMS%]',
    # Ren'Py brace text tags / interpolation, e.g. {b}, {color=#fff},
    # tolerating one nested brace pair
    r'\{[^{}]*\{?[^{}]*\}?[^{}]*\}',
    # double-bracketed tokens, e.g. [[literal bracket text]]
    r'\[\[.*?\]\]',
    # escape sequences: \n \" \' \% and backslash-space
    r'\\[n"\'% ]',
    # rest-of-line spans introduced by #, $ or @
    r'#.*?(?=\n|$)',
    r'\$.*?(?=\n|$)',
    r'@.*?(?=\n|$)',
    # HTML/XML-like tags
    r'<.*?>',
]
# Compiled once as a single alternation; pattern order matters because
# the first alternative that matches wins.
COMBINED_PATTERN = re.compile('|'.join(PROTECT_PATTERNS))
def calculate_text_length_ratio(original_text, translated_text):
    """Return a damped length ratio of translated vs. original text.

    The raw ratio len(translated)/len(original) is averaged with 1.0,
    so the result deviates from 1.0 only half as much as the raw ratio.
    An empty original yields 1.0.
    """
    source_length = len(original_text)
    if source_length == 0:
        return 1.0
    raw_ratio = float(len(translated_text)) / source_length
    return (1 + raw_ratio) / 2
def get_adjusted_font_size(original_text, translated_text, original_size=22):
    """Scale a font size when the translation's length differs a lot.

    Controlled by FONT_SIZE_ADJUSTMENT_CONFIG: returns `original_size`
    unchanged unless adjustment is enabled and the damped length ratio
    falls outside the threshold band.
    """
    cfg = FONT_SIZE_ADJUSTMENT_CONFIG
    if not cfg['enabled']:
        return original_size
    ratio = calculate_text_length_ratio(original_text, translated_text)
    threshold = cfg['length_threshold']
    if ratio > threshold:
        # Translation grew: shrink, clamped below by min_scale.
        factor = max(1.0 / ratio, cfg['min_scale'])
    elif ratio < 1.0 / threshold:
        # Translation shrank: multiply by ratio (< 1), clamped by
        # max_scale.  NOTE(review): this also *shrinks* the text —
        # possibly 1/ratio was intended here; confirm before changing.
        factor = min(ratio, cfg['max_scale'])
    else:
        return original_size
    return int(original_size * factor)
def translation_thread(texts_to_translate):
    """Background worker: translate one batch, then hand the results
    off (on another renpy thread) to process_translation_results."""
    batch_result = translate_batch(texts_to_translate)
    renpy.invoke_in_thread(process_translation_results, texts_to_translate, batch_result)
def _send_batch_translation_request(html_content, target_lang):
    """POST one batch of HTML-wrapped texts to Google's translateHtml
    endpoint and return the translated HTML string.

    Raises requests.HTTPError on a non-2xx response, and the usual
    requests exceptions on network failure / 10s timeout.
    """
    # SECURITY(review): hard-coded API key committed to source — move it
    # to configuration/environment and rotate the key.
    api_key = "AIzaSyATBXajvzQLTDHEQbcpq0Ihe0vWDHmO520"
    url = "https://translate-pa.googleapis.com/v1/translateHtml"
    headers = {
        # NOTE(review): "/" is an unusual Accept value — "*/*" was
        # probably intended; confirm the endpoint tolerates it.
        "Accept": "/",
        "Content-Type": "application/json+protobuf",
        # Rotate user agents per request.
        "User-Agent": random.choice(USER_AGENTS),
        "X-Goog-API-Key": api_key,
        "model": "nmt"
    }
    # Protobuf-over-JSON payload: [[html, source_lang, target_lang], client]
    data = [
        [html_content, "auto", target_lang],
        "wt_lib"
    ]
    response = requests.post(
        url,
        headers=headers,
        json=data,
        proxies=None if not PROXIES_ENABLED else PROXIES,
        timeout=10,
        #verify=False,
    )
    response.raise_for_status()
    result = response.json()
    # Response shape observed: [[translated_html, ...], ...]
    return result[0][0]
def process_pending_translations():
    """Periodic callback: dispatch one batched translation request.

    Rate-limited by the `time_interval` global.  The pending queue is
    de-duplicated, cleared, and handed to a background thread.
    """
    global PENDING_TRANSLATIONS, LAST_TRANSLATION_TIME
    now = time.time()
    # Nothing to do while rate-limited or when the queue is empty.
    if (now - LAST_TRANSLATION_TIME) < time_interval or not PENDING_TRANSLATIONS:
        return
    batch = list(set(PENDING_TRANSLATIONS))
    PENDING_TRANSLATIONS = []
    LAST_TRANSLATION_TIME = now
    renpy.invoke_in_thread(translation_thread, batch)
def process_translation_results(texts_to_translate, translations):
    """Merge a finished translation batch into the cache and redraw.

    Successful translations are cached and indexed; a live Text
    displayable waiting on that string (if any) is updated in place.
    Results that came back identical to the input are retried once,
    tracked via `retry_texts_set`.
    """
    for original, translated in translations.items():
        if (original != translated) and (original not in retry_texts_set):
            translation_cache[original] = translated
            original_texts_set.add(original)
            translated_texts_set.add(translated)
            # BUGFIX: TEXT_OBJECTS_TO_REDRAW is a WeakValueDictionary,
            # so an entry can be garbage-collected between a membership
            # test and a later lookup/del — the original check-then-index
            # pattern could raise KeyError in this worker thread.  A
            # single pop() takes the displayable atomically.
            text_obj = TEXT_OBJECTS_TO_REDRAW.pop(original, None)
            if text_obj is not None:
                if hasattr(text_obj, 'style'):
                    # Record the adjusted size BEFORE set_text so the
                    # subsegment hook sees it when the redraw happens.
                    original_size = text_obj.style.size
                    adjusted_size = get_adjusted_font_size(original, translated, original_size)
                    font_size_cache[translated] = adjusted_size
                text_obj.set_text(translated)
        else:
            # Identity result: remember it and queue exactly one retry.
            retry_texts_set.add(original)
            PENDING_TRANSLATIONS.append(original)
    save_translation_cache()
    #renpy.restart_interaction()

# Poll the pending queue on Ren'Py's periodic tick.
config.periodic_callbacks.append(process_pending_translations)
def translate_line(text, target_lang=TARGET_LANGUAGE):
    """Return the cached translation of `text`, queueing it if unseen.

    Empty/whitespace strings (or translation disabled) pass through.
    Known originals return their cached translation; strings that are
    themselves cached translations pass through; anything else is
    enqueued for batch translation and returned unchanged for now.
    """
    if not ENABLE_TRANSLATION:
        return text
    if not text.strip():
        return text
    if text in original_texts_set:
        return translation_cache[text]
    if text not in translated_texts_set:
        if text not in PENDING_TRANSLATIONS:
            PENDING_TRANSLATIONS.append(text)
        process_pending_translations()
    return text
def _text_render_debug(self, *args, **kwargs):
    """Monkey-patched replacement for renpy.text.text.Text.update.

    Before delegating to the original update, swaps the displayable's
    text for its cached translation, or queues the text for translation
    and registers this displayable for an in-place redraw once the
    result arrives.
    """
    if not ENABLE_TRANSLATION:
        return original_render(self, *args, **kwargs)
    if hasattr(self, "text") and self.text:
        # NOTE(review): only the first element of self.text is handled;
        # multi-part text lists keep their remaining parts untranslated.
        text_content = self.text[0]
        if text_content in original_texts_set:
            # Known original: substitute the cached translation.
            self.text = [translation_cache[text_content]]
        elif text_content in translated_texts_set:
            # Already a translation: leave it alone.
            pass
        else:
            # Unseen text: queue it and remember the displayable so
            # process_translation_results can update it in place.
            if text_content not in PENDING_TRANSLATIONS:
                PENDING_TRANSLATIONS.append(text_content)
            # Weak value: entry vanishes if the displayable dies first.
            TEXT_OBJECTS_TO_REDRAW[text_content] = self
    return original_render(self, *args, **kwargs)

# Install the hook, keeping a handle to the stock implementation.
original_render = renpy.text.text.Text.update
renpy.text.text.Text.update = _text_render_debug
# Runs once before the main menu is shown: flush any pending batch and
# force a cache save, bypassing the 50-entry throttle used by
# save_translation_cache().
label before_main_menu:
    python:
        process_pending_translations()
        current_size = len(translation_cache)
        if current_size > LAST_SAVED_CACHE_SIZE:
            with open(TRANSLATION_CACHE_FILE, "w", encoding="utf-8") as f:
                json.dump(translation_cache, f, ensure_ascii=False, indent=2)
            LAST_SAVED_CACHE_SIZE = current_size
init python:
    # Tracks the font most recently passed to FontGroup.add so that
    # repeated re-adds of the same font (from the per-render subsegment
    # hook) become cheap no-ops.
    global latest_font
    latest_font = "None"

    def add2(self, font, start, end, target=None, target_increment=False):
        """Patched FontGroup.add with a same-font fast path.

        Mirrors the stock renpy.text.font.FontGroup.add mapping logic,
        but returns immediately when called twice in a row with the
        same `font` — avoiding map rebuilds on every text render.
        NOTE(review): nesting below reconstructed to match the stock
        implementation; the source this was reviewed from had its
        indentation stripped — verify against the original file.
        """
        global latest_font
        if font != latest_font:
            latest_font = font
            # This variant does not merge whole FontGroups; bail out.
            if isinstance(font, FontGroup):
                return self
            if start is None:
                # No range given: install `font` as the default (key None).
                if isinstance(font, FontGroup):
                    # NOTE(review): unreachable — the FontGroup case
                    # already returned above; retained from stock code.
                    for k, v in font.map.items():
                        self.map[k] = v
                        if k in font.char_map:
                            self.char_map[k] = font.char_map[k]
                        elif k in self.char_map:
                            del self.char_map[k]
                else:
                    self.map[None] = font
                    if None in self.char_map:
                        del self.char_map[None]
                return self
            # Range endpoints may be characters or integer code points.
            if not isinstance(start, int):
                start = ord(start)
            if not isinstance(end, int):
                end = ord(end)
            if target and not isinstance(target, int):
                target = ord(target)
            if end < start:
                raise Exception("In FontGroup.add, the start of a character range must be before the end of the range.")
            # Map every code point in [start, end] to this font, with an
            # optional character remap (target / target_increment).
            for i in range(start, end + 1):
                self.map[i] = font
                if target is not None:
                    self.char_map[i] = target
                    if target_increment:
                        target += 1
                elif i in self.char_map:
                    del self.char_map[i]
            return self
        else:
            # Same font as the previous call: nothing to do.
            return self

    # Install the patch, keeping the stock implementation around.
    original_add = renpy.text.font.FontGroup.add
    renpy.text.font.FontGroup.add = add2
# Shared FontGroup used by the subsegment hook: translated-language font
# as the default, plus a color-emoji fallback.  Font loading is best
# effort — a missing font file only logs.
my_fontgroup = FontGroup()
try:
    my_fontgroup.add(TRANS_FONT, None, None)
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate; behavior is otherwise unchanged.
    print("add main font failed")
try:
    # Unicode emoji planes (Symbols & Pictographs and extensions).
    my_fontgroup.add("TwemojiCOLRv0.ttf", 0x1F300, 0x1FAFF)
except Exception:
    print("add emoji failed")
def hook_tssubseg(self, s):
    """Patched TextSegment.subsegment: apply cached font-size overrides
    and route the segment's font through the shared FontGroup before
    delegating to the stock implementation.
    """
    if not ENABLE_TRANSLATION:
        return original_subsegment(self, s)
    # Apply the size precomputed for this translated string, if any.
    if s in font_size_cache:
        self.size = font_size_cache[s]
    # Map the segment's current font over the ASCII range, then swap in
    # the group.  The add2 same-font fast path keeps this cheap even
    # though it runs on every subsegment.
    my_fontgroup.add(self.font, 0x0000, 0x007F)
    self.font = my_fontgroup
    return original_subsegment(self, s)

# Install the hook, keeping the stock implementation around.
original_subsegment = renpy.text.text.TextSegment.subsegment
renpy.text.text.TextSegment.subsegment = hook_tssubseg
init python:
    def translate_with_llm(texts, target_lang=TARGET_LANGUAGE):
        """Translate `texts` via an LLM endpoint, protecting markup.

        Builds a few-shot context from recent cache entries, masks
        protected spans (see COMBINED_PATTERN) behind per-text MD5
        hashes before sending, then restores them from the response.
        Falls back to identity mapping on any error.

        NOTE(review): this function's source appears CORRUPTED — the
        HTML wrapper markup inside string literals (apparently
        <code id=...>/<div id=...> style tags) has been stripped,
        leaving unterminated strings below, and the lines that issue
        the HTTP request and define `response`, `translated_texts` and
        `idx` are missing entirely.  Restore this block from version
        control before editing; the code is reproduced as found.
        """
        if not ENABLE_TRANSLATION or not texts:
            return {text: text for text in texts}
        # Few-shot context from the most recent cached translations.
        context_items = []
        if translation_cache:
            recent_items = list(translation_cache.items())[-APPENDED_LINES:]
            for original, translated in recent_items:
                context_items.append("Original: {}\nTranslated: {}".format(original, translated))
        context = "\n\n".join(context_items) if context_items else "No previous translation context available"
        prompt = TRANSLATION_PROMPT.format(target_lang=target_lang, context=context)
        # Mask protected spans so the model cannot mangle them.
        protected_map = {}
        protected_texts = []
        for i, text in enumerate(texts):
            text_hash = hashlib.md5(text.encode('utf-8')).hexdigest()
            # NOTE(review): string literal truncated — wrapper tag markup missing.
            protected_text = COMBINED_PATTERN.sub(
                lambda match: '{1}
'.format(text_hash, match.group(0)),
                text
            )
            protected_map[text_hash] = text
            protected_texts.append(protected_text)
        # NOTE(review): lines truncated/missing from here: the join
        # template, the HTTP request, and the start of response parsing.
        combined_html = "".join(['
(.*?)
', re.DOTALL)
        # Restore each protected span from its hash-tagged wrapper.
        for code_match in code_pattern.finditer(translated_text):
            text_hash = code_match.group(1)
            if text_hash in protected_map:
                original_code = code_match.group(2)
                translated_text = translated_text.replace(code_match.group(0), original_code)
        # Normalize whitespace the model may have added around braces.
        translated_text = re.sub(r'\s*\{\s*', '{', translated_text)
        translated_text = re.sub(r'\s*\}\s*', '}', translated_text)
        translated_texts[texts[idx]] = translated_text
        # Identity-fill anything the response did not cover.
        for i, text in enumerate(texts):
            if text not in translated_texts:
                translated_texts[text] = text
        return translated_texts
        else:
            print("API error: {0} - {1}".format(response.status_code, response.text))
            return {text: text for text in texts}
        except Exception as e:
            print("llm translation error: {0}".format(str(e)))
            return {text: text for text in texts}
    def translate_batch(texts, target_lang=TARGET_LANGUAGE):
        """Translate a batch of texts, dispatching by TRANSLATION_SERVICE.

        "LLM" delegates to translate_with_llm; otherwise the Google
        translateHtml path is used with the same hash-masking scheme for
        protected spans.  Falls back to identity mapping on error.

        NOTE(review): like translate_with_llm above, this block's source
        appears CORRUPTED — wrapper-tag string literals are truncated
        and the lines calling _send_batch_translation_request and
        defining `translated_text`/`translated_texts`/`idx` (plus the
        opening `try:`) are missing.  Restore from version control;
        code reproduced as found.
        """
        if TRANSLATION_SERVICE == "LLM":
            return translate_with_llm(texts, target_lang)
        else:
            if not ENABLE_TRANSLATION or not texts:
                print("Translation disabled or no texts provided.")
                return {text: text for text in texts}
            # Mask protected spans behind per-text MD5 hashes.
            protected_map = {}
            protected_texts = []
            for text in texts:
                text_hash = hashlib.md5(text.encode('utf-8')).hexdigest()
                # NOTE(review): string literal truncated — wrapper tag markup missing.
                protected_text = COMBINED_PATTERN.sub(
                    lambda match: '{1}
'.format(text_hash, match.group(0)),
                    text
                )
                protected_map[text_hash] = text
                protected_texts.append(protected_text)
            # NOTE(review): lines truncated/missing from here: the join
            # template and the request/response handling.
            combined_html = "".join(['(.*?)
', re.DOTALL)
            # Restore each protected span from its hash-tagged wrapper.
            for code_match in code_pattern.finditer(translated_text):
                text_hash = code_match.group(1)
                if text_hash in protected_map:
                    original_code = code_match.group(2)
                    translated_text = translated_text.replace(code_match.group(0), original_code)
            # Normalize whitespace added around braces.
            translated_text = re.sub(r'\s*\{\s*', '{', translated_text)
            translated_text = re.sub(r'\s*\}\s*', '}', translated_text)
            translated_texts[texts[idx]] = translated_text
            return translated_texts
        except Exception as e:
            return {text: text for text in texts}