From 4fdea542b0ce333ab5fb2a123992cdf248b437d4 Mon Sep 17 00:00:00 2001
From: wangdage12 <124357765+wangdage12@users.noreply.github.com>
Date: Fri, 21 Nov 2025 14:59:30 +0800
Subject: [PATCH] Add a tool to recover old image resources from request logs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a tool that recovers old image resources from request logs. The
image cache names each cached file after the uppercase SHA-1 hex digest
of its source URL; the tool extracts the original URLs from an HTTP
request log, locates the matching cache files, and copies them back out
under their original names and folders.
---
 tools/GetImages.py | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 tools/GetImages.py

diff --git a/tools/GetImages.py b/tools/GetImages.py
new file mode 100644
index 0000000..a32bbe1
--- /dev/null
+++ b/tools/GetImages.py
@@ -0,0 +1,64 @@
+import hashlib
+import os
+import re
+import shutil
+
+def sha1_name(filename):
+    """Return the uppercase 40-character SHA-1 hex digest of the input name."""
+    return hashlib.sha1(filename.encode('utf-8')).hexdigest().upper()
+
+def search_and_copy(original_name, search_dir, output_dir):
+    sha_name = sha1_name(original_name)
+    print(f"SHA-1: {sha_name}")
+
+    # Find the cache file whose name matches the SHA-1 of the URL
+    matched_path = None
+    for root, _dirs, files in os.walk(search_dir):
+        for file in files:
+            if file.upper() == sha_name:
+                matched_path = os.path.join(root, file)
+                break
+        if matched_path:
+            break
+
+    if not matched_path:
+        print("❌ No matching file found!")
+        return
+
+    # Create the output directory
+    os.makedirs(output_dir, exist_ok=True)
+
+    # The "name" is a URL; its last path segment is the original file name
+    original_filename = os.path.basename(original_name)
+    # Recreate the last folder of the URL path under the output directory
+    last_folder = original_name.split('/')[-2]
+    output_subdir = os.path.join(output_dir, last_folder)
+    os.makedirs(output_subdir, exist_ok=True)
+    new_path = os.path.join(output_subdir, original_filename)
+    shutil.copy(matched_path, new_path)
+    print(f"✔ Copied and renamed: {new_path}")
+
+def extract_urls_from_log(log_file):
+    """Extract the original resource URLs from a request log and return them as a list."""
+    base_url = "https://api.snapgenshin.com/static/raw"
+    url_pattern = re.compile(r'GET\s+/static/raw/([^/]+)/([^ ]+)\s+HTTP')
+
+    urls = []
+    with open(log_file, 'r', encoding='utf-8') as f:
+        for line in f:
+            match = url_pattern.search(line)
+            if match:
+                category, filename = match.groups()
+                urls.append(f"{base_url}/{category}/{filename}")
+    return urls
+
+
+if __name__ == "__main__":
+    log_file = "1.txt"  # path to the request log
+    urls = extract_urls_from_log(log_file)
+    search_directory = "C:\\Users\\username\\AppData\\Local\\Packages\\60568DGPStudio.SnapHutao_wbnnev551gwxy1\\LocalCache\\ImageCache"  # directory to search
+    output_directory = "C:\\Users\\username\\AppData\\Local\\Packages\\60568DGPStudio.SnapHutao_wbnnev551gwxy1\\LocalCache\\ImageCache\\output"  # output directory
+
+    # De-duplicate while preserving order so each URL is handled only once
+    for url in dict.fromkeys(urls):
+        search_and_copy(url, search_directory, output_directory)
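
A note on the mechanism, for reviewers: the tool rests on the cache-naming
convention that a cached file is named after the uppercase SHA-1 hex digest of
its full source URL. Below is a minimal sketch to sanity-check that assumption
against a single cache entry; the URL and the cache path are hypothetical
examples, not values taken from this patch:

    import hashlib
    import os

    # Hypothetical URL in the shape the log regex extracts
    url = "https://api.snapgenshin.com/static/raw/AvatarIcon/UI_AvatarIcon_Hutao.png"

    # Expected cache file name: uppercase 40-character SHA-1 of the full URL
    expected = hashlib.sha1(url.encode("utf-8")).hexdigest().upper()
    print(expected)

    # If the convention holds, a file with exactly this name should exist
    # somewhere under the ImageCache directory (this path is an assumption)
    cache_dir = r"C:\Users\username\AppData\Local\Packages\60568DGPStudio.SnapHutao_wbnnev551gwxy1\LocalCache\ImageCache"
    hit = any(expected in map(str.upper, files) for _, _, files in os.walk(cache_dir))
    print("cached" if hit else "not cached")

A request-log line that the extraction regex would match looks roughly like
"GET /static/raw/AvatarIcon/UI_AvatarIcon_Hutao.png HTTP/1.1" (format inferred
from the pattern in the patch, not from an actual log sample).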