Feature scan info (#53)

* perf: migrate fetch model info to backend

* fix(download): can't select model type

* feat: add scan model info

* feat: add trigger button in setting

* feat: add printing logs

* chore: add explanation of scan model info
Hayden authored 2024-11-21 22:04:39 +08:00, committed by GitHub
parent 6ae7e1835f
commit 659637c6e0
20 changed files with 921 additions and 428 deletions


@@ -5,6 +5,7 @@ import folder_paths
from . import config
from . import utils
from . import download
from . import searcher


def scan_models():
@@ -128,3 +129,180 @@ async def resume_model_download_task(task_id, request):
async def delete_model_download_task(task_id):
    return await download.delete_model_download_task(task_id)
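

# fetch_model_info resolves a model page URL to the matching site searcher
# and returns the metadata that searcher extracts from the page; an empty
# model page yields an empty result.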
def fetch_model_info(model_page: str):
    if not model_page:
        return []
    model_searcher = searcher.get_model_searcher_by_url(model_page)
    result = model_searcher.search_by_url(model_page)
    return result
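

# download_model_info walks every configured model folder, pairs each model
# file with its sibling preview image and Markdown description, and fills in
# anything missing by looking the file's sha256 hash up on Civitai.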
async def download_model_info(scan_mode: str):
    utils.print_info(f"Download model info for {scan_mode}")
    model_base_paths = config.model_base_paths
    for model_type in model_base_paths:
        folders, extensions = folder_paths.folder_names_and_paths[model_type]
        for path_index, base_path in enumerate(folders):
            files = utils.recursive_search_files(base_path)
            models = folder_paths.filter_files_extensions(files, extensions)
            images = folder_paths.filter_files_content_types(files, ["image"])
            image_dict = utils.file_list_to_name_dict(images)
            descriptions = folder_paths.filter_files_extensions(files, [".md"])
            description_dict = utils.file_list_to_name_dict(descriptions)
            for fullname in models:
                fullname = utils.normalize_path(fullname)
                basename = os.path.splitext(fullname)[0]
                abs_model_path = utils.join_path(base_path, fullname)
                image_name = image_dict.get(basename, "no-preview.png")
                abs_image_path = utils.join_path(base_path, image_name)
                has_preview = os.path.isfile(abs_image_path)
                description_name = description_dict.get(basename, None)
                abs_description_path = (
                    utils.join_path(base_path, description_name)
                    if description_name
                    else None
                )
                has_description = (
                    os.path.isfile(abs_description_path)
                    if abs_description_path
                    else False
                )
                try:
                    utils.print_info(f"Checking model {abs_model_path}")
                    utils.print_debug(f"Scan mode: {scan_mode}")
                    utils.print_debug(f"Has preview: {has_preview}")
                    utils.print_debug(f"Has description: {has_description}")
                    # Unless a full scan is requested, skip models that
                    # already have both a preview and a description.
                    if scan_mode != "full" and (has_preview and has_description):
                        continue
                    utils.print_debug(f"Calculate sha256 for {abs_model_path}")
                    hash_value = utils.calculate_sha256(abs_model_path)
                    utils.print_info(f"Searching model info by hash {hash_value}")
                    model_info = searcher.CivitaiModelSearcher().search_by_hash(
                        hash_value
                    )
                    # Take the first preview image returned, if any.
                    preview_url_list = model_info.get("preview", [])
                    preview_image_url = (
                        preview_url_list[0] if preview_url_list else None
                    )
                    if preview_image_url:
                        utils.print_debug(f"Save preview image to {abs_image_path}")
                        utils.save_model_preview_image(
                            abs_model_path, preview_image_url
                        )
                    description = model_info.get("description", None)
                    if description:
                        utils.save_model_description(abs_model_path, description)
                except Exception as e:
                    utils.print_error(
                        f"Failed to download model info for {abs_model_path}: {e}"
                    )
    utils.print_debug("Completed scan model information.")
async def migrate_legacy_information():
    import json
    import yaml
    from PIL import Image

    utils.print_info("Migrating legacy information...")
    model_base_paths = config.model_base_paths
    for model_type in model_base_paths:
        folders, extensions = folder_paths.folder_names_and_paths[model_type]
        for path_index, base_path in enumerate(folders):
            files = utils.recursive_search_files(base_path)
            models = folder_paths.filter_files_extensions(files, extensions)
            for fullname in models:
                fullname = utils.normalize_path(fullname)
                abs_model_path = utils.join_path(base_path, fullname)
                base_file_name = os.path.splitext(abs_model_path)[0]
                utils.print_debug(f"Try to migrate legacy info for {abs_model_path}")
                # Re-encode the existing preview as <model>.webp and drop the
                # original file.
                preview_path = utils.join_path(
                    os.path.dirname(abs_model_path),
                    utils.get_model_preview_name(abs_model_path),
                )
                new_preview_path = f"{base_file_name}.webp"
                if os.path.isfile(preview_path) and preview_path != new_preview_path:
                    utils.print_info(f"Migrate preview image from {fullname}")
                    with Image.open(preview_path) as image:
                        image.save(new_preview_path, format="WEBP")
                    os.remove(preview_path)
                metadata_info = {
                    "website": "Civitai",
                }
                # Legacy .url files are INI-style shortcuts; pull the model
                # page out of the "URL=" entry.
                url_info_path = f"{base_file_name}.url"
                if os.path.isfile(url_info_path):
                    with open(url_info_path, "r", encoding="utf-8") as f:
                        for line in f:
                            if line.startswith("URL="):
                                model_page_url = line[len("URL=") :].strip()
                                metadata_info.update({"modelPage": model_page_url})
                json_info_path = f"{base_file_name}.json"
                if os.path.isfile(json_info_path):
                    with open(json_info_path, "r", encoding="utf-8") as f:
                        version = json.load(f)
                    metadata_info.update(
                        {
                            "baseModel": version.get("baseModel"),
                            "preview": [i["url"] for i in version["images"]],
                        }
                    )
                # Assemble the new description: YAML front matter followed by
                # the legacy .txt content, if any.
                description_parts: list[str] = [
                    "---",
                    yaml.dump(metadata_info).strip(),
                    "---",
                    "",
                ]
                text_info_path = f"{base_file_name}.txt"
                if os.path.isfile(text_info_path):
                    with open(text_info_path, "r", encoding="utf-8") as f:
                        description_parts.append(f.read())
                description_path = f"{base_file_name}.md"
                if os.path.isfile(text_info_path):
                    utils.print_info(f"Migrate description from {fullname}")
                    with open(description_path, "w", encoding="utf-8", newline="") as f:
                        f.write("\n".join(description_parts))

                def try_to_remove_file(file_path):
                    if os.path.isfile(file_path):
                        os.remove(file_path)

                try_to_remove_file(url_info_path)
                try_to_remove_file(text_info_path)
                try_to_remove_file(json_info_path)
    utils.print_debug("Completed migrate model information.")