Compare commits

1 Commit

Author: Robin Huang
SHA1: 982a60e85f
Message: Add linux form factor.
Date: 2025-03-11 11:32:53 -07:00
51 changed files with 8731 additions and 33899 deletions

View File

@@ -7,19 +7,15 @@ on:
     paths:
       - "pyproject.toml"
-permissions:
-  issues: write
 jobs:
   publish-node:
     name: Publish Custom Node to registry
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'ltdrdata' }}
     steps:
       - name: Check out code
         uses: actions/checkout@v4
       - name: Publish Custom Node
-        uses: Comfy-Org/publish-node-action@v1
+        uses: Comfy-Org/publish-node-action@main
        with:
          ## Add your own personal access token to your Github Repository secrets and reference it here.
          personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}

View File

@@ -314,6 +314,9 @@ The following settings are applied based on the section marked as `is_default`.
 * Use `aria2` as downloader
   * [howto](docs/en/use_aria2.md)
+* If you add the item `skip_migration_check = True` to `config.ini`, it will not check whether there are nodes that can be migrated at startup.
+  * This option can be used if performance issues occur in a Colab+GDrive environment.
 
 ## Environment Variables
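For readers unfamiliar with the option added above: `skip_migration_check` is a plain boolean entry in ComfyUI-Manager's `config.ini` (later in this diff it is read with `get_bool('skip_migration_check', False)`). A minimal sketch of reading such a flag; the `[default]` section name and the file location are assumptions here, not taken from this diff:

```python
import configparser

# Hypothetical config.ini contents:
#   [default]
#   skip_migration_check = True
parser = configparser.ConfigParser()
parser.read("config.ini")

# Falls back to False when the entry is absent, mirroring get_bool('skip_migration_check', False).
skip_migration_check = parser.getboolean("default", "skip_migration_check", fallback=False)
print("skip migration check:", skip_migration_check)
```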

View File

@@ -1,7 +1,3 @@
-"""
-This file is the entry point for the ComfyUI-Manager package, handling CLI-only mode and initial setup.
-"""
 import os
 import sys

View File

@@ -43,13 +43,9 @@ import cnr_utils
 comfyui_manager_path = os.path.abspath(os.path.dirname(__file__))
-cm_global.pip_blacklist = {'torch', 'torchaudio', 'torchsde', 'torchvision'}
-cm_global.pip_downgrade_blacklist = ['torch', 'torchaudio', 'torchsde', 'torchvision', 'transformers', 'safetensors', 'kornia']
-if sys.version_info < (3, 13):
-    cm_global.pip_overrides = {'numpy': 'numpy<2'}
-else:
-    cm_global.pip_overrides = {}
+cm_global.pip_blacklist = {'torch', 'torchsde', 'torchvision'}
+cm_global.pip_downgrade_blacklist = ['torch', 'torchsde', 'torchvision', 'transformers', 'safetensors', 'kornia']
+cm_global.pip_overrides = {'numpy': 'numpy<2'}
 if os.path.exists(os.path.join(manager_util.comfyui_manager_path, "pip_overrides.json")):
     with open(os.path.join(manager_util.comfyui_manager_path, "pip_overrides.json"), 'r', encoding="UTF-8", errors="ignore") as json_file:
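For context on the hunk above: `pip_overrides.json` holds the same package-to-replacement mapping as the in-code default `{'numpy': 'numpy<2'}`. A small illustrative sketch of producing such a file; only the `numpy` entry is grounded in this diff, and the relative file path is an assumption:

```python
import json

# Mapping of requested package -> replacement requirement spec.
overrides = {"numpy": "numpy<2"}  # the only entry shown in the diff

with open("pip_overrides.json", "w", encoding="UTF-8") as f:
    json.dump(overrides, f, indent=4)
```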
@@ -151,9 +147,7 @@ class Ctx:
         if os.path.exists(core.manager_pip_overrides_path):
             with open(core.manager_pip_overrides_path, 'r', encoding="UTF-8", errors="ignore") as json_file:
                 cm_global.pip_overrides = json.load(json_file)
-        if sys.version_info < (3, 13):
-            cm_global.pip_overrides = {'numpy': 'numpy<2'}
+        cm_global.pip_overrides = {'numpy': 'numpy<2'}
 
         if os.path.exists(core.manager_pip_blacklist_path):
             with open(core.manager_pip_blacklist_path, 'r', encoding="UTF-8", errors="ignore") as f:
@@ -190,18 +184,13 @@ class Ctx:
 cmd_ctx = Ctx()
 
-def install_node(node_spec_str, is_all=False, cnt_msg='', **kwargs):
-    exit_on_fail = kwargs.get('exit_on_fail', False)
-    print(f"install_node exit on fail:{exit_on_fail}...")
+def install_node(node_spec_str, is_all=False, cnt_msg=''):
     if core.is_valid_url(node_spec_str):
         # install via urls
         res = asyncio.run(core.gitclone_install(node_spec_str, no_deps=cmd_ctx.no_deps))
         if not res.result:
             print(res.msg)
             print(f"[bold red]ERROR: An error occurred while installing '{node_spec_str}'.[/bold red]")
-            if exit_on_fail:
-                sys.exit(1)
         else:
             print(f"{cnt_msg} [INSTALLED] {node_spec_str:50}")
     else:
@@ -236,8 +225,6 @@ def install_node(node_spec_str, is_all=False, cnt_msg='', **kwargs):
             print("")
         else:
             print(f"[bold red]ERROR: An error occurred while installing '{node_name}'.\n{res.msg}[/bold red]")
-            if exit_on_fail:
-                sys.exit(1)
 
 def reinstall_node(node_spec_str, is_all=False, cnt_msg=''):
@@ -599,7 +586,7 @@ def get_all_installed_node_specs():
     return res
 
-def for_each_nodes(nodes, act, allow_all=True, **kwargs):
+def for_each_nodes(nodes, act, allow_all=True):
     is_all = False
     if allow_all and 'all' in nodes:
         is_all = True
@@ -611,7 +598,7 @@ def for_each_nodes(nodes, act, allow_all=True, **kwargs):
     i = 1
     for x in nodes:
         try:
-            act(x, is_all=is_all, cnt_msg=f'{i}/{total}', **kwargs)
+            act(x, is_all=is_all, cnt_msg=f'{i}/{total}')
         except Exception as e:
             print(f"ERROR: {e}")
             traceback.print_exc()
@@ -655,17 +642,13 @@ def install(
         None,
         help="user directory"
     ),
-    exit_on_fail: bool = typer.Option(
-        False,
-        help="Exit on failure"
-    )
 ):
     cmd_ctx.set_user_directory(user_directory)
     cmd_ctx.set_channel_mode(channel, mode)
     cmd_ctx.set_no_deps(no_deps)
     pip_fixer = manager_util.PIPFixer(manager_util.get_installed_packages(), comfy_path, core.manager_files_path)
-    for_each_nodes(nodes, act=install_node, exit_on_fail=exit_on_fail)
+    for_each_nodes(nodes, act=install_node)
     pip_fixer.fix_broken()
@@ -1064,16 +1047,18 @@ def save_snapshot(
 ):
     cmd_ctx.set_user_directory(user_directory)
 
-    if output is not None:
-        if(not output.endswith('.json') and not output.endswith('.yaml')):
-            print("[bold red]ERROR: output path should be either '.json' or '.yaml' file.[/bold red]")
-            raise typer.Exit(code=1)
-
-        dir_path = os.path.dirname(output)
-        if(dir_path != '' and not os.path.exists(dir_path)):
-            print(f"[bold red]ERROR: {output} path not exists.[/bold red]")
-            raise typer.Exit(code=1)
+    if output is None:
+        print("[bold red]ERROR: missing output path[/bold red]")
+        raise typer.Exit(code=1)
+
+    if(not output.endswith('.json') and not output.endswith('.yaml')):
+        print("[bold red]ERROR: output path should be either '.json' or '.yaml' file.[/bold red]")
+        raise typer.Exit(code=1)
+
+    dir_path = os.path.dirname(output)
+    if(dir_path != '' and not os.path.exists(dir_path)):
+        print(f"[bold red]ERROR: {output} path not exists.[/bold red]")
+        raise typer.Exit(code=1)
 
     path = asyncio.run(core.save_snapshot_with_postfix('snapshot', output, not full_snapshot))
     print(f"Current snapshot is saved as `{path}`")
@@ -1286,6 +1271,20 @@ def export_custom_node_ids(
             print(f"{x['id']}@unknown", file=output_file)
 
+@app.command(
+    "migrate",
+    help="Migrate legacy node system to new node system",
+)
+def migrate(
+    user_directory: str = typer.Option(
+        None,
+        help="user directory"
+    )
+):
+    cmd_ctx.set_user_directory(user_directory)
+
+    asyncio.run(unified_manager.migrate_unmanaged_nodes())
+
 if __name__ == '__main__':
     sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
     sys.exit(app())

View File

File diff suppressed because it is too large

View File

File diff suppressed because it is too large

View File

File diff suppressed because it is too large

View File

@@ -43,7 +43,7 @@ import manager_downloader
 from node_package import InstalledNodePackage
 
-version_code = [3, 32, 3]
+version_code = [3, 30, 3]
 version_str = f"V{version_code[0]}.{version_code[1]}" + (f'.{version_code[2]}' if len(version_code) > 2 else '')
@@ -53,11 +53,6 @@ DEFAULT_CHANNEL = "https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/ma
 default_custom_nodes_path = None
 
-class InvalidChannel(Exception):
-    def __init__(self, channel):
-        self.channel = channel
-        super().__init__(channel)
-
 def get_default_custom_nodes_path():
     global default_custom_nodes_path
     if default_custom_nodes_path is None:
@@ -256,7 +251,6 @@ comfy_ui_revision = "Unknown"
 comfy_ui_commit_datetime = datetime(1900, 1, 1, 0, 0, 0)
 channel_dict = None
-valid_channels = {'default', 'local'}
 channel_list = None
@@ -361,7 +355,7 @@ def normalize_channel(channel):
         if channel_url:
             return channel_url
 
-    raise InvalidChannel(channel)
+    raise Exception(f"Invalid channel name '{channel}'")
 
 class ManagedResult:
@@ -768,9 +762,6 @@ class UnifiedManager:
     @staticmethod
     async def load_nightly(channel, mode):
-        if channel is None:
-            return {}
-
         res = {}
 
         channel_url = normalize_channel(channel)
@@ -779,11 +770,6 @@ class UnifiedManager:
             print(f"[bold red]ERROR: Invalid mode is specified `--mode {mode}`[/bold red]", file=sys.stderr)
             return {}
 
-        # validate channel - only the channel set by the user is allowed.
-        if channel_url not in valid_channels:
-            logging.error(f'[ComfyUI-Manager] An invalid channel was used: {channel_url}')
-            raise InvalidChannel(channel_url)
-
         json_obj = await get_data_by_mode(mode, 'custom-node-list.json', channel_url=channel_url)
 
         for x in json_obj['custom_nodes']:
             try:
@@ -801,9 +787,8 @@ class UnifiedManager:
         return res
 
     async def get_custom_nodes(self, channel, mode):
-        if channel is None and mode is None:
-            channel = 'default'
-            mode = 'cache'
+        # default_channel = normalize_channel('default')
+        # cache = self.custom_node_map_cache.get((default_channel, mode))  # CNR/nightly should always be based on the default channel.
         channel = normalize_channel(channel)
         cache = self.custom_node_map_cache.get((channel, mode))  # CNR/nightly should always be based on the default channel.
@@ -812,6 +797,7 @@ class UnifiedManager:
             return cache
 
         channel = normalize_channel(channel)
+        print(f"nightly_channel: {channel}/{mode}")
 
         nodes = await self.load_nightly(channel, mode)
 
         res = {}
@@ -855,7 +841,6 @@ class UnifiedManager:
         install_script_path = os.path.join(repo_path, "install.py")
         requirements_path = os.path.join(repo_path, "requirements.txt")
 
-        res = True
         if lazy_mode:
             install_cmd = ["#LAZY-INSTALL-SCRIPT", sys.executable]
             return try_install_script(url, repo_path, install_cmd)
@@ -863,25 +848,26 @@ class UnifiedManager:
         if os.path.exists(requirements_path) and not no_deps:
             print("Install: pip packages")
             pip_fixer = manager_util.PIPFixer(manager_util.get_installed_packages(), comfy_path, manager_files_path)
+            res = True
             lines = manager_util.robust_readlines(requirements_path)
             for line in lines:
                 package_name = remap_pip_package(line.strip())
                 if package_name and not package_name.startswith('#') and package_name not in self.processed_install:
                     self.processed_install.add(package_name)
-                    clean_package_name = package_name.split('#')[0].strip()
-                    install_cmd = manager_util.make_pip_cmd(["install", clean_package_name])
-                    if clean_package_name != "" and not clean_package_name.startswith('#'):
+                    install_cmd = manager_util.make_pip_cmd(["install", package_name])
+                    if package_name.strip() != "" and not package_name.startswith('#'):
                         res = res and try_install_script(url, repo_path, install_cmd, instant_execution=instant_execution)
             pip_fixer.fix_broken()
+            return res
 
         if os.path.exists(install_script_path) and install_script_path not in self.processed_install:
             self.processed_install.add(install_script_path)
             print("Install: install script")
             install_cmd = [sys.executable, "install.py"]
-            return res and try_install_script(url, repo_path, install_cmd, instant_execution=instant_execution)
+            return try_install_script(url, repo_path, install_cmd, instant_execution=instant_execution)
 
-        return res
+        return True
 
     def reserve_cnr_switch(self, target, zip_url, from_path, to_path, no_deps):
         script_path = os.path.join(manager_startup_script_path, "install-scripts.txt")
@@ -893,6 +879,14 @@
         return True
 
+    def reserve_migration(self, moves):
+        script_path = os.path.join(manager_startup_script_path, "install-scripts.txt")
+
+        with open(script_path, "a") as file:
+            obj = ["", "#LAZY-MIGRATION", moves]
+            file.write(f"{obj}\n")
+
+        return True
+
     def unified_fix(self, node_id, version_spec, instant_execution=False, no_deps=False):
         """
         fix dependencies
@@ -1321,66 +1315,67 @@ class UnifiedManager:
             return result.fail(f'Path not found: {repo_path}')
 
         # version check
-        with git.Repo(repo_path) as repo:
-            if repo.head.is_detached:
-                if not switch_to_default_branch(repo):
-                    return result.fail(f"Failed to switch to default branch: {repo_path}")
-            [the remainder of the removed block is the same logic as the added block below, nested one level deeper inside the `with` statement]
+        repo = git.Repo(repo_path)
+
+        if repo.head.is_detached:
+            if not switch_to_default_branch(repo):
+                return result.fail(f"Failed to switch to default branch: {repo_path}")
+
+        current_branch = repo.active_branch
+        branch_name = current_branch.name
+
+        if current_branch.tracking_branch() is None:
+            print(f"[ComfyUI-Manager] There is no tracking branch ({current_branch})")
+            remote_name = get_remote_name(repo)
+        else:
+            remote_name = current_branch.tracking_branch().remote_name
+
+        if remote_name is None:
+            return result.fail(f"Failed to get remote when installing: {repo_path}")
+
+        remote = repo.remote(name=remote_name)
+
+        try:
+            remote.fetch()
+        except Exception as e:
+            if 'detected dubious' in str(e):
+                print(f"[ComfyUI-Manager] Try fixing 'dubious repository' error on '{repo_path}' repository")
+                safedir_path = repo_path.replace('\\', '/')
+                subprocess.run(['git', 'config', '--global', '--add', 'safe.directory', safedir_path])
+                try:
+                    remote.fetch()
+                except Exception:
+                    print("\n[ComfyUI-Manager] Failed to fixing repository setup. Please execute this command on cmd: \n"
+                          "-----------------------------------------------------------------------------------------\n"
+                          f'git config --global --add safe.directory "{safedir_path}"\n'
+                          "-----------------------------------------------------------------------------------------\n")
+
+        commit_hash = repo.head.commit.hexsha
+
+        if f'{remote_name}/{branch_name}' in repo.refs:
+            remote_commit_hash = repo.refs[f'{remote_name}/{branch_name}'].object.hexsha
+        else:
+            return result.fail(f"Not updatable branch: {branch_name}")
+
+        if commit_hash != remote_commit_hash:
+            git_pull(repo_path)
+
+            if len(repo.remotes) > 0:
+                url = repo.remotes[0].url
+            else:
+                url = "unknown repo"
+
+            def postinstall():
+                return self.execute_install_script(url, repo_path, instant_execution=instant_execution, no_deps=no_deps)
+
+            if return_postinstall:
+                return result.with_postinstall(postinstall)
+            else:
+                if not postinstall():
+                    return result.fail(f"Failed to execute install script: {url}")
+
+            return result
+        else:
+            return ManagedResult('skip').with_msg('Up to date')
 
     def unified_update(self, node_id, version_spec=None, instant_execution=False, no_deps=False, return_postinstall=False):
         orig_print(f"\x1b[2K\rUpdating: {node_id}", end='')
@@ -1420,11 +1415,7 @@ class UnifiedManager:
             version_spec = self.resolve_unspecified_version(node_id)
 
         if version_spec == 'unknown' or version_spec == 'nightly':
-            try:
-                custom_nodes = await self.get_custom_nodes(channel, mode)
-            except InvalidChannel as e:
-                return ManagedResult('fail').fail(f'Invalid channel is used: {e.channel}')
+            custom_nodes = await self.get_custom_nodes(channel, mode)
 
             the_node = custom_nodes.get(node_id)
             if the_node is not None:
                 if version_spec == 'unknown':
@@ -1482,6 +1473,28 @@ class UnifiedManager:
         return res
 
+    async def migrate_unmanaged_nodes(self):
+        """
+        fix path for nightly and unknown nodes of unmanaged nodes
+        """
+        await self.reload('cache')
+        await self.get_custom_nodes('default', 'cache')
+
+        print("Migration: STAGE 1")
+
+        moves = []
+
+        # migrate nightly inactive
+        for x, v in self.nightly_inactive_nodes.items():
+            if v.endswith('@nightly'):
+                continue
+
+            new_path = os.path.join(get_default_custom_nodes_path(), '.disabled', f"{x}@nightly")
+            moves.append((v, new_path))
+
+        self.reserve_migration(moves)
+        print("DONE (Migration reserved)")
+
 unified_manager = UnifiedManager()
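To make the reserved-migration mechanism above easier to follow: `migrate_unmanaged_nodes()` collects `(current_path, new_path)` pairs and `reserve_migration()` appends their literal repr, tagged `#LAZY-MIGRATION`, as one line of `install-scripts.txt`. A standalone sketch of what gets written; the pack name and `custom_nodes` path are illustrative, not taken from the diff:

```python
import os

# Each move renames a legacy nightly checkout to the "<pack>@nightly" layout under .disabled.
custom_nodes_path = "custom_nodes"  # assumed default custom nodes path
moves = [
    (os.path.join(custom_nodes_path, ".disabled", "SomePack"),
     os.path.join(custom_nodes_path, ".disabled", "SomePack@nightly")),
]

# reserve_migration() writes exactly this repr as a single line.
obj = ["", "#LAZY-MIGRATION", moves]
print(f"{obj}")
```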
@@ -1551,14 +1564,8 @@ def get_installed_node_packs():
     return res
 
-def refresh_channel_dict():
-    if channel_dict is None:
-        get_channel_dict()
-
 def get_channel_dict():
     global channel_dict
-    global valid_channels
 
     if channel_dict is None:
         channel_dict = {}
@@ -1572,7 +1579,6 @@ def get_channel_dict():
                 channel_info = x.split("::")
                 if len(channel_info) == 2:
                     channel_dict[channel_info[0]] = channel_info[1]
-                    valid_channels.add(channel_info[1])
 
     return channel_dict
@@ -1625,6 +1631,7 @@ def write_config():
         'model_download_by_agent': get_config()['model_download_by_agent'],
         'downgrade_blacklist': get_config()['downgrade_blacklist'],
         'security_level': get_config()['security_level'],
+        'skip_migration_check': get_config()['skip_migration_check'],
         'always_lazy_install': get_config()['always_lazy_install'],
         'network_mode': get_config()['network_mode'],
         'db_mode': get_config()['db_mode'],
@@ -1663,6 +1670,7 @@ def read_config():
            'windows_selector_event_loop_policy': get_bool('windows_selector_event_loop_policy', False),
            'model_download_by_agent': get_bool('model_download_by_agent', False),
            'downgrade_blacklist': default_conf.get('downgrade_blacklist', '').lower(),
+           'skip_migration_check': get_bool('skip_migration_check', False),
            'always_lazy_install': get_bool('always_lazy_install', False),
            'network_mode': default_conf.get('network_mode', 'public').lower(),
            'security_level': default_conf.get('security_level', 'normal').lower(),
@@ -1686,6 +1694,7 @@ def read_config():
            'windows_selector_event_loop_policy': False,
            'model_download_by_agent': False,
            'downgrade_blacklist': '',
+           'skip_migration_check': False,
            'always_lazy_install': False,
            'network_mode': 'public',        # public | private | offline
            'security_level': 'normal',      # strong | normal | normal- | weak
@@ -2073,13 +2082,6 @@ def is_valid_url(url):
     return False
 
-def extract_url_and_commit_id(s):
-    index = s.rfind('@')
-    if index == -1:
-        return (s, '')
-    else:
-        return (s[:index], s[index+1:])
-
 async def gitclone_install(url, instant_execution=False, msg_prefix='', no_deps=False):
     await unified_manager.reload('cache')
     await unified_manager.get_custom_nodes('default', 'cache')
@@ -2097,11 +2099,8 @@ async def gitclone_install(url, instant_execution=False, msg_prefix='', no_deps=
     cnr = unified_manager.get_cnr_by_repo(url)
     if cnr:
         cnr_id = cnr['id']
-        return await unified_manager.install_by_id(cnr_id, version_spec=None, channel='default', mode='cache')
+        return await unified_manager.install_by_id(cnr_id, version_spec='nightly')
     else:
-        new_url, commit_id = extract_url_and_commit_id(url)
-        if commit_id != "":
-            url = new_url
-
         repo_name = os.path.splitext(os.path.basename(url))[0]
 
         # NOTE: Keep original name as possible if unknown node
@@ -2134,10 +2133,6 @@ async def gitclone_install(url, instant_execution=False, msg_prefix='', no_deps=
                 return result.fail(f"Failed to clone '{clone_url}' into '{repo_path}'")
             else:
                 repo = git.Repo.clone_from(clone_url, repo_path, recursive=True, progress=GitProgress())
-                if commit_id!= "":
-                    repo.git.checkout(commit_id)
-                    repo.git.submodule('update', '--init', '--recursive')
-
                 repo.git.clear_cache()
                 repo.close()
@@ -2654,8 +2649,22 @@ async def get_current_snapshot(custom_nodes_only = False):
                 cnr_custom_nodes[info['id']] = info['ver']
             else:
-                commit_hash = git_utils.get_commit_hash(fullpath)
-                url = git_utils.git_url(fullpath)
+                repo = git.Repo(fullpath)
+
+                if repo.head.is_detached:
+                    remote_name = get_remote_name(repo)
+                else:
+                    current_branch = repo.active_branch
+                    if current_branch.tracking_branch() is None:
+                        remote_name = get_remote_name(repo)
+                    else:
+                        remote_name = current_branch.tracking_branch().remote_name
+
+                commit_hash = repo.head.commit.hexsha
+                url = repo.remotes[remote_name].url
+
                 git_custom_nodes[url] = dict(hash=commit_hash, disabled=is_disabled)
         except:
             print(f"Failed to extract snapshots for the custom node '{path}'.")
@@ -3018,9 +3027,6 @@ async def restore_snapshot(snapshot_path, git_helper_extras=None):
     enabled_repos = []
     disabled_repos = []
     skip_node_packs = []
-    switched_node_packs = []
-    installed_node_packs = []
-    failed = []
 
     await unified_manager.reload('cache')
     await unified_manager.get_custom_nodes('default', 'cache')
@@ -3066,13 +3072,8 @@ async def restore_snapshot(snapshot_path, git_helper_extras=None):
             disabled_repos.append(x)
 
     for x in todo_checkout:
-        ps = unified_manager.cnr_switch_version(x[0], x[1], instant_execution=True, no_deps=True, return_postinstall=False)
-        if ps.action == 'switch-cnr' and ps.result:
-            switched_node_packs.append(f"{x[0]}@{x[1]}")
-        elif ps.action == 'skip':
-            skip_node_packs.append(f"{x[0]}@{x[1]}")
-        elif not ps.result:
-            failed.append(f"{x[0]}@{x[1]}")
+        unified_manager.cnr_switch_version(x[0], x[1], instant_execution=True, no_deps=True, return_postinstall=False)
+        checkout_repos.append(x[1])
 
     # install listed cnr nodes
     for k, v in cnr_info.items():
@@ -3080,9 +3081,7 @@ async def restore_snapshot(snapshot_path, git_helper_extras=None):
                 continue
 
             ps = await unified_manager.install_by_id(k, version_spec=v, instant_execution=True, return_postinstall=True)
-            if ps.action == 'install-cnr' and ps.result:
-                installed_node_packs.append(f"{k}@{v}")
+            cloned_repos.append(k)
 
             if ps is not None and ps.result:
                 if hasattr(ps, 'postinstall'):
                     postinstalls.append(ps.postinstall)
@@ -3140,41 +3139,40 @@ async def restore_snapshot(snapshot_path, git_helper_extras=None):
             disabled_repos.append(x)
 
     for x in todo_enable:
-        res = unified_manager.unified_enable(x[0], 'nightly')
+        res = unified_manager.unified_enable(x, 'nightly')
         is_switched = False
         if res and res.target:
             is_switched = repo_switch_commit(res.target, x[1])
 
         if is_switched:
-            checkout_repos.append(f"{x[0]}@{x[1]}")
+            checkout_repos.append(x)
         else:
-            enabled_repos.append(x[0])
+            enabled_repos.append(x)
 
     for x in todo_checkout:
         is_switched = repo_switch_commit(x[0], x[1])
 
         if is_switched:
-            checkout_repos.append(f"{x[0]}@{x[1]}")
+            checkout_repos.append(x)
+        else:
+            skip_node_packs.append(x[0])
 
     for x in git_info.keys():
         normalized_url = git_utils.normalize_url(x)
         cnr = unified_manager.repo_cnr_map.get(normalized_url)
         if cnr is not None:
             pack_id = cnr['id']
-            res = await unified_manager.install_by_id(pack_id, 'nightly', instant_execution=True, no_deps=False, return_postinstall=False)
-            if res.action == 'install-git' and res.result:
-                cloned_repos.append(pack_id)
-            elif res.action == 'skip':
-                skip_node_packs.append(pack_id)
-            elif not res.result:
-                failed.append(pack_id)
+            await unified_manager.install_by_id(pack_id, 'nightly', instant_execution=True, no_deps=False, return_postinstall=False)
+            cloned_repos.append(pack_id)
             processed_urls.append(x)
 
     for x in processed_urls:
         if x in git_info:
             del git_info[x]
 
+    # remained nightly will be installed and migrated
+
     # for unknown restore
     todo_disable = []
     todo_enable = []
@@ -3221,15 +3219,15 @@ async def restore_snapshot(snapshot_path, git_helper_extras=None):
             is_switched = repo_switch_commit(res.target, x[1])
 
         if is_switched:
-            checkout_repos.append(f"{x[0]}@{x[1]}")
+            checkout_repos.append(x)
         else:
-            enabled_repos.append(x[0])
+            enabled_repos.append(x)
 
     for x in todo_checkout:
         is_switched = repo_switch_commit(x[0], x[1])
 
         if is_switched:
-            checkout_repos.append(f"{x[0]}@{x[1]}")
+            checkout_repos.append(x)
         else:
             skip_node_packs.append(x[0])
@@ -3246,28 +3244,53 @@ async def restore_snapshot(snapshot_path, git_helper_extras=None):
             unified_manager.repo_install(repo_url, to_path, instant_execution=True, no_deps=False, return_postinstall=False)
             cloned_repos.append(repo_name)
 
+    # reload
+    await unified_manager.migrate_unmanaged_nodes()
+
     # print summary
     for x in cloned_repos:
         print(f"[ INSTALLED ] {x}")
-    for x in installed_node_packs:
-        print(f"[ INSTALLED ] {x}")
     for x in checkout_repos:
         print(f"[ CHECKOUT ] {x}")
-    for x in switched_node_packs:
-        print(f"[ SWITCHED ] {x}")
     for x in enabled_repos:
         print(f"[ ENABLED ] {x}")
     for x in disabled_repos:
         print(f"[ DISABLED ] {x}")
     for x in skip_node_packs:
         print(f"[ SKIPPED ] {x}")
-    for x in failed:
-        print(f"[ FAILED ] {x}")
 
     # if is_failed:
     #     print("[bold red]ERROR: Failed to restore snapshot.[/bold red]")
 
+# check need to migrate
+need_to_migrate = False
+
+async def check_need_to_migrate():
+    global need_to_migrate
+
+    await unified_manager.reload('cache')
+    await unified_manager.load_nightly(channel='default', mode='cache')
+
+    legacy_custom_nodes = []
+
+    for x in unified_manager.active_nodes.values():
+        if x[0] == 'nightly' and not x[1].endswith('@nightly'):
+            legacy_custom_nodes.append(x[1])
+
+    for x in unified_manager.nightly_inactive_nodes.values():
+        if not x.endswith('@nightly'):
+            legacy_custom_nodes.append(x)
+
+    if len(legacy_custom_nodes) > 0:
+        print("\n--------------------- ComfyUI-Manager migration notice --------------------")
+        print("The following custom nodes were installed using the old management method and require migration:\n")
+        print("\n".join(legacy_custom_nodes))
+        print("---------------------------------------------------------------------------\n")
+
+        need_to_migrate = True
+
 def get_comfyui_versions(repo=None):
     if repo is None:
         repo = git.Repo(comfy_path)

View File

@@ -279,17 +279,8 @@ def get_model_dir(data, show_log=False):
         else:
             models_base = folder_paths.models_dir
 
-    # NOTE: Validate to prevent path traversal.
-    if any(char in data['filename'] for char in {'/', '\\', ':'}):
-        return None
-
 def resolve_custom_node(save_path):
     save_path = save_path[13:] # remove 'custom_nodes/'
 
-    # NOTE: Validate to prevent path traversal.
-    if save_path.startswith(os.path.sep) or ':' in save_path:
-        return None
-
     repo_name = save_path.replace('\\','/').split('/')[0] # get custom node repo name
 
     # NOTE: The creation of files within the custom node path should be removed in the future.
@@ -408,6 +399,7 @@ async def task_worker():
         try:
             node_spec = core.unified_manager.resolve_node_spec(node_spec_str)
             if node_spec is None:
                 logging.error(f"Cannot resolve install target: '{node_spec_str}'")
                 return f"Cannot resolve install target: '{node_spec_str}'"
@@ -937,7 +929,6 @@ def check_model_installed(json_obj):
 @routes.get("/externalmodel/getlist")
 async def fetch_externalmodel_list(request):
-    # The model list is only allowed in the default channel, yet.
     json_obj = await core.get_data_by_mode(request.rel_url.query["mode"], 'model-list.json')
 
     check_model_installed(json_obj)
@@ -1206,8 +1197,9 @@ async def install_custom_node(request):
     git_url = None
 
-    selected_version = json_data.get('selected_version')
-    if json_data['version'] != 'unknown' and selected_version != 'unknown':
+    if json_data['version'] != 'unknown':
+        selected_version = json_data.get('selected_version')
 
         if skip_post_install:
             if cnr_id in core.unified_manager.nightly_inactive_nodes or cnr_id in core.unified_manager.cnr_inactive_nodes:
                 core.unified_manager.unified_enable(cnr_id)
@@ -1224,9 +1216,6 @@ async def install_custom_node(request):
         if git_url is None:
             logging.error(f"[ComfyUI-Manager] Following node pack doesn't provide `nightly` version: ${git_url}")
             return web.Response(status=404, text=f"Following node pack doesn't provide `nightly` version: ${git_url}")
-    elif json_data['version'] != 'unknown' and selected_version == 'unknown':
-        logging.error(f"[ComfyUI-Manager] Invalid installation request: {json_data}")
-        return web.Response(status=400, text="Invalid installation request")
     else:
         # unknown
         unknown_name = os.path.basename(json_data['files'][0])
@@ -1418,20 +1407,17 @@ async def disable_node(request):
     return web.Response(status=200)
 
-async def check_whitelist_for_model(item):
-    json_obj = await core.get_data_by_mode('cache', 'model-list.json')
-
-    for x in json_obj.get('models', []):
-        if x['save_path'] == item['save_path'] and x['base'] == item['base'] and x['filename'] == item['filename']:
-            return True
-
-    json_obj = await core.get_data_by_mode('local', 'model-list.json')
-
-    for x in json_obj.get('models', []):
-        if x['save_path'] == item['save_path'] and x['base'] == item['base'] and x['filename'] == item['filename']:
-            return True
-
-    return False
+@routes.get("/manager/migrate_unmanaged_nodes")
+async def migrate_unmanaged_nodes(request):
+    logging.info("[ComfyUI-Manager] Migrating unmanaged nodes...")
+    await core.unified_manager.migrate_unmanaged_nodes()
+    logging.info("Done.")
+    return web.Response(status=200)
+
+@routes.get("/manager/need_to_migrate")
+async def need_to_migrate(request):
+    return web.Response(text=str(core.need_to_migrate), status=200)
 
 @routes.post("/manager/queue/install_model")
@@ -1442,11 +1428,6 @@ async def install_model(request):
         logging.error(SECURITY_MESSAGE_MIDDLE_OR_BELOW)
         return web.Response(status=403, text="A security error has occurred. Please check the terminal logs")
 
-    # validate request
-    if not await check_whitelist_for_model(json_data):
-        logging.error(f"[ComfyUI-Manager] Invalid model install request is detected: {json_data}")
-        return web.Response(status=400, text="Invalid model install request is detected")
-
     if not json_data['filename'].endswith('.safetensors') and not is_allowed_security_level('high'):
         models_json = await core.get_data_by_mode('cache', 'model-list.json', 'default')
@@ -1714,7 +1695,6 @@ cm_global.register_api('cm.try-install-custom-node', confirm_try_install)
 async def default_cache_update():
-    core.refresh_channel_dict()
     channel_url = core.get_config()['channel_url']
 
     async def get_cache(filename):
         try:
@@ -1754,6 +1734,11 @@ async def default_cache_update():
     logging.info("[ComfyUI-Manager] All startup tasks have been completed.")
 
+    # NOTE: hide migration button temporarily.
+    # if not core.get_config()['skip_migration_check']:
+    #     await core.check_need_to_migrate()
+    # else:
+    #     logging.info("[ComfyUI-Manager] Migration check is skipped...")
 
 threading.Thread(target=lambda: asyncio.run(default_cache_update())).start()
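The two routes added in this file can be exercised directly over HTTP. A minimal sketch; the host and port of the local ComfyUI instance are assumptions (8188 is the usual default), while the endpoint paths come from the diff:

```python
import urllib.request

BASE = "http://127.0.0.1:8188"  # assumed local ComfyUI address

# Returns the text "True" or "False", mirroring str(core.need_to_migrate).
with urllib.request.urlopen(BASE + "/manager/need_to_migrate") as resp:
    print("need to migrate:", resp.read().decode())

# Triggering the migration itself (commented out because it renames installed node packs):
# urllib.request.urlopen(BASE + "/manager/migrate_unmanaged_nodes")
```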

View File

@@ -15,7 +15,6 @@ import re
 import logging
 import platform
 import shlex
-import cm_global
 
 cache_lock = threading.Lock()
@@ -36,17 +35,11 @@ def add_python_path_to_env():
 def make_pip_cmd(cmd):
-    if 'python_embeded' in sys.executable:
-        if use_uv:
-            return [sys.executable, '-s', '-m', 'uv', 'pip'] + cmd
-        else:
-            return [sys.executable, '-s', '-m', 'pip'] + cmd
+    if use_uv:
+        return [sys.executable, '-m', 'uv', 'pip'] + cmd
     else:
-        # FIXED: https://github.com/ltdrdata/ComfyUI-Manager/issues/1667
-        if use_uv:
-            return [sys.executable, '-m', 'uv', 'pip'] + cmd
-        else:
-            return [sys.executable, '-m', 'pip'] + cmd
+        return [sys.executable, '-m', 'pip'] + cmd
 
 # DON'T USE StrictVersion - cannot handle pre_release version
 # try:
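To clarify what the `make_pip_cmd` change above amounts to: the removed variant special-cases the embedded Windows Python (adding `-s`), while the added variant always drives `pip` (or `uv pip`) through the current interpreter. A standalone restatement of both behaviours; `use_uv` is a module-level flag in `manager_util.py`, hard-coded here only for illustration:

```python
import sys

use_uv = False  # stand-in for the module-level flag

def make_pip_cmd_new(cmd):
    # Behaviour of the added (right-hand) version.
    if use_uv:
        return [sys.executable, '-m', 'uv', 'pip'] + cmd
    else:
        return [sys.executable, '-m', 'pip'] + cmd

def make_pip_cmd_old(cmd):
    # Behaviour of the removed (left-hand) version: '-s' keeps the embedded
    # python_embeded interpreter from picking up user site-packages.
    base = [sys.executable, '-s', '-m'] if 'python_embeded' in sys.executable else [sys.executable, '-m']
    return base + (['uv', 'pip'] if use_uv else ['pip']) + cmd

print(make_pip_cmd_new(["install", "numpy<2"]))
print(make_pip_cmd_old(["install", "numpy<2"]))
```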
@@ -257,7 +250,7 @@ def get_installed_packages(renew=False):
                     pip_map[normalized_name] = y[1]
         except subprocess.CalledProcessError:
             logging.error("[ComfyUI-Manager] Failed to retrieve the information of installed pip packages.")
-            return {}
+            return set()
 
     return pip_map
@@ -308,7 +301,6 @@ def parse_requirement_line(line):
 torch_torchvision_torchaudio_version_map = {
-    '2.7.0': ('0.22.0', '2.7.0'),
     '2.6.0': ('0.21.0', '2.6.0'),
     '2.5.1': ('0.20.0', '2.5.0'),
     '2.5.0': ('0.20.0', '2.5.0'),
@@ -412,9 +404,8 @@ class PIPFixer:
             if len(targets) > 0:
                 for x in targets:
-                    if sys.version_info < (3, 13):
-                        cmd = make_pip_cmd(['install', f"{x}=={versions[0].version_string}", "numpy<2"])
-                        subprocess.check_output(cmd, universal_newlines=True)
+                    cmd = make_pip_cmd(['install', f"{x}=={versions[0].version_string}", "numpy<2"])
+                    subprocess.check_output(cmd, universal_newlines=True)
 
                 logging.info(f"[ComfyUI-Manager] 'opencv' dependencies were fixed: {targets}")
         except Exception as e:
@@ -422,21 +413,17 @@ class PIPFixer:
             logging.error(e)
 
         # fix numpy
-        if sys.version_info >= (3, 13):
-            logging.info("[ComfyUI-Manager] In Python 3.13 and above, PIP Fixer does not downgrade `numpy` below version 2.0. If you need to force a downgrade of `numpy`, please use `pip_auto_fix.list`.")
-        else:
-            try:
-                np = new_pip_versions.get('numpy')
-                if cm_global.pip_overrides.get('numpy') == 'numpy<2':
-                    if np is not None:
-                        if StrictVersion(np) >= StrictVersion('2'):
-                            cmd = make_pip_cmd(['install', "numpy<2"])
-                            subprocess.check_output(cmd , universal_newlines=True)
+        try:
+            np = new_pip_versions.get('numpy')
+            if np is not None:
+                if StrictVersion(np) >= StrictVersion('2'):
+                    cmd = make_pip_cmd(['install', "numpy<2"])
+                    subprocess.check_output(cmd , universal_newlines=True)
 
             logging.info("[ComfyUI-Manager] 'numpy' dependency were fixed")
         except Exception as e:
             logging.error("[ComfyUI-Manager] Failed to restore numpy")
             logging.error(e)
 
         # fix missing frontend
         try:
@@ -452,12 +439,10 @@ class PIPFixer:
                 lines = file.readlines()
                 front_line = next((line.strip() for line in lines if line.startswith('comfyui-frontend-package')), None)
-                if front_line is None:
-                    logging.info("[ComfyUI-Manager] Skipped fixing the 'comfyui-frontend-package' dependency because the ComfyUI is outdated.")
-                else:
-                    cmd = make_pip_cmd(['install', front_line])
-                    subprocess.check_output(cmd , universal_newlines=True)
-                    logging.info("[ComfyUI-Manager] 'comfyui-frontend-package' dependency were fixed")
+                cmd = make_pip_cmd(['install', front_line])
+                subprocess.check_output(cmd , universal_newlines=True)
+
+                logging.info("[ComfyUI-Manager] 'comfyui-frontend-package' dependency were fixed")
         except Exception as e:
             logging.error("[ComfyUI-Manager] Failed to restore comfyui-frontend-package")
             logging.error(e)

View File

@@ -13,7 +13,7 @@ import {
 import { OpenArtShareDialog } from "./comfyui-share-openart.js";
 import {
     free_models, install_pip, install_via_git_url, manager_instance,
-    rebootAPI, setManagerInstance, show_message, customAlert, customPrompt,
+    rebootAPI, migrateAPI, setManagerInstance, show_message, customAlert, customPrompt,
     infoToast, showTerminal, setNeedRestart
 } from "./common.js";
 import { ComponentBuilderDialog, getPureName, load_components, set_component_policy } from "./components-manager.js";
@@ -946,6 +946,28 @@ class ManagerMenuDialog extends ComfyDialog {
             restart_stop_button,
         ];
 
+        let migration_btn =
+            $el("button.cm-button-orange", {
+                type: "button",
+                textContent: "Migrate to New Node System",
+                onclick: () => migrateAPI()
+            });
+
+        migration_btn.style.display = 'none';
+        res.push(migration_btn);
+
+        api.fetchApi('/manager/need_to_migrate')
+            .then(response => response.text())
+            .then(text => {
+                if (text === 'True') {
+                    migration_btn.style.display = 'block';
+                }
+            })
+            .catch(error => {
+                console.error('Error checking migration status:', error);
+            });
+
         return res;
     }

View File

@@ -1,7 +1,6 @@
 import { app } from "../../scripts/app.js";
 import { api } from "../../scripts/api.js";
 import { $el, ComfyDialog } from "../../scripts/ui.js";
-import { getBestPosition, getPositionStyle, getRect } from './popover-helper.js';
 
 function internalCustomConfirm(message, confirmMessage, cancelMessage) {
@@ -182,6 +181,23 @@ export function rebootAPI() {
 }
 
+export async function migrateAPI() {
+    let confirmed = await customConfirm("When performing a migration, existing installed custom nodes will be renamed and the server will be restarted. Are you sure you want to apply this?\n\n(If you don't perform the migration, ComfyUI-Manager's start-up time will be longer each time due to re-checking during startup.)")
+
+    if (confirmed) {
+        try {
+            await api.fetchApi("/manager/migrate_unmanaged_nodes");
+            api.fetchApi("/manager/reboot");
+        }
+        catch(exception) {
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
 export var manager_instance = null;
 export function setManagerInstance(obj) {
@@ -388,14 +404,12 @@ export async function fetchData(route, options) {
} }
} }
// https://cenfun.github.io/open-icons/
export const icons = { export const icons = {
search: '<svg viewBox="0 0 24 24" width="100%" height="100%" pointer-events="none" xmlns="http://www.w3.org/2000/svg"><path fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="m21 21-4.486-4.494M19 10.5a8.5 8.5 0 1 1-17 0 8.5 8.5 0 0 1 17 0"/></svg>', search: '<svg viewBox="0 0 24 24" width="100%" height="100%" pointer-events="none" xmlns="http://www.w3.org/2000/svg"><path fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="m21 21-4.486-4.494M19 10.5a8.5 8.5 0 1 1-17 0 8.5 8.5 0 0 1 17 0"/></svg>',
extensions: '<svg viewBox="64 64 896 896" width="100%" height="100%" pointer-events="none" xmlns="http://www.w3.org/2000/svg"><path fill="currentColor" d="M843.5 737.4c-12.4-75.2-79.2-129.1-155.3-125.4S550.9 676 546 752c-153.5-4.8-208-40.7-199.1-113.7 3.3-27.3 19.8-41.9 50.1-49 18.4-4.3 38.8-4.9 57.3-3.2 1.7.2 3.5.3 5.2.5 11.3 2.7 22.8 5 34.3 6.8 34.1 5.6 68.8 8.4 101.8 6.6 92.8-5 156-45.9 159.2-132.7 3.1-84.1-54.7-143.7-147.9-183.6-29.9-12.8-61.6-22.7-93.3-30.2-14.3-3.4-26.3-5.7-35.2-7.2-7.9-75.9-71.5-133.8-147.8-134.4S189.7 168 180.5 243.8s40 146.3 114.2 163.9 149.9-23.3 175.7-95.1c9.4 1.7 18.7 3.6 28 5.8 28.2 6.6 56.4 15.4 82.4 26.6 70.7 30.2 109.3 70.1 107.5 119.9-1.6 44.6-33.6 65.2-96.2 68.6-27.5 1.5-57.6-.9-87.3-5.8-8.3-1.4-15.9-2.8-22.6-4.3-3.9-.8-6.6-1.5-7.8-1.8l-3.1-.6c-2.2-.3-5.9-.8-10.7-1.3-25-2.3-52.1-1.5-78.5 4.6-55.2 12.9-93.9 47.2-101.1 105.8-15.7 126.2 78.6 184.7 276 188.9 29.1 70.4 106.4 107.9 179.6 87 73.3-20.9 119.3-93.4 106.9-168.6M329.1 345.2a83.3 83.3 0 1 1 .01-166.61 83.3 83.3 0 0 1-.01 166.61M695.6 845a83.3 83.3 0 1 1 .01-166.61A83.3 83.3 0 0 1 695.6 845"/></svg>',
conflicts: '<svg viewBox="0 0 400 400" width="100%" height="100%" pointer-events="none" xmlns="http://www.w3.org/2000/svg"><path fill="currentColor" d="m397.2 350.4.2-.2-180-320-.2.2C213.8 24.2 207.4 20 200 20s-13.8 4.2-17.2 10.4l-.2-.2-180 320 .2.2c-1.6 2.8-2.8 6-2.8 9.6 0 11 9 20 20 20h360c11 0 20-9 20-20 0-3.6-1.2-6.8-2.8-9.6M220 340h-40v-40h40zm0-60h-40V120h40z"/></svg>', conflicts: '<svg viewBox="0 0 400 400" width="100%" height="100%" pointer-events="none" xmlns="http://www.w3.org/2000/svg"><path fill="currentColor" d="m397.2 350.4.2-.2-180-320-.2.2C213.8 24.2 207.4 20 200 20s-13.8 4.2-17.2 10.4l-.2-.2-180 320 .2.2c-1.6 2.8-2.8 6-2.8 9.6 0 11 9 20 20 20h360c11 0 20-9 20-20 0-3.6-1.2-6.8-2.8-9.6M220 340h-40v-40h40zm0-60h-40V120h40z"/></svg>',
passed: '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 426.667 426.667"><path fill="#6AC259" d="M213.333,0C95.518,0,0,95.514,0,213.333s95.518,213.333,213.333,213.333c117.828,0,213.333-95.514,213.333-213.333S331.157,0,213.333,0z M174.199,322.918l-93.935-93.931l31.309-31.309l62.626,62.622l140.894-140.898l31.309,31.309L174.199,322.918z"/></svg>', passed: '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 426.667 426.667"><path fill="#6AC259" d="M213.333,0C95.518,0,0,95.514,0,213.333s95.518,213.333,213.333,213.333c117.828,0,213.333-95.514,213.333-213.333S331.157,0,213.333,0z M174.199,322.918l-93.935-93.931l31.309-31.309l62.626,62.622l140.894-140.898l31.309,31.309L174.199,322.918z"/></svg>',
download: '<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" width="100%" height="100%" viewBox="0 0 32 32"><path fill="currentColor" d="M26 24v4H6v-4H4v4a2 2 0 0 0 2 2h20a2 2 0 0 0 2-2v-4zm0-10l-1.41-1.41L17 20.17V2h-2v18.17l-7.59-7.58L6 14l10 10l10-10z"></path></svg>', download: '<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" width="100%" height="100%" viewBox="0 0 32 32"><path fill="currentColor" d="M26 24v4H6v-4H4v4a2 2 0 0 0 2 2h20a2 2 0 0 0 2-2v-4zm0-10l-1.41-1.41L17 20.17V2h-2v18.17l-7.59-7.58L6 14l10 10l10-10z"></path></svg>'
close: '<svg xmlns="http://www.w3.org/2000/svg" pointer-events="none" width="100%" height="100%" viewBox="0 0 16 16"><g fill="currentColor"><path fill-rule="evenodd" clip-rule="evenodd" d="m7.116 8-4.558 4.558.884.884L8 8.884l4.558 4.558.884-.884L8.884 8l4.558-4.558-.884-.884L8 7.116 3.442 2.558l-.884.884L7.116 8z"/></g></svg>',
arrowRight: '<svg xmlns="http://www.w3.org/2000/svg" pointer-events="none" width="100%" height="100%" viewBox="0 0 20 20"><path fill="currentColor" fill-rule="evenodd" d="m2.542 2.154 7.254 7.26c.136.14.204.302.204.483a.73.73 0 0 1-.204.5l-7.575 7.398c-.383.317-.724.317-1.022 0-.299-.317-.299-.643 0-.98l7.08-6.918-6.754-6.763c-.237-.343-.215-.654.066-.935.281-.28.598-.295.951-.045Zm9 0 7.254 7.26c.136.14.204.302.204.483a.73.73 0 0 1-.204.5l-7.575 7.398c-.383.317-.724.317-1.022 0-.299-.317-.299-.643 0-.98l7.08-6.918-6.754-6.763c-.237-.343-.215-.654.066-.935.281-.28.598-.295.951-.045Z"/></svg>'
} }
export function sanitizeHTML(str) { export function sanitizeHTML(str) {
@@ -489,166 +503,3 @@ export function restoreColumnWidth(gridId, columns) {
}); });
} }
export function getTimeAgo(dateStr) {
const date = new Date(dateStr);
if (!date || !(date instanceof Date) || isNaN(date.getTime())) {
return "";
}
const units = [
{ max: 2760000, value: 60000, name: 'minute', past: 'a minute ago', future: 'in a minute' },
{ max: 72000000, value: 3600000, name: 'hour', past: 'an hour ago', future: 'in an hour' },
{ max: 518400000, value: 86400000, name: 'day', past: 'yesterday', future: 'tomorrow' },
{ max: 2419200000, value: 604800000, name: 'week', past: 'last week', future: 'in a week' },
{ max: 28512000000, value: 2592000000, name: 'month', past: 'last month', future: 'in a month' }
];
const diff = Date.now() - date.getTime();
// less than a minute
if (Math.abs(diff) < 60000)
return 'just now';
for (let i = 0; i < units.length; i++) {
if (Math.abs(diff) < units[i].max) {
return format(diff, units[i].value, units[i].name, units[i].past, units[i].future, diff < 0);
}
}
function format(diff, divisor, unit, past, future, isInTheFuture) {
const val = Math.round(Math.abs(diff) / divisor);
if (isInTheFuture)
return val <= 1 ? future : 'in ' + val + ' ' + unit + 's';
return val <= 1 ? past : val + ' ' + unit + 's ago';
}
return format(diff, 31536000000, 'year', 'last year', 'in a year', diff < 0);
};
export const loadCss = (cssFile) => {
const cssPath = import.meta.resolve(cssFile);
//console.log(cssPath);
const $link = document.createElement("link");
$link.setAttribute("rel", 'stylesheet');
$link.setAttribute("href", cssPath);
document.head.appendChild($link);
};
export const copyText = (text) => {
return new Promise((resolve) => {
let err;
try {
navigator.clipboard.writeText(text);
} catch (e) {
err = e;
}
if (err) {
resolve(false);
} else {
resolve(true);
}
});
};
function renderPopover($elem, target, options = {}) {
// async microtask
queueMicrotask(() => {
const containerRect = getRect(window);
const targetRect = getRect(target);
const elemRect = getRect($elem);
const positionInfo = getBestPosition(
containerRect,
targetRect,
elemRect,
options.positions
);
const style = getPositionStyle(positionInfo, {
bgColor: options.bgColor,
borderColor: options.borderColor,
borderRadius: options.borderRadius
});
$elem.style.top = positionInfo.top + "px";
$elem.style.left = positionInfo.left + "px";
$elem.style.background = style.background;
});
}
let $popover;
export function hidePopover() {
if ($popover) {
$popover.remove();
$popover = null;
}
}
export function showPopover(target, text, className, options) {
hidePopover();
$popover = document.createElement("div");
$popover.className = ['cn-popover', className].filter(it => it).join(" ");
document.body.appendChild($popover);
$popover.innerHTML = text;
$popover.style.display = "block";
renderPopover($popover, target, {
borderRadius: 10,
... options
});
}
let $tooltip;
export function hideTooltip(target) {
if ($tooltip) {
$tooltip.style.display = "none";
$tooltip.innerHTML = "";
$tooltip.style.top = "0px";
$tooltip.style.left = "0px";
}
}
export function showTooltip(target, text, className = 'cn-tooltip', styleMap = {}) {
if (!$tooltip) {
$tooltip = document.createElement("div");
$tooltip.className = className;
$tooltip.style.cssText = `
pointer-events: none;
position: fixed;
z-index: 10001;
padding: 20px;
color: #1e1e1e;
max-width: 350px;
filter: drop-shadow(1px 5px 5px rgb(0 0 0 / 30%));
${Object.keys(styleMap).map(k=>k+":"+styleMap[k]+";").join("")}
`;
document.body.appendChild($tooltip);
}
$tooltip.innerHTML = text;
$tooltip.style.display = "block";
renderPopover($tooltip, target, {
positions: ['top', 'bottom', 'right', 'center'],
bgColor: "#ffffff",
borderColor: "#cccccc",
borderRadius: 5
});
}
function initTooltip () {
const mouseenterHandler = (e) => {
const target = e.target;
const text = target.getAttribute('tooltip');
if (text) {
showTooltip(target, text);
}
};
const mouseleaveHandler = (e) => {
const target = e.target;
const text = target.getAttribute('tooltip');
if (text) {
hideTooltip(target);
}
};
document.body.removeEventListener('mouseenter', mouseenterHandler, true);
document.body.removeEventListener('mouseleave', mouseleaveHandler, true);
document.body.addEventListener('mouseenter', mouseenterHandler, true);
document.body.addEventListener('mouseleave', mouseleaveHandler, true);
}
initTooltip();

View File

@@ -1,699 +0,0 @@
.cn-manager {
--grid-font: -apple-system, BlinkMacSystemFont, "Segoe UI", "Noto Sans", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji";
z-index: 1099;
width: 80%;
height: 80%;
display: flex;
flex-direction: column;
gap: 10px;
color: var(--fg-color);
font-family: arial, sans-serif;
text-underline-offset: 3px;
outline: none;
}
.cn-manager .cn-flex-auto {
flex: auto;
}
.cn-manager button {
font-size: 16px;
color: var(--input-text);
background-color: var(--comfy-input-bg);
border-radius: 8px;
border-color: var(--border-color);
border-style: solid;
margin: 0;
padding: 4px 8px;
min-width: 100px;
}
.cn-manager button:disabled,
.cn-manager input:disabled,
.cn-manager select:disabled {
color: gray;
}
.cn-manager button:disabled {
background-color: var(--comfy-input-bg);
}
.cn-manager .cn-manager-restart {
display: none;
background-color: #500000;
color: white;
}
.cn-manager .cn-manager-stop {
display: none;
background-color: #500000;
color: white;
}
.cn-manager .cn-manager-back {
align-items: center;
justify-content: center;
}
.arrow-icon {
height: 1em;
width: 1em;
margin-right: 5px;
transform: translateY(2px);
}
.cn-icon {
display: block;
width: 16px;
height: 16px;
}
.cn-icon svg {
display: block;
margin: 0;
pointer-events: none;
}
.cn-manager-header {
display: flex;
flex-wrap: wrap;
gap: 5px;
align-items: center;
padding: 0 5px;
}
.cn-manager-header label {
display: flex;
gap: 5px;
align-items: center;
}
.cn-manager-filter {
height: 28px;
line-height: 28px;
}
.cn-manager-keywords {
height: 28px;
line-height: 28px;
padding: 0 5px 0 26px;
background-size: 16px;
background-position: 5px center;
background-repeat: no-repeat;
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg%20viewBox%3D%220%200%2024%2024%22%20width%3D%22100%25%22%20height%3D%22100%25%22%20pointer-events%3D%22none%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%3E%3Cpath%20fill%3D%22none%22%20stroke%3D%22%23888%22%20stroke-linecap%3D%22round%22%20stroke-linejoin%3D%22round%22%20stroke-width%3D%222%22%20d%3D%22m21%2021-4.486-4.494M19%2010.5a8.5%208.5%200%201%201-17%200%208.5%208.5%200%200%201%2017%200%22%2F%3E%3C%2Fsvg%3E");
}
.cn-manager-status {
padding-left: 10px;
}
.cn-manager-grid {
flex: auto;
border: 1px solid var(--border-color);
overflow: hidden;
position: relative;
}
.cn-manager-selection {
display: flex;
flex-wrap: wrap;
gap: 10px;
align-items: center;
}
.cn-manager-message {
position: relative;
}
.cn-manager-footer {
display: flex;
flex-wrap: wrap;
gap: 10px;
align-items: center;
}
.cn-manager-grid .tg-turbogrid {
font-family: var(--grid-font);
font-size: 15px;
background: var(--bg-color);
}
.cn-manager-grid .tg-turbogrid .tg-highlight::after {
position: absolute;
top: 0;
left: 0;
content: "";
display: block;
width: 100%;
height: 100%;
box-sizing: border-box;
background-color: #80bdff11;
pointer-events: none;
}
.cn-manager-grid .cn-pack-name a {
color: skyblue;
text-decoration: none;
word-break: break-word;
}
.cn-manager-grid .cn-pack-desc a {
color: #5555FF;
font-weight: bold;
text-decoration: none;
}
.cn-manager-grid .tg-cell a:hover {
text-decoration: underline;
}
.cn-manager-grid .cn-pack-version {
line-height: 100%;
display: flex;
flex-direction: column;
justify-content: center;
height: 100%;
gap: 5px;
}
.cn-manager-grid .cn-pack-nodes {
line-height: 100%;
display: flex;
flex-direction: column;
justify-content: center;
gap: 5px;
cursor: pointer;
height: 100%;
}
.cn-manager-grid .cn-pack-nodes:hover {
text-decoration: underline;
}
.cn-manager-grid .cn-pack-conflicts {
color: orange;
}
.cn-popover {
position: fixed;
z-index: 10000;
padding: 20px;
color: #1e1e1e;
filter: drop-shadow(1px 5px 5px rgb(0 0 0 / 30%));
overflow: hidden;
}
.cn-flyover {
position: absolute;
top: 0;
right: 0;
z-index: 1000;
display: none;
width: 50%;
height: 100%;
background-color: var(--comfy-menu-bg);
animation-duration: 0.2s;
animation-fill-mode: both;
flex-direction: column;
}
.cn-flyover::before {
position: absolute;
top: 0;
content: "";
z-index: 10;
display: block;
width: 10px;
height: 100%;
pointer-events: none;
left: -10px;
background-image: linear-gradient(to left, rgb(0 0 0 / 20%), rgb(0 0 0 / 0%));
}
.cn-flyover-header {
height: 45px;
display: flex;
align-items: center;
gap: 5px;
border-bottom: 1px solid var(--border-color);
}
.cn-flyover-close {
display: flex;
align-items: center;
padding: 0 10px;
justify-content: center;
cursor: pointer;
opacity: 0.8;
height: 100%;
}
.cn-flyover-close:hover {
opacity: 1;
}
.cn-flyover-close svg {
display: block;
margin: 0;
pointer-events: none;
width: 20px;
height: 20px;
}
.cn-flyover-title {
display: flex;
align-items: center;
font-weight: bold;
gap: 10px;
flex: auto;
}
.cn-flyover-body {
height: calc(100% - 45px);
overflow-y: auto;
position: relative;
background-color: var(--comfy-menu-secondary-bg);
}
@keyframes cn-slide-in-right {
from {
visibility: visible;
transform: translate3d(100%, 0, 0);
}
to {
transform: translate3d(0, 0, 0);
}
}
.cn-slide-in-right {
animation-name: cn-slide-in-right;
}
@keyframes cn-slide-out-right {
from {
transform: translate3d(0, 0, 0);
}
to {
visibility: hidden;
transform: translate3d(100%, 0, 0);
}
}
.cn-slide-out-right {
animation-name: cn-slide-out-right;
}
.cn-nodes-list {
width: 100%;
}
.cn-nodes-row {
display: flex;
align-items: center;
gap: 10px;
}
.cn-nodes-row:nth-child(odd) {
background-color: rgb(0 0 0 / 5%);
}
.cn-nodes-row:hover {
background-color: rgb(0 0 0 / 10%);
}
.cn-nodes-sn {
text-align: right;
min-width: 35px;
color: var(--drag-text);
flex-shrink: 0;
font-size: 12px;
padding: 8px 5px;
}
.cn-nodes-name {
cursor: pointer;
white-space: nowrap;
flex-shrink: 0;
position: relative;
padding: 8px 5px;
}
.cn-nodes-name::after {
content: attr(action);
position: absolute;
pointer-events: none;
top: 50%;
left: 100%;
transform: translate(5px, -50%);
font-size: 12px;
color: var(--drag-text);
background-color: var(--comfy-input-bg);
border-radius: 10px;
border: 1px solid var(--border-color);
padding: 3px 8px;
display: none;
}
.cn-nodes-name.action::after {
display: block;
}
.cn-nodes-name:hover {
text-decoration: underline;
}
.cn-nodes-conflict .cn-nodes-name,
.cn-nodes-conflict .cn-icon {
color: orange;
}
.cn-conflicts-list {
display: flex;
flex-wrap: wrap;
gap: 5px;
align-items: center;
padding: 5px 0;
}
.cn-conflicts-list b {
font-weight: normal;
color: var(--descrip-text);
}
.cn-nodes-pack {
cursor: pointer;
color: skyblue;
}
.cn-nodes-pack:hover {
text-decoration: underline;
}
.cn-pack-badge {
font-size: 12px;
font-weight: normal;
background-color: var(--comfy-input-bg);
border-radius: 10px;
border: 1px solid var(--border-color);
padding: 3px 8px;
color: var(--error-text);
}
.cn-preview {
min-width: 300px;
max-width: 500px;
min-height: 120px;
overflow: hidden;
font-size: 12px;
pointer-events: none;
padding: 12px;
color: var(--fg-color);
}
.cn-preview-header {
display: flex;
gap: 8px;
align-items: center;
border-bottom: 1px solid var(--comfy-input-bg);
padding: 5px 10px;
}
.cn-preview-dot {
width: 8px;
height: 8px;
border-radius: 50%;
background-color: grey;
position: relative;
filter: drop-shadow(1px 2px 3px rgb(0 0 0 / 30%));
}
.cn-preview-dot.cn-preview-optional::after {
content: "";
position: absolute;
pointer-events: none;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
background-color: var(--comfy-input-bg);
border-radius: 50%;
width: 3px;
height: 3px;
}
.cn-preview-dot.cn-preview-grid {
border-radius: 0;
}
.cn-preview-dot.cn-preview-grid::before {
content: '';
position: absolute;
border-left: 1px solid var(--comfy-input-bg);
border-right: 1px solid var(--comfy-input-bg);
width: 4px;
height: 100%;
left: 2px;
top: 0;
z-index: 1;
}
.cn-preview-dot.cn-preview-grid::after {
content: '';
position: absolute;
border-top: 1px solid var(--comfy-input-bg);
border-bottom: 1px solid var(--comfy-input-bg);
width: 100%;
height: 4px;
left: 0;
top: 2px;
z-index: 1;
}
.cn-preview-name {
flex: auto;
font-size: 14px;
}
.cn-preview-io {
display: flex;
justify-content: space-between;
padding: 10px 10px;
}
.cn-preview-column > div {
display: flex;
gap: 10px;
align-items: center;
height: 18px;
overflow: hidden;
white-space: nowrap;
text-overflow: ellipsis;
}
.cn-preview-input {
justify-content: flex-start;
}
.cn-preview-output {
justify-content: flex-end;
}
.cn-preview-list {
display: flex;
flex-direction: column;
gap: 3px;
padding: 0 10px 10px 10px;
}
.cn-preview-switch {
position: relative;
display: flex;
justify-content: space-between;
align-items: center;
background: var(--bg-color);
border: 2px solid var(--border-color);
border-radius: 10px;
text-wrap: nowrap;
padding: 2px 20px;
gap: 10px;
}
.cn-preview-switch::before,
.cn-preview-switch::after {
position: absolute;
pointer-events: none;
top: 50%;
transform: translate(0, -50%);
color: var(--fg-color);
opacity: 0.8;
}
.cn-preview-switch::before {
content: "◀";
left: 5px;
}
.cn-preview-switch::after {
content: "▶";
right: 5px;
}
.cn-preview-value {
color: var(--descrip-text);
}
.cn-preview-string {
min-height: 30px;
max-height: 300px;
background: var(--bg-color);
color: var(--descrip-text);
border-radius: 3px;
padding: 3px 5px;
overflow-y: auto;
overflow-x: hidden;
}
.cn-preview-description {
margin: 0px 10px 10px 10px;
padding: 6px;
background: var(--border-color);
color: var(--descrip-text);
border-radius: 5px;
font-style: italic;
word-break: break-word;
}
.cn-tag-list {
display: flex;
flex-wrap: wrap;
gap: 5px;
align-items: center;
margin-bottom: 5px;
}
.cn-tag-list > div {
background-color: var(--border-color);
border-radius: 5px;
padding: 0 5px;
}
.cn-install-buttons {
display: flex;
flex-direction: column;
gap: 3px;
padding: 3px;
align-items: center;
justify-content: center;
height: 100%;
}
.cn-selected-buttons {
display: flex;
gap: 5px;
align-items: center;
padding-right: 20px;
}
.cn-manager .cn-btn-enable {
background-color: #333399;
color: white;
}
.cn-manager .cn-btn-disable {
background-color: #442277;
color: white;
}
.cn-manager .cn-btn-update {
background-color: #1155AA;
color: white;
}
.cn-manager .cn-btn-try-update {
background-color: Gray;
color: white;
}
.cn-manager .cn-btn-try-fix {
background-color: #6495ED;
color: white;
}
.cn-manager .cn-btn-import-failed {
background-color: #AA1111;
font-size: 10px;
font-weight: bold;
color: white;
}
.cn-manager .cn-btn-install {
background-color: black;
color: white;
}
.cn-manager .cn-btn-try-install {
background-color: Gray;
color: white;
}
.cn-manager .cn-btn-uninstall {
background-color: #993333;
color: white;
}
.cn-manager .cn-btn-reinstall {
background-color: #993333;
color: white;
}
.cn-manager .cn-btn-switch {
background-color: #448833;
color: white;
}
@keyframes cn-btn-loading-bg {
0% {
left: 0;
}
100% {
left: -105px;
}
}
.cn-manager button.cn-btn-loading {
position: relative;
overflow: hidden;
border-color: rgb(0 119 207 / 80%);
background-color: var(--comfy-input-bg);
}
.cn-manager button.cn-btn-loading::after {
position: absolute;
top: 0;
left: 0;
content: "";
width: 500px;
height: 100%;
background-image: repeating-linear-gradient(
-45deg,
rgb(0 119 207 / 30%),
rgb(0 119 207 / 30%) 10px,
transparent 10px,
transparent 15px
);
animation: cn-btn-loading-bg 2s linear infinite;
}
.cn-manager-light .cn-pack-name a {
color: blue;
}
.cn-manager-light .cm-warn-note {
background-color: #ccc !important;
}
.cn-manager-light .cn-btn-install {
background-color: #333;
}

View File

File diff suppressed because it is too large

View File

@@ -1,213 +0,0 @@
.cmm-manager {
--grid-font: -apple-system, BlinkMacSystemFont, "Segoe UI", "Noto Sans", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji";
z-index: 1099;
width: 80%;
height: 80%;
display: flex;
flex-direction: column;
gap: 10px;
color: var(--fg-color);
font-family: arial, sans-serif;
}
.cmm-manager .cmm-flex-auto {
flex: auto;
}
.cmm-manager button {
font-size: 16px;
color: var(--input-text);
background-color: var(--comfy-input-bg);
border-radius: 8px;
border-color: var(--border-color);
border-style: solid;
margin: 0;
padding: 4px 8px;
min-width: 100px;
}
.cmm-manager button:disabled,
.cmm-manager input:disabled,
.cmm-manager select:disabled {
color: gray;
}
.cmm-manager button:disabled {
background-color: var(--comfy-input-bg);
}
.cmm-manager .cmm-manager-refresh {
display: none;
background-color: #000080;
color: white;
}
.cmm-manager .cmm-manager-stop {
display: none;
background-color: #500000;
color: white;
}
.cmm-manager-header {
display: flex;
flex-wrap: wrap;
gap: 5px;
align-items: center;
padding: 0 5px;
}
.cmm-manager-header label {
display: flex;
gap: 5px;
align-items: center;
}
.cmm-manager-type,
.cmm-manager-base,
.cmm-manager-filter {
height: 28px;
line-height: 28px;
}
.cmm-manager-keywords {
height: 28px;
line-height: 28px;
padding: 0 5px 0 26px;
background-size: 16px;
background-position: 5px center;
background-repeat: no-repeat;
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg%20viewBox%3D%220%200%2024%2024%22%20width%3D%22100%25%22%20height%3D%22100%25%22%20pointer-events%3D%22none%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%3E%3Cpath%20fill%3D%22none%22%20stroke%3D%22%23888%22%20stroke-linecap%3D%22round%22%20stroke-linejoin%3D%22round%22%20stroke-width%3D%222%22%20d%3D%22m21%2021-4.486-4.494M19%2010.5a8.5%208.5%200%201%201-17%200%208.5%208.5%200%200%201%2017%200%22%2F%3E%3C%2Fsvg%3E");
}
.cmm-manager-status {
padding-left: 10px;
}
.cmm-manager-grid {
flex: auto;
border: 1px solid var(--border-color);
overflow: hidden;
}
.cmm-manager-selection {
display: flex;
flex-wrap: wrap;
gap: 10px;
align-items: center;
}
.cmm-manager-footer {
display: flex;
flex-wrap: wrap;
gap: 10px;
align-items: center;
}
.cmm-manager-grid .tg-turbogrid {
font-family: var(--grid-font);
font-size: 15px;
background: var(--bg-color);
}
.cmm-manager-grid .cmm-node-name a {
color: skyblue;
text-decoration: none;
word-break: break-word;
}
.cmm-manager-grid .cmm-node-desc a {
color: #5555FF;
font-weight: bold;
text-decoration: none;
}
.cmm-manager-grid .tg-cell a:hover {
text-decoration: underline;
}
.cmm-icon-passed {
width: 20px;
height: 20px;
position: absolute;
left: calc(50% - 10px);
top: calc(50% - 10px);
}
.cmm-manager .cmm-btn-enable {
background-color: blue;
color: white;
}
.cmm-manager .cmm-btn-disable {
background-color: MediumSlateBlue;
color: white;
}
.cmm-manager .cmm-btn-install {
background-color: black;
color: white;
}
.cmm-btn-download {
width: 18px;
height: 18px;
position: absolute;
left: calc(50% - 10px);
top: calc(50% - 10px);
cursor: pointer;
opacity: 0.8;
color: #fff;
}
.cmm-btn-download:hover {
opacity: 1;
}
.cmm-manager-light .cmm-btn-download {
color: #000;
}
@keyframes cmm-btn-loading-bg {
0% {
left: 0;
}
100% {
left: -105px;
}
}
.cmm-manager button.cmm-btn-loading {
position: relative;
overflow: hidden;
border-color: rgb(0 119 207 / 80%);
background-color: var(--comfy-input-bg);
}
.cmm-manager button.cmm-btn-loading::after {
position: absolute;
top: 0;
left: 0;
content: "";
width: 500px;
height: 100%;
background-image: repeating-linear-gradient(
-45deg,
rgb(0 119 207 / 30%),
rgb(0 119 207 / 30%) 10px,
transparent 10px,
transparent 15px
);
animation: cmm-btn-loading-bg 2s linear infinite;
}
.cmm-manager-light .cmm-node-name a {
color: blue;
}
.cmm-manager-light .cm-warn-note {
background-color: #ccc !important;
}
.cmm-manager-light .cmm-btn-install {
background-color: #333;
}

View File

@@ -3,17 +3,236 @@ import { $el } from "../../scripts/ui.js";
import { import {
manager_instance, rebootAPI, manager_instance, rebootAPI,
fetchData, md5, icons, show_message, customAlert, infoToast, showTerminal, fetchData, md5, icons, show_message, customAlert, infoToast, showTerminal,
storeColumnWidth, restoreColumnWidth, loadCss storeColumnWidth, restoreColumnWidth
} from "./common.js"; } from "./common.js";
import { api } from "../../scripts/api.js"; import { api } from "../../scripts/api.js";
// https://cenfun.github.io/turbogrid/api.html // https://cenfun.github.io/turbogrid/api.html
import TG from "./turbogrid.esm.js"; import TG from "./turbogrid.esm.js";
loadCss("./model-manager.css");
const gridId = "model"; const gridId = "model";
const pageCss = `
.cmm-manager {
--grid-font: -apple-system, BlinkMacSystemFont, "Segoe UI", "Noto Sans", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji";
z-index: 1099;
width: 80%;
height: 80%;
display: flex;
flex-direction: column;
gap: 10px;
color: var(--fg-color);
font-family: arial, sans-serif;
}
.cmm-manager .cmm-flex-auto {
flex: auto;
}
.cmm-manager button {
font-size: 16px;
color: var(--input-text);
background-color: var(--comfy-input-bg);
border-radius: 8px;
border-color: var(--border-color);
border-style: solid;
margin: 0;
padding: 4px 8px;
min-width: 100px;
}
.cmm-manager button:disabled,
.cmm-manager input:disabled,
.cmm-manager select:disabled {
color: gray;
}
.cmm-manager button:disabled {
background-color: var(--comfy-input-bg);
}
.cmm-manager .cmm-manager-refresh {
display: none;
background-color: #000080;
color: white;
}
.cmm-manager .cmm-manager-stop {
display: none;
background-color: #500000;
color: white;
}
.cmm-manager-header {
display: flex;
flex-wrap: wrap;
gap: 5px;
align-items: center;
padding: 0 5px;
}
.cmm-manager-header label {
display: flex;
gap: 5px;
align-items: center;
}
.cmm-manager-type,
.cmm-manager-base,
.cmm-manager-filter {
height: 28px;
line-height: 28px;
}
.cmm-manager-keywords {
height: 28px;
line-height: 28px;
padding: 0 5px 0 26px;
background-size: 16px;
background-position: 5px center;
background-repeat: no-repeat;
background-image: url("data:image/svg+xml;charset=utf8,${encodeURIComponent(icons.search.replace("currentColor", "#888"))}");
}
.cmm-manager-status {
padding-left: 10px;
}
.cmm-manager-grid {
flex: auto;
border: 1px solid var(--border-color);
overflow: hidden;
}
.cmm-manager-selection {
display: flex;
flex-wrap: wrap;
gap: 10px;
align-items: center;
}
.cmm-manager-message {
}
.cmm-manager-footer {
display: flex;
flex-wrap: wrap;
gap: 10px;
align-items: center;
}
.cmm-manager-grid .tg-turbogrid {
font-family: var(--grid-font);
font-size: 15px;
background: var(--bg-color);
}
.cmm-manager-grid .cmm-node-name a {
color: skyblue;
text-decoration: none;
word-break: break-word;
}
.cmm-manager-grid .cmm-node-desc a {
color: #5555FF;
font-weight: bold;
text-decoration: none;
}
.cmm-manager-grid .tg-cell a:hover {
text-decoration: underline;
}
.cmm-icon-passed {
width: 20px;
height: 20px;
position: absolute;
left: calc(50% - 10px);
top: calc(50% - 10px);
}
.cmm-manager .cmm-btn-enable {
background-color: blue;
color: white;
}
.cmm-manager .cmm-btn-disable {
background-color: MediumSlateBlue;
color: white;
}
.cmm-manager .cmm-btn-install {
background-color: black;
color: white;
}
.cmm-btn-download {
width: 18px;
height: 18px;
position: absolute;
left: calc(50% - 10px);
top: calc(50% - 10px);
cursor: pointer;
opacity: 0.8;
color: #fff;
}
.cmm-btn-download:hover {
opacity: 1;
}
.cmm-manager-light .cmm-btn-download {
color: #000;
}
@keyframes cmm-btn-loading-bg {
0% {
left: 0;
}
100% {
left: -105px;
}
}
.cmm-manager button.cmm-btn-loading {
position: relative;
overflow: hidden;
border-color: rgb(0 119 207 / 80%);
background-color: var(--comfy-input-bg);
}
.cmm-manager button.cmm-btn-loading::after {
position: absolute;
top: 0;
left: 0;
content: "";
width: 500px;
height: 100%;
background-image: repeating-linear-gradient(
-45deg,
rgb(0 119 207 / 30%),
rgb(0 119 207 / 30%) 10px,
transparent 10px,
transparent 15px
);
animation: cmm-btn-loading-bg 2s linear infinite;
}
.cmm-manager-light .cmm-node-name a {
color: blue;
}
.cmm-manager-light .cm-warn-note {
background-color: #ccc !important;
}
.cmm-manager-light .cmm-btn-install {
background-color: #333;
}
`;
const pageHtml = ` const pageHtml = `
<div class="cmm-manager-header"> <div class="cmm-manager-header">
<label>Filter <label>Filter
@@ -64,6 +283,14 @@ export class ModelManager {
} }
init() { init() {
if (!document.querySelector(`style[context="${this.id}"]`)) {
const $style = document.createElement("style");
$style.setAttribute("context", this.id);
$style.innerHTML = pageCss;
document.head.appendChild($style);
}
this.element = $el("div", { this.element = $el("div", {
parent: document.body, parent: document.body,
className: "comfy-modal cmm-manager" className: "comfy-modal cmm-manager"
@@ -81,13 +308,10 @@ export class ModelManager {
value: "" value: ""
}, { }, {
label: "Installed", label: "Installed",
value: "installed" value: "True"
}, { }, {
label: "Not Installed", label: "Not Installed",
value: "not_installed" value: "False"
}, {
label: "In Workflow",
value: "in_workflow"
}]; }];
this.typeList = [{ this.typeList = [{
@@ -257,31 +481,12 @@ export class ModelManager {
rowFilter: (rowItem) => { rowFilter: (rowItem) => {
const searchableColumns = ["name", "type", "base", "description", "filename", "save_path"]; const searchableColumns = ["name", "type", "base", "description", "filename", "save_path"];
const models_extensions = ['.ckpt', '.pt', '.pt2', '.bin', '.pth', '.safetensors', '.pkl', '.sft'];
let shouldShown = grid.highlightKeywordsFilter(rowItem, searchableColumns, this.keywords); let shouldShown = grid.highlightKeywordsFilter(rowItem, searchableColumns, this.keywords);
if (shouldShown) { if (shouldShown) {
if(this.filter) { if(this.filter && rowItem.installed !== this.filter) {
if (this.filter == "in_workflow") { return false;
rowItem.in_workflow = null;
if (Array.isArray(app.graph._nodes)) {
app.graph._nodes.forEach((item, i) => {
if (Array.isArray(item.widgets_values)) {
item.widgets_values.forEach((_item, i) => {
if (rowItem.in_workflow === null && _item !== null && models_extensions.includes("." + _item.toString().split('.').pop())) {
let filename = _item.match(/([^\/]+)(?=\.\w+$)/)[0];
if (grid.highlightKeywordsFilter(rowItem, searchableColumns, filename)) {
rowItem.in_workflow = "True";
grid.highlightKeywordsFilter(rowItem, searchableColumns, "");
}
}
});
}
});
}
}
return ((this.filter == "installed" && rowItem.installed == "True") || (this.filter == "not_installed" && rowItem.installed == "False") || (this.filter == "in_workflow" && rowItem.in_workflow == "True"));
} }
if(this.type && rowItem.type !== this.type) { if(this.type && rowItem.type !== this.type) {
@@ -356,7 +561,7 @@ export class ModelManager {
sortable: false, sortable: false,
align: 'center', align: 'center',
formatter: (url, rowItem, columnItem) => { formatter: (url, rowItem, columnItem) => {
return `<a class="cmm-btn-download" tooltip="Download file" href="${url}" target="_blank">${icons.download}</a>`; return `<a class="cmm-btn-download" title="Download file" href="${url}" target="_blank">${icons.download}</a>`;
} }
}, { }, {
id: 'size', id: 'size',

View File

@@ -1,619 +0,0 @@
const hasOwn = function(obj, key) {
return Object.prototype.hasOwnProperty.call(obj, key);
};
const isNum = function(num) {
if (typeof num !== 'number' || isNaN(num)) {
return false;
}
const isInvalid = function(n) {
if (n === Number.MAX_VALUE || n === Number.MIN_VALUE || n === Number.NEGATIVE_INFINITY || n === Number.POSITIVE_INFINITY) {
return true;
}
return false;
};
if (isInvalid(num)) {
return false;
}
return true;
};
const toNum = (num) => {
if (typeof (num) !== 'number') {
num = parseFloat(num);
}
if (isNaN(num)) {
num = 0;
}
num = Math.round(num);
return num;
};
const clamp = function(value, min, max) {
return Math.max(min, Math.min(max, value));
};
const isWindow = (obj) => {
return Boolean(obj && obj === obj.window);
};
const isDocument = (obj) => {
return Boolean(obj && obj.nodeType === 9);
};
const isElement = (obj) => {
return Boolean(obj && obj.nodeType === 1);
};
// ===========================================================================================
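// normalize window, document, element, or plain-object inputs into a {left, top, width, height} rect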
export const toRect = (obj) => {
if (obj) {
return {
left: toNum(obj.left || obj.x),
top: toNum(obj.top || obj.y),
width: toNum(obj.width),
height: toNum(obj.height)
};
}
return {
left: 0,
top: 0,
width: 0,
height: 0
};
};
export const getElement = (selector) => {
if (typeof selector === 'string' && selector) {
if (selector.startsWith('#')) {
return document.getElementById(selector.slice(1));
}
return document.querySelector(selector);
}
if (isDocument(selector)) {
return selector.body;
}
if (isElement(selector)) {
return selector;
}
};
export const getRect = (target, fixed) => {
if (!target) {
return toRect();
}
if (isWindow(target)) {
return {
left: 0,
top: 0,
width: window.innerWidth,
height: window.innerHeight
};
}
const elem = getElement(target);
if (!elem) {
return toRect(target);
}
const br = elem.getBoundingClientRect();
const rect = toRect(br);
// fix offset
if (!fixed) {
rect.left += window.scrollX;
rect.top += window.scrollY;
}
rect.width = elem.offsetWidth;
rect.height = elem.offsetHeight;
return rect;
};
// ===========================================================================================
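// per-position calculators: for each side of the target, compute the remaining space and the popover's top/left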
const calculators = {
bottom: (info, containerRect, targetRect) => {
info.space = containerRect.top + containerRect.height - targetRect.top - targetRect.height - info.height;
info.top = targetRect.top + targetRect.height;
info.left = Math.round(targetRect.left + targetRect.width * 0.5 - info.width * 0.5);
},
top: (info, containerRect, targetRect) => {
info.space = targetRect.top - info.height - containerRect.top;
info.top = targetRect.top - info.height;
info.left = Math.round(targetRect.left + targetRect.width * 0.5 - info.width * 0.5);
},
right: (info, containerRect, targetRect) => {
info.space = containerRect.left + containerRect.width - targetRect.left - targetRect.width - info.width;
info.top = Math.round(targetRect.top + targetRect.height * 0.5 - info.height * 0.5);
info.left = targetRect.left + targetRect.width;
},
left: (info, containerRect, targetRect) => {
info.space = targetRect.left - info.width - containerRect.left;
info.top = Math.round(targetRect.top + targetRect.height * 0.5 - info.height * 0.5);
info.left = targetRect.left - info.width;
}
};
// with order
export const getDefaultPositions = () => {
return Object.keys(calculators);
};
const calculateSpace = (info, containerRect, targetRect) => {
const calculator = calculators[info.position];
calculator(info, containerRect, targetRect);
if (info.space >= 0) {
info.passed += 1;
}
};
// ===========================================================================================
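// keep the popover inside the container along the alignment axis and record info.offset, the arrow's distance toward the target's center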
const calculateAlignOffset = (info, containerRect, targetRect, alignType, sizeType) => {
const popoverStart = info[alignType];
const popoverSize = info[sizeType];
const containerStart = containerRect[alignType];
const containerSize = containerRect[sizeType];
const targetStart = targetRect[alignType];
const targetSize = targetRect[sizeType];
const targetCenter = targetStart + targetSize * 0.5;
// size overflow
if (popoverSize > containerSize) {
const overflow = (popoverSize - containerSize) * 0.5;
info[alignType] = containerStart - overflow;
info.offset = targetCenter - containerStart + overflow;
return;
}
const space1 = popoverStart - containerStart;
const space2 = (containerStart + containerSize) - (popoverStart + popoverSize);
// both side passed, default to center
if (space1 >= 0 && space2 >= 0) {
if (info.passed) {
info.passed += 2;
}
info.offset = popoverSize * 0.5;
return;
}
// one side passed
if (info.passed) {
info.passed += 1;
}
if (space1 < 0) {
const min = containerStart;
info[alignType] = min;
info.offset = targetCenter - min;
return;
}
// space2 < 0
const max = containerStart + containerSize - popoverSize;
info[alignType] = max;
info.offset = targetCenter - max;
};
const calculateHV = (info, containerRect) => {
if (['top', 'bottom'].includes(info.position)) {
info.top = clamp(info.top, containerRect.top, containerRect.top + containerRect.height - info.height);
return ['left', 'width'];
}
info.left = clamp(info.left, containerRect.left, containerRect.left + containerRect.width - info.width);
return ['top', 'height'];
};
const calculateOffset = (info, containerRect, targetRect) => {
const [alignType, sizeType] = calculateHV(info, containerRect);
calculateAlignOffset(info, containerRect, targetRect, alignType, sizeType);
info.offset = clamp(info.offset, 0, info[sizeType]);
};
// ===========================================================================================
const calculateDistance = (info, previousPositionInfo) => {
if (!previousPositionInfo) {
return;
}
// no change if position no change with previous
if (info.position === previousPositionInfo.position) {
return;
}
const ax = info.left + info.width * 0.5;
const ay = info.top + info.height * 0.5;
const bx = previousPositionInfo.left + previousPositionInfo.width * 0.5;
const by = previousPositionInfo.top + previousPositionInfo.height * 0.5;
const dx = Math.abs(ax - bx);
const dy = Math.abs(ay - by);
info.distance = Math.round(Math.sqrt(dx * dx + dy * dy));
};
// ===========================================================================================
const calculatePositionInfo = (info, containerRect, targetRect, previousPositionInfo) => {
calculateSpace(info, containerRect, targetRect);
calculateOffset(info, containerRect, targetRect);
calculateDistance(info, previousPositionInfo);
};
// ===========================================================================================
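// score every candidate position (available space plus alignment passes); keep the previous position when it still fits to avoid jumping between placements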
const calculateBestPosition = (containerRect, targetRect, infoMap, withOrder, previousPositionInfo) => {
// position space: +1
// align space:
// two side passed: +2
// one side passed: +1
const safePassed = 3;
if (previousPositionInfo) {
const prevInfo = infoMap[previousPositionInfo.position];
if (prevInfo) {
calculatePositionInfo(prevInfo, containerRect, targetRect);
if (prevInfo.passed >= safePassed) {
return prevInfo;
}
prevInfo.calculated = true;
}
}
const positionList = [];
Object.values(infoMap).forEach((info) => {
if (!info.calculated) {
calculatePositionInfo(info, containerRect, targetRect, previousPositionInfo);
}
positionList.push(info);
});
positionList.sort((a, b) => {
if (a.passed !== b.passed) {
return b.passed - a.passed;
}
if (withOrder && a.passed >= safePassed && b.passed >= safePassed) {
return a.index - b.index;
}
if (a.space !== b.space) {
return b.space - a.space;
}
return a.index - b.index;
});
// logTable(positionList);
return positionList[0];
};
// const logTable = (() => {
// let time_id;
// return (info) => {
// clearTimeout(time_id);
// time_id = setTimeout(() => {
// console.table(info);
// }, 10);
// };
// })();
// ===========================================================================================
const getAllowPositions = (positions, defaultAllowPositions) => {
if (!positions) {
return;
}
if (Array.isArray(positions)) {
positions = positions.join(',');
}
positions = String(positions).split(',').map((it) => it.trim().toLowerCase()).filter((it) => it);
positions = positions.filter((it) => defaultAllowPositions.includes(it));
if (!positions.length) {
return;
}
return positions;
};
const isPositionChanged = (info, previousPositionInfo) => {
if (!previousPositionInfo) {
return true;
}
if (info.left !== previousPositionInfo.left) {
return true;
}
if (info.top !== previousPositionInfo.top) {
return true;
}
return false;
};
// ===========================================================================================
// const log = (name, time) => {
// if (time > 0.1) {
// console.log(name, time);
// }
// };
export const getBestPosition = (containerRect, targetRect, popoverRect, positions, previousPositionInfo) => {
const defaultAllowPositions = getDefaultPositions();
let withOrder = true;
let allowPositions = getAllowPositions(positions, defaultAllowPositions);
if (!allowPositions) {
allowPositions = defaultAllowPositions;
withOrder = false;
}
// console.log('withOrder', withOrder);
// const start_time = performance.now();
const infoMap = {};
allowPositions.forEach((k, i) => {
infoMap[k] = {
position: k,
index: i,
top: 0,
left: 0,
width: popoverRect.width,
height: popoverRect.height,
space: 0,
offset: 0,
passed: 0,
distance: 0
};
});
// log('infoMap', performance.now() - start_time);
const bestPosition = calculateBestPosition(containerRect, targetRect, infoMap, withOrder, previousPositionInfo);
// check left/top
bestPosition.changed = isPositionChanged(bestPosition, previousPositionInfo);
return bestPosition;
};
// ===========================================================================================
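// build the rounded-rectangle SVG path with the arrow on the top edge (popover below its target); getPathData rotates/translates this template for the other sides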
const getTemplatePath = (width, height, arrowOffset, arrowSize, borderRadius) => {
const p = (px, py) => {
return [px, py].join(',');
};
const px = function(num, alignEnd) {
const floor = Math.floor(num);
let n = num < floor + 0.5 ? floor + 0.5 : floor + 1.5;
if (alignEnd) {
n -= 1;
}
return n;
};
const pxe = function(num) {
return px(num, true);
};
const ls = [];
const innerLeft = px(arrowSize);
const innerRight = pxe(width - arrowSize);
arrowOffset = clamp(arrowOffset, innerLeft, innerRight);
const innerTop = px(arrowSize);
const innerBottom = pxe(height - arrowSize);
const startPoint = p(innerLeft, innerTop + borderRadius);
const arrowPoint = p(arrowOffset, 1);
const LT = p(innerLeft, innerTop);
const RT = p(innerRight, innerTop);
const AOT = p(arrowOffset - arrowSize, innerTop);
const RRT = p(innerRight - borderRadius, innerTop);
ls.push(`M${startPoint}`);
ls.push(`V${innerBottom - borderRadius}`);
ls.push(`Q${p(innerLeft, innerBottom)} ${p(innerLeft + borderRadius, innerBottom)}`);
ls.push(`H${innerRight - borderRadius}`);
ls.push(`Q${p(innerRight, innerBottom)} ${p(innerRight, innerBottom - borderRadius)}`);
ls.push(`V${innerTop + borderRadius}`);
if (arrowOffset < innerLeft + arrowSize + borderRadius) {
ls.push(`Q${RT} ${RRT}`);
ls.push(`H${arrowOffset + arrowSize}`);
ls.push(`L${arrowPoint}`);
if (arrowOffset < innerLeft + arrowSize) {
ls.push(`L${LT}`);
ls.push(`L${startPoint}`);
} else {
ls.push(`L${AOT}`);
ls.push(`Q${LT} ${startPoint}`);
}
} else if (arrowOffset > innerRight - arrowSize - borderRadius) {
if (arrowOffset > innerRight - arrowSize) {
ls.push(`L${RT}`);
} else {
ls.push(`Q${RT} ${p(arrowOffset + arrowSize, innerTop)}`);
}
ls.push(`L${arrowPoint}`);
ls.push(`L${AOT}`);
ls.push(`H${innerLeft + borderRadius}`);
ls.push(`Q${LT} ${startPoint}`);
} else {
ls.push(`Q${RT} ${RRT}`);
ls.push(`H${arrowOffset + arrowSize}`);
ls.push(`L${arrowPoint}`);
ls.push(`L${AOT}`);
ls.push(`H${innerLeft + borderRadius}`);
ls.push(`Q${LT} ${startPoint}`);
}
return ls.join('');
};
const getPathData = function(position, width, height, arrowOffset, arrowSize, borderRadius) {
const handlers = {
bottom: () => {
const d = getTemplatePath(width, height, arrowOffset, arrowSize, borderRadius);
return {
d,
transform: ''
};
},
top: () => {
const d = getTemplatePath(width, height, width - arrowOffset, arrowSize, borderRadius);
return {
d,
transform: `rotate(180,${width * 0.5},${height * 0.5})`
};
},
left: () => {
const d = getTemplatePath(height, width, arrowOffset, arrowSize, borderRadius);
const x = (width - height) * 0.5;
const y = (height - width) * 0.5;
return {
d,
transform: `translate(${x} ${y}) rotate(90,${height * 0.5},${width * 0.5})`
};
},
right: () => {
const d = getTemplatePath(height, width, height - arrowOffset, arrowSize, borderRadius);
const x = (width - height) * 0.5;
const y = (height - width) * 0.5;
return {
d,
transform: `translate(${x} ${y}) rotate(-90,${height * 0.5},${width * 0.5})`
};
}
};
return handlers[position]();
};
// ===========================================================================================
// position style cache
const styleCache = {
// position: '',
// top: {},
// bottom: {},
// left: {},
// right: {}
};
export const getPositionStyle = (info, options = {}) => {
const o = {
bgColor: '#fff',
borderColor: '#ccc',
borderRadius: 5,
arrowSize: 10
};
Object.keys(o).forEach((k) => {
if (hasOwn(options, k)) {
const d = o[k];
const v = options[k];
if (typeof d === 'string') {
// string
if (typeof v === 'string' && v) {
o[k] = v;
}
} else {
// number
if (isNum(v) && v >= 0) {
o[k] = v;
}
}
}
});
const key = [
info.width,
info.height,
info.offset,
o.arrowSize,
o.borderRadius,
o.bgColor,
o.borderColor
].join('-');
const positionCache = styleCache[info.position];
if (positionCache && key === positionCache.key) {
const st = positionCache.style;
st.changed = styleCache.position !== info.position;
styleCache.position = info.position;
return st;
}
// console.log(options);
const data = getPathData(info.position, info.width, info.height, info.offset, o.arrowSize, o.borderRadius);
// console.log(data);
const viewBox = [0, 0, info.width, info.height].join(' ');
const svg = [
`<svg viewBox="${viewBox}" xmlns="http://www.w3.org/2000/svg">`,
`<path d="${data.d}" fill="${o.bgColor}" stroke="${o.borderColor}" transform="${data.transform}" />`,
'</svg>'
].join('');
// console.log(svg);
const backgroundImage = `url("data:image/svg+xml;charset=utf8,${encodeURIComponent(svg)}")`;
const background = `${backgroundImage} center no-repeat`;
const padding = `${o.arrowSize + o.borderRadius}px`;
const style = {
background,
backgroundImage,
padding,
changed: true
};
styleCache.position = info.position;
styleCache[info.position] = {
key,
style
};
return style;
};

View File

@@ -70,8 +70,8 @@ class WorkflowMetadataExtension {
if (cnr_id === "comfy-core") return; // don't allow hijacking comfy-core name if (cnr_id === "comfy-core") return; // don't allow hijacking comfy-core name
if (cnr_id) nodeProperties.cnr_id = cnr_id; if (cnr_id) nodeProperties.cnr_id = cnr_id;
else nodeProperties.aux_id = aux_id; else nodeProperties.aux_id = aux_id;
if (ver) nodeProperties.ver = ver.trim(); if (ver) nodeProperties.ver = ver;
} else if (["nodes", "comfy_extras", "comfy_api_nodes"].includes(moduleType)) { } else if (["nodes", "comfy_extras"].includes(moduleType)) {
nodeProperties.cnr_id = "comfy-core"; nodeProperties.cnr_id = "comfy-core";
nodeProperties.ver = this.comfyCoreVersion; nodeProperties.ver = this.comfyCoreVersion;
} }

View File

@@ -749,8 +749,8 @@
"save_path": "loras/HyperSD/SDXL", "save_path": "loras/HyperSD/SDXL",
"description": "Hyper-SD LoRA (4steps) - SDXL", "description": "Hyper-SD LoRA (4steps) - SDXL",
"reference": "https://huggingface.co/ByteDance/Hyper-SD", "reference": "https://huggingface.co/ByteDance/Hyper-SD",
"filename": "Hyper-SDXL-4steps-lora.safetensors", "filename": "Hyper-SD15-4steps-lora.safetensors",
"url": "https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-4steps-lora.safetensors", "url": "https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-4steps-lora.safetensors",
"size": "787MB" "size": "787MB"
}, },
{ {
@@ -4750,310 +4750,6 @@
"filename": "diffusion_pytorch_model.safetensors", "filename": "diffusion_pytorch_model.safetensors",
"url": "https://huggingface.co/Kwai-Kolors/Kolors/resolve/main/vae/diffusion_pytorch_model.safetensors", "url": "https://huggingface.co/Kwai-Kolors/Kolors/resolve/main/vae/diffusion_pytorch_model.safetensors",
"size": "335MB" "size": "335MB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 480p 14B (bf16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 480p 14B (bf16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_480p_14B_bf16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors",
"size": "32.8GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 480p 14B (fp16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 480p 14B (fp16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_480p_14B_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp16.safetensors",
"size": "32.8GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 480p 14B (fp8_e4m3fn)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 480p 14B (fp8_e4m3fn)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors",
"size": "16.4GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 480p 14B (fp8_scaled)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 480p 14B (fp8_scaled)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_480p_14B_fp8_scaled.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_scaled.safetensors",
"size": "16.4GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 720p 14B (bf16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 720p 14B (bf16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_720p_14B_bf16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_bf16.safetensors",
"size": "32.8GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 720p 14B (fp16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 720p 14B (fp16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_720p_14B_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_fp16.safetensors",
"size": "32.8GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 720p 14B (fp8_e4m3fn)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 720p 14B (fp8_e4m3fn)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors",
"size": "16.4GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 720p 14B (fp8_scaled)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 720p 14B (fp8_scaled)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_720p_14B_fp8_scaled.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_fp8_scaled.safetensors",
"size": "16.4GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 1.3B (bf16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 1.3B (bf16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_1.3B_bf16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_1.3B_bf16.safetensors",
"size": "2.84GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 1.3B (fp16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 1.3B (fp16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_1.3B_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_1.3B_fp16.safetensors",
"size": "2.84GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 14B (bf16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 14B (bf16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_14B_bf16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_bf16.safetensors",
"size": "28.6GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 14B (fp16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 14B (fp16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_14B_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_fp16.safetensors",
"size": "28.6GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 14B (fp8_e4m3fn)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 14B (fp8_e4m3fn)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_14B_fp8_e4m3fn.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_fp8_e4m3fn.safetensors",
"size": "14.3GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 14B (fp8_scaled)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 14B (fp8_scaled)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_14B_fp8_scaled.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_fp8_scaled.safetensors",
"size": "14.3GB"
},
{
"name": "Comfy-Org/Wan2.1 VAE",
"type": "vae",
"base": "Wan2.1",
"save_path": "vae",
"description": "Wan2.1 VAE model",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan_2.1_vae.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors",
"size": "254MB"
},
{
"name": "Comfy-Org/clip_vision_h.safetensors",
"type": "clip_vision",
"base": "clip_vision_h",
"save_path": "clip_vision",
"description": "clip_vision_h model for Wan2.1",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "clip_vision_h.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/clip_vision/clip_vision_h.safetensors",
"size": "1.26GB"
},
{
"name": "Comfy-Org/umt5_xxl_fp16.safetensors",
"type": "clip",
"base": "umt5_xxl",
"save_path": "text_encoders",
"description": "umt5_xxl_fp16 text encoder for Wan2.1",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "umt5_xxl_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp16.safetensors",
"size": "11.4GB"
},
{
"name": "Comfy-Org/umt5_xxl_fp8_e4m3fn_scaled.safetensors",
"type": "clip",
"base": "umt5_xxl",
"save_path": "text_encoders",
"description": "umt5_xxl_fp8_e4m3fn_scaled text encoder for Wan2.1",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "umt5_xxl_fp8_e4m3fn_scaled.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors",
"size": "6.74GB"
},
{
"name": "lllyasviel/FramePackI2V_HY",
"type": "FramePackI2V",
"base": "FramePackI2V",
"save_path": "diffusers/lllyasviel",
"description": "[SNAPSHOT] This is the f1k1_x_g9_f1k1f2k2f16k4_td FramePack for HY. [w/You cannot download this item on ComfyUI-Manager versions below V3.18]",
"reference": "https://huggingface.co/lllyasviel/FramePackI2V_HY",
"filename": "<huggingface>",
"url": "lllyasviel/FramePackI2V_HY",
"size": "25.75GB"
},
{
"name": "LTX-Video Spatial Upscaler v0.9.7",
"type": "upscale",
"base": "upscale",
"save_path": "default",
"description": "Spatial upscaler model for LTX-Video. This model enhances the spatial resolution of generated videos.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-spatial-upscaler-0.9.7.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-spatial-upscaler-0.9.7.safetensors",
"size": "505MB"
},
{
"name": "LTX-Video Temporal Upscaler v0.9.7",
"type": "upscale",
"base": "upscale",
"save_path": "default",
"description": "Temporal upscaler model for LTX-Video. This model enhances the temporal resolution and smoothness of generated videos.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-temporal-upscaler-0.9.7.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-temporal-upscaler-0.9.7.safetensors",
"size": "524MB"
},
{
"name": "LTX-Video 13B v0.9.7",
"type": "checkpoint",
"base": "LTX-Video",
"save_path": "checkpoints/LTXV",
"description": "High-resolution quality LTX-Video 13B model.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-13b-0.9.7-dev.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-13b-0.9.7-dev.safetensors",
"size": "28.6GB"
},
{
"name": "LTX-Video 13B FP8 v0.9.7",
"type": "checkpoint",
"base": "LTX-Video",
"save_path": "checkpoints/LTXV",
"description": "Quantized version of the LTX-Video 13B model, optimized for lower VRAM usage while maintaining high quality.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-13b-0.9.7-dev-fp8.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-13b-0.9.7-dev-fp8.safetensors",
"size": "15.7GB"
},
{
"name": "LTX-Video 13B Distilled v0.9.7",
"type": "checkpoint",
"base": "LTX-Video",
"save_path": "checkpoints/LTXV",
"description": "Distilled version of the LTX-Video 13B model, providing improved efficiency while maintaining high-resolution quality.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-13b-0.9.7-distilled.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-13b-0.9.7-distilled.safetensors",
"size": "28.6GB"
},
{
"name": "LTX-Video 13B Distilled FP8 v0.9.7",
"type": "checkpoint",
"base": "LTX-Video",
"save_path": "checkpoints/LTXV",
"description": "Quantized distilled version of the LTX-Video 13B model, optimized for even lower VRAM usage while maintaining quality.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-13b-0.9.7-distilled-fp8.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-13b-0.9.7-distilled-fp8.safetensors",
"size": "15.7GB"
},
{
"name": "LTX-Video 13B Distilled LoRA v0.9.7",
"type": "lora",
"base": "LTX-Video",
"save_path": "loras",
"description": "A LoRA adapter that transforms the standard LTX-Video 13B model into a distilled version when loaded.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-13b-0.9.7-distilled-lora128.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-13b-0.9.7-distilled-lora128.safetensors",
"size": "1.33GB"
},
{
"name": "Latent Bridge Matching for Image Relighting",
"type": "diffusion_model",
"base": "LBM",
"save_path": "diffusion_models/LBM",
"description": "Latent Bridge Matching (LBM) Relighting model",
"reference": "https://huggingface.co/jasperai/LBM_relighting",
"filename": "LBM_relighting.safetensors",
"url": "https://huggingface.co/jasperai/LBM_relighting/resolve/main/model.safetensors",
"size": "5.02GB"
} }
] ]
} }

View File

File diff suppressed because it is too large

View File

File diff suppressed because it is too large

View File

File diff suppressed because it is too large

View File

@@ -1,15 +1,5 @@
{ {
"custom_nodes": [ "custom_nodes": [
{
"author": "SanDiegoDude",
"title": "ComfyUI-HiDream-Sampler [WIP]",
"reference": "https://github.com/SanDiegoDude/ComfyUI-HiDream-Sampler",
"files": [
"https://github.com/SanDiegoDude/ComfyUI-HiDream-Sampler"
],
"install_type": "git-clone",
"description": "A collection of enhanced nodes for ComfyUI that provide powerful additional functionality to your workflows.\nNOTE: The files in the repo are not organized."
},
{ {
"author": "PramaLLC", "author": "PramaLLC",
"title": "ComfyUI BEN - Background Erase Network", "title": "ComfyUI BEN - Background Erase Network",

View File

@@ -11,646 +11,6 @@
{
"author": "syaofox",
"title": "ComfyUI_fnodes [REMOVED]",
"reference": "https://github.com/syaofox/ComfyUI_fnodes",
"files": [
"https://github.com/syaofox/ComfyUI_fnodes"
],
"install_type": "git-clone",
"description": "ComfyUI_fnodes is a collection of custom nodes designed for ComfyUI. These nodes provide additional functionality that can enhance your ComfyUI workflows.\nFile manipulation tools, Image resizing tools, IPAdapter tools, Image processing tools, Mask tools, Face analysis tools, Sampler tools, Miscellaneous tools"
},
{
"author": "Hangover3832",
"title": "ComfyUI-Hangover-Moondream [DEPRECATED]",
"reference": "https://github.com/Hangover3832/ComfyUI-Hangover-Moondream",
"files": [
"https://github.com/Hangover3832/ComfyUI-Hangover-Moondream"
],
"install_type": "git-clone",
"description": "Moondream is a lightweight multimodal large language model.\n[w/WARN:Additional python code will be downloaded from huggingface and executed. You have to trust this creator if you want to use this node!]"
},
{
"author": "Hangover3832",
"title": "Recognize Anything Model (RAM) for ComfyUI [DEPRECATED]",
"reference": "https://github.com/Hangover3832/ComfyUI-Hangover-Recognize_Anything",
"files": [
"https://github.com/Hangover3832/ComfyUI-Hangover-Recognize_Anything"
],
"install_type": "git-clone",
"description": "This is an image recognition node for ComfyUI based on the RAM++ model from [a/xinyu1205](https://huggingface.co/xinyu1205).\nThis node outputs a string of tags with all the recognized objects and elements in the image in English or Chinese language.\nFor image tagging and captioning."
},
{
"author": "Hangover3832",
"title": "ComfyUI-Hangover-Nodes [DEPRECATED]",
"reference": "https://github.com/Hangover3832/ComfyUI-Hangover-Nodes",
"files": [
"https://github.com/Hangover3832/ComfyUI-Hangover-Nodes"
],
"install_type": "git-clone",
"description": "Nodes: MS kosmos-2 Interrogator, Save Image w/o Metadata, Image Scale Bounding Box. An implementation of Microsoft [a/kosmos-2](https://huggingface.co/microsoft/kosmos-2-patch14-224) image to text transformer."
},
{
"author": "SirLatore",
"title": "ComfyUI-IPAdapterWAN [REMOVED]",
"reference": "https://github.com/SirLatore/ComfyUI-IPAdapterWAN",
"files": [
"https://github.com/SirLatore/ComfyUI-IPAdapterWAN"
],
"install_type": "git-clone",
"description": "This extension adapts the [a/InstantX IP-Adapter for SD3.5-Large](https://huggingface.co/InstantX/SD3.5-Large-IP-Adapter) to work with Wan 2.1 and other UNet-based video/image models in ComfyUI.\nUnlike the original SD3 version (which depends on joint_blocks from MMDiT), this version performs sampling-time identity conditioning by dynamically injecting into attention layers — making it compatible with models like Wan 2.1, AnimateDiff, and other non-SD3 pipelines."
},
{
"author": "Jpzz",
"title": "ComfyUI-VirtualInteraction [UNSAFE/REMOVED]",
"reference": "https://github.com/Jpzz/ComfyUI-VirtualInteraction",
"files": [
"https://github.com/Jpzz/ComfyUI-VirtualInteraction"
],
"install_type": "git-clone",
"description": "NODES: virtual interaction custom node when using generative movie\n[w/This nodepack contains a node which is reading arbitrary excel file.]"
},
{
"author": "satche",
"title": "Prompt Factory [REMOVED]",
"reference": "https://github.com/satche/comfyui-prompt-factory",
"files": [
"https://github.com/satche/comfyui-prompt-factory"
],
"install_type": "git-clone",
"description": "A modular system that adds randomness to prompt generation"
},
{
"author": "MITCAP",
"title": "ComfyUI OpenAI DALL-E 3 Node [REMOVED]",
"reference": "https://github.com/MITCAP/OpenAI-ComfyUI",
"files": [
"https://github.com/MITCAP/OpenAI-ComfyUI"
],
"install_type": "git-clone",
"description": "This project provides custom nodes for ComfyUI that integrate with OpenAI's DALL-E 3 and GPT-4o models. The nodes allow users to generate images and describe images using OpenAI's API.\nNOTE: The files in the repo are not organized."
},
{
"author": "raspie10032",
"title": "ComfyUI NAI Prompt Converter [REMOVED]",
"reference": "https://github.com/raspie10032/ComfyUI_RS_NAI_Local_Prompt_converter",
"files": [
"https://github.com/raspie10032/ComfyUI_RS_NAI_Local_Prompt_converter"
],
"install_type": "git-clone",
"description": "A custom node extension for ComfyUI that enables conversion between different prompt formats: NovelAI V4, ComfyUI, and old NovelAI."
},
{
"author": "holchan",
"title": "ComfyUI-ModelDownloader [REMOVED]",
"reference": "https://github.com/holchan/ComfyUI-ModelDownloader",
"files": [
"https://github.com/holchan/ComfyUI-ModelDownloader"
],
"install_type": "git-clone",
"description": "A ComfyUI node to download models(Checkpoints and LoRA) from external links and act as an output standalone node."
},
{
"author": "Kur0butiMegane",
"title": "Comfyui-StringUtils [DEPRECATED]",
"reference": "https://github.com/Kur0butiMegane/Comfyui-StringUtils",
"files": [
"https://github.com/Kur0butiMegane/Comfyui-StringUtils"
],
"install_type": "git-clone",
"description": "NODES: Prompt Normalizer, String Splitter, String Line Selector, Extract Markup Value"
},
{
"author": "Apache0ne",
"title": "ComfyUI-LantentCompose [REMOVED]",
"reference": "https://github.com/Apache0ne/ComfyUI-LantentCompose",
"files": [
"https://github.com/Apache0ne/ComfyUI-LantentCompose"
],
"install_type": "git-clone",
"description": "Interpolate sdxl latents using slerp with and without a mask. use with unsample nodes for best effect.\nNOTE: The files in the repo are not organized."
},
{
"author": "jax-explorer",
"title": "ComfyUI-H-flow [REMOVED]",
"reference": "https://github.com/jax-explorer/ComfyUI-H-flow",
"files": [
"https://github.com/jax-explorer/ComfyUI-H-flow"
],
"install_type": "git-clone",
"description": "NODES: Wan2-1 Image To Video, LLM Task, Save Image, Save Video, Show Text, FluxPro Ultra, IdeogramV2 Turbo, Runway Image To Video, Kling Image To Video, Replace Text, Join Text, Test Image, Test Text"
},
{
"author": "Apache0ne",
"title": "SambaNova [REMOVED]",
"id": "SambaNovaAPI",
"reference": "https://github.com/Apache0ne/SambaNova",
"files": [
"https://github.com/Apache0ne/SambaNova"
],
"install_type": "git-clone",
"description": "Super Fast LLM's llama3.1-405B,70B,8B and more"
},
{
"author": "Apache0ne",
"title": "ComfyUI-EasyUrlLoader [REMOVED]",
"id": "easy-url-loader",
"reference": "https://github.com/Apache0ne/ComfyUI-EasyUrlLoader",
"files": [
"https://github.com/Apache0ne/ComfyUI-EasyUrlLoader"
],
"install_type": "git-clone",
"description": "A simple YT downloader node for ComfyUI using video Urls. Can be used with VHS nodes etc."
},
{
"author": "nxt5656",
"title": "ComfyUI-Image2OSS [REMOVED]",
"reference": "https://github.com/nxt5656/ComfyUI-Image2OSS",
"files": [
"https://github.com/nxt5656/ComfyUI-Image2OSS"
],
"install_type": "git-clone",
"description": "Upload the image to Alibaba Cloud OSS."
},
{
"author": "ainewsto",
"title": "Comfyui_Comfly",
"reference": "https://github.com/ainewsto/Comfyui_Comfly",
"files": [
"https://github.com/ainewsto/Comfyui_Comfly"
],
"install_type": "git-clone",
"description": "NODES: Comfly_Mj, Comfly_mjstyle, Comfly_upload, Comfly_Mju, Comfly_Mjv, Comfly_kling_videoPreview\nNOTE: Comfyui_Comfly_v2 is introduced."
},
{
"author": "shinich39",
"title": "comfyui-to-inpaint",
"reference": "https://github.com/shinich39/comfyui-to-inpaint",
"files": [
"https://github.com/shinich39/comfyui-to-inpaint"
],
"install_type": "git-clone",
"description": "Send preview image to inpaint workflow."
},
{
"author": "magic-quill",
"title": "ComfyUI_MagicQuill [NOT MAINTAINED]",
"id": "MagicQuill",
"reference": "https://github.com/magic-quill/ComfyUI_MagicQuill",
"files": [
"https://github.com/magic-quill/ComfyUI_MagicQuill"
],
"install_type": "git-clone",
"description": "Towards GPT-4 like large language and visual assistant.\nNOTE: The current version has not been maintained for a long time and does not work. Please use https://github.com/brantje/ComfyUI_MagicQuill instead."
},
{
"author": "shinich39",
"title": "comfyui-event-handler [USAFE/REMOVED]",
"reference": "https://github.com/shinich39/comfyui-event-handler",
"files": [
"https://github.com/shinich39/comfyui-event-handler"
],
"install_type": "git-clone",
"description": "Javascript code will run when an event fires. [w/This node allows you to execute arbitrary JavaScript code as input for the workflow.]"
},
{
"author": "Moooonet",
"title": "ComfyUI-ArteMoon [REMOVED]",
"reference": "https://github.com/Moooonet/ComfyUI-ArteMoon",
"files": [
"https://github.com/Moooonet/ComfyUI-ArteMoon"
],
"install_type": "git-clone",
"description": "This plugin works with [a/IF_AI_Tools](https://github.com/if-ai/ComfyUI-IF_AI_tools) to build a workflow in ComfyUI that uses AI to assist in generating prompts."
},
{
"author": "ryanontheinside",
"title": "ComfyUI-MediaPipe-Vision [REMOVED]",
"reference": "https://github.com/ryanontheinside/ComfyUI-MediaPipe-Vision",
"files": [
"https://github.com/ryanontheinside/ComfyUI-MediaPipe-Vision"
],
"install_type": "git-clone",
"description": "A centralized wrapper of all MediaPipe vision tasks for ComfyUI."
},
{
"author": "shinich39",
"title": "comfyui-textarea-command [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-textarea-command",
"files": [
"https://github.com/shinich39/comfyui-textarea-command"
],
"install_type": "git-clone",
"description": "Add command and comment in textarea. (e.g. // Disabled line)"
},
{
"author": "shinich39",
"title": "comfyui-parse-image [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-parse-image",
"files": [
"https://github.com/shinich39/comfyui-parse-image"
],
"install_type": "git-clone",
"description": "Extract metadata from image."
},
{
"author": "shinich39",
"title": "comfyui-put-image [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-put-image",
"files": [
"https://github.com/shinich39/comfyui-put-image"
],
"install_type": "git-clone",
"description": "Load image from directory."
},
{
"author": "fredconex",
"title": "TripoSG Nodes for ComfyUI [REMOVED]",
"reference": "https://github.com/fredconex/ComfyUI-TripoSG",
"files": [
"https://github.com/fredconex/ComfyUI-TripoSG"
],
"install_type": "git-clone",
"description": "Created by Alfredo Fernandes inspired by Hunyuan3D nodes by Kijai. This extension adds TripoSG 3D mesh generation capabilities to ComfyUI, allowing you to generate 3D meshes from a single image using the TripoSG model."
},
{
"author": "fredconex",
"title": "ComfyUI-PaintTurbo [REMOVED]",
"reference": "https://github.com/fredconex/ComfyUI-PaintTurbo",
"files": [
"https://github.com/fredconex/ComfyUI-PaintTurbo"
],
"install_type": "git-clone",
"description": "NODES: Hunyuan3D Texture Mesh"
},
{
"author": "zhuanqianfish",
"title": "TaesdDecoder [REMOVED]",
"reference": "https://github.com/zhuanqianfish/TaesdDecoder",
"files": [
"https://github.com/zhuanqianfish/TaesdDecoder"
],
"install_type": "git-clone",
"description": "use TAESD decoded image.you need donwload taesd_decoder.pth and taesdxl_decoder.pth to vae_approx folder first.\n It will result in a slight loss of image quality and a significant decrease in peak video memory during decoding."
},
{
"author": "myAiLemon",
"title": "MagicAutomaticPicture [REMOVED]",
"reference": "https://github.com/myAiLemon/MagicAutomaticPicture",
"files": [
"https://github.com/myAiLemon/MagicAutomaticPicture"
],
"install_type": "git-clone",
"description": "A comfyui node package that can generate pictures and automatically save positive prompts and eliminate unwanted prompts"
},
{
"author": "thisiseddy-ab",
"title": "ComfyUI-Edins-Ultimate-Pack [REMOVED]",
"reference": "https://github.com/thisiseddy-ab/ComfyUI-Edins-Ultimate-Pack",
"files": [
"https://github.com/thisiseddy-ab/ComfyUI-Edins-Ultimate-Pack"
],
"install_type": "git-clone",
"description": "Well i needet a Tiled Ksampler that still works for Comfy UI there were none so i made one, in this Package i will put all Nodes i will develop for Comfy Ui still in beta alot will change.."
},
{
"author": "Davros666",
"title": "safetriggers [REMOVED]",
"reference": "https://github.com/Davros666/safetriggers",
"files": [
"https://github.com/Davros666/safetriggers"
],
"install_type": "git-clone",
"description": "ComfyUI Nodes for READING TRIGGERS, TRIGGER-WORDS, TRIGGER-PHRASES FROM LoRAs"
},
{
"author": "cubiq",
"title": "Simple Math [REMOVED]",
"id": "simplemath",
"reference": "https://github.com/cubiq/ComfyUI_SimpleMath",
"files": [
"https://github.com/cubiq/ComfyUI_SimpleMath"
],
"install_type": "git-clone",
"description": "custom node for ComfyUI to perform simple math operations"
},
{
"author": "lucafoscili",
"title": "LF Nodes [DEPRECATED]",
"reference": "https://github.com/lucafoscili/comfyui-lf",
"files": [
"https://github.com/lucafoscili/comfyui-lf"
],
"install_type": "git-clone",
"description": "Custom nodes with a touch of extra UX, including: history for primitives, JSON manipulation, logic switches with visual feedback, LLM chat... and more!"
},
{
"author": "AI2lab",
"title": "comfyUI-tool-2lab [REMOVED]",
"id": "tool-2lab",
"reference": "https://github.com/AI2lab/comfyUI-tool-2lab",
"files": [
"https://github.com/AI2lab/comfyUI-tool-2lab"
],
"install_type": "git-clone",
"description": "tool set for developing workflow and publish to web api server"
},
{
"author": "AI2lab",
"title": "comfyUI-DeepSeek-2lab [REMOVED]",
"id": "deepseek",
"reference": "https://github.com/AI2lab/comfyUI-DeepSeek-2lab",
"files": [
"https://github.com/AI2lab/comfyUI-DeepSeek-2lab"
],
"install_type": "git-clone",
"description": "Unofficial implementation of DeepSeek for ComfyUI"
},
{
"author": "AI2lab",
"title": "comfyUI-kling-api-2lab [REMOVED]",
"reference": "https://github.com/AI2lab/comfyUI-kling-api-2lab",
"files": [
"https://github.com/AI2lab/comfyUI-kling-api-2lab"
],
"install_type": "git-clone",
"description": "Unofficial implementation of KLing for ComfyUI"
},
{
"author": "ZhiHui6",
"title": "comfyui_zhihui_nodes [REMOVED]",
"reference": "https://github.com/ZhiHui6/comfyui_zhihui_nodes",
"files": [
"https://github.com/ZhiHui6/comfyui_zhihui_nodes"
],
"install_type": "git-clone",
"description": "NODES: Prompt Preset, Video Batch Loader, Video Combiner"
},
{
"author": "ImagineerNL",
"title": "comfyui_potrace_svg [REMOVED]",
"reference": "https://github.com/ImagineerNL/comfyui_potrace_svg",
"files": [
"https://github.com/ImagineerNL/comfyui_potrace_svg"
],
"install_type": "git-clone",
"description": "This project converts raster images into SVG format using the Potrace library."
},
{
"author": "kayselmecnun",
"title": "ComfyUI-Qwen-25-VL [REMOVED]",
"reference": "https://github.com/kayselmecnun/ComfyUI-Qwen-25-VL",
"files": [
"https://github.com/kayselmecnun/ComfyUI-Qwen-25-VL"
],
"install_type": "git-clone",
"description": "A custom Comfy UI node for using Qwen2.5-VL-3B/7B-Instruct models"
},
{
"author": "IfnotFr",
"title": "⚡ ComfyUI Connect [REMOVED]",
"reference": "https://github.com/IfnotFr/ComfyUI-Connect",
"files": [
"https://github.com/IfnotFr/ComfyUI-Connect"
],
"install_type": "git-clone",
"description": "Transform your ComfyUI into a powerful API, exposing all your saved workflows as ready-to-use HTTP endpoints."
},
{
"author": "ginlov",
"title": "segment_to_mask_comfyui [REMOVED]",
"reference": "https://github.com/ginlov/segment_to_mask_comfyui",
"files": [
"https://github.com/ginlov/segment_to_mask_comfyui"
],
"install_type": "git-clone",
"description": "Nodes:SegToMask"
},
{
"author": "TGu-97",
"title": "TGu Utilities [REMOVED]",
"id": "tgu",
"reference": "https://github.com/TGu-97/ComfyUI-TGu-utils",
"files": [
"https://github.com/TGu-97/ComfyUI-TGu-utils"
],
"install_type": "git-clone",
"description": "Nodes: MPN Switch, MPN Reroute, PN Switch. This is a set of custom nodes for ComfyUI. Mainly focus on control switches."
},
{
"author": "IfnotFr",
"title": "ComfyUI-Connect [REMOVED]",
"reference": "https://github.com/IfnotFr/ComfyUI-Connect",
"files": [
"https://github.com/IfnotFr/ComfyUI-Connect"
],
"install_type": "git-clone",
"description": "Transform your ComfyUI into a powerful API, exposing all your saved workflows as ready-to-use HTTP endpoints."
},
{
"author": "KurtHokke",
"title": "ComfyUI_KurtHokke-Nodes [REMOVED]",
"reference": "https://github.com/KurtHokke/ComfyUI_KurtHokke-Nodes",
"files": [
"https://github.com/KurtHokke/ComfyUI_KurtHokke-Nodes"
],
"install_type": "git-clone",
"description": "ComfyUI_KurtHokke-Nodes"
},
{
"author": "SpatialDeploy",
"title": "ComfyUI-Voxels [REMOVED]",
"reference": "https://github.com/SpatialDeploy/ComfyUI-Voxels",
"files": [
"https://github.com/SpatialDeploy/ComfyUI-Voxels"
],
"install_type": "git-clone",
"description": "Tools for creating voxel based videos"
},
{
"author": "shinich39",
"title": "comfyui-group-selection [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-group-selection",
"files": [
"https://github.com/shinich39/comfyui-group-selection"
],
"install_type": "git-clone",
"description": "Create a new group of nodes."
},
{
"author": "shinich39",
"title": "connect-from-afar [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-connect-from-afar",
"files": [
"https://github.com/shinich39/comfyui-connect-from-afar"
],
"install_type": "git-clone",
"description": "Connect a new link from out of screen."
},
{
"author": "shinich39",
"title": "comfyui-local-db [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-local-db",
"files": [
"https://github.com/shinich39/comfyui-local-db"
],
"install_type": "git-clone",
"description": "Store text to Key-Values pair json."
},
{
"author": "shinich39",
"title": "comfyui-model-db [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-model-db",
"files": [
"https://github.com/shinich39/comfyui-model-db"
],
"install_type": "git-clone",
"description": "Store settings by model."
},
{
"author": "shinich39",
"title": "comfyui-target-search [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-target-search",
"files": [
"https://github.com/shinich39/comfyui-target-search"
],
"install_type": "git-clone",
"description": "Move canvas to target on dragging connection."
},
{
"author": "chrisgoringe",
"title": "Image chooser [DEPRECATED]",
"id": "image-chooser",
"reference": "https://github.com/chrisgoringe/cg-image-picker",
"files": [
"https://github.com/chrisgoringe/cg-image-picker"
],
"install_type": "git-clone",
"description": "A custom node that pauses the flow while you choose which image (or latent) to pass on to the rest of the workflow."
},
{
"author": "weilin9999",
"title": "WeiLin-ComfyUI-prompt-all-in-one [DEPRECATED]",
"id": "prompt-all-in-one",
"reference": "https://github.com/weilin9999/WeiLin-ComfyUI-prompt-all-in-one",
"files": [
"https://github.com/weilin9999/WeiLin-ComfyUI-prompt-all-in-one"
],
"install_type": "git-clone",
"description": "Write prompt words like WebUI"
},
{
"author": "svetozarov",
"title": "AS_GeminiCaptioning Node [REMOVED]",
"reference": "https://github.com/svetozarov/AS_GeminiCaptioning",
"files": [
"https://github.com/svetozarov/AS_GeminiCaptioning"
],
"install_type": "git-clone",
"description": "A ComfyUI node that combines an image with simple text parameters to create a prompt, sends it to the Google Gemini API via the google-generativeai SDK, and returns the generated text response along with the original prompt and an execution log"
},
{
"author": "shinich39",
"title": "comfyui-load-image-in-seq [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-load-image-in-seq",
"files": [
"https://github.com/shinich39/comfyui-load-image-in-seq"
],
"install_type": "git-clone",
"description": "This node is load png image sequentially with metadata. Only supported for PNG format that has been created by ComfyUI.[w/renamed from comfyui-load-image-39. You need to remove previous one and reinstall to this.]"
},
{
"author": "shinich39",
"title": "comfyui-model-metadata [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-model-metadata",
"files": [
"https://github.com/shinich39/comfyui-model-metadata"
],
"install_type": "git-clone",
"description": "Print model metadata on note node"
},
{
"author": "shinich39",
"title": "comfyui-view-recommendations [REMOVED]",
"reference": "https://github.com/shinich39/comfyui-view-recommendations",
"files": [
"https://github.com/shinich39/comfyui-view-recommendations"
],
"install_type": "git-clone",
"description": "Load model generation data from civitai."
},
{
"author": "jonstreeter",
"title": "Comfyui-PySceneDetect [REMOVED]",
"reference": "https://github.com/jonstreeter/Comfyui-PySceneDetect",
"files": [
"https://github.com/jonstreeter/Comfyui-PySceneDetect"
],
"install_type": "git-clone",
"description": "NODES: PySceneDetect Video Processor"
},
{
"author": "muxueChen",
"title": "ComfyUI-NTQwen25-VL [REMOVED]",
"reference": "https://github.com/muxueChen/ComfyUI-NTQwen25-VL",
"files": [
"https://github.com/muxueChen/ComfyUI-NTQwen25-VL"
],
"install_type": "git-clone",
"description": "Qwen25-VL is a plugin for ComfyU"
},
{
"author": "Makki_Shizu",
"title": "ComfyUI-SaveAnimatedGIF [DEPRECATED]",
"id": "SaveAnimatedGIF",
"reference": "https://github.com/MakkiShizu/ComfyUI-SaveAnimatedGIF",
"files": [
"https://github.com/MakkiShizu/ComfyUI-SaveAnimatedGIF"
],
"install_type": "git-clone",
"description": "Save animated GIF format nodes in ComfyUI"
},
{
"author": "l1yongch1",
"title": "ComfyUI_PhiCaption [REMOVED]",
"reference": "https://github.com/l1yongch1/ComfyUI_PhiCaption",
"files": [
"https://github.com/l1yongch1/ComfyUI_PhiCaption"
],
"install_type": "git-clone",
"description": "In addition to achieving conventional single-image, single-round reverse engineering, it can also achieve single-image multi-round and multi-image single-round reverse engineering. Moreover, the Phi model has a better understanding of prompts."
},
{
"author": "nova-florealis",
"title": "comfyui-alien [REMOVED]",
"reference": "https://github.com/nova-florealis/comfyui-alien",
"files": [
"https://github.com/nova-florealis/comfyui-alien"
],
"install_type": "git-clone",
"description": "NODES: Text to Text (LLM), Text Output, Convert to Markdown, List Display (Debug)"
},
{
"author": "PluMaZero",
"title": "ComfyUI-SpaceFlower [REMOVED]",
"reference": "https://github.com/PluMaZero/ComfyUI-SpaceFlower",
"files": [
"https://github.com/PluMaZero/ComfyUI-SpaceFlower"
],
"install_type": "git-clone",
"description": "Nodes: SpaceFlower_Prompt, SpaceFlower_HangulPrompt, ..."
},
{
"author": "vahidzxc",
"title": "ComfyUI-My-Handy-Nodes [REMOVED]",
"reference": "https://github.com/vahidzxc/ComfyUI-My-Handy-Nodes",
"files": [
"https://github.com/vahidzxc/ComfyUI-My-Handy-Nodes"
],
"install_type": "git-clone",
"description": "NODES:VahCropImage"
},
{ {
"author": "Samulebotin", "author": "Samulebotin",
"title": "ComfyUI-FreeVC_wrapper [REMOVED]", "title": "ComfyUI-FreeVC_wrapper [REMOVED]",

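For reference, every entry in this list follows the same shape: `files` holds the repository URL(s) and `install_type` tells the installer how to fetch them. Below is a minimal sketch of how a `git-clone` entry could be installed; the function name and target directory are illustrative only, and the real installer in ComfyUI-Manager additionally handles security checks, dependency installation, and CNR packages.

```python
import os
import subprocess

def install_git_clone_entry(entry: dict, custom_nodes_dir: str = "custom_nodes") -> str:
    """Clone a custom-node-list entry with install_type 'git-clone' into custom_nodes/."""
    assert entry.get("install_type") == "git-clone"
    repo_url = entry["files"][0]
    # Derive the target folder name from the repository URL.
    target = os.path.join(custom_nodes_dir, repo_url.rstrip("/").split("/")[-1])
    if not os.path.isdir(target):
        subprocess.run(["git", "clone", repo_url, target], check=True)
    return target

# Example with one of the entries listed above:
install_git_clone_entry({
    "author": "shinich39",
    "title": "comfyui-to-inpaint",
    "files": ["https://github.com/shinich39/comfyui-to-inpaint"],
    "install_type": "git-clone",
})
```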
View File

File diff suppressed because it is too large

View File

File diff suppressed because it is too large

View File

@@ -1,309 +1,5 @@
{ {
"models": [ "models": [
{
"name": "Latent Bridge Matching for Image Relighting",
"type": "diffusion_model",
"base": "LBM",
"save_path": "diffusion_models/LBM",
"description": "Latent Bridge Matching (LBM) Relighting model",
"reference": "https://huggingface.co/jasperai/LBM_relighting",
"filename": "LBM_relighting.safetensors",
"url": "https://huggingface.co/jasperai/LBM_relighting/resolve/main/model.safetensors",
"size": "5.02GB"
},
{
"name": "LTX-Video 13B Distilled v0.9.7",
"type": "checkpoint",
"base": "LTX-Video",
"save_path": "checkpoints/LTXV",
"description": "Distilled version of the LTX-Video 13B model, providing improved efficiency while maintaining high-resolution quality.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-13b-0.9.7-distilled.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-13b-0.9.7-distilled.safetensors",
"size": "28.6GB"
},
{
"name": "LTX-Video 13B Distilled FP8 v0.9.7",
"type": "checkpoint",
"base": "LTX-Video",
"save_path": "checkpoints/LTXV",
"description": "Quantized distilled version of the LTX-Video 13B model, optimized for even lower VRAM usage while maintaining quality.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-13b-0.9.7-distilled-fp8.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-13b-0.9.7-distilled-fp8.safetensors",
"size": "15.7GB"
},
{
"name": "LTX-Video 13B Distilled LoRA v0.9.7",
"type": "lora",
"base": "LTX-Video",
"save_path": "loras",
"description": "A LoRA adapter that transforms the standard LTX-Video 13B model into a distilled version when loaded.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-13b-0.9.7-distilled-lora128.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-13b-0.9.7-distilled-lora128.safetensors",
"size": "1.33GB"
},
{
"name": "lllyasviel/FramePackI2V_HY",
"type": "FramePackI2V",
"base": "FramePackI2V",
"save_path": "diffusers/lllyasviel",
"description": "[SNAPSHOT] This is the f1k1_x_g9_f1k1f2k2f16k4_td FramePack for HY. [w/You cannot download this item on ComfyUI-Manager versions below V3.18]",
"reference": "https://huggingface.co/lllyasviel/FramePackI2V_HY",
"filename": "<huggingface>",
"url": "lllyasviel/FramePackI2V_HY",
"size": "25.75GB"
},
{
"name": "LTX-Video Spatial Upscaler v0.9.7",
"type": "checkpoint",
"base": "LTX-Video",
"save_path": "checkpoints/LTXV",
"description": "Spatial upscaler model for LTX-Video. This model enhances the spatial resolution of generated videos.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-spatial-upscaler-0.9.7.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-spatial-upscaler-0.9.7.safetensors",
"size": "505MB"
},
{
"name": "LTX-Video Temporal Upscaler v0.9.7",
"type": "checkpoint",
"base": "LTX-Video",
"save_path": "checkpoints/LTXV",
"description": "Temporal upscaler model for LTX-Video. This model enhances the temporal resolution and smoothness of generated videos.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-temporal-upscaler-0.9.7.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-temporal-upscaler-0.9.7.safetensors",
"size": "524MB"
},
{
"name": "LTX-Video 13B v0.9.7",
"type": "checkpoint",
"base": "LTX-Video",
"save_path": "checkpoints/LTXV",
"description": "High-resolution quality LTX-Video 13B model.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-13b-0.9.7-dev.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-13b-0.9.7-dev.safetensors",
"size": "28.6GB"
},
{
"name": "LTX-Video 13B FP8 v0.9.7",
"type": "checkpoint",
"base": "LTX-Video",
"save_path": "checkpoints/LTXV",
"description": "Quantized version of the LTX-Video 13B model, optimized for lower VRAM usage while maintaining high quality.",
"reference": "https://huggingface.co/Lightricks/LTX-Video",
"filename": "ltxv-13b-0.9.7-dev-fp8.safetensors",
"url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltxv-13b-0.9.7-dev-fp8.safetensors",
"size": "15.7GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 480p 14B (bf16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 480p 14B (bf16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_480p_14B_bf16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors",
"size": "32.8GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 480p 14B (fp16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 480p 14B (fp16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_480p_14B_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp16.safetensors",
"size": "32.8GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 480p 14B (fp8_e4m3fn)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 480p 14B (fp8_e4m3fn)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors",
"size": "16.4GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 480p 14B (fp8_scaled)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 480p 14B (fp8_scaled)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_480p_14B_fp8_scaled.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_scaled.safetensors",
"size": "16.4GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 720p 14B (bf16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 720p 14B (bf16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_720p_14B_bf16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_bf16.safetensors",
"size": "32.8GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 720p 14B (fp16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 720p 14B (fp16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_720p_14B_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_fp16.safetensors",
"size": "32.8GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 720p 14B (fp8_e4m3fn)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 720p 14B (fp8_e4m3fn)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors",
"size": "16.4GB"
},
{
"name": "Comfy-Org/Wan2.1 i2v 720p 14B (fp8_scaled)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for i2v 720p 14B (fp8_scaled)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_i2v_720p_14B_fp8_scaled.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_fp8_scaled.safetensors",
"size": "16.4GB"
},
{
"name": "Comfy-Org/clip_vision_h.safetensors",
"type": "clip_vision",
"base": "clip_vision_h",
"save_path": "clip_vision",
"description": "clip_vision_h model for Wan2.1",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "clip_vision_h.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/clip_vision/clip_vision_h.safetensors",
"size": "1.26GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 1.3B (bf16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 1.3B (bf16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_1.3B_bf16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_1.3B_bf16.safetensors",
"size": "2.84GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 1.3B (fp16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 1.3B (fp16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_1.3B_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_1.3B_fp16.safetensors",
"size": "2.84GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 14B (bf16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 14B (bf16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_14B_bf16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_bf16.safetensors",
"size": "28.6GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 14B (fp16)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 14B (fp16)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_14B_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_fp16.safetensors",
"size": "28.6GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 14B (fp8_e4m3fn)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 14B (fp8_e4m3fn)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_14B_fp8_e4m3fn.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_fp8_e4m3fn.safetensors",
"size": "14.3GB"
},
{
"name": "Comfy-Org/Wan2.1 t2v 14B (fp8_scaled)",
"type": "diffusion_model",
"base": "Wan2.1",
"save_path": "diffusion_models/Wan2.1",
"description": "Wan2.1 difussion model for t2v 14B (fp8_scaled)",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan2.1_t2v_14B_fp8_scaled.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_fp8_scaled.safetensors",
"size": "14.3GB"
},
{
"name": "Comfy-Org/Wan2.1 VAE",
"type": "vae",
"base": "Wan2.1",
"save_path": "vae",
"description": "Wan2.1 VAE model",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "wan_2.1_vae.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors",
"size": "254MB"
},
{
"name": "Comfy-Org/umt5_xxl_fp16.safetensors",
"type": "clip",
"base": "umt5_xxl",
"save_path": "text_encoders",
"description": "umt5_xxl_fp16 text encoder for Wan2.1",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "umt5_xxl_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp16.safetensors",
"size": "11.4GB"
},
{
"name": "Comfy-Org/umt5_xxl_fp8_e4m3fn_scaled.safetensors",
"type": "clip",
"base": "umt5_xxl",
"save_path": "text_encoders",
"description": "umt5_xxl_fp8_e4m3fn_scaled text encoder for Wan2.1",
"reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged",
"filename": "umt5_xxl_fp8_e4m3fn_scaled.safetensors",
"url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors",
"size": "6.74GB"
},
{ {
"name": "Comfy-Org/hunyuan_video_image_to_video_720p_bf16.safetensors", "name": "Comfy-Org/hunyuan_video_image_to_video_720p_bf16.safetensors",
"type": "diffusion_model", "type": "diffusion_model",
@@ -691,6 +387,349 @@
"filename": "sigclip_vision_patch14_384.safetensors", "filename": "sigclip_vision_patch14_384.safetensors",
"url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors",
"size": "857MB" "size": "857MB"
},
{
"name": "comfyanonymous/flux_text_encoders - t5xxl (fp16)",
"type": "clip",
"base": "t5",
"save_path": "text_encoders/t5",
"description": "Text Encoders for FLUX (fp16)",
"reference": "https://huggingface.co/comfyanonymous/flux_text_encoders",
"filename": "t5xxl_fp16.safetensors",
"url": "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp16.safetensors",
"size": "9.79GB"
},
{
"name": "comfyanonymous/flux_text_encoders - t5xxl (fp8_e4m3fn)",
"type": "clip",
"base": "t5",
"save_path": "text_encoders/t5",
"description": "Text Encoders for FLUX (fp8_e4m3fn)",
"reference": "https://huggingface.co/comfyanonymous/flux_text_encoders",
"filename": "t5xxl_fp8_e4m3fn.safetensors",
"url": "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn.safetensors",
"size": "4.89GB"
},
{
"name": "comfyanonymous/flux_text_encoders - t5xxl (fp8_e4m3fn_scaled)",
"type": "clip",
"base": "t5",
"save_path": "text_encoders/t5",
"description": "Text Encoders for FLUX (fp16)",
"reference": "https://huggingface.co/comfyanonymous/flux_text_encoders",
"filename": "t5xxl_fp8_e4m3fn_scaled.safetensors",
"url": "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn_scaled.safetensors",
"size": "5.16GB"
},
{
"name": "FLUX.1 [Dev] Diffusion model (scaled fp8)",
"type": "diffusion_model",
"base": "FLUX.1",
"save_path": "diffusion_models/FLUX1",
"description": "FLUX.1 [Dev] Diffusion model (scaled fp8)[w/Due to the large size of the model, it is recommended to download it through a browser if possible.]",
"reference": "https://huggingface.co/comfyanonymous/flux_dev_scaled_fp8_test",
"filename": "flux_dev_fp8_scaled_diffusion_model.safetensors",
"url": "https://huggingface.co/comfyanonymous/flux_dev_scaled_fp8_test/resolve/main/flux_dev_fp8_scaled_diffusion_model.safetensors",
"size": "11.9GB"
},
{
"name": "kijai/MoGe_ViT_L_fp16.safetensors",
"type": "MoGe",
"base": "MoGe",
"save_path": "MoGe",
"description": "Safetensors versions of [a/https://github.com/microsoft/MoGe](https://github.com/microsoft/MoGe)",
"reference": "https://huggingface.co/Kijai/MoGe_safetensors",
"filename": "MoGe_ViT_L_fp16.safetensors",
"url": "https://huggingface.co/Kijai/MoGe_safetensors/resolve/main/MoGe_ViT_L_fp16.safetensors",
"size": "628MB"
},
{
"name": "kijai/MoGe_ViT_L_fp16.safetensors",
"type": "MoGe",
"base": "MoGe",
"save_path": "MoGe",
"description": "Safetensors versions of [a/https://github.com/microsoft/MoGe](https://github.com/microsoft/MoGe)",
"reference": "https://huggingface.co/Kijai/MoGe_safetensors",
"filename": "MoGe_ViT_L_fp16.safetensors",
"url": "https://huggingface.co/Kijai/MoGe_safetensors/resolve/main/MoGe_ViT_L_fp16.safetensors",
"size": "1.26GB"
},
{
"name": "pulid_flux_v0.9.1.safetensors",
"type": "PuLID",
"base": "FLUX",
"save_path": "pulid",
"description": "This is required for PuLID (FLUX)",
"reference": "https://huggingface.co/guozinan/PuLID",
"filename": "pulid_flux_v0.9.1.safetensors",
"url": "https://huggingface.co/guozinan/PuLID/resolve/main/pulid_flux_v0.9.1.safetensors",
"size": "1.14GB"
},
{
"name": "pulid_v1.1.safetensors",
"type": "PuLID",
"base": "SDXL",
"save_path": "pulid",
"description": "This is required for PuLID (SDXL)",
"reference": "https://huggingface.co/guozinan/PuLID",
"filename": "pulid_v1.1.safetensors",
"url": "https://huggingface.co/guozinan/PuLID/resolve/main/pulid_v1.1.safetensors",
"size": "984MB"
},
{
"name": "Kolors-IP-Adapter-Plus.bin (Kwai-Kolors/Kolors-IP-Adapter-Plus)",
"type": "IP-Adapter",
"base": "Kolors",
"save_path": "ipadapter",
"description": "You can use this model in the [a/ComfyUI IPAdapter plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.",
"reference": "https://huggingface.co/Kwai-Kolors/Kolors-IP-Adapter-Plus",
"filename": "Kolors-IP-Adapter-Plus.bin",
"url": "https://huggingface.co/Kwai-Kolors/Kolors-IP-Adapter-Plus/resolve/main/ip_adapter_plus_general.bin",
"size": "1.01GB"
},
{
"name": "Kolors-IP-Adapter-FaceID-Plus.bin (Kwai-Kolors/Kolors-IP-Adapter-Plus)",
"type": "IP-Adapter",
"base": "Kolors",
"save_path": "ipadapter",
"description": "You can use this model in the [a/ComfyUI IPAdapter plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.",
"reference": "https://huggingface.co/Kwai-Kolors/Kolors-IP-Adapter-FaceID-Plus",
"filename": "Kolors-IP-Adapter-FaceID-Plus.bin",
"url": "https://huggingface.co/Kwai-Kolors/Kolors-IP-Adapter-FaceID-Plus/resolve/main/ipa-faceid-plus.bin",
"size": "2.39GB"
},
{
"name": "CLIPVision model (Kwai-Kolors/Kolors-IP-Adapter-Plus/clip-vit-large)",
"type": "clip_vision",
"base": "ViT-L",
"save_path": "clip_vision",
"description": "CLIPVision model (This is required in cubiq/ComfyUI_IPAdapter_plus)",
"reference": "https://huggingface.co/Kwai-Kolors/Kolors-IP-Adapter-Plus",
"filename": "clip-vit-large-patch14-336.bin",
"url": "https://huggingface.co/Kwai-Kolors/Kolors-IP-Adapter-Plus/resolve/main/image_encoder/pytorch_model.bin",
"size": "1.71GB"
},
{
"name": "kijai/lotus depth d model v1.1 (fp16)",
"type": "diffusion_model",
"base": "lotus",
"save_path": "diffusion_models",
"description": "lotus depth d model v1.1 (fp16). This model can be used in ComfyUI-Lotus custom nodes.",
"reference": "https://huggingface.co/Kijai/lotus-comfyui",
"filename": "lotus-depth-d-v-1-1-fp16.safetensors",
"url": "https://huggingface.co/Kijai/lotus-comfyui/resolve/main/lotus-depth-d-v-1-1-fp16.safetensors",
"size": "1.74GB"
},
{
"name": "kijai/lotus depth g model v1.0 (fp16)",
"type": "diffusion_model",
"base": "lotus",
"save_path": "diffusion_models",
"description": "lotus depth g model v1.0 (fp16). This model can be used in ComfyUI-Lotus custom nodes.",
"reference": "https://huggingface.co/Kijai/lotus-comfyui",
"filename": "lotus-depth-g-v1-0-fp16.safetensors",
"url": "https://huggingface.co/Kijai/lotus-comfyui/resolve/main/lotus-depth-g-v1-0-fp16.safetensors",
"size": "1.74GB"
},
{
"name": "kijai/lotus depth g model v1.0",
"type": "diffusion_model",
"base": "lotus",
"save_path": "diffusion_models",
"description": "lotus depth g model v1.0. This model can be used in ComfyUI-Lotus custom nodes.",
"reference": "https://huggingface.co/Kijai/lotus-comfyui",
"filename": "lotus-depth-g-v1-0.safetensors",
"url": "https://huggingface.co/Kijai/lotus-comfyui/resolve/main/lotus-depth-g-v1-0.safetensors",
"size": "3.47GB"
},
{
"name": "kijai/lotus normal d model v1.0 (fp16)",
"type": "diffusion_model",
"base": "lotus",
"save_path": "diffusion_models",
"description": "lotus normal d model v1.0 (fp16). This model can be used in ComfyUI-Lotus custom nodes.",
"reference": "https://huggingface.co/Kijai/lotus-comfyui",
"filename": "lotus-normal-d-v1-0-fp16.safetensors",
"url": "https://huggingface.co/Kijai/lotus-comfyui/resolve/main/lotus-normal-d-v1-0-fp16.safetensors",
"size": "1.74GB"
},
{
"name": "kijai/lotus normal d model v1.0",
"type": "diffusion_model",
"base": "lotus",
"save_path": "diffusion_models",
"description": "lotus normal d model v1.0. This model can be used in ComfyUI-Lotus custom nodes.",
"reference": "https://huggingface.co/Kijai/lotus-comfyui",
"filename": "lotus-normal-d-v1-0.safetensors",
"url": "https://huggingface.co/Kijai/lotus-comfyui/resolve/main/lotus-normal-d-v1-0.safetensors",
"size": "3.47GB"
},
{
"name": "kijai/lotus normal g model v1.0 (fp16)",
"type": "diffusion_model",
"base": "lotus",
"save_path": "diffusion_models",
"description": "lotus normal g model v1.0 (fp16). This model can be used in ComfyUI-Lotus custom nodes.",
"reference": "https://huggingface.co/Kijai/lotus-comfyui",
"filename": "lotus-normal-g-v1-0-fp16.safetensors",
"url": "https://huggingface.co/Kijai/lotus-comfyui/resolve/main/lotus-normal-g-v1-0-fp16.safetensors",
"size": "1.74GB"
},
{
"name": "kijai/lotus normal g model v1.0",
"type": "diffusion_model",
"base": "lotus",
"save_path": "diffusion_models",
"description": "lotus normal g model v1.0. This model can be used in ComfyUI-Lotus custom nodes.",
"reference": "https://huggingface.co/Kijai/lotus-comfyui",
"filename": "lotus-normal-g-v1-0.safetensors",
"url": "https://huggingface.co/Kijai/lotus-comfyui/resolve/main/lotus-normal-g-v1-0.safetensors",
"size": "3.47GB"
},
{
"name": "Depth Pro model",
"type": "depth-pro",
"base": "depth-pro",
"save_path": "depth/ml-depth-pro",
"description": "Depth pro model for [a/ComfyUI-Depth-Pro](https://github.com/spacepxl/ComfyUI-Depth-Pro)",
"reference": "https://huggingface.co/spacepxl/ml-depth-pro",
"filename": "depth_pro.fp16.safetensors",
"url": "https://huggingface.co/spacepxl/ml-depth-pro/resolve/main/depth_pro.fp16.safetensors",
"size": "1.9GB"
},
{
"name": "jasperai/FLUX.1-dev-Controlnet-Upscaler",
"type": "controlnet",
"base": "FLUX.1",
"save_path": "controlnet/FLUX.1/jasperai-dev-Upscaler",
"description": "This is Flux.1-dev ControlNet for low resolution images developed by Jasper research team.",
"reference": "https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Upscaler",
"filename": "diffusion_pytorch_model.safetensors",
"url": "https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Upscaler/resolve/main/diffusion_pytorch_model.safetensors",
"size": "3.58GB"
},
{
"name": "jasperai/FLUX.1-dev-Controlnet-Depth",
"type": "controlnet",
"base": "FLUX.1",
"save_path": "controlnet/FLUX.1/jasperai-dev-Depth",
"description": "This is Flux.1-dev ControlNet for Depth map developed by Jasper research team.",
"reference": "https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Depth",
"filename": "diffusion_pytorch_model.safetensors",
"url": "https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Depth/resolve/main/diffusion_pytorch_model.safetensors",
"size": "3.58GB"
},
{
"name": "jasperai/Flux.1-dev-Controlnet-Surface-Normals",
"type": "controlnet",
"base": "FLUX.1",
"save_path": "controlnet/FLUX.1/jasperai-dev-Surface-Normals",
"description": "This is Flux.1-dev ControlNet for Surface Normals map developed by Jasper research team.",
"reference": "https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Surface-Normals",
"filename": "diffusion_pytorch_model.safetensors",
"url": "https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Surface-Normals/resolve/main/diffusion_pytorch_model.safetensors",
"size": "3.58GB"
},
{
"name": "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro (fp8_e4m3fn) by Kijai",
"type": "controlnet",
"base": "FLUX.1",
"save_path": "controlnet/FLUX.1",
"description": "FLUX.1 [Dev] Union Controlnet. Supports Canny, Tile, Depth, Blur, Pose, Gray, Low Quality\nVersion quantized to fp8_e4m3fn by Kijai",
"reference": "https://huggingface.co/Kijai/flux-fp8",
"filename": "flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors",
"url": "https://huggingface.co/Kijai/flux-fp8/resolve/main/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors",
"size": "3.3GB"
},
{
"name": "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors [Long CLIP L]",
"type": "clip",
"base": "clip",
"save_path": "text_encoders/long_clip",
"description": "Greatly improved TEXT + Detail (as CLIP-L for Flux.1)",
"reference": "https://huggingface.co/zer0int",
"filename": "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
"url": "https://huggingface.co/zer0int/CLIP-GmP-ViT-L-14/resolve/main/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
"size": "931MB"
},
{
"name": "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors [Long CLIP L]",
"type": "clip",
"base": "clip",
"save_path": "text_encoders/long_clip",
"description": "Greatly improved TEXT + Detail (as CLIP-L for Flux.1)",
"reference": "https://huggingface.co/zer0int",
"filename": "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors",
"url": "https://huggingface.co/zer0int/CLIP-GmP-ViT-L-14/resolve/main/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors",
"size": "323MB"
},
{
"name": "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
"type": "controlnet",
"base": "FLUX.1",
"save_path": "controlnet/FLUX.1/Shakker-Labs-ControlNet-Union-Pro",
"description": "FLUX.1 [Dev] Union Controlnet. Supports Canny, Tile, Depth, Blur, Pose, Gray, Low Quality",
"reference": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
"filename": "diffusion_pytorch_model.safetensors",
"url": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro/resolve/main/diffusion_pytorch_model.safetensors",
"size": "6.6GB"
},
{
"name": "Hyper-SD LoRA (8steps) - FLUX.1 [Dev]",
"type": "lora",
"base": "FLUX.1",
"save_path": "loras/HyperSD/FLUX.1",
"description": "Hyper-SD LoRA (8steps) - FLUX.1 [Dev]",
"reference": "https://huggingface.co/ByteDance/Hyper-SD",
"filename": "Hyper-FLUX.1-dev-8steps-lora.safetensors",
"url": "https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-FLUX.1-dev-8steps-lora.safetensors",
"size": "1.39GB"
},
{
"name": "Hyper-SD LoRA (16steps) - FLUX.1 [Dev]",
"type": "lora",
"base": "FLUX.1",
"save_path": "loras/HyperSD/FLUX.1",
"description": "Hyper-SD LoRA (16steps) - FLUX.1 [Dev]",
"reference": "https://huggingface.co/ByteDance/Hyper-SD",
"filename": "Hyper-FLUX.1-dev-16steps-lora.safetensors",
"url": "https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-FLUX.1-dev-16steps-lora.safetensors",
"size": "1.39GB"
},
{
"name": "DMD2 LoRA (4steps)",
"type": "lora",
"base": "SDXL",
"save_path": "loras/DMD2",
"description": "DMD2 LoRA (4steps)",
"reference": "https://huggingface.co/tianweiy/DMD2",
"filename": "dmd2_sdxl_4step_lora.safetensors",
"url": "https://huggingface.co/tianweiy/DMD2/resolve/main/dmd2_sdxl_4step_lora.safetensors",
"size": "787MB"
},
{
"name": "DMD2 LoRA (4steps/fp16)",
"type": "lora",
"base": "SDXL",
"save_path": "loras/DMD2",
"description": "DMD2 LoRA (4steps/fp16)",
"reference": "https://huggingface.co/tianweiy/DMD2",
"filename": "dmd2_sdxl_4step_lora_fp16.safetensors",
"url": "https://huggingface.co/tianweiy/DMD2/resolve/main/dmd2_sdxl_4step_lora_fp16.safetensors",
"size": "394MB"
} }
] ]
} }

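Each model entry above pairs a download `url` with a `save_path` and `filename`. Below is a minimal sketch of resolving such an entry to a local file, assuming a plain HTTP download; the actual downloader also supports aria2 and whole-repository snapshots (the `<huggingface>` filename convention used by the FramePackI2V entry), which this sketch ignores.

```python
import os
import urllib.request

def download_model_entry(entry: dict, models_root: str = "models") -> str:
    """Download a model-list entry to <models_root>/<save_path>/<filename>."""
    target_dir = os.path.join(models_root, entry["save_path"])
    os.makedirs(target_dir, exist_ok=True)
    target = os.path.join(target_dir, entry["filename"])
    if not os.path.exists(target):
        urllib.request.urlretrieve(entry["url"], target)
    return target

# Example with one of the entries above:
download_model_entry({
    "name": "Comfy-Org/Wan2.1 VAE",
    "save_path": "vae",
    "filename": "wan_2.1_vae.safetensors",
    "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors",
})
```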
View File

@@ -1,15 +1,5 @@
{ {
"custom_nodes": [ "custom_nodes": [
{
"author": "Comfy-Org",
"title": "ComfyUI React Extension Template",
"reference": "https://github.com/Comfy-Org/ComfyUI-React-Extension-Template",
"files": [
"https://github.com/Comfy-Org/ComfyUI-React-Extension-Template"
],
"install_type": "git-clone",
"description": "A minimal template for creating React/TypeScript frontend extensions for ComfyUI, with complete boilerplate setup including internationalization and unit testing."
},
{ {
"author": "Suzie1", "author": "Suzie1",
"title": "Guide To Making Custom Nodes in ComfyUI", "title": "Guide To Making Custom Nodes in ComfyUI",
@@ -321,16 +311,6 @@
], ],
"description": "ComfyUI node for creating some Turtle Graphic demos.", "description": "ComfyUI node for creating some Turtle Graphic demos.",
"install_type": "git-clone" "install_type": "git-clone"
},
{
"author": "cozy-comfyui",
"title": "cozy_ex_dynamic",
"reference": "https://github.com/cozy-comfyui/cozy_ex_dynamic",
"files": [
"https://github.com/cozy-comfyui/cozy_ex_dynamic"
],
"description": "Dynamic Node examples for ComfyUI",
"install_type": "git-clone"
} }
] ]
} }

View File

@@ -21,27 +21,24 @@ import cm_global
import manager_downloader import manager_downloader
import folder_paths import folder_paths
manager_util.add_python_path_to_env() import datetime
if hasattr(datetime, 'datetime'):
import datetime as dt from datetime import datetime
if hasattr(dt, 'datetime'):
from datetime import datetime as dt_datetime
def current_timestamp(): def current_timestamp():
return dt_datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] return datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
else: else:
# NOTE: Occurs in some Mac environments. # NOTE: Occurs in some Mac environments.
import time import time
logging.error(f"[ComfyUI-Manager] fallback timestamp mode\n datetime module is invalid: '{dt.__file__}'") logging.error(f"[ComfyUI-Manager] fallback timestamp mode\n datetime module is invalid: '{datetime.__file__}'")
def current_timestamp(): def current_timestamp():
return str(time.time()).split('.')[0] return str(time.time()).split('.')[0]
security_check.security_check() security_check.security_check()
cm_global.pip_blacklist = {'torch', 'torchaudio', 'torchsde', 'torchvision'} manager_util.add_python_path_to_env()
cm_global.pip_downgrade_blacklist = ['torch', 'torchaudio', 'torchsde', 'torchvision', 'transformers', 'safetensors', 'kornia']
cm_global.pip_blacklist = {'torch', 'torchsde', 'torchvision'}
cm_global.pip_downgrade_blacklist = ['torch', 'torchsde', 'torchvision', 'transformers', 'safetensors', 'kornia']
def skip_pip_spam(x): def skip_pip_spam(x):
@@ -121,17 +118,12 @@ read_config()
read_uv_mode() read_uv_mode()
check_file_logging() check_file_logging()
if sys.version_info < (3, 13): cm_global.pip_overrides = {'numpy': 'numpy<2', 'ultralytics': 'ultralytics==8.3.40'}
cm_global.pip_overrides = {'numpy': 'numpy<2'}
else:
cm_global.pip_overrides = {}
if os.path.exists(manager_pip_overrides_path): if os.path.exists(manager_pip_overrides_path):
with open(manager_pip_overrides_path, 'r', encoding="UTF-8", errors="ignore") as json_file: with open(manager_pip_overrides_path, 'r', encoding="UTF-8", errors="ignore") as json_file:
cm_global.pip_overrides = json.load(json_file) cm_global.pip_overrides = json.load(json_file)
cm_global.pip_overrides['numpy'] = 'numpy<2'
if sys.version_info < (3, 13): cm_global.pip_overrides['ultralytics'] = 'ultralytics==8.3.40' # for security
cm_global.pip_overrides['numpy'] = 'numpy<2'
if os.path.exists(manager_pip_blacklist_path): if os.path.exists(manager_pip_blacklist_path):
@@ -626,7 +618,6 @@ def execute_lazy_install_script(repo_path, executable):
lines = manager_util.robust_readlines(requirements_path) lines = manager_util.robust_readlines(requirements_path)
for line in lines: for line in lines:
package_name = remap_pip_package(line.strip()) package_name = remap_pip_package(line.strip())
package_name = package_name.split('#')[0].strip()
if package_name and not is_installed(package_name): if package_name and not is_installed(package_name):
if '--index-url' in package_name: if '--index-url' in package_name:
s = package_name.split('--index-url') s = package_name.split('--index-url')
@@ -698,6 +689,14 @@ def execute_lazy_cnr_switch(target, zip_url, from_path, to_path, no_deps, custom
file.write('\n'.join(list(extracted))) file.write('\n'.join(list(extracted)))
def execute_migration(moves):
import shutil
for x in moves:
if os.path.exists(x[0]) and not os.path.exists(x[1]):
shutil.move(x[0], x[1])
print(f"[ComfyUI-Manager] MIGRATION: '{x[0]}' -> '{x[1]}'")
script_executed = False script_executed = False
def execute_startup_script(): def execute_startup_script():
@@ -755,6 +754,9 @@ def execute_startup_script():
execute_lazy_cnr_switch(script[0], script[2], script[3], script[4], script[5], script[6]) execute_lazy_cnr_switch(script[0], script[2], script[3], script[4], script[5], script[6])
execute_lazy_install_script(script[3], script[7]) execute_lazy_install_script(script[3], script[7])
elif script[1] == "#LAZY-MIGRATION":
execute_migration(script[2])
elif script[1] == "#LAZY-DELETE-NODEPACK": elif script[1] == "#LAZY-DELETE-NODEPACK":
execute_lazy_delete(script[2]) execute_lazy_delete(script[2])

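The new `execute_migration` helper added in the diff above takes a list of `(source, destination)` pairs and moves each path only when the source exists and the destination does not. A self-contained sketch mirroring that behavior, with a hypothetical move list, looks like this:

```python
import os
import shutil

def execute_migration(moves):
    """Move each (src, dst) pair if src exists and dst does not, mirroring the diff above."""
    for src, dst in moves:
        if os.path.exists(src) and not os.path.exists(dst):
            shutil.move(src, dst)
            print(f"[ComfyUI-Manager] MIGRATION: '{src}' -> '{dst}'")

# Hypothetical move list for illustration; the real list is supplied by
# "#LAZY-MIGRATION" entries in the startup script.
execute_migration([
    ("custom_nodes/OldNodePack", "custom_nodes/old-nodepack"),
])
```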
View File

@@ -1,7 +1,7 @@
[project] [project]
name = "comfyui-manager" name = "comfyui-manager"
description = "ComfyUI-Manager provides features to install and manage custom nodes for ComfyUI, as well as various functionalities to assist with ComfyUI." description = "ComfyUI-Manager provides features to install and manage custom nodes for ComfyUI, as well as various functionalities to assist with ComfyUI."
version = "3.32.3" version = "3.30.3"
license = { file = "LICENSE.txt" } license = { file = "LICENSE.txt" }
dependencies = ["GitPython", "PyGithub", "matrix-client==0.4.0", "transformers", "huggingface-hub>0.20", "typer", "rich", "typing-extensions", "toml", "uv", "chardet"] dependencies = ["GitPython", "PyGithub", "matrix-client==0.4.0", "transformers", "huggingface-hub>0.20", "typer", "rich", "typing-extensions", "toml", "uv", "chardet"]

tests-api/.gitignore
View File

@@ -1,19 +0,0 @@
# Python cache files
__pycache__/
*.py[cod]
*$py.class
# Pytest cache
.pytest_cache/
# Coverage reports
.coverage
htmlcov/
# Virtual environments
venv/
env/
ENV/
# Test-specific resources
resources/tmp/

View File

@@ -1,91 +0,0 @@
# ComfyUI-Manager API Tests
This directory contains tests for the ComfyUI-Manager API endpoints, validating the OpenAPI specification and ensuring API functionality.
## Setup
1. Install test dependencies:
```bash
pip install -r requirements-test.txt
```
2. Ensure ComfyUI is running with ComfyUI-Manager installed:
```bash
# Start ComfyUI with the default server
python main.py
```
## Running Tests
### Run all tests
```bash
pytest -xvs
```
### Run specific test files
```bash
# Run only the spec validation tests
pytest -xvs test_spec_validation.py
# Run only the custom node API tests
pytest -xvs test_customnode_api.py
```
### Run specific test functions
```bash
# Run a specific test
pytest -xvs test_customnode_api.py::test_get_custom_node_list
```
## Test Configuration
The tests use the following default configuration:
- Server URL: `http://localhost:8188`
- Server timeout: 2 seconds
- Wait between requests: 0.5 seconds
- Maximum retries: 3
You can override these settings with environment variables:
```bash
# Use a different server URL
COMFYUI_SERVER_URL=http://localhost:8189 pytest -xvs
```
## Test Categories
The tests are organized into the following categories:
1. **Spec Validation** (`test_spec_validation.py`): Validates that the OpenAPI specification is correct and complete.
2. **Custom Node API** (`test_customnode_api.py`): Tests for custom node management endpoints.
3. **Snapshot API** (`test_snapshot_api.py`): Tests for snapshot management endpoints.
4. **Queue API** (`test_queue_api.py`): Tests for queue management endpoints.
5. **Config API** (`test_config_api.py`): Tests for configuration endpoints.
6. **Model API** (`test_model_api.py`): Tests for model management endpoints (minimal as these are being deprecated).
## Test Implementation Details
### Fixtures
- `test_config`: Provides the test configuration
- `server_url`: Returns the server URL from the configuration
- `openapi_spec`: Loads the OpenAPI specification
- `api_client`: Creates a requests Session for API calls
- `api_request`: Helper function for making consistent API requests
### Utilities
- `validation.py`: Functions for validating responses against the OpenAPI schema
- `schema_utils.py`: Utilities for extracting and manipulating schemas
## Notes
- Some tests are skipped with `@pytest.mark.skip` to avoid modifying state in automated testing
- Security-level restricted endpoints have minimal tests to avoid security issues
- Tests focus on read operations rather than write operations where possible

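As a rough illustration of the fixtures described above, a test module might look like the following sketch; the endpoint path and assertion are illustrative only.

```python
# test_example_api.py -- a minimal sketch using the `api_request` fixture described above.
def test_server_root_is_reachable(api_request):
    # The fixture returns a (Response, parsed JSON or None) tuple.
    response, data = api_request("GET", "/", expected_status=200)
    assert response.status_code == 200
```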
View File

@@ -1 +0,0 @@
# Make tests-api directory a proper package

View File

@@ -1,237 +0,0 @@
"""
PyTest configuration and fixtures for API tests.
"""
import os
import sys
import json
import pytest
import requests
import tempfile
import time
import yaml
from pathlib import Path
from typing import Dict, Generator, Optional, Tuple
# Import test utilities
import sys
import os
from pathlib import Path
# Get the absolute path to the current file (conftest.py)
current_file = Path(os.path.abspath(__file__))
# Get the directory containing the current file (the tests-api directory)
tests_api_dir = current_file.parent
# Add the tests-api directory to the Python path
if str(tests_api_dir) not in sys.path:
sys.path.insert(0, str(tests_api_dir))
# Apply mocks for ComfyUI imports
from mocks.patch import apply_mocks
apply_mocks()
# Now we can import from utils.validation
from utils.validation import load_openapi_spec
# Default test configuration
DEFAULT_TEST_CONFIG = {
"server_url": "http://localhost:8188",
"server_timeout": 2, # seconds
"wait_between_requests": 0.5, # seconds
"max_retries": 3,
}
@pytest.fixture(scope="session")
def test_config() -> Dict:
"""
Load test configuration from environment variables or use defaults.
"""
config = DEFAULT_TEST_CONFIG.copy()
# Override from environment variables if present
if "COMFYUI_SERVER_URL" in os.environ:
config["server_url"] = os.environ["COMFYUI_SERVER_URL"]
return config
@pytest.fixture(scope="session")
def server_url(test_config: Dict) -> str:
"""
Get the server URL from the test configuration.
"""
return test_config["server_url"]
@pytest.fixture(scope="session")
def openapi_spec() -> Dict:
"""
Load the OpenAPI specification.
"""
return load_openapi_spec()
@pytest.fixture(scope="session")
def api_client(server_url: str, test_config: Dict) -> requests.Session:
"""
Create a requests Session for API calls.
"""
session = requests.Session()
# Check if the server is running
try:
response = session.get(f"{server_url}/", timeout=test_config["server_timeout"])
response.raise_for_status()
except (requests.ConnectionError, requests.Timeout, requests.HTTPError):
pytest.skip("ComfyUI server is not running or not accessible")
return session
@pytest.fixture(scope="function")
def temp_dir() -> Generator[Path, None, None]:
"""
Create a temporary directory for test files.
"""
with tempfile.TemporaryDirectory() as temp_dir:
yield Path(temp_dir)
class SecurityLevelContext:
"""
Context manager for setting and restoring security levels.
"""
def __init__(self, api_client: requests.Session, server_url: str, security_level: str):
self.api_client = api_client
self.server_url = server_url
self.security_level = security_level
self.original_level = None
async def __aenter__(self):
# Get the current security level (not directly exposed in API, would require more setup)
# For now, we'll just set the new level
# Set the new security level
# Note: In a real implementation, we would need a way to set this
# This is a placeholder - the actual implementation would depend on how
# security levels are managed in ComfyUI-Manager
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
# Restore the original security level if needed
pass
@pytest.fixture
def security_level_context(api_client: requests.Session, server_url: str):
"""
Create a context manager for setting security levels.
"""
return lambda level: SecurityLevelContext(api_client, server_url, level)
def make_api_url(server_url: str, path: str) -> str:
"""
Construct a full API URL from the server URL and path.
"""
# Ensure the path starts with a slash
if not path.startswith("/"):
path = f"/{path}"
# Remove trailing slash from server_url if present
if server_url.endswith("/"):
server_url = server_url[:-1]
return f"{server_url}{path}"
@pytest.fixture
def api_request(api_client: requests.Session, server_url: str, test_config: Dict):
"""
Helper function for making API requests with consistent behavior.
"""
def _request(
method: str,
path: str,
params: Optional[Dict] = None,
json_data: Optional[Dict] = None,
headers: Optional[Dict] = None,
expected_status: int = 200,
retry_on_error: bool = True,
) -> Tuple[requests.Response, Optional[Dict]]:
"""
Make an API request with automatic validation.
Args:
method: HTTP method
path: API path
params: Query parameters
json_data: JSON request body
headers: HTTP headers
expected_status: Expected HTTP status code
retry_on_error: Whether to retry on connection errors
Returns:
Tuple of (Response object, JSON response data or None)
"""
method = method.lower()
url = make_api_url(server_url, path)
if headers is None:
headers = {}
# Add common headers
headers.setdefault("Accept", "application/json")
# Sleep between requests to avoid overwhelming the server
time.sleep(test_config["wait_between_requests"])
retries = test_config["max_retries"] if retry_on_error else 0
last_exception = None
for attempt in range(retries + 1):
try:
if method == "get":
response = api_client.get(url, params=params, headers=headers)
elif method == "post":
response = api_client.post(url, params=params, json=json_data, headers=headers)
elif method == "put":
response = api_client.put(url, params=params, json=json_data, headers=headers)
elif method == "delete":
response = api_client.delete(url, params=params, headers=headers)
else:
raise ValueError(f"Unsupported HTTP method: {method}")
# Check status code
assert response.status_code == expected_status, (
f"Expected status code {expected_status}, got {response.status_code}"
)
# Parse JSON response if possible
json_response = None
if response.headers.get("Content-Type", "").startswith("application/json"):
try:
json_response = response.json()
except json.JSONDecodeError:
if expected_status == 200:
raise ValueError("Response was not valid JSON")
return response, json_response
except (requests.ConnectionError, requests.Timeout) as e:
last_exception = e
if attempt < retries:
# Wait before retrying
time.sleep(1)
continue
break
if last_exception:
raise last_exception
raise RuntimeError("Failed to make API request")
return _request

View File

@@ -1 +0,0 @@
# Make tests-api/mocks directory a proper package

View File

@@ -1,26 +0,0 @@
"""
Mock CustomNodeManager for testing purposes
"""
class CustomNodeManager:
"""
Mock implementation of the CustomNodeManager class
"""
instance = None
def __init__(self):
self.custom_nodes = {}
self.node_paths = []
self.refresh_timeout = None
def get_node_path(self, node_class):
"""
Mock implementation to get the path for a node class
"""
return self.custom_nodes.get(node_class, None)
def update_node_paths(self):
"""
Mock implementation to update node paths
"""
pass

View File

@@ -1,116 +0,0 @@
"""
Patch module to mock imports for testing
"""
import sys
import importlib.util
import os
from pathlib import Path
# Import mock modules
from mocks.prompt_server import PromptServer
from mocks.custom_node_manager import CustomNodeManager
# Current directory
current_dir = Path(__file__).parent.parent # tests-api directory
# Define mocks
class MockModule:
"""Base class for mock modules"""
pass
# Create server mock module with PromptServer
server_mock = MockModule()
server_mock.PromptServer = PromptServer
prompt_server_instance = PromptServer()
server_mock.PromptServer.instance = prompt_server_instance
server_mock.PromptServer.inst = prompt_server_instance
# Create app mock module with custom_node_manager submodule
app_mock = MockModule()
app_custom_node_manager = MockModule()
app_custom_node_manager.CustomNodeManager = CustomNodeManager
app_custom_node_manager.CustomNodeManager.instance = CustomNodeManager()
# Create utils mock module with json_util submodule
utils_mock = MockModule()
utils_json_util = MockModule()
# Create utils.validation and utils.schema_utils submodules
utils_validation = MockModule()
utils_schema_utils = MockModule()
# Import actual modules (make sure path is set up correctly)
sys.path.insert(0, str(current_dir))
try:
# Import the validation module
from utils.validation import load_openapi_spec
utils_validation.load_openapi_spec = load_openapi_spec
# Import all schema_utils functions
from utils.schema_utils import (
get_all_paths,
get_grouped_paths,
get_methods_for_path,
find_paths_with_security,
get_content_types_for_response,
get_required_parameters
)
utils_schema_utils.get_all_paths = get_all_paths
utils_schema_utils.get_grouped_paths = get_grouped_paths
utils_schema_utils.get_methods_for_path = get_methods_for_path
utils_schema_utils.find_paths_with_security = find_paths_with_security
utils_schema_utils.get_content_types_for_response = get_content_types_for_response
utils_schema_utils.get_required_parameters = get_required_parameters
except ImportError as e:
print(f"Error importing test utilities: {e}")
# Define dummy functions if imports fail
def dummy_load_openapi_spec():
"""Dummy function for testing"""
return {"paths": {}}
utils_validation.load_openapi_spec = dummy_load_openapi_spec
def dummy_get_all_paths(spec):
return list(spec.get("paths", {}).keys())
utils_schema_utils.get_all_paths = dummy_get_all_paths
def dummy_get_grouped_paths(spec):
return {}
utils_schema_utils.get_grouped_paths = dummy_get_grouped_paths
def dummy_get_methods_for_path(spec, path):
return []
utils_schema_utils.get_methods_for_path = dummy_get_methods_for_path
def dummy_find_paths_with_security(spec, security_scheme=None):
return []
utils_schema_utils.find_paths_with_security = dummy_find_paths_with_security
def dummy_get_content_types_for_response(spec, path, method, status_code="200"):
return []
utils_schema_utils.get_content_types_for_response = dummy_get_content_types_for_response
def dummy_get_required_parameters(spec, path, method):
return []
utils_schema_utils.get_required_parameters = dummy_get_required_parameters
# Add merge_json_recursive from our mock utils
from mocks.utils import merge_json_recursive
utils_json_util.merge_json_recursive = merge_json_recursive
# Apply the mocks to sys.modules
def apply_mocks():
"""Apply all mocks to sys.modules"""
sys.modules['server'] = server_mock
sys.modules['app'] = app_mock
sys.modules['app.custom_node_manager'] = app_custom_node_manager
sys.modules['utils'] = utils_mock
sys.modules['utils.json_util'] = utils_json_util
sys.modules['utils.validation'] = utils_validation
sys.modules['utils.schema_utils'] = utils_schema_utils
# Make sure our actual utils module is importable
if str(current_dir) not in sys.path:
sys.path.insert(0, str(current_dir))

View File

@@ -1,71 +0,0 @@
"""
Mock PromptServer for testing purposes
"""
class MockRoutes:
"""
Mock routing class with method decorators
"""
def __init__(self):
self.routes = {}
def get(self, path):
"""Decorator for GET routes"""
def decorator(f):
self.routes[('GET', path)] = f
return f
return decorator
def post(self, path):
"""Decorator for POST routes"""
def decorator(f):
self.routes[('POST', path)] = f
return f
return decorator
def put(self, path):
"""Decorator for PUT routes"""
def decorator(f):
self.routes[('PUT', path)] = f
return f
return decorator
def delete(self, path):
"""Decorator for DELETE routes"""
def decorator(f):
self.routes[('DELETE', path)] = f
return f
return decorator
class PromptServer:
"""
Mock implementation of the PromptServer class
"""
instance = None
inst = None
def __init__(self):
self.routes = MockRoutes()
self.registered_paths = set()
self.base_url = "http://127.0.0.1:8188" # Assuming server is running on default port
self.queue_lock = None
def add_route(self, method, path, handler, *args, **kwargs):
"""
Add a mock route to the server
"""
self.routes.routes[(method.upper(), path)] = handler
self.registered_paths.add(path)
async def send_msg(self, message, data=None):
"""
Mock send_msg method (does nothing in the mock)
"""
pass
def send_sync(self, message, data=None):
"""
Mock send_sync method (does nothing in the mock)
"""
pass

View File

@@ -1,20 +0,0 @@
"""
Mock utils module for testing purposes
"""
def merge_json_recursive(a, b):
"""
Mock implementation of merge_json_recursive
"""
if isinstance(a, dict) and isinstance(b, dict):
result = a.copy()
for key, value in b.items():
if key in result and isinstance(result[key], (dict, list)) and isinstance(value, (dict, list)):
result[key] = merge_json_recursive(result[key], value)
else:
result[key] = value
return result
elif isinstance(a, list) and isinstance(b, list):
return a + b
else:
return b

View File

@@ -1,382 +0,0 @@
openapi: 3.0.3
info:
title: ComfyUI-Manager API
description: API for managing ComfyUI extensions, custom nodes, and models
version: 1.0.0
contact:
name: ComfyUI Community
url: https://github.com/comfyanonymous/ComfyUI
servers:
- url: http://localhost:8188
description: Local ComfyUI server
paths:
/customnode/getlist:
get:
summary: Get the list of custom nodes
description: Returns the list of custom nodes from all configured channels
parameters:
- name: mode
in: query
description: "The mode to retrieve (local=installed nodes, remote=available nodes)"
schema:
type: string
enum: [local, remote]
default: remote
responses:
'200':
description: List of custom nodes
content:
application/json:
schema:
type: object
properties:
nodes:
type: array
items:
$ref: '#/components/schemas/CustomNode'
'500':
description: Server error
/customnode/get_node_mappings:
get:
summary: Get mappings between node class names and their custom nodes
description: Returns mappings that help identify which custom node package provides specific node classes
parameters:
- name: mode
in: query
description: "The mode for mappings (local=installed nodes, nickname=node nicknames)"
schema:
type: string
enum: [local, nickname]
default: local
required: true
responses:
'200':
description: Node mappings
content:
application/json:
schema:
type: object
additionalProperties:
type: string
'500':
description: Server error
/customnode/get_node_alternatives:
get:
summary: Get alternative nodes for specific node classes
description: Returns alternative implementations of node classes from different custom node packages
parameters:
- name: mode
in: query
description: "The mode to retrieve alternatives (local=installed nodes, remote=all available nodes)"
schema:
type: string
enum: [local, remote]
default: remote
responses:
'200':
description: Node alternatives
content:
application/json:
schema:
type: object
additionalProperties:
type: array
items:
type: string
'500':
description: Server error
/externalmodel/getlist:
get:
summary: Get the list of external models
description: Returns the list of models from all configured channels
parameters:
- name: mode
in: query
description: "The mode to retrieve (local=installed models, remote=available models)"
schema:
type: string
enum: [local, remote]
default: remote
responses:
'200':
description: List of external models
content:
application/json:
schema:
type: object
properties:
models:
type: array
items:
$ref: '#/components/schemas/ExternalModel'
'500':
description: Server error
/manager/get_config:
get:
summary: Get manager configuration
description: Returns the current configuration of ComfyUI-Manager
parameters:
- name: key
in: query
description: "The configuration key to retrieve"
schema:
type: string
required: true
responses:
'200':
description: Configuration value
content:
application/json:
schema:
type: object
properties:
value:
type: string
'400':
description: Invalid key or missing parameter
'500':
description: Server error
/manager/set_config:
post:
summary: Set manager configuration
description: Updates the configuration of ComfyUI-Manager
requestBody:
required: true
content:
application/json:
schema:
type: object
required:
- key
- value
properties:
key:
type: string
description: "The configuration key to update"
value:
type: string
description: "The new value for the configuration key"
responses:
'200':
description: Configuration updated successfully
content:
application/json:
schema:
type: object
properties:
success:
type: boolean
'400':
description: Invalid key or value
'500':
description: Server error
/snapshot/getlist:
get:
summary: Get the list of snapshots
description: Returns the list of saved snapshots
responses:
'200':
description: List of snapshots
content:
application/json:
schema:
type: object
properties:
snapshots:
type: array
items:
$ref: '#/components/schemas/Snapshot'
'500':
description: Server error
/comfyui_manager/queue/status:
get:
summary: Get queue status
description: Returns the current status of the operation queue
responses:
'200':
description: Queue status
content:
application/json:
schema:
$ref: '#/components/schemas/QueueStatus'
'500':
description: Server error
components:
schemas:
CustomNode:
type: object
required:
- name
- title
- reference
properties:
name:
type: string
description: "Internal name/ID of the custom node"
title:
type: string
description: "Display title of the custom node"
reference:
type: string
description: "Reference URL (usually GitHub repository URL)"
description:
type: string
description: "Description of what the custom node does"
install_type:
type: string
enum: [git, pip, copy]
description: "Installation method for the custom node"
files:
type: array
items:
type: string
description: "List of files provided by this custom node"
node_class_names:
type: array
items:
type: string
description: "List of node class names provided by this custom node"
installed:
type: boolean
description: "Whether the custom node is installed"
version:
type: string
description: "Version of the custom node"
tags:
type: array
items:
type: string
description: "Tags associated with the custom node"
ExternalModel:
type: object
required:
- name
- type
- url
properties:
name:
type: string
description: "Name of the model"
type:
type: string
description: "Type of the model (checkpoint, lora, embedding, etc.)"
url:
type: string
description: "Download URL for the model"
description:
type: string
description: "Description of the model"
size:
type: integer
description: "Size of the model in bytes"
installed:
type: boolean
description: "Whether the model is installed"
version:
type: string
description: "Version of the model"
tags:
type: array
items:
type: string
description: "Tags associated with the model"
Snapshot:
type: object
required:
- name
- date
properties:
name:
type: string
description: "Name of the snapshot"
date:
type: string
format: date-time
description: "Date when the snapshot was created"
description:
type: string
description: "Description of the snapshot"
nodes:
type: array
items:
type: string
description: "List of custom nodes in the snapshot"
models:
type: array
items:
type: string
description: "List of models in the snapshot"
QueueStatus:
type: object
properties:
pending:
type: array
items:
$ref: '#/components/schemas/QueueItem'
description: "List of pending operations in the queue"
completed:
type: array
items:
$ref: '#/components/schemas/QueueItem'
description: "List of completed operations in the queue"
failed:
type: array
items:
$ref: '#/components/schemas/QueueItem'
description: "List of failed operations in the queue"
running:
type: boolean
description: "Whether the queue is currently running"
QueueItem:
type: object
required:
- id
- type
- target
properties:
id:
type: string
description: "Unique ID of the queue item"
type:
type: string
enum: [install, update, uninstall]
description: "Type of operation"
target:
type: string
description: "Target of the operation (e.g., custom node name, model name)"
status:
type: string
enum: [pending, processing, completed, failed]
description: "Current status of the operation"
error:
type: string
description: "Error message if the operation failed"
created_at:
type: string
format: date-time
description: "Time when the operation was added to the queue"
completed_at:
type: string
format: date-time
description: "Time when the operation was completed"
securitySchemes:
ApiKeyAuth:
type: apiKey
in: header
name: X-API-Key
description: "API key for authentication"

View File

@@ -1,6 +0,0 @@
pytest>=7.3.1
requests>=2.31.0
openapi-spec-validator>=0.6.0
jsonschema>=4.17.3
pytest-asyncio>=0.21.0
pyyaml>=6.0

View File

@@ -1,270 +0,0 @@
"""
Tests for configuration endpoints.
"""
import pytest
from typing import Callable, Dict, List, Tuple
from utils.validation import validate_response
def test_get_preview_method(
api_request: Callable
):
"""
Test getting the current preview method.
"""
# Make the API request
path = "/manager/preview_method"
response, _ = api_request(
method="get",
path=path,
expected_status=200,
)
# Verify the response is one of the valid preview methods
assert response.text in ["auto", "latent2rgb", "taesd", "none"]
def test_get_db_mode(
api_request: Callable
):
"""
Test getting the current database mode.
"""
# Make the API request
path = "/manager/db_mode"
response, _ = api_request(
method="get",
path=path,
expected_status=200,
)
# Verify the response is one of the valid database modes
assert response.text in ["channel", "local", "remote"]
def test_get_component_policy(
api_request: Callable
):
"""
Test getting the current component policy.
"""
# Make the API request
path = "/manager/policy/component"
response, _ = api_request(
method="get",
path=path,
expected_status=200,
)
# Component policy could be any string
assert response.text is not None
def test_get_update_policy(
api_request: Callable
):
"""
Test getting the current update policy.
"""
# Make the API request
path = "/manager/policy/update"
response, _ = api_request(
method="get",
path=path,
expected_status=200,
)
# Verify the response is one of the valid update policies
assert response.text in ["stable", "nightly", "nightly-comfyui"]
def test_get_channel_url_list(
api_request: Callable,
openapi_spec: Dict
):
"""
Test getting the channel URL list.
"""
# Make the API request
path = "/manager/channel_url_list"
response, json_data = api_request(
method="get",
path=path,
expected_status=200,
)
# Validate response structure against the schema
assert json_data is not None
validate_response(
response_data=json_data,
path=path,
method="get",
spec=openapi_spec,
)
# Verify the response contains the expected fields
assert "selected" in json_data
assert "list" in json_data
assert isinstance(json_data["list"], list)
# Each channel should have a name and URL
if json_data["list"]:
first_channel = json_data["list"][0]
assert "name" in first_channel
assert "url" in first_channel
def test_get_manager_version(
api_request: Callable
):
"""
Test getting the manager version.
"""
# Make the API request
path = "/manager/version"
response, _ = api_request(
method="get",
path=path,
expected_status=200,
)
# Verify the response is a version string
assert response.text.startswith("V") # Version strings start with V
def test_get_manager_notice(
api_request: Callable
):
"""
Test getting the manager notice.
"""
# Make the API request
path = "/manager/notice"
response, _ = api_request(
method="get",
path=path,
expected_status=200,
)
# Verify the response is HTML content
assert response.headers.get("Content-Type", "").startswith("text/html") or "ComfyUI" in response.text
@pytest.mark.skip(reason="State-modifying operations")
class TestConfigChanges:
"""
Tests for changing configuration settings.
These are skipped to avoid modifying state in automated tests.
"""
@pytest.fixture(scope="class", autouse=True)
def save_original_config(self, api_request: Callable):
"""
Save the original configuration to restore after tests.
"""
# Save original values
response, _ = api_request(
method="get",
path="/manager/preview_method",
expected_status=200,
)
self.original_preview_method = response.text
response, _ = api_request(
method="get",
path="/manager/db_mode",
expected_status=200,
)
self.original_db_mode = response.text
response, _ = api_request(
method="get",
path="/manager/policy/update",
expected_status=200,
)
self.original_update_policy = response.text
yield
# Restore original values
api_request(
method="get",
path="/manager/preview_method",
params={"value": self.original_preview_method},
expected_status=200,
)
api_request(
method="get",
path="/manager/db_mode",
params={"value": self.original_db_mode},
expected_status=200,
)
api_request(
method="get",
path="/manager/policy/update",
params={"value": self.original_update_policy},
expected_status=200,
)
def test_set_preview_method(self, api_request: Callable):
"""
Test setting the preview method.
"""
# Set to a different value (taesd)
api_request(
method="get",
path="/manager/preview_method",
params={"value": "taesd"},
expected_status=200,
)
# Verify it was changed
response, _ = api_request(
method="get",
path="/manager/preview_method",
expected_status=200,
)
assert response.text == "taesd"
def test_set_db_mode(self, api_request: Callable):
"""
Test setting the database mode.
"""
# Set to local mode
api_request(
method="get",
path="/manager/db_mode",
params={"value": "local"},
expected_status=200,
)
# Verify it was changed
response, _ = api_request(
method="get",
path="/manager/db_mode",
expected_status=200,
)
assert response.text == "local"
def test_set_update_policy(self, api_request: Callable):
"""
Test setting the update policy.
"""
# Set to stable
api_request(
method="get",
path="/manager/policy/update",
params={"value": "stable"},
expected_status=200,
)
# Verify it was changed
response, _ = api_request(
method="get",
path="/manager/policy/update",
expected_status=200,
)
assert response.text == "stable"

View File

@@ -1,200 +0,0 @@
"""
Tests for custom node management endpoints.
"""
import pytest
from pathlib import Path
from typing import Callable, Dict, Tuple
from utils.validation import validate_response
@pytest.mark.parametrize(
"mode",
["local", "remote"]
)
def test_get_custom_node_list(
api_request: Callable,
openapi_spec: Dict,
mode: str
):
"""
Test the endpoint for listing custom nodes.
"""
# Make the API request
path = "/customnode/getlist"
response, json_data = api_request(
method="get",
path=path,
params={"mode": mode, "skip_update": "true"},
expected_status=200,
)
# Validate response structure against the schema
assert json_data is not None
validate_response(
response_data=json_data,
path=path,
method="get",
spec=openapi_spec,
)
# Verify the response contains the expected fields
assert "channel" in json_data
assert "node_packs" in json_data
assert isinstance(json_data["node_packs"], dict)
# If there are any node packs, verify they have the expected structure
if json_data["node_packs"]:
# Take the first node pack to validate
first_node_pack = next(iter(json_data["node_packs"].values()))
assert "title" in first_node_pack
assert "name" in first_node_pack
def test_get_installed_nodes(
api_request: Callable,
openapi_spec: Dict
):
"""
Test the endpoint for listing installed nodes.
"""
# Make the API request
path = "/customnode/installed"
response, json_data = api_request(
method="get",
path=path,
expected_status=200,
)
# Validate response structure against the schema
assert json_data is not None
validate_response(
response_data=json_data,
path=path,
method="get",
spec=openapi_spec,
)
# Verify the response is a dictionary of node packs
assert isinstance(json_data, dict)
@pytest.mark.parametrize(
"mode",
["local", "nickname"]
)
def test_get_node_mappings(
api_request: Callable,
openapi_spec: Dict,
mode: str
):
"""
Test the endpoint for getting node-to-package mappings.
"""
# Make the API request
path = "/customnode/getmappings"
response, json_data = api_request(
method="get",
path=path,
params={"mode": mode},
expected_status=200,
)
# Validate response structure against the schema
assert json_data is not None
validate_response(
response_data=json_data,
path=path,
method="get",
spec=openapi_spec,
)
# Verify the response is a dictionary mapping extension IDs to node info
assert isinstance(json_data, dict)
# If there are any mappings, verify they have the expected structure
if json_data:
# Take the first mapping to validate
first_mapping = next(iter(json_data.values()))
assert isinstance(first_mapping, list)
assert len(first_mapping) == 2
assert isinstance(first_mapping[0], list) # List of node classes
assert isinstance(first_mapping[1], dict) # Metadata
@pytest.mark.parametrize(
"mode",
["local", "remote"]
)
def test_get_node_alternatives(
api_request: Callable,
openapi_spec: Dict,
mode: str
):
"""
Test the endpoint for getting alternative node options.
"""
# Make the API request
path = "/customnode/alternatives"
response, json_data = api_request(
method="get",
path=path,
params={"mode": mode},
expected_status=200,
)
# Validate response structure against the schema
assert json_data is not None
validate_response(
response_data=json_data,
path=path,
method="get",
spec=openapi_spec,
)
# Verify the response is a dictionary
assert isinstance(json_data, dict)
def test_fetch_updates(
api_request: Callable
):
"""
Test the endpoint for fetching updates.
This might modify state, so we just check for a valid response.
"""
# Make the API request in local mode to limit side effects
path = "/customnode/fetch_updates"
response, _ = api_request(
method="get",
path=path,
params={"mode": "local"},
# Don't validate JSON since this endpoint doesn't return JSON
expected_status=200,
retry_on_error=False, # Don't retry as this might have side effects
)
# Just check the status code is as expected (covered by api_request)
assert response.status_code in [200, 201]
@pytest.mark.skip(reason="Queue endpoints are better tested with queue operations")
def test_queue_update_all(
api_request: Callable
):
"""
Test the endpoint for queuing updates for all nodes.
Skipping as this would actually modify the installation.
"""
pass
@pytest.mark.skip(reason="Security-restricted endpoint")
def test_install_node_via_git_url(
api_request: Callable
):
"""
Test the endpoint for installing a node via Git URL.
Skipping as this requires high security level and would modify the installation.
"""
pass

View File

@@ -1,23 +0,0 @@
import os
import sys
# Print current working directory
print(f"Current directory: {os.getcwd()}")
# Print module search path
print(f"System path: {sys.path}")
# Try to import
try:
from utils.validation import load_openapi_spec
print("Import successful!")
except ImportError as e:
print(f"Import error: {e}")
# Try direct import
try:
sys.path.insert(0, os.path.join(os.getcwd(), "custom_nodes/ComfyUI-Manager/tests-api"))
from utils.validation import load_openapi_spec
print("Direct import successful!")
except ImportError as e:
print(f"Direct import error: {e}")

View File

@@ -1,62 +0,0 @@
"""
Tests for model management endpoints.
These features are scheduled for deprecation, so tests are minimal.
"""
import pytest
from typing import Callable, Dict
from utils.validation import validate_response
@pytest.mark.parametrize(
"mode",
["local", "remote"]
)
def test_get_external_model_list(
api_request: Callable,
openapi_spec: Dict,
mode: str
):
"""
Test the endpoint for listing external models.
"""
# Make the API request
path = "/externalmodel/getlist"
response, json_data = api_request(
method="get",
path=path,
params={"mode": mode},
expected_status=200,
)
# Validate response structure against the schema
assert json_data is not None
validate_response(
response_data=json_data,
path=path,
method="get",
spec=openapi_spec,
)
# Verify the response contains the expected fields
assert "models" in json_data
assert isinstance(json_data["models"], list)
# If there are any models, verify they have the expected structure
if json_data["models"]:
first_model = json_data["models"][0]
assert "name" in first_model
assert "type" in first_model
assert "url" in first_model
assert "filename" in first_model
assert "installed" in first_model
@pytest.mark.skip(reason="State-modifying operation that requires auth")
def test_install_model():
"""
Test queuing a model installation.
Skipped to avoid modifying state and requires authentication.
This feature is also scheduled for deprecation.
"""
pass

View File

@@ -1,213 +0,0 @@
"""
Tests for queue management endpoints.
"""
import pytest
import time
from pathlib import Path
from typing import Callable, Dict, Tuple
from utils.validation import validate_response
def test_get_queue_status(
api_request: Callable,
openapi_spec: Dict
):
"""
Test the endpoint for getting queue status.
"""
# Make the API request
path = "/manager/queue/status"
response, json_data = api_request(
method="get",
path=path,
expected_status=200,
)
# Validate response structure against the schema
assert json_data is not None
validate_response(
response_data=json_data,
path=path,
method="get",
spec=openapi_spec,
)
# Verify the response contains the expected fields
assert "total_count" in json_data
assert "done_count" in json_data
assert "in_progress_count" in json_data
assert "is_processing" in json_data
# Type checks
assert isinstance(json_data["total_count"], int)
assert isinstance(json_data["done_count"], int)
assert isinstance(json_data["in_progress_count"], int)
assert isinstance(json_data["is_processing"], bool)
def test_reset_queue(
api_request: Callable
):
"""
Test the endpoint for resetting the queue.
"""
# Make the API request
path = "/manager/queue/reset"
response, _ = api_request(
method="get",
path=path,
expected_status=200,
)
# Now check the queue status to verify it was reset
response2, json_data = api_request(
method="get",
path="/manager/queue/status",
expected_status=200,
)
# After a reset there should be no pending items: total equals done plus in-progress
assert json_data["total_count"] == json_data["done_count"] + json_data["in_progress_count"]
@pytest.mark.skip(reason="State-modifying operation that requires auth")
def test_queue_install_node():
"""
Test queuing a node installation.
Skipped to avoid modifying state and requires authentication.
"""
pass
@pytest.mark.skip(reason="State-modifying operation that requires auth")
def test_queue_update_node():
"""
Test queuing a node update.
Skipped to avoid modifying state and requires authentication.
"""
pass
@pytest.mark.skip(reason="State-modifying operation that requires auth")
def test_queue_uninstall_node():
"""
Test queuing a node uninstallation.
Skipped to avoid modifying state and requires authentication.
"""
pass
@pytest.mark.skip(reason="State-modifying operation")
def test_queue_start():
"""
Test starting the queue.
Skipped to avoid modifying state.
"""
pass
class TestQueueOperations:
"""
Test a complete queue workflow.
These tests are grouped to ensure proper sequencing but are still skipped
to avoid modifying state in automated tests.
"""
@pytest.fixture(scope="class")
def node_data(self) -> Dict:
"""
Create test data for a node operation.
"""
# This would be replaced with actual data for a known safe node
return {
"ui_id": "test_node_1",
"id": "comfyui-manager", # Manager itself
"version": "latest",
"channel": "default",
"mode": "local",
}
@pytest.mark.skip(reason="State-modifying operation")
def test_queue_operation_sequence(
self,
api_request: Callable,
node_data: Dict
):
"""
Test the queue operation sequence.
"""
# 1. Reset the queue
api_request(
method="get",
path="/manager/queue/reset",
expected_status=200,
)
# 2. Queue a node operation (we'll use the manager itself)
api_request(
method="post",
path="/manager/queue/update",
json_data=node_data,
expected_status=200,
)
# 3. Check queue status - should have one operation
response, json_data = api_request(
method="get",
path="/manager/queue/status",
expected_status=200,
)
assert json_data["total_count"] > 0
assert not json_data["is_processing"] # Queue hasn't started yet
# 4. Start the queue
api_request(
method="get",
path="/manager/queue/start",
expected_status=200,
)
# 5. Check queue status again - should be processing
response, json_data = api_request(
method="get",
path="/manager/queue/status",
expected_status=200,
)
# Queue should be processing or already done
assert json_data["is_processing"] or json_data["done_count"] == json_data["total_count"]
# 6. Wait for queue to complete (with timeout)
max_wait_time = 60 # seconds
start_time = time.time()
completed = False
while time.time() - start_time < max_wait_time:
response, json_data = api_request(
method="get",
path="/manager/queue/status",
expected_status=200,
)
if json_data["done_count"] == json_data["total_count"] and not json_data["is_processing"]:
completed = True
break
time.sleep(2) # Wait before checking again
assert completed, "Queue did not complete within timeout period"
@pytest.mark.skip(reason="State-modifying operation")
def test_concurrent_queue_operations(
self,
api_request: Callable,
node_data: Dict
):
"""
Test concurrent queue operations.
"""
# This would test adding multiple operations to the queue
# and verifying they all complete correctly
pass

View File

@@ -1,198 +0,0 @@
"""
Tests for snapshot management endpoints.
"""
import pytest
import time
from datetime import datetime
from pathlib import Path
from typing import Callable, Dict, List, Optional
from utils.validation import validate_response
def test_get_snapshot_list(
api_request: Callable,
openapi_spec: Dict
):
"""
Test the endpoint for listing snapshots.
"""
# Make the API request
path = "/snapshot/getlist"
response, json_data = api_request(
method="get",
path=path,
expected_status=200,
)
# Validate response structure against the schema
assert json_data is not None
validate_response(
response_data=json_data,
path=path,
method="get",
spec=openapi_spec,
)
# Verify the response contains the expected fields
assert "items" in json_data
assert isinstance(json_data["items"], list)
def test_get_current_snapshot(
api_request: Callable,
openapi_spec: Dict
):
"""
Test the endpoint for getting the current snapshot.
"""
# Make the API request
path = "/snapshot/get_current"
response, json_data = api_request(
method="get",
path=path,
expected_status=200,
)
# Validate response structure against the schema
assert json_data is not None
validate_response(
response_data=json_data,
path=path,
method="get",
spec=openapi_spec,
)
# Check for basic snapshot structure
assert "snapshot_date" in json_data
assert "custom_nodes" in json_data
@pytest.mark.skip(reason="This test creates a snapshot which is a state-modifying operation")
def test_save_snapshot(
api_request: Callable
):
"""
Test the endpoint for saving a new snapshot.
Skipped to avoid modifying state in tests.
"""
pass
@pytest.mark.skip(reason="This test removes a snapshot which is a destructive operation")
def test_remove_snapshot(
api_request: Callable
):
"""
Test the endpoint for removing a snapshot.
Skipped to avoid modifying state in tests.
"""
pass
@pytest.mark.skip(reason="This test restores a snapshot which is a state-modifying operation")
def test_restore_snapshot(
api_request: Callable
):
"""
Test the endpoint for restoring a snapshot.
Skipped to avoid modifying state in tests.
"""
pass
class TestSnapshotWorkflow:
"""
Test the complete snapshot workflow (create, list, get, remove).
These tests are grouped to ensure proper sequencing but are still skipped
to avoid modifying state in automated tests.
"""
@pytest.fixture(scope="class")
def snapshot_name(self) -> str:
"""
Generate a unique snapshot name for testing.
"""
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
return f"test_snapshot_{timestamp}"
@pytest.mark.skip(reason="State-modifying test")
def test_create_snapshot(
self,
api_request: Callable,
snapshot_name: str
):
"""
Test creating a snapshot.
"""
# Make the API request to save a snapshot
response, _ = api_request(
method="get",
path="/snapshot/save",
expected_status=200,
)
# Verify a snapshot was created (would need to check the snapshot list)
response2, json_data = api_request(
method="get",
path="/snapshot/getlist",
expected_status=200,
)
# The most recently created snapshot should be first in the list
assert json_data["items"]
# Store the snapshot name on the class so the later removal test (a new instance) can read it
TestSnapshotWorkflow.actual_snapshot_name = json_data["items"][0]
@pytest.mark.skip(reason="State-modifying test")
def test_get_snapshot_details(
self,
api_request: Callable,
openapi_spec: Dict
):
"""
Test getting details of the created snapshot.
"""
# This would check the current snapshot, not a specific one
# since there's no direct API to get a specific snapshot
response, json_data = api_request(
method="get",
path="/snapshot/get_current",
expected_status=200,
)
# Validate the snapshot data
assert json_data is not None
validate_response(
response_data=json_data,
path="/snapshot/get_current",
method="get",
spec=openapi_spec,
)
@pytest.mark.skip(reason="State-modifying test")
def test_remove_test_snapshot(
self,
api_request: Callable
):
"""
Test removing the test snapshot.
"""
# Make the API request to remove the snapshot
response, _ = api_request(
method="get",
path="/snapshot/remove",
params={"target": self.actual_snapshot_name},
expected_status=200,
)
# Verify the snapshot was removed
response2, json_data = api_request(
method="get",
path="/snapshot/getlist",
expected_status=200,
)
# The snapshot should no longer be in the list
assert self.actual_snapshot_name not in json_data["items"]

View File

@@ -1,150 +0,0 @@
"""
Tests for validating the OpenAPI specification.
"""
import json
import pytest
import yaml
from typing import Dict, Any, List, Tuple
from pathlib import Path
from openapi_spec_validator import validate_spec
from utils.validation import load_openapi_spec
from utils.schema_utils import (
get_all_paths,
get_methods_for_path,
find_paths_with_security,
get_required_parameters
)
def test_spec_is_valid():
"""
Test that the OpenAPI specification is valid according to the spec validator.
"""
spec = load_openapi_spec()
validate_spec(spec)
def test_spec_has_info():
"""
Test that the OpenAPI specification has basic info.
"""
spec = load_openapi_spec()
assert "info" in spec
assert "title" in spec["info"]
assert "version" in spec["info"]
assert spec["info"]["title"] == "ComfyUI-Manager API"
def test_spec_has_paths():
"""
Test that the OpenAPI specification has paths defined.
"""
spec = load_openapi_spec()
assert "paths" in spec
assert len(spec["paths"]) > 0
def test_paths_have_responses():
"""
Test that all paths have responses defined.
"""
spec = load_openapi_spec()
for path, path_item in spec["paths"].items():
for method, operation in path_item.items():
if method.lower() not in {"get", "post", "put", "delete", "patch", "options", "head"}:
continue
assert "responses" in operation, f"Path {path} method {method} has no responses"
assert len(operation["responses"]) > 0, f"Path {path} method {method} has empty responses"
def test_responses_have_schemas():
"""
Test that responses with application/json content type have schemas.
"""
spec = load_openapi_spec()
for path, path_item in spec["paths"].items():
for method, operation in path_item.items():
if method.lower() not in {"get", "post", "put", "delete", "patch", "options", "head"}:
continue
for status, response in operation["responses"].items():
if "content" not in response:
continue
if "application/json" in response["content"]:
assert "schema" in response["content"]["application/json"], (
f"Path {path} method {method} status {status} "
f"application/json content has no schema"
)
def test_required_parameters_have_schemas():
"""
Test that all required parameters have schemas.
"""
spec = load_openapi_spec()
for path, path_item in spec["paths"].items():
for method, operation in path_item.items():
if method.lower() not in {"get", "post", "put", "delete", "patch", "options", "head"}:
continue
if "parameters" not in operation:
continue
for param in operation["parameters"]:
if param.get("required", False):
assert "schema" in param, (
f"Path {path} method {method} required parameter {param.get('name')} has no schema"
)
def test_security_schemes_defined():
"""
Test that security schemes are properly defined.
"""
spec = load_openapi_spec()
# Get paths requiring security
secure_paths = find_paths_with_security(spec)
if secure_paths:
assert "components" in spec, "Spec has secure paths but no components"
assert "securitySchemes" in spec["components"], "Spec has secure paths but no securitySchemes"
# Check each security reference is defined
for path, method in secure_paths:
operation = spec["paths"][path][method]
for security_req in operation["security"]:
for scheme_name in security_req:
assert scheme_name in spec["components"]["securitySchemes"], (
f"Security scheme {scheme_name} used by {method.upper()} {path} "
f"is not defined in components.securitySchemes"
)
def test_common_endpoint_groups_present():
"""
Test that the spec includes the main endpoint groups.
"""
spec = load_openapi_spec()
paths = get_all_paths(spec)
# Define the expected endpoint prefixes
expected_prefixes = [
"/customnode/",
"/externalmodel/",
"/manager/",
"/snapshot/",
"/comfyui_manager/",
]
# Check that at least one path exists for each expected prefix
for prefix in expected_prefixes:
matching_paths = [p for p in paths if p.startswith(prefix)]
assert matching_paths, f"No endpoints found with prefix {prefix}"

View File

@@ -1 +0,0 @@
# Make utils directory a proper package

View File

@@ -1,174 +0,0 @@
"""
Schema utilities for extracting and manipulating OpenAPI schemas.
"""
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple
from .validation import load_openapi_spec
def get_all_paths(spec: Dict[str, Any]) -> List[str]:
"""
Get all paths defined in the OpenAPI specification.
Args:
spec: The OpenAPI specification
Returns:
List of all paths
"""
return list(spec.get("paths", {}).keys())
def get_grouped_paths(spec: Dict[str, Any]) -> Dict[str, List[str]]:
"""
Group paths by their top-level segment.
Args:
spec: The OpenAPI specification
Returns:
Dictionary mapping top-level segments to lists of paths
"""
result = {}
for path in get_all_paths(spec):
segments = path.strip("/").split("/")
if not segments:
continue
top_segment = segments[0]
if top_segment not in result:
result[top_segment] = []
result[top_segment].append(path)
return result
def get_methods_for_path(spec: Dict[str, Any], path: str) -> List[str]:
"""
Get all HTTP methods defined for a path.
Args:
spec: The OpenAPI specification
path: The API path
Returns:
List of HTTP methods (lowercase)
"""
if path not in spec.get("paths", {}):
return []
return [
method.lower()
for method in spec["paths"][path].keys()
if method.lower() in {"get", "post", "put", "delete", "patch", "options", "head"}
]
def find_paths_with_security(
spec: Dict[str, Any],
security_scheme: Optional[str] = None
) -> List[Tuple[str, str]]:
"""
Find all paths that require security.
Args:
spec: The OpenAPI specification
security_scheme: Optional specific security scheme to filter by
Returns:
List of (path, method) tuples that require security
"""
result = []
for path, path_item in spec.get("paths", {}).items():
for method, operation in path_item.items():
if method.lower() not in {"get", "post", "put", "delete", "patch", "options", "head"}:
continue
if "security" in operation:
if security_scheme is None:
result.append((path, method.lower()))
else:
# Check if this security scheme is required
for security_req in operation["security"]:
if security_scheme in security_req:
result.append((path, method.lower()))
break
return result
def get_content_types_for_response(
spec: Dict[str, Any],
path: str,
method: str,
status_code: str = "200"
) -> List[str]:
"""
Get content types defined for a response.
Args:
spec: The OpenAPI specification
path: The API path
method: The HTTP method
status_code: The HTTP status code
Returns:
List of content types
"""
method = method.lower()
if path not in spec["paths"]:
return []
if method not in spec["paths"][path]:
return []
if "responses" not in spec["paths"][path][method]:
return []
if status_code not in spec["paths"][path][method]["responses"]:
return []
response_def = spec["paths"][path][method]["responses"][status_code]
if "content" not in response_def:
return []
return list(response_def["content"].keys())
def get_required_parameters(
spec: Dict[str, Any],
path: str,
method: str
) -> List[Dict[str, Any]]:
"""
Get all required parameters for a path/method.
Args:
spec: The OpenAPI specification
path: The API path
method: The HTTP method
Returns:
List of parameter objects that are required
"""
method = method.lower()
if path not in spec["paths"]:
return []
if method not in spec["paths"][path]:
return []
if "parameters" not in spec["paths"][path][method]:
return []
return [
param for param in spec["paths"][path][method]["parameters"]
if param.get("required", False)
]

View File

@@ -1,155 +0,0 @@
"""
Validation utilities for API tests.
"""
import json
import jsonschema
import yaml
from pathlib import Path
from typing import Any, Dict, Optional, Union
def load_openapi_spec(spec_path: Union[str, Path] = None) -> Dict[str, Any]:
"""
Load the OpenAPI specification document.
Args:
spec_path: Path to the OpenAPI specification file
Returns:
The OpenAPI specification as a dictionary
"""
if spec_path is None:
# Default to the root openapi.yaml file
spec_path = Path(__file__).parents[2] / "openapi.yaml"
with open(spec_path, "r") as f:
if str(spec_path).endswith(".yaml") or str(spec_path).endswith(".yml"):
return yaml.safe_load(f)
else:
return json.load(f)
def get_schema_for_path(
spec: Dict[str, Any],
path: str,
method: str,
status_code: str = "200",
content_type: str = "application/json"
) -> Optional[Dict[str, Any]]:
"""
Extract the response schema for a specific path, method, and status code.
Args:
spec: The OpenAPI specification
path: The API path (e.g., "/customnode/getlist")
method: The HTTP method (e.g., "get", "post")
status_code: The HTTP status code (default: "200")
content_type: The response content type (default: "application/json")
Returns:
The schema for the specified path and method, or None if not found
"""
method = method.lower()
if path not in spec["paths"]:
return None
if method not in spec["paths"][path]:
return None
if "responses" not in spec["paths"][path][method]:
return None
if status_code not in spec["paths"][path][method]["responses"]:
return None
response_def = spec["paths"][path][method]["responses"][status_code]
if "content" not in response_def:
return None
if content_type not in response_def["content"]:
return None
if "schema" not in response_def["content"][content_type]:
return None
return response_def["content"][content_type]["schema"]
def validate_response_schema(
response_data: Any,
schema: Dict[str, Any],
spec: Dict[str, Any] = None
) -> bool:
"""
Validate a response against a schema from the OpenAPI specification.
Args:
response_data: The response data to validate
schema: The schema to validate against
spec: The complete OpenAPI specification (for resolving references)
Returns:
True if validation succeeds, raises an exception otherwise
"""
if spec is None:
spec = load_openapi_spec()
# Create a resolver for references within the schema
resolver = jsonschema.RefResolver.from_schema(spec)
# Validate the response against the schema
jsonschema.validate(
instance=response_data,
schema=schema,
resolver=resolver
)
return True
def validate_response(
response_data: Any,
path: str,
method: str,
status_code: str = "200",
content_type: str = "application/json",
spec: Dict[str, Any] = None
) -> bool:
"""
Validate a response against the schema defined in the OpenAPI specification.
Args:
response_data: The response data to validate
path: The API path
method: The HTTP method
status_code: The HTTP status code (default: "200")
content_type: The response content type (default: "application/json")
spec: The OpenAPI specification (loaded from default location if None)
Returns:
True if validation succeeds, raises an exception otherwise
"""
if spec is None:
spec = load_openapi_spec()
schema = get_schema_for_path(
spec=spec,
path=path,
method=method,
status_code=status_code,
content_type=content_type
)
if schema is None:
raise ValueError(
f"No schema found for {method.upper()} {path} "
f"with status {status_code} and content type {content_type}"
)
return validate_response_schema(
response_data=response_data,
schema=schema,
spec=spec
)