update DB

This commit is contained in:
dr.lt.data
2024-03-25 11:49:46 +09:00
parent 3369b27e36
commit d0d1c737c5
6 changed files with 232 additions and 77 deletions

View File

@@ -5868,16 +5868,6 @@
"install_type": "git-clone",
"description": "Nodes:Abracadabra Summary, Abracadabra"
},
{
"author": "XINZHANG-ops",
"title": "comfyui-xin-nodes",
"reference": "https://github.com/XINZHANG-ops/comfyui-xin-nodes",
"files": [
"https://github.com/XINZHANG-ops/comfyui-xin-nodes"
],
"install_type": "git-clone",
"description": "Nodes:ImageSizeClassifer, RandomInt, ShowValue"
},
{
"author": "cerspense",
"title": "cspnodes",
@@ -6468,6 +6458,26 @@
"install_type": "git-clone",
"description": "A simple sidebar tweak to force fix the ComfyUI menu to the top of the screen. Really quick and dirty. May break with some ComfyUI setups."
},
{
"author": "alisson-anjos",
"title": "ComfyUI-LLaVA-Describer",
"reference": "https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer",
"files": [
"https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer"
],
"install_type": "git-clone",
"description": "This is an extension for ComfyUI to extract descriptions from your images using the multimodal model called LLaVa. The LLaVa model - Large Language and Vision Assistant, although trained on a relatively small dataset, demonstrates exceptional capabilities in understanding images and answering questions about them. This model shows behaviors similar to multimodal models like GPT-4, even when presented with unseen images and instructions."
},
{
"author": "chaosaiart",
"title": "Chaosaiart-Nodes",
"reference": "https://github.com/chaosaiart/Chaosaiart-Nodes",
"files": [
"https://github.com/chaosaiart/Chaosaiart-Nodes"
],
"install_type": "git-clone",
"description": "This extension provides various custom nodes to assist in configuring the workflow structure."
},
@@ -6846,16 +6856,6 @@
],
"install_type": "unzip",
"description": "This is a node to convert an image into a CMYK Halftone dot image."
},
{
"author": "alisson-anjos",
"title": "ComfyUI-LLaVA-Describer",
"reference": "https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer",
"files": [
"https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer"
],
"install_type": "git-clone",
"description": "This is an extension for ComfyUI to extract descriptions from your images using the multimodal model called LLaVa. The LLaVa model - Large Language and Vision Assistant, although trained on a relatively small dataset, demonstrates exceptional capabilities in understanding images and answering questions about them. This model shows behaviors similar to multimodal models like GPT-4, even when presented with unseen images and instructions."
}
]
}