Merge pull request #516 from alisson-anjos/main

Add LLaVa-Describer custom nodes
This commit is contained in:
Dr.Lt.Data
2024-03-25 11:36:33 +09:00
committed by GitHub


@@ -6846,6 +6846,16 @@
],
"install_type": "unzip",
"description": "This is a node to convert an image into a CMYK Halftone dot image."
},
{
"author": "alisson-anjos",
"title": "ComfyUI-LLaVA-Describer",
"reference": "https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer",
"files": [
"https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer"
],
"install_type": "git-clone",
"description": "This is an extension for ComfyUI to extract descriptions from your images using the multimodal model called LLaVa. The LLaVa model - Large Language and Vision Assistant, although trained on a relatively small dataset, demonstrates exceptional capabilities in understanding images and answering questions about them. This model shows behaviors similar to multimodal models like GPT-4, even when presented with unseen images and instructions."
}
]
}
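
For context, an entry with "install_type": "git-clone" like the one added here is handled by cloning each URL in its "files" list into ComfyUI's custom_nodes directory. The sketch below is a minimal illustration of that step under stated assumptions, not ComfyUI-Manager's actual implementation; the clone_node helper name and the default target path are assumptions.

import subprocess
from pathlib import Path

# Hypothetical helper: clone each repository listed in a "git-clone" entry
# into ComfyUI's custom_nodes directory.
def clone_node(entry: dict, comfyui_root: str = ".") -> None:
    custom_nodes = Path(comfyui_root) / "custom_nodes"
    custom_nodes.mkdir(parents=True, exist_ok=True)
    for repo_url in entry["files"]:
        target = custom_nodes / repo_url.rstrip("/").split("/")[-1]
        if not target.exists():
            subprocess.run(["git", "clone", repo_url, str(target)], check=True)

# Usage with the entry added in this PR:
clone_node({
    "files": ["https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer"],
    "install_type": "git-clone",
})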