Add human-readable names for nodes
parent 9a27030519
commit 5bcbc007dd
nodes.py | 48 lines changed
@@ -1018,6 +1018,54 @@ NODE_CLASS_MAPPINGS = {
     "VAEEncodeTiled": VAEEncodeTiled,
 }
 
+NODE_DISPLAY_NAME_MAPPINGS = {
+    # Sampling
+    "KSampler": "KSampler",
+    "KSamplerAdvanced": "KSampler (Advanced)",
+    # Loaders
+    "CheckpointLoader": "Load Checkpoint",
+    "CheckpointLoaderSimple": "Load Checkpoint (Simple)",
+    "VAELoader": "Load VAE",
+    "LoraLoader": "Load LoRA",
+    "CLIPLoader": "Load CLIP",
+    "ControlNetLoader": "Load ControlNet Model",
+    "DiffControlNetLoader": "Load ControlNet Model (diff)",
+    "StyleModelLoader": "Load Style Model",
+    "CLIPVisionLoader": "Load CLIP Vision",
+    "UpscaleModelLoader": "Load Upscale Model",
+    # Conditioning
+    "CLIPVisionEncode": "CLIP Vision Encode",
+    "StyleModelApply": "Apply Style Model",
+    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
+    "CLIPSetLastLayer": "CLIP Set Last Layer",
+    "ConditioningCombine": "Conditioning (Combine)",
+    "ConditioningSetArea": "Conditioning (Set Area)",
+    "ControlNetApply": "Apply ControlNet",
+    # Latent
+    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
+    "SetLatentNoiseMask": "Set Latent Noise Mask",
+    "VAEDecode": "VAE Decode",
+    "VAEEncode": "VAE Encode",
+    "LatentRotate": "Rotate Latent",
+    "LatentFlip": "Flip Latent",
+    "LatentCrop": "Crop Latent",
+    "EmptyLatentImage": "Empty Latent Image",
+    "LatentUpscale": "Upscale Latent",
+    "LatentComposite": "Latent Composite",
+    # Image
+    "SaveImage": "Save Image",
+    "PreviewImage": "Preview Image",
+    "LoadImage": "Load Image",
+    "LoadImageMask": "Load Image (as Mask)",
+    "ImageScale": "Upscale Image",
+    "ImageUpscaleWithModel": "Upscale Image (using Model)",
+    "ImageInvert": "Invert Image",
+    "ImagePadForOutpaint": "Pad Image for Outpainting",
+    # _for_testing
+    "VAEDecodeTiled": "VAE Decode (Tiled)",
+    "VAEEncodeTiled": "VAE Encode (Tiled)",
+}
+
 def load_custom_node(module_path):
     module_name = os.path.basename(module_path)
     if os.path.isfile(module_path):
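Note: the new mapping only relabels node classes that are already registered in NODE_CLASS_MAPPINGS; nothing new is registered here. A minimal sanity-check sketch, not part of this commit, assuming the top-level nodes module is importable:

import nodes

# Every display-name key should correspond to a registered node class;
# classes without a friendly name are handled by the server-side fallback below.
unknown = set(nodes.NODE_DISPLAY_NAME_MAPPINGS) - set(nodes.NODE_CLASS_MAPPINGS)
assert not unknown, f"display names without a registered class: {unknown}"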
@@ -153,7 +153,8 @@ class PromptServer():
                 info['input'] = obj_class.INPUT_TYPES()
                 info['output'] = obj_class.RETURN_TYPES
                 info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output']
-                info['name'] = x #TODO
+                info['name'] = x
+                info['display_name'] = nodes.NODE_DISPLAY_NAME_MAPPINGS[x] if x in nodes.NODE_DISPLAY_NAME_MAPPINGS.keys() else x
                 info['description'] = ''
                 info['category'] = 'sd'
                 if hasattr(obj_class, 'CATEGORY'):
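The display_name lookup added in PromptServer is equivalent to a dict .get() with the internal class name as the fallback. A minimal sketch of that behaviour, not part of this commit, assuming the nodes module is importable:

import nodes

def display_name_for(class_name: str) -> str:
    # Fall back to the internal class name when no friendly name is registered.
    return nodes.NODE_DISPLAY_NAME_MAPPINGS.get(class_name, class_name)

print(display_name_for("CheckpointLoaderSimple"))  # "Load Checkpoint (Simple)"
print(display_name_for("SomeCustomNode"))          # falls back to "SomeCustomNode"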
@@ -777,7 +777,7 @@ class ComfyApp {
                app.#invokeExtensionsAsync("nodeCreated", this);
            },
            {
-                title: nodeData.name,
+                title: nodeData.display_name,
                comfyClass: nodeData.name,
            }
        );