import logging
from enum import Enum
from typing import Optional
from typing_extensions import override

import torch
from pydantic import BaseModel, Field

from comfy_api.latest import ComfyExtension, io as comfy_io
from comfy_api_nodes.util.validation_utils import (
    validate_image_aspect_ratio_range,
    get_number_of_images,
)
from comfy_api_nodes.apis.client import (
    ApiEndpoint,
    HttpMethod,
    SynchronousOperation,
)
from comfy_api_nodes.apinode_utils import (
    download_url_to_image_tensor,
    upload_images_to_comfyapi,
    validate_string,
)


# Both nodes POST to this BytePlus proxy endpoint.
BYTEPLUS_ENDPOINT = "/proxy/byteplus/api/v3/images/generations"


class Text2ImageModelName(str, Enum):
    seedream3 = "seedream-3-0-t2i-250415"


class Image2ImageModelName(str, Enum):
    seededit3 = "seededit-3-0-i2i-250628"


class Text2ImageTaskCreationRequest(BaseModel):
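    """Request body for a Seedream text-to-image generation call."""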
    model: Text2ImageModelName = Text2ImageModelName.seedream3
    prompt: str = Field(...)
    response_format: Optional[str] = Field("url")
    size: Optional[str] = Field(None)
    seed: Optional[int] = Field(0, ge=0, le=2147483647)
    guidance_scale: Optional[float] = Field(..., ge=1.0, le=10.0)
    watermark: Optional[bool] = Field(True)


class Image2ImageTaskCreationRequest(BaseModel):
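    """Request body for a SeedEdit image-to-image edit call."""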
    model: Image2ImageModelName = Image2ImageModelName.seededit3
    prompt: str = Field(...)
    response_format: Optional[str] = Field("url")
    image: str = Field(..., description="Base64 encoded string or image URL")
    size: Optional[str] = Field("adaptive")
    seed: Optional[int] = Field(..., ge=0, le=2147483647)
    guidance_scale: Optional[float] = Field(..., ge=1.0, le=10.0)
    watermark: Optional[bool] = Field(True)


class ImageTaskCreationResponse(BaseModel):
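    """Response returned by the BytePlus image-generation endpoint."""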
    model: str = Field(...)
    created: int = Field(..., description="Unix timestamp (in seconds) indicating when the request was created.")
    data: list = Field([], description="Contains information about the generated image(s).")
    error: dict = Field({}, description="Contains `code` and `message` fields in case of error.")


# (label, width, height) presets; "Custom" falls back to the node's width/height inputs.
RECOMMENDED_PRESETS = [
    ("1024x1024 (1:1)", 1024, 1024),
    ("864x1152 (3:4)", 864, 1152),
    ("1152x864 (4:3)", 1152, 864),
    ("1280x720 (16:9)", 1280, 720),
    ("720x1280 (9:16)", 720, 1280),
    ("832x1248 (2:3)", 832, 1248),
    ("1248x832 (3:2)", 1248, 832),
    ("1512x648 (21:9)", 1512, 648),
    ("2048x2048 (1:1)", 2048, 2048),
    ("Custom", None, None),
]


def get_image_url_from_response(response: ImageTaskCreationResponse) -> str:
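    """Return the URL of the first generated image, raising if the response reports an error."""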
    if response.error:
        error_msg = f"ByteDance request failed. Code: {response.error['code']}, message: {response.error['message']}"
        logging.error(error_msg)
        raise RuntimeError(error_msg)
    logging.info("ByteDance task succeeded, image URL: %s", response.data[0]["url"])
    return response.data[0]["url"]


class ByteDanceImageNode(comfy_io.ComfyNode):
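    """Text-to-image API node backed by the ByteDance Seedream model."""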

    @classmethod
    def define_schema(cls):
        return comfy_io.Schema(
            node_id="ByteDanceImageNode",
            display_name="ByteDance Image",
            category="api node/image/ByteDance",
            description="Generate images from a text prompt using ByteDance models via the API.",
            inputs=[
                comfy_io.Combo.Input(
                    "model",
                    options=[model.value for model in Text2ImageModelName],
                    default=Text2ImageModelName.seedream3.value,
                    tooltip="Model name",
                ),
                comfy_io.String.Input(
                    "prompt",
                    multiline=True,
                    tooltip="The text prompt used to generate the image",
                ),
                comfy_io.Combo.Input(
                    "size_preset",
                    options=[label for label, _, _ in RECOMMENDED_PRESETS],
                    tooltip="Pick a recommended size. Select Custom to use the width and height below.",
                ),
                comfy_io.Int.Input(
                    "width",
                    default=1024,
                    min=512,
                    max=2048,
                    step=64,
                    tooltip="Custom width for the image. Used only when `size_preset` is set to `Custom`.",
                ),
                comfy_io.Int.Input(
                    "height",
                    default=1024,
                    min=512,
                    max=2048,
                    step=64,
                    tooltip="Custom height for the image. Used only when `size_preset` is set to `Custom`.",
                ),
                comfy_io.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=comfy_io.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to use for generation",
                    optional=True,
                ),
                comfy_io.Float.Input(
                    "guidance_scale",
                    default=2.5,
                    min=1.0,
                    max=10.0,
                    step=0.01,
                    display_mode=comfy_io.NumberDisplay.number,
                    tooltip="Higher values make the image follow the prompt more closely",
                    optional=True,
                ),
                comfy_io.Boolean.Input(
                    "watermark",
                    default=True,
                    tooltip="Whether to add an \"AI generated\" watermark to the image",
                    optional=True,
                ),
            ],
            outputs=[
                comfy_io.Image.Output(),
            ],
            hidden=[
                comfy_io.Hidden.auth_token_comfy_org,
                comfy_io.Hidden.api_key_comfy_org,
                comfy_io.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        size_preset: str,
        width: int,
        height: int,
        seed: int,
        guidance_scale: float,
        watermark: bool,
    ) -> comfy_io.NodeOutput:
        validate_string(prompt, strip_whitespace=True, min_length=1)
        # Resolve the preset label to concrete dimensions; the "Custom" preset maps to (None, None).
        w = h = None
        for label, tw, th in RECOMMENDED_PRESETS:
            if label == size_preset:
                w, h = tw, th
                break

        # Fall back to the explicit width/height inputs and validate them.
        if w is None or h is None:
            w, h = width, height
            if not (512 <= w <= 2048) or not (512 <= h <= 2048):
                raise ValueError(
                    f"Custom size out of range: {w}x{h}. "
                    "Both width and height must be between 512 and 2048 pixels."
                )

        payload = Text2ImageTaskCreationRequest(
            model=model,
            prompt=prompt,
            size=f"{w}x{h}",
            seed=seed,
            guidance_scale=guidance_scale,
            watermark=watermark,
        )
        auth_kwargs = {
            "auth_token": cls.hidden.auth_token_comfy_org,
            "comfy_api_key": cls.hidden.api_key_comfy_org,
        }
        response = await SynchronousOperation(
            endpoint=ApiEndpoint(
                path=BYTEPLUS_ENDPOINT,
                method=HttpMethod.POST,
                request_model=Text2ImageTaskCreationRequest,
                response_model=ImageTaskCreationResponse,
            ),
            request=payload,
            auth_kwargs=auth_kwargs,
        ).execute()
        return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))


class ByteDanceImageEditNode(comfy_io.ComfyNode):
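    """Image-to-image API node backed by the ByteDance SeedEdit model."""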

    @classmethod
    def define_schema(cls):
        return comfy_io.Schema(
            node_id="ByteDanceImageEditNode",
            display_name="ByteDance Image Edit",
            category="api node/image/ByteDance",
            description="Edit images using ByteDance models via the API, guided by a text prompt.",
            inputs=[
                comfy_io.Combo.Input(
                    "model",
                    options=[model.value for model in Image2ImageModelName],
                    default=Image2ImageModelName.seededit3.value,
                    tooltip="Model name",
                ),
                comfy_io.Image.Input(
                    "image",
                    tooltip="The base image to edit",
                ),
                comfy_io.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Instruction to edit the image",
                ),
                comfy_io.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=comfy_io.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to use for generation",
                    optional=True,
                ),
                comfy_io.Float.Input(
                    "guidance_scale",
                    default=5.5,
                    min=1.0,
                    max=10.0,
                    step=0.01,
                    display_mode=comfy_io.NumberDisplay.number,
                    tooltip="Higher values make the image follow the prompt more closely",
                    optional=True,
                ),
                comfy_io.Boolean.Input(
                    "watermark",
                    default=True,
                    tooltip="Whether to add an \"AI generated\" watermark to the image",
                    optional=True,
                ),
            ],
            outputs=[
                comfy_io.Image.Output(),
            ],
            hidden=[
                comfy_io.Hidden.auth_token_comfy_org,
                comfy_io.Hidden.api_key_comfy_org,
                comfy_io.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        image: torch.Tensor,
        prompt: str,
        seed: int,
        guidance_scale: float,
        watermark: bool,
    ) -> comfy_io.NodeOutput:
        validate_string(prompt, strip_whitespace=True, min_length=1)
        if get_number_of_images(image) != 1:
            raise ValueError("Exactly one input image is required.")
        validate_image_aspect_ratio_range(image, (1, 3), (3, 1))
        auth_kwargs = {
            "auth_token": cls.hidden.auth_token_comfy_org,
            "comfy_api_key": cls.hidden.api_key_comfy_org,
        }
        # Upload the source image first; the request's `image` field takes a URL or base64 string.
        source_url = (await upload_images_to_comfyapi(
            image,
            max_images=1,
            mime_type="image/png",
            auth_kwargs=auth_kwargs,
        ))[0]
        payload = Image2ImageTaskCreationRequest(
            model=model,
            prompt=prompt,
            image=source_url,
            seed=seed,
            guidance_scale=guidance_scale,
            watermark=watermark,
        )
        response = await SynchronousOperation(
            endpoint=ApiEndpoint(
                path=BYTEPLUS_ENDPOINT,
                method=HttpMethod.POST,
                request_model=Image2ImageTaskCreationRequest,
                response_model=ImageTaskCreationResponse,
            ),
            request=payload,
            auth_kwargs=auth_kwargs,
        ).execute()
        return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))


class ByteDanceExtension(ComfyExtension):
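    """ComfyUI extension that registers the ByteDance image nodes."""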
    @override
    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
        return [
            ByteDanceImageNode,
            ByteDanceImageEditNode,
        ]


async def comfy_entrypoint() -> ByteDanceExtension:
    return ByteDanceExtension()