Skip to content

[Template Bug]: Wan2.2 Image to Video(Subgraph) #754

@notavailableuser99

Description

@notavailableuser99

Confirmation

  • I have confirmed this is a workflow template mistake

Template Name

Wan2.2 Image to Video(Subgraph)

Problem Description

I get the message:
1 ERROR

TypeError: Trying to convert Float8_e4m3fn to the MPS backend but it does not have support for that dtype.

Link to the template's documentation page:
https://docs.comfy.org/tutorials/video/wan/wan2_2

macOS Sequoia / mac Mini M2 Pro

ComfyUI Mode

Nodes 2.0 Mode

Error Message / Logs

TypeError: Trying to convert Float8_e4m3fn to the MPS backend but it does not have support for that dtype.


  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/execution.py", line 525, in execute
    output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, v3_data=v3_data)
                                                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/execution.py", line 334, in get_output_data
    return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, v3_data=v3_data)
                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/execution.py", line 308, in _async_map_node_over_list
    await process_inputs(input_dict, i)

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/execution.py", line 296, in process_inputs
    result = f(**inputs)
             ^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/nodes.py", line 1625, in sample
    return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/nodes.py", line 1556, in common_ksampler
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/sample.py", line 66, in sample
    samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/samplers.py", line 1180, in sample
    return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/samplers.py", line 1070, in sample
    return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/samplers.py", line 1052, in sample
    output = executor.execute(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed, latent_shapes=latent_shapes)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/patcher_extension.py", line 112, in execute
    return self.original(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/samplers.py", line 985, in outer_sample
    self.inner_model, self.conds, self.loaded_models = comfy.sampler_helpers.prepare_sampling(self.model_patcher, noise.shape, self.conds, self.model_options)
                                                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/sampler_helpers.py", line 142, in prepare_sampling
    return executor.execute(model, noise_shape, conds, model_options=model_options, force_full_load=force_full_load, force_offload=force_offload)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/patcher_extension.py", line 112, in execute
    return self.original(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/sampler_helpers.py", line 156, in _prepare_sampling
    comfy.model_management.load_models_gpu([model] + models, memory_required=memory_required, minimum_memory_required=minimum_memory_required, force_full_load=force_full_load)

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/model_management.py", line 819, in load_models_gpu
    loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/model_management.py", line 581, in model_load
    self.model_use_more_vram(use_more_vram, force_patch_weights=force_patch_weights)

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/model_management.py", line 612, in model_use_more_vram
    return self.model.partially_load(self.device, extra_memory, force_patch_weights=force_patch_weights)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/model_patcher.py", line 1065, in partially_load
    raise e

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/model_patcher.py", line 1062, in partially_load
    self.load(device_to, lowvram_model_memory=current_used + extra_memory, force_patch_weights=force_patch_weights, full_load=full_load)

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/model_patcher.py", line 860, in load
    self.patch_weight_to_device(key, device_to=device_to)

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/model_patcher.py", line 703, in patch_weight_to_device
    temp_weight = convert_func(temp_weight, inplace=True)
                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Applications/ComfyUI.app/Contents/Resources/ComfyUI/comfy/ops.py", line 1092, in convert_weight
    return weight.dequantize()
           ^^^^^^^^^^^^^^^^^^^

  File "/Users/g4work1/Applications/.venv/lib/python3.12/site-packages/comfy_kitchen/tensor/base.py", line 286, in dequantize
    full = self.layout_cls.dequantize(qdata, self._params)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Users/g4work1/Applications/.venv/lib/python3.12/site-packages/comfy_kitchen/tensor/fp8.py", line 69, in dequantize
    return ck.dequantize_per_tensor_fp8(qdata, params.scale, params.orig_dtype)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Users/g4work1/Applications/.venv/lib/python3.12/site-packages/comfy_kitchen/__init__.py", line 87, in dequantize_per_tensor_fp8
    return torch.ops.comfy_kitchen.dequantize_fp8(x, scale, dtype_code)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Users/g4work1/Applications/.venv/lib/python3.12/site-packages/torch/_ops.py", line 1158, in __call__
    return self._op(*args, **(kwargs or {}))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Users/g4work1/Applications/.venv/lib/python3.12/site-packages/torch/_library/custom_ops.py", line 335, in backend_impl
    result = self._backend_fns[device_type](*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Users/g4work1/Applications/.venv/lib/python3.12/site-packages/torch/_compile.py", line 51, in inner
    return disable_fn(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "/Users/g4work1/Applications/.venv/lib/python3.12/site-packages/torch/_dynamo/eval_frame.py", line 849, in _fn
    return fn(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^

  File "/Users/g4work1/Applications/.venv/lib/python3.12/site-packages/torch/_library/custom_ops.py", line 367, in wrapped_fn
    return fn(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^

  File "/Users/g4work1/Applications/.venv/lib/python3.12/site-packages/comfy_kitchen/backends/eager/quantization.py", line 405, in _op_dequantize_fp8
    return impl(**kwargs)
           ^^^^^^^^^^^^^^

  File "/Users/g4work1/Applications/.venv/lib/python3.12/site-packages/comfy_kitchen/backends/eager/quantization.py", line 60, in dequantize_per_tensor_fp8
    dq_tensor = x.to(dtype=output_type) * scale.to(dtype=output_type)
                ^^^^^^^^^^^^^^^^^^^^^^^

Metadata

Metadata

Assignees

No one assigned

    Labels

    bug — Something isn't working

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions