-
Notifications
You must be signed in to change notification settings - Fork 2.8k
Description
Is there an existing issue for this problem?
- I have searched the existing issues
Install method
Invoke's Launcher
Operating system
Linux
GPU vendor
AMD (ROCm)
GPU model
AMD RYZEN AI MAX+ 395 w/ Radeon 8060S
GPU VRAM
128GB (unified memory)
Version number
v6.11.1
Browser
No response
System Information
{
"version": "6.11.1",
"dependencies": {
"absl-py" : "2.4.0",
"accelerate" : "1.12.0",
"annotated-types" : "0.7.0",
"anyio" : "4.12.1",
"attrs" : "25.4.0",
"bidict" : "0.23.1",
"bitsandbytes" : "0.49.2",
"blake3" : "1.0.8",
"certifi" : "2022.12.7",
"cffi" : "2.0.0",
"charset-normalizer" : "2.1.1",
"click" : "8.3.1",
"coloredlogs" : "15.0.1",
"compel" : "2.1.1",
"contourpy" : "1.3.3",
"CUDA" : "N/A",
"cycler" : "0.12.1",
"Deprecated" : "1.3.1",
"diffusers" : "0.36.0",
"dnspython" : "2.8.0",
"dynamicprompts" : "0.31.0",
"einops" : "0.8.2",
"fastapi" : "0.118.3",
"fastapi-events" : "0.12.2",
"filelock" : "3.20.0",
"flatbuffers" : "25.12.19",
"fonttools" : "4.61.1",
"fsspec" : "2025.12.0",
"gguf" : "0.17.1",
"h11" : "0.16.0",
"hf-xet" : "1.2.0",
"httpcore" : "1.0.9",
"httptools" : "0.7.1",
"httpx" : "0.28.1",
"huggingface_hub" : "0.36.2",
"humanfriendly" : "10.0",
"idna" : "3.4",
"importlib_metadata" : "7.1.0",
"InvokeAI" : "6.11.1",
"jax" : "0.7.1",
"jaxlib" : "0.7.1",
"Jinja2" : "3.1.6",
"kiwisolver" : "1.4.9",
"MarkupSafe" : "2.1.5",
"matplotlib" : "3.10.8",
"mediapipe" : "0.10.14",
"ml_dtypes" : "0.5.4",
"mpmath" : "1.3.0",
"networkx" : "3.6.1",
"numpy" : "1.26.4",
"onnx" : "1.16.1",
"onnxruntime" : "1.19.2",
"opencv-contrib-python": "4.11.0.86",
"opt_einsum" : "3.4.0",
"packaging" : "24.1",
"picklescan" : "1.0.3",
"pillow" : "12.0.0",
"prompt_toolkit" : "3.0.52",
"protobuf" : "4.25.8",
"psutil" : "7.2.2",
"pycparser" : "3.0",
"pydantic" : "2.12.5",
"pydantic-settings" : "2.13.0",
"pydantic_core" : "2.41.5",
"pyparsing" : "3.3.2",
"PyPatchMatch" : "1.0.2",
"python-dateutil" : "2.9.0.post0",
"python-dotenv" : "1.2.1",
"python-engineio" : "4.13.1",
"python-multipart" : "0.0.22",
"python-socketio" : "5.16.1",
"pytorch-triton-rocm" : "3.3.1",
"PyWavelets" : "1.9.0",
"PyYAML" : "6.0.3",
"regex" : "2026.1.15",
"requests" : "2.28.1",
"safetensors" : "0.7.0",
"scipy" : "1.17.0",
"semver" : "3.0.4",
"sentencepiece" : "0.2.0",
"setuptools" : "70.2.0",
"simple-websocket" : "1.1.0",
"six" : "1.17.0",
"sounddevice" : "0.5.5",
"spandrel" : "0.4.1",
"starlette" : "0.48.0",
"sympy" : "1.14.0",
"tokenizers" : "0.22.2",
"torch" : "2.7.1+rocm6.3",
"torchsde" : "0.2.6",
"torchvision" : "0.22.1+rocm6.3",
"tqdm" : "4.66.5",
"trampoline" : "0.1.2",
"transformers" : "4.57.6",
"typing-inspection" : "0.4.2",
"typing_extensions" : "4.15.0",
"urllib3" : "1.26.13",
"uvicorn" : "0.41.0",
"uvloop" : "0.22.1",
"watchfiles" : "1.1.1",
"wcwidth" : "0.6.0",
"websockets" : "16.0",
"wrapt" : "2.1.1",
"wsproto" : "1.3.2",
"zipp" : "3.19.2"
},
"config": {
"schema_version": "4.0.2",
"legacy_models_yaml_path": null,
"host": "127.0.0.1",
"port": 9090,
"allow_origins": [],
"allow_credentials": true,
"allow_methods": ["*"],
"allow_headers": ["*"],
"ssl_certfile": null,
"ssl_keyfile": null,
"log_tokenization": false,
"patchmatch": true,
"models_dir": "models",
"convert_cache_dir": "models/.convert_cache",
"download_cache_dir": "models/.download_cache",
"legacy_conf_dir": "configs",
"db_dir": "databases",
"outputs_dir": "outputs",
"custom_nodes_dir": "nodes",
"style_presets_dir": "style_presets",
"workflow_thumbnails_dir": "workflow_thumbnails",
"log_handlers": ["console"],
"log_format": "color",
"log_level": "info",
"log_sql": false,
"log_level_network": "warning",
"use_memory_db": false,
"dev_reload": false,
"profile_graphs": false,
"profile_prefix": null,
"profiles_dir": "profiles",
"max_cache_ram_gb": null,
"max_cache_vram_gb": null,
"log_memory_usage": false,
"model_cache_keep_alive_min": 0,
"device_working_mem_gb": 3,
"enable_partial_loading": false,
"keep_ram_copy_of_weights": true,
"ram": null,
"vram": null,
"lazy_offload": true,
"pytorch_cuda_alloc_conf": null,
"device": "auto",
"precision": "auto",
"sequential_guidance": false,
"attention_type": "auto",
"attention_slice_size": "auto",
"force_tiled_decode": false,
"pil_compress_level": 1,
"max_queue_size": 10000,
"clear_queue_on_startup": false,
"allow_nodes": null,
"deny_nodes": null,
"node_cache_size": 512,
"hashing_algorithm": "blake3_single",
"remote_api_tokens": null,
"scan_models_on_startup": false,
"unsafe_disable_picklescan": false,
"allow_unknown_models": true
},
"set_config_fields": ["legacy_models_yaml_path"]
}
What happened
I have an AMD Ryzen AI Max+ 395 with 128GB of RAM. I am running Ubuntu 25.10. I am trying to use Invoke to generate images, making use of the AMD hardware. I launch the AppImage with "./Invoke...AppImage --no-sandbox" as a normal user. I configure a prompt and press 'Invoke'. When I do so, a red "Server error" box pops up and image generation aborts.
This issue was previously reported here, but was closed 2 weeks ago due to inactivity:
What you expected to happen
I expected an image to be generated.
How to reproduce the problem
No response
Additional context
This is the output error trace:
Started Invoke process with PID 421610
[2026-02-16 21:52:53,979]::[InvokeAI]::INFO --> Using torch device: AMD Radeon Graphics
[2026-02-16 21:52:53,980]::[InvokeAI]::INFO --> cuDNN version: 3003000
patchmatch.patch_match: INFO - Compiling and loading c extensions from "/opt/Invoke/.venv/lib/python3.12/site-packages/patchmatch".
patchmatch.patch_match: ERROR - patchmatch failed to load or compile (Command 'make clean && make' returned non-zero exit status 2.).
patchmatch.patch_match: INFO - Refer to https://invoke-ai.github.io/InvokeAI/installation/060_INSTALL_PATCHMATCH/ for installation instructions.
[2026-02-16 21:52:55,208]::[InvokeAI]::INFO --> Patchmatch not loaded (nonfatal)
[2026-02-16 21:52:55,434]::[InvokeAI]::INFO --> InvokeAI version 6.11.1
[2026-02-16 21:52:55,435]::[InvokeAI]::INFO --> Root directory = /opt/Invoke
[2026-02-16 21:52:55,435]::[InvokeAI]::INFO --> Initializing database at /opt/Invoke/databases/invokeai.db
[2026-02-16 21:52:55,437]::[ModelManagerService]::INFO --> [MODEL CACHE] Calculated model RAM cache size: 59820.16 MB. Heuristics applied: [1, 2].
[2026-02-16 21:52:55,454]::[InvokeAI]::INFO --> Invoke running on http://127.0.0.1:9090 (Press CTRL+C to quit)
[2026-02-16 21:53:34,020]::[InvokeAI]::INFO --> Executing queue item 27, session 5e64432b-2b02-47c6-abd1-d853690d914b
`torch_dtype` is deprecated! Use `dtype` instead!
[2026-02-16 21:53:34,354]::[ModelManagerService]::INFO --> [MODEL CACHE] Loaded model '303d7685-7323-45ab-ad8d-bf4033d65db6:text_encoder' (CLIPTextModel) onto cuda device in 0.29s. Total model size: 234.72MB, VRAM: 234.72MB (100.0%)
[2026-02-16 21:53:34,392]::[ModelManagerService]::INFO --> [MODEL CACHE] Loaded model '303d7685-7323-45ab-ad8d-bf4033d65db6:tokenizer' (CLIPTokenizer) onto cuda device in 0.00s. Total model size: 0.00MB, VRAM: 0.00MB (0.0%)
/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/functional.py:2551: UserWarning: Ignoring invalid value for boolean flag AMD_SERIALIZE_KERNEL: 3, valid values are 0 or 1. (Triggered internally at /pytorch/c10/util/env.cpp:86.)
return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
[2026-02-16 21:53:34,408]::[InvokeAI]::ERROR --> Error while invoking session 5e64432b-2b02-47c6-abd1-d853690d914b, invocation 35325991-cb8f-4e6d-9b18-f4a029723de5 (sdxl_compel_prompt): HIP error: invalid device function
HIP kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing AMD_SERIALIZE_KERNEL=3
Compile with TORCH_USE_HIP_DSA to enable device-side assertions.
[2026-02-16 21:53:34,408]::[InvokeAI]::ERROR --> Traceback (most recent call last):
File "/opt/Invoke/.venv/lib/python3.12/site-packages/invokeai/app/services/session_processor/session_processor_default.py", line 130, in run_node
output = invocation.invoke_internal(context=context, services=self._services)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/invokeai/app/invocations/baseinvocation.py", line 244, in invoke_internal
output = self.invoke(context)
^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/utils/contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/invokeai/app/invocations/compel.py", line 280, in invoke
c1, c1_pooled = self.run_clip_compel(context, self.clip, self.prompt, False, "lora_te1", zero_on_empty=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/invokeai/app/invocations/compel.py", line 226, in run_clip_compel
c, _options = compel.build_conditioning_tensor_for_conjunction(conjunction)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/compel/compel.py", line 190, in build_conditioning_tensor_for_conjunction
this_conditioning, this_options = self.build_conditioning_tensor_for_prompt_object(p)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/compel/compel.py", line 222, in build_conditioning_tensor_for_prompt_object
return self._get_conditioning_for_flattened_prompt(prompt), {}
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/compel/compel.py", line 285, in _get_conditioning_for_flattened_prompt
return self.conditioning_provider.get_embeddings_for_weighted_prompt_fragments(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/compel/embeddings_provider.py", line 129, in get_embeddings_for_weighted_prompt_fragments
base_embedding = self.build_weighted_embedding_tensor(tokens, per_token_weights, mask, device=device)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/compel/embeddings_provider.py", line 409, in build_weighted_embedding_tensor
empty_z = self._encode_token_ids_to_embeddings(empty_token_ids)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/compel/embeddings_provider.py", line 442, in _encode_token_ids_to_embeddings
text_encoder_output = self.text_encoder(token_ids,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/transformers/utils/generic.py", line 918, in wrapper
output = func(self, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/transformers/models/clip/modeling_clip.py", line 706, in forward
return self.text_model(
^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/transformers/models/clip/modeling_clip.py", line 603, in forward
hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/transformers/models/clip/modeling_clip.py", line 246, in forward
inputs_embeds = self.token_embedding(input_ids)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/invokeai/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/custom_embedding.py", line 29, in forward
return super().forward(input)
^^^^^^^^^^^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/modules/sparse.py", line 190, in forward
return F.embedding(
^^^^^^^^^^^^
File "/opt/Invoke/.venv/lib/python3.12/site-packages/torch/nn/functional.py", line 2551, in embedding
return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
RuntimeError: HIP error: invalid device function
HIP kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing AMD_SERIALIZE_KERNEL=3
Compile with TORCH_USE_HIP_DSA to enable device-side assertions.
[2026-02-16 21:53:34,424]::[InvokeAI]::INFO --> Graph stats: 5e64432b-2b02-47c6-abd1-d853690d914b
Node Calls Seconds VRAM Change
sdxl_model_loader 1 0.007s +0.000G
integer 1 0.001s +0.000G
noise 1 0.003s +0.000G
sdxl_compel_prompt 1 0.371s +0.236G
TOTAL GRAPH EXECUTION TIME: 0.382s
TOTAL GRAPH WALL TIME: 0.383s
RAM used by InvokeAI process: 1.70G (+0.358G)
RAM used to load models: 0.23G
VRAM in use: 0.236G
RAM cache statistics:
Model cache hits: 2
Model cache misses: 2
Models cached: 2
Models cleared from cache: 0
Cache high water mark: 0.23/58.42G
Discord username
No response