Merged
@@ -35,13 +35,15 @@ public PendingCommand(
CompletionSource = completionSource;
CancellationToken = cancellationToken;
CancellationRegistration = registration;
QueuedAt = DateTime.UtcNow;
}

public string CommandJson { get; }
public TaskCompletionSource<string> CompletionSource { get; }
public CancellationToken CancellationToken { get; }
public CancellationTokenRegistration CancellationRegistration { get; }
public bool IsExecuting { get; set; }
public DateTime QueuedAt { get; }

public void Dispose()
{
@@ -619,6 +619,7 @@ private Task SendPongAsync(CancellationToken token)
var payload = new JObject
{
["type"] = "pong",
["session_id"] = _sessionId // Include session ID for server-side tracking
};
return SendJsonAsync(payload, token);
}
@@ -652,6 +653,10 @@ private async Task SendJsonAsync(JObject payload, CancellationToken token)

private async Task HandleSocketClosureAsync(string reason)
{
// Capture stack trace for debugging disconnection triggers
var stackTrace = new System.Diagnostics.StackTrace(true);
McpLog.Debug($"[WebSocket] HandleSocketClosureAsync called. Reason: {reason}\nStack trace:\n{stackTrace}");

if (_lifecycleCts == null || _lifecycleCts.IsCancellationRequested)
{
return;
36 changes: 29 additions & 7 deletions Server/src/transport/legacy/unity_connection.py
@@ -306,8 +306,10 @@ def read_status_file(target_hash: str | None = None) -> dict | None:
for attempt in range(attempts + 1):
try:
# Ensure connected (handshake occurs within connect())
t_conn_start = time.time()
if not self.sock and not self.connect():
raise ConnectionError("Could not connect to Unity")
logger.info("[TIMING-STDIO] connect took %.3fs command=%s", time.time() - t_conn_start, command_type)

# Build payload
if command_type == 'ping':
@@ -324,20 +326,24 @@ def read_status_file(target_hash: str | None = None) -> dict | None:
with contextlib.suppress(Exception):
logger.debug(
f"send {len(payload)} bytes; mode={mode}; head={payload[:32].decode('utf-8', 'ignore')}")
t_send_start = time.time()
if self.use_framing:
header = struct.pack('>Q', len(payload))
self.sock.sendall(header)
self.sock.sendall(payload)
else:
self.sock.sendall(payload)
logger.info("[TIMING-STDIO] sendall took %.3fs command=%s", time.time() - t_send_start, command_type)

# During retry bursts use a short receive timeout and ensure restoration
restore_timeout = None
if attempt > 0 and last_short_timeout is None:
restore_timeout = self.sock.gettimeout()
self.sock.settimeout(1.0)
try:
t_recv_start = time.time()
response_data = self.receive_full_response(self.sock)
logger.info("[TIMING-STDIO] receive took %.3fs command=%s len=%d", time.time() - t_recv_start, command_type, len(response_data))
with contextlib.suppress(Exception):
logger.debug(
f"recv {len(response_data)} bytes; mode={mode}")
@@ -419,7 +425,8 @@ def read_status_file(target_hash: str | None = None) -> dict | None:

# Cap backoff depending on state
if status and status.get('reloading'):
cap = 0.8
# Domain reload can take 10-20s; use longer waits
cap = 5.0
elif fast_error:
cap = 0.25
else:
@@ -761,22 +768,36 @@ def send_command_with_retry(
Uses config.reload_retry_ms and config.reload_max_retries by default. Preserves the
structured failure if retries are exhausted.
"""
t_retry_start = time.time()
logger.info("[TIMING-STDIO] send_command_with_retry START command=%s", command_type)
t_get_conn = time.time()
conn = get_unity_connection(instance_id)
logger.info("[TIMING-STDIO] get_unity_connection took %.3fs command=%s", time.time() - t_get_conn, command_type)
if max_retries is None:
max_retries = getattr(config, "reload_max_retries", 40)
if retry_ms is None:
retry_ms = getattr(config, "reload_retry_ms", 250)
# Default to 20s to handle domain reloads (which can take 10-20s after tests or script changes).
#
# NOTE: This wait can impact agentic workflows where domain reloads happen
# frequently (e.g., after test runs, script compilation). The 20s default
# balances handling slow reloads vs. avoiding unnecessary delays.
#
# TODO: Make this more deterministic by detecting Unity's actual reload state
# rather than blindly waiting up to 20s. See Issue #657.
#
# Configurable via: UNITY_MCP_RELOAD_MAX_WAIT_S (default: 20.0, max: 20.0)
try:
max_wait_s = float(os.environ.get(
"UNITY_MCP_RELOAD_MAX_WAIT_S", "2.0"))
"UNITY_MCP_RELOAD_MAX_WAIT_S", "20.0"))
except ValueError as e:
raw_val = os.environ.get("UNITY_MCP_RELOAD_MAX_WAIT_S", "2.0")
raw_val = os.environ.get("UNITY_MCP_RELOAD_MAX_WAIT_S", "20.0")
logger.warning(
"Invalid UNITY_MCP_RELOAD_MAX_WAIT_S=%r, using default 2.0: %s",
"Invalid UNITY_MCP_RELOAD_MAX_WAIT_S=%r, using default 20.0: %s",
raw_val, e)
max_wait_s = 2.0
# Clamp to [0, 30] to prevent misconfiguration from causing excessive waits
max_wait_s = max(0.0, min(max_wait_s, 30.0))
max_wait_s = 20.0
# Clamp to [0, 20] to prevent misconfiguration from causing excessive waits
max_wait_s = max(0.0, min(max_wait_s, 20.0))
Comment on lines +780 to +800

⚠️ Potential issue | 🟠 Major

Reload wait is still capped at 20s—objective calls for 30s.
Issue #654 notes reloads can take 10–30s, but the default, clamp, and log text still enforce 20s, which can prematurely abort retries. Please bump the default and cap (and update messaging) to 30s.

♻️ Proposed fix
-    # Default to 20s to handle domain reloads (which can take 10-20s after tests or script changes).
+    # Default to 30s to handle domain reloads (which can take 10-30s after tests or script changes).
@@
-    # Configurable via: UNITY_MCP_RELOAD_MAX_WAIT_S (default: 20.0, max: 20.0)
+    # Configurable via: UNITY_MCP_RELOAD_MAX_WAIT_S (default: 30.0, max: 30.0)
     try:
-        max_wait_s = float(os.environ.get(
-            "UNITY_MCP_RELOAD_MAX_WAIT_S", "20.0"))
+        max_wait_s = float(os.environ.get(
+            "UNITY_MCP_RELOAD_MAX_WAIT_S", "30.0"))
     except ValueError as e:
-        raw_val = os.environ.get("UNITY_MCP_RELOAD_MAX_WAIT_S", "20.0")
+        raw_val = os.environ.get("UNITY_MCP_RELOAD_MAX_WAIT_S", "30.0")
         logger.warning(
-            "Invalid UNITY_MCP_RELOAD_MAX_WAIT_S=%r, using default 20.0: %s",
+            "Invalid UNITY_MCP_RELOAD_MAX_WAIT_S=%r, using default 30.0: %s",
             raw_val, e)
-        max_wait_s = 20.0
-    # Clamp to [0, 20] to prevent misconfiguration from causing excessive waits
-    max_wait_s = max(0.0, min(max_wait_s, 20.0))
+        max_wait_s = 30.0
+    # Clamp to [0, 30] to prevent misconfiguration from causing excessive waits
+    max_wait_s = max(0.0, min(max_wait_s, 30.0))
🤖 Prompt for AI Agents
In `@Server/src/transport/legacy/unity_connection.py` around lines 780-800:
Update the reload wait default and clamp to 30s: change the environment default
for UNITY_MCP_RELOAD_MAX_WAIT_S from "20.0" to "30.0", adjust any hard-coded
fallback max_wait_s values (currently set to 20.0) to 30.0, and modify the clamp
statement that enforces the range to use 30.0 as the upper bound (max_wait_s =
max(0.0, min(max_wait_s, 30.0))). Also update the logger.warning message text
(and any nearby comments referencing 20s) to reflect the new 30s default so
messages and docs match the new cap; ensure you reference the same variable name
max_wait_s and the env var UNITY_MCP_RELOAD_MAX_WAIT_S in your changes.
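The drift flagged here stems from the same default/cap literal being repeated across the parse, fallback, and clamp lines. A minimal sketch of one way to centralize the parse-and-clamp pattern, shown with the reviewer's requested 30s values (the helper name `_env_float_clamped` is hypothetical, not part of this PR):

    import logging
    import os

    logger = logging.getLogger(__name__)

    def _env_float_clamped(name: str, default: float, lo: float, hi: float) -> float:
        """Read a float env var; fall back to `default` on bad input, clamp to [lo, hi]."""
        raw = os.environ.get(name, str(default))
        try:
            value = float(raw)
        except ValueError as e:
            logger.warning("Invalid %s=%r, using default %s: %s", name, raw, default, e)
            value = default
        return max(lo, min(value, hi))

    # With the reviewer's requested 30s default and cap, the literal lives in one place:
    max_wait_s = _env_float_clamped("UNITY_MCP_RELOAD_MAX_WAIT_S", 30.0, 0.0, 30.0)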


# If retry_on_reload=False, disable connection-level retries too (issue #577)
# Commands that trigger compilation/reload shouldn't retry on disconnect
@@ -847,6 +868,7 @@ def send_command_with_retry(
instance_id or "default",
waited,
)
logger.info("[TIMING-STDIO] send_command_with_retry DONE total=%.3fs command=%s", time.time() - t_retry_start, command_type)
return response


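For scale, the backoff-cap change above (0.8 → 5.0 while Unity reports reloading) fits the usual state-dependent exponential-backoff shape. A self-contained sketch with jitter; the 1.0s default cap is an assumption, since the real default branch is hidden in the collapsed diff:

    import random

    def backoff_delay(attempt: int, reloading: bool, fast_error: bool) -> float:
        """Exponential backoff whose cap depends on the editor's state."""
        if reloading:
            cap = 5.0   # domain reload can take 10-20s; probe less aggressively
        elif fast_error:
            cap = 0.25  # e.g. connection refused: retry quickly
        else:
            cap = 1.0   # assumed default cap (not visible in this hunk)
        base = min(cap, 0.1 * (2 ** attempt))
        return base * (0.5 + random.random() / 2)  # jitter to avoid lockstep retries

    for attempt in range(6):
        print(f"attempt {attempt}: sleep {backoff_delay(attempt, True, False):.2f}s")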
5 changes: 5 additions & 0 deletions Server/src/transport/models.py
@@ -23,6 +23,11 @@ class ExecuteCommandMessage(BaseModel):
params: dict[str, Any]
timeout: float


class PingMessage(BaseModel):
"""Server-initiated ping to detect dead connections."""
type: str = "ping"

# Incoming (Plugin -> Server)


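What the new PingMessage actually puts on the wire is minimal; a quick sketch (pydantic v2, matching the model_dump() call the hub uses below):

    from pydantic import BaseModel

    class PingMessage(BaseModel):
        """Server-initiated ping to detect dead connections."""
        type: str = "ping"

    assert PingMessage().model_dump() == {"type": "ping"}
    # The plugin answers with a pong that now carries session_id (see SendPongAsync
    # above), which the hub uses to refresh _last_pong for staleness detection.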
126 changes: 114 additions & 12 deletions Server/src/transport/plugin_hub.py
@@ -7,7 +7,7 @@
import os
import time
import uuid
from typing import Any
from typing import Any, ClassVar

from starlette.endpoints import WebSocketEndpoint
from starlette.websockets import WebSocket
Expand All @@ -21,6 +21,7 @@
WelcomeMessage,
RegisteredMessage,
ExecuteCommandMessage,
PingMessage,
RegisterMessage,
RegisterToolsMessage,
PongMessage,
Expand All @@ -29,7 +30,7 @@
SessionDetails,
)

logger = logging.getLogger("mcp-for-unity-server")
logger = logging.getLogger(__name__)


class PluginDisconnectedError(RuntimeError):
@@ -63,6 +64,10 @@ class PluginHub(WebSocketEndpoint):
KEEP_ALIVE_INTERVAL = 15
SERVER_TIMEOUT = 30
COMMAND_TIMEOUT = 30
# Server-side ping interval (seconds) - how often to send pings to Unity
PING_INTERVAL = 10
# Max time (seconds) to wait for pong before considering connection dead
PING_TIMEOUT = 20
# Timeout (seconds) for fast-fail commands like ping/read_console/get_editor_state.
# Keep short so MCP clients aren't blocked during Unity compilation/reload/unfocused throttling.
FAST_FAIL_TIMEOUT = 2.0
Expand All @@ -78,6 +83,10 @@ class PluginHub(WebSocketEndpoint):
_pending: dict[str, dict[str, Any]] = {}
_lock: asyncio.Lock | None = None
_loop: asyncio.AbstractEventLoop | None = None
# session_id -> last pong timestamp (monotonic)
_last_pong: ClassVar[dict[str, float]] = {}
# session_id -> ping task
_ping_tasks: ClassVar[dict[str, asyncio.Task]] = {}

@classmethod
def configure(
@@ -176,12 +185,20 @@ async def on_disconnect(self, websocket: WebSocket, close_code: int) -> None:
(sid for sid, ws in cls._connections.items() if ws is websocket), None)
if session_id:
cls._connections.pop(session_id, None)
# Stop the ping loop for this session
ping_task = cls._ping_tasks.pop(session_id, None)
if ping_task and not ping_task.done():
ping_task.cancel()
# Clean up last pong tracking
cls._last_pong.pop(session_id, None)
# Fail-fast any in-flight commands for this session to avoid waiting for COMMAND_TIMEOUT.
pending_ids = [
command_id
for command_id, entry in cls._pending.items()
if entry.get("session_id") == session_id
]
if pending_ids:
logger.debug(f"Cancelling {len(pending_ids)} pending commands for disconnected session")
for command_id in pending_ids:
entry = cls._pending.get(command_id)
future = entry.get("future") if isinstance(
@@ -364,10 +381,18 @@ async def _handle_register(self, websocket: WebSocket, payload: RegisterMessage)
session = await registry.register(session_id, project_name, project_hash, unity_version, project_path, user_id=user_id)
async with lock:
cls._connections[session.session_id] = websocket
# Initialize last pong time and start ping loop for this session
cls._last_pong[session_id] = time.monotonic()
# Cancel any existing ping task for this session (shouldn't happen, but be safe)
old_task = cls._ping_tasks.pop(session_id, None)
if old_task and not old_task.done():
old_task.cancel()
# Start the server-side ping loop
ping_task = asyncio.create_task(cls._ping_loop(session_id, websocket))
cls._ping_tasks[session_id] = ping_task

if user_id:
logger.info(
f"Plugin registered: {project_name} ({project_hash}) for user {user_id}")
logger.info(f"Plugin registered: {project_name} ({project_hash}) for user {user_id}")
else:
logger.info(f"Plugin registered: {project_name} ({project_hash})")

@@ -429,11 +454,77 @@ async def _handle_command_result(self, payload: CommandResultMessage) -> None:
async def _handle_pong(self, payload: PongMessage) -> None:
cls = type(self)
registry = cls._registry
lock = cls._lock
if registry is None:
return
session_id = payload.session_id
if session_id:
await registry.touch(session_id)
# Record last pong time for staleness detection (under lock for consistency)
if lock is not None:
async with lock:
cls._last_pong[session_id] = time.monotonic()

@classmethod
async def _ping_loop(cls, session_id: str, websocket: WebSocket) -> None:
"""Server-initiated ping loop to detect dead connections.

Sends periodic pings to the Unity client. If no pong is received within
PING_TIMEOUT seconds, the connection is considered dead and closed.
This helps detect connections that die silently (e.g., Windows OSError 64).
"""
logger.debug(f"[Ping] Starting ping loop for session {session_id}")
try:
while True:
Comment on lines +469 to +478

suggestion (bug_risk): Consider cleaning up _ping_tasks/_last_pong in _ping_loop to avoid relying solely on on_disconnect.

Relying on on_disconnect for cleanup means that in shutdown/exception edge cases (e.g. _lock set to None, close() failing, or on_disconnect not firing), the loop can exit the while and hit finally without removing this session from _ping_tasks or clearing _last_pong. Consider doing that cleanup in the finally block so the ping loop fully manages its own lifecycle and avoids stale entries.

Suggested implementation:

            await registry.touch(session_id)
            # Record last pong time for staleness detection
            cls._last_pong[session_id] = time.monotonic()

    @classmethod
    def _cleanup_ping_session(cls, session_id: str) -> None:
        """Cleanup ping tracking state for a session.

        This is used by the ping loop's finally block to fully manage its own
        lifecycle, and can also be safely called from on_disconnect as needed.
        """
        # Remove this session's ping task and staleness tracking, if present.
        # Using pop(..., None) makes this idempotent and safe to call multiple times.
        cls._ping_tasks.pop(session_id, None)
        cls._last_pong.pop(session_id, None)

    @classmethod
    async def _ping_loop(cls, session_id: str, websocket: WebSocket) -> None:

To fully implement your suggestion, the following additional changes are needed in the same file:

  1. Wrap the body of _ping_loop in a try/finally and call the new helper in the finally block, so the ping loop always cleans up its own state:
    @classmethod
    async def _ping_loop(cls, session_id: str, websocket: WebSocket) -> None:
        """Server-initiated ping loop to detect dead connections.

        Sends periodic pings to the Unity client. If no pong is received within
        PING_TIMEOUT seconds, the connection is considered dead and closed.
        This helps detect connections that die silently (e.g., Windows OSError 64).
        """
        logger.debug(f"[Ping] Starting ping loop for session {session_id}")
        try:
            while True:
                await asyncio.sleep(cls.PING_INTERVAL)
                # ... existing ping / timeout logic remains unchanged ...
        finally:
            # Ensure we don't leave stale entries if on_disconnect never fires
            cls._cleanup_ping_session(session_id)
  2. Anywhere else you’re cleaning up _ping_tasks / _last_pong (likely in on_disconnect), update that code to call cls._cleanup_ping_session(session_id) instead of manipulating _ping_tasks / _last_pong directly. Because _cleanup_ping_session uses pop(..., None), calling it from both on_disconnect and _ping_loop is safe and idempotent.

These changes ensure that the ping loop fully manages its own lifecycle and avoids stale entries even in shutdown/exception edge cases where on_disconnect might not execute.
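A runnable distillation of that lifecycle, assuming cleanup in `finally` plus idempotent `pop(..., None)` is the whole contract; the timings are arbitrary stand-ins for PING_INTERVAL:

    import asyncio
    import time

    _ping_tasks: dict[str, asyncio.Task] = {}
    _last_pong: dict[str, float] = {}

    def _cleanup_ping_session(session_id: str) -> None:
        # pop(..., None) is idempotent: safe even if on_disconnect already cleaned up.
        _ping_tasks.pop(session_id, None)
        _last_pong.pop(session_id, None)

    async def _ping_loop(session_id: str) -> None:
        try:
            while True:
                await asyncio.sleep(0.1)  # stand-in for the real ping/timeout logic
        finally:
            # Runs on cancellation, error, or normal exit, so the loop owns its state.
            _cleanup_ping_session(session_id)

    async def main() -> None:
        _last_pong["s1"] = time.monotonic()
        task = asyncio.create_task(_ping_loop("s1"))
        _ping_tasks["s1"] = task
        await asyncio.sleep(0.25)
        task.cancel()
        await asyncio.gather(task, return_exceptions=True)
        assert "s1" not in _ping_tasks and "s1" not in _last_pong

    asyncio.run(main())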

await asyncio.sleep(cls.PING_INTERVAL)

# Check if we're still supposed to be running and get last pong time (under lock)
lock = cls._lock
if lock is None:
break
async with lock:
if session_id not in cls._connections:
logger.debug(f"[Ping] Session {session_id} no longer in connections, stopping ping loop")
break
# Read last pong time under lock for consistency
last_pong = cls._last_pong.get(session_id, 0)

# Check staleness: has it been too long since we got a pong?
elapsed = time.monotonic() - last_pong
if elapsed > cls.PING_TIMEOUT:
logger.warning(
f"[Ping] Session {session_id} stale: no pong for {elapsed:.1f}s "
f"(timeout={cls.PING_TIMEOUT}s). Closing connection."
)
try:
await websocket.close(code=1001) # Going away
except Exception as close_ex:
logger.debug(f"[Ping] Error closing stale websocket: {close_ex}")
break

# Send a ping to the client
try:
ping_msg = PingMessage()
await websocket.send_json(ping_msg.model_dump())
logger.debug(f"[Ping] Sent ping to session {session_id}")
except Exception as send_ex:
# Send failed - connection is dead
logger.warning(
f"[Ping] Failed to send ping to session {session_id}: {send_ex}. "
"Connection likely dead."
)
try:
await websocket.close(code=1006) # Abnormal closure
except Exception:
pass
Comment on lines +492 to +519

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🌐 Web query:

RFC 6455 WebSocket close code 1006 must not be sent close frame specification

💡 Result:

In RFC 6455, WebSocket close code 1006 (“abnormal closure”) is reserved and must not be sent on the wire:

  • “1006 is a reserved value and MUST NOT be set as a status code in a Close control frame by an endpoint.” It is only for local/reporting use to indicate the connection closed abnormally (e.g., the TCP connection dropped) without sending or receiving a Close control frame. [1]
  • RFC 6455 also defines that if the connection is closed and no Close control frame was received, then the implementation considers the connection close code to be 1006 (again, internal/observed state—not a transmitted close frame). [2]

So: you will never legitimately see a peer send a Close frame with code 1006; implementations use it to report “abnormal closure” when the closing handshake didn’t happen. [1][2]

Sources:
[1] RFC 6455, Section 7.4.1 “Defined Status Codes” (1006) https://datatracker.ietf.org/doc/html/rfc6455
[2] RFC 6455, Section 7.1.5 “The WebSocket Connection Close Code” (1006 when no Close frame received) https://datatracker.ietf.org/doc/html/rfc6455
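That rule reduces to a small wire-safety check; a sketch, assuming the caller just needs a sendable substitute for a report-only code:

    # Codes RFC 6455 §7.4.1 reserves for local reporting; they must never appear
    # in a Close frame on the wire.
    RESERVED_CLOSE_CODES = {1005, 1006, 1015}

    def sendable_close_code(desired: int, fallback: int = 1001) -> int:
        """Return `desired` if it may be sent in a Close frame, else a safe fallback."""
        return fallback if desired in RESERVED_CLOSE_CODES else desired

    assert sendable_close_code(1006) == 1001  # abnormal closure -> "going away"
    assert sendable_close_code(1000) == 1000  # normal closure passes through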


Replace close code 1006 with 1001 and log close failures.

Line 517 uses close code 1006, which RFC 6455 explicitly reserves and forbids sending on the wire—it must only be used internally to report abnormal closure when no Close frame was exchanged. Sending it violates the specification and can cause protocol violations. Additionally, the exception handler at lines 518–519 silently swallows errors without logging, hiding diagnostics crucial for debugging connection failures.

🔧 Proposed fix
-                        await websocket.close(code=1006)  # Abnormal closure
-                    except Exception:
-                        pass
+                        await websocket.close(code=1001)  # Going away
+                    except Exception as close_ex:
+                        logger.debug(f"[Ping] Error closing websocket after send failure: {close_ex}")
🧰 Tools
🪛 Ruff (0.14.14)

[warning] 501-501: Do not catch blind exception: Exception

(BLE001)


[warning] 510-510: Do not catch blind exception: Exception

(BLE001)


[error] 518-519: try-except-pass detected, consider logging the exception

(S110)


[warning] 518-518: Do not catch blind exception: Exception

(BLE001)

🤖 Prompt for AI Agents
In `@Server/src/transport/plugin_hub.py` around lines 492-519: in the exception
handler for failed ping sends (the block that creates PingMessage, calls
websocket.send_json and handles send_ex), replace the prohibited close code 1006
with 1001 when calling websocket.close and ensure any exception thrown by
websocket.close is logged instead of swallowed; update the logger call to
include context (session_id and the close exception) and reference the same
symbols (PingMessage, websocket.close, cls.PING_TIMEOUT, logger) so the error
path logs close failures for debugging.

break

except asyncio.CancelledError:
logger.debug(f"[Ping] Ping loop cancelled for session {session_id}")
except Exception as ex:
logger.warning(f"[Ping] Ping loop error for session {session_id}: {ex}")
finally:
logger.debug(f"[Ping] Ping loop ended for session {session_id}")

@classmethod
async def _get_connection(cls, session_id: str) -> WebSocket:
@@ -465,19 +556,30 @@ async def _resolve_session_id(cls, unity_instance: str | None, user_id: str | No
if cls._registry is None:
raise RuntimeError("Plugin registry not configured")

# Bound waiting for Unity sessions so calls fail fast when editors are not ready.
# Bound waiting for Unity sessions. Default to 20s to handle domain reloads
# (which can take 10-20s after test runs or script changes).
#
# NOTE: This wait can impact agentic workflows where domain reloads happen
# frequently (e.g., after test runs, script compilation). The 20s default
# balances handling slow reloads vs. avoiding unnecessary delays.
#
# TODO: Make this more deterministic by detecting Unity's actual reload state
# (e.g., via status file, heartbeat, or explicit "reloading" signal from Unity)
# rather than blindly waiting up to 20s. See Issue #657.
#
# Configurable via: UNITY_MCP_SESSION_RESOLVE_MAX_WAIT_S (default: 20.0, max: 20.0)
try:
max_wait_s = float(
os.environ.get("UNITY_MCP_SESSION_RESOLVE_MAX_WAIT_S", "2.0"))
os.environ.get("UNITY_MCP_SESSION_RESOLVE_MAX_WAIT_S", "20.0"))
except ValueError as e:
raw_val = os.environ.get(
"UNITY_MCP_SESSION_RESOLVE_MAX_WAIT_S", "2.0")
"UNITY_MCP_SESSION_RESOLVE_MAX_WAIT_S", "20.0")
logger.warning(
"Invalid UNITY_MCP_SESSION_RESOLVE_MAX_WAIT_S=%r, using default 2.0: %s",
"Invalid UNITY_MCP_SESSION_RESOLVE_MAX_WAIT_S=%r, using default 20.0: %s",
raw_val, e)
max_wait_s = 2.0
# Clamp to [0, 30] to prevent misconfiguration from causing excessive waits
max_wait_s = max(0.0, min(max_wait_s, 30.0))
max_wait_s = 20.0
# Clamp to [0, 20] to prevent misconfiguration from causing excessive waits
max_wait_s = max(0.0, min(max_wait_s, 20.0))
retry_ms = float(getattr(config, "reload_retry_ms", 250))
sleep_seconds = max(0.05, min(0.25, retry_ms / 1000.0))

@@ -613,7 +715,7 @@ async def send_command_for_instance(
"Invalid UNITY_MCP_SESSION_READY_WAIT_SECONDS=%r, using default 6.0: %s",
raw_val, e)
max_wait_s = 6.0
max_wait_s = max(0.0, min(max_wait_s, 30.0))
max_wait_s = max(0.0, min(max_wait_s, 20.0))
if max_wait_s > 0:
deadline = time.monotonic() + max_wait_s
while time.monotonic() < deadline:
7 changes: 4 additions & 3 deletions Server/src/transport/unity_instance_middleware.py
@@ -214,9 +214,10 @@ async def _inject_unity_instance(self, context: MiddlewareContext) -> None:
# The 'active_instance' (Name@hash) might be valid for stdio even if PluginHub fails.

session_id: str | None = None
# Only validate via PluginHub if we are actually using HTTP transport
# OR if we want to support hybrid mode. For now, let's be permissive.
if PluginHub.is_configured():
# Only validate via PluginHub if we are actually using HTTP transport.
# For stdio transport, skip PluginHub entirely - we only need the instance ID.
from transport.unity_transport import _is_http_transport
if _is_http_transport() and PluginHub.is_configured():
try:
# resolving session_id might fail if the plugin disconnected
# We only need session_id for HTTP transport routing.
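The gate above calls _is_http_transport() from unity_transport.py, which this diff does not show; an assumed shape, with a hypothetical env var name:

    import os

    def _is_http_transport() -> bool:
        # Assumption: the real check lives in Server/src/transport/unity_transport.py
        # and may use a different signal than this env var.
        return os.environ.get("UNITY_MCP_TRANSPORT", "stdio").lower() == "http"

    def should_validate_via_plugin_hub(plugin_hub_configured: bool) -> bool:
        # stdio callers skip PluginHub entirely; only HTTP needs session routing.
        return _is_http_transport() and plugin_hub_configured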
2 changes: 1 addition & 1 deletion Server/src/transport/unity_transport.py
@@ -11,8 +11,8 @@
from models.models import MCPResponse
from models.unity_response import normalize_unity_response

logger = logging.getLogger(__name__)
T = TypeVar("T")
logger = logging.getLogger("mcp-for-unity-server")


def _is_http_transport() -> bool: