Closed
Description
This warning is benign with respect to Navie's operation — token counting falls back to an approximate count — but it is worth investigating why the model name is not recognized.
20085 [Stderr] Failed to calculate number of tokens, falling back to approximate count Error: Unknown model
20085 [Stderr] at getEncodingNameForModel (/snapshot/appmap-js/node_modules/js-tiktoken/dist/lite.cjs:239:13)
20085 [Stderr] at encodingForModel (/snapshot/appmap-js/node_modules/@langchain/core/dist/utils/tiktoken.cjs:23:59)
20085 [Stderr] at ChatOpenAI.getNumTokens (/snapshot/appmap-js/node_modules/@langchain/core/dist/language_models/base.cjs:205:75)
20085 [Stderr] at /snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1268:42
20085 [Stderr] at Array.map (<anonymous>)
20085 [Stderr] at ChatOpenAI.getNumTokensFromMessages (/snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1267:60)
20085 [Stderr] at ChatOpenAI.getEstimatedTokenCountFromPrompt (/snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1215:34)
20085 [Stderr] at ChatOpenAI._generate (/snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1141:49)
20085 [Stderr] at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
20085 [Stderr] at async Promise.allSettled (index 0)
20085 [Stderr] at async ChatOpenAI._generateUncached (/snapshot/appmap-js/node_modules/@langchain/core/dist/language_models/chat_models.cjs:176:29)
20085 [Stderr] at async LLMChain._call (/snapshot/appmap-js/node_modules/langchain/dist/chains/llm_chain.cjs:162:37)
20085 [Stderr] at async LLMChain.invoke (/snapshot/appmap-js/node_modules/langchain/dist/chains/base.cjs:58:28)
20085 [Stderr] at async LLMChain.predict (/snapshot/appmap-js/node_modules/langchain/dist/chains/llm_chain.cjs:188:24)
20085 [Stderr] at async ConversationSummaryMemory.predictNewSummary (/snapshot/appmap-js/node_modules/langchain/dist/memory/summary.cjs:71:16)
20085 [Stderr] at async LangchainMemoryService.predictSummary (/snapshot/appmap-js/packages/navie/dist/services/memory-service.js:19:25)
20085 [Stderr] at async ExplainCommand.execute (/snapshot/appmap-js/packages/navie/dist/commands/explain-command.js:102:29)
20085 [Stderr] at async Navie.execute (/snapshot/appmap-js/packages/navie/dist/navie.js:163:30)
20085 [Stderr] at async LocalNavie.ask (/snapshot/appmap-js/packages/cli/built/rpc/explain/navie/navie-local.js)
20085 [Stderr] at async Explain.explain (/snapshot/appmap-js/packages/cli/built/rpc/explain/explain.js)
20085 [Stderr] Failed to calculate number of tokens, falling back to approximate count Error: Unknown model
20085 [Stderr] at getEncodingNameForModel (/snapshot/appmap-js/node_modules/js-tiktoken/dist/lite.cjs:239:13)
20085 [Stderr] at encodingForModel (/snapshot/appmap-js/node_modules/@langchain/core/dist/utils/tiktoken.cjs:23:59)
20085 [Stderr] at ChatOpenAI.getNumTokens (/snapshot/appmap-js/node_modules/@langchain/core/dist/language_models/base.cjs:205:75)
20085 [Stderr] at /snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1269:42
20085 [Stderr] at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
20085 [Stderr] at async Promise.all (index 0)
20085 [Stderr] at async ChatOpenAI.getNumTokensFromMessages (/snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1267:33)
20085 [Stderr] at async ChatOpenAI.getEstimatedTokenCountFromPrompt (/snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1215:23)
20085 [Stderr] at async ChatOpenAI._generate (/snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1141:38)
20085 [Stderr] at async Promise.allSettled (index 0)
20085 [Stderr] at async ChatOpenAI._generateUncached (/snapshot/appmap-js/node_modules/@langchain/core/dist/language_models/chat_models.cjs:176:29)
20085 [Stderr] at async LLMChain._call (/snapshot/appmap-js/node_modules/langchain/dist/chains/llm_chain.cjs:162:37)
20085 [Stderr] at async LLMChain.invoke (/snapshot/appmap-js/node_modules/langchain/dist/chains/base.cjs:58:28)
20085 [Stderr] at async LLMChain.predict (/snapshot/appmap-js/node_modules/langchain/dist/chains/llm_chain.cjs:188:24)
20085 [Stderr] at async ConversationSummaryMemory.predictNewSummary (/snapshot/appmap-js/node_modules/langchain/dist/memory/summary.cjs:71:16)
20085 [Stderr] at async LangchainMemoryService.predictSummary (/snapshot/appmap-js/packages/navie/dist/services/memory-service.js:19:25)
20085 [Stderr] at async ExplainCommand.execute (/snapshot/appmap-js/packages/navie/dist/commands/explain-command.js:102:29)
20085 [Stderr] at async Navie.execute (/snapshot/appmap-js/packages/navie/dist/navie.js:163:30)
20085 [Stderr] at async LocalNavie.ask (/snapshot/appmap-js/packages/cli/built/rpc/explain/navie/navie-local.js)
20085 [Stderr] at async Explain.explain (/snapshot/appmap-js/packages/cli/built/rpc/explain/explain.js)
20085 [Stderr] Failed to calculate number of tokens, falling back to approximate count Error: Unknown model
20085 [Stderr] at getEncodingNameForModel (/snapshot/appmap-js/node_modules/js-tiktoken/dist/lite.cjs:239:13)
20085 [Stderr] at encodingForModel (/snapshot/appmap-js/node_modules/@langchain/core/dist/utils/tiktoken.cjs:23:59)
20085 [Stderr] at ChatOpenAI.getNumTokens (/snapshot/appmap-js/node_modules/@langchain/core/dist/language_models/base.cjs:205:75)
20085 [Stderr] at /snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1249:35
20085 [Stderr] at Array.map (<anonymous>)
20085 [Stderr] at ChatOpenAI.getNumTokensFromGenerations (/snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1243:64)
20085 [Stderr] at ChatOpenAI._generate (/snapshot/appmap-js/node_modules/@langchain/openai/dist/chat_models.cjs:1142:53)
20085 [Stderr] at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
20085 [Stderr] at async Promise.allSettled (index 0)
20085 [Stderr] at async ChatOpenAI._generateUncached (/snapshot/appmap-js/node_modules/@langchain/core/dist/language_models/chat_models.cjs:176:29)
20085 [Stderr] at async LLMChain._call (/snapshot/appmap-js/node_modules/langchain/dist/chains/llm_chain.cjs:162:37)
20085 [Stderr] at async LLMChain.invoke (/snapshot/appmap-js/node_modules/langchain/dist/chains/base.cjs:58:28)
20085 [Stderr] at async LLMChain.predict (/snapshot/appmap-js/node_modules/langchain/dist/chains/llm_chain.cjs:188:24)
20085 [Stderr] at async ConversationSummaryMemory.predictNewSummary (/snapshot/appmap-js/node_modules/langchain/dist/memory/summary.cjs:71:16)
20085 [Stderr] at async LangchainMemoryService.predictSummary (/snapshot/appmap-js/packages/navie/dist/services/memory-service.js:19:25)
20085 [Stderr] at async ExplainCommand.execute (/snapshot/appmap-js/packages/navie/dist/commands/explain-command.js:102:29)
20085 [Stderr] at async Navie.execute (/snapshot/appmap-js/packages/navie/dist/navie.js:163:30)
20085 [Stderr] at async LocalNavie.ask (/snapshot/appmap-js/packages/cli/built/rpc/explain/navie/navie-local.js)
20085 [Stderr] at async Explain.explain (/snapshot/appmap-js/packages/cli/built/rpc/explain/explain.js)
20085 [Stderr] Tokens (prompt/compl/total): 0/0/0
20085 [Stderr] [local-navie] Completed question a83a953b-398e-4977-b904-4475e03dd18b in 11764ms
20085 [Stdout] [local-navie] Processing question e506a0f2-0c14-445f-bccc-4cddab3885b7 in thread ad345769-4e18-4288-8c65-e539bb18f02e
20085 [Stderr] Using model claude-3.5-sonnet
20085 [Stderr] Using completion service openai