-using System.Text;
 using System.Text.Json;
 using System.Net.Http.Json;
 using Microsoft.Extensions.Logging;
 using ProjectVG.Infrastructure.Integrations.LLMClient.Models;
-using ProjectVG.Common.Constants;
 using Microsoft.Extensions.Configuration;
 
 namespace ProjectVG.Infrastructure.Integrations.LLMClient
@@ -18,105 +16,52 @@ public LLMClient(HttpClient httpClient, ILogger<LLMClient> logger, IConfiguration configuration)
         {
             _httpClient = httpClient;
             _logger = logger;
-
-            // Configure JSON serialization options
-            _jsonOptions = new JsonSerializerOptions
-            {
+
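+            // Serialize with camelCase property names and compact (non-indented) output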
+            _jsonOptions = new JsonSerializerOptions {
                 PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
                 WriteIndented = false
             };
 
-            // Default HTTP client settings
-            _httpClient.BaseAddress = new Uri(configuration["LLM:BaseUrl"] ?? "http://localhost:5601/");
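+            // Note: new Uri("") throws UriFormatException, so LLM:BaseUrl must now be configured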
+            _httpClient.BaseAddress = new Uri(configuration["LLM:BaseUrl"] ?? "");
             _httpClient.Timeout = TimeSpan.FromSeconds(30);
             _httpClient.DefaultRequestHeaders.Add("Accept", "application/json");
         }
-
         public async Task<LLMResponse> SendRequestAsync(LLMRequest request)
         {
-            try
-            {
-                _logger.LogDebug("LLM request started: {Model}, user message: {UserPrompt}",
-                    request.Model,
-                    request.UserPrompt[..Math.Min(50, request.UserPrompt.Length)]);
+            try {
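+                // Gate the debug log behind IsEnabled so its arguments aren't evaluated when Debug is off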
+                if (_logger.IsEnabled(LogLevel.Debug))
+                    _logger.LogDebug("LLM request started: {Model}", request.Model);
 
                 using var jsonContent = JsonContent.Create(request, options: _jsonOptions);
                 using var response = await _httpClient.PostAsync("api/v1/chat", jsonContent);
 
-                if (!response.IsSuccessStatusCode)
-                {
+                if (!response.IsSuccessStatusCode) {
                     var errorContent = await response.Content.ReadAsStringAsync();
-                    _logger.LogWarning("LLM service error: {StatusCode}, {Error}", response.StatusCode, errorContent);
-
-                    return new LLMResponse
-                    {
-                        Success = false,
-                        Error = $"Service error: {response.StatusCode}"
-                    };
+                    _logger.LogDebug("LLM error: {StatusCode}, {Error}", response.StatusCode, errorContent);
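+                    // Non-success status is now surfaced as an exception rather than an error LLMResponse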
+                    throw new HttpRequestException($"LLM service error: {response.StatusCode}");
                 }
 
                 var responseContent = await response.Content.ReadAsStringAsync();
                 var llmResponse = JsonSerializer.Deserialize<LLMResponse>(responseContent, _jsonOptions);
 
-                if (llmResponse?.Success == true)
-                {
-                    _logger.LogInformation("LLM request succeeded: tokens {TotalTokens}, response length {ResponseLength}",
-                        llmResponse.TotalTokens,
-                        llmResponse.OutputText?.Length ?? 0);
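+                // A null deserialization result now throws instead of returning a fallback error response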
+                if (llmResponse == null) {
+                    _logger.LogDebug("Failed to parse response");
+                    throw new InvalidOperationException("The response could not be parsed.");
                 }
 
-                return llmResponse ?? new LLMResponse
-                {
-                    Success = false,
-                    Error = "The response could not be parsed."
-                };
-            }
-            catch (HttpRequestException ex)
-            {
-                _logger.LogWarning("LLM service connection error - returning mock response: {Error}", ex.Message);
-
-                // Temporary mock response (development environment only)
-                return new LLMResponse
-                {
-                    Success = true,
-                    Id = "mock-chatcmpl-" + Guid.NewGuid().ToString("N")[..8],
-                    RequestId = request.RequestId ?? "",
-                    Object = "response",
-                    CreatedAt = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
-                    Status = "completed",
-                    Model = request.Model ?? "gpt-4o-mini",
-                    OutputText = "Hello! I am currently running in mock mode. The actual LLM service is not connected.",
-                    InputTokens = 30,
-                    OutputTokens = 20,
-                    TotalTokens = 50,
-                    CachedTokens = 0,
-                    ReasoningTokens = 0,
-                    TextFormatType = "text",
-                    Cost = 5,
-                    ResponseTime = 0.1,
-                    UseUserApiKey = request.UseUserApiKey ?? false
-                };
-            }
-            catch (TaskCanceledException ex)
-            {
-                _logger.LogError(ex, "LLM request timed out");
-                return new LLMResponse
-                {
-                    Success = false,
-                    Error = "The request timed out."
-                };
+                _logger.LogDebug("LLM success: tokens {TotalTokens}, response length {ResponseLength}",
+                    llmResponse.TotalTokens,
+                    llmResponse.OutputText?.Length ?? 0);
+
+                return llmResponse;
             }
-            catch (Exception ex)
-            {
-                _logger.LogError(ex, "Exception occurred while processing LLM request");
-                return new LLMResponse
-                {
-                    Success = false,
-                    Error = "An error occurred while processing the request."
-                };
+            catch (Exception ex) {
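+                // Log at Debug level and rethrow; callers are now responsible for handling failures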
+                _logger.LogDebug(ex, "Exception while processing LLM request");
+                throw;
             }
         }
 
+
         public async Task<LLMResponse> CreateTextResponseAsync(
             string systemMessage,
             string userMessage,
@@ -126,8 +71,7 @@ public async Task<LLMResponse> CreateTextResponseAsync(
             int? maxTokens = 1000,
             float? temperature = 0.7f)
         {
-            var request = new LLMRequest
-            {
+            var request = new LLMRequest {
                 RequestId = Guid.NewGuid().ToString(),
                 SystemPrompt = systemMessage,
                 UserPrompt = userMessage,
@@ -143,4 +87,4 @@ public async Task<LLMResponse> CreateTextResponseAsync(
             return await SendRequestAsync(request);
         }
     }
-}
+}