Skip to content

Commit 3d399a4

Browse files
committed
TTS 인프라 개선
1 parent 511bf25 commit 3d399a4

File tree

2 files changed

+24
-80
lines changed

2 files changed

+24
-80
lines changed
Lines changed: 23 additions & 79 deletions
Original file line number | Diff line number | Diff line change
@@ -1,9 +1,7 @@
1-
using System.Text;
21
using System.Text.Json;
32
using System.Net.Http.Json;
43
using Microsoft.Extensions.Logging;
54
using ProjectVG.Infrastructure.Integrations.LLMClient.Models;
6-
using ProjectVG.Common.Constants;
75
using Microsoft.Extensions.Configuration;
86

97
namespace ProjectVG.Infrastructure.Integrations.LLMClient
@@ -18,105 +16,52 @@ public LLMClient(HttpClient httpClient, ILogger<LLMClient> logger, IConfiguratio
1816
{
1917
_httpClient = httpClient;
2018
_logger = logger;
21-
22-
// JSON 직렬화 옵션 설정
23-
_jsonOptions = new JsonSerializerOptions
24-
{
19+
20+
_jsonOptions = new JsonSerializerOptions {
2521
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
2622
WriteIndented = false
2723
};
2824

29-
// HTTP 클라이언트 기본 설정
30-
_httpClient.BaseAddress = new Uri(configuration["LLM:BaseUrl"] ?? "http://localhost:5601/");
25+
_httpClient.BaseAddress = new Uri(configuration["LLM:BaseUrl"] ?? "");
3126
_httpClient.Timeout = TimeSpan.FromSeconds(30);
3227
_httpClient.DefaultRequestHeaders.Add("Accept", "application/json");
3328
}
34-
3529
public async Task<LLMResponse> SendRequestAsync(LLMRequest request)
3630
{
37-
try
38-
{
39-
_logger.LogDebug("LLM 요청 시작: {Model}, 사용자 메시지: {UserPrompt}",
40-
request.Model,
41-
request.UserPrompt[..Math.Min(50, request.UserPrompt.Length)]);
31+
try {
32+
if (_logger.IsEnabled(LogLevel.Debug))
33+
_logger.LogDebug("LLM 요청 시작: {Model}", request.Model);
4234

4335
using var jsonContent = JsonContent.Create(request, options: _jsonOptions);
4436
using var response = await _httpClient.PostAsync("api/v1/chat", jsonContent);
4537

46-
if (!response.IsSuccessStatusCode)
47-
{
38+
if (!response.IsSuccessStatusCode) {
4839
var errorContent = await response.Content.ReadAsStringAsync();
49-
_logger.LogWarning("LLM 서비스 오류: {StatusCode}, {Error}", response.StatusCode, errorContent);
50-
51-
return new LLMResponse
52-
{
53-
Success = false,
54-
Error = $"서비스 오류: {response.StatusCode}"
55-
};
40+
_logger.LogDebug("LLM 오류: {StatusCode}, {Error}", response.StatusCode, errorContent);
41+
throw new HttpRequestException($"LLM 서비스 오류: {response.StatusCode}");
5642
}
5743

5844
var responseContent = await response.Content.ReadAsStringAsync();
5945
var llmResponse = JsonSerializer.Deserialize<LLMResponse>(responseContent, _jsonOptions);
6046

61-
if (llmResponse?.Success == true)
62-
{
63-
_logger.LogInformation("LLM 요청 성공: 토큰 {TotalTokens}, 응답 길이 {ResponseLength}",
64-
llmResponse.TotalTokens,
65-
llmResponse.OutputText?.Length ?? 0);
47+
if (llmResponse == null) {
48+
_logger.LogDebug("응답 파싱 실패");
49+
throw new InvalidOperationException("응답을 파싱할 수 없습니다.");
6650
}
6751

68-
return llmResponse ?? new LLMResponse
69-
{
70-
Success = false,
71-
Error = "응답을 파싱할 수 없습니다."
72-
};
73-
}
74-
catch (HttpRequestException ex)
75-
{
76-
_logger.LogWarning("LLM 서비스 연결 오류 - Mock 응답 반환: {Error}", ex.Message);
77-
78-
// 임시 Mock 응답 (개발 환경에서만)
79-
return new LLMResponse
80-
{
81-
Success = true,
82-
Id = "mock-chatcmpl-" + Guid.NewGuid().ToString("N")[..8],
83-
RequestId = request.RequestId ?? "",
84-
Object = "response",
85-
CreatedAt = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
86-
Status = "completed",
87-
Model = request.Model ?? "gpt-4o-mini",
88-
OutputText = "안녕하세요! 저는 현재 Mock 모드로 동작하고 있습니다. 실제 LLM 서비스가 연결되지 않았습니다.",
89-
InputTokens = 30,
90-
OutputTokens = 20,
91-
TotalTokens = 50,
92-
CachedTokens = 0,
93-
ReasoningTokens = 0,
94-
TextFormatType = "text",
95-
Cost = 5,
96-
ResponseTime = 0.1,
97-
UseUserApiKey = request.UseUserApiKey ?? false
98-
};
99-
}
100-
catch (TaskCanceledException ex)
101-
{
102-
_logger.LogError(ex, "LLM 요청 시간 초과");
103-
return new LLMResponse
104-
{
105-
Success = false,
106-
Error = "요청 시간이 초과되었습니다."
107-
};
52+
_logger.LogDebug("LLM 성공: 토큰 {TotalTokens}, 응답길이 {ResponseLength}",
53+
llmResponse.TotalTokens,
54+
llmResponse.OutputText?.Length ?? 0);
55+
56+
return llmResponse;
10857
}
109-
catch (Exception ex)
110-
{
111-
_logger.LogError(ex, "LLM 요청 처리 중 예외 발생");
112-
return new LLMResponse
113-
{
114-
Success = false,
115-
Error = "요청 처리 중 오류가 발생했습니다."
116-
};
58+
catch (Exception ex) {
59+
_logger.LogDebug(ex, "LLM 요청 처리 중 예외 발생");
60+
throw;
11761
}
11862
}
11963

64+
12065
public async Task<LLMResponse> CreateTextResponseAsync(
12166
string systemMessage,
12267
string userMessage,
@@ -126,8 +71,7 @@ public async Task<LLMResponse> CreateTextResponseAsync(
12671
int? maxTokens = 1000,
12772
float? temperature = 0.7f)
12873
{
129-
var request = new LLMRequest
130-
{
74+
var request = new LLMRequest {
13175
RequestId = Guid.NewGuid().ToString(),
13276
SystemPrompt = systemMessage,
13377
UserPrompt = userMessage,
@@ -143,4 +87,4 @@ public async Task<LLMResponse> CreateTextResponseAsync(
14387
return await SendRequestAsync(request);
14488
}
14589
}
146-
}
90+
}

ProjectVG.Infrastructure/Integrations/TextToSpeechClient/Models/TextToSpeechRequest.cs

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -57,6 +57,6 @@ public class VoiceSettings
5757
/// 음성 속도를 조절합니다. 값이 1보다 작으면 음성 속도가 느려지고, 값이 1보다 크면 음성 속도가 빨라집니다.
5858
/// </summary>
5959
[JsonPropertyName("speed")]
60-
public float Speed { get; set; } = 1.1f;
60+
public float Speed { get; set; } = 1.2f;
6161
}
6262
}

0 commit comments

Comments (0)