{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE OverloadedRecordDot #-}
{-# LANGUAGE OverloadedStrings #-} -- needed: `method` / header literals are ByteStrings

import Control.Monad (when)
import Data.Aeson (FromJSON, ToJSON)
import qualified Data.Aeson as Aeson
import GHC.Generics (Generic)
import Network.HTTP.Client
  ( Manager
  , Request (..)
  , RequestBody (..)
  , defaultManagerSettings
  , httpLbs
  , newManager
  , parseRequest
  , responseBody
  , responseStatus
  )
import Network.HTTP.Types.Status (statusIsSuccessful)
import System.Environment (getArgs)

-- Data types for Ollama interaction

-- | Request payload for Ollama's @POST /api/generate@ endpoint.
data OllamaRequest = OllamaRequest
  { model  :: String -- ^ Model to run, e.g. @"llama3.2:latest"@
  , prompt :: String -- ^ User prompt to send
  , stream :: Bool   -- ^ 'False' requests a single complete response
  } deriving (Show, Generic)

-- Empty instance uses aeson's Generic-based defaults. A standalone instance
-- avoids the DeriveAnyClass extension that `deriving (ToJSON)` would require
-- (it is not enabled in this file's pragma list).
instance ToJSON OllamaRequest
-- | Response payload returned by Ollama's @/api/generate@ endpoint.
data OllamaResponse = OllamaResponse
  { model       :: String       -- ^ Model that produced the response
  , created_at  :: String       -- ^ Server-side timestamp (field names match the JSON)
  , response    :: String       -- ^ The generated text
  , done        :: Bool         -- ^ Whether generation finished
  , done_reason :: Maybe String -- ^ May be null/absent in some responses, so 'Maybe'
  } deriving (Show, Generic)

-- Standalone empty instance (Generic defaults); avoids needing DeriveAnyClass
-- for `deriving (FromJSON)`, which this file does not enable.
instance FromJSON OllamaResponse
-- | Send one non-streaming generate request to a local Ollama server.
--
-- Returns @Right response@ on a 2xx status with a parseable body, and
-- @Left message@ when the status is non-2xx or JSON decoding fails.
--
-- NOTE: 'parseRequest' (invalid URL) and 'httpLbs' (network failure) can
-- still throw IO exceptions; callers inherit those.
callOllama :: Manager -> String -> String -> IO (Either String OllamaResponse)
callOllama manager modelName userPrompt = do
  initialRequest <- parseRequest "http://localhost:11434/api/generate"

  let ollamaRequestBody = OllamaRequest
        { model  = modelName
        , prompt = userPrompt
        , stream = False -- single response, not a token stream
        }

      request = initialRequest
        { method         = "POST"
        , requestHeaders = [("Content-Type", "application/json")]
        , requestBody    = RequestBodyLBS (Aeson.encode ollamaRequestBody)
        }

  httpResponse <- httpLbs request manager

  let status = responseStatus httpResponse
      body   = responseBody httpResponse

  if statusIsSuccessful status
    then case Aeson.decode body :: Maybe OllamaResponse of
      Just ollamaResponse ->
        pure (Right ollamaResponse)
      Nothing ->
        pure . Left $
          "Error: Failed to parse JSON response. Body: " ++ show body
    else
      pure . Left $
        "Error: HTTP request failed with status " ++ show status
          ++ ". Body: " ++ show body
-- | Entry point: read the prompt (and an optional model name) from the
-- command line, query the local Ollama server, and print the result.
main :: IO ()
main = do
  args <- getArgs
  case args of
    [] -> putStrLn "Usage: <program_name> <prompt> [model_name]"
    (promptArg : modelArgs) -> do
      -- Fall back to a default model when none is supplied.
      let modelName = case modelArgs of
            (m : _) -> m
            []      -> "llama3.2:latest"

      manager <- newManager defaultManagerSettings

      putStrLn $
        "Sending prompt '" ++ promptArg
          ++ "' to model '" ++ modelName ++ "'..."

      result <- callOllama manager modelName promptArg

      case result of
        Right ollamaResponse -> do
          -- putStrLn is already IO; no liftIO needed here.
          putStrLn "\n--- Response ---"
          putStrLn ollamaResponse.response
          -- Print the finish reason only when the server supplied one,
          -- unwrapping the Maybe rather than showing "Just ...".
          case ollamaResponse.done_reason of
            Just reason -> putStrLn $ "\nDone reason: " ++ reason
            Nothing     -> pure ()
        Left err ->
          putStrLn $ "API Error: " ++ err