Skip to content

Commit e39f0e0

Browse files
author
xusenlin
committed
Add langchain_llm package
1 parent b371f68 commit e39f0e0

File tree

18 files changed

+4645
-0
lines changed

18 files changed

+4645
-0
lines changed

libs/langchain_llm/README.md

Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,105 @@
1+
# Langchain LLM
2+
3+
## Get Started
4+
5+
### Install
6+
7+
```shell
8+
pip install langchain_llm
9+
```
10+
11+
## Inference Usage
12+
13+
### HuggingFace Inference
14+
15+
**Completion Usage**
16+
17+
```python
18+
from langchain_llm import HuggingFaceLLM
19+
20+
llm = HuggingFaceLLM(
21+
model_name="qwen-7b-chat",
22+
model_path="/data/checkpoints/Qwen-7B-Chat",
23+
load_model_kwargs={"device_map": "auto"},
24+
)
25+
26+
# invoke method
27+
prompt = "<|im_start|>user\n你是谁?<|im_end|>\n<|im_start|>assistant\n"
28+
print(llm.invoke(prompt, stop=["<|im_end|>"]))
29+
30+
# Token Streaming
31+
for chunk in llm.stream(prompt, stop=["<|im_end|>"]):
32+
print(chunk, end="", flush=True)
33+
34+
# openai usage
35+
print(llm.call_as_openai(prompt, stop=["<|im_end|>"]))
36+
37+
# Streaming
38+
for chunk in llm.call_as_openai(prompt, stop=["<|im_end|>"], stream=True):
39+
print(chunk.choices[0].text, end="", flush=True)
40+
```
41+
42+
**Chat Completion Usage**
43+
44+
```python
45+
from langchain_llm import ChatHuggingFace
46+
47+
chat_llm = ChatHuggingFace(llm=llm)
48+
49+
# invoke method
50+
query = "你是谁?"
51+
print(chat_llm.invoke(query))
52+
53+
# Token Streaming
54+
for chunk in chat_llm.stream(query):
55+
print(chunk.content, end="", flush=True)
56+
57+
# openai usage
58+
messages = [
59+
{"role": "user", "content": query}
60+
]
61+
print(chat_llm.call_as_openai(messages))
62+
63+
# Streaming
64+
for chunk in chat_llm.call_as_openai(messages, stream=True):
65+
print(chunk.choices[0].delta.content or "", end="", flush=True)
66+
```
67+
68+
### VLLM Inference
69+
70+
**Completion Usage**
71+
72+
```python
73+
from langchain_llm import VLLM
74+
75+
llm = VLLM(
76+
model_name="qwen",
77+
model="/data/checkpoints/Qwen-7B-Chat",
78+
trust_remote_code=True,
79+
)
80+
81+
# invoke method
82+
prompt = "<|im_start|>user\n你是谁?<|im_end|>\n<|im_start|>assistant\n"
83+
print(llm.invoke(prompt, stop=["<|im_end|>"]))
84+
85+
# openai usage
86+
print(llm.call_as_openai(prompt, stop=["<|im_end|>"]))
87+
```
88+
89+
**Chat Completion Usage**
90+
91+
```python
92+
from langchain_llm import ChatVLLM
93+
94+
chat_llm = ChatVLLM(llm=llm)
95+
96+
# invoke method
97+
query = "你是谁?"
98+
print(chat_llm.invoke(query))
99+
100+
# openai usage
101+
messages = [
102+
{"role": "user", "content": query}
103+
]
104+
print(chat_llm.call_as_openai(messages))
105+
```
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
from ._huggingface import (
2+
HuggingFaceLLM,
3+
ChatHuggingFace,
4+
)
5+
from ._vllm import XVLLM as VLLM
6+
from ._vllm import ChatVLLM
7+
8+
9+
__all__ = [
10+
"HuggingFaceLLM",
11+
"ChatHuggingFace",
12+
"VLLM",
13+
"ChatVLLM",
14+
]
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
from __future__ import annotations
2+
3+
from typing import Any, cast, Dict, Type
4+
5+
import pydantic
6+
7+
# --------------- Pydantic v2 compatibility ---------------
8+
9+
PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
10+
11+
12+
def model_json(model: pydantic.BaseModel, **kwargs) -> str:
    """Serialize *model* to a JSON string, regardless of pydantic major version.

    Dispatches to ``model_dump_json`` on pydantic v2 and to the legacy
    ``.json()`` method on v1, forwarding all keyword arguments unchanged.
    """
    if not PYDANTIC_V2:
        # pydantic v1: .json() is the serializer (removed in v2).
        return model.json(**kwargs)  # type: ignore
    return model.model_dump_json(**kwargs)
16+
17+
18+
def model_dump(model: pydantic.BaseModel, **kwargs) -> Dict[str, Any]:
    """Dump *model* to a plain dict, regardless of pydantic major version.

    Dispatches to ``model_dump`` on pydantic v2 and to the legacy ``.dict()``
    method on v1, forwarding all keyword arguments unchanged.
    """
    if not PYDANTIC_V2:
        # pydantic v1: .dict() is the legacy equivalent of model_dump().
        return cast("dict[str, Any]", model.dict(**kwargs))
    return model.model_dump(**kwargs)
25+
26+
27+
def model_parse(model: Type[pydantic.BaseModel], data: Any) -> pydantic.BaseModel:
    """Validate *data* into an instance of *model*, on either pydantic version.

    Uses ``model_validate`` on pydantic v2 and the deprecated ``parse_obj``
    classmethod on v1.
    """
    if not PYDANTIC_V2:
        return model.parse_obj(data)  # pyright: ignore[reportDeprecated]
    return model.model_validate(data)
31+
32+
33+
def disable_warnings(model: Type[pydantic.BaseModel]) -> None:
    """Silence pydantic v2 protected-namespace warnings for *model*.

    Fields whose names start with ``model_`` (e.g. ``model_name``) trigger a
    "protected namespace" warning in pydantic v2; clearing
    ``protected_namespaces`` in the model config suppresses it. No-op on v1.
    """
    if not PYDANTIC_V2:
        return
    model.model_config["protected_namespaces"] = ()

0 commit comments

Comments
 (0)