Add a `model` decorator to encapsulate calls to language models
rlouf committed Apr 7, 2023
1 parent e9c1658 commit 632e477
Showing 8 changed files with 234 additions and 119 deletions.
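
In short: where the examples previously built a prompt with `compose` and passed it to an explicitly instantiated `OpenAI` model, they now declare the template in a function docstring and decorate the function with `@text.model`. A minimal sketch of the new pattern, distilled from the diff below (the model string and question are illustrative):

    import outlines.text as text

    @text.model("openai/text-davinci-001")
    def solve(question):
        """${question}
        Let's solve this problem by splitting it into steps.
        """

    # Decorated functions return a pair: the raw model answer, and the
    # rendered prompt concatenated with that answer.
    answer, full_interaction = solve("f(x) = x*x. What is f(f(3))?")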
109 changes: 48 additions & 61 deletions examples/meta_prompting.py
@@ -12,48 +12,42 @@
 import argparse
 
 import outlines
-from outlines import compose
-from outlines.text.models.openai import OpenAI
+import outlines.text as text
 
 
-def split_into_steps(question, model: str):
-    prompt = compose(
-        """
-        ${question}
+def split_into_steps(question, model_name: str):
+    @text.model(model_name)
+    def solve(question):
+        """${question}
         Let's solve this problem by splitting it into steps.
-        """,
-        question=question,
-    )
-    answer = OpenAI(model)(prompt)
+        """
+
+    answer, prompt = solve(question)
 
     return prompt, answer
 
 
-def fill_in_the_blanks(question, model: str):
-    meta_prompt = compose(
-        """
-        ${question}
+def fill_in_the_blanks(question, model_name: str):
+    @text.model(model_name, stops_at=["."])
+    def determine_goal(question):
+        """${question}
         In order to solve this problem, we will analyze each of the options and determine
-        """,
-        question=question,
-    )
-    goal = OpenAI(model, stops_at=["."])(meta_prompt)
-
-    prompt = compose(
-        """
-        ${meta_prompt}${goal}. Let's begin.
-        """,
-        meta_prompt=meta_prompt,
-        goal=goal,
-    )
-    answer = OpenAI(model)(prompt)
-
-    return prompt, answer
+        """
+
+    @text.model(model_name, stops_at=["."])
+    def solve(memory):
+        """${memory}. Let's begin."""
+
+    _, memory = determine_goal(question)
+    answer, full_interaction = solve(memory)
+
+    return full_interaction, answer
 
 
-def ask_an_expert(question, model: str):
-    meta_prompt = compose(
+def ask_an_expert(question, model_name: str):
+    @text.model(model_name, stops_at=['"'])
+    def find_expert(question):
         """
         ${question}
         I entered my question into the Expert Generator
@@ -68,71 +62,65 @@ def ask_an_expert(question, model: str):
         The Expert Generator beeped, indicating that it has
         found the most qualified expert. The name displayed
         on the screen: "
-        """,
-        question=question,
-    )
-    expert = OpenAI(model, stops_at=['"'])(meta_prompt)
+        """
 
-    prompt = compose(
+    @text.model(model_name)
+    def get_answer(question, expert, memory):
         """
-        ${prompt}${expert}"
+        ${memory}
         I am ready to ask my question.
         "${expert}" I say,
         ${question}
-        """,
-        prompt=meta_prompt,
-        expert=expert,
-        question=question,
-    )
-    answer = OpenAI(model)(prompt)
-    return prompt, answer
+        """
+
+    expert, memory = find_expert(question)
+    answer, full_interaction = get_answer(question, expert, memory)
+
+    return full_interaction, answer
 
 
-def ask_an_expert_simple(question, model: str):
-    meta_prompt = compose(
+def ask_an_expert_simple(question, model_name: str):
+    @text.model(model_name, stops_at=["\n", "."])
+    def find_expert(question):
         """
         Q: ${question}
         A: A good person to answer this question would be
-        """,
-        question=question,
-    )
-    expert = OpenAI(model, stops_at=["/n", "."])(meta_prompt)
+        """
 
-    prompt = compose(
+    @text.model(model_name)
+    def get_answer(expert, memory):
         """
-        ${meta_prompt}${expert}.
+        ${memory}.
         For instance,${expert} would answer
-        """,
-        meta_prompt=meta_prompt,
-        expert=expert,
-    )
-    answer = OpenAI(model)(prompt)
+        """
 
-    return prompt, answer
+    expert, memory = find_expert(question)
+    answer, full_interaction = get_answer(expert, memory)
+
+    return full_interaction, answer
 
 
 def run_example(model_fn, question, model):
     print("\n-----------------------------------------\n")
     question_s = outlines.text.string()
     fn = outlines.chain([question_s], model_fn(question_s, model))
     prompt, answer = fn(question)
-    print(f"{prompt}{answer}")
+    print(f"{prompt}")
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Run the Meta Prompting examples")
     parser.add_argument(
         "--model",
         type=str,
-        default="text-davinci-001",
+        default="openai/text-davinci-001",
         help="The Large Language Model to use to run the examples.",
     )
     args = parser.parse_args()
 
     math_q = "f(x) = x*x. What is f(f(3))?"
-    sat_q = compose(
-        """
+    sat_q = """
     Directions: In the following question, a related
     pair of words or phrases is followed by five
     pairs of words or phrases. Choose the pair
@@ -146,7 +134,6 @@ def run_example(model_fn, question, model):
         E) CANDIDATE : AMBITION
     """
-    )
     alignment_q = "What should humankind do to ensure that artificial general intelligence is aligned?"
     meaning_q = "What is the meaning of life?"
2 changes: 1 addition & 1 deletion outlines/program.py
@@ -10,7 +10,7 @@
 from rich.panel import Panel
 
 from outlines.graph import Variable, io_toposort
-from outlines.text.models.model import LanguageModel
+from outlines.text.models import LanguageModel
 from outlines.text.var import StringConstant
 
 COLORS = itertools.cycle(["deep_sky_blue2", "gold3", "deep_pink2"])
3 changes: 2 additions & 1 deletion outlines/text/__init__.py
@@ -1,5 +1,6 @@
 from .basic import *
 from .compose import compose
+from .models import model
 from .var import as_string, string
 
-__all__ = ["as_string", "string", "compose"]
+__all__ = ["as_string", "model", "string", "compose"]
2 changes: 1 addition & 1 deletion outlines/text/models/__init__.py
@@ -1 +1 @@
-from .model import LanguageModel
+from .language_model import LanguageModel, model
2 changes: 1 addition & 1 deletion outlines/text/models/hugging_face.py
@@ -1,4 +1,4 @@
-from outlines.text.models.model import LanguageModel
+from outlines.text.models.language_model import LanguageModel
 
 try:
     import torch
156 changes: 156 additions & 0 deletions outlines/text/models/language_model.py
@@ -0,0 +1,156 @@
+import inspect
+
+from outlines.graph import Apply, Op
+from outlines.text.compose import compose
+from outlines.text.var import StringVariable, as_string
+
+
+class LanguageModel(Op):
+    """An `Op` that produces a sample from a language model.
+
+    The output of language models in outlines is represented as a random
+    variable. Therefore, calling a language model will return a random sequence
+    (via ancestral sampling) by default. Other decoding methods are constructed
+    as graph transformations.
+
+    """
+
+    def __init__(self, name=None):
+        """Instantiate the `LanguageModel` `Op`.
+
+        Parameters
+        ----------
+        name
+            The name of the `Op` in the graph.
+
+        """
+        super().__init__()
+        self.name = name
+
+    def __call__(self, prompt, stops_at=None, name=None):
+        """Create the `Apply` node that represents the `Op`'s application to inputs.
+
+        Parameters
+        ----------
+        prompt
+            The prompt used to condition the language model's sampling procedure.
+        name
+            The name of the output variable in the graph.
+
+        """
+        res = super().__call__(prompt)
+
+        if name is not None:
+            res.name = name
+
+        return res
+
+    def make_node(self, prompt):
+        prompt = as_string(prompt)
+        out = StringVariable()
+
+        return Apply(self, [prompt], [out])
+
+    def perform(self, prompt):
+        raise NotImplementedError
+
+
+def model(name: str, stops_at=None):
+    """Decorator that simplifies calls to language models.
+
+    Prompts that are passed to language models are often rendered templates,
+    and the workflow typically looks like:
+
+    >>> import outlines
+    >>> from outlines.text.models.openai import OpenAI
+    >>>
+    >>> llm = OpenAI("davinci")
+    >>> tpl = "I have a ${question}"
+    >>> prompt = outlines.compose(tpl, question="How are you?")
+    >>> answer = llm(prompt)
+
+    While explicit, these four lines have the following drawbacks:
+
+    1. The prompt is hidden;
+    2. The language model instantiation is far from the prompt, even though
+       prompt templates are attached to a specific language model call;
+    3. The intent behind the language model call is hidden.
+
+    To encapsulate the logic behind language model calls, we thus define the
+    prompt template inside a function and decorate the function with a model
+    specification. When that function is called, the template is rendered using
+    the arguments passed to the function, and the rendered prompt is passed to
+    a language model instantiated with the arguments passed to the decorator.
+
+    The previous example is equivalent to the following:
+
+    >>> import outlines
+    >>>
+    >>> @outlines.text.model("openai/davinci")
+    ... def answer(question):
+    ...     "I have a ${question}"
+    ...
+    >>> answer, _ = answer("How are you?")
+
+    Decorated functions return two objects: the first represents the output of
+    the language model call, the second the concatenation of the rendered
+    prompt with the output of the language model call. The latter can be used
+    in contexts where one expands an initial prompt with recursive calls to
+    language models.
+
+    """
+    provider_name = name.split("/")[0]
+    model_name = name[len(provider_name) + 1 :]
+
+    if provider_name == "openai":
+        from outlines.text.models.openai import OpenAI
+
+        llm = OpenAI(model_name, stops_at)  # type:ignore
+    elif provider_name == "hf":
+        from outlines.text.models.hugging_face import HFCausalLM
+
+        llm = HFCausalLM(model_name)  # type:ignore
+    else:
+        raise NameError(f"The model provider {provider_name} is not available.")
+
+    def decorator(fn):
+        # Get the names of the parameters to the function, which must correspond
+        # to the variables defined in the template.
+        var_names = []
+        kwargs_data = {}
+        sig = inspect.signature(fn)
+        for parameter in sig.parameters.values():
+            if parameter.default == inspect._empty:
+                var_names.append(parameter.name)
+            else:
+                kwargs_data[parameter.name] = parameter.default
+
+        # The docstring contains the template that will be rendered to be used
+        # as a prompt to the language model.
+        template = inspect.cleandoc(fn.__doc__)
+
+        def wrapper(*args, **kwargs):
+            """Call the LLM with the rendered template.
+
+            Building prompts with recursive calls to language models is common
+            in prompt engineering; we thus return both the raw answer from the
+            language model and the rendered prompt, including the answer.
+
+            Returns
+            -------
+            A tuple that contains the result of the language model call and the
+            rendered prompt concatenated with the result of the language model
+            call.
+
+            """
+            args_data = {name: arg for name, arg in zip(var_names, args)}
+            kwargs_data.update(kwargs)
+            data = {**args_data, **kwargs_data}
+
+            prompt = compose(template, **data)
+            result = llm(prompt)
+            return result, prompt + result
+
+        return wrapper
+
+    return decorator
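
The two-value return is what makes the recursive "memory" pattern in the refactored examples work: the second element (prompt plus answer) of one call becomes the template variable of the next. A minimal sketch of that chaining, assuming the OpenAI provider (templates and model name are illustrative):

    import outlines.text as text

    @text.model("openai/text-davinci-001", stops_at=["."])
    def determine_goal(question):
        """${question}
        In order to solve this problem, we will
        """

    @text.model("openai/text-davinci-001")
    def solve(memory):
        """${memory}. Let's begin."""

    # The full interaction from the first call seeds the second prompt.
    _, memory = determine_goal("What is the meaning of life?")
    answer, full_interaction = solve(memory)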