
Commit 25639c9: Merge branch dev accepting all incoming changes

2 parents: e23dd74 + ff84a44
28 files changed (+785 / -0 lines)

.pre-commit-config.yaml

4 additions, 0 deletions

```diff
@@ -1,5 +1,9 @@
 fail_fast: true
+<<<<<<< HEAD
 exclude: '^(?!promptolution/).*$|^promptolution/templates.py'
+=======
+exclude: '^(?!promptolution/).*$'
+>>>>>>> main
 repos:
   - repo: https://github.com/gitleaks/gitleaks
     rev: v8.18.2
```
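Both branches edit the top-level `exclude` regex, and the merge keeps both versions between conflict markers. As a sanity check (a sketch, not code from the repo; the sample paths are made up), this is how the two patterns differ. pre-commit matches `exclude` against each file path:

```python
import re

# HEAD's pattern also skips promptolution/templates.py; main's pattern only
# skips files outside promptolution/. The paths below are hypothetical.
head = re.compile(r"^(?!promptolution/).*$|^promptolution/templates.py")
main = re.compile(r"^(?!promptolution/).*$")

for path in ("promptolution/config.py", "promptolution/templates.py", "tests/test_x.py"):
    print(path, "HEAD skips:", bool(head.match(path)), "main skips:", bool(main.match(path)))
# promptolution/config.py    HEAD skips: False  main skips: False
# promptolution/templates.py HEAD skips: True   main skips: False
# tests/test_x.py            HEAD skips: True   main skips: True
```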
23.6 KB · Binary file not shown.

dist/promptolution-0.1.1.tar.gz

16.2 KB · Binary file not shown.

docs/release-notes.md

4 additions, 0 deletions

```diff
@@ -1,5 +1,6 @@
 # Release Notes
 
+<<<<<<< HEAD
 ## Release v0.2.0
 
 ### What's Changed
@@ -66,3 +67,6 @@
 * Added experiment results and evaluation notebooks
 
 **Full Changelog**: [here](https://github.com/finitearth/promptolution/commits/0.1.0)
+=======
+coming soon...
+>>>>>>> main
```
poetry.lock

94 additions, 0 deletions (generated file, not rendered)

promptolution/callbacks.py

39 additions, 0 deletions

```diff
@@ -17,6 +17,7 @@ def on_step_end(self, optimizer):
         """
         pass
 
+<<<<<<< HEAD
     def on_epoch_end(self, optimizer):
         """Called at the end of each optimization epoch.
 
@@ -31,6 +32,18 @@ def on_train_end(self, optimizer):
         Args:
             optimizer: The optimizer object that called the callback.
         """
+=======
+    def on_epoch_end(self, epoch):
+        """Called at the end of each optimization epoch.
+
+        Args:
+            epoch: The current epoch number.
+        """
+        pass
+
+    def on_train_end(self):
+        """Called at the end of the entire optimization process."""
+>>>>>>> main
         pass
 
 
@@ -57,11 +70,27 @@ def on_step_end(self, optimizer):
             self.logger.critical(f"*** Prompt {i}: Score: {score}")
             self.logger.critical(f"{prompt}")
 
+<<<<<<< HEAD
     def on_train_end(self, optimizer, logs=None):
         """Log information at the end of training.
 
         Args:
             optimizer: The optimizer object that called the callback.
+=======
+    def on_epoch_end(self, epoch, logs=None):
+        """Log information about the current epoch.
+
+        Args:
+            epoch: The current epoch number.
+            logs: Additional information to log.
+        """
+        self.logger.critical(f"Epoch {epoch} - {logs}")
+
+    def on_train_end(self, logs=None):
+        """Log information at the end of training.
+
+        Args:
+>>>>>>> main
             logs: Additional information to log.
         """
         self.logger.critical(f"Training ended - {logs}")
@@ -105,12 +134,17 @@ def on_step_end(self, optimizer):
         )
         df.to_csv(self.path, mode="a", header=False, index=False)
 
+<<<<<<< HEAD
     def on_train_end(self, optimizer):
         """Called at the end of training.
 
         Args:
             optimizer: The optimizer object that called the callback.
         """
+=======
+    def on_train_end(self):
+        """Called at the end of training."""
+>>>>>>> main
         pass
 
 
@@ -173,10 +207,15 @@ def on_step_end(self, optimizer):
         """
         self.pbar.update(1)
 
+<<<<<<< HEAD
     def on_train_end(self, optimizer):
         """Close the progress bar at the end of training.
 
         Args:
             optimizer: The optimizer object that called the callback.
         """
+=======
+    def on_train_end(self):
+        """Close the progress bar at the end of training."""
+>>>>>>> main
         self.pbar.close()
```
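The two branches disagree on the hook signatures: HEAD passes the `optimizer` instance to every hook, while main's hooks take `epoch`/`logs` or nothing. A minimal sketch of why the two conventions cannot coexist at runtime; the `Optimizer` driver here is hypothetical, not promptolution's actual class:

```python
# Hypothetical driver loop illustrating the two callback conventions in the
# diff above. Only the signatures are taken from the diff.
class HeadCallback:
    def on_train_end(self, optimizer):  # HEAD: hook receives the optimizer
        print("HEAD-style, saw", optimizer)

class MainCallback:
    def on_train_end(self):  # main: hook receives nothing
        print("main-style")

class Optimizer:
    def __init__(self, callbacks):
        self.callbacks = callbacks

    def run(self):
        # A HEAD-style driver passes `self` to every hook...
        for cb in self.callbacks:
            cb.on_train_end(self)

Optimizer([HeadCallback()]).run()  # fine
Optimizer([MainCallback()]).run()  # ...so a main-style hook raises TypeError
```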

promptolution/config.py

46 additions, 0 deletions

```diff
@@ -1,5 +1,10 @@
 """Configuration class for the promptolution library."""
+<<<<<<< HEAD
 import configparser
+=======
+
+from configparser import ConfigParser
+>>>>>>> main
 from dataclasses import dataclass
 from pathlib import Path
 from typing import Any, Dict, Literal, Optional
@@ -43,6 +48,7 @@ class Config:
         n_eval_samples (int): how many examples to show to evaluation llm for evaluation.
     """
 
+<<<<<<< HEAD
     task_name: str = None
     ds_path: Path = None
     optimizer: str = None
@@ -52,6 +58,18 @@ class Config:
     n_steps: int = None
     init_pop_size: int = None
     logging_dir: Path = Path("logs/run.csv")
+=======
+    task_name: str
+    ds_path: str
+    n_steps: int
+    optimizer: str
+    meta_prompt_path: str
+    meta_llms: str
+    downstream_llm: str
+    evaluation_llm: str
+    init_pop_size: int = 10
+    logging_dir: str = "logs/run.csv"
+>>>>>>> main
     experiment_name: str = "experiment"
     include_task_desc: bool = True
     donor_random: bool = False
@@ -67,6 +85,7 @@ class Config:
     n_ds_samples_to_meta: Optional[int] = 2
     n_eval_samples: Optional[int] = 20
 
+<<<<<<< HEAD
     def __post_init__(self):
         """Validate the configuration after initialization."""
         self._validate_config()
@@ -75,6 +94,33 @@ def __post_init__(self):
     def from_dict(cls, config_dict: Dict[str, Any]) -> "Config":
         """Create a Config instance from a dictionary."""
         return cls(**cls._process_config_dict(config_dict))
+=======
+    def __init__(self, config_path: str = None, **kwargs):
+        """Initialize the Config object."""
+        if config_path:
+            self.config_path = config_path
+            self.config = ConfigParser()
+            self.config.read(config_path)
+            self._parse_config()
+        else:
+            for key, value in kwargs.items():
+                setattr(self, key, value)
+
+    def _parse_config(self):
+        """Parse the configuration settings from the config file."""
+        self.task_name = self.config["task"]["task_name"]
+        self.ds_path = self.config["task"]["ds_path"]
+        self.n_steps = int(self.config["task"]["steps"])
+        self.random_seed = int(self.config["task"]["random_seed"])
+        self.optimizer = self.config["optimizer"]["name"]
+        self.meta_prompt_path = self.config["optimizer"]["meta_prompt_path"]
+        self.meta_llm = self.config["meta_llm"]["name"]
+        self.downstream_llm = self.config["downstream_llm"]["name"]
+        self.evaluation_llm = self.config["evaluator_llm"]["name"]
+        self.init_pop_size = int(self.config["optimizer"]["init_pop_size"])
+        self.logging_dir = self.config["logging"]["dir"]
+        self.experiment_name = self.config["experiment"]["name"]
+>>>>>>> main
 
     @classmethod
     def from_file(cls, config_path: Path) -> "Config":
```
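On the main side, `_parse_config` expects an INI file with `[task]`, `[optimizer]`, `[meta_llm]`, `[downstream_llm]`, `[evaluator_llm]`, `[logging]`, and `[experiment]` sections. A sketch of a file that would satisfy it; the section and key names come from the diff above, every value is made up:

```python
from configparser import ConfigParser

# Hypothetical config.ini content matching main's _parse_config. In the real
# flow this would live on disk and be consumed via Config(config_path=...).
ini = """
[task]
task_name = demo_task
ds_path = data/demo
steps = 10
random_seed = 42

[optimizer]
name = demo_optimizer
meta_prompt_path = templates/meta.txt
init_pop_size = 10

[meta_llm]
name = meta-model

[downstream_llm]
name = downstream-model

[evaluator_llm]
name = evaluator-model

[logging]
dir = logs/run.csv

[experiment]
name = demo
"""

parser = ConfigParser()
parser.read_string(ini)
print(int(parser["task"]["steps"]), parser["optimizer"]["name"])  # 10 demo_optimizer
```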

promptolution/llms/api_llm.py

16 additions, 0 deletions

```diff
@@ -5,14 +5,22 @@
 from logging import INFO, Logger
 from typing import List
 
+<<<<<<< HEAD
 import nest_asyncio
+=======
+>>>>>>> main
 import openai
 import requests
 from langchain_anthropic import ChatAnthropic
 from langchain_community.chat_models.deepinfra import ChatDeepInfra, ChatDeepInfraException
 from langchain_core.messages import HumanMessage
 from langchain_openai import ChatOpenAI
 
+<<<<<<< HEAD
+=======
+from promptolution.llms.deepinfra import ChatDeepInfra
+
+>>>>>>> main
 logger = Logger(__name__)
 logger.setLevel(INFO)
 
@@ -61,7 +69,11 @@ class APILLM:
         get_response_async: Asynchronously get responses for a list of prompts.
     """
 
+<<<<<<< HEAD
     def __init__(self, model_id: str, token: str = None):
+=======
+    def __init__(self, model_id: str):
+>>>>>>> main
         """Initialize the APILLM with a specific model.
 
         Args:
@@ -118,7 +130,11 @@ def get_response(self, prompts: List[str]) -> List[str]:
         # If the loop exits, it means max retries were reached
         raise requests.exceptions.ConnectionError("Max retries exceeded. Connection could not be established.")
 
+<<<<<<< HEAD
     async def get_response_async(self, prompts: list[str], max_concurrent_calls=200) -> list[str]:
+=======
+    async def _get_response(self, prompts: list[str], max_concurrent_calls=200) -> list[str]:
+>>>>>>> main
         """Asynchronously get responses for a list of prompts.
 
         This method uses a semaphore to limit the number of concurrent API calls.
```
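Both variants of the method describe the same pattern: an `asyncio.Semaphore` caps the number of in-flight API calls at `max_concurrent_calls` (HEAD additionally imports `nest_asyncio`, which allows re-entering a running event loop, e.g. in notebooks). A minimal self-contained sketch of that pattern, with `fake_call` standing in for the real per-prompt API request:

```python
import asyncio

# fake_call is a stand-in for the real API round trip, not promptolution code.
async def fake_call(prompt: str) -> str:
    await asyncio.sleep(0.1)  # simulate network latency
    return f"response to {prompt!r}"

async def get_responses(prompts: list[str], max_concurrent_calls: int = 200) -> list[str]:
    semaphore = asyncio.Semaphore(max_concurrent_calls)

    async def bounded(prompt: str) -> str:
        async with semaphore:  # at most max_concurrent_calls run concurrently
            return await fake_call(prompt)

    # gather preserves input order regardless of completion order
    return await asyncio.gather(*(bounded(p) for p in prompts))

print(asyncio.run(get_responses(["a", "b", "c"], max_concurrent_calls=2)))
```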

0 commit comments