pre-commit autoupdate
lslunis committed Mar 13, 2023
1 parent 920254b commit 8f3123e
Showing 20 changed files with 10 additions and 28 deletions.
8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
@@ -1,7 +1,7 @@
 minimum_pre_commit_version: "2.9.0"
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v2.3.0
+    rev: v4.4.0
     hooks:
       - id: check-yaml
         args: [--allow-multiple-documents]
@@ -20,15 +20,15 @@ repos:
       - id: reorder-python-imports
         args: [--py39-plus]
   - repo: https://github.com/psf/black
-    rev: 22.3.0
+    rev: 23.1.0
     hooks:
       - id: black
   - repo: https://github.com/PyCQA/flake8
-    rev: 3.9.2
+    rev: 6.0.0
     hooks:
       - id: flake8
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.991
+    rev: v1.1.1
     hooks:
       - id: mypy
         additional_dependencies: [numpy, httpx, pytest, structlog, types-PyYAML]
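For context, rev bumps like these are what pre-commit's update command produces; a typical sequence (assuming pre-commit is installed for this repo) is:

    pre-commit autoupdate
    pre-commit run --all-files

The first command bumps each hook's rev to its latest tag; the second re-runs the hooks, letting the updated black (23.1.0) reformat the tree. That reformatting accounts for the Python changes below: black 23.x deletes blank lines directly after a function signature and drops redundant parentheses around tuple targets in for loops.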
1 change: 0 additions & 1 deletion ice/agents/approval.py
@@ -73,7 +73,6 @@ async def relevance(self, *, question, context, verbose=False, default=None):
         return score
 
     async def _check(self, prompt: str, candidate: str):
-
         approval_prompt = f"""Evaluate whether the following output is correct.
 Input:
2 changes: 1 addition & 1 deletion ice/agents/openai.py
@@ -116,7 +116,7 @@ def _compute_relative_probs(
 
         def lookup_prob(choice: str):
             scores = 0.0
-            for (token, prob) in prediction.items():
+            for token, prob in prediction.items():
                 if choice[len(choice_prefix) :].startswith(token):
                     scores += prob
             return scores
3 changes: 1 addition & 2 deletions ice/agents/openai_reasoning.py
@@ -119,7 +119,7 @@ async def _parse_and_aggregate_responses(
         # Parse the responses and aggregate the answers and reasonings
         answers: Counter[str] = Counter()
         reasonings: list[str] = []
-        for (i, response_text) in enumerate(response_texts):
+        for i, response_text in enumerate(response_texts):
             # Check if the response contains the answer prefix
             if answer_prefix not in response_text:
                 # If not, request an explicit answer from the API
@@ -200,7 +200,6 @@ def _parse_answer_and_reasoning(
     def _format_result(
         self, answers: Counter[str], reasonings: list[str]
     ) -> tuple[dict[str, float], str]:
-
         # Join the reasonings with counts
         joined_reasonings = self._join_texts_with_counts(reasonings)
 
1 change: 0 additions & 1 deletion ice/agents/squad.py
@@ -6,7 +6,6 @@
 
 class SquadAgent(Agent):
     def __init__(self, model_name: str = "z-uo/roberta-qasper"):
-
         self.nlp = pipeline(
             "question-answering", model=model_name, tokenizer=model_name
         )
1 change: 0 additions & 1 deletion ice/metrics/gold_paragraphs.py
@@ -85,7 +85,6 @@ def get_containing_paragraph(
 
 
 def get_gold_paragraph_df(question_short_name: str):
-
     gold_standards = get_question_gold_standards(question_short_name)
 
     entries = []
1 change: 0 additions & 1 deletion ice/metrics/nubia.py
@@ -29,7 +29,6 @@ class NubiaResponse(BaseModel):
 
 
 async def _single_nubia(sample: Sample) -> list[NubiaResponse]:
-
     samples = list(product(sample.left, sample.right))
 
     async with httpx.AsyncClient(
2 changes: 1 addition & 1 deletion ice/metrics/rouge.py
@@ -57,7 +57,7 @@ async def _compute_single(sample: Sample) -> RougeResult:
         )
         return RougeResult.parse_obj(result_dict)
 
-    return [await (_compute_single(s)) for s in sample]
+    return [await _compute_single(s) for s in sample]
 
 
 @diskcache()
1 change: 1 addition & 0 deletions ice/nn/bert_t5_t0_ensemble.py
@@ -163,6 +163,7 @@ def T0_classify(
 
 # Credit: https://stackoverflow.com/questions/39936527/python-removing-references-from-a-scientific-paper
 
+
 # Remove citations
 def remove_citations(s: str) -> str:
     return re.sub(r"\s\([A-Z][a-z]+,\s[A-Z][a-z]?\.[^\)]*,\s\d{4}\)", "", s)
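The remove_citations regex above is dense; a hypothetical usage sketch (not part of the commit) shows what it strips:

    import re

    def remove_citations(s: str) -> str:
        return re.sub(r"\s\([A-Z][a-z]+,\s[A-Z][a-z]?\.[^\)]*,\s\d{4}\)", "", s)

    # Removes author-year citations such as " (Smith, J. et al., 2020)"
    print(remove_citations("Results improved (Smith, J. et al., 2020) significantly."))
    # -> "Results improved significantly."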
1 change: 0 additions & 1 deletion ice/nn/bert_t5_t0_example.py
@@ -65,7 +65,6 @@ def extract_numbers(text: str) -> list[str]:
 
 
 def classify_example():
-
     abstract = """In this study we will examine the impact of the use of ..."""
     paragraph = """[..] The adherence rate is 88.2%."""
     numbers = extract_numbers(paragraph)
1 change: 0 additions & 1 deletion ice/recipes/combine_abstract_answers.py
@@ -4,7 +4,6 @@
 
 
 def make_prompt(question: str, abstracts: list[Abstract], answers: list[str]) -> str:
-
     abstract_answers_str = "\n\n".join(
         [
             f"Title B{i}: {abstract.title}\nAbstract B{i}: {abstract.text}\nAnswer B{i}: {answer}"
1 change: 0 additions & 1 deletion ice/recipes/comparisons_qa.py
@@ -47,7 +47,6 @@ async def run(
         num_paragraphs: int = 3,
         answer_prefix: str = DEFAULT_ANSWER_PREFIX,
     ):
-
         rank_paragraphs = RankParagraphs(mode=self.mode)
 
         top_paragraphs = await rank_paragraphs.run(
2 changes: 0 additions & 2 deletions ice/recipes/consort_flow/baseline_elicit_answer.py
@@ -24,7 +24,6 @@ async def answer_like_elicit_qa(
     question: str,
     passage: str,
 ) -> str:
-
     prompt = elicit_qa_prompt(
         qa_question=question,
         excerpt=passage,
@@ -68,7 +67,6 @@ def elicit_qa_prompt(
     qa_question: str,
     excerpt: str,
 ) -> str:
-
     full_answer_prefix = "Answer:"
 
     return f"""Answer the question "{qa_question}" based on the excerpt from a research paper. \
1 change: 0 additions & 1 deletion ice/recipes/evaluate_result.py
@@ -150,7 +150,6 @@ async def run(
         gold_result: Optional[str] = None,
         question: Optional[str] = None,
     ) -> ResultComparison:
-
         if self.mode == "test":
             model_results, gold_results, question = self.test_data()
             model_result = model_results[0]
1 change: 0 additions & 1 deletion ice/recipes/placebo_description.py
@@ -60,7 +60,6 @@ async def get_gold_experiments(self, paper: Paper) -> list[str]:
     async def placebo_for_experiment(
         self, paper: Paper, experiment: str, record=recorder
     ) -> str:
-
         # Generate the QA prompt
         qa_prompt = self.make_prompt(paper, experiment)
 
5 changes: 1 addition & 4 deletions ice/recipes/placebo_dialogs.py
@@ -73,14 +73,13 @@ async def ask(self, question: str, multiline=True, answer_prefix=""):
     async def multiple_choice(
         self, question: str, answers: list[str]
     ) -> tuple[dict[str, float], "DialogState"]:
-
         answer_prefix = longest_common_prefix(answers).rstrip()
         new_context = f"{self.context}\n\nQ: {question}\n\nA: {answer_prefix}"
         prediction = await self.agent.predict(context=new_context, default=" ")
 
         def lookup_prob(answer: str):
             scores = 0.0
-            for (token, prob) in prediction.items():
+            for token, prob in prediction.items():
                 if answer[len(answer_prefix) :].startswith(token):
                     scores += prob
             return scores
@@ -129,7 +128,6 @@ def make_initial_paragraph_context(
 
 
 class PlaceboDialogs(Recipe):
-
     verbose = False
 
     msg = SimpleNamespace(
@@ -367,7 +365,6 @@ async def aggregate_placebo_kind(
         return {"answer": answer, "quotes": quotes, "component_answers": answers}
 
     async def analyze_experiment(self, paper: Paper, experiment: Experiment):
-
         paragraphs = [
             paragraph
             for paragraph in paper.paragraphs
1 change: 0 additions & 1 deletion ice/recipes/primer/sequential_action.py
@@ -259,7 +259,6 @@ async def sequential_action(
     log: list[str] = []
 
     for actions_left in range(max_actions, 0, -1):
-
         sufficient_info = await is_info_sufficient(question, log)
         if sufficient_info:
             break
1 change: 0 additions & 1 deletion ice/recipes/single_prompt.py
@@ -41,7 +41,6 @@ class SinglePrompt(Recipe):
     default_answer_classification: Optional[str]
 
     async def run(self, paper: Paper):
-
         # Get the full paper text and truncate it
         full_paper_text = get_paper_text(paper)
         paper_text = truncate_by_tokens(full_paper_text, max_tokens=self.max_tokens)
1 change: 0 additions & 1 deletion ice/recipes/synthesize.py
@@ -95,7 +95,6 @@ def _get_reference(authors: list[str], year: Optional[int]) -> str:
 
 
 async def synthesize(question: str, abstracts: list[Abstract]) -> str:
-
     papers_str = "\n\n".join(
         [
             PAPER_FORMAT.format(
3 changes: 1 addition & 2 deletions main.py
@@ -193,8 +193,7 @@ async def print_results(
     """
     results_json: list[dict] = []
 
-    for (document_id, final_result) in results_by_doc.items():
-
+    for document_id, final_result in results_by_doc.items():
         if json_out is not None:
             results_json.extend(recipe.to_json(final_result))
 
