Merge branch 'UKPLab:master' into Prompting-on-evaluators
ArthurCamara committed Sep 23, 2024
2 parents 005039f + 7290448 commit f95cb46
Showing 2 changed files with 18 additions and 7 deletions.
9 changes: 6 additions & 3 deletions pyproject.toml
@@ -1,12 +1,15 @@
 [project]
 name = "sentence-transformers"
 version = "3.2.0.dev0"
-description = "Multilingual text embeddings"
-license = { file = "LICENSE" }
+description = "State-of-the-Art Text Embeddings"
+license = { text = "Apache 2.0" }
 readme = "README.md"
 authors = [
     { name = "Nils Reimers", email = "info@nils-reimers.de" },
-    { name = "Tom Aarsen" },
+    { name = "Tom Aarsen", email = "tom.aarsen@huggingface.co" },
 ]
+maintainers = [
+    { name = "Tom Aarsen", email = "tom.aarsen@huggingface.co" }
+]
 requires-python = ">=3.8"
 keywords = [
16 changes: 12 additions & 4 deletions sentence_transformers/util.py
@@ -714,8 +714,12 @@ def mine_hard_negatives(
         except Exception:
             pass

-        corpus_embeddings = model.encode(corpus, batch_size=batch_size, convert_to_numpy=True, show_progress_bar=True)
-        query_embeddings = model.encode(queries, batch_size=batch_size, convert_to_numpy=True, show_progress_bar=True)
+        corpus_embeddings = model.encode(
+            corpus, batch_size=batch_size, normalize_embeddings=True, convert_to_numpy=True, show_progress_bar=True
+        )
+        query_embeddings = model.encode(
+            queries, batch_size=batch_size, normalize_embeddings=True, convert_to_numpy=True, show_progress_bar=True
+        )
         index.add(corpus_embeddings)

         scores_list = []
@@ -731,8 +735,12 @@ def mine_hard_negatives(

     else:
         # Embed the corpus and the queries
-        corpus_embeddings = model.encode(corpus, batch_size=batch_size, convert_to_numpy=True, show_progress_bar=True)
-        query_embeddings = model.encode(queries, batch_size=batch_size, convert_to_numpy=True, show_progress_bar=True)
+        corpus_embeddings = model.encode(
+            corpus, batch_size=batch_size, normalize_embeddings=True, convert_to_numpy=True, show_progress_bar=True
+        )
+        query_embeddings = model.encode(
+            queries, batch_size=batch_size, normalize_embeddings=True, convert_to_numpy=True, show_progress_bar=True
+        )
         scores = model.similarity(query_embeddings, corpus_embeddings).to(device)

         # Keep only the range_max + max_positives highest scores. We offset by 1 to potentially include the positive pair
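For context on the util.py change: passing normalize_embeddings=True makes SentenceTransformer.encode return unit-length vectors, so a plain dot product (e.g. a FAISS inner-product index, or model.similarity) scores pairs by cosine similarity. A minimal sketch of the effect, assuming a locally available example checkpoint (the model name below is only an illustration, not part of this commit):

from sentence_transformers import SentenceTransformer
import numpy as np

# Example checkpoint; any SentenceTransformer model behaves the same way here.
model = SentenceTransformer("all-MiniLM-L6-v2")

sentences = ["What is hard negative mining?", "A completely unrelated sentence."]

# With normalize_embeddings=True every embedding has an L2 norm of 1,
# so the dot product between two vectors equals their cosine similarity.
embeddings = model.encode(sentences, normalize_embeddings=True, convert_to_numpy=True)

print(np.linalg.norm(embeddings, axis=1))  # approximately [1.0, 1.0]
print(embeddings @ embeddings.T)           # dot products == cosine similarities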
