Skip to content
This repository has been archived by the owner on Mar 1, 2023. It is now read-only.

Commit

Permalink
Fix: papers with no collection were accidentally not being updated
Browse files Browse the repository at this point in the history
  • Loading branch information
rabdill committed Feb 24, 2019
1 parent b9699f9 commit 038eefa
Showing 1 changed file with 8 additions and 1 deletion.
9 changes: 8 additions & 1 deletion spider/spider.py
Original file line number Diff line number Diff line change
Expand Up @@ -299,6 +299,8 @@ def refresh_article_stats(self, collection=None, cap=10000, id=None, get_authors
WHERE authors=0;
"""
cursor.execute(sql)
elif collection is None:
cursor.execute("SELECT id, url, doi FROM articles WHERE collection IS NULL AND last_crawled < now() - interval %s;", (config.refresh_interval,))
else:
cursor.execute("SELECT id, url, doi FROM articles WHERE collection=%s AND last_crawled < now() - interval %s;", (collection, config.refresh_interval))
else:
Expand Down Expand Up @@ -946,10 +948,15 @@ def full_run(spider):
# HACK: There are way more neuro papers, so we check twice as many in each run
if collection == 'neuroscience':
spider.refresh_article_stats(collection, config.refresh_category_cap)
spider.refresh_article_stats(collection, get_authors=True)
else:
spider.log.record("Skipping refresh of paper download stats: disabled in configuration file.", 'debug')

if config.crawl["refresh_stats"] is not False:
# Refresh the articles without a collection:
spider.refresh_article_stats()
# Grab authors for any articles that don't currently have any:
spider.refresh_article_stats(get_authors=True)

if config.crawl["fetch_crossref"] is not False:
spider.pull_todays_crossref_data()
else:
Expand Down

0 comments on commit 038eefa

Please sign in to comment.