Plugin qiita-pet #1552
Changes from all commits: cb60a2b, 7290ab5, 52eef66, 183a098, a3157f9, f05352b
First changed file: the qiita_pet analysis handlers.
```diff
@@ -27,6 +27,7 @@
 from qiita_pet.exceptions import QiitaPetAuthorizationError
 from qiita_ware.dispatchable import run_analysis
 from qiita_db.analysis import Analysis
+from qiita_db.artifact import Artifact
 from qiita_db.job import Job, Command
 from qiita_db.util import (get_db_files_base_dir,
                            check_access_to_analysis_result,
```
|
```diff
@@ -163,7 +164,7 @@ def get(self, analysis_id):
         for proc_data_id, samples in viewitems(dropped_samples):
             if not samples:
                 continue
-            proc_data = ProcessedData(proc_data_id)
+            proc_data = Artifact(proc_data_id)
             data_type = proc_data.data_type()
             study = proc_data.study
             dropped[data_type].append((Study(study).title, len(samples),
```
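For context on what this loop builds, here is a standalone sketch of the same grouping step that uses plain dictionaries as stand-ins for the qiita_db `Artifact` and `Study` objects; the stand-in data and the third tuple element are illustrative only, since the handler's `append()` call is truncated in the hunk above.

```python
from collections import defaultdict

# Stand-ins for the qiita_db objects used in the handler; the real Artifact
# and Study classes are not needed to illustrate the grouping.
artifacts = {
    1: {'data_type': '16S', 'study_title': 'Study A'},
    2: {'data_type': 'Metagenomic', 'study_title': 'Study B'},
}
dropped_samples = {1: ['S1', 'S2'], 2: []}

dropped = defaultdict(list)
for proc_data_id, samples in dropped_samples.items():
    if not samples:
        # artifacts with no dropped samples are skipped, as in the handler
        continue
    info = artifacts[proc_data_id]
    # third tuple element is a guess; the original call is cut off above
    dropped[info['data_type']].append(
        (info['study_title'], len(samples), ', '.join(samples)))

print(dict(dropped))  # {'16S': [('Study A', 2, 'S1, S2')]}
```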
|
```diff
@@ -214,8 +215,7 @@ def get(self):
         level = self.get_argument('level', '')
         user = self.current_user

-        analyses = [Analysis(a) for a in
-                    user.shared_analyses | user.private_analyses]
+        analyses = user.shared_analyses | user.private_analyses

         is_local_request = is_localhost(self.request.headers['host'])
         gfi = partial(get_filepath_id, 'analysis')
```
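The simplified line only works if `shared_analyses` and `private_analyses` now return `Analysis` objects rather than bare IDs, which is what this hunk implies. A minimal standalone sketch of that kind of API shift, with stand-in classes rather than the real qiita_db ones:

```python
class Analysis:
    """Stand-in for qiita_db.analysis.Analysis."""
    def __init__(self, analysis_id):
        self.id = analysis_id

    # identity by id, so the set union de-duplicates analyses that are
    # both shared with and owned by the user
    def __eq__(self, other):
        return isinstance(other, Analysis) and other.id == self.id

    def __hash__(self):
        return hash(self.id)


class User:
    """Stand-in user whose properties return objects, not IDs."""
    @property
    def shared_analyses(self):
        return {Analysis(1), Analysis(2)}

    @property
    def private_analyses(self):
        return {Analysis(2), Analysis(3)}


user = User()
# old style: analyses = [Analysis(a) for a in id_set]
# new style: the union of object sets is already what the handler needs
analyses = user.shared_analyses | user.private_analyses
print(sorted(a.id for a in analyses))  # [1, 2, 3]
```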
|
```diff
@@ -307,13 +307,21 @@ def get(self):
         # Format sel_data to get study IDs for the processed data
         sel_data = defaultdict(dict)
         proc_data_info = {}
-        sel_samps = Analysis(self.current_user.default_analysis).samples
+        sel_samps = self.current_user.default_analysis.samples
         for pid, samps in viewitems(sel_samps):
-            proc_data = ProcessedData(pid)
+            proc_data = Artifact(pid)
             sel_data[proc_data.study][pid] = samps
             # Also get processed data info
-            proc_data_info[pid] = proc_data.processing_info
-            proc_data_info[pid]['data_type'] = proc_data.data_type()
+            # TODO plugin:
+            # proc_data_info[pid] = proc_data.processing_info
+            proc_data_info[pid] = {'processed_date': '10/10/1981',
+                                   'algorithm': 'My algorithm',
+                                   'reference_name': 'My reference name',
+                                   'reference_version': 'My reference version',
+                                   'sequence_filepath': 'My sequence filepath',
+                                   'taxonomy_filepath': 'My taxonomy filepath',
+                                   'tree_filepath': 'My taxonomy filepath'}
+            proc_data_info[pid]['data_type'] = proc_data.data_type
         self.render("analysis_selected.html", sel_data=sel_data,
                     proc_info=proc_data_info)
```

> Inline comment on the `# TODO plugin:` line: Just a note, we will be able to duplicate this information with the artifacts; however, this is highly tied to target gene and is not going to work for other data types, so we should rethink this information.
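Since the hard-coded dictionary is a stopgap until the plugin system exposes real processing parameters, one option is to keep the placeholder behind a single clearly marked function so the later TODO cleanup touches one place. This is purely a sketch; the helper name is hypothetical and not existing Qiita code, and the keys and dummy values are copied from the hunk above (including its repeated 'My taxonomy filepath' value for the tree filepath).

```python
def _placeholder_processing_info(artifact):
    """Return dummy processing info until the plugin system provides it.

    TODO plugin: replace with the real per-artifact processing parameters.
    The ``artifact`` argument is unused for now but keeps the eventual
    call signature.
    """
    return {'processed_date': '10/10/1981',
            'algorithm': 'My algorithm',
            'reference_name': 'My reference name',
            'reference_version': 'My reference version',
            'sequence_filepath': 'My sequence filepath',
            'taxonomy_filepath': 'My taxonomy filepath',
            'tree_filepath': 'My taxonomy filepath'}
```

The loop body would then reduce to `proc_data_info[pid] = _placeholder_processing_info(proc_data)` plus the `data_type` assignment.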
|
|
```diff
@@ -322,5 +330,5 @@ class AnalysisSummaryAJAX(BaseHandler):
     @authenticated
     @execute_as_transaction
     def get(self):
-        info = Analysis(self.current_user.default_analysis).summary_data()
+        info = self.current_user.default_analysis.summary_data()
         self.write(dumps(info))
```
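The handler pattern here (compute a summary dict, serialize it, write it back) looks like this as a self-contained Tornado app. The route, the summary values, and the missing `@authenticated`/`@execute_as_transaction` decorators are all simplifications, not Qiita's actual wiring:

```python
from json import dumps

from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler


class AnalysisSummaryAJAX(RequestHandler):
    """Minimal stand-in for the Qiita handler: write a summary dict as JSON."""

    def get(self):
        # In Qiita this comes from current_user.default_analysis.summary_data()
        info = {'studies': 2, 'artifacts': 3, 'samples': 120}
        self.set_header('Content-Type', 'application/json')
        self.write(dumps(info))


if __name__ == '__main__':
    # the route is arbitrary; Qiita registers its own URL patterns elsewhere
    app = Application([(r'/analysis/summary/', AnalysisSummaryAJAX)])
    app.listen(8888)
    IOLoop.current().start()
```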
Second changed file: the qiita_pet study listing handlers.
```diff
@@ -14,24 +14,25 @@
 from tornado.gen import coroutine, Task
 from pyparsing import ParseException

+from qiita_db.artifact import Artifact
 from qiita_db.user import User
 from qiita_db.study import Study, StudyPerson
 from qiita_db.search import QiitaStudySearch
 from qiita_db.metadata_template.sample_template import SampleTemplate
 from qiita_db.logger import LogEntry
 from qiita_db.exceptions import QiitaDBIncompatibleDatatypeError
-from qiita_db.util import get_table_cols
+from qiita_db.util import get_table_cols, get_pubmed_ids_from_dois
 from qiita_core.exceptions import IncompetentQiitaDeveloperError
 from qiita_core.util import execute_as_transaction
 from qiita_pet.handlers.base_handlers import BaseHandler
-from qiita_pet.handlers.util import study_person_linkifier, pubmed_linkifier
+from qiita_pet.handlers.util import (
+    study_person_linkifier, doi_linkifier, pubmed_linkifier)


 @execute_as_transaction
 def _get_shared_links_for_study(study):
     shared = []
     for person in study.shared_with:
         person = User(person)
         name = person.info['name']
         email = person.email
         # Name is optional, so default to email if non existant
```
|
|
```diff
@@ -69,15 +70,23 @@ def _build_single_study_info(study, info, study_proc, proc_samples):
     """
     PI = StudyPerson(info['principal_investigator_id'])
     status = study.status
-    if info['pmid'] is not None:
-        info['pmid'] = ", ".join([pubmed_linkifier([p])
-                                  for p in info['pmid']])
+    if info['publication_doi'] is not None:
+        pmids = get_pubmed_ids_from_dois(info['publication_doi']).values()
+        info['pmid'] = ", ".join([pubmed_linkifier([p]) for p in pmids])
+        info['publication_doi'] = ", ".join([doi_linkifier([p])
+                                             for p in info['publication_doi']])
     else:
+        info['publication_doi'] = ""
         info['pmid'] = ""
     if info["number_samples_collected"] is None:
         info["number_samples_collected"] = 0
     info["shared"] = _get_shared_links_for_study(study)
-    info["num_raw_data"] = len(study.raw_data())
+    # raw data is any artifact that is not Demultiplexed or BIOM
+    info["num_raw_data"] = len([a for a in study.artifacts()
+                                if a.artifact_type not in ['Demultiplexed',
+                                                           'BIOM']])
     info["status"] = status
     info["study_id"] = study.id
     info["pi"] = study_person_linkifier((PI.email, PI.name))
```
|
|
```diff
@@ -112,11 +121,12 @@ def _build_single_proc_data_info(proc_data_id, data_type, samples):
     dict
         The information for the processed data, in the form {info: value, ...}
     """
-    proc_data = ProcessedData(proc_data_id)
-    proc_info = proc_data.processing_info
+    proc_data = Artifact(proc_data_id)
+    # TODO plugin:
+    # proc_info = proc_data.processing_info
+    proc_info = {'processed_date': '10/10/1981'}
     proc_info['pid'] = proc_data_id
     proc_info['data_type'] = data_type
     proc_info['samples'] = sorted(samples)
     proc_info['processed_date'] = str(proc_info['processed_date'])
     return proc_info
```

> Reviewer, on the `# TODO plugin:` line: What is this TODO for? Is this worth raising an issue?
>
> Author: No, the issue is that some of these changes depend on my changes for the plugin, but the general qiita_pet update should not be blocked by this. This way most of the changes are already in place, and I can just search for the TODO and connect things together in a subsequent PR.
|
|
```diff
@@ -166,30 +176,29 @@ def _build_study_info(user, study_proc=None, proc_samples=None):
         # No studies left so no need to continue
         return []

     # get info for the studies
     cols = ['study_id', 'email', 'principal_investigator_id',
-            'pmid', 'study_title', 'metadata_complete',
+            'publication_doi', 'study_title', 'metadata_complete',
             'number_samples_collected', 'study_abstract']
     study_info = Study.get_info(study_set, cols)

     # get info for the studies
     infolist = []
-    for info in study_info:
-        # Convert DictCursor to proper dict
-        info = dict(info)
-        study = Study(info['study_id'])
+    for study in study_set:
         # Build the processed data info for the study if none passed
         if build_samples:
-            proc_data_list = study.processed_data()
+            proc_data_list = [ar for ar in study.artifacts()
+                              if ar.artifact_type == 'BIOM']
             proc_samples = {}
             study_proc = {study.id: defaultdict(list)}
-            for pid in proc_data_list:
-                proc_data = ProcessedData(pid)
-                study_proc[study.id][proc_data.data_type()].append(pid)
-                proc_samples[pid] = proc_data.samples
+            for proc_data in proc_data_list:
+                study_proc[study.id][proc_data.data_type].append(proc_data.id)
+                # there is only one prep template for each processed data
+                proc_samples[proc_data.id] = proc_data.prep_templates[0].keys()

+        info = dict(study.get_info(info_cols=cols)[0])
         study_info = _build_single_study_info(study, info, study_proc,
                                               proc_samples)
         infolist.append(study_info)

     return infolist
```
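The per-study block above swaps `study.processed_data()` for "every artifact of type BIOM" and groups the results by data type. A standalone sketch of that filter-and-group step with namedtuple stand-ins (not the real `Artifact` objects, which also carry prep templates):

```python
from collections import defaultdict, namedtuple

# Stand-ins; in Qiita these would be qiita_db Artifact objects.
FakeArtifact = namedtuple('FakeArtifact', 'id artifact_type data_type samples')

artifacts = [
    FakeArtifact(1, 'Demultiplexed', '16S', ['S1', 'S2']),
    FakeArtifact(2, 'BIOM', '16S', ['S1', 'S2']),
    FakeArtifact(3, 'BIOM', 'Metagenomic', ['S3']),
]

# keep only BIOM artifacts, as the hunk does
proc_data_list = [a for a in artifacts if a.artifact_type == 'BIOM']

study_id = 7
study_proc = {study_id: defaultdict(list)}
proc_samples = {}
for proc_data in proc_data_list:
    # group artifact ids by data type, and record their samples
    study_proc[study_id][proc_data.data_type].append(proc_data.id)
    proc_samples[proc_data.id] = proc_data.samples

print(dict(study_proc[study_id]))  # {'16S': [2], 'Metagenomic': [3]}
print(proc_samples)                # {2: ['S1', 'S2'], 3: ['S3']}
```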
|
|
> Reviewer: If this information can't be NULL, there should be an error somewhere: either the interface is assuming it is always there, or the database should have a NOT NULL restriction and the Python code should be updated accordingly.
>
> Author: This shouldn't be blocking, so I added an issue: #1553
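The thread above is about where the "can this be NULL?" decision should live. The hunk currently defends in Python with per-field defaults; this tiny illustrative sketch (not Qiita code) makes the trade-off concrete: if the database later enforces NOT NULL with a default, the Python branch becomes dead code.

```python
def clean_study_info(info):
    """Apply the defensive defaults the handler uses for NULL-able columns."""
    info = dict(info)
    if info.get('number_samples_collected') is None:
        # This is the kind of default a NOT NULL DEFAULT 0 constraint in the
        # database would make unnecessary.
        info['number_samples_collected'] = 0
    if info.get('publication_doi') is None:
        info['publication_doi'] = ''
        info['pmid'] = ''
    return info


print(clean_study_info({'number_samples_collected': None,
                        'publication_doi': None}))
# {'number_samples_collected': 0, 'publication_doi': '', 'pmid': ''}
```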