Skip to content

Commit db67b61

Browse files
dariosky (Dario Varotto)
authored and committed
Version 1.0.29: Explicit parameters and major rewrites
+ support for custom_fields and conditions
1 parent 5d41dfd commit db67b61

17 files changed

+435
-58
lines changed

CHANGELOG.md

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,17 @@
11
# Changelog
22

3+
## v1.0.29 (2019-05-21)
4+
**dataset creation explicit parameters**
5+
6+
The Dataset parameters are now explicitly passed in the constructor
7+
instead of being hidden in the kwargs.
8+
9+
This also allows clear support for custom_fields and conditions.
10+
11+
A few new examples have been added or updated:
12+
[get historical flat files](ravenpackapi/examples/get_historical_flat_files.py) and
13+
[create a dataset with custom_fields and conditions](ravenpackapi/examples/indicator_datasets.py).
14+
315
## v1.0.28 (2019-05-15)
416
**dataset.count method**
517

conftest.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
import logging
2+
import os
3+
4+
# set the DEBUG environ variable to enable verbose logging
5+
if 'DEBUG' in os.environ:
6+
logging.basicConfig()
7+
logging.getLogger('ravenpack').setLevel(logging.DEBUG)

pytest.ini

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,3 +11,4 @@ markers=
1111
slow: marks tests as slow (deselect with '-m "not slow"')
1212
datafile: the async datafile requests
1313
json: the sync json requests
14+
datasets: CRUD operations over datasets

ravenpackapi/core.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
from ravenpackapi.utils.constants import JSON_AVAILABLE_FIELDS, ENTITY_TYPES
1515

1616
_VALID_METHODS = ('get', 'post', 'put', 'delete')
17-
VERSION = '1.0.28'
17+
VERSION = '1.0.29'
1818

1919
logger = logging.getLogger("ravenpack.core")
2020

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
# Download the historical compressed flat files (with all entities or just companies).
# Each yearly zip is downloaded, then its members are decompressed and combined
# into a single csv file per year, writing the header row only once.

import os
import zipfile

import requests

from ravenpackapi import RPApi
from ravenpackapi.util import parse_csv_line  # kept for API parity with other examples

api_key = os.environ['RP_API_KEY']  # set your API KEY here
api = RPApi(api_key)

flat_type = 'companies'  # can be 'companies' or 'full'
full_list_api_url = 'https://app.ravenpack.com/downloads/history-list/%s' % flat_type
response = requests.get(
    full_list_api_url,
    params=dict(token=api_key),
)
response.raise_for_status()
for flat_file in response.json():
    local_filename = flat_file['name']
    output_filename = '%s.combined.csv' % local_filename
    if os.path.isfile(output_filename):
        continue  # this year is already combined: nothing to do
    with open(output_filename, 'wb') as output:
        headers_written = False  # write the csv header only once per combined file
        with requests.get(
            'https://app.ravenpack.com/history/getfile',
            params=dict(token=api_key, id=flat_file['id'], type=flat_type),
            stream=True,  # stream: the yearly archives can be large
        ) as flatzip:
            flatzip.raise_for_status()
            if not os.path.isfile(local_filename):
                print("Downloading", local_filename, flat_file['size'])
                with open(local_filename, 'wb') as f:
                    for chunk in flatzip.iter_content(chunk_size=8192):
                        f.write(chunk)
            with zipfile.ZipFile(local_filename) as zipped:
                # `member` (not `csv`) to avoid shadowing the stdlib module name
                for member_name in zipped.namelist():
                    print(member_name)
                    with zipped.open(member_name) as member:
                        header_line = next(member)
                        if not headers_written:
                            output.write(header_line)
                            headers_written = True
                        # copy the remaining rows verbatim (no per-line parsing:
                        # the parsed values were never used and only slowed the loop)
                        for line in member:
                            output.write(line)

ravenpackapi/examples/indicator_datasets.py

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
# Begin creating a dataset with your desired filters (see the RPA user guide for syntax)
77
# You can then add functions (https://app.ravenpack.com/api-documentation/#indicator-syntax)
88
# Alternatively you can also create the dataset via the query builder and just use the dataset_uuid
9+
print("Creating a dataset with a few functions...")
910
dataset = Dataset(api,
1011
name='My Indicator dataset',
1112
filters={"relevance": {"$gt": 90}},
@@ -22,17 +23,67 @@
2223
dataset.save()
2324

2425
# you can also change the fields, (remember to save afterward)
26+
print("Updating fields...")
2527
dataset.fields = [
2628
{"avg": {"avg": {"field": "EVENT_SENTIMENT_SCORE", "lookback": 365}}},
2729
]
2830
dataset.save()
2931

3032
# Following this, you can then generate a datafile (for your desired date range)
33+
print("Requesting a datafile in the CSV format...")
3134
job = dataset.request_datafile(
3235
start_date='2018-04-10', end_date='2018-04-11',
3336
output_format='csv'
3437
)
3538
job.save_to_file('output.csv') # This will poll until the file is ready for download
39+
print("Saved to output.csv")
3640

3741
# a convenience function to delete all the dataset given a name
3842
# delete_all_datasets_by_name(api, 'My Indicator dataset')
43+
44+
# here's an example of another dataset with custom_fields and conditions
45+
print("Creating a new dataset with functions and conditions...")
46+
dataset = api.create_dataset(Dataset.from_dict(
47+
{
48+
"name": "Dataset with functions and conditions",
49+
"fields": [
50+
"timestamp_utc",
51+
"rp_entity_id",
52+
"entity_name",
53+
"AVG_REL"
54+
],
55+
"filters": {
56+
},
57+
"custom_fields": [
58+
{
59+
"AVG_REL": {
60+
"avg": {
61+
"field": "RELEVANCE",
62+
"mode": "daily"
63+
}
64+
}
65+
}
66+
],
67+
"conditions": {
68+
"$and": [
69+
{
70+
"AVG_REL": {
71+
"$gt": 30
72+
}
73+
},
74+
{
75+
"rp_entity_id": {
76+
"$in": [
77+
"ROLLUP"
78+
]
79+
}
80+
}
81+
]
82+
},
83+
"frequency": "daily",
84+
"tags": []
85+
}
86+
))
87+
88+
dataset.save()
89+
print("Dataset created:", dataset.id)

0 commit comments

Comments
 (0)