Skip to content

Commit a286b6f

Browse files
authored
Merge pull request #30 from CurtLH/update_cli
Update CLI
2 parents 51080bd + 4fca55a commit a286b6f

File tree

1 file changed

+53
-28
lines changed

1 file changed

+53
-28
lines changed

prism/cli.py

Lines changed: 53 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ def main(ctx, base_url, tenant_name, client_id, client_secret, refresh_token):
3434
"""CLI for interacting with Workday’s Prism API"""
3535

3636
# initialize the prism class with your credentials
37-
p = prism.Prism(base_url, tenant_name, client_id, client_secret, refresh_token)
37+
p = prism.Prism(base_url, tenant_name, client_id, client_secret, refresh_token, version="v2")
3838

3939
# create the bearer token
4040
p.create_bearer_token()
@@ -44,64 +44,89 @@ def main(ctx, base_url, tenant_name, client_id, client_secret, refresh_token):
4444

4545

4646
@main.command()
@click.option("--name", default=None, type=str, help="The name of the table to obtain details about")
@click.pass_context
def list(ctx, name):
    """List all tables of type API.

    With no --name, prints the total number of API tables followed by the
    table data; with --name, prints the full status payload for that table.
    """

    # get the initialized prism class (stored on the context by main())
    p = ctx.obj["p"]

    # list the tables
    status = p.list_table(table_name=name)

    # print message
    # BUG FIX: the parameter was renamed from `id` to `name`, but this check
    # still read `id` (the builtin function, never None), so the summary
    # branch was unreachable and every call fell through to the else.
    if name is None:
        click.echo("There are {} API tables".format(status["total"]))
        click.echo(json.dumps(status["data"], indent=2, sort_keys=True))
    else:
        click.echo(json.dumps(status, indent=2, sort_keys=True))
6464

6565

6666
@main.command()
@click.argument("table_name", type=str)
@click.argument("schema_path", type=click.Path())
@click.pass_context
def create(ctx, table_name, schema_path):
    """Create a new Prism table TABLE_NAME with schema from SCHEMA_PATH

    Example: prism create my_table /home/data/schema.json
    """

    # the Prism client initialized by main() and stashed on the context
    p = ctx.obj["p"]

    # load the table schema from disk
    table_schema = prism.load_schema(schema_path)

    # table names may not contain spaces; normalize them to underscores
    sanitized_name = table_name.replace(" ", "_")

    # provision an empty API table with the loaded field definitions
    new_table = p.create_table(sanitized_name, schema=table_schema["fields"])

    # echo the created table description as pretty-printed JSON
    click.echo(json.dumps(new_table, indent=2, sort_keys=True))
90+
91+
92+
@main.command()
@click.argument("gzip_file", type=click.Path())
@click.argument("table_id", type=str)
@click.option(
    "--operation",
    type=click.Choice(["TruncateandInsert", "Insert", "Update", "Upsert", "Delete"]),
    default="TruncateandInsert",
    help="The Table load operation",
)
@click.pass_context
def upload(ctx, gzip_file, table_id, operation):
    """Upload GZIP_FILE to TABLE_ID

    Example: prism upload /home/data/file.csv.gz bbab30e3018b01a723524ce18010811b
    """

    # the Prism client initialized by main() and stashed on the context
    p = ctx.obj["p"]

    # look up the target table so we can derive a bucket schema from it
    table_details = p.describe_table(table_id)
    upload_schema = p.convert_describe_schema_to_bucket_schema(table_details)

    # stage the load: create a bucket, attach the file, then complete it
    bucket = p.create_bucket(upload_schema, table_id, operation=operation)
    p.upload_file_to_bucket(bucket["id"], gzip_file)
    p.complete_bucket(bucket["id"])

    # report the table's post-upload status as pretty-printed JSON
    result = p.list_table(table_id)
    click.echo(json.dumps(result["data"], indent=2, sort_keys=True))
106131

107132

0 commit comments

Comments
 (0)