% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spark_read_geojson.R
\name{spark_read_geojson}
\alias{spark_read_geojson}
\title{Reading GeoJSON files}
\usage{
spark_read_geojson(sc, name, path, magellanIndex = TRUE,
magellanIndexPrecision = 30L, ...)
}
\arguments{
\item{sc}{\code{\link[sparklyr]{spark_connection}} provided by sparklyr.}
\item{name}{The name to assign to the newly generated table (see also
\code{\link[sparklyr]{spark_read_source}}).}
\item{path}{The path to the GeoJSON file. This may be a local path or
an HDFS path.}
\item{magellanIndex}{\code{logical} specifying whether geometries should
be indexed when loading the data (see
\url{https://github.com/harsha2010/magellan#creating-indexes-while-loading-data}).
Indexing creates an additional column called "index" which holds the list of
ZOrder curves of the given precision (see argument \code{magellanIndexPrecision}).
Defaults to \code{TRUE}.}
\item{magellanIndexPrecision}{\code{integer} specifying the precision to use for creating
the ZOrder curves.}
\item{...}{Additional arguments passed to \code{\link[sparklyr]{spark_read_source}}.}
}
\value{
A \code{tbl_spark} which provides a \code{dplyr}-compatible reference to a
Spark DataFrame.
}
\description{
Imports data from GeoJSON files into Spark DataFrames.
}
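\examples{
\dontrun{
# Minimal usage sketch, not run: it assumes a local Spark connection with the
# magellan extension available and a GeoJSON file at "data/zones.geojson".
# The file path and table name are hypothetical placeholders.
library(sparklyr)

sc <- spark_connect(master = "local")

zones <- spark_read_geojson(
  sc,
  name = "zones",
  path = "data/zones.geojson",
  magellanIndex = TRUE,
  magellanIndexPrecision = 30L
)

# The returned tbl_spark can be used with dplyr verbs.
spark_disconnect(sc)
}
}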
\references{
\url{https://github.com/harsha2010/magellan}
\url{http://geojson.org/}
}
\seealso{
\code{\link[sparklyr]{spark_read_source}}
}
\concept{Spark serialization routines}
\keyword{connection}
\keyword{file}