package com.sparkbyexamples.spark.dataframe.functions.string

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

object SplitExample extends App {

  // Create a local SparkSession
  val spark = SparkSession.builder()
    .appName("SparkByExamples.com")
    .master("local")
    .getOrCreate()

  // Sample data: the "name" column holds comma-separated first, middle and last names
  val data = Seq(("James, A, Smith","2018","M",3000),
    ("Michael, Rose, Jones","2010","M",4000),
    ("Robert,K,Williams","2010","M",4000),
    ("Maria,Anne,Jones","2005","F",4000),
    ("Jen,Mary,Brown","2010","",-1)
  )

  import spark.implicits._
  val df = data.toDF("name","dob_year","gender","salary")
  df.printSchema()
  df.show(false)

  // split() returns an ArrayType column; getItem(i) selects the i-th element
  val df2 = df.select(split(col("name"),",").getItem(0).as("FirstName"),
    split(col("name"),",").getItem(1).as("MiddleName"),
    split(col("name"),",").getItem(2).as("LastName"))
    .drop("name")

  df2.printSchema()
  df2.show(false)

  // Same result using withColumn(), also keeping the full split array in "NameArray"
  val splitDF = df.withColumn("FirstName",split(col("name"),",").getItem(0))
    .withColumn("MiddleName",split(col("name"),",").getItem(1))
    .withColumn("LastName",split(col("name"),",").getItem(2))
    .withColumn("NameArray",split(col("name"),","))
    .drop("name")
  splitDF.printSchema()
  splitDF.show(false)

  // SPLIT is also available as a Spark SQL function
  df.createOrReplaceTempView("PERSON")
  spark.sql("select SPLIT(name,',') as NameArray from PERSON")
    .show(false)

}