cart_lg_dataframe_cached.py
from pyspark.sql import SparkSession, Row
from pyspark.mllib.random import RandomRDDs

# Build the session; spark.sql.crossJoin.enabled permits cartesian
# products, which Spark 2.x blocks by default.
spark = SparkSession.builder \
    .config("spark.sql.crossJoin.enabled", "true") \
    .getOrCreate()
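# Note: the explicit crossJoin() call used below typically succeeds in
# Spark 2.1+ even without this flag (the flag governs implicit cartesian
# joins); it is retained here as in the original script.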
# Generate an RDD of n random 5-dimensional vectors drawn from N(0, 1).
n = 5000000
nRow = n
nCol = 5
seed = 5
numPartitions = 32
sc = spark.sparkContext
# normalVectorRDD expects a SparkContext as its first argument,
# not a SparkSession.
rdd = RandomRDDs.normalVectorRDD(sc, nRow, nCol, numPartitions, seed)
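# Illustrative peek at the generated data (not in the original): each
# element is an array-like vector of nCol floats, so rdd.take(1) prints
# something like [array([ 0.32, -1.17,  0.05,  2.01, -0.44])].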
# Convert each vector to a Row; note that only the first four of the
# five generated columns (A-D) are kept.
randomNumberRdd = rdd.map(lambda x: Row(A=float(x[0]), B=float(x[1]), C=float(x[2]), D=float(x[3])))
# Create a DataFrame from the RDD and cache it so the cross join below
# reuses the data instead of recomputing the random RDD.
schemaRandomNumberDF = spark.createDataFrame(randomNumberRdd)
schemaRandomNumberDF.cache()
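# cache() is lazy: the data is only materialized by the first action.
# To warm the cache before measuring the cross join, an explicit action
# could be added here (an option, not in the original):
# schemaRandomNumberDF.count()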
# Cross join the DataFrame with itself, yielding n^2 = 2.5e13 rows, and
# force evaluation with show() and count(); count() will be expensive.
cross_df = schemaRandomNumberDF.crossJoin(schemaRandomNumberDF)
cross_df.show()
print("----------Count in cross-join--------------- {0}".format(cross_df.count()))