"""Build a small PySpark RDD of student records and confirm its type.

Each record is a tuple: (student_id, name, college, mark1, mark2).
The script prints True if ``parallelize`` returned an RDD.
"""
from pyspark.rdd import RDD
from pyspark.sql import DataFrame  # noqa: F401 — unused here; kept from original imports
from pyspark.sql import SparkSession

# Reuse the active SparkSession if one exists, otherwise create it.
spark = SparkSession.builder.getOrCreate()

# Distribute the records as an RDD. NOTE: the record for id "1" appears
# twice in the source data — RDDs preserve duplicates, so both are kept.
data = spark.sparkContext.parallelize([
    ("1", "sravan", "vignan", 67, 89),
    ("2", "ojaswi", "vvit", 78, 89),
    ("3", "rohith", "vvit", 100, 80),
    ("4", "sridevi", "vignan", 78, 80),
    ("1", "sravan", "vignan", 89, 98),
    ("5", "gnanesh", "iit", 94, 98),
])

# parallelize() returns an RDD, so this prints True.
print(isinstance(data, RDD))