How to Build a Spark Project with sbt
This article shows how to build a Spark project with sbt. The walkthrough is short and easy to follow; hopefully you will take something useful away from it.
Create an sbt project in IntelliJ IDEA, using Scala 2.10.4. The build.sbt:
name := "gstorm" version := "1.0" version := "1.0" //Older Scala Version scalaVersion := "2.10.4" val overrideScalaVersion = "2.11.8" val sparkVersion = "2.0.0" val sparkXMLVersion = "0.3.3" val sparkCsvVersion = "1.4.0" val sparkElasticVersion = "2.3.4" val sscKafkaVersion = "2.0.1" val sparkMongoVersion = "1.0.0" val sparkCassandraVersion = "1.6.0" //Override Scala Version to the above 2.11.8 version ivyScala := ivyScala.value map { _.copy(overrideScalaVersion = true) } resolvers ++= Seq( "All Spark Repository -> bintray-spark-packages" at "https://dl.bintray.com/spark-packages/maven/" ) libraryDependencies ++= Seq( "org.apache.spark" %% "spark-core" % sparkVersion exclude("jline", "2.12"), "org.apache.spark" %% "spark-sql" % sparkVersion excludeAll(ExclusionRule(organization = "jline"), ExclusionRule("name", "2.12")), "org.apache.spark" %% "spark-hive" % sparkVersion, "org.apache.spark" %% "spark-yarn" % sparkVersion, "com.databricks" %% "spark-xml" % sparkXMLVersion, "com.databricks" %% "spark-csv" % sparkCsvVersion, "org.apache.spark" %% "spark-graphx" % sparkVersion, "org.apache.spark" %% "spark-catalyst" % sparkVersion, "org.apache.spark" %% "spark-streaming" % sparkVersion, // "com.101tec" % "zkclient" % "0.9", "org.elasticsearch" %% "elasticsearch-spark" % sparkElasticVersion, // "org.apache.spark" %% "spark-streaming-kafka-0-10_2.11" % sscKafkaVersion, "org.MongoDB.spark" % "mongo-spark-connector_2.11" % sparkMongoVersion, "com.stratio.datasource" % "spark-mongodb_2.10" % "0.11.1", "dibbhatt" % "kafka-spark-consumer" % "1.0.8", "net.liftweb" %% "lift-webkit" % "2.6.2" )
WordCount.scala
import org.apache.spark.sql.SparkSession

object WordCount {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("Spark SQL Example")
      .master("local[2]")
      .config("spark.sql.codegen.WordCount", "true")
      .getOrCreate()

    // Use the underlying SparkContext for the RDD API
    val sc = spark.sparkContext

    val textFile = sc.textFile("hdfs://hadoop:9000/words.txt")

    // Split each line into words, pair each word with 1, then sum the counts per word
    val wordCounts = textFile
      .flatMap(line => line.split(" "))
      .map(word => (word, 1))
      .reduceByKey((a, b) => a + b)

    wordCounts.collect.foreach(println)
  }
}
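Because spark-sql is already on the classpath, the same count can also be written against the Dataset API instead of dropping down to the RDD layer. The sketch below is not part of the original project: the WordCountDS object name is made up for illustration, and it assumes the same words.txt file at hdfs://hadoop:9000/words.txt.

import org.apache.spark.sql.SparkSession

object WordCountDS {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("Word Count (Dataset API)")
      .master("local[2]")
      .getOrCreate()

    // Implicit encoders for Dataset[String] and the result tuples
    import spark.implicits._

    // Each line of the file becomes one element of a Dataset[String]
    val lines = spark.read.textFile("hdfs://hadoop:9000/words.txt")

    // Split lines into words, group identical words, and count each group
    val counts = lines
      .flatMap(line => line.split(" "))
      .groupByKey(word => word)
      .count()

    counts.collect().foreach(println)
    spark.stop()
  }
}

Since master is hard-coded to local[2], either version can be started directly with sbt run; against a real cluster you would drop the hard-coded master, package the job, and submit it with spark-submit instead.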
The above covers how to build a Spark project with sbt; hopefully you have picked up some new knowledge or skills from it.