This article is part 5 of the Spark learning series and covers saving Spark computation results to Oracle. Hopefully it offers a useful reference for developers facing the same problem.
Saving Spark results to Oracle follows essentially the same steps as the earlier Spark examples:
Step 1: create a SparkContext to connect to Spark
Step 2: read the input file
Step 3: run the computation
Step 4: save the result, whether to Hadoop (HDFS) or to Oracle
When creating the project, besides the jars under Spark's lib directory we also need to add the Oracle JDBC driver to the project. It ships with the Oracle installation, for example at C:\oracle\product\10.2.0\db_1\jdbc\lib\ojdbc14.jar.
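If the job is launched with spark-submit instead of from the IDE, the driver jar can also be supplied on the command line. A minimal sketch, assuming the application has been packaged as my-spark-demo.jar (a name made up for illustration) and ojdbc14.jar has been copied into the current directory:

spark-submit --class demo.MyCountToOracle --master local --jars ojdbc14.jar my-spark-demo.jar

The --jars option puts the driver on the classpath of both the driver and the executors.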
Here we use plain JDBC to write the data into Oracle. Note that the straightforward approach runs into a serialization problem; the code below demonstrates it:
package demo

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import java.sql.Connection
import java.sql.DriverManager

object MyCountToOracle {
  def main(args: Array[String]): Unit = {
    // Create the SparkContext
    val conf = new SparkConf().setAppName("MyWebCount").setMaster("local")
    val sc = new SparkContext(conf)

    // Read the input data. Sample line:
    // [30/Jul/2017:12:54:56 +0800] "GET /MyDemoWeb/java.jsp HTTP/1.1" 200 240 192.168.88.1 - -
    val rdd1 = sc.textFile("G:/msdownld.tmp/localhost_access_log.2017-07-30.txt").map((line: String) => {
      val line1 = line.substring(line.indexOf("\"") + 1, line.lastIndexOf("\""))
      val line2 = line1.substring(line1.indexOf(" ") + 1, line1.lastIndexOf(" "))
      val pageName = line2.substring(line2.lastIndexOf("/") + 1)
      (pageName, 1)
    })
    val rdd2 = rdd1.reduceByKey(_ + _)

    // Sort by the page view count
    val rdd3 = rdd2.sortBy(_._2, true)

    // Create the Oracle connection
    Class.forName("oracle.jdbc.OracleDriver") // register the Oracle driver
    val conn: Connection = DriverManager.getConnection("jdbc:oracle:thin:@192.168.112.130:1521/orcl", "scott", "tiger")
    val statement = conn.prepareStatement("insert into pageview values(?,?)")

    // Loop over the results and write them to the database
    rdd3.foreach(f => {
      statement.setString(1, f._1)
      statement.setInt(2, f._2)
      statement.executeUpdate()
    })
    statement.close()
    conn.close()

    // Stop the SparkContext
    sc.stop()
  }
}
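The insert statement assumes a two-column pageview table already exists in the scott schema; the original post does not show its DDL. A minimal sketch that creates a matching table (the column names pname and pcount are assumptions):

import java.sql.DriverManager

object CreatePageViewTable {
  def main(args: Array[String]): Unit = {
    Class.forName("oracle.jdbc.OracleDriver") // register the Oracle driver
    val conn = DriverManager.getConnection("jdbc:oracle:thin:@192.168.112.130:1521/orcl", "scott", "tiger")
    val stmt = conn.createStatement()
    // Two columns matching the (String, Int) pairs the job writes; column names are assumed
    stmt.execute("create table pageview (pname varchar2(200), pcount number)")
    stmt.close()
    conn.close()
  }
}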
Running it fails with the following error:
Exception in thread "main" org.apache.spark.SparkException: Task not serializable
    at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:298)
    at org.apache.spark.util.ClosureCleaner$.org$apache$spark$util$ClosureCleaner$$clean(ClosureCleaner.scala:288)
    at org.apache.spark.util.ClosureCleaner$.clean(ClosureCleaner.scala:108)
    at org.apache.spark.SparkContext.clean(SparkContext.scala:2094)
    at org.apache.spark.rdd.RDD$$anonfun$foreach$1.apply(RDD.scala:916)
    at org.apache.spark.rdd.RDD$$anonfun$foreach$1.apply(RDD.scala:915)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:362)
    at org.apache.spark.rdd.RDD.foreach(RDD.scala:915)
    at demo.MyCountToOracle$.main(MyCountToOracle.scala:33)
    at demo.MyCountToOracle.main(MyCountToOracle.scala)
Caused by: java.io.NotSerializableException: oracle.jdbc.driver.T4CPreparedStatement
Serialization stack:
    - object not serializable (class: oracle.jdbc.driver.T4CPreparedStatement, value: oracle.jdbc.driver.T4CPreparedStatement@43d38654)
    - field (class: demo.MyCountToOracle$$anonfun$main$1, name: statement$1, type: interface java.sql.PreparedStatement)
    - object (class demo.MyCountToOracle$$anonfun$main$1, <function1>)
    at org.apache.spark.serializer.SerializationDebugger$.improveException(SerializationDebugger.scala:40)
    at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:46)
    at org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:100)
    at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:295)
This happens because an RDD is made up of partitions, and the function passed to foreach is serialized and shipped to the executors that process those partitions, but T4CPreparedStatement does not implement Serializable. The fix is to use the foreachPartition operator and open the connection inside each partition instead of in the driver.
package demo

import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import java.sql.Connection
import java.sql.DriverManager

object MyCountToOracle1 {
  def main(args: Array[String]): Unit = {
    // Create the SparkContext
    val conf = new SparkConf().setAppName("MyWebCount").setMaster("local")
    val sc = new SparkContext(conf)

    // Read the input data. Sample line:
    // [30/Jul/2017:12:54:56 +0800] "GET /MyDemoWeb/java.jsp HTTP/1.1" 200 240 192.168.88.1 - -
    val rdd1 = sc.textFile("G:/msdownld.tmp/localhost_access_log.2017-07-30.txt").map((line: String) => {
      val line1 = line.substring(line.indexOf("\"") + 1, line.lastIndexOf("\""))
      val line2 = line1.substring(line1.indexOf(" ") + 1, line1.lastIndexOf(" "))
      val pageName = line2.substring(line2.lastIndexOf("/") + 1)
      (pageName, 1)
    })
    val rdd2 = rdd1.reduceByKey(_ + _)

    // Sort by the page view count
    val rdd3 = rdd2.sortBy(_._2, true)

    // Write each partition to Oracle
    rdd3.foreachPartition(saveAsOracle)

    // Stop the SparkContext
    sc.stop()
  }

  def saveAsOracle(iter: Iterator[(String, Int)]): Unit = {
    // Create the Oracle connection (one per partition, on the executor side)
    Class.forName("oracle.jdbc.OracleDriver") // register the Oracle driver
    val conn: Connection = DriverManager.getConnection("jdbc:oracle:thin:@192.168.112.130:1521/orcl", "scott", "tiger")
    val statement = conn.prepareStatement("insert into pageview values(?,?)")

    // Loop over the partition's records and write them to the database
    iter.foreach(f => {
      statement.setString(1, f._1)
      statement.setInt(2, f._2)
      statement.executeUpdate()
    })
    statement.close()
    conn.close()
  }
}
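With foreachPartition there is still one executeUpdate round trip per record. For larger result sets, a hedged variant of saveAsOracle (not from the original post, added alongside it in MyCountToOracle1 so the same imports apply) could queue the rows with addBatch and send them with executeBatch:

  def saveAsOracleBatched(iter: Iterator[(String, Int)]): Unit = {
    Class.forName("oracle.jdbc.OracleDriver") // register the Oracle driver
    val conn = DriverManager.getConnection("jdbc:oracle:thin:@192.168.112.130:1521/orcl", "scott", "tiger")
    val statement = conn.prepareStatement("insert into pageview values(?,?)")
    try {
      iter.foreach { case (page, count) =>
        statement.setString(1, page)
        statement.setInt(2, count)
        statement.addBatch() // queue the row instead of sending it immediately
      }
      statement.executeBatch() // send all queued rows for this partition in one batch
    } finally {
      statement.close()
      conn.close()
    }
  }

Swapping it in with rdd3.foreachPartition(saveAsOracleBatched) keeps one connection and one batch per partition.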
That concludes this article on Spark learning (5): saving Spark computation results to Oracle. Hopefully it is helpful to fellow developers!