val hbaseConf =
HBaseConfiguration.create()
hbaseConf.set(HConstants.ZOOKEEPER_QUORUM, ":")
hbaseConf.set(TableInputFormat.INPUT_TABLE, "")
val hBaseRDD =
sc.newAPIHadoopRDD(hbaseConf, classOf[TableInputFormat],
classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
classOf[org.apache.hadoop.hbase.client.Result])
val count =
hBaseRDD.count()
Exception in thread "main" java.lang.RuntimeException:
java.lang.NullPointerException
at
org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.
at
org.apache.hadoop.hbase.client.ClientScanner.call(ClientScanner.
at
org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.
at
org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.
at
org.apache.hadoop.hbase.client.ClientScanner.(ClientScanner.
at
org.apache.hadoop.hbase.client.HTable.getScanner(HTable.
at
org.apache.hadoop.hbase.client.MetaScanner.metaScan(MetaScanner.
at
org.apache.hadoop.hbase.client.MetaScanner.metaScan(MetaScanner.
at
org.apache.hadoop.hbase.client.MetaScanner.allTableRegions(MetaScanner.
at
org.apache.hadoop.hbase.client.HRegionLocator.getAllRegionLocations(HRegionLocator.
at
org.apache.hadoop.hbase.util.RegionSizeCalculator.init(RegionSizeCalculator.
at
org.apache.hadoop.hbase.util.RegionSizeCalculator.(RegionSizeCalculator.
at
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getSplits(TableInputFormatBase.
at
org.apache.hadoop.hbase.mapreduce.TableInputFormat.getSplits(TableInputFormat.
at
org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:125)
at
org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
at
org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
at
scala.Option.getOrElse(Option.scala:121)
at
org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
at
org.apache.spark.SparkContext.runJob(SparkContext.scala:1968)
at
org.apache.spark.rdd.RDD.count(RDD.scala:1158)
at
com.pr.fortest.HbaseDirectReader$.main(HbaseDirectReader.scala:41)
at
com.pr.fortest.HbaseDirectReader.main(HbaseDirectReader.scala)
at
sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.
at
at
org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:751)
at
org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:187)
at
org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:212)
at
org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:126)
at
org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.NullPointerException
at
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.getMetaReplicaNodes(ZooKeeperWatcher.
at
org.apache.hadoop.hbase.zookeeper.MetaTableLocator.blockUntilAvailable(MetaTableLocator.
at
org.apache.hadoop.hbase.client.ZooKeeperRegistry.getMetaRegionLocation(ZooKeeperRegistry.
at
org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateMeta(ConnectionManager.java:1186
)
at
org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:11
53)
at
org.apache.hadoop.hbase.client.RpcRetryingCallerWithReadReplicas.getRegionLocations(RpcRetryingCallerWithReadRepl
icas.
at
org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.
at
org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.
at
org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.
... 31 more
错误信息中有ZooKeeperWatcher.getMetaReplicaNodes,说明读取不到数据库的meta数据。zookeeper中hbase的根目录为/hbase,是不是这个不对导致meta信息读取不到。