fsanaulla / chronicler-spark

InfluxDB connector for Apache Spark on top of Chronicler



Build Status Maven Central Join the chat at https://gitter.im/chronicler/Lobby Codacy Badge

Open-source InfluxDB connector for Apache Spark on top of Chronicler.

Get Started

To get started, add the required module to your build.sbt:

// For RDD
libraryDependencies += "com.github.fsanaulla" %% "chronicler-spark-rdd" % <version>

// For Dataset
libraryDependencies += "com.github.fsanaulla" %% "chronicler-spark-ds" % <version>

// For Structured Streaming
libraryDependencies += "com.github.fsanaulla" %% "chronicler-spark-structured-streaming" % <version>

// For DStream
libraryDependencies += "com.github.fsanaulla" %% "chronicler-spark-streaming" % <version>


For RDD[T]:

import com.github.fsanaulla.chronicler.spark.rdd._

val rdd: RDD[T] = _
rdd.saveToInfluxDB("dbName", "measurementName")

For Dataset[T]:

import com.github.fsanaulla.chronicler.spark.ds._

val ds: Dataset[T] = _
ds.saveToInfluxDB("dbName", "measurementName")

For DataStreamWriter[T]:

import com.github.fsanaulla.chronicler.spark.structured.streaming._

val structStream: DataStreamWriter[T] = _
val saved = structStream.saveToInfluxDB("dbName", "measurementName")

For DStream[T]:

import com.github.fsanaulla.chronicler.spark.streaming._

val stream: DStream[T] = _
stream.saveToInfluxDB("dbName", "measurementName")