Spark 4.0.0 ScalaDoc - org.apache.spark.rdd.JdbcRDD
class JdbcRDD[T] extends RDD[T] with Logging
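JdbcRDD executes a SQL query over a JDBC connection and exposes the result set as an RDD, splitting the work into partitions by binding sub-ranges of a numeric key into the query's two ? placeholders. The member listing below omits the constructor; the following minimal sketch assumes the primary constructor as published in the Spark API, with a hypothetical in-memory H2 database and users table:

import java.sql.{DriverManager, ResultSet}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.JdbcRDD

val sc = new SparkContext(new SparkConf().setAppName("jdbc-rdd").setMaster("local[*]"))

// The SQL must contain exactly two '?' placeholders, which Spark binds per
// partition to sub-ranges of [lowerBound, upperBound]. Database URL, table,
// and column names here are illustrative assumptions.
val rows = new JdbcRDD(
  sc,
  () => DriverManager.getConnection("jdbc:h2:mem:testdb"), // fresh connection per partition
  "SELECT id, name FROM users WHERE id >= ? AND id <= ?",
  1L,    // lowerBound of the whole id range
  1000L, // upperBound of the whole id range
  3,     // numPartitions: the range is split into 3 sub-queries
  (rs: ResultSet) => (rs.getLong(1), rs.getString(2))) // mapRow: ResultSet => T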
Inheritance (linearization)
- JdbcRDD
- RDD
- Logging
- Serializable
- AnyRef
- Any
Type Members
- implicit class LogStringContext extends AnyRef
Value Members
- final def !=(arg0: Any): Boolean
- final def ##: Int
- def ++(other: RDD[T]): RDD[T]
- final def ==(arg0: Any): Boolean
- def aggregate[U](zeroValue: U)(seqOp: (U, T) => U, combOp: (U, U) => U)(implicit arg0: ClassTag[U]): U
- final def asInstanceOf[T0]: T0
- def barrier(): RDDBarrier[T]
- def cache(): JdbcRDD.this.type
- def cartesian[U](other: RDD[U])(implicit arg0: ClassTag[U]): RDD[(T, U)]
- def checkpoint(): Unit
- def cleanShuffleDependencies(blocking: Boolean = false): Unit
- def clearDependencies(): Unit
- def clone(): AnyRef
- def coalesce(numPartitions: Int, shuffle: Boolean = false, partitionCoalescer: Option[PartitionCoalescer] = Option.empty)(implicit ord: Ordering[T] = null): RDD[T]
- def collect[U](f: PartialFunction[T, U])(implicit arg0: ClassTag[U]): RDD[U]
- def collect(): Array[T]
- def compute(thePart: Partition, context: TaskContext): Iterator[T]
- def context: SparkContext
- def count(): Long
- def countApprox(timeout: Long, confidence: Double = 0.95): PartialResult[BoundedDouble]
- def countApproxDistinct(relativeSD: Double = 0.05): Long
- def countApproxDistinct(p: Int, sp: Int): Long
- def countByValue()(implicit ord: Ordering[T] = null): Map[T, Long]
- def countByValueApprox(timeout: Long, confidence: Double = 0.95)(implicit ord: Ordering[T] = null): PartialResult[Map[T, BoundedDouble]]
- final def dependencies: Seq[Dependency[_]]
- def distinct(): RDD[T]
- def distinct(numPartitions: Int)(implicit ord: Ordering[T] = null): RDD[T]
- final def eq(arg0: AnyRef): Boolean
- def equals(arg0: AnyRef): Boolean
- def filter(f: (T) => Boolean): RDD[T]
- def first(): T
- def firstParent[U](implicit arg0: ClassTag[U]): RDD[U]
- def flatMap[U](f: (T) => IterableOnce[U])(implicit arg0: ClassTag[U]): RDD[U]
- def fold(zeroValue: T)(op: (T, T) => T): T
- def foreach(f: (T) => Unit): Unit
- def foreachPartition(f: (Iterator[T]) => Unit): Unit
- def getCheckpointFile: Option[String]
- final def getClass(): Class[_ <: AnyRef]
- def getDependencies: Seq[Dependency[_]]
- final def getNumPartitions: Int
- def getOutputDeterministicLevel: DeterministicLevel.Value
- def getPartitions: Array[Partition]
- def getPreferredLocations(split: Partition): Seq[String]
- def getResourceProfile(): ResourceProfile
- def getStorageLevel: StorageLevel
- def glom(): RDD[Array[T]]
- def groupBy[K](f: (T) => K, p: Partitioner)(implicit kt: ClassTag[K], ord: Ordering[K] = null): RDD[(K, Iterable[T])]
- def groupBy[K](f: (T) => K, numPartitions: Int)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])]
- def groupBy[K](f: (T) => K)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])]
- def hashCode(): Int
- val id: Int
- def initializeLogIfNecessary(isInterpreter: Boolean, silent: Boolean): Boolean
- def initializeLogIfNecessary(isInterpreter: Boolean): Unit
- def intersection(other: RDD[T], numPartitions: Int): RDD[T]
- def intersection(other: RDD[T], partitioner: Partitioner)(implicit ord: Ordering[T] = null): RDD[T]
- def intersection(other: RDD[T]): RDD[T]
- lazy val isBarrier_: Boolean
- def isCheckpointed: Boolean
- def isEmpty(): Boolean
- final def isInstanceOf[T0]: Boolean
- def isTraceEnabled(): Boolean
- final def iterator(split: Partition, context: TaskContext): Iterator[T]
- def keyBy[K](f: (T) => K): RDD[(K, T)]
- def localCheckpoint(): JdbcRDD.this.type
- def log: Logger
- def logDebug(msg: => String, throwable: Throwable): Unit
- def logDebug(entry: LogEntry, throwable: Throwable): Unit
- def logDebug(entry: LogEntry): Unit
- def logDebug(msg: => String): Unit
- def logError(msg: => String, throwable: Throwable): Unit
- def logError(entry: LogEntry, throwable: Throwable): Unit
- def logError(entry: LogEntry): Unit
- def logError(msg: => String): Unit
- def logInfo(msg: => String, throwable: Throwable): Unit
- def logInfo(entry: LogEntry, throwable: Throwable): Unit
- def logInfo(entry: LogEntry): Unit
- def logInfo(msg: => String): Unit
- def logName: String
- def logTrace(msg: => String, throwable: Throwable): Unit
- def logTrace(entry: LogEntry, throwable: Throwable): Unit
- def logTrace(entry: LogEntry): Unit
- def logTrace(msg: => String): Unit
- def logWarning(msg: => String, throwable: Throwable): Unit
- def logWarning(entry: LogEntry, throwable: Throwable): Unit
- def logWarning(entry: LogEntry): Unit
- def logWarning(msg: => String): Unit
- def map[U](f: (T) => U)(implicit arg0: ClassTag[U]): RDD[U]
- def mapPartitions[U](f: (Iterator[T]) => Iterator[U], preservesPartitioning: Boolean = false)(implicit arg0: ClassTag[U]): RDD[U]
- def mapPartitionsWithEvaluator[U](evaluatorFactory: PartitionEvaluatorFactory[T, U])(implicit arg0: ClassTag[U]): RDD[U]
- def mapPartitionsWithIndex[U](f: (Int, Iterator[T]) => Iterator[U], preservesPartitioning: Boolean = false)(implicit arg0: ClassTag[U]): RDD[U]
- def max()(implicit ord: Ordering[T]): T
- def min()(implicit ord: Ordering[T]): T
- var name: String
- final def ne(arg0: AnyRef): Boolean
- final def notify(): Unit
- final def notifyAll(): Unit
- def parent[U](j: Int)(implicit arg0: ClassTag[U]): RDD[U]
- val partitioner: Option[Partitioner]
- final def partitions: Array[Partition]
- def persist(): JdbcRDD.this.type
- def persist(newLevel: StorageLevel): JdbcRDD.this.type
- def pipe(command: Seq[String], env: Map[String, String] = Map(), printPipeContext: ((String) => Unit) => Unit = null, printRDDElement: (T, (String) => Unit) => Unit = null, separateWorkingDir: Boolean = false, bufferSize: Int = 8192, encoding: String = Codec.defaultCharsetCodec.name): RDD[String]
- def pipe(command: String, env: Map[String, String]): RDD[String]
- def pipe(command: String): RDD[String]
- final def preferredLocations(split: Partition): Seq[String]
- def randomSplit(weights: Array[Double], seed: Long = Utils.random.nextLong): Array[RDD[T]]
- def reduce(f: (T, T) => T): T
- def repartition(numPartitions: Int)(implicit ord: Ordering[T] = null): RDD[T]
- def sample(withReplacement: Boolean, fraction: Double, seed: Long = Utils.random.nextLong): RDD[T]
- def saveAsObjectFile(path: String): Unit
- def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]): Unit
- def saveAsTextFile(path: String): Unit
- def setName(_name: String): JdbcRDD.this.type
- def sortBy[K](f: (T) => K, ascending: Boolean = true, numPartitions: Int = this.partitions.length)(implicit ord: Ordering[K], ctag: ClassTag[K]): RDD[T]
- def sparkContext: SparkContext
- def subtract(other: RDD[T], p: Partitioner)(implicit ord: Ordering[T] = null): RDD[T]
- def subtract(other: RDD[T], numPartitions: Int): RDD[T]
- def subtract(other: RDD[T]): RDD[T]
- final def synchronized[T0](arg0: => T0): T0
- def take(num: Int): Array[T]
- def takeOrdered(num: Int)(implicit ord: Ordering[T]): Array[T]
- def takeSample(withReplacement: Boolean, num: Int, seed: Long = Utils.random.nextLong): Array[T]
- def toDebugString: String
- def toJavaRDD(): JavaRDD[T]
- def toLocalIterator: Iterator[T]
- def toString(): String
- def top(num: Int)(implicit ord: Ordering[T]): Array[T]
- def treeAggregate[U](zeroValue: U, seqOp: (U, T) => U, combOp: (U, U) => U, depth: Int, finalAggregateOnExecutor: Boolean)(implicit arg0: ClassTag[U]): U
- def treeAggregate[U](zeroValue: U)(seqOp: (U, T) => U, combOp: (U, U) => U, depth: Int = 2)(implicit arg0: ClassTag[U]): U
- def treeReduce(f: (T, T) => T, depth: Int = 2): T
- def union(other: RDD[T]): RDD[T]
- def unpersist(blocking: Boolean = false): JdbcRDD.this.type
- final def wait(arg0: Long, arg1: Int): Unit
- final def wait(arg0: Long): Unit
- final def wait(): Unit
- def withLogContext(context: Map[String, String])(body: => Unit): Unit
- def withResources(rp: ResourceProfile): JdbcRDD.this.type
- def zip[U](other: RDD[U])(implicit arg0: ClassTag[U]): RDD[(T, U)]
- def zipPartitions[B, C, D, V](rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D])(f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[D], arg3: ClassTag[V]): RDD[V]
- def zipPartitions[B, C, D, V](rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D], preservesPartitioning: Boolean)(f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[D], arg3: ClassTag[V]): RDD[V]
- def zipPartitions[B, C, V](rdd2: RDD[B], rdd3: RDD[C])(f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[V]): RDD[V]
- def zipPartitions[B, C, V](rdd2: RDD[B], rdd3: RDD[C], preservesPartitioning: Boolean)(f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[V]): RDD[V]
- def zipPartitions[B, V](rdd2: RDD[B])(f: (Iterator[T], Iterator[B]) => Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[V]): RDD[V]
- def zipPartitions[B, V](rdd2: RDD[B], preservesPartitioning: Boolean)(f: (Iterator[T], Iterator[B]) => Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[V]): RDD[V]
- def zipPartitionsWithEvaluator[U](rdd2: RDD[T], evaluatorFactory: PartitionEvaluatorFactory[T, U])(implicit arg0: ClassTag[U]): RDD[U]
- def zipWithIndex(): RDD[(T, Long)]
- def zipWithUniqueId(): RDD[(T, Long)]
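Most of the value members above are inherited from RDD[T] and compose as usual. A short sketch of a few of them, continuing the hypothetical rows RDD from the construction example near the top of this page:

val names    = rows.map { case (_, name) => name }       // transform each (id, name) row
val recent   = rows.filter { case (id, _) => id > 500L } // keep rows with id > 500
val total    = rows.count()                              // action: runs the partitioned JDBC queries
val firstTen = rows.take(10)                             // first 10 rows across partitions

// aggregate: seqOp folds rows into a per-partition sum of ids,
// combOp merges the per-partition sums into one result.
val idSum = rows.aggregate(0L)((acc, row) => acc + row._1, _ + _)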
Deprecated Value Members
- def finalize(): Unit
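The persistence-related members (cache, persist, checkpoint, localCheckpoint, unpersist, getStorageLevel) are worth noting for JdbcRDD in particular, because re-evaluating an unpersisted RDD re-runs the JDBC queries. A minimal lifecycle sketch, under the same assumptions as the examples above (the checkpoint directory path is illustrative):

import org.apache.spark.storage.StorageLevel

rows.persist(StorageLevel.MEMORY_AND_DISK)    // materialize once; avoid re-querying the database
sc.setCheckpointDir("/tmp/spark-checkpoints") // must be set before checkpoint()
rows.checkpoint()                             // truncates lineage at the next materialization
rows.count()                                  // triggers the query, the cache, and the checkpoint
println(rows.getStorageLevel)                 // the level set above
rows.unpersist(blocking = false)              // release cached blocks asynchronously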