Showing content from https://api-docs.databricks.com/scala/spark/latest/org/apache/spark/sql/SQLContext.html below:
Databricks Scala Spark API - org.apache.spark.sql.SQLContext
class SQLContext extends Logging with Serializable
î· î
Ordering
- Grouped
- Alphabetic
- By Inheritance
Inherited
- SQLContext
- Serializable
- Serializable
- Logging
- AnyRef
- Any
Value Members
- î
final def !=(arg0: Any): Boolean
- î
final def ##(): Int
- î
final def ==(arg0: Any): Boolean
- î
final def asInstanceOf[T0]: T0
- î
def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame
- î
def cacheTable(tableName: String): Unit
- î
def clearCache(): Unit
- î
def clone(): AnyRef
- î
def createDataFrame(data: List[_], beanClass: Class[_]): DataFrame
- î
def createDataFrame(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame
- î
def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame
- î
def createDataFrame(rows: List[Row], schema: StructType): DataFrame
- î
def createDataFrame(rowRDD: JavaRDD[Row], schema: StructType): DataFrame
- î
def createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame
- î
def createDataFrame[A <: Product](data: Seq[A])(implicit arg0: scala.reflect.api.JavaUniverse.TypeTag[A]): DataFrame
- î
def createDataFrame[A <: Product](rdd: RDD[A])(implicit arg0: scala.reflect.api.JavaUniverse.TypeTag[A]): DataFrame
- î
def createDataset[T](data: List[T])(implicit arg0: Encoder[T]): Dataset[T]
- î
def createDataset[T](data: RDD[T])(implicit arg0: Encoder[T]): Dataset[T]
- î
def createDataset[T](data: Seq[T])(implicit arg0: Encoder[T]): Dataset[T]
- î
def dropTempTable(tableName: String): Unit
- î
def emptyDataFrame: DataFrame
- î
final def eq(arg0: AnyRef): Boolean
- î
def equals(arg0: Any): Boolean
- î
def experimental: ExperimentalMethods
- î
def finalize(): Unit
- î
def getAllConfs: Map[String, String]
- î
final def getClass(): Class[_]
- î
def getConf(key: String, defaultValue: String): String
- î
def getConf(key: String): String
- î
def hashCode(): Int
- î
def initializeLogIfNecessary(isInterpreter: Boolean, silent: Boolean): Boolean
- î
def initializeLogIfNecessary(isInterpreter: Boolean): Unit
- î
def isCached(tableName: String): Boolean
- î
final def isInstanceOf[T0]: Boolean
- î
def isTraceEnabled(): Boolean
- î
def listenerManager: ExecutionListenerManager
- î
def log: Logger
- î
def logDebug(msg: => String, throwable: Throwable): Unit
-
def logDebug(msg: => String): Unit
-
def logError(msg: => String, throwable: Throwable): Unit
-
def logError(msg: => String): Unit
-
def logInfo(msg: => String, throwable: Throwable): Unit
-
def logInfo(msg: => String): Unit
-
def logName: String
-
def logTrace(msg: => String, throwable: Throwable): Unit
-
def logTrace(msg: => String): Unit
-
def logWarning(msg: => String, throwable: Throwable): Unit
-
def logWarning(msg: => String): Unit
- î
final def ne(arg0: AnyRef): Boolean
- î
def newSession(): SQLContext
- î
final def notify(): Unit
- î
final def notifyAll(): Unit
- î
def range(start: Long, end: Long, step: Long, numPartitions: Int): DataFrame
- î
def range(start: Long, end: Long, step: Long): DataFrame
- î
def range(start: Long, end: Long): DataFrame
- î
def range(end: Long): DataFrame
- î
def read: DataFrameReader
- î
def readStream: DataStreamReader
- î
def setConf(key: String, value: String): Unit
- î
def setConf(props: Properties): Unit
- î
def sparkContext: SparkContext
- î
val sparkSession: SparkSession
- î
def sql(sqlText: String): DataFrame
- î
def streams: StreamingQueryManager
- î
final def synchronized[T0](arg0: => T0): T0
- î
def table(tableName: String): DataFrame
- î
def tableNames(databaseName: String): Array[String]
- î
def tableNames(): Array[String]
- î
def tables(databaseName: String): DataFrame
- î
def tables(): DataFrame
- î
def toString(): String
- î
def udf: UDFRegistration
- î
def uncacheTable(tableName: String): Unit
- î
final def wait(): Unit
- î
final def wait(arg0: Long, arg1: Int): Unit
- î
final def wait(arg0: Long): Unit
- î
object implicits extends SQLImplicits with Serializable
Deprecated Value Members
- î
def applySchema(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame
- î
def applySchema(rdd: RDD[_], beanClass: Class[_]): DataFrame
- î
def applySchema(rowRDD: JavaRDD[Row], schema: StructType): DataFrame
- î
def applySchema(rowRDD: RDD[Row], schema: StructType): DataFrame
- î
def createExternalTable(tableName: String, source: String, schema: StructType, options: Map[String, String]): DataFrame
- î
def createExternalTable(tableName: String, source: String, schema: StructType, options: Map[String, String]): DataFrame
- î
def createExternalTable(tableName: String, source: String, options: Map[String, String]): DataFrame
- î
def createExternalTable(tableName: String, source: String, options: Map[String, String]): DataFrame
- î
def createExternalTable(tableName: String, path: String, source: String): DataFrame
- î
def createExternalTable(tableName: String, path: String): DataFrame
- î
def jdbc(url: String, table: String, theParts: Array[String]): DataFrame
- î
def jdbc(url: String, table: String, columnName: String, lowerBound: Long, upperBound: Long, numPartitions: Int): DataFrame
- î
def jdbc(url: String, table: String): DataFrame
- î
def jsonFile(path: String, samplingRatio: Double): DataFrame
- î
def jsonFile(path: String, schema: StructType): DataFrame
- î
def jsonFile(path: String): DataFrame
- î
def jsonRDD(json: JavaRDD[String], samplingRatio: Double): DataFrame
- î
def jsonRDD(json: RDD[String], samplingRatio: Double): DataFrame
- î
def jsonRDD(json: JavaRDD[String], schema: StructType): DataFrame
- î
def jsonRDD(json: RDD[String], schema: StructType): DataFrame
- î
def jsonRDD(json: JavaRDD[String]): DataFrame
- î
def jsonRDD(json: RDD[String]): DataFrame
- î
def load(source: String, schema: StructType, options: Map[String, String]): DataFrame
- î
def load(source: String, schema: StructType, options: Map[String, String]): DataFrame
- î
def load(source: String, options: Map[String, String]): DataFrame
- î
def load(source: String, options: Map[String, String]): DataFrame
- î
def load(path: String, source: String): DataFrame
- î
def load(path: String): DataFrame
- î
def parquetFile(paths: String*): DataFrame
Inherited from Serializable
Inherited from Serializable
Inherited from Logging
Inherited from AnyRef
Inherited from Any
Basic Operations
Cached Table Management
Configuration
dataframe
Custom DataFrame Creation
Custom Dataset Creation
Persistent Catalog DDL
Generic Data Sources
Specific Data Sources
Support functions for language integrated queries
RetroSearch is an open source project built by @garambo
| Open a GitHub Issue
Search and Browse the WWW like it's 1997 | Search results from DuckDuckGo
HTML:
3.2
| Encoding:
UTF-8
| Version:
0.7.4