class SQLConf extends Serializable with Logging
A class that enables the setting and getting of mutable config parameters/hints.
In the presence of a SQLContext, these can be set and queried by passing SET commands into Spark SQL's query functions (i.e. sql()). Otherwise, users of this class can modify the hints by programmatically calling the setters and getters of this class.
SQLConf is thread-safe (internally synchronized, so safe to be used in multiple threads).
- Alphabetic
- By Inheritance
- SQLConf
- Logging
- Serializable
- AnyRef
- Any
- Hide All
- Show All
- Public
- Protected
Instance Constructors
- new SQLConf()
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def adaptiveExecutionEnabled: Boolean
- def adaptiveExecutionLogLevel: String
- def addSingleFileInAddFile: Boolean
- def advancedPartitionPredicatePushdownEnabled: Boolean
- def allowAutoGeneratedAliasForView: Boolean
- def allowNegativeScaleOfDecimalEnabled: Boolean
- def allowNonEmptyLocationInCTAS: Boolean
- def allowStarWithSingleTableIdentifierInCount: Boolean
- def allowsTempViewCreationWithMultipleNameparts: Boolean
- def analyzerMaxIterations: Int
Spark SQL Params/Hints
- def ansiEnabled: Boolean
- def ansiRelationPrecedence: Boolean
- def arrowLocalRelationThreshold: Long
- def arrowMaxRecordsPerBatch: Int
- def arrowPySparkEnabled: Boolean
- def arrowPySparkFallbackEnabled: Boolean
- def arrowPySparkSelfDestructEnabled: Boolean
- def arrowSafeTypeConversion: Boolean
- def arrowSparkREnabled: Boolean
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def autoBroadcastJoinThreshold: Long
- def autoBucketedScanEnabled: Boolean
- def autoSizeUpdateEnabled: Boolean
- def avroCompressionCodec: String
- def avroDeflateLevel: Int
- def avroFilterPushDown: Boolean
- def broadcastHashJoinOutputPartitioningExpandLimit: Int
- def broadcastTimeout: Long
- def bucketingEnabled: Boolean
- def bucketingMaxBuckets: Int
- def cacheVectorizedReaderEnabled: Boolean
- def cartesianProductExecBufferInMemoryThreshold: Int
- def cartesianProductExecBufferSpillThreshold: Int
- def caseSensitiveAnalysis: Boolean
- def caseSensitiveInferenceMode: SQLConf.HiveCaseSensitiveInferenceMode.Value
- def castDatetimeToString: Boolean
- def cboEnabled: Boolean
- def charVarcharAsString: Boolean
- def checkpointLocation: Option[String]
- def checkpointRenamedFileCheck: Boolean
- def clear(): Unit
- def cliPrintHeader: Boolean
- def clone(): SQLConf
- Definition Classes
- SQLConf → AnyRef
- def coalesceBucketsInJoinEnabled: Boolean
- def coalesceBucketsInJoinMaxBucketRatio: Int
- def coalesceShufflePartitionsEnabled: Boolean
- def codegenCacheMaxEntries: Int
- def codegenComments: Boolean
- def codegenFallback: Boolean
- def codegenSplitAggregateFunc: Boolean
- def columnBatchSize: Int
- def columnNameOfCorruptRecord: String
- def concatBinaryAsString: Boolean
- def constraintPropagationEnabled: Boolean
- def contains(key: String): Boolean
Return whether a given key is set in this SQLConf.
- def continuousStreamingEpochBacklogQueueSize: Int
- def continuousStreamingExecutorPollIntervalMs: Long
- def continuousStreamingExecutorQueueSize: Int
- def convertCTAS: Boolean
- def copy(entries: (ConfigEntry[_], Any)*): SQLConf
- def crossJoinEnabled: Boolean
- def csvColumnPruning: Boolean
- def csvEnableDateTimeParsingFallback: Option[Boolean]
- def csvExpressionOptimization: Boolean
- def csvFilterPushDown: Boolean
- def dataFramePivotMaxValues: Int
- def dataFrameRetainGroupColumns: Boolean
- def dataFrameSelfJoinAutoResolveAmbiguity: Boolean
- def datetimeJava8ApiEnabled: Boolean
- def decimalOperationsAllowPrecisionLoss: Boolean
- def decorrelateInnerQueryEnabled: Boolean
- def defaultColumnAllowedProviders: String
- def defaultDataSourceName: String
- def defaultDatabase: String
- def defaultNumShufflePartitions: Int
- def defaultSizeInBytes: Long
- def disabledJdbcConnectionProviders: String
- def disabledV2StreamingMicroBatchReaders: String
- def disabledV2StreamingWriters: String
- def doubleQuotedIdentifiers: Boolean
- def dynamicPartitionPruningEnabled: Boolean
- def dynamicPartitionPruningFallbackFilterRatio: Double
- def dynamicPartitionPruningReuseBroadcastOnly: Boolean
- def dynamicPartitionPruningUseStats: Boolean
- def eltOutputAsString: Boolean
- def enableDefaultColumns: Boolean
- def enableRadixSort: Boolean
- def enableTwoLevelAggMap: Boolean
- def enableVectorizedHashMap: Boolean
- def enforceReservedKeywords: Boolean
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equals(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef → Any
- def errorMessageFormat: ErrorMessageFormat.Value
- def escapedStringLiterals: Boolean
- def exchangeReuseEnabled: Boolean
- def exponentLiteralAsDecimalEnabled: Boolean
- def fallBackToHdfsForStatsEnabled: Boolean
- def fastFailFileFormatOutput: Boolean
- def fastHashAggregateRowMaxCapacityBit: Int
- def fetchShuffleBlocksInBatch: Boolean
- def fileCommitProtocolClass: String
- def fileCompressionFactor: Double
- def fileSinkLogCleanupDelay: Long
- def fileSinkLogCompactInterval: Int
- def fileSinkLogDeletion: Boolean
- def fileSourceLogCleanupDelay: Long
- def fileSourceLogCompactInterval: Int
- def fileSourceLogDeletion: Boolean
- def fileStreamSinkMetadataIgnored: Boolean
- def filesMaxPartitionBytes: Long
- def filesMinPartitionNum: Option[Int]
- def filesOpenCostInBytes: Long
- def filesourcePartitionFileCacheSize: Long
- def finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.Throwable])
- def gatherFastStats: Boolean
- def getAllConfs: Map[String, String]
Return all the configuration properties that have been set (i.e. not the default).
Return all the configuration properties that have been set (i.e. not the default). This creates a new copy of the config properties in the form of a Map.
- def getAllDefinedConfs: Seq[(String, String, String, String)]
Return all the configuration definitions that have been defined in SQLConf.
Return all the configuration definitions that have been defined in SQLConf. Each definition contains key, defaultValue and doc.
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
- def getConf[T](entry: OptionalConfigEntry[T]): Option[T]
Return the value of an optional Spark SQL configuration property for the given key.
Return the value of an optional Spark SQL configuration property for the given key. If the key is not set yet, returns None.
- def getConf[T](entry: ConfigEntry[T]): T
Return the value of Spark SQL configuration property for the given key.
Return the value of Spark SQL configuration property for the given key. If the key is not set yet, return the defaultValue in ConfigEntry.
- def getConf[T](entry: ConfigEntry[T], defaultValue: T): T
Return the value of Spark SQL configuration property for the given key.
Return the value of Spark SQL configuration property for the given key. If the key is not set yet, return defaultValue. This is useful when the defaultValue in ConfigEntry is not the desired one.
- def getConfString(key: String, defaultValue: String): String
Return the string value of Spark SQL configuration property for the given key.
Return the string value of Spark SQL configuration property for the given key. If the key is not set yet, return defaultValue.
- def getConfString(key: String): String
Return the value of Spark SQL configuration property for the given key.
Return the value of Spark SQL configuration property for the given key.
- Annotations
- @throws("if key is not set")
- def groupByAliases: Boolean
- def groupByOrdinal: Boolean
- def groupingIdWithAppendedUserGroupByEnabled: Boolean
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
- def hintErrorHandler: HintErrorHandler
Returns the error handler for handling hint errors.
- def histogramEnabled: Boolean
- def histogramNumBins: Int
- def histogramNumericPropagateInputType: Boolean
- def hiveThriftServerSingleSession: Boolean
- def hugeMethodLimit: Int
- def ignoreCorruptFiles: Boolean
- def ignoreDataLocality: Boolean
- def ignoreMissingFiles: Boolean
- def ignoreMissingParquetFieldId: Boolean
- def inMemoryPartitionPruning: Boolean
- def inMemoryTableScanStatisticsEnabled: Boolean
- def inferDictAsStruct: Boolean
- def initializeLogIfNecessary(isInterpreter: Boolean, silent: Boolean): Boolean
- Attributes
- protected
- Definition Classes
- Logging
- def initializeLogIfNecessary(isInterpreter: Boolean): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def integerGroupingIdEnabled: Boolean
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def isModifiable(key: String): Boolean
- def isOrcSchemaMergingEnabled: Boolean
- def isParquetBinaryAsString: Boolean
- def isParquetINT96AsTimestamp: Boolean
- def isParquetINT96TimestampConversion: Boolean
- def isParquetSchemaMergingEnabled: Boolean
- def isParquetSchemaRespectSummaries: Boolean
- def isReplEagerEvalEnabled: Boolean
- def isStateSchemaCheckEnabled: Boolean
- def isTraceEnabled(): Boolean
- Attributes
- protected
- Definition Classes
- Logging
- def isUnsupportedOperationCheckEnabled: Boolean
- def joinReorderCardWeight: Double
- def joinReorderDPStarFilter: Boolean
- def joinReorderDPThreshold: Int
- def joinReorderEnabled: Boolean
- def jsonEnableDateTimeParsingFallback: Option[Boolean]
- def jsonEnablePartialResults: Boolean
- def jsonExpressionOptimization: Boolean
- def jsonFilterPushDown: Boolean
- def jsonGeneratorIgnoreNullFields: Boolean
- def jsonWriteNullIfWithDefaultValue: Boolean
- def legacyInferArrayTypeFromFirstElement: Boolean
- def legacyIntervalEnabled: Boolean
- def legacyMsSqlServerNumericMappingEnabled: Boolean
- def legacyParquetNanosAsLong: Boolean
- def legacyPathOptionBehavior: Boolean
- def legacySizeOfNull: Boolean
- def legacyStatisticalAggregate: Boolean
- def legacyTimeParserPolicy: SQLConf.LegacyBehaviorPolicy.Value
- def limitInitialNumPartitions: Int
- def limitScaleUpFactor: Int
- def literalPickMinimumPrecision: Boolean
- def log: Logger
- Attributes
- protected
- Definition Classes
- Logging
- def logDebug(msg: => String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logDebug(msg: => String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logError(msg: => String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logError(msg: => String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logInfo(msg: => String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logInfo(msg: => String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logName: String
- Attributes
- protected
- Definition Classes
- Logging
- def logTrace(msg: => String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logTrace(msg: => String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logWarning(msg: => String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logWarning(msg: => String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def loggingMaxLinesForCodegen: Int
- def manageFilesourcePartitions: Boolean
- def maxBatchesToRetainInMemory: Int
- def maxConcurrentOutputFileWriters: Int
- def maxMetadataStringLength: Int
- def maxNestedViewDepth: Int
- def maxPlanStringLength: Int
- def maxRecordsPerFile: Long
- def maxToStringFields: Int
- def metadataCacheTTL: Long
- def metastoreDropPartitionsByName: Boolean
- def metastorePartitionPruning: Boolean
- def metastorePartitionPruningFallbackOnException: Boolean
- def metastorePartitionPruningFastFallback: Boolean
- def metastorePartitionPruningInSetThreshold: Int
- def methodSplitThreshold: Int
- def minBatchesToRetain: Int
- def nameNonStructGroupingKeyAsValue: Boolean
- def ndvMaxError: Double
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def nestedPruningOnExpressions: Boolean
- def nestedSchemaPruningEnabled: Boolean
- def nonEmptyPartitionRatioForBroadcastJoin: Double
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- def numShufflePartitions: Int
- def objectAggSortBasedFallbackThreshold: Int
- def offHeapColumnVectorEnabled: Boolean
- def optimizeNullAwareAntiJoin: Boolean
- def optimizerExcludedRules: Option[String]
- def optimizerInSetConversionThreshold: Int
- def optimizerInSetSwitchThreshold: Int
- def optimizerMaxIterations: Int
- def optimizerMetadataOnly: Boolean
- def orcAggregatePushDown: Boolean
- def orcCompressionCodec: String
- def orcFilterPushDown: Boolean
- def orcVectorizedReaderBatchSize: Int
- def orcVectorizedReaderEnabled: Boolean
- def orcVectorizedReaderNestedColumnEnabled: Boolean
- def orcVectorizedWriterBatchSize: Int
- def orderByOrdinal: Boolean
- def pandasGroupedMapAssignColumnsByName: Boolean
- def pandasUDFBufferSize: Int
- def parallelFileListingInStatsComputation: Boolean
- def parallelPartitionDiscoveryParallelism: Int
- def parallelPartitionDiscoveryThreshold: Int
- def parquetAggregatePushDown: Boolean
- def parquetCompressionCodec: String
- def parquetFieldIdReadEnabled: Boolean
- def parquetFieldIdWriteEnabled: Boolean
- def parquetFilterPushDown: Boolean
- def parquetFilterPushDownDate: Boolean
- def parquetFilterPushDownDecimal: Boolean
- def parquetFilterPushDownInFilterThreshold: Int
- def parquetFilterPushDownStringPredicate: Boolean
- def parquetFilterPushDownTimestamp: Boolean
- def parquetInferTimestampNTZEnabled: Boolean
- def parquetOutputCommitterClass: String
- def parquetOutputTimestampType: SQLConf.ParquetOutputTimestampType.Value
- def parquetRecordFilterEnabled: Boolean
- def parquetVectorizedReaderBatchSize: Int
- def parquetVectorizedReaderEnabled: Boolean
- def parquetVectorizedReaderNestedColumnEnabled: Boolean
- def partitionColumnTypeInferenceEnabled: Boolean
- def partitionOverwriteMode: SQLConf.PartitionOverwriteMode.Value
- def percentileAccuracy: Int
- def planChangeBatches: Option[String]
- def planChangeLogLevel: String
- def planChangeRules: Option[String]
- def planStatsEnabled: Boolean
- def plannedWriteEnabled: Boolean
- def preferSortMergeJoin: Boolean
- def pysparkJVMStacktraceEnabled: Boolean
- def pysparkSimplifiedTraceback: Boolean
- def rangeExchangeSampleSizePerPartition: Int
- def readSideCharPadding: Boolean
- val reader: ConfigReader
- Attributes
- protected
- def redactOptions[K, V](options: Seq[(K, V)]): Seq[(K, V)]
Redacts the given option map according to the description of SQL_OPTIONS_REDACTION_PATTERN.
- def redactOptions[K, V](options: Map[K, V]): Map[K, V]
Redacts the given option map according to the description of SQL_OPTIONS_REDACTION_PATTERN.
- def replEagerEvalMaxNumRows: Int
- def replEagerEvalTruncate: Int
- def replaceDatabricksSparkAvroEnabled: Boolean
- def replaceExceptWithFilter: Boolean
- def resolver: Resolver
Returns the Resolver for the current configuration, which can be used to determine if two identifiers are equal.
- def runSQLonFile: Boolean
- def runtimeFilterBloomFilterEnabled: Boolean
- def runtimeFilterCreationSideThreshold: Long
- def runtimeFilterSemiJoinReductionEnabled: Boolean
- def runtimeRowLevelOperationGroupFilterEnabled: Boolean
- def serializerNestedSchemaPruningEnabled: Boolean
- def sessionLocalTimeZone: String
- def sessionWindowBufferInMemoryThreshold: Int
- def sessionWindowBufferSpillThreshold: Int
- def setCommandRejectsSparkCoreConfs: Boolean
- def setConf[T](entry: ConfigEntry[T], value: T): Unit
Set the given Spark SQL configuration property.
- def setConf(props: Properties): Unit
Set Spark SQL configuration properties.
- def setConfString(key: String, value: String): Unit
Set the given Spark SQL configuration property using a string value.
- def setConfWithCheck(key: String, value: String): Unit
- Attributes
- protected
- def setOpsPrecedenceEnforced: Boolean
- val settings: Map[String, String]
Only low degree of contention is expected for conf, thus NOT using ConcurrentHashMap.
Only low degree of contention is expected for conf, thus NOT using ConcurrentHashMap.
- Attributes
- protected[spark]
- def sortBeforeRepartition: Boolean
- def sortMergeJoinExecBufferInMemoryThreshold: Int
- def sortMergeJoinExecBufferSpillThreshold: Int
- def starSchemaDetection: Boolean
- def starSchemaFTRatio: Double
- def stateStoreCompressionCodec: String
- def stateStoreFormatValidationEnabled: Boolean
- def stateStoreMinDeltasForSnapshot: Int
- def stateStoreProviderClass: String
- def stateStoreSkipNullsForStreamStreamJoins: Boolean
- def statefulOperatorCorrectnessCheckEnabled: Boolean
- def storeAnalyzedPlanForView: Boolean
- def storeAssignmentPolicy: SQLConf.StoreAssignmentPolicy.Value
- def streamingFileCommitProtocolClass: String
- def streamingMaintenanceInterval: Long
- def streamingMetricsEnabled: Boolean
- def streamingNoDataMicroBatchesEnabled: Boolean
- def streamingNoDataProgressEventInterval: Long
- def streamingPollingDelay: Long
- def streamingProgressRetention: Int
- def streamingSchemaInference: Boolean
- def streamingSessionWindowMergeSessionInLocalPartition: Boolean
- def stringRedactionPattern: Option[Regex]
- def subexpressionEliminationCacheMaxEntries: Int
- def subexpressionEliminationEnabled: Boolean
- def subqueryReuseEnabled: Boolean
- def supportQuotedRegexColumnName: Boolean
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- def tableRelationCacheSize: Int
- def timestampType: AtomicType
- def toString(): String
- Definition Classes
- AnyRef → Any
- def topKSortFallbackThreshold: Int
- def truncateTableIgnorePermissionAcl: Boolean
- def uiExplainMode: String
- def unsetConf(entry: ConfigEntry[_]): Unit
- def unsetConf(key: String): Unit
- def useCompression: Boolean
- def useCurrentSQLConfigsForView: Boolean
- def useDeprecatedKafkaOffsetFetching: Boolean
- def useNullsForMissingDefaultColumnValues: Boolean
- def useObjectHashAggregation: Boolean
- def useV1Command: Boolean
- def v2BucketingEnabled: Boolean
- def v2BucketingPartiallyClusteredDistributionEnabled: Boolean
- def v2BucketingPushPartValuesEnabled: Boolean
- def validatePartitionColumns: Boolean
- def variableSubstituteEnabled: Boolean
- def verifyPartitionPath: Boolean
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- def warehousePath: String
- def wholeStageEnabled: Boolean
- def wholeStageMaxNumFields: Int
- def wholeStageSplitConsumeFuncByOperator: Boolean
- def wholeStageUseIdInClassName: Boolean
- def windowExecBufferInMemoryThreshold: Int
- def windowExecBufferSpillThreshold: Int
- def writeLegacyParquetFormat: Boolean