[SPARK-49249][SPARK-49122] Artifact isolation in Spark Classic #48120
base: master
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.spark.sql.api.java.UDF2
+
+class IntSumUdf extends UDF2[Long, Long, Long] {
+  override def call(t1: Long, t2: Long): Long = t1 + t2
+}
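For context, a UDF2 implementation like this is registered by class name and then callable from SQL. A minimal usage sketch; the jar path is a placeholder, and the addArtifact call assumes the session-artifact API this PR builds on:

    // Illustrative only: ship the compiled UDF class to the session as an
    // artifact, register it by class name, and call it from SQL.
    spark.addArtifact("/tmp/IntSumUdf.jar")
    spark.udf.registerJava("intSum", "IntSumUdf",
      org.apache.spark.sql.types.DataTypes.LongType)
    spark.sql("SELECT intSum(1L, 2L)").show()  // 3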
@@ -44,7 +44,7 @@ import org.apache.spark.util.Utils
  * @since 1.3.0
  */
 @Stable
-class UDFRegistration private[sql] (functionRegistry: FunctionRegistry)
+class UDFRegistration private[sql] (session: SparkSession, functionRegistry: FunctionRegistry)
   extends api.UDFRegistration
   with Logging {
   protected[sql] def registerPython(name: String, udf: UserDefinedPythonFunction): Unit = {

@@ -121,7 +121,9 @@ class UDFRegistration private[sql] (functionRegistry: FunctionRegistry)
    */
   private[sql] def registerJavaUDAF(name: String, className: String): Unit = {
     try {
-      val clazz = Utils.classForName[AnyRef](className)
+      val clazz = session.artifactManager.withResources {
+        Utils.classForName[AnyRef](className, noSparkClassLoader = true)
+      }
       if (!classOf[UserDefinedAggregateFunction].isAssignableFrom(clazz)) {
         throw QueryCompilationErrors
           .classDoesNotImplementUserDefinedAggregateFunctionError(className)
@@ -145,9 +147,11 @@ class UDFRegistration private[sql] (functionRegistry: FunctionRegistry)
    * @param returnDataType return type of udf. If it is null, spark would try to infer
    *                       via reflection.
    */
-  private[sql] def registerJava(name: String, className: String, returnDataType: DataType): Unit = {
+  def registerJava(name: String, className: String, returnDataType: DataType): Unit = {
Review thread (on lines -148 to +151):
- I have to make this method public so I can call it from the REPL.
- I am not against this. I am trying to understand the user-facing consequences, though. I'd probably prefer that we add support for Scala UDFs as well. That can be done in a follow-up though.
- Can you file a follow-up?
- Will do.
     try {
-      val clazz = Utils.classForName[AnyRef](className)
+      val clazz = session.artifactManager.withResources {
+        Utils.classForName[AnyRef](className)
+      }
       val udfInterfaces = clazz.getGenericInterfaces
         .filter(_.isInstanceOf[ParameterizedType])
         .map(_.asInstanceOf[ParameterizedType])
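Both call sites rely on ArtifactManager.withResources to make the class-name lookup resolve against the session's artifacts. A minimal sketch of the pattern, assuming the manager holds a session-isolated class loader (the sessionClassLoader field here is illustrative, not the PR's exact code):

    object ArtifactClassLoading {
      // Placeholder: in the real ArtifactManager this loader is built from the
      // jars and class files added to the session.
      val sessionClassLoader: ClassLoader = getClass.getClassLoader

      // Run `block` with the session's class loader installed as the thread's
      // context class loader, restoring the previous loader afterwards.
      def withResources[T](block: => T): T = {
        val thread = Thread.currentThread()
        val previous = thread.getContextClassLoader
        thread.setContextClassLoader(sessionClassLoader)
        try block finally thread.setContextClassLoader(previous)
      }
    }

With that in place, Utils.classForName (which prefers the thread's context class loader) can see REPL- and artifact-provided classes that are not on the global classpath.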
@@ -115,93 +115,95 @@ object SQLExecution extends Logging {
       }
       val redactedConfigs = sparkSession.sessionState.conf.redactOptions(modifiedConfigs)

+      JobArtifactSet.withActiveJobArtifactState(sparkSession.artifactManager.state) {

Review thread (on the new withActiveJobArtifactState wrapper):
- Can you check how this interacts with all the stuff we do in Connect to make this work? I feel that we are duplicating code now. cc @vicennial
- An FYI to other reviewers: look at this file with hidden whitespace.
- Hmm, with this in the execution code path, we may not need SessionHolder#withSession in a few places, and it can be cleaned up.
- @vicennial Is there an end-to-end test for this? I did some modifications and want to know if it won't break anything.
- @xupefei The ReplE2ESuite has some tests for the overall client -> artifact -> execution-with-artifact flow.

The rest of the hunk re-indents the pre-existing body one level inside the new wrapper; it is otherwise unchanged. The resulting code:

        withSQLConfPropagated(sparkSession) {
          var ex: Option[Throwable] = None
          var isExecutedPlanAvailable = false
          val startTime = System.nanoTime()
          val startEvent = SparkListenerSQLExecutionStart(
            executionId = executionId,
            rootExecutionId = Some(rootExecutionId),
            description = desc,
            details = callSite.longForm,
            physicalPlanDescription = "",
            sparkPlanInfo = SparkPlanInfo.EMPTY,
            time = System.currentTimeMillis(),
            modifiedConfigs = redactedConfigs,
            jobTags = sc.getJobTags(),
            jobGroupId = Option(sc.getLocalProperty(SparkContext.SPARK_JOB_GROUP_ID))
          )
          try {
            body match {
              case Left(e) =>
                sc.listenerBus.post(startEvent)
                throw e
              case Right(f) =>
                val planDescriptionMode =
                  ExplainMode.fromString(sparkSession.sessionState.conf.uiExplainMode)
                val planDesc = queryExecution.explainString(planDescriptionMode)
                val planInfo = try {
                  SparkPlanInfo.fromSparkPlan(queryExecution.executedPlan)
                } catch {
                  case NonFatal(e) =>
                    logDebug("Failed to generate SparkPlanInfo", e)
                    // If the queryExecution already failed before this, we are not able to
                    // generate the plan info, so we use an empty graphviz node to make the UI happy.
                    SparkPlanInfo.EMPTY
                }
                sc.listenerBus.post(
                  startEvent.copy(physicalPlanDescription = planDesc, sparkPlanInfo = planInfo))
                isExecutedPlanAvailable = true
                f()
            }
          } catch {
            case e: Throwable =>
              ex = Some(e)
              throw e
          } finally {
            val endTime = System.nanoTime()
            val errorMessage = ex.map {
              case e: SparkThrowable =>
                SparkThrowableHelper.getMessage(e, ErrorMessageFormat.PRETTY)
              case e =>
                Utils.exceptionString(e)
            }
            if (queryExecution.shuffleCleanupMode != DoNotCleanup
                && isExecutedPlanAvailable) {
              val shuffleIds = queryExecution.executedPlan match {
                case ae: AdaptiveSparkPlanExec =>
                  ae.context.shuffleIds.asScala.keys
                case _ =>
                  Iterable.empty
              }
              shuffleIds.foreach { shuffleId =>
                queryExecution.shuffleCleanupMode match {
                  case RemoveShuffleFiles =>
                    // Same as what we do in ContextCleaner.doCleanupShuffle, but do not unregister
                    // the shuffle on MapOutputTracker, so that stage retries would be triggered.
                    // Set blocking to Utils.isTesting to deflake unit tests.
                    sc.shuffleDriverComponents.removeShuffle(shuffleId, Utils.isTesting)
                  case SkipMigration =>
                    SparkEnv.get.blockManager.migratableResolver.addShuffleToSkip(shuffleId)
                  case _ => // this should not happen
                }
              }
            }
            val event = SparkListenerSQLExecutionEnd(
              executionId,
              System.currentTimeMillis(),
              // Use empty string to indicate no error, as None may mean events generated by old
              // versions of Spark.
              errorMessage.orElse(Some("")))
            // Currently only `Dataset.withAction` and `DataFrameWriter.runCommand` specify the
            // `name` parameter. The `ExecutionListenerManager` only watches SQL executions with
            // name. We can specify the execution name in more places in the future, so that
            // `QueryExecutionListener` can track more cases.
            event.executionName = name
            event.duration = endTime - startTime
            event.qe = queryExecution
            event.executionFailure = ex
            sc.listenerBus.post(event)
          }
        }
      }
    } finally {
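For reviewers unfamiliar with the mechanism: JobArtifactSet.withActiveJobArtifactState scopes an artifact state to the current thread for the duration of the block, so jobs submitted inside it are tagged with the session's artifacts. A minimal sketch of that thread-local scoping pattern; the JobArtifactState fields shown are assumptions, not the exact Spark core code:

    // Sketch of thread-local scoping as assumed to be used by JobArtifactSet.
    case class JobArtifactState(uuid: String, replClassDirUri: Option[String])

    object JobArtifactScope {
      private val activeState = new ThreadLocal[Option[JobArtifactState]] {
        override def initialValue(): Option[JobArtifactState] = None
      }

      def getCurrentJobArtifactState: Option[JobArtifactState] = activeState.get()

      // Install `state` around `block`, restoring the previous value so that
      // nested scopes unwind correctly.
      def withActiveJobArtifactState[T](state: JobArtifactState)(block: => T): T = {
        val previous = activeState.get()
        activeState.set(Option(state))
        try block finally activeState.set(previous)
      }
    }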
@@ -281,7 +283,10 @@ object SQLExecution extends Logging {
     val activeSession = sparkSession
     val sc = sparkSession.sparkContext
     val localProps = Utils.cloneProperties(sc.getLocalProperties)
-    val artifactState = JobArtifactSet.getCurrentJobArtifactState.orNull
+    // `getCurrentJobArtifactState` will return a state only in Spark Connect mode. In
+    // non-Connect mode, we default back to the resources of the current Spark session.
+    val artifactState = JobArtifactSet.getCurrentJobArtifactState.getOrElse(
+      activeSession.artifactManager.state)
     exec.submit(() => JobArtifactSet.withActiveJobArtifactState(artifactState) {
       val originalSession = SparkSession.getActiveSession
       val originalLocalProps = sc.getLocalProperties

Review thread (on the getCurrentJobArtifactState fallback):
- I think it should be safe to use the SparkSession's jobArtifactState. They should be the same. cc @vicennial
Review thread (on REPL-defined UDFs and classpath layering):
- Can you use a UDF defined in the REPL? If so, how does this work with a JobArtifactSet? Do we layer the globally defined classpath over the session-specific classpath? (It'd be nice to document this somewhere.)
- I added one more test, which defines a UDF that initialises an external class added as an artifact. Can you elaborate? Afaik, JobArtifactSet is not involved here, since it's the artifact path that is applied when an active SparkSession is applied. Classpath: it's the other way around, the session classpath is laid over the global one.
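On the layering point: "session classpath laid over the global one" describes child-first lookup, i.e. try the session's artifact URLs first and fall back to the global Spark class loader only on a miss (Spark core's ChildFirstURLClassLoader follows the same idea). A minimal sketch, assuming the session's jars are available as URLs:

    import java.net.{URL, URLClassLoader}

    // "Session over global": session artifacts win; anything not found there
    // falls back to the global (fallback) class loader.
    class SessionFirstClassLoader(sessionUrls: Array[URL], fallback: ClassLoader)
      extends URLClassLoader(sessionUrls, null) { // null parent: no implicit delegation

      override def loadClass(name: String, resolve: Boolean): Class[_] = {
        try {
          super.loadClass(name, resolve) // bootstrap classes + session artifacts
        } catch {
          case _: ClassNotFoundException => fallback.loadClass(name) // global classpath
        }
      }
    }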