diff --git a/.scalafmt-fp.conf b/.scalafmt-fp.conf
new file mode 100644
index 0000000000..352b7b82c5
--- /dev/null
+++ b/.scalafmt-fp.conf
@@ -0,0 +1,88 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+runner.dialect = scala212
+
+# Version is required to make sure IntelliJ picks the right version
+version = 3.4.3
+preset = default
+
+# Max column
+maxColumn = 120
+
+# This parameter simply says the .stripMargin method was not redefined by the user to assign
+# special meaning to indentation preceding the | character. Hence, that indentation can be modified.
+assumeStandardLibraryStripMargin = true
+align.stripMargin = true
+
+# Align settings
+align.preset = most
+align.closeParenSite = false
+align.openParenCallSite = false
+danglingParentheses.defnSite = false
+danglingParentheses.callSite = false
+danglingParentheses.ctrlSite = true
+danglingParentheses.tupleSite = false
+align.openParenDefnSite = false
+align.openParenTupleSite = false
+
+# Newlines
+newlines.alwaysBeforeElseAfterCurlyIf = false
+newlines.afterCurlyLambdaParams = squash # No newline after lambda params
+newlines.inInterpolation = "avoid"
+newlines.avoidInResultType = true
+optIn.annotationNewlines = true
+
+# Scaladoc
+docstrings.style = Asterisk # Javadoc style
+docstrings.removeEmpty = true
+docstrings.oneline = fold
+docstrings.forceBlankLineBefore = true
+docstrings.wrap = no
+
+# Indentation
+indent.extendSite = 2 # Make sure the extends clause is not indented like the ctor parameters
+indentOperator.preset = spray
+
+# Rewrites
+rewrite.rules = [AvoidInfix, Imports, RedundantBraces, SortModifiers]
+
+# Imports
+rewrite.imports.sort = scalastyle
+rewrite.imports.groups = [
+ ["org.apache.streampark\\..*"],
+ ["org.apache.streampark.shaded\\..*"],
+ [".*"],
+ ["javax\\..*"],
+ ["java\\..*"],
+ ["scala\\..*"]
+]
+rewrite.imports.contiguousGroups = no
+importSelectors = singleline # Imports in a single line, like IntelliJ
+
+# Remove redundant braces in string interpolation.
+rewrite.redundantBraces.stringInterpolation = true
+rewrite.redundantBraces.defnBodies = false
+rewrite.redundantBraces.generalExpressions = false
+rewrite.redundantBraces.ifElseExpressions = false
+rewrite.redundantBraces.methodBodies = false
+rewrite.redundantBraces.includeUnitMethods = false
+rewrite.redundantBraces.maxBreaks = 1
+
+# Remove trailing commas
+rewrite.trailingCommas.style = "never"
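For illustration only, here is a hypothetical import block as scalafmt would order it under the `rewrite.imports.groups` setting above (streampark first, then shaded, other third-party, javax, java, scala); the imported classes are placeholders, not part of this change:

```scala
// Hypothetical imports, grouped per .scalafmt-fp.conf:
// streampark, streampark.shaded, third-party, javax, java, scala.
import org.apache.streampark.common.conf.Workspace

import org.apache.streampark.shaded.com.fasterxml.jackson.databind.ObjectMapper

import org.apache.commons.lang3.StringUtils

import javax.annotation.Nullable

import java.time.Duration

import scala.collection.mutable
```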
diff --git a/dist-material/release-docs/LICENSE b/dist-material/release-docs/LICENSE
index a96de8a9dc..de8a35c679 100644
--- a/dist-material/release-docs/LICENSE
+++ b/dist-material/release-docs/LICENSE
@@ -569,6 +569,11 @@ The text of each license is the standard Apache 2.0 license. https://www.apache.
https://mvnrepository.com/artifact/org.pac4j/pac4j-springboot/4.5.7 Apache-2.0
https://mvnrepository.com/artifact/org.pac4j/pac4j-oauth/4.5.7 Apache-2.0
https://mvnrepository.com/artifact/org.pac4j/pac4j-oidc/4.5.7 Apache-2.0
+ https://mvnrepository.com/artifact/io.fabric8/kubernetes-client/6.8.0 Apache-2.0
+ https://mvnrepository.com/artifact/dev.zio/zio_2.12/2.0.15 Apache-2.0
+ https://mvnrepository.com/artifact/dev.zio/zio-streams_2.12/2.0.15 Apache-2.0
+ https://mvnrepository.com/artifact/dev.zio/zio-concurrent_2.12/2.0.15 Apache-2.0
+ https://mvnrepository.com/artifact/dev.zio/zio-http_2.12/3.0.0-RC2 Apache-2.0
https://maven.apache.org/wrapper Apache-2.0
mvnw files from https://github.com/apache/maven-wrapper Apache 2.0
streampark-console/streampark-console-service/src/main/assembly/bin/setclasspath.sh from https://github.com/apache/tomcat
@@ -716,6 +721,10 @@ The text of each license is also included in licenses/LICENSE-[project].txt.
https://mvnrepository.com/artifact/org.slf4j/slf4j-api/1.7.30 MIT
https://mvnrepository.com/artifact/org.projectlombok/lombok/1.18.24 MIT
https://mvnrepository.com/artifact/com.auth0/java-jwt/4.0.0 MIT
+ https://mvnrepository.com/artifact/com.lihaoyi/pprint_2.12/0.8.1 MIT
+ https://mvnrepository.com/artifact/com.lihaoyi/os-lib_2.12/0.8.1 MIT
+ https://mvnrepository.com/artifact/com.lihaoyi/upickle_2.12/0.8.1 MIT
+
========================================================================
diff --git a/pom.xml b/pom.xml
index f5165c7809..953d4569a6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -123,6 +123,9 @@
3.8.1
1.6.1
3.23.1
+ 2.0.15
+ 2.1.13
+ 0.8.1
3.10.1
3.2.4
@@ -523,7 +526,9 @@
**/.asf.yaml,
- **/.github/**
+ **/.github/**,
+ **/crd/**.yml,
+ **/crd/**.yaml
diff --git a/streampark-common/pom.xml b/streampark-common/pom.xml
index 6004b101ef..b936b82ca7 100644
--- a/streampark-common/pom.xml
+++ b/streampark-common/pom.xml
@@ -128,6 +128,32 @@
${streampark.shaded.version}
+
+
+ dev.zio
+ zio-logging_${scala.binary.version}
+ ${zio-logging.version}
+
+
+
+ dev.zio
+ zio-streams_${scala.binary.version}
+ ${zio.version}
+
+
+
+ dev.zio
+ zio-concurrent_${scala.binary.version}
+ ${zio.version}
+
+
+
+
+ com.lihaoyi
+ pprint_${scala.binary.version}
+ ${pprint.version}
+
+
diff --git a/streampark-common/src/main/scala/org/apache/streampark/common/conf/K8sFlinkConfig.scala b/streampark-common/src/main/scala/org/apache/streampark/common/conf/K8sFlinkConfig.scala
index e85b2fbcf4..425d8fe27a 100644
--- a/streampark-common/src/main/scala/org/apache/streampark/common/conf/K8sFlinkConfig.scala
+++ b/streampark-common/src/main/scala/org/apache/streampark/common/conf/K8sFlinkConfig.scala
@@ -17,7 +17,8 @@
package org.apache.streampark.common.conf
-/** Flink kubernetes Configuration */
+/** Flink Kubernetes configuration for the v1 implementation. */
+@deprecated("see: org.apache.streampark.flink.kubernetes.v2.Config")
object K8sFlinkConfig {
val jobStatusTrackTaskTimeoutSec: InternalOption = InternalOption(
diff --git a/streampark-common/src/main/scala/org/apache/streampark/common/conf/Workspace.scala b/streampark-common/src/main/scala/org/apache/streampark/common/conf/Workspace.scala
index 58ef8e2e93..03b0c96824 100644
--- a/streampark-common/src/main/scala/org/apache/streampark/common/conf/Workspace.scala
+++ b/streampark-common/src/main/scala/org/apache/streampark/common/conf/Workspace.scala
@@ -66,7 +66,7 @@ case class Workspace(storageType: StorageType) {
}
}
- private[conf] lazy val WORKSPACE: String = {
+ lazy val WORKSPACE: String = {
storageType match {
case StorageType.LFS =>
val path: String = getConfigValue[String](CommonConfig.STREAMPARK_WORKSPACE_LOCAL)
diff --git a/streampark-common/src/main/scala/org/apache/streampark/common/zio/LoggerBackend.scala b/streampark-common/src/main/scala/org/apache/streampark/common/zio/LoggerBackend.scala
new file mode 100644
index 0000000000..4ad4979cf2
--- /dev/null
+++ b/streampark-common/src/main/scala/org/apache/streampark/common/zio/LoggerBackend.scala
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.common.zio
+
+import org.apache.streampark.common.util.Logger
+
+import zio.{Cause, FiberId, FiberRefs, LogLevel, LogSpan, Runtime, Trace, ZLayer, ZLogger}
+import zio.logging.LoggerNameExtractor
+
+import scala.collection.concurrent.TrieMap
+
+/** ZIO logging backend that bridges to [[org.apache.streampark.common.util.Logger]]. */
+object LoggerBackend {
+
+ lazy val default: ZLayer[Any, Nothing, Unit] = Runtime.addLogger(provideLogger())
+
+ private val defaultLoggerName = getClass.getName
+ private val loggers = TrieMap[String, BridgeLogger]()
+
+ private def getLogger(loggerName: String): BridgeLogger = {
+ loggers.getOrElseUpdate(loggerName, BridgeLogger(loggerName))
+ }
+
+ private case class BridgeLogger(loggerName: String) extends Logger {
+ override protected def logName: String = loggerName
+
+ def trace(msg: String): Unit = super.logTrace(msg)
+ def info(msg: String): Unit = super.logInfo(msg)
+ def warn(msg: String): Unit = super.logWarn(msg)
+ def error(msg: String): Unit = super.logError(msg)
+ def debug(msg: String): Unit = super.logDebug(msg)
+ }
+
+ private def provideLogger(): ZLogger[String, Unit] = (
+ trace: Trace,
+ fiberId: FiberId,
+ logLevel: LogLevel,
+ message: () => String,
+ cause: Cause[Any],
+ context: FiberRefs,
+ spans: List[LogSpan],
+ annotations: Map[String, String]) => {
+
+ val loggerName =
+ LoggerNameExtractor.trace(trace, FiberRefs.empty, Map.empty).getOrElse(defaultLoggerName)
+ val logger = getLogger(loggerName)
+ val msg =
+ if (annotations.nonEmpty)
+ s"${annotations.map { case (k, v) => s"[$k=$v]" }.mkString(" ")} ${message()}"
+ else message()
+
+ logLevel match {
+ case LogLevel.None => logger.trace(msg)
+ case LogLevel.All => logger.trace(msg)
+ case LogLevel.Trace => logger.trace(msg)
+ case LogLevel.Debug => logger.debug(msg)
+ case LogLevel.Info => logger.info(msg)
+ case LogLevel.Warning => logger.warn(msg)
+ case LogLevel.Error => logger.error(msg)
+ case LogLevel.Fatal => logger.error(msg)
+ }
+ }
+
+}
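A minimal usage sketch for the bridge above, assuming the standard ZIO 2 `ZIOAppDefault` bootstrap hook: drop ZIO's default loggers and install `LoggerBackend.default`, so ZIO log output is routed through the StreamPark `Logger`.

```scala
import org.apache.streampark.common.zio.LoggerBackend

import zio.{Runtime, ZIO, ZIOAppDefault, ZLayer}

object LoggerBackendExample extends ZIOAppDefault {

  // Replace ZIO's default console loggers with the StreamPark bridge.
  override val bootstrap: ZLayer[Any, Nothing, Unit] =
    Runtime.removeDefaultLoggers >>> LoggerBackend.default

  // This message ends up in the StreamPark Logger instead of ZIO's console logger.
  def run: ZIO[Any, Nothing, Unit] = ZIO.logInfo("hello from ZIO via LoggerBackend")
}
```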
diff --git a/streampark-common/src/main/scala/org/apache/streampark/common/zio/ZIOContainerSubscription.scala b/streampark-common/src/main/scala/org/apache/streampark/common/zio/ZIOContainerSubscription.scala
new file mode 100644
index 0000000000..0038a61cf2
--- /dev/null
+++ b/streampark-common/src/main/scala/org/apache/streampark/common/zio/ZIOContainerSubscription.scala
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.common.zio
+
+import org.apache.streampark.common.zio.ZIOExt.ZStreamOps
+
+import zio.{durationInt, Chunk, Duration, Ref, Schedule, UIO}
+import zio.concurrent.{ConcurrentMap, ConcurrentSet}
+import zio.stream.{UStream, ZStream}
+
+/** Subscription-ready data structure extensions for ZIO concurrent collections. */
+object ZIOContainerSubscription {
+
+ private val defaultSubInterval: Duration = 500.millis
+
+ implicit class ConcurrentSetExtension[E](set: ConcurrentSet[E]) {
+
+ /*
+ * Subscribe to the ConcurrentSet, emitting the snapshot at each interval only when it differs from the previous one.
+ * IN: [a, b, c], [a, b, c], [a, c]
+ * OUT: [a, b, c], [a, c]
+ */
+ def subscribe(interval: Duration = defaultSubInterval): UStream[Set[E]] =
+ ZStream
+ .fromZIO(set.toSet)
+ .repeat(Schedule.spaced(interval))
+ .diffPrev
+
+ /*
+ * Subscribe to the ConcurrentSet and get each flattened element that is new compared to the previous snapshot.
+ * IN: [a, b, c], [a, b, c], [a, c, e], [d]
+ * OUT: a, b, c, e, d
+ */
+ def flatSubscribe(interval: Duration = defaultSubInterval): UStream[E] =
+ ZStream
+ .fromZIO(Ref.make(Set.empty[E]))
+ .flatMap {
+ prevSet =>
+ subscribe(interval)
+ .mapZIO(cur => prevSet.get.map(prev => (prev, cur)))
+ .map { case (prev, cur) => cur -> cur.diff(prev) }
+ .tap { case (cur, _) => prevSet.set(cur) }
+ .flatMap { case (_, curDiff) => ZStream.fromIterable(curDiff) }
+ }
+ }
+
+ implicit class ConcurrentMapExtension[K, V](map: ConcurrentMap[K, V]) {
+
+ /*
+ * Subscribe to the ConcurrentMap, emitting the snapshot at each interval only when it differs from the previous one.
+ * IN: [a -> 1, b -> 2], [a -> 1, b -> 2], [a -> 1, b -> 3]
+ * OUT: [a -> 1, b -> 2], [a -> 1, b -> 3]
+ */
+ def subscribe(interval: Duration = 500.millis): UStream[Chunk[(K, V)]] =
+ ZStream
+ .fromZIO(map.toChunk)
+ .repeat(Schedule.spaced(interval))
+ .diffPrev
+
+ /*
+ * Subscribe to the ConcurrentMap and get each flattened kv element that is new compared to the previous snapshot.
+ * IN: [a -> 1, b -> 2], [a -> 1, b -> 2, c -> 1], [a -> 1, b -> 3]
+ * OUT: a -> 1, b -> 2, c -> 1, b -> 3
+ */
+ def flatSubscribe(interval: Duration = 500.millis): UStream[(K, V)] =
+ ZStream
+ .fromZIO(Ref.make(Chunk.empty[(K, V)]))
+ .flatMap {
+ prevMap =>
+ subscribe(interval)
+ .mapZIO(cur => prevMap.get.map(prev => (prev, cur)))
+ .map { case (prev, cur) => cur -> cur.diff(prev) }
+ .tap { case (cur, _) => prevMap.set(cur) }
+ .flatMap { case (_, curDiff) => ZStream.fromIterable(curDiff) }
+ }
+
+ /*
+ * Subscribe to the values of the ConcurrentMap, emitting the value snapshot whenever the map changes.
+ * IN: [a -> a1, b -> b2], [a -> a1, b -> b2, c -> c1], [a -> a1, b -> b3]
+ * OUT: [a1, b2], [a1, b2, c1], [a1, b3]
+ */
+ def subscribeValues(interval: Duration = 500.millis): UStream[Chunk[V]] =
+ subscribe(interval).map(_.map(_._2))
+
+ /*
+ * Subscribe to the values of the ConcurrentMap and get the value of each flattened kv element that is new compared to the previous snapshot.
+ * IN: [a -> a1, b -> b2], [a -> a1, b -> b2, c -> c1], [a -> a1, b -> b3]
+ * OUT: a1, b2, c1, b3
+ */
+ def flatSubscribeValues(interval: Duration = 500.millis): UStream[V] =
+ flatSubscribe(interval).map(_._2)
+
+ }
+
+ implicit class RefMapExtension[K, V](ref: Ref[Map[K, V]]) {
+
+ /*
+ * Subscribe to the Ref[Map], emitting the snapshot at each interval only when it differs from the previous one.
+ * IN: [a -> 1, b -> 2], [a -> 1, b -> 2], [a -> 1, b -> 3]
+ * OUT: [a -> 1, b -> 2], [a -> 1, b -> 3]
+ */
+ def subscribe(interval: Duration = defaultSubInterval): UStream[Chunk[(K, V)]] =
+ ZStream
+ .fromZIO(ref.get.map(m => Chunk.fromIterable(m)))
+ .repeat(Schedule.spaced(interval))
+ .diffPrev
+
+ /*
+ * Subscribe to the Ref[Map] and get each flattened kv element that is new compared to the previous snapshot.
+ * IN: [a -> 1, b -> 2], [a -> 1, b -> 2, c -> 1], [a -> 1, b -> 3]
+ * OUT: a -> 1, b -> 2, c -> 1, b -> 3
+ */
+ def flatSubscribe(interval: Duration = defaultSubInterval): UStream[(K, V)] =
+ ZStream
+ .fromZIO(Ref.make(Chunk.empty[(K, V)]))
+ .flatMap {
+ prevMap =>
+ subscribe(interval)
+ .mapZIO(cur => prevMap.get.map(prev => (prev, cur)))
+ .map { case (prev, cur) => cur -> cur.diff(prev) }
+ .tap { case (cur, _) => prevMap.set(cur) }
+ .flatMap { case (_, curDiff) => ZStream.fromIterable(curDiff) }
+ }
+
+ /*
+ * Subscribe to the values of the Ref[Map], emitting the value snapshot whenever the map changes.
+ * IN: [a -> a1, b -> b2], [a -> a1, b -> b2, c -> c1], [a -> a1, b -> b3]
+ * OUT: [a1, b2], [a1, b2, c1], [a1, b3]
+ */
+ def subscribeValues(interval: Duration = 500.millis): UStream[Chunk[V]] =
+ subscribe(interval).map(_.map(_._2))
+
+ /*
+ * Subscribe to the values of the Ref[Map] and get the value of each flattened kv element that is new compared to the previous snapshot.
+ * IN: [a -> a1, b -> b2], [a -> a1, b -> b2, c -> c1], [a -> a1, b -> b3]
+ * OUT: a1, b2, c1, b3
+ */
+ def flatSubscribeValues(interval: Duration = 500.millis): UStream[V] =
+ flatSubscribe(interval).map(_._2)
+
+ def getValue(key: K): UIO[Option[V]] = ref.get.map(_.get(key))
+ }
+
+}
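A small usage sketch of the `ConcurrentSetExtension` above (the job ids and intervals are illustrative): `subscribe` emits the full snapshot only when it has changed, while `flatSubscribe` emits each element the first time it appears relative to the previous snapshot.

```scala
import org.apache.streampark.common.zio.ZIOContainerSubscription.ConcurrentSetExtension

import zio.{durationInt, ZIO, ZIOAppDefault}
import zio.concurrent.ConcurrentSet

object SubscriptionExample extends ZIOAppDefault {
  def run =
    for {
      jobIds <- ConcurrentSet.empty[Long]
      // Snapshot stream: emits the whole set only when it differs from the previous emission.
      _ <- jobIds.subscribe(1.second).foreach(ids => ZIO.logInfo(s"jobs: $ids")).fork
      // Flattened stream: emits each id newly present compared with the previous snapshot.
      _ <- jobIds.flatSubscribe().foreach(id => ZIO.logInfo(s"new job: $id")).fork
      _ <- jobIds.add(1L) *> jobIds.add(2L)
      _ <- ZIO.sleep(3.seconds)
    } yield ()
}
```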
diff --git a/streampark-common/src/main/scala/org/apache/streampark/common/zio/ZIOExt.scala b/streampark-common/src/main/scala/org/apache/streampark/common/zio/ZIOExt.scala
new file mode 100644
index 0000000000..0a6ad0646d
--- /dev/null
+++ b/streampark-common/src/main/scala/org/apache/streampark/common/zio/ZIOExt.scala
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.common.zio
+
+import zio.{IO, Runtime, Unsafe, ZIO}
+import zio.stream.ZStream
+
+/** ZIO extension */
+object ZIOExt {
+
+ /* Unsafely run a ZIO effect. */
+ @throws[Exception]
+ @inline def unsafeRun[E, A](zio: IO[E, A]): A = Unsafe.unsafe {
+ implicit u =>
+ Runtime.default.unsafe
+ .run(zio.provideLayer(Runtime.removeDefaultLoggers >>> LoggerBackend.default))
+ .getOrThrowFiberFailure()
+ }
+
+ implicit class IOOps[E, A](io: ZIO[Any, E, A]) {
+
+ /** unsafe run IO */
+ @throws[Throwable]
+ def runIO: A = ZIOExt.unsafeRun(io)
+ }
+
+ implicit class UIOOps[A](uio: ZIO[Any, Nothing, A]) {
+
+ /** unsafe run UIO */
+ @inline def runUIO: A = ZIOExt.unsafeRun(uio)
+ }
+
+ implicit class ZIOOps[R, E, A](zio: ZIO[R, E, A]) {
+
+ @inline def debugPretty: ZIO[R, E, A] =
+ zio
+ .tap(value => ZIO.succeed(println(toPrettyString(value))))
+ .tapErrorCause(cause => ZIO.succeed(println(s" ${cause.prettyPrint}")))
+
+ @inline def debugPretty(tag: String): ZIO[R, E, A] =
+ zio
+ .tap(value => ZIO.succeed(println(s"$tag: ${toPrettyString(value)}")))
+ .tapErrorCause(cause => ZIO.succeed(println(s" $tag: ${cause.prettyPrint}")))
+ }
+
+ implicit class OptionZIOOps[R, E, A](zio: ZIO[R, E, Option[A]]) {
+ @inline def someOrUnitZIO(effect: A => ZIO[R, E, _]): ZIO[R, E, Unit] =
+ zio.flatMap {
+ case Some(value) => effect(value).unit
+ case None => ZIO.unit
+ }
+
+ @inline def noneOrUnitZIO(effect: ZIO[R, E, _]): ZIO[R, E, Unit] =
+ zio.flatMap {
+ case Some(_) => ZIO.unit
+ case None => effect.unit
+ }
+ }
+
+ implicit class ZStreamOps[R, E, A](zstream: ZStream[R, E, A]) {
+ // noinspection DuplicatedCode
+ @inline def debugPretty: ZStream[R, E, A] =
+ zstream
+ .tap(value => ZIO.succeed(println(toPrettyString(value))))
+ .tapErrorCause(cause => ZIO.succeed(println(s" ${cause.prettyPrint}")))
+
+ // noinspection DuplicatedCode
+ @inline def debugPretty(tag: String): ZStream[R, E, A] =
+ zstream
+ .tap(value => ZIO.succeed(println(s"$tag: ${toPrettyString(value)}")))
+ .tapErrorCause(cause => ZIO.succeed(println(s" $tag: ${cause.prettyPrint}")))
+
+ /* Output a stream that drops elements equal to the previous element. */
+ @inline def diffPrev: ZStream[R, E, A] = zstream.zipWithPrevious
+ .filter {
+ case (None, cur) => true
+ case (Some(prev), cur) => prev != cur
+ }
+ .map { case (_, cur) => cur }
+ }
+
+}
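A brief sketch of the extensions above: `runIO`/`runUIO` make a ZIO effect callable from plain (non-ZIO) code, which is the pattern `EnvInitializer` uses below to launch the embedded file server, and `debugPretty` taps a pprint-rendered value to stdout. The case class is illustrative only.

```scala
import org.apache.streampark.common.zio.ZIOExt.{IOOps, UIOOps, ZIOOps}

import zio.ZIO

object ZIOExtExample extends App {

  // Illustrative data type, only used to show the pretty-printed output.
  case class JobSnapshot(id: Long, state: String)

  // Synchronously run a UIO from non-ZIO code.
  val snapshot: JobSnapshot = ZIO.succeed(JobSnapshot(1L, "RUNNING")).runUIO

  // Pretty-print the value as a side effect, then run the effect to completion.
  val echoed: JobSnapshot = ZIO.succeed(snapshot).debugPretty("snapshot").runIO
}
```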
diff --git a/streampark-common/src/main/scala/org/apache/streampark/common/zio/package.scala b/streampark-common/src/main/scala/org/apache/streampark/common/zio/package.scala
new file mode 100644
index 0000000000..673773b9ed
--- /dev/null
+++ b/streampark-common/src/main/scala/org/apache/streampark/common/zio/package.scala
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.common
+
+import scala.language.implicitConversions
+
+package object zio {
+
+ /** Similar to Python's pprint: pretty-print an instance of any type. */
+ @inline def toPrettyString(value: Any): String = value match {
+ case v: String => v
+ case v => pprint.apply(v, height = 2000, showFieldNames = true).render
+ }
+
+ implicit class PrettyStringOps(value: Any) {
+ @inline def prettyStr: String = toPrettyString(value)
+ }
+
+ /** Automatically lift a value into a Some. */
+ implicit def liftValueAsSome[A](value: A): Option[A] = Some(value)
+
+}
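A quick sketch of the helpers above; `Pod` is a made-up case class used purely to show the pprint-backed rendering, and the last value relies on the `liftValueAsSome` implicit conversion.

```scala
import org.apache.streampark.common.zio._

object PrettyExample extends App {

  // Made-up structure, only to demonstrate the rendering.
  case class Pod(name: String, labels: Map[String, String])

  // Strings pass through unchanged; any other value goes through pprint.
  println("plain string".prettyStr)
  println(Pod("flink-jm", Map("app" -> "flink")).prettyStr)

  // liftValueAsSome lets a bare value be used where an Option is expected.
  val namespace: Option[String] = "default"
  println(namespace) // Some(default)
}
```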
diff --git a/streampark-console/streampark-console-service/pom.xml b/streampark-console/streampark-console-service/pom.xml
index bbc761000f..3b1b242a3b 100644
--- a/streampark-console/streampark-console-service/pom.xml
+++ b/streampark-console/streampark-console-service/pom.xml
@@ -365,6 +365,12 @@
${project.version}
+
+ org.apache.streampark
+ streampark-flink-kubernetes-engine_${scala.binary.version}
+ ${project.version}
+
+
org.apache.streampark
streampark-flink-sqlclient_${scala.binary.version}
@@ -676,7 +682,9 @@
package
-
+
diff --git a/streampark-console/streampark-console-service/src/main/java/org/apache/streampark/console/core/enums/FlinkAppState.java b/streampark-console/streampark-console-service/src/main/java/org/apache/streampark/console/core/enums/FlinkAppState.java
index 6e558a65a7..c4c00023ca 100644
--- a/streampark-console/streampark-console-service/src/main/java/org/apache/streampark/console/core/enums/FlinkAppState.java
+++ b/streampark-console/streampark-console-service/src/main/java/org/apache/streampark/console/core/enums/FlinkAppState.java
@@ -88,12 +88,14 @@ public enum FlinkAppState implements Serializable {
* Lost track of flink job temporarily. A complete loss of flink job tracking translates into LOST
* state.
*/
+ @Deprecated
SILENT(17),
/** Flink job has terminated vaguely, maybe FINISHED, CANCELED or FAILED. */
TERMINATED(18),
/** Flink job has terminated vaguely, maybe FINISHED, CANCELED or FAILED. */
+ @Deprecated
POS_TERMINATED(19),
/** Job SUCCEEDED on yarn. */
@@ -137,7 +139,11 @@ public static boolean isEndState(Integer appState) {
|| FlinkAppState.TERMINATED == flinkAppState;
}
- /** type conversion bridging */
+ /**
+ * Type conversion bridging. Deprecated, see {@link
+ * org.apache.streampark.console.core.utils.FlinkAppStateConverter}
+ */
+ @Deprecated
public static class Bridge {
/** covert from org.apache.streampark.flink.k8s.enums.FlinkJobState */
public static FlinkAppState fromK8sFlinkJobState(Enumeration.Value flinkJobState) {
diff --git a/streampark-console/streampark-console-service/src/main/java/org/apache/streampark/console/core/runner/EnvInitializer.java b/streampark-console/streampark-console-service/src/main/java/org/apache/streampark/console/core/runner/EnvInitializer.java
index a982136195..011218f905 100644
--- a/streampark-console/streampark-console-service/src/main/java/org/apache/streampark/console/core/runner/EnvInitializer.java
+++ b/streampark-console/streampark-console-service/src/main/java/org/apache/streampark/console/core/runner/EnvInitializer.java
@@ -26,9 +26,11 @@
import org.apache.streampark.common.fs.FsOperator;
import org.apache.streampark.common.util.SystemPropertyUtils;
import org.apache.streampark.common.util.Utils;
+import org.apache.streampark.common.zio.ZIOExt;
import org.apache.streampark.console.base.util.WebUtils;
import org.apache.streampark.console.core.entity.FlinkEnv;
import org.apache.streampark.console.core.service.SettingService;
+import org.apache.streampark.flink.kubernetes.v2.httpfs.EmbeddedFileServer;
import org.apache.commons.lang3.StringUtils;
@@ -98,6 +100,8 @@ public void run(ApplicationArguments args) throws Exception {
overrideSystemProp(ConfigConst.KEY_HADOOP_USER_NAME(), hadoopUserName);
// initialize local file system resources
storageInitialize(LFS);
+ // Launch the embedded http file server.
+ ZIOExt.unsafeRun(EmbeddedFileServer.launch());
}
private void initInternalConfig(Environment springEnv) {
diff --git a/streampark-console/streampark-console-service/src/main/scala/org/apache/streampark/console/core/utils/FlinkAppStateConverter.scala b/streampark-console/streampark-console-service/src/main/scala/org/apache/streampark/console/core/utils/FlinkAppStateConverter.scala
new file mode 100644
index 0000000000..93c3cfc6f7
--- /dev/null
+++ b/streampark-console/streampark-console-service/src/main/scala/org/apache/streampark/console/core/utils/FlinkAppStateConverter.scala
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.console.core.utils
+
+import org.apache.streampark.console.core.enums.FlinkAppState
+import org.apache.streampark.flink.kubernetes.v2.model.EvalJobState.EvalJobState
+
+import scala.util.Try
+
+object FlinkAppStateConverter {
+
+ /** Convert [[EvalJobState]] to [[FlinkAppState]]. */
+ def k8sEvalJobStateToFlinkAppState(jobState: EvalJobState): FlinkAppState = {
+ Try(FlinkAppState.valueOf(jobState.toString)).getOrElse(FlinkAppState.OTHER)
+ }
+
+}
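A tiny usage sketch: the conversion is name-based, so an `EvalJobState` whose name matches a `FlinkAppState` constant maps directly, and anything else falls back to `FlinkAppState.OTHER`. The `RUNNING` member is assumed here for illustration, since `EvalJobState` itself is not part of this hunk.

```scala
import org.apache.streampark.console.core.enums.FlinkAppState
import org.apache.streampark.console.core.utils.FlinkAppStateConverter
import org.apache.streampark.flink.kubernetes.v2.model.EvalJobState

object FlinkAppStateConverterExample {
  // Assumption: EvalJobState defines a RUNNING value mirroring FlinkAppState.RUNNING;
  // states without a same-named console counterpart resolve to FlinkAppState.OTHER.
  val running: FlinkAppState =
    FlinkAppStateConverter.k8sEvalJobStateToFlinkAppState(EvalJobState.RUNNING)
}
```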
diff --git a/streampark-flink/pom.xml b/streampark-flink/pom.xml
index 0445b9c818..19184ab60e 100644
--- a/streampark-flink/pom.xml
+++ b/streampark-flink/pom.xml
@@ -38,6 +38,7 @@
streampark-flink-proxy
streampark-flink-packer
streampark-flink-kubernetes
+ streampark-flink-kubernetes-v2
streampark-flink-sql-gateway
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/pom.xml b/streampark-flink/streampark-flink-kubernetes-v2/pom.xml
new file mode 100644
index 0000000000..f39d859616
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/pom.xml
@@ -0,0 +1,67 @@
+
+
+
+ 4.0.0
+
+ org.apache.streampark
+ streampark-flink
+ 2.2.0-SNAPSHOT
+
+
+ streampark-flink-kubernetes-v2
+ StreamPark : Flink Kubernetes Integration V2
+ pom
+
+
+ 6.8.0
+
+
+
+ streampark-flink-kubernetes-crd
+ streampark-flink-kubernetes-engine
+
+
+
+
+
+ com.diffplug.spotless
+ spotless-maven-plugin
+ ${maven-spotless-plugin.version}
+
+
+
+ ${spotless.scalafmt.version}
+ .scalafmt-fp.conf
+
+
+
+
+
+ spotless-check
+ validate
+
+ check
+
+
+
+
+
+
+
+
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-crd/pom.xml b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-crd/pom.xml
new file mode 100644
index 0000000000..9797b5292b
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-crd/pom.xml
@@ -0,0 +1,68 @@
+
+
+
+ 4.0.0
+
+
+ org.apache.streampark
+ streampark-flink-kubernetes-v2
+ 2.2.0-SNAPSHOT
+
+
+ streampark-flink-kubernetes-crd
+ StreamPark : Flink Kubernetes CRD
+
+
+
+ io.fabric8
+ kubernetes-client
+ ${fabric8.version}
+ provided
+
+
+ io.fabric8
+ generator-annotations
+ ${fabric8.version}
+ provided
+
+
+
+
+
+
+ io.fabric8
+ java-generator-maven-plugin
+ ${fabric8.version}
+
+
+ false
+ false
+
+
+
+
+ generate
+
+
+
+
+
+
+
+
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-crd/src/main/resources/crd/flinkdeployments.flink.apache.org-v1.yml b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-crd/src/main/resources/crd/flinkdeployments.flink.apache.org-v1.yml
new file mode 100755
index 0000000000..bf6bcb9cad
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-crd/src/main/resources/crd/flinkdeployments.flink.apache.org-v1.yml
@@ -0,0 +1,9441 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: flinkdeployments.flink.apache.org
+spec:
+ group: flink.apache.org
+ names:
+ kind: FlinkDeployment
+ plural: flinkdeployments
+ shortNames:
+ - flinkdep
+ singular: flinkdeployment
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Last observed state of the job.
+ jsonPath: .status.jobStatus.state
+ name: Job Status
+ type: string
+ - description: "Lifecycle state of the Flink resource (including being rolled\
+ \ back, failed etc.)."
+ jsonPath: .status.lifecycleState
+ name: Lifecycle State
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ serviceAccount:
+ type: string
+ flinkVersion:
+ enum:
+ - v1_13
+ - v1_14
+ - v1_15
+ - v1_16
+ - v1_17
+ type: string
+ ingress:
+ properties:
+ template:
+ type: string
+ className:
+ type: string
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ podTemplate:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ creationTimestamp:
+ type: string
+ deletionGracePeriodSeconds:
+ type: integer
+ deletionTimestamp:
+ type: string
+ finalizers:
+ items:
+ type: string
+ type: array
+ generateName:
+ type: string
+ generation:
+ type: integer
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ managedFields:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ fieldsType:
+ type: string
+ fieldsV1:
+ type: object
+ manager:
+ type: string
+ operation:
+ type: string
+ subresource:
+ type: string
+ time:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ namespace:
+ type: string
+ ownerReferences:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ blockOwnerDeletion:
+ type: boolean
+ controller:
+ type: boolean
+ kind:
+ type: string
+ name:
+ type: string
+ uid:
+ type: string
+ type: object
+ type: array
+ resourceVersion:
+ type: string
+ selfLink:
+ type: string
+ uid:
+ type: string
+ type: object
+ spec:
+ properties:
+ activeDeadlineSeconds:
+ type: integer
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ automountServiceAccountToken:
+ type: boolean
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ type: integer
+ name:
+ type: string
+ protocol:
+ type: string
+ type: object
+ type: array
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: array
+ dnsConfig:
+ properties:
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ items:
+ type: string
+ type: array
+ type: object
+ dnsPolicy:
+ type: string
+ enableServiceLinks:
+ type: boolean
+ ephemeralContainers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ type: integer
+ name:
+ type: string
+ protocol:
+ type: string
+ type: object
+ type: array
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ targetContainerName:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: array
+ hostAliases:
+ items:
+ properties:
+ hostnames:
+ items:
+ type: string
+ type: array
+ ip:
+ type: string
+ type: object
+ type: array
+ hostIPC:
+ type: boolean
+ hostNetwork:
+ type: boolean
+ hostPID:
+ type: boolean
+ hostUsers:
+ type: boolean
+ hostname:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ initContainers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ type: integer
+ name:
+ type: string
+ protocol:
+ type: string
+ type: object
+ type: array
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: array
+ nodeName:
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ os:
+ properties:
+ name:
+ type: string
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ preemptionPolicy:
+ type: string
+ priority:
+ type: integer
+ priorityClassName:
+ type: string
+ readinessGates:
+ items:
+ properties:
+ conditionType:
+ type: string
+ type: object
+ type: array
+ resourceClaims:
+ items:
+ properties:
+ name:
+ type: string
+ source:
+ properties:
+ resourceClaimName:
+ type: string
+ resourceClaimTemplateName:
+ type: string
+ type: object
+ type: object
+ type: array
+ restartPolicy:
+ type: string
+ runtimeClassName:
+ type: string
+ schedulerName:
+ type: string
+ schedulingGates:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ securityContext:
+ properties:
+ fsGroup:
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ supplementalGroups:
+ items:
+ type: integer
+ type: array
+ sysctls:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ type: string
+ serviceAccountName:
+ type: string
+ setHostnameAsFQDN:
+ type: boolean
+ shareProcessNamespace:
+ type: boolean
+ subdomain:
+ type: string
+ terminationGracePeriodSeconds:
+ type: integer
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ matchLabelKeys:
+ items:
+ type: string
+ type: array
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ type: object
+ type: array
+ volumes:
+ items:
+ properties:
+ awsElasticBlockStore:
+ properties:
+ fsType:
+ type: string
+ partition:
+ type: integer
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ type: object
+ azureDisk:
+ properties:
+ cachingMode:
+ type: string
+ diskName:
+ type: string
+ diskURI:
+ type: string
+ fsType:
+ type: string
+ kind:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ azureFile:
+ properties:
+ readOnly:
+ type: boolean
+ secretName:
+ type: string
+ shareName:
+ type: string
+ type: object
+ cephfs:
+ properties:
+ monitors:
+ items:
+ type: string
+ type: array
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ secretFile:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ type: object
+ cinder:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeID:
+ type: string
+ type: object
+ configMap:
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ csi:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ nodePublishSecretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ downwardAPI:
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ mode:
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ properties:
+ volumeClaimTemplate:
+ properties:
+ metadata:
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ creationTimestamp:
+ type: string
+ deletionGracePeriodSeconds:
+ type: integer
+ deletionTimestamp:
+ type: string
+ finalizers:
+ items:
+ type: string
+ type: array
+ generateName:
+ type: string
+ generation:
+ type: integer
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ managedFields:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ fieldsType:
+ type: string
+ fieldsV1:
+ type: object
+ manager:
+ type: string
+ operation:
+ type: string
+ subresource:
+ type: string
+ time:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ namespace:
+ type: string
+ ownerReferences:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ blockOwnerDeletion:
+ type: boolean
+ controller:
+ type: boolean
+ kind:
+ type: string
+ name:
+ type: string
+ uid:
+ type: string
+ type: object
+ type: array
+ resourceVersion:
+ type: string
+ selfLink:
+ type: string
+ uid:
+ type: string
+ type: object
+ spec:
+ properties:
+ accessModes:
+ items:
+ type: string
+ type: array
+ dataSource:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ type: object
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ storageClassName:
+ type: string
+ volumeMode:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ type: object
+ type: object
+ fc:
+ properties:
+ fsType:
+ type: string
+ lun:
+ type: integer
+ readOnly:
+ type: boolean
+ targetWWNs:
+ items:
+ type: string
+ type: array
+ wwids:
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ type: object
+ flocker:
+ properties:
+ datasetName:
+ type: string
+ datasetUUID:
+ type: string
+ type: object
+ gcePersistentDisk:
+ properties:
+ fsType:
+ type: string
+ partition:
+ type: integer
+ pdName:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ gitRepo:
+ properties:
+ directory:
+ type: string
+ repository:
+ type: string
+ revision:
+ type: string
+ type: object
+ glusterfs:
+ properties:
+ endpoints:
+ type: string
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ hostPath:
+ properties:
+ path:
+ type: string
+ type:
+ type: string
+ type: object
+ iscsi:
+ properties:
+ chapAuthDiscovery:
+ type: boolean
+ chapAuthSession:
+ type: boolean
+ fsType:
+ type: string
+ initiatorName:
+ type: string
+ iqn:
+ type: string
+ iscsiInterface:
+ type: string
+ lun:
+ type: integer
+ portals:
+ items:
+ type: string
+ type: array
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ targetPortal:
+ type: string
+ type: object
+ name:
+ type: string
+ nfs:
+ properties:
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ server:
+ type: string
+ type: object
+ persistentVolumeClaim:
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ photonPersistentDisk:
+ properties:
+ fsType:
+ type: string
+ pdID:
+ type: string
+ type: object
+ portworxVolume:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ type: object
+ projected:
+ properties:
+ defaultMode:
+ type: integer
+ sources:
+ items:
+ properties:
+ configMap:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ downwardAPI:
+ properties:
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ mode:
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ secret:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ serviceAccountToken:
+ properties:
+ audience:
+ type: string
+ expirationSeconds:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ properties:
+ group:
+ type: string
+ readOnly:
+ type: boolean
+ registry:
+ type: string
+ tenant:
+ type: string
+ user:
+ type: string
+ volume:
+ type: string
+ type: object
+ rbd:
+ properties:
+ fsType:
+ type: string
+ image:
+ type: string
+ keyring:
+ type: string
+ monitors:
+ items:
+ type: string
+ type: array
+ pool:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ type: object
+ scaleIO:
+ properties:
+ fsType:
+ type: string
+ gateway:
+ type: string
+ protectionDomain:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ sslEnabled:
+ type: boolean
+ storageMode:
+ type: string
+ storagePool:
+ type: string
+ system:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ secret:
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ type: object
+ storageos:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeNamespace:
+ type: string
+ type: object
+ vsphereVolume:
+ properties:
+ fsType:
+ type: string
+ storagePolicyID:
+ type: string
+ storagePolicyName:
+ type: string
+ volumePath:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ status:
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ type: string
+ lastTransitionTime:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ type: object
+ type: array
+ containerStatuses:
+ items:
+ properties:
+ containerID:
+ type: string
+ image:
+ type: string
+ imageID:
+ type: string
+ lastState:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ name:
+ type: string
+ ready:
+ type: boolean
+ restartCount:
+ type: integer
+ started:
+ type: boolean
+ state:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ ephemeralContainerStatuses:
+ items:
+ properties:
+ containerID:
+ type: string
+ image:
+ type: string
+ imageID:
+ type: string
+ lastState:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ name:
+ type: string
+ ready:
+ type: boolean
+ restartCount:
+ type: integer
+ started:
+ type: boolean
+ state:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ hostIP:
+ type: string
+ initContainerStatuses:
+ items:
+ properties:
+ containerID:
+ type: string
+ image:
+ type: string
+ imageID:
+ type: string
+ lastState:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ name:
+ type: string
+ ready:
+ type: boolean
+ restartCount:
+ type: integer
+ started:
+ type: boolean
+ state:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ message:
+ type: string
+ nominatedNodeName:
+ type: string
+ phase:
+ type: string
+ podIP:
+ type: string
+ podIPs:
+ items:
+ properties:
+ ip:
+ type: string
+ type: object
+ type: array
+ qosClass:
+ type: string
+ reason:
+ type: string
+ startTime:
+ type: string
+ type: object
+ type: object
+ jobManager:
+ properties:
+ resource:
+ properties:
+ cpu:
+ type: number
+ memory:
+ type: string
+ ephemeralStorage:
+ type: string
+ type: object
+ replicas:
+ type: integer
+ podTemplate:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ creationTimestamp:
+ type: string
+ deletionGracePeriodSeconds:
+ type: integer
+ deletionTimestamp:
+ type: string
+ finalizers:
+ items:
+ type: string
+ type: array
+ generateName:
+ type: string
+ generation:
+ type: integer
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ managedFields:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ fieldsType:
+ type: string
+ fieldsV1:
+ type: object
+ manager:
+ type: string
+ operation:
+ type: string
+ subresource:
+ type: string
+ time:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ namespace:
+ type: string
+ ownerReferences:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ blockOwnerDeletion:
+ type: boolean
+ controller:
+ type: boolean
+ kind:
+ type: string
+ name:
+ type: string
+ uid:
+ type: string
+ type: object
+ type: array
+ resourceVersion:
+ type: string
+ selfLink:
+ type: string
+ uid:
+ type: string
+ type: object
+ spec:
+ properties:
+ activeDeadlineSeconds:
+ type: integer
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ automountServiceAccountToken:
+ type: boolean
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ type: integer
+ name:
+ type: string
+ protocol:
+ type: string
+ type: object
+ type: array
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: array
+ dnsConfig:
+ properties:
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ items:
+ type: string
+ type: array
+ type: object
+ dnsPolicy:
+ type: string
+ enableServiceLinks:
+ type: boolean
+ ephemeralContainers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ type: integer
+ name:
+ type: string
+ protocol:
+ type: string
+ type: object
+ type: array
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ targetContainerName:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: array
+ hostAliases:
+ items:
+ properties:
+ hostnames:
+ items:
+ type: string
+ type: array
+ ip:
+ type: string
+ type: object
+ type: array
+ hostIPC:
+ type: boolean
+ hostNetwork:
+ type: boolean
+ hostPID:
+ type: boolean
+ hostUsers:
+ type: boolean
+ hostname:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ initContainers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ type: integer
+ name:
+ type: string
+ protocol:
+ type: string
+ type: object
+ type: array
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: array
+ nodeName:
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ os:
+ properties:
+ name:
+ type: string
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ preemptionPolicy:
+ type: string
+ priority:
+ type: integer
+ priorityClassName:
+ type: string
+ readinessGates:
+ items:
+ properties:
+ conditionType:
+ type: string
+ type: object
+ type: array
+ resourceClaims:
+ items:
+ properties:
+ name:
+ type: string
+ source:
+ properties:
+ resourceClaimName:
+ type: string
+ resourceClaimTemplateName:
+ type: string
+ type: object
+ type: object
+ type: array
+ restartPolicy:
+ type: string
+ runtimeClassName:
+ type: string
+ schedulerName:
+ type: string
+ schedulingGates:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ securityContext:
+ properties:
+ fsGroup:
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ supplementalGroups:
+ items:
+ type: integer
+ type: array
+ sysctls:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ type: string
+ serviceAccountName:
+ type: string
+ setHostnameAsFQDN:
+ type: boolean
+ shareProcessNamespace:
+ type: boolean
+ subdomain:
+ type: string
+ terminationGracePeriodSeconds:
+ type: integer
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ matchLabelKeys:
+ items:
+ type: string
+ type: array
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ type: object
+ type: array
+ volumes:
+ items:
+ properties:
+ awsElasticBlockStore:
+ properties:
+ fsType:
+ type: string
+ partition:
+ type: integer
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ type: object
+ azureDisk:
+ properties:
+ cachingMode:
+ type: string
+ diskName:
+ type: string
+ diskURI:
+ type: string
+ fsType:
+ type: string
+ kind:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ azureFile:
+ properties:
+ readOnly:
+ type: boolean
+ secretName:
+ type: string
+ shareName:
+ type: string
+ type: object
+ cephfs:
+ properties:
+ monitors:
+ items:
+ type: string
+ type: array
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ secretFile:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ type: object
+ cinder:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeID:
+ type: string
+ type: object
+ configMap:
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ csi:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ nodePublishSecretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ downwardAPI:
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ mode:
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ properties:
+ volumeClaimTemplate:
+ properties:
+ metadata:
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ creationTimestamp:
+ type: string
+ deletionGracePeriodSeconds:
+ type: integer
+ deletionTimestamp:
+ type: string
+ finalizers:
+ items:
+ type: string
+ type: array
+ generateName:
+ type: string
+ generation:
+ type: integer
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ managedFields:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ fieldsType:
+ type: string
+ fieldsV1:
+ type: object
+ manager:
+ type: string
+ operation:
+ type: string
+ subresource:
+ type: string
+ time:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ namespace:
+ type: string
+ ownerReferences:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ blockOwnerDeletion:
+ type: boolean
+ controller:
+ type: boolean
+ kind:
+ type: string
+ name:
+ type: string
+ uid:
+ type: string
+ type: object
+ type: array
+ resourceVersion:
+ type: string
+ selfLink:
+ type: string
+ uid:
+ type: string
+ type: object
+ spec:
+ properties:
+ accessModes:
+ items:
+ type: string
+ type: array
+ dataSource:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ type: object
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ storageClassName:
+ type: string
+ volumeMode:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ type: object
+ type: object
+ fc:
+ properties:
+ fsType:
+ type: string
+ lun:
+ type: integer
+ readOnly:
+ type: boolean
+ targetWWNs:
+ items:
+ type: string
+ type: array
+ wwids:
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ type: object
+ flocker:
+ properties:
+ datasetName:
+ type: string
+ datasetUUID:
+ type: string
+ type: object
+ gcePersistentDisk:
+ properties:
+ fsType:
+ type: string
+ partition:
+ type: integer
+ pdName:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ gitRepo:
+ properties:
+ directory:
+ type: string
+ repository:
+ type: string
+ revision:
+ type: string
+ type: object
+ glusterfs:
+ properties:
+ endpoints:
+ type: string
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ hostPath:
+ properties:
+ path:
+ type: string
+ type:
+ type: string
+ type: object
+ iscsi:
+ properties:
+ chapAuthDiscovery:
+ type: boolean
+ chapAuthSession:
+ type: boolean
+ fsType:
+ type: string
+ initiatorName:
+ type: string
+ iqn:
+ type: string
+ iscsiInterface:
+ type: string
+ lun:
+ type: integer
+ portals:
+ items:
+ type: string
+ type: array
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ targetPortal:
+ type: string
+ type: object
+ name:
+ type: string
+ nfs:
+ properties:
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ server:
+ type: string
+ type: object
+ persistentVolumeClaim:
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ photonPersistentDisk:
+ properties:
+ fsType:
+ type: string
+ pdID:
+ type: string
+ type: object
+ portworxVolume:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ type: object
+ projected:
+ properties:
+ defaultMode:
+ type: integer
+ sources:
+ items:
+ properties:
+ configMap:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ downwardAPI:
+ properties:
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ mode:
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ secret:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ serviceAccountToken:
+ properties:
+ audience:
+ type: string
+ expirationSeconds:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ properties:
+ group:
+ type: string
+ readOnly:
+ type: boolean
+ registry:
+ type: string
+ tenant:
+ type: string
+ user:
+ type: string
+ volume:
+ type: string
+ type: object
+ rbd:
+ properties:
+ fsType:
+ type: string
+ image:
+ type: string
+ keyring:
+ type: string
+ monitors:
+ items:
+ type: string
+ type: array
+ pool:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ type: object
+ scaleIO:
+ properties:
+ fsType:
+ type: string
+ gateway:
+ type: string
+ protectionDomain:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ sslEnabled:
+ type: boolean
+ storageMode:
+ type: string
+ storagePool:
+ type: string
+ system:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ secret:
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ type: object
+ storageos:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeNamespace:
+ type: string
+ type: object
+ vsphereVolume:
+ properties:
+ fsType:
+ type: string
+ storagePolicyID:
+ type: string
+ storagePolicyName:
+ type: string
+ volumePath:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ status:
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ type: string
+ lastTransitionTime:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ type: object
+ type: array
+ containerStatuses:
+ items:
+ properties:
+ containerID:
+ type: string
+ image:
+ type: string
+ imageID:
+ type: string
+ lastState:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ name:
+ type: string
+ ready:
+ type: boolean
+ restartCount:
+ type: integer
+ started:
+ type: boolean
+ state:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ ephemeralContainerStatuses:
+ items:
+ properties:
+ containerID:
+ type: string
+ image:
+ type: string
+ imageID:
+ type: string
+ lastState:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ name:
+ type: string
+ ready:
+ type: boolean
+ restartCount:
+ type: integer
+ started:
+ type: boolean
+ state:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ hostIP:
+ type: string
+ initContainerStatuses:
+ items:
+ properties:
+ containerID:
+ type: string
+ image:
+ type: string
+ imageID:
+ type: string
+ lastState:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ name:
+ type: string
+ ready:
+ type: boolean
+ restartCount:
+ type: integer
+ started:
+ type: boolean
+ state:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ message:
+ type: string
+ nominatedNodeName:
+ type: string
+ phase:
+ type: string
+ podIP:
+ type: string
+ podIPs:
+ items:
+ properties:
+ ip:
+ type: string
+ type: object
+ type: array
+ qosClass:
+ type: string
+ reason:
+ type: string
+ startTime:
+ type: string
+ type: object
+ type: object
+ type: object
+ taskManager:
+ properties:
+ resource:
+ properties:
+ cpu:
+ type: number
+ memory:
+ type: string
+ ephemeralStorage:
+ type: string
+ type: object
+ replicas:
+ type: integer
+ podTemplate:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ creationTimestamp:
+ type: string
+ deletionGracePeriodSeconds:
+ type: integer
+ deletionTimestamp:
+ type: string
+ finalizers:
+ items:
+ type: string
+ type: array
+ generateName:
+ type: string
+ generation:
+ type: integer
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ managedFields:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ fieldsType:
+ type: string
+ fieldsV1:
+ type: object
+ manager:
+ type: string
+ operation:
+ type: string
+ subresource:
+ type: string
+ time:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ namespace:
+ type: string
+ ownerReferences:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ blockOwnerDeletion:
+ type: boolean
+ controller:
+ type: boolean
+ kind:
+ type: string
+ name:
+ type: string
+ uid:
+ type: string
+ type: object
+ type: array
+ resourceVersion:
+ type: string
+ selfLink:
+ type: string
+ uid:
+ type: string
+ type: object
+ spec:
+ properties:
+ activeDeadlineSeconds:
+ type: integer
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ weight:
+ type: integer
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ automountServiceAccountToken:
+ type: boolean
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ type: integer
+ name:
+ type: string
+ protocol:
+ type: string
+ type: object
+ type: array
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: array
+ dnsConfig:
+ properties:
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ items:
+ type: string
+ type: array
+ type: object
+ dnsPolicy:
+ type: string
+ enableServiceLinks:
+ type: boolean
+ ephemeralContainers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ type: integer
+ name:
+ type: string
+ protocol:
+ type: string
+ type: object
+ type: array
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ targetContainerName:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: array
+ hostAliases:
+ items:
+ properties:
+ hostnames:
+ items:
+ type: string
+ type: array
+ ip:
+ type: string
+ type: object
+ type: array
+ hostIPC:
+ type: boolean
+ hostNetwork:
+ type: boolean
+ hostPID:
+ type: boolean
+ hostUsers:
+ type: boolean
+ hostname:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ initContainers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ type: integer
+ name:
+ type: string
+ protocol:
+ type: string
+ type: object
+ type: array
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ type: integer
+ grpc:
+ properties:
+ port:
+ type: integer
+ service:
+ type: string
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ type: integer
+ periodSeconds:
+ type: integer
+ successThreshold:
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ type: integer
+ timeoutSeconds:
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: array
+ nodeName:
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ os:
+ properties:
+ name:
+ type: string
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ preemptionPolicy:
+ type: string
+ priority:
+ type: integer
+ priorityClassName:
+ type: string
+ readinessGates:
+ items:
+ properties:
+ conditionType:
+ type: string
+ type: object
+ type: array
+ resourceClaims:
+ items:
+ properties:
+ name:
+ type: string
+ source:
+ properties:
+ resourceClaimName:
+ type: string
+ resourceClaimTemplateName:
+ type: string
+ type: object
+ type: object
+ type: array
+ restartPolicy:
+ type: string
+ runtimeClassName:
+ type: string
+ schedulerName:
+ type: string
+ schedulingGates:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ securityContext:
+ properties:
+ fsGroup:
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ type: object
+ supplementalGroups:
+ items:
+ type: integer
+ type: array
+ sysctls:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ type: string
+ serviceAccountName:
+ type: string
+ setHostnameAsFQDN:
+ type: boolean
+ shareProcessNamespace:
+ type: boolean
+ subdomain:
+ type: string
+ terminationGracePeriodSeconds:
+ type: integer
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ matchLabelKeys:
+ items:
+ type: string
+ type: array
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ type: object
+ type: array
+ volumes:
+ items:
+ properties:
+ awsElasticBlockStore:
+ properties:
+ fsType:
+ type: string
+ partition:
+ type: integer
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ type: object
+ azureDisk:
+ properties:
+ cachingMode:
+ type: string
+ diskName:
+ type: string
+ diskURI:
+ type: string
+ fsType:
+ type: string
+ kind:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ azureFile:
+ properties:
+ readOnly:
+ type: boolean
+ secretName:
+ type: string
+ shareName:
+ type: string
+ type: object
+ cephfs:
+ properties:
+ monitors:
+ items:
+ type: string
+ type: array
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ secretFile:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ type: object
+ cinder:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeID:
+ type: string
+ type: object
+ configMap:
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ csi:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ nodePublishSecretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ downwardAPI:
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ mode:
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ properties:
+ volumeClaimTemplate:
+ properties:
+ metadata:
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ creationTimestamp:
+ type: string
+ deletionGracePeriodSeconds:
+ type: integer
+ deletionTimestamp:
+ type: string
+ finalizers:
+ items:
+ type: string
+ type: array
+ generateName:
+ type: string
+ generation:
+ type: integer
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ managedFields:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ fieldsType:
+ type: string
+ fieldsV1:
+ type: object
+ manager:
+ type: string
+ operation:
+ type: string
+ subresource:
+ type: string
+ time:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ namespace:
+ type: string
+ ownerReferences:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ blockOwnerDeletion:
+ type: boolean
+ controller:
+ type: boolean
+ kind:
+ type: string
+ name:
+ type: string
+ uid:
+ type: string
+ type: object
+ type: array
+ resourceVersion:
+ type: string
+ selfLink:
+ type: string
+ uid:
+ type: string
+ type: object
+ spec:
+ properties:
+ accessModes:
+ items:
+ type: string
+ type: array
+ dataSource:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ type: object
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ resources:
+ properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ storageClassName:
+ type: string
+ volumeMode:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ type: object
+ type: object
+ fc:
+ properties:
+ fsType:
+ type: string
+ lun:
+ type: integer
+ readOnly:
+ type: boolean
+ targetWWNs:
+ items:
+ type: string
+ type: array
+ wwids:
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ type: object
+ flocker:
+ properties:
+ datasetName:
+ type: string
+ datasetUUID:
+ type: string
+ type: object
+ gcePersistentDisk:
+ properties:
+ fsType:
+ type: string
+ partition:
+ type: integer
+ pdName:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ gitRepo:
+ properties:
+ directory:
+ type: string
+ repository:
+ type: string
+ revision:
+ type: string
+ type: object
+ glusterfs:
+ properties:
+ endpoints:
+ type: string
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ hostPath:
+ properties:
+ path:
+ type: string
+ type:
+ type: string
+ type: object
+ iscsi:
+ properties:
+ chapAuthDiscovery:
+ type: boolean
+ chapAuthSession:
+ type: boolean
+ fsType:
+ type: string
+ initiatorName:
+ type: string
+ iqn:
+ type: string
+ iscsiInterface:
+ type: string
+ lun:
+ type: integer
+ portals:
+ items:
+ type: string
+ type: array
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ targetPortal:
+ type: string
+ type: object
+ name:
+ type: string
+ nfs:
+ properties:
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ server:
+ type: string
+ type: object
+ persistentVolumeClaim:
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ type: object
+ photonPersistentDisk:
+ properties:
+ fsType:
+ type: string
+ pdID:
+ type: string
+ type: object
+ portworxVolume:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ type: object
+ projected:
+ properties:
+ defaultMode:
+ type: integer
+ sources:
+ items:
+ properties:
+ configMap:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ downwardAPI:
+ properties:
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ type: object
+ mode:
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ secret:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ serviceAccountToken:
+ properties:
+ audience:
+ type: string
+ expirationSeconds:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ properties:
+ group:
+ type: string
+ readOnly:
+ type: boolean
+ registry:
+ type: string
+ tenant:
+ type: string
+ user:
+ type: string
+ volume:
+ type: string
+ type: object
+ rbd:
+ properties:
+ fsType:
+ type: string
+ image:
+ type: string
+ keyring:
+ type: string
+ monitors:
+ items:
+ type: string
+ type: array
+ pool:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ type: object
+ scaleIO:
+ properties:
+ fsType:
+ type: string
+ gateway:
+ type: string
+ protectionDomain:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ sslEnabled:
+ type: boolean
+ storageMode:
+ type: string
+ storagePool:
+ type: string
+ system:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ secret:
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ type: object
+ type: array
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ type: object
+ storageos:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeNamespace:
+ type: string
+ type: object
+ vsphereVolume:
+ properties:
+ fsType:
+ type: string
+ storagePolicyID:
+ type: string
+ storagePolicyName:
+ type: string
+ volumePath:
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ status:
+ properties:
+ conditions:
+ items:
+ properties:
+ lastProbeTime:
+ type: string
+ lastTransitionTime:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ type: object
+ type: array
+ containerStatuses:
+ items:
+ properties:
+ containerID:
+ type: string
+ image:
+ type: string
+ imageID:
+ type: string
+ lastState:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ name:
+ type: string
+ ready:
+ type: boolean
+ restartCount:
+ type: integer
+ started:
+ type: boolean
+ state:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ ephemeralContainerStatuses:
+ items:
+ properties:
+ containerID:
+ type: string
+ image:
+ type: string
+ imageID:
+ type: string
+ lastState:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ name:
+ type: string
+ ready:
+ type: boolean
+ restartCount:
+ type: integer
+ started:
+ type: boolean
+ state:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ hostIP:
+ type: string
+ initContainerStatuses:
+ items:
+ properties:
+ containerID:
+ type: string
+ image:
+ type: string
+ imageID:
+ type: string
+ lastState:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ name:
+ type: string
+ ready:
+ type: boolean
+ restartCount:
+ type: integer
+ started:
+ type: boolean
+ state:
+ properties:
+ running:
+ properties:
+ startedAt:
+ type: string
+ type: object
+ terminated:
+ properties:
+ containerID:
+ type: string
+ exitCode:
+ type: integer
+ finishedAt:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ signal:
+ type: integer
+ startedAt:
+ type: string
+ type: object
+ waiting:
+ properties:
+ message:
+ type: string
+ reason:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ message:
+ type: string
+ nominatedNodeName:
+ type: string
+ phase:
+ type: string
+ podIP:
+ type: string
+ podIPs:
+ items:
+ properties:
+ ip:
+ type: string
+ type: object
+ type: array
+ qosClass:
+ type: string
+ reason:
+ type: string
+ startTime:
+ type: string
+ type: object
+ type: object
+ type: object
+ logConfiguration:
+ additionalProperties:
+ type: string
+ type: object
+ mode:
+ enum:
+ - native
+ - standalone
+ type: string
+ job:
+ properties:
+ jarURI:
+ type: string
+ parallelism:
+ type: integer
+ entryClass:
+ type: string
+ args:
+ items:
+ type: string
+ type: array
+ state:
+ enum:
+ - running
+ - suspended
+ type: string
+ savepointTriggerNonce:
+ type: integer
+ initialSavepointPath:
+ type: string
+ upgradeMode:
+ enum:
+ - savepoint
+ - last-state
+ - stateless
+ type: string
+ allowNonRestoredState:
+ type: boolean
+ type: object
+ restartNonce:
+ type: integer
+ flinkConfiguration:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ status:
+ properties:
+ clusterInfo:
+ additionalProperties:
+ type: string
+ type: object
+ jobManagerDeploymentStatus:
+ enum:
+ - READY
+ - DEPLOYED_NOT_READY
+ - DEPLOYING
+ - MISSING
+ - ERROR
+ type: string
+ reconciliationStatus:
+ properties:
+ reconciliationTimestamp:
+ type: integer
+ lastReconciledSpec:
+ type: string
+ lastStableSpec:
+ type: string
+ state:
+ enum:
+ - DEPLOYED
+ - UPGRADING
+ - ROLLING_BACK
+ - ROLLED_BACK
+ type: string
+ type: object
+ taskManager:
+ properties:
+ labelSelector:
+ type: string
+ replicas:
+ type: integer
+ type: object
+ jobStatus:
+ properties:
+ jobName:
+ type: string
+ jobId:
+ type: string
+ state:
+ type: string
+ startTime:
+ type: string
+ updateTime:
+ type: string
+ savepointInfo:
+ properties:
+ lastSavepoint:
+ properties:
+ timeStamp:
+ type: integer
+ location:
+ type: string
+ triggerType:
+ enum:
+ - MANUAL
+ - PERIODIC
+ - UPGRADE
+ - UNKNOWN
+ type: string
+ formatType:
+ enum:
+ - CANONICAL
+ - NATIVE
+ - UNKNOWN
+ type: string
+ triggerNonce:
+ type: integer
+ type: object
+ triggerId:
+ type: string
+ triggerTimestamp:
+ type: integer
+ triggerType:
+ enum:
+ - MANUAL
+ - PERIODIC
+ - UPGRADE
+ - UNKNOWN
+ type: string
+ formatType:
+ enum:
+ - CANONICAL
+ - NATIVE
+ - UNKNOWN
+ type: string
+ savepointHistory:
+ items:
+ properties:
+ timeStamp:
+ type: integer
+ location:
+ type: string
+ triggerType:
+ enum:
+ - MANUAL
+ - PERIODIC
+ - UPGRADE
+ - UNKNOWN
+ type: string
+ formatType:
+ enum:
+ - CANONICAL
+ - NATIVE
+ - UNKNOWN
+ type: string
+ triggerNonce:
+ type: integer
+ type: object
+ type: array
+ lastPeriodicSavepointTimestamp:
+ type: integer
+ type: object
+ type: object
+ error:
+ type: string
+ lifecycleState:
+ enum:
+ - CREATED
+ - SUSPENDED
+ - UPGRADING
+ - DEPLOYED
+ - STABLE
+ - ROLLING_BACK
+ - ROLLED_BACK
+ - FAILED
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
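+    # Note: the scale subresource below lets `kubectl scale` (and HPA tooling) adjust the
+    # TaskManager count -- desired replicas are written to .spec.taskManager.replicas, while
+    # observed replicas and the pod label selector are read back from .status.taskManager.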
+ subresources:
+ scale:
+ labelSelectorPath: .status.taskManager.labelSelector
+ specReplicasPath: .spec.taskManager.replicas
+ statusReplicasPath: .status.taskManager.replicas
+ status: {}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-crd/src/main/resources/crd/flinksessionjobs.flink.apache.org-v1.yml b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-crd/src/main/resources/crd/flinksessionjobs.flink.apache.org-v1.yml
new file mode 100755
index 0000000000..b83526fbf7
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-crd/src/main/resources/crd/flinksessionjobs.flink.apache.org-v1.yml
@@ -0,0 +1,205 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: flinksessionjobs.flink.apache.org
+spec:
+ group: flink.apache.org
+ names:
+ kind: FlinkSessionJob
+ plural: flinksessionjobs
+ shortNames:
+ - sessionjob
+ singular: flinksessionjob
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Last observed state of the job.
+ jsonPath: .status.jobStatus.state
+ name: Job Status
+ type: string
+ - description: "Lifecycle state of the Flink resource (including being rolled\
+ \ back, failed etc.)."
+ jsonPath: .status.lifecycleState
+ name: Lifecycle State
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ deploymentName:
+ type: string
+ job:
+ properties:
+ jarURI:
+ type: string
+ parallelism:
+ type: integer
+ entryClass:
+ type: string
+ args:
+ items:
+ type: string
+ type: array
+ state:
+ enum:
+ - running
+ - suspended
+ type: string
+ savepointTriggerNonce:
+ type: integer
+ initialSavepointPath:
+ type: string
+ upgradeMode:
+ enum:
+ - savepoint
+ - last-state
+ - stateless
+ type: string
+ allowNonRestoredState:
+ type: boolean
+ type: object
+ restartNonce:
+ type: integer
+ flinkConfiguration:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ status:
+ properties:
+ reconciliationStatus:
+ properties:
+ reconciliationTimestamp:
+ type: integer
+ lastReconciledSpec:
+ type: string
+ lastStableSpec:
+ type: string
+ state:
+ enum:
+ - DEPLOYED
+ - UPGRADING
+ - ROLLING_BACK
+ - ROLLED_BACK
+ type: string
+ type: object
+ jobStatus:
+ properties:
+ jobName:
+ type: string
+ jobId:
+ type: string
+ state:
+ type: string
+ startTime:
+ type: string
+ updateTime:
+ type: string
+ savepointInfo:
+ properties:
+ lastSavepoint:
+ properties:
+ timeStamp:
+ type: integer
+ location:
+ type: string
+ triggerType:
+ enum:
+ - MANUAL
+ - PERIODIC
+ - UPGRADE
+ - UNKNOWN
+ type: string
+ formatType:
+ enum:
+ - CANONICAL
+ - NATIVE
+ - UNKNOWN
+ type: string
+ triggerNonce:
+ type: integer
+ type: object
+ triggerId:
+ type: string
+ triggerTimestamp:
+ type: integer
+ triggerType:
+ enum:
+ - MANUAL
+ - PERIODIC
+ - UPGRADE
+ - UNKNOWN
+ type: string
+ formatType:
+ enum:
+ - CANONICAL
+ - NATIVE
+ - UNKNOWN
+ type: string
+ savepointHistory:
+ items:
+ properties:
+ timeStamp:
+ type: integer
+ location:
+ type: string
+ triggerType:
+ enum:
+ - MANUAL
+ - PERIODIC
+ - UPGRADE
+ - UNKNOWN
+ type: string
+ formatType:
+ enum:
+ - CANONICAL
+ - NATIVE
+ - UNKNOWN
+ type: string
+ triggerNonce:
+ type: integer
+ type: object
+ type: array
+ lastPeriodicSavepointTimestamp:
+ type: integer
+ type: object
+ type: object
+ error:
+ type: string
+ lifecycleState:
+ enum:
+ - CREATED
+ - SUSPENDED
+ - UPGRADING
+ - DEPLOYED
+ - STABLE
+ - ROLLING_BACK
+ - ROLLED_BACK
+ - FAILED
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/pom.xml b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/pom.xml
new file mode 100644
index 0000000000..e582bdaad0
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/pom.xml
@@ -0,0 +1,115 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.streampark</groupId>
+        <artifactId>streampark-flink-kubernetes-v2</artifactId>
+        <version>2.2.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>streampark-flink-kubernetes-engine_${scala.binary.version}</artifactId>
+    <name>StreamPark : Flink Kubernetes Integration Engine</name>
+
+    <properties>
+        <zio-http.version>3.0.0-RC2</zio-http.version>
+        <fabric8.version>6.8.0</fabric8.version>
+        <os-lib.version>0.9.1</os-lib.version>
+        <upickle.version>3.0.0</upickle.version>
+        <jackson-dataformat-yaml.version>2.14.2</jackson-dataformat-yaml.version>
+        <maven.javadoc.skip>true</maven.javadoc.skip>
+    </properties>
+
+    <dependencies>
+
+        <dependency>
+            <groupId>org.apache.streampark</groupId>
+            <artifactId>streampark-common_${scala.binary.version}</artifactId>
+            <version>${project.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.streampark</groupId>
+            <artifactId>streampark-flink-kubernetes-crd</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>io.fabric8</groupId>
+            <artifactId>kubernetes-client</artifactId>
+            <version>${fabric8.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.fabric8</groupId>
+            <artifactId>generator-annotations</artifactId>
+            <version>${fabric8.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>dev.zio</groupId>
+            <artifactId>zio-http_${scala.binary.version}</artifactId>
+            <version>${zio-http.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.lihaoyi</groupId>
+            <artifactId>os-lib_${scala.binary.version}</artifactId>
+            <version>${os-lib.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.lihaoyi</groupId>
+            <artifactId>upickle_${scala.binary.version}</artifactId>
+            <version>${upickle.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.fasterxml.jackson.dataformat</groupId>
+            <artifactId>jackson-dataformat-yaml</artifactId>
+            <version>${jackson-dataformat-yaml.version}</version>
+        </dependency>
+
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>build-helper-maven-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+
+    <profiles>
+        <profile>
+            <id>apache-release</id>
+            <properties>
+                <maven.deploy.skip>true</maven.deploy.skip>
+            </properties>
+        </profile>
+    </profiles>
+
+</project>
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/Config.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/Config.scala
new file mode 100644
index 0000000000..945dcd8cf0
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/Config.scala
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2
+
+import org.apache.streampark.common.conf.{InternalOption, Workspace}
+
+object Config {
+
+ // ----- embedded http file server config -----
+
+ val EMBEDDED_HTTP_FILE_SERVER_LOCAL_MIRROR_DIR: InternalOption = InternalOption(
+ key = "streampark.flink-k8s.httpfs.mirror-dir",
+ defaultValue = s"${Workspace.local.WORKSPACE}/mirror",
+ classType = classOf[String],
+ description = "Local mirror directory for embedded file server"
+ )
+
+ val EMBEDDED_HTTP_FILE_SERVER_PORT: InternalOption = InternalOption(
+ key = "streampark.flink-k8s.httpfs.port",
+ defaultValue = 10030,
+ classType = classOf[Integer],
+ description = "Port of the embedded http file server"
+ )
+
+ // ----- observer config -----
+
+ val EVAL_FLINK_JOB_SNAPSHOT_PARALLELISM: InternalOption = InternalOption(
+ key = "streampark.flink-k8s.job-snapshot.eval-parallelism",
+ defaultValue = 5,
+ classType = classOf[Integer],
+ description = "Parallelism of fibers evaluating flink job status"
+ )
+
+ val EVAL_FLINK_JOB_SNAP_INTERVAL_MILLIS: InternalOption = InternalOption(
+ key = "streampark.flink-k8s.job-snapshot.eval-interval",
+ defaultValue = 1000L,
+ classType = classOf[Long],
+ description = "Interval for evaluating the status of the flink task, in milliseconds"
+ )
+
+ val POLL_FLINK_REST_INTERVAL: InternalOption = InternalOption(
+ key = "streampark.flink-k8s.flink-rest.poll-interval",
+ defaultValue = 1000L,
+ classType = classOf[Long],
+ description = "Interval for polling the flink rest api, in milliseconds"
+ )
+
+ val RETRY_FLINK_REST_INTERVAL: InternalOption = InternalOption(
+ key = "streampark.flink-k8s.flink-rest.poll-retry-interval",
+ defaultValue = 2000L,
+ classType = classOf[Long],
+ description = "Polling interval when the flink rest api request fails, in milliseconds"
+ )
+
+ val REACH_FLINK_REST_TYPE: InternalOption = InternalOption(
+ key = "streampark.flink-k8s.flink-rest.access-type",
+ defaultValue = "IP",
+ classType = classOf[String],
+    description = "Access type of the Flink REST endpoint, either IP or DNS"
+ )
+
+ // ----- operator config -----
+
+ val LOG_FLINK_CR_YAML: InternalOption = InternalOption(
+ key = "streampark.flink-k8s.log-cr-yaml",
+ defaultValue = true,
+ classType = classOf[Boolean],
+    description = "Whether to log the YAML of the generated Flink custom resource when submitting a Flink job or cluster"
+ )
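+
+  // The options above are resolved at runtime via InternalConfigHolder; a minimal usage sketch,
+  // mirroring how the httpfs package object of this module reads its settings:
+  //   val port: Int = InternalConfigHolder.get(Config.EMBEDDED_HTTP_FILE_SERVER_PORT)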
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/FlinkMemorySizeParser.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/FlinkMemorySizeParser.scala
new file mode 100644
index 0000000000..758c43eb0e
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/FlinkMemorySizeParser.scala
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2
+
+import scala.util.Try
+
+object FlinkMemorySizeParser {
+
+ private val pattern = raw"(\d+)\s*([a-zA-Z]+)".r
+
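+  /**
+   * Parses a Flink-style memory size string into bytes; matching is case-sensitive against the
+   * lowercase units declared below. A rough behavior sketch:
+   * {{{
+   *   FlinkMemorySizeParser.parse("1024m") // Some(MemorySize(1073741824))
+   *   FlinkMemorySizeParser.parse("2 gb")  // Some(MemorySize(2147483648))
+   *   FlinkMemorySizeParser.parse("oops")  // None
+   * }}}
+   */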
+ def parse(text: String): Option[MemorySize] = Try {
+ val trimmed = text.trim
+ if (trimmed.isEmpty) return None
+
+ pattern.findFirstMatchIn(text) match {
+ case None => None
+ case Some(matched) =>
+ val size = matched.group(1).toLong
+ val unit = matched.group(2)
+ Unit.all.find(u => u.units.contains(unit)) match {
+ case None => None
+ case Some(hitUnit) => Some(MemorySize(size * hitUnit.multiplier))
+ }
+ }
+ }.getOrElse(None)
+
+ case class MemorySize(bytes: Long) {
+
+ def kibiBytes: Long = bytes >> 10
+ def mebiBytes: Long = bytes >> 20
+ def gibiBytes: Long = bytes >> 30
+ def tebiBytes: Long = bytes >> 40
+ }
+
+ sealed abstract class UnitADT(val units: Array[String], val multiplier: Long)
+ object Unit {
+ val all = Array(Bytes, KiloBytes, MegaBytes, GigaBytes, TeraBytes)
+ case object Bytes extends UnitADT(Array("b", "bytes"), 1L)
+ case object KiloBytes extends UnitADT(Array("k", "kb", "kibibytes"), 1024L)
+ case object MegaBytes extends UnitADT(Array("m", "mb", "mebibytes"), 1024L * 1024L)
+ case object GigaBytes extends UnitADT(Array("g", "gb", "gibibytes"), 1024L * 1024L * 1024L)
+ case object TeraBytes extends UnitADT(Array("t", "tb", "tebibytes"), 1024L * 1024L * 1024L * 1024L)
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/FlinkRestRequest.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/FlinkRestRequest.scala
new file mode 100644
index 0000000000..c96e88d274
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/FlinkRestRequest.scala
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2
+
+import org.apache.streampark.flink.kubernetes.v2.FlinkRestRequest._
+import org.apache.streampark.flink.kubernetes.v2.model.{FlinkPipeOprState, JobSavepointDef, JobSavepointStatus}
+
+import zio.{IO, ZIO}
+import zio.ZIO.attempt
+import zio.http.{Body, Client}
+import zio.http.Method.{PATCH, POST}
+import zio.json._
+
+import java.nio.charset.Charset
+
+import scala.language.implicitConversions
+import scala.util.chaining.scalaUtilChainingOps
+
+/** Flink rest-api request. */
+case class FlinkRestRequest(restUrl: String) {
+
+ type TriggerId = String
+
+ /**
+ * Get all job overview info
+ * see: https://nightlies.apache.org/flink/flink-docs-master/docs/ops/rest_api/#jobs-overview
+ */
+ def listJobOverviewInfo: IO[Throwable, Vector[JobOverviewInfo]] =
+ for {
+ res <- Client.request(s"$restUrl/jobs/overview")
+ rs <- res.body.asJson[JobOverviewRsp]
+ } yield rs.jobs
+
+ /**
+ * Get cluster overview
+ * see: https://nightlies.apache.org/flink/flink-docs-master/docs/ops/rest_api/#overview-1
+ */
+ def getClusterOverview: IO[Throwable, ClusterOverviewInfo] =
+ for {
+ res <- Client.request(s"$restUrl/overview")
+ rs <- res.body.asJson[ClusterOverviewInfo]
+ } yield rs
+
+ /**
+ * Get job manager configuration.
+ * see: https://nightlies.apache.org/flink/flink-docs-master/docs/ops/rest_api/#jobmanager-config
+ */
+ def getJobmanagerConfig: IO[Throwable, Map[String, String]] =
+ for {
+ res <- Client.request(s"$restUrl/jobmanager/config")
+ body <- res.body.asString
+ rs <- attempt {
+ ujson
+ .read(body)
+ .arr
+ .map(item => item("key").str -> item("value").str)
+ .toMap
+ }
+ } yield rs
+
+ /**
+ * Cancels job.
+ * see: https://nightlies.apache.org/flink/flink-docs-master/docs/ops/rest_api/#jobs-jobid-1
+ */
+ def cancelJob(jobId: String): IO[Throwable, Unit] = {
+ Client.request(s"$restUrl/jobs/$jobId?mode=cancel", method = PATCH).unit
+ }
+
+ /**
+ * Stops job with savepoint.
+ * see: https://nightlies.apache.org/flink/flink-docs-master/docs/ops/rest_api/#jobs-jobid-stop
+ */
+ def stopJobWithSavepoint(jobId: String, sptReq: StopJobSptReq): IO[Throwable, TriggerId] =
+ for {
+ res <- Client.request(s"$restUrl/jobs/$jobId/stop", method = POST, content = sptReq.toJson)
+ body <- res.body.asString
+ rs <- attempt(ujson.read(body)("request-id").str)
+ } yield rs
+
+ /**
+ * Triggers a savepoint of job.
+ * see: https://nightlies.apache.org/flink/flink-docs-master/docs/ops/rest_api/#jobs-jobid-savepoints
+ */
+ def triggerSavepoint(jobId: String, sptReq: TriggerSptReq): IO[Throwable, TriggerId] =
+ for {
+ res <- Client.request(s"$restUrl/jobs/$jobId/savepoints", method = POST, content = sptReq.toJson)
+ body <- res.body.asString
+ rs <- attempt(ujson.read(body)("request-id").str)
+ } yield rs
+
+ /**
+ * Get status of savepoint operation.
+ * see: https://nightlies.apache.org/flink/flink-docs-master/docs/ops/rest_api/#jobs-jobid-savepoints-triggerid
+ */
+ def getSavepointOperationStatus(jobId: String, triggerId: String): IO[Throwable, JobSavepointStatus] =
+ for {
+ res <- Client.request(s"$restUrl/jobs/$jobId/savepoints/$triggerId")
+ body <- res.body.asString
+ rs <- attempt {
+ val rspJson = ujson.read(body)
+ val status = rspJson("status")("id").str.pipe(FlinkPipeOprState.ofRaw)
+ val (location, failureCause) = rspJson("operation").objOpt match {
+ case None => None -> None
+ case Some(operation) =>
+ val loc = operation.get("location").flatMap(_.strOpt)
+ val failure = operation
+ .get("failure-cause")
+ .flatMap(_.objOpt.flatMap(map => map.get("stack-trace")))
+ .flatMap(_.strOpt)
+ loc -> failure
+ }
+ JobSavepointStatus(status, failureCause, location)
+ }
+ } yield rs
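+
+  // A rough sketch of the savepoint flow, assuming `jobId` was obtained from listJobOverviewInfo
+  // and the target directory is illustrative:
+  //   for {
+  //     triggerId <- triggerSavepoint(jobId, TriggerSptReq(targetDirectory = Some("/savepoints")))
+  //     status    <- getSavepointOperationStatus(jobId, triggerId)
+  //   } yield status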
+}
+
+object FlinkRestRequest {
+
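+  // Implicitly supplies the default zio-http Client layer, so each request method above can be
+  // consumed as a plain IO without the caller wiring up the Client dependency.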
+ implicit def autoProvideClientLayer[A](zio: ZIO[Client, Throwable, A]): IO[Throwable, A] =
+ zio.provideLayer(Client.default)
+
+ implicit def liftStringBody(content: String): Body = Body.fromString(content, charset = Charset.forName("UTF-8"))
+
+ implicit class BodyExtension(body: Body) {
+ def asJson[A](implicit decoder: JsonDecoder[A]): IO[Throwable, A] = for {
+ data <- body.asString
+ rsp <- ZIO.fromEither(data.fromJson[A]).mapError(ParseJsonError)
+ } yield rsp
+ }
+
+ case class ParseJsonError(msg: String) extends Exception(msg)
+
+ // --- Flink rest api models ---
+
+ case class JobOverviewRsp(jobs: Vector[JobOverviewInfo])
+
+ object JobOverviewRsp {
+ implicit val codec: JsonCodec[JobOverviewRsp] = DeriveJsonCodec.gen[JobOverviewRsp]
+ }
+
+ case class JobOverviewInfo(
+ @jsonField("jid") jid: String,
+ name: String,
+ state: String,
+ @jsonField("start-time") startTime: Long,
+ @jsonField("end-time") endTime: Long,
+ @jsonField("last-modification") lastModifyTime: Long,
+ tasks: TaskStats)
+
+ object JobOverviewInfo {
+ implicit val codec: JsonCodec[JobOverviewInfo] = DeriveJsonCodec.gen[JobOverviewInfo]
+ }
+
+ case class TaskStats(
+ total: Int,
+ created: Int,
+ scheduled: Int,
+ deploying: Int,
+ running: Int,
+ finished: Int,
+ canceling: Int,
+ canceled: Int,
+ failed: Int,
+ reconciling: Int,
+ initializing: Int)
+
+ object TaskStats {
+ implicit val codec: JsonCodec[TaskStats] = DeriveJsonCodec.gen[TaskStats]
+ }
+
+ case class ClusterOverviewInfo(
+ @jsonField("flink-version") flinkVersion: String,
+ @jsonField("taskmanagers") taskManagers: Int,
+ @jsonField("slots-total") slotsTotal: Int,
+ @jsonField("slots-available") slotsAvailable: Int,
+ @jsonField("jobs-running") jobsRunning: Int,
+ @jsonField("jobs-finished") jobsFinished: Int,
+ @jsonField("jobs-cancelled") jobsCancelled: Int,
+ @jsonField("jobs-failed") jobsFailed: Int)
+
+ object ClusterOverviewInfo {
+ implicit val codec: JsonCodec[ClusterOverviewInfo] = DeriveJsonCodec.gen[ClusterOverviewInfo]
+ }
+
+ case class StopJobSptReq(
+ drain: Boolean = false,
+ formatType: Option[String] = None,
+ targetDirectory: Option[String],
+ triggerId: Option[String] = None)
+
+ object StopJobSptReq {
+ implicit val codec: JsonCodec[StopJobSptReq] = DeriveJsonCodec.gen[StopJobSptReq]
+
+ def apply(sptConf: JobSavepointDef): StopJobSptReq =
+ StopJobSptReq(sptConf.drain, sptConf.formatType, sptConf.savepointPath, sptConf.triggerId)
+ }
+
+ case class TriggerSptReq(
+ @jsonField("cancel-job") cancelJob: Boolean = false,
+ formatType: Option[String] = None,
+ @jsonField("target-directory") targetDirectory: Option[String],
+ triggerId: Option[String] = None)
+
+ object TriggerSptReq {
+ implicit val triggerSptReqCodec: JsonCodec[TriggerSptReq] = DeriveJsonCodec.gen[TriggerSptReq]
+
+ def apply(sptConf: JobSavepointDef): TriggerSptReq =
+ TriggerSptReq(cancelJob = false, sptConf.formatType, sptConf.savepointPath, sptConf.triggerId)
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/K8sTools.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/K8sTools.scala
new file mode 100644
index 0000000000..a26a633cd6
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/K8sTools.scala
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2
+
+import org.apache.streampark.common.util.Logger
+import org.apache.streampark.common.zio.ZIOExt.{unsafeRun, IOOps, OptionZIOOps, UIOOps}
+
+import io.fabric8.kubernetes.client._
+import io.fabric8.kubernetes.client.dsl.WatchAndWaitable
+import zio.{durationInt, Fiber, IO, Queue, Ref, Schedule, UIO, ZIO}
+import zio.stream.{UStream, ZStream}
+
+object K8sTools extends Logger {
+
+ /** Create new fabric8 k8s client */
+ def newK8sClient: KubernetesClient = new KubernetesClientBuilder().build
+
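+  /**
+   * Runs the given function on a freshly created client via the blocking pool and always closes the client afterwards.
+   * A usage sketch (the namespace is an assumption):
+   * {{{
+   *   usingK8sClient(client => client.pods().inNamespace("default").list())
+   * }}}
+   */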
+ @inline def usingK8sClient[A](f: KubernetesClient => A): IO[Throwable, A] = ZIO.scoped {
+ ZIO
+ .acquireRelease(ZIO.attempt(newK8sClient))(client => ZIO.attempt(client.close()).ignore)
+ .flatMap(client => ZIO.attemptBlocking(f(client)))
+ }
+
+ /**
+ * Converts fabric8 callback-style Watch to ZStream style.
+ * Usage:
+ * {{{
+ * watchK8sResource(client => client.services().withName("my-service"))
+ * .flatMap(watch => watch.stream.debug.runCollect)
+ * }}}
+ */
+ def watchK8sResource[R](genWatch: KubernetesClient => WatchAndWaitable[R]): IO[Throwable, K8sWatcher[R]] = {
+ for {
+ queue <- Queue.unbounded[(Watcher.Action, R)]
+ client <- ZIO.attempt(newK8sClient)
+
+ watcherF = new Watcher[R]() {
+ override def reconnecting(): Boolean = true
+ override def eventReceived(action: Watcher.Action, resource: R): Unit = {
+ queue.offer((action, resource)).runIO
+ }
+ override def onClose(cause: WatcherException): Unit = {
+ logError("K8s Watcher was accidentally closed.", cause)
+ }
+ override def onClose(): Unit = {
+ super.onClose()
+ queue.shutdown.runIO
+ client.close()
+ }
+ }
+ watch <- ZIO.attemptBlocking(genWatch(client).watch(watcherF))
+ stream = ZStream.fromQueue(queue)
+ } yield K8sWatcher(watch, stream)
+ }
+
+ /** Rich Kubernetes watcher wrapper. */
+ case class K8sWatcher[R](watch: Watch, stream: UStream[(Watcher.Action, R)])
+
+ /**
+   * Safely and automatically retries subscriptions to K8s resources.
+   *
+   * @param genWatch Builds the watchable K8s resource from a KubernetesClient.
+   * @param pipe Consumes the stream of watched (action, resource) events.
+ *
+ * Usage:
+ * {{{
+ * watchK8sResourceForever(client =>
+ * client
+ * .services()
+ * .inNamespace("test")
+ * .withName("my-svc")) { stream =>
+ * stream
+ * .debug
+ * .map(_._2)}
+ * }}}
+ */
+ def watchK8sResourceForever[R](genWatch: KubernetesClient => WatchAndWaitable[R])(
+ pipe: UStream[(Watcher.Action, R)] => UStream[_]): K8sResourceWatcher[R] = {
+ K8sResourceWatcher(genWatch, pipe)
+ }
+
+  /** Self-healing K8s resource watcher that automatically re-subscribes on failure and can be launched and stopped. */
+ case class K8sResourceWatcher[R](
+ genWatch: KubernetesClient => WatchAndWaitable[R],
+ pipe: UStream[(Watcher.Action, R)] => UStream[_]) {
+
+ private val queue: Queue[(Watcher.Action, R)] = Queue.unbounded[(Watcher.Action, R)].runUIO
+ private val clientRef: Ref[Option[KubernetesClient]] = unsafeRun(Ref.make(None))
+ private val watchRef: Ref[Option[Watch]] = unsafeRun(Ref.make(None))
+ private val consumeFiberRef: Ref[Option[Fiber.Runtime[_, _]]] = unsafeRun(Ref.make(None))
+ private val mainFiberRef: Ref[Option[Fiber.Runtime[_, _]]] = unsafeRun(Ref.make(None))
+
+ def launch: UIO[Unit] =
+ for {
+ fiber <- (innerStop *> innerRun).retry(Schedule.spaced(1.seconds)).forkDaemon
+ _ <- mainFiberRef.set(Some(fiber))
+ } yield ()
+
+ def stop: UIO[Unit] =
+ for {
+ _ <- innerStop
+ _ <- mainFiberRef.get.someOrUnitZIO(_.interrupt)
+ _ <- queue.shutdown
+ } yield ()
+
+ private def innerRun: ZIO[Any, Throwable, Unit] =
+ for {
+ client <- ZIO.attempt(newK8sClient)
+ _ <- clientRef.set(Some(client))
+
+ watcherShape = new Watcher[R]() {
+ override def reconnecting(): Boolean = true
+ override def eventReceived(action: Watcher.Action, resource: R): Unit = {
+ queue.offer((action, resource)).runIO
+ }
+ override def onClose(cause: WatcherException): Unit = {
+ logError("K8s Watcher was accidentally closed.", cause)
+ launch.runIO
+ }
+ }
+ watch <- ZIO.attemptBlocking(genWatch(client).watch(watcherShape))
+ _ <- watchRef.set(Some(watch))
+
+ fiber <- pipe(ZStream.fromQueue(queue)).runDrain.forkDaemon
+ _ <- consumeFiberRef.set(Some(fiber))
+ } yield ()
+
+ private def innerStop: UIO[Unit] =
+ for {
+ _ <- consumeFiberRef.get.someOrUnitZIO(_.interrupt)
+ _ <- watchRef.get.someOrUnitZIO(watch => ZIO.attemptBlocking(watch.close()).ignore)
+ _ <- clientRef.get.someOrUnitZIO(client => ZIO.attemptBlocking(client.close()).ignore)
+ } yield ()
+
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/EmbeddedFileServer.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/EmbeddedFileServer.scala
new file mode 100644
index 0000000000..ff16bc6071
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/EmbeddedFileServer.scala
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.httpfs
+
+import org.apache.streampark.common.zio.ZIOExt.UIOOps
+
+import zio.{Ref, UIO, ZIO}
+import zio.http._
+
+object EmbeddedFileServer {
+
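+  // Exposed endpoints:
+  //   GET /health               -> liveness check, responds 200 OK
+  //   GET /fs/<subspace>/<name> -> serves a file previously mirrored via FileMirror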
+ private val routes = Http.collectHttp[Request] {
+ case Method.GET -> Root / "health" => Handler.ok.toHttp
+ case Method.GET -> Root / "fs" / subspace / name =>
+ Http.fromFileZIO(FileMirror.getLocalFile(subspace, name))
+ }
+
+ private val isLaunch: Ref[Boolean] = Ref.make(false).runUIO
+
+  /** Launch the Netty-based internal HTTP file server on fileServerPort; repeated calls are no-ops. */
+ def launch: UIO[Unit] = {
+ val serve = for {
+ _ <- ZIO.log(s"Launch internal http file server at port: $fileServerPort")
+ _ <- Server
+ .serve(routes.withDefaultErrorResponse)
+ .provide(Server.defaultWithPort(fileServerPort))
+ .forkDaemon
+ } yield ()
+ (serve *> isLaunch.set(true)).unlessZIO(isLaunch.get).unit
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/FileMirror.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/FileMirror.scala
new file mode 100644
index 0000000000..79df265ef8
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/FileMirror.scala
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.httpfs
+
+import zio.{IO, UIO, ZIO}
+
+import java.io.File
+
+object FileMirror {
+
+ private val mirrorRoot = os.Path(new File(localMirrorDir).getAbsolutePath)
+
+  /** Mirror the file into the local mirror directory. Returns a (subspace, file-name) tuple. */
+ def mirror(srcFilePath: String, subspace: String): IO[Throwable, (String, String)] = ZIO.attemptBlocking {
+ val srcPath = os.Path(new File(srcFilePath).getAbsolutePath)
+ val fileName = srcPath.last
+ os.copy(
+ from = srcPath,
+ to = mirrorRoot / subspace / fileName,
+ replaceExisting = true,
+ createFolders = true,
+ mergeFolders = true
+ )
+ subspace -> fileName
+ }
+
+ /** Get the http access url of the mirrored file resource. */
+ def getHttpUrl(subspace: String, name: String): UIO[String] = {
+ for {
+ httpHost <- FileServerPeerAddress.getEnsure
+ url = s"http://$httpHost:$fileServerPort/fs/$subspace/$name"
+ } yield url
+ }
+
+ def mirrorAndGetHttpUrl(srcFilePath: String, ns: String): ZIO[Any, Throwable, String] =
+ mirror(srcFilePath, ns)
+ .flatMap { case (ns, name) => getHttpUrl(ns, name) }
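+
+  // A usage sketch (the jar path and subspace are illustrative); with the default port this yields
+  // something like "http://<peer-address>:10030/fs/assets/flink-faker-0.5.3.jar":
+  //   mirrorAndGetHttpUrl("/streampark/ws/assets/flink-faker-0.5.3.jar", "assets")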
+
+ /** Get the local File of the mirrored file resource. */
+ def getLocalFile(subspace: String, name: String): IO[Throwable, File] = {
+ for {
+ localFile <- ZIO.succeed((mirrorRoot / subspace / name).toIO)
+ _ <- ZIO
+ .fail(FileNotFound(localFile.getAbsolutePath))
+ .whenZIO(ZIO.attempt(localFile.exists()).map(!_))
+ _ <- ZIO
+ .fail(NotAFile(localFile.getAbsolutePath))
+ .whenZIO(ZIO.attempt(localFile.isFile).map(!_))
+ } yield localFile
+ }
+
+ case class FileNotFound(path: String) extends Exception(s"File not found: $path")
+ case class NotAFile(path: String) extends Exception(s"Not a file: $path")
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/FileServerPeerAddress.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/FileServerPeerAddress.scala
new file mode 100644
index 0000000000..3658f41b40
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/FileServerPeerAddress.scala
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.httpfs
+
+import org.apache.streampark.common.zio.ZIOExt.{unsafeRun, UIOOps}
+import org.apache.streampark.flink.kubernetes.v2.K8sTools.{newK8sClient, usingK8sClient}
+
+import zio.{durationInt, Ref, UIO, ZIO}
+
+import java.net.{InetAddress, InetSocketAddress, Socket}
+
+import scala.util.Using
+
+object FileServerPeerAddress {
+
+ private val address: Ref[Option[String]] = unsafeRun(Ref.make(None))
+
+ private val STREAMPARK_K8S_SVC_NAME = "streampark-service"
+
+ // Auto calculate address when initialized.
+ infer
+ .map(Some(_))
+ .tap(address.set)
+ .tap(addr => ZIO.logInfo(s"Embedded HTTP file server K8s peer address: ${addr.getOrElse("unknown")}"))
+ .forkDaemon
+ .runUIO
+
+ /** Get the peer communication address snapshot. */
+ def get: UIO[Option[String]] = address.get
+
+ /** Get the address, blocking the caller until the address is calculated. */
+ def getEnsure: UIO[String] = address.get.flatMap {
+ case None => getEnsure.delay(100.millis)
+ case Some(addr) => ZIO.succeed(addr)
+ }
+
+ /** Refresh the peer communication address. */
+ def refresh: UIO[Unit] = infer.tap(r => address.set(Some(r))).unit
+
+  /** Infer the file server peer address that is reachable from resources inside the K8s cluster. */
+ def infer: UIO[String] = {
+ inferInsidePod.some
+ .orElse(inferSocketReplyFromK8sApiServer.some)
+ .orElse(directLocalHost.some)
+ .orElse(ZIO.succeed("127.0.0.1"))
+ }
+
+ private def inferInsidePod: UIO[Option[String]] =
+ usingK8sClient { client =>
+ Option(client.getNamespace).flatMap { ns =>
+ Option(
+ client.services
+ .inNamespace(ns)
+ .withName(STREAMPARK_K8S_SVC_NAME)
+ .get()
+ ).map(_ => s"$STREAMPARK_K8S_SVC_NAME.$ns")
+ }
+ }.catchAll(_ => ZIO.succeed(None))
+
+ private def inferSocketReplyFromK8sApiServer: UIO[Option[String]] =
+ ZIO
+ .attemptBlocking {
+ val masterUrl = newK8sClient.getConfiguration.getMasterUrl
+
+ extractHostPortFromUrl(masterUrl).flatMap { case (host, port) =>
+ Using(new Socket()) { socket =>
+ socket.connect(new InetSocketAddress(host, port))
+ socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress].getAddress.getHostAddress
+ }.toOption
+ }
+ }
+ .catchAll(_ => ZIO.succeed(None))
+
+ private def directLocalHost: UIO[Option[String]] =
+ ZIO
+ .attemptBlocking(InetAddress.getLocalHost.getHostAddress)
+ .map(Some(_))
+ .catchAll(_ => ZIO.succeed(None))
+
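+  // Extracts (host, port) from the K8s master URL, filling in default ports by protocol, e.g.
+  //   extractHostPortFromUrl("https://10.96.0.1:6443")         == Some(("10.96.0.1", 6443))
+  //   extractHostPortFromUrl("https://kubernetes.default.svc") == Some(("kubernetes.default.svc", 443))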
+ private def extractHostPortFromUrl(url: String): Option[(String, Int)] = {
+ val p1 = url.split("://")
+ if (p1.length != 2) None
+ else {
+ val protocol = p1(0)
+ val p2 = p1(1).split("/").head.split(":")
+ if (p2.length == 2) Some(p2(0) -> p2(1).toInt)
+ else if (p2.length == 1) protocol match {
+ case "http" => Some(p2(0) -> 80)
+ case "https" => Some(p2(0) -> 443)
+ case _ => None
+ }
+ else None
+ }
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/package.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/package.scala
new file mode 100644
index 0000000000..e5382cd5ec
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/httpfs/package.scala
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2
+
+import org.apache.streampark.common.conf.InternalConfigHolder
+import org.apache.streampark.flink.kubernetes.v2.Config.{EMBEDDED_HTTP_FILE_SERVER_LOCAL_MIRROR_DIR, EMBEDDED_HTTP_FILE_SERVER_PORT}
+
+package object httpfs {
+
+ lazy val localMirrorDir: String = InternalConfigHolder.get(EMBEDDED_HTTP_FILE_SERVER_LOCAL_MIRROR_DIR)
+ lazy val fileServerPort: Int = InternalConfigHolder.get(EMBEDDED_HTTP_FILE_SERVER_PORT)
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/ClusterMetrics.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/ClusterMetrics.scala
new file mode 100644
index 0000000000..7ef4bb7da5
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/ClusterMetrics.scala
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+/**
+ * Flink cluster metrics.
+ *
+ * see: [[org.apache.streampark.flink.kubernetes.model.FlinkMetricCV]]
+ */
+case class ClusterMetrics(
+ totalJmMemory: Integer = 0,
+ totalTmMemory: Integer = 0,
+ totalTm: Integer = 0,
+ totalSlot: Integer = 0,
+ availableSlot: Integer = 0,
+ runningJob: Integer = 0,
+ finishedJob: Integer = 0,
+ cancelledJob: Integer = 0,
+ failedJob: Integer = 0)
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/FlinkCRStatus.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/FlinkCRStatus.scala
new file mode 100644
index 0000000000..4a9d4b5c26
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/FlinkCRStatus.scala
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+import org.apache.streampark.flink.kubernetes.v2.model.EvalState.EvalState
+
+import io.fabric8.kubernetes.client.Watcher
+import org.apache.flink.v1beta1.{FlinkDeployment, FlinkDeploymentStatus, FlinkSessionJob, FlinkSessionJobStatus}
+import org.apache.flink.v1beta1.FlinkDeploymentStatus.JobManagerDeploymentStatus
+
+/*
+ * Flink K8s custom resource status.
+ *
+ * For the evaluation logic of the state, please refer to:
+ * - [[org.apache.streampark.flink.kubernetes.v2.model.DeployCRStatus.eval]]
+ * - [[org.apache.streampark.flink.kubernetes.v2.model.SessionJobCRStatus.eval]]
+ */
+sealed trait FlinkCRStatus {
+ val namespace: String
+ val name: String
+ val evalState: EvalState
+ val error: Option[String]
+ val updatedTs: Long
+}
+
+/*
+ * Evaluated status for Flink K8s CR.
+ *
+ * - DEPLOYING: The CR is being deployed or rolled back.
+ * - READY: The JobManager is ready.
+ * - SUSPENDED: The CR has been suspended.
+ * - FAILED: The job has terminally failed or the JobManager has encountered an error.
+ * - DELETED: The CR has been deleted.
+ */
+object EvalState extends Enumeration {
+ type EvalState = Value
+ val DEPLOYING, READY, SUSPENDED, FAILED, DELETED = Value
+}
+
+/**
+ * Flink deployment CR status snapshot.
+ * See: [[org.apache.flink.v1beta1.FlinkDeploymentStatus]]
+ */
+case class DeployCRStatus(
+ namespace: String,
+ name: String,
+ evalState: EvalState,
+ action: Watcher.Action,
+ lifecycle: FlinkDeploymentStatus.LifecycleState,
+ jmDeployStatus: JobManagerDeploymentStatus,
+ error: Option[String] = None,
+ updatedTs: Long)
+ extends FlinkCRStatus
+
+object DeployCRStatus {
+
+ import FlinkDeploymentStatus.LifecycleState
+
+ def eval(action: Watcher.Action, cr: FlinkDeployment): DeployCRStatus = {
+ val metadata = cr.getMetadata
+ val status = cr.getStatus
+ val lifecycle = status.getLifecycleState
+ val jmDeployStatus = status.getJobManagerDeploymentStatus
+
+ val evalState = (action, lifecycle, jmDeployStatus) match {
+ case (Watcher.Action.DELETED, _, _) => EvalState.DELETED
+ case (_, LifecycleState.FAILED, _) => EvalState.FAILED
+ case (_, _, JobManagerDeploymentStatus.ERROR) => EvalState.FAILED
+ case (_, LifecycleState.SUSPENDED, _) => EvalState.SUSPENDED
+ case (_, _, JobManagerDeploymentStatus.READY) => EvalState.READY
+ case _ => EvalState.DEPLOYING
+ }
+
+ DeployCRStatus(
+ namespace = metadata.getNamespace,
+ name = metadata.getName,
+ evalState = evalState,
+ action = action,
+ lifecycle = lifecycle,
+ jmDeployStatus = jmDeployStatus,
+ error = Option(cr.getStatus.getError),
+ updatedTs = System.currentTimeMillis
+ )
+ }
+}
+
+/**
+ * Flink Session Job CR status snapshot.
+ * See: [[org.apache.flink.v1beta1.FlinkSessionJobStatus]]
+ */
+case class SessionJobCRStatus(
+ namespace: String,
+ name: String,
+ refDeployName: String,
+ evalState: EvalState,
+ action: Watcher.Action,
+ lifecycle: FlinkSessionJobStatus.LifecycleState,
+ error: Option[String],
+ updatedTs: Long)
+ extends FlinkCRStatus
+
+object SessionJobCRStatus {
+
+ import FlinkSessionJobStatus.LifecycleState
+
+ def eval(action: Watcher.Action, cr: FlinkSessionJob): SessionJobCRStatus = {
+ val metadata = cr.getMetadata
+ val status = cr.getStatus
+ val lifecycle = status.getLifecycleState
+
+ val evalState = (action, lifecycle) match {
+ case (Watcher.Action.DELETED, _) => EvalState.DELETED
+ case (_, LifecycleState.STABLE) => EvalState.READY
+ case (_, LifecycleState.ROLLED_BACK) => EvalState.READY
+ case (_, LifecycleState.SUSPENDED) => EvalState.SUSPENDED
+ case (_, LifecycleState.FAILED) => EvalState.FAILED
+ case _ => EvalState.DEPLOYING
+ }
+
+ SessionJobCRStatus(
+ namespace = metadata.getNamespace,
+ name = metadata.getName,
+ refDeployName = cr.getSpec.getDeploymentName,
+ evalState = evalState,
+ action = action,
+ lifecycle = lifecycle,
+ error = Option(cr.getStatus.getError),
+ updatedTs = System.currentTimeMillis
+ )
+ }
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/FlinkDeploymentDef.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/FlinkDeploymentDef.scala
new file mode 100644
index 0000000000..b3c039bf24
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/FlinkDeploymentDef.scala
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+import org.apache.streampark.flink.kubernetes.v2.jacksonMapper
+import org.apache.streampark.flink.kubernetes.v2.model.FlinkDeploymentDef.mapPodToPodTemplate
+
+import io.fabric8.kubernetes.api.model.{ObjectMeta, Pod}
+import org.apache.flink.v1beta1.{flinkdeploymentspec, FlinkDeployment, FlinkDeploymentSpec}
+import org.apache.flink.v1beta1.FlinkDeploymentSpec.FlinkVersion
+import org.apache.flink.v1beta1.flinkdeploymentspec.{Ingress, TaskManager}
+
+import scala.jdk.CollectionConverters.mapAsJavaMapConverter
+import scala.reflect.ClassTag
+import scala.util.Try
+import scala.util.chaining.scalaUtilChainingOps
+
+/**
+ * Flink Deployment CR definition for application mode job or session cluster.
+ * Type-safe mirror of [[org.apache.streampark.shaded.org.apache.flink.kubernetes.operator.api.spec.FlinkDeploymentSpec]]
+ *
+ * @param namespace K8s CR namespace
+ * @param name K8s CR name
+ * @param image Flink docker image used to start the Job and TaskManager pods.
+ * @param imagePullPolicy Image pull policy of the Flink docker image.
+ * @param serviceAccount Kubernetes service account used by the Flink deployment.
+ * @param flinkVersion Flink image version.
+ * @param jobManager Flink JobManager definition.
+ * @param taskManager Flink TaskManager definition.
+ * @param restartNonce Nonce used to manually trigger restart for the cluster/session job. In order to trigger restart,
+ * change the number to anything other than the current value.
+ * @param flinkConfiguration Flink configuration overrides for the Flink deployment or Flink session job.
+ * @param logConfiguration Log configuration overrides for the Flink deployment. Format logConfigFileName -> configContent.
+ * @param podTemplate Base pod template for job and task manager pods. Can be overridden by the jobManager and taskManager pod templates.
+ * @param ingress Ingress definition.
+ * @param mode Deployment mode of the Flink cluster, native or standalone, default: native.
+ * @param job Job definition for application deployments/session job. Null for session clusters.
+ * @param extJarPaths Additional jar dependency paths; only local paths such as "/streampark/ws/assets/flink-faker-0.5.3.jar" are allowed.
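+ *
+ * A minimal sketch (all values, including the FlinkVersion constant name, are illustrative assumptions):
+ * {{{
+ *   FlinkDeploymentDef(
+ *     namespace = "default",
+ *     name = "my-app",
+ *     image = "flink:1.17",
+ *     flinkVersion = FlinkVersion.V1_17,
+ *     jobManager = JobManagerDef(cpu = 1.0, memory = "1024m"),
+ *     taskManager = TaskManagerDef(cpu = 1.0, memory = "2048m"),
+ *     job = Some(JobDef(jarURI = "/streampark/ws/assets/flink-faker-0.5.3.jar", parallelism = 1))
+ *   ).toFlinkDeployment
+ * }}}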
+ */
+
+case class FlinkDeploymentDef(
+ namespace: String,
+ name: String,
+ image: String,
+ imagePullPolicy: Option[String] = None,
+ serviceAccount: String = "flink",
+ flinkVersion: FlinkVersion,
+ jobManager: JobManagerDef,
+ taskManager: TaskManagerDef,
+ restartNonce: Option[Long] = None,
+ flinkConfiguration: Map[String, String] = Map.empty,
+ logConfiguration: Map[String, String] = Map.empty,
+ podTemplate: Option[Pod] = None,
+ ingress: Option[IngressDef] = None,
+ mode: FlinkDeploymentSpec.Mode = FlinkDeploymentSpec.Mode._NATIVE,
+ job: Option[JobDef] = None,
+ extJarPaths: Array[String] = Array.empty) {
+
+ // noinspection DuplicatedCode
+ def toFlinkDeployment: FlinkDeployment = {
+ val spec = new FlinkDeploymentSpec()
+
+ spec.setImage(image)
+ imagePullPolicy.foreach(spec.setImagePullPolicy)
+ spec.setServiceAccount(serviceAccount)
+ spec.setFlinkVersion(flinkVersion)
+ restartNonce.foreach(spec.setRestartNonce(_))
+ podTemplate
+ .flatMap(pod => mapPodToPodTemplate(pod, classOf[flinkdeploymentspec.PodTemplate]).toOption)
+ .foreach(spec.setPodTemplate)
+ spec.setMode(mode)
+
+ val jmSpec = jobManager.toJobManagerSpec
+ spec.setJobManager(jmSpec)
+ val tmSpec = taskManager.toTaskManagerSpec
+ spec.setTaskManager(tmSpec)
+
+ if (flinkConfiguration.nonEmpty) spec.setFlinkConfiguration(flinkConfiguration.asJava)
+ if (logConfiguration.nonEmpty) spec.setLogConfiguration(logConfiguration.asJava)
+
+ ingress.map(_.toIngressSpec).foreach(spec.setIngress)
+ job.map(_.toFlinkDeploymentJobSpec).foreach(spec.setJob)
+
+ val deployment = new FlinkDeployment()
+ val metadata = new ObjectMeta()
+ metadata.setNamespace(namespace)
+ metadata.setName(name)
+ deployment.setMetadata(metadata)
+ deployment.setSpec(spec)
+ deployment.setStatus(null)
+ deployment
+ }
+}
+
+object FlinkDeploymentDef {
+ def mapPodToPodTemplate[A: ClassTag](pod: Pod, clz: Class[A]): Try[A] = Try {
+ val json = jacksonMapper.writeValueAsString(pod)
+ jacksonMapper.readValue(json, clz)
+ }
+}
+
+/**
+ * JobManager definition.
+ * Type-safe mirror of [[org.apache.flink.v1beta1.flinkdeploymentspec.JobManager]]
+ *
+ * @param cpu Amount of CPU allocated to the pod.
+ * @param memory Amount of memory allocated to the pod. Example: 1024m, 1g
+ * @param ephemeralStorage Amount of ephemeral storage allocated to the pod. Example: 1024m, 2G
+ * @param replicas Number of JobManager replicas, defaults to 1.
+ * @param podTemplate JobManager pod template.
+ */
+case class JobManagerDef(
+ cpu: Double,
+ memory: String,
+ ephemeralStorage: Option[String] = None,
+ replicas: Int = 1,
+ podTemplate: Option[Pod] = None) {
+
+ def toJobManagerSpec: flinkdeploymentspec.JobManager = {
+ val spec = new flinkdeploymentspec.JobManager()
+ val resource = new flinkdeploymentspec.jobmanager.Resource().pipe { rs =>
+ rs.setCpu(cpu)
+ rs.setMemory(memory)
+ rs.setEphemeralStorage(ephemeralStorage.orNull)
+ rs
+ }
+ spec.setResource(resource)
+ spec.setReplicas(replicas)
+ podTemplate
+ .flatMap(pod => mapPodToPodTemplate(pod, classOf[flinkdeploymentspec.jobmanager.PodTemplate]).toOption)
+ .foreach(spec.setPodTemplate)
+ spec
+ }
+}
+
+/**
+ * TaskManager definition.
+ * Type-safe mirror of [[org.apache.flink.v1beta1.flinkdeploymentspec.TaskManager]]
+ *
+ * @param cpu Amount of CPU allocated to the pod.
+ * @param memory Amount of memory allocated to the pod. Example: 1024m, 1g
+ * @param ephemeralStorage Amount of ephemeral storage allocated to the pod. Example: 1024m, 2G
+ * @param replicas Number of TaskManager replicas. If defined, takes precedence over parallelism
+ * @param podTemplate TaskManager pod template.
+ */
+case class TaskManagerDef(
+ cpu: Double,
+ memory: String,
+ ephemeralStorage: Option[String] = None,
+ replicas: Option[Int] = None,
+ podTemplate: Option[Pod] = None) {
+
+ def toTaskManagerSpec: flinkdeploymentspec.TaskManager = {
+ val spec = new TaskManager()
+ val resource = new flinkdeploymentspec.taskmanager.Resource().pipe { rs =>
+ rs.setCpu(cpu)
+ rs.setMemory(memory)
+ rs.setEphemeralStorage(ephemeralStorage.orNull)
+ rs
+ }
+ spec.setResource(resource)
+ replicas.foreach(spec.setReplicas(_))
+ podTemplate
+ .flatMap(pod => mapPodToPodTemplate(pod, classOf[flinkdeploymentspec.taskmanager.PodTemplate]).toOption)
+ .foreach(spec.setPodTemplate)
+ spec
+ }
+}
+
+/**
+ * Ingress definition for JobManager.
+ * Type-safe mirror of [[org.apache.flink.v1beta1.flinkdeploymentspec.Ingress]]
+ *
+ * There are two predefined definitions:
+ * - [[org.apache.streampark.flink.kubernetes.v2.model.IngressDef.simplePathBased]]
+ * - [[org.apache.streampark.flink.kubernetes.v2.model.IngressDef.simpleDomainBased]]
+ *
+ * @param template Ingress template name.
+ * @param className Ingress class name.
+ * @param annotations Ingress annotation.
+ */
+case class IngressDef(
+ template: String,
+ className: Option[String] = None,
+ annotations: Map[String, String] = Map.empty) {
+
+ def toIngressSpec: flinkdeploymentspec.Ingress = {
+ val spec = new Ingress()
+ spec.setTemplate(template)
+ className.foreach(spec.setClassName)
+ if (annotations.nonEmpty) spec.setAnnotations(annotations.asJava)
+ spec
+ }
+}
+
+object IngressDef {
+
+ lazy val simplePathBased: IngressDef = IngressDef(
+ template = "/{{namespace}}/{{name}}(/|$)(.*)",
+ annotations = Map("nginx.ingress.kubernetes.io/rewrite-target" -> "/$2")
+ )
+
+ lazy val simpleDomainBased: IngressDef = IngressDef(
+ template = "{{name}}.{{namespace}}.flink.k8s.io"
+ )
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/FlinkSessionJobDef.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/FlinkSessionJobDef.scala
new file mode 100644
index 0000000000..ffa645770b
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/FlinkSessionJobDef.scala
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+import io.fabric8.kubernetes.api.model.ObjectMeta
+import org.apache.flink.v1beta1.{FlinkSessionJob, FlinkSessionJobSpec}
+
+import scala.jdk.CollectionConverters.mapAsJavaMapConverter
+
+/**
+ * Flink Session job CR definition.
+ * Type-safe mirror of [[org.apache.flink.v1beta1.FlinkSessionJob]]
+ *
+ * @param namespace K8s CR namespace
+ * @param name K8s CR name
+ * @param deploymentName The name of the target session cluster deployment.
+ * @param job Job definition
+ * @param flinkConfiguration Flink configuration overrides for the Flink deployment or Flink session job.
+ * @param restartNonce Nonce used to manually trigger restart for the cluster/session job. In order to
+ * trigger restart, change the number to anything other than the current value.
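+ *
+ * A minimal sketch (names and paths are illustrative):
+ * {{{
+ *   FlinkSessionJobDef(
+ *     namespace = "default",
+ *     name = "my-session-job",
+ *     deploymentName = "my-session-cluster",
+ *     job = JobDef(jarURI = "/streampark/ws/assets/flink-faker-0.5.3.jar", parallelism = 1)
+ *   ).toFlinkSessionJob
+ * }}}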
+ */
+case class FlinkSessionJobDef(
+ namespace: String,
+ name: String,
+ deploymentName: String,
+ job: JobDef,
+ flinkConfiguration: Map[String, String] = Map.empty,
+ restartNonce: Option[Long] = None) {
+
+ // noinspection DuplicatedCode
+ def toFlinkSessionJob: FlinkSessionJob = {
+ val spec = new FlinkSessionJobSpec()
+ spec.setDeploymentName(deploymentName)
+ spec.setJob(job.toFlinkSessionJobSpec)
+ if (flinkConfiguration.nonEmpty) spec.setFlinkConfiguration(flinkConfiguration.asJava)
+ restartNonce.foreach(spec.setRestartNonce(_))
+
+ val sessionJob = new FlinkSessionJob()
+ val metadata = new ObjectMeta()
+ metadata.setNamespace(namespace)
+ metadata.setName(name)
+ sessionJob.setMetadata(metadata)
+ sessionJob.setSpec(spec)
+ sessionJob.setStatus(null)
+ sessionJob
+ }
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobDef.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobDef.scala
new file mode 100644
index 0000000000..b98cc58e6a
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobDef.scala
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+import org.apache.streampark.flink.kubernetes.v2.model.JobDef.{DesiredState, UpgradeMode}
+import org.apache.streampark.flink.kubernetes.v2.model.JobDef.DesiredState.DesiredState
+import org.apache.streampark.flink.kubernetes.v2.model.JobDef.UpgradeMode.UpgradeMode
+
+import org.apache.flink.v1beta1.{flinkdeploymentspec, flinksessionjobspec}
+
+import scala.jdk.CollectionConverters.seqAsJavaListConverter
+import scala.util.chaining.scalaUtilChainingOps
+
+/**
+ * Job definition.
+ * Type-safe mirror of [[org.apache.flink.v1beta1.flinkdeploymentspec.Job]] and [[org.apache.flink.v1beta1.flinksessionjobspec.Job]]
+ *
+ * @param jarURI URI of the job jar; only local files such as "/streampark/ws/assets/flink-faker-0.5.3.jar" are supported.
+ * @param parallelism Parallelism of the Flink job.
+ * @param entryClass Fully qualified main class name of the Flink job.
+ * @param args Arguments for the Flink job main class.
+ * @param state Desired state for the job.
+ * @param upgradeMode Upgrade mode of the Flink job.
+ * @param savepointTriggerNonce Nonce used to manually trigger savepoint for the running job. In order to trigger a savepoint,
+ * change the number to anything other than the current value.
+ * @param initialSavepointPath Savepoint path used by the job the first time it is deployed.
+ * Upgrades/redeployments will not be affected.
+ * @param allowNonRestoredState Allow checkpoint state that cannot be mapped to any job vertex in tasks.
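+ *
+ * A minimal sketch (the jar path is illustrative):
+ * {{{
+ *   JobDef(jarURI = "/streampark/ws/assets/flink-faker-0.5.3.jar", parallelism = 1)
+ * }}}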
+ */
+case class JobDef(
+ jarURI: String,
+ parallelism: Int,
+ entryClass: Option[String] = None,
+ args: Array[String] = Array.empty,
+ state: DesiredState = DesiredState.RUNNING,
+ upgradeMode: UpgradeMode = UpgradeMode.STATELESS,
+ savepointTriggerNonce: Option[Long] = None,
+ initialSavepointPath: Option[String] = None,
+ allowNonRestoredState: Option[Boolean] = None) {
+
+ // noinspection DuplicatedCode
+ def toFlinkDeploymentJobSpec: flinkdeploymentspec.Job = new flinkdeploymentspec.Job().pipe { spec =>
+ spec.setJarURI(jarURI)
+ spec.setParallelism(parallelism)
+ entryClass.foreach(spec.setEntryClass)
+ if (args.nonEmpty) spec.setArgs(args.toList.asJava)
+ spec.setState(DesiredState.toFlinkDeploymentEnum(state))
+ spec.setUpgradeMode(UpgradeMode.toFlinkDeploymentEnum(upgradeMode))
+
+ savepointTriggerNonce.foreach(spec.setSavepointTriggerNonce(_))
+ initialSavepointPath.foreach(spec.setInitialSavepointPath)
+ allowNonRestoredState.foreach(spec.setAllowNonRestoredState(_))
+ spec
+ }
+
+ // noinspection DuplicatedCode
+ def toFlinkSessionJobSpec: flinksessionjobspec.Job = new flinksessionjobspec.Job().pipe { spec =>
+ spec.setJarURI(jarURI)
+ spec.setParallelism(parallelism)
+ entryClass.foreach(spec.setEntryClass)
+ if (args.nonEmpty) spec.setArgs(args.toList.asJava)
+ spec.setState(DesiredState.toFlinkSessionJobEnum(state))
+ spec.setUpgradeMode(UpgradeMode.toFlinkSessionJobEnum(upgradeMode))
+
+ savepointTriggerNonce.foreach(spec.setSavepointTriggerNonce(_))
+ initialSavepointPath.foreach(spec.setInitialSavepointPath)
+ allowNonRestoredState.foreach(spec.setAllowNonRestoredState(_))
+ spec
+ }
+
+}
+
+object JobDef {
+
+ /**
+ * see:
+ * [[org.apache.flink.v1beta1.flinkdeploymentspec.Job.State]]
+ * [[org.apache.flink.v1beta1.flinksessionjobspec.Job.State]]
+ */
+ object DesiredState extends Enumeration {
+ type DesiredState = Value
+ val RUNNING, SUSPENDED = Value
+
+ def toFlinkDeploymentEnum(state: DesiredState): flinkdeploymentspec.Job.State = state match {
+ case RUNNING => flinkdeploymentspec.Job.State.RUNNING
+ case SUSPENDED => flinkdeploymentspec.Job.State.SUSPENDED
+ }
+
+ def toFlinkSessionJobEnum(state: DesiredState): flinksessionjobspec.Job.State = state match {
+ case RUNNING => flinksessionjobspec.Job.State.RUNNING
+ case SUSPENDED => flinksessionjobspec.Job.State.SUSPENDED
+ }
+ }
+
+ /**
+ * see:
+ * [[org.apache.flink.v1beta1.flinkdeploymentspec.Job.UpgradeMode]]
+ * [[org.apache.flink.v1beta1.flinksessionjobspec.Job.UpgradeMode]]
+ */
+ object UpgradeMode extends Enumeration {
+ type UpgradeMode = Value
+ val SAVEPOINT, LASTSTATE, STATELESS = Value
+
+ def toFlinkDeploymentEnum(mode: UpgradeMode): flinkdeploymentspec.Job.UpgradeMode = mode match {
+ case SAVEPOINT => flinkdeploymentspec.Job.UpgradeMode.SAVEPOINT
+ case LASTSTATE => flinkdeploymentspec.Job.UpgradeMode.LASTSTATE
+ case STATELESS => flinkdeploymentspec.Job.UpgradeMode.STATELESS
+ }
+
+ def toFlinkSessionJobEnum(mode: UpgradeMode): flinksessionjobspec.Job.UpgradeMode = mode match {
+ case SAVEPOINT => flinksessionjobspec.Job.UpgradeMode.SAVEPOINT
+ case LASTSTATE => flinksessionjobspec.Job.UpgradeMode.LASTSTATE
+ case STATELESS => flinksessionjobspec.Job.UpgradeMode.STATELESS
+ }
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobSavepointDef.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobSavepointDef.scala
new file mode 100644
index 0000000000..77b74ab623
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobSavepointDef.scala
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+/**
+ * Flink Job savepoint definition.
+ *
+ * @param drain If true, drain the job before taking the savepoint.
+ * @param savepointPath The path to save the savepoint.
+ * @param formatType The format type of the savepoint.
+ * @param triggerId The trigger id of the savepoint.
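+ *
+ * A minimal construction sketch (the savepoint path below is a hypothetical value):
+ * {{{
+ *   val savepoint = JobSavepointDef(
+ *     drain = true,
+ *     savepointPath = Some("s3://my-bucket/savepoints"),
+ *     formatType = Some(JobSavepointDef.CANONICAL_FORMAT))
+ * }}}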
+ */
+case class JobSavepointDef(
+ drain: Boolean = false,
+ savepointPath: Option[String] = None,
+ formatType: Option[String] = None,
+ triggerId: Option[String] = None)
+
+object JobSavepointDef {
+ val CANONICAL_FORMAT = "CANONICAL"
+ val NATIVE_FORMAT = "NATIVE"
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobSavepointStatus.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobSavepointStatus.scala
new file mode 100644
index 0000000000..41904ee5b7
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobSavepointStatus.scala
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+import org.apache.streampark.flink.kubernetes.v2.model.FlinkPipeOprState.FlinkPipeOprState
+
+/**
+ * Flink Job savepoint status.
+ *
+ * @param state The state of the savepoint process.
+ * @param failureCause The cause of failure.
+ * @param location The location of the savepoint.
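+ *
+ * For example, a status built from a raw operator state string (illustrative sketch):
+ * {{{
+ *   val status = JobSavepointStatus(FlinkPipeOprState.ofRaw("IN_PROGRESS"), None, None)
+ *   status.isCompleted // false
+ *   status.isFailed    // false
+ * }}}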
+ */
+case class JobSavepointStatus(state: FlinkPipeOprState, failureCause: Option[String], location: Option[String]) {
+ lazy val isCompleted = state == FlinkPipeOprState.Completed
+ lazy val isFailed = failureCause.isDefined
+}
+
+object FlinkPipeOprState extends Enumeration {
+ type FlinkPipeOprState = Value
+
+ val Completed = Value("COMPLETED")
+ val InProgress = Value("IN_PROGRESS")
+ val Unknown = Value("UNKNOWN")
+
+ def ofRaw(rawValue: String): FlinkPipeOprState =
+ FlinkPipeOprState.values.find(_.toString == rawValue).getOrElse(FlinkPipeOprState.Unknown)
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobSnapshot.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobSnapshot.scala
new file mode 100644
index 0000000000..0578141267
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobSnapshot.scala
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+import org.apache.streampark.flink.kubernetes.v2.model.EvalJobState.EvalJobState
+
+/**
+ * Flink job status snapshot identified by StreamPark app-id.
+ *
+ * The evaluated state of a snapshot can be converted to a
+ * [[org.apache.streampark.console.core.enums.FlinkAppState]] on the StreamPark console side.
+ *
+ * @param appId Ref to [[org.apache.streampark.console.core.entity.Application.id]]
+ * @param clusterNs Flink cluster namespace on kubernetes.
+ * @param clusterId Flink cluster name on kubernetes.
+ * @param evalState Final evaluated job state.
+ * @param crStatus Flink K8s CR status.
+ * @param jobStatus Flink job status received from REST API.
+ */
+case class JobSnapshot(
+ appId: Long,
+ clusterNs: String,
+ clusterId: String,
+ evalState: EvalJobState,
+ crStatus: Option[FlinkCRStatus],
+ jobStatus: Option[JobStatus])
+
+object JobSnapshot {
+
+ def eval(
+ appId: Long,
+ clusterNs: String,
+ clusterId: String,
+ crStatus: Option[FlinkCRStatus],
+ jobStatus: Option[JobStatus]): JobSnapshot = JobSnapshot(
+ appId = appId,
+ clusterNs = clusterNs,
+ clusterId = clusterId,
+ evalState = evalFinalJobState(crStatus, jobStatus),
+ crStatus = crStatus,
+ jobStatus = jobStatus
+ )
+
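+  // Evaluation rules: with neither CR nor REST status the job is considered LOST; a REST status at
+  // least as fresh as the CR status takes precedence; otherwise the CR evaluation decides, with the
+  // non-terminal DEPLOYING/READY phases falling back to the REST state (when available) or INITIALIZING.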
+ private def evalFinalJobState(crStatus: Option[FlinkCRStatus], jobStatus: Option[JobStatus]): EvalJobState =
+ (crStatus, jobStatus) match {
+ case (None, None) => EvalJobState.LOST
+ case (None, Some(jobStatus)) => EvalJobState.of(jobStatus.state)
+ case (Some(crStatus), None) =>
+ crStatus.evalState match {
+ case EvalState.DEPLOYING | EvalState.READY => EvalJobState.INITIALIZING
+ case EvalState.FAILED => EvalJobState.FAILED
+ case EvalState.SUSPENDED => EvalJobState.SUSPENDED
+ case EvalState.DELETED => EvalJobState.TERMINATED
+ }
+ case (Some(crStatus), Some(jobStatus)) =>
+ if (jobStatus.updatedTs >= crStatus.updatedTs) EvalJobState.of(jobStatus.state)
+ else {
+ crStatus.evalState match {
+ case EvalState.FAILED => EvalJobState.FAILED
+ case EvalState.SUSPENDED => EvalJobState.SUSPENDED
+ case EvalState.DELETED => EvalJobState.TERMINATED
+ case EvalState.READY => EvalJobState.of(jobStatus.state)
+ case EvalState.DEPLOYING =>
+ if (JobState.maybeDeploying.contains(jobStatus.state)) EvalJobState.of(jobStatus.state)
+ else EvalJobState.INITIALIZING
+ }
+ }
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobState.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobState.scala
new file mode 100644
index 0000000000..9463cf0933
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobState.scala
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+import org.apache.streampark.flink.kubernetes.v2.model.JobState.JobState
+
+/**
+ * Original Flink Job State.
+ * This enum is essentially equivalent to the meaning in the Flink REST API.
+ * see: [[org.apache.flink.kubernetes.operator.api.status.JobStatus]]
+ */
+object JobState extends Enumeration {
+
+ type JobState = Value
+ val INITIALIZING, CREATED, RUNNING, FAILING, FAILED, CANCELLING, CANCELED, FINISHED, RESTARTING, SUSPENDED,
+ RECONCILING = Value
+ val UNKNOWN = Value
+
+ def valueOf(raw: String): JobState = values.find(_.toString == raw).getOrElse(UNKNOWN)
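+  // Job states that may indicate the job is still being deployed or recovered.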
+ val maybeDeploying = Set(INITIALIZING, CREATED, RESTARTING, RECONCILING)
+}
+
+/**
+ * Evaluated Job State.
+ * This state is the result of a combination of Flink CR and REST API evaluations,
+ * It can be converted directly to StreamPark [[org.apache.streampark.console.core.enums.FlinkAppState]]
+ */
+object EvalJobState extends Enumeration {
+
+ type EvalJobState = Value
+
+ val INITIALIZING, CREATED, RUNNING, FAILING, FAILED, CANCELLING, CANCELED, FINISHED, RESTARTING, SUSPENDED,
+ RECONCILING = Value
+
+  // copied from [[org.apache.streampark.console.core.enums.FlinkAppState]]
+ val LOST, TERMINATED, OTHER = Value
+
+ def of(state: JobState): EvalJobState = values.find(e => e.toString == state.toString).getOrElse(OTHER)
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobStatus.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobStatus.scala
new file mode 100644
index 0000000000..54e5b04e37
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/JobStatus.scala
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+import org.apache.streampark.flink.kubernetes.v2.FlinkRestRequest.{JobOverviewInfo, TaskStats}
+import org.apache.streampark.flink.kubernetes.v2.model.JobState.JobState
+
+import org.apache.flink.v1beta1.flinkdeploymentstatus
+import org.apache.flink.v1beta1.flinksessionjobstatus
+
+import scala.util.Try
+
+/**
+ * Flink job status information.
+ *
+ * @param jobId Job id
+ * @param jobName Job name
+ * @param state Job state from the REST API
+ * @param startTs Job start timestamp
+ * @param endTs Job end timestamp
+ * @param tasks Tasks statistical information
+ * @param updatedTs Last updated timestamp
+ */
+case class JobStatus(
+ jobId: String,
+ jobName: String,
+ state: JobState,
+ startTs: Long,
+ endTs: Option[Long] = None,
+ tasks: Option[TaskStats] = None,
+ updatedTs: Long)
+
+object JobStatus {
+
+ // Convert from REST API object.
+ def fromRest(ov: JobOverviewInfo): JobStatus = JobStatus(
+ jobId = ov.jid,
+ jobName = ov.name,
+ state = JobState.valueOf(ov.state),
+ startTs = ov.startTime,
+ endTs = Some(ov.endTime),
+ updatedTs = ov.lastModifyTime,
+ tasks = Some(ov.tasks)
+ )
+
+ // Convert from Kubernetes CR object.
+ def fromDeployCR(status: flinkdeploymentstatus.JobStatus): JobStatus = JobStatus(
+ jobId = status.getJobId,
+ jobName = status.getJobName,
+ state = JobState.valueOf(status.getState),
+ startTs = Try(status.getStartTime.toLong).getOrElse(0L),
+ endTs = None,
+ updatedTs = Try(status.getUpdateTime.toLong).getOrElse(0L),
+ tasks = None
+ )
+
+ def fromSessionJobCR(status: flinksessionjobstatus.JobStatus): JobStatus = JobStatus(
+ jobId = status.getJobId,
+ jobName = status.getJobName,
+ state = JobState.valueOf(status.getState),
+ startTs = Try(status.getStartTime.toLong).getOrElse(0L),
+ endTs = None,
+ updatedTs = Try(status.getUpdateTime.toLong).getOrElse(0L),
+ tasks = None
+ )
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/RestSvcEndpoint.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/RestSvcEndpoint.scala
new file mode 100644
index 0000000000..9a124abe2c
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/RestSvcEndpoint.scala
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+import org.apache.streampark.flink.kubernetes.v2.observer.{reachFlinkRestType, AccessFlinkRestType}
+
+/**
+ * Flink rest service endpoint info on kubernetes.
+ *
+ * @param namespace k8s resource namespace
+ * @param name k8s resource name
+ * @param port Flink rest service port
+ * @param clusterIP Flink rest service clusterIP
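+ *
+ * For example (illustrative values):
+ * {{{
+ *   val endpoint = RestSvcEndpoint("flink-ns", "my-flink", 8081, "10.96.0.8")
+ *   endpoint.dns     // "my-flink.flink-ns"
+ *   endpoint.dnsRest // "http://my-flink.flink-ns:8081"
+ *   endpoint.ipRest  // "http://10.96.0.8:8081"
+ * }}}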
+ */
+case class RestSvcEndpoint(namespace: String, name: String, port: Int, clusterIP: String) {
+
+ /** k8s dns for flink rest service. */
+ lazy val dns: String = s"$name.$namespace"
+
+ lazy val dnsRest: String = s"http://$dns:$port"
+
+ lazy val ipRest: String = s"http://$clusterIP:$port"
+
+ /** Choose rest api http address according to [[reachFlinkRestType]]. */
+ def chooseRest: String = reachFlinkRestType match {
+ case AccessFlinkRestType.DNS => dnsRest
+ case AccessFlinkRestType.IP => ipRest
+ }
+
+ /** Choose rest api host according to [[reachFlinkRestType]]. */
+ def chooseHost: String = reachFlinkRestType match {
+ case AccessFlinkRestType.DNS => dns
+ case AccessFlinkRestType.IP => clusterIP
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/TrackKey.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/TrackKey.scala
new file mode 100644
index 0000000000..0c18429ce9
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/model/TrackKey.scala
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.model
+
+/** Flink Kubernetes resource tracking key; its id is a unique value defined in the StreamPark system. */
+sealed trait TrackKey {
+
+  /** The meaning of the id attribute depends on the specific resource. */
+ val id: Long
+
+ /** flink cluster namespace on kubernetes */
+ def clusterNamespace: String
+
+ /** flink cluster name on kubernetes */
+ def clusterName: String
+}
+
+object TrackKey {
+
+ def appJob(id: Long, namespace: String, name: String): ApplicationJobKey = ApplicationJobKey(id, namespace, name)
+
+ def sessionJob(id: Long, namespace: String, name: String, clusterName: String): SessionJobKey =
+ SessionJobKey(id, namespace, name, clusterName)
+
+ def cluster(id: Long, namespace: String, name: String): ClusterKey = ClusterKey(id, namespace, name)
+
+ def unmanagedSessionJob(id: Long, clusterNs: String, clusterName: String, jid: String): UnmanagedSessionJobKey =
+ UnmanagedSessionJobKey(id, clusterNs, clusterName, jid)
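+
+  // Illustrative usage sketch (the id, namespace and name below are hypothetical values):
+  //   val key = TrackKey.appJob(id = 10001L, namespace = "flink-ns", name = "my-flink-app")
+  //   FlinkK8sObserver.track(key)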
+
+ /**
+   * Key of a Flink application mode job.
+   * It is also compatible with application mode jobs managed by the Flink K8s operator,
+   * or managed directly by StreamPark in previous versions.
+ *
+ * @param id ref to [[org.apache.streampark.console.core.entity.Application.id]]
+ * @param namespace k8s CR namespace
+ * @param name k8s CR name
+ */
+ case class ApplicationJobKey(id: Long, namespace: String, name: String) extends TrackKey {
+ lazy val clusterNamespace = namespace
+ lazy val clusterName = name
+ }
+
+ /**
+   * Key of a Flink session mode job managed by the Flink K8s operator.
+ *
+ * @param id ref to [[org.apache.streampark.console.core.entity.Application.id]]
+ * @param namespace k8s CR namespace
+   * @param name k8s CR name
+ * @param clusterName The CR name of the target flink cluster submitted by the job.
+ */
+ case class SessionJobKey(id: Long, namespace: String, name: String, clusterName: String) extends TrackKey {
+ lazy val clusterNamespace = namespace
+ }
+
+ /**
+   * Key of a Flink cluster managed by the Flink K8s operator.
+ *
+ * @param id ref to [[org.apache.streampark.console.core.entity.FlinkCluster.id]]
+ * @param namespace k8s CR namespace
+   * @param name k8s CR name
+ */
+ case class ClusterKey(id: Long, namespace: String, name: String) extends TrackKey {
+ lazy val clusterNamespace = namespace
+ lazy val clusterName = name
+ }
+
+ /**
+   * Key for jobs submitted directly to a Flink session cluster on K8s, compatible with previous StreamPark versions.
+ *
+   * @param id ref to [[org.apache.streampark.console.core.entity.Application.id]]
+   * @param namespace flink cluster k8s namespace
+   * @param clusterId flink cluster k8s name
+   * @param jid Flink job id
+ */
+ case class UnmanagedSessionJobKey(id: Long, namespace: String, clusterId: String, jid: String) extends TrackKey {
+ lazy val clusterNamespace = namespace
+ lazy val clusterName = clusterId
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/DeployCRObserver.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/DeployCRObserver.scala
new file mode 100644
index 0000000000..43904ee1e5
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/DeployCRObserver.scala
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.observer
+
+import org.apache.streampark.common.zio.ZIOExt.{OptionZIOOps, UIOOps}
+import org.apache.streampark.flink.kubernetes.v2.K8sTools.{watchK8sResourceForever, K8sResourceWatcher}
+import org.apache.streampark.flink.kubernetes.v2.model._
+
+import org.apache.flink.v1beta1.FlinkDeployment
+import zio.UIO
+import zio.concurrent.ConcurrentMap
+
+/**
+ * Observer for FlinkDeployment K8s CRs.
+ *
+ * See: [[org.apache.flink.v1beta1.FlinkDeployment]]
+ */
+case class DeployCRObserver(deployCRSnaps: ConcurrentMap[(Namespace, Name), (DeployCRStatus, Option[JobStatus])]) {
+
+ // store all hooks of listeners fibers
+ private val watchers = ConcurrentMap.empty[(Namespace, Name), K8sResourceWatcher[FlinkDeployment]].runUIO
+
+ /** Monitor the status of K8s FlinkDeployment CR for a specified namespace and name. */
+ def watch(namespace: String, name: String): UIO[Unit] =
+ watchers
+ .get((namespace, name))
+ .noneOrUnitZIO {
+ val watch = launchProc(namespace, name)
+ watchers.put((namespace, name), watch) *>
+ watch.launch
+ }
+
+ def unWatch(namespace: String, name: String): UIO[Unit] =
+ watchers
+ .get((namespace, name))
+ .someOrUnitZIO { watcher =>
+ watcher.stop *>
+ watchers.remove((namespace, name)).unit
+ }
+
+// private def existCr(namespace: String, name: String): IO[Throwable, Boolean] =
+// usingK8sClient { client =>
+// client
+// .resources(classOf[FlinkDeployment])
+// .inNamespace(namespace)
+// .withName(name)
+// .get != null
+// }
+
+ private def launchProc(namespace: String, name: String): K8sResourceWatcher[FlinkDeployment] =
+ watchK8sResourceForever(client =>
+ client
+ .resources(classOf[FlinkDeployment])
+ .inNamespace(namespace)
+ .withName(name)) { stream =>
+ stream
+ // Eval FlinkDeployment status
+ .map { case (action, deployment) =>
+ DeployCRStatus.eval(action, deployment) ->
+ Option(deployment.getStatus.getJobStatus).map(JobStatus.fromDeployCR)
+ }
+ // Update FlinkDeployment status cache
+ .tap(status => deployCRSnaps.put((namespace, name), status))
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/FlinkK8sObserver.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/FlinkK8sObserver.scala
new file mode 100644
index 0000000000..a355be3d38
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/FlinkK8sObserver.scala
@@ -0,0 +1,283 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.observer
+
+import org.apache.streampark.common.zio.ZIOExt.UIOOps
+import org.apache.streampark.flink.kubernetes.v2.K8sTools.usingK8sClient
+import org.apache.streampark.flink.kubernetes.v2.model._
+import org.apache.streampark.flink.kubernetes.v2.model.TrackKey._
+
+import org.apache.flink.v1beta1.{FlinkDeployment, FlinkDeploymentSpec, FlinkSessionJob, FlinkSessionJobSpec}
+import zio.{IO, Ref, Schedule, UIO}
+import zio.ZIO.logInfo
+import zio.concurrent.{ConcurrentMap, ConcurrentSet}
+import zio.stream.ZStream
+
+/** Flink Kubernetes resource observer. */
+sealed trait FlinkK8sObserver {
+
+ /** Start tracking resources. */
+ def track(key: TrackKey): UIO[Unit]
+
+ /** Stop tracking resources. */
+ def untrack(key: TrackKey): UIO[Unit]
+
+ /** All tracked key in observer. */
+ def trackedKeys: ConcurrentSet[TrackKey]
+
+ /**
+ * Snapshots of the Flink jobs that have been evaluated.
+ *
+ * This value can be subscribed as a ZStream structure.
+ * Example:
+ * {{{
+ * val stream: UStream[Chunk[(AppId, JobSnapshot)]] = evaluatedJobSnaps.subscribe()
+ * val stream: UStream[(AppId, JobSnapshot)] = evaluatedJobSnaps.flatSubscribe()
+ * val stream: UStream[JobSnapshot] = evaluatedJobSnaps.flatSubscribeValues()
+ * }}}
+ * Ref: [[org.apache.streampark.common.zio.ZIOContainerSubscription.RefMapExtension]]
+ */
+ def evaluatedJobSnaps: Ref[Map[AppId, JobSnapshot]]
+
+ /**
+ * Flink rest service endpoint snapshots cache.
+ *
+ * This value can be subscribed as a ZStream structure.
+ * {{{
+ * val stream: UStream[Chunk[((Namespace, Name), RestSvcEndpoint)]] = restSvcEndpointSnaps.subscribe()
+ * val stream: UStream[((Namespace, Name), RestSvcEndpoint)] = restSvcEndpointSnaps.flatSubscribe()
+ * val stream: UStream[RestSvcEndpoint] = restSvcEndpointSnaps.flatSubscribeValues()
+ * }}}
+ * Ref: [[org.apache.streampark.common.zio.ZIOContainerSubscription.ConcurrentMapExtension]]
+ */
+ def restSvcEndpointSnaps: ConcurrentMap[(Namespace, Name), RestSvcEndpoint]
+
+ /**
+ * Flink cluster metrics snapshots.
+ *
+ * This value can be subscribed as a ZStream structure.
+ * {{{
+ * val stream: UStream[Chunk[((Namespace, Name), ClusterMetrics)]] = clusterMetricsSnaps.subscribe()
+ * val stream: UStream[((Namespace, Name), ClusterMetrics)] = clusterMetricsSnaps.flatSubscribe()
+ * val stream: UStream[ClusterMetrics] = clusterMetricsSnaps.flatSubscribeValues()
+ * }}}
+ * Ref: [[org.apache.streampark.common.zio.ZIOContainerSubscription.ConcurrentMapExtension]]
+ */
+ def clusterMetricsSnaps: ConcurrentMap[(Namespace, Name), ClusterMetrics]
+
+ /** Get Flink Deployment CR spec from K8s. */
+ def getFlinkDeploymentCrSpec(ns: String, name: String): IO[Throwable, Option[FlinkDeploymentSpec]]
+
+ /** Get Flink SessionJob CR spec from K8s. */
+ def getFlinkSessionJobCrSpec(ns: String, name: String): IO[Throwable, Option[FlinkSessionJobSpec]]
+
+}
+
+object FlinkK8sObserver extends FlinkK8sObserver {
+
+  // The following snapshots are visible to external callers.
+ val trackedKeys = ConcurrentSet.empty[TrackKey].runUIO
+ val evaluatedJobSnaps = Ref.make(Map.empty[AppId, JobSnapshot]).runUIO
+ val restSvcEndpointSnaps = ConcurrentMap.empty[(Namespace, Name), RestSvcEndpoint].runUIO
+ val clusterMetricsSnaps = ConcurrentMap.empty[(Namespace, Name), ClusterMetrics].runUIO
+
+ // In general, there is no need to view these snapshots externally.
+ val deployCRSnaps = ConcurrentMap.empty[(Namespace, Name), (DeployCRStatus, Option[JobStatus])].runUIO
+ val sessionJobCRSnaps = ConcurrentMap.empty[(Namespace, Name), (SessionJobCRStatus, Option[JobStatus])].runUIO
+ val clusterJobStatusSnaps = ConcurrentMap.empty[(Namespace, Name), Vector[JobStatus]].runUIO
+
+ private val restSvcEndpointObserver = RestSvcEndpointObserver(restSvcEndpointSnaps)
+ private val deployCrObserver = DeployCRObserver(deployCRSnaps)
+ private val sessionJobCRObserver = SessionJobCRObserver(sessionJobCRSnaps)
+ private val clusterObserver = RawClusterObserver(restSvcEndpointSnaps, clusterJobStatusSnaps, clusterMetricsSnaps)
+
+ // Auto eval job snapshots forever.
+ evalJobSnapshot
+ .repeat(Schedule.spaced(evalJobSnapInterval))
+ .forever
+ .forkDaemon
+ .runUIO
+
+ /** Start tracking resources. */
+ override def track(key: TrackKey): UIO[Unit] = {
+
+ def trackCluster(ns: String, name: String): UIO[Unit] = {
+ for {
+ _ <- deployCrObserver.watch(ns, name)
+ _ <- restSvcEndpointObserver.watch(ns, name)
+ _ <- clusterObserver.watch(ns, name)
+ } yield ()
+ }
+
+ def trackSessionJob(ns: String, name: String, refDeployName: String): UIO[Unit] = {
+ sessionJobCRObserver.watch(ns, name) *>
+ trackCluster(ns, refDeployName)
+ }
+
+ for {
+ _ <- key match {
+ case ApplicationJobKey(id, ns, name) => trackCluster(ns, name)
+ case SessionJobKey(id, ns, name, clusterName) => trackSessionJob(ns, name, clusterName)
+ case UnmanagedSessionJobKey(id, clusterNs, clusterId, jid) => trackCluster(clusterNs, clusterId)
+ case ClusterKey(id, ns, name) => trackCluster(ns, name)
+ }
+ _ <- trackedKeys.add(key).unit
+ _ <- logInfo(s"Start watching Flink resource: $key")
+ } yield ()
+ }
+
+ /** Stop tracking resources. */
+ override def untrack(key: TrackKey): UIO[Unit] = {
+
+ def unTrackCluster(ns: String, name: String): UIO[Unit] = for {
+ _ <- deployCrObserver.unWatch(ns, name)
+ _ <- restSvcEndpointObserver.unWatch(ns, name)
+ _ <- clusterObserver.unWatch(ns, name)
+ } yield ()
+
+ def unTrackSessionJob(ns: String, name: String) = {
+ sessionJobCRObserver.unWatch(ns, name)
+ }
+
+ def unTrackPureCluster(ns: String, name: String) = unTrackCluster(ns, name).whenZIO {
+ trackedKeys.toSet
+ .map(set =>
+ // When a flink cluster is referenced by another resource, tracking of that cluster is maintained.
+ set.find {
+ case k: ApplicationJobKey if k.namespace == ns && k.name == name => true
+ case k: SessionJobKey if k.namespace == ns && k.clusterName == name => true
+ case k: UnmanagedSessionJobKey if k.clusterNamespace == ns && k.clusterId == name => true
+ case _ => false
+ })
+ .map(_.isEmpty)
+ }
+
+ def unTrackUnmanagedSessionJob(clusterNs: String, clusterName: String) =
+ unTrackCluster(clusterNs, clusterName).whenZIO {
+ trackedKeys.toSet
+ .map(set =>
+ // When a flink cluster is referenced by another resource, tracking of that cluster is maintained.
+ set.find {
+ case k: ApplicationJobKey if k.namespace == clusterNs && k.name == clusterName => true
+ case k: SessionJobKey if k.namespace == clusterNs && k.clusterName == clusterName => true
+ case k: ClusterKey if k.namespace == clusterNs && k.name == clusterName => true
+ case _ => false
+ })
+ .map(_.isEmpty)
+ }.unit
+
+ for {
+ _ <- key match {
+ case ApplicationJobKey(id, ns, name) => unTrackCluster(ns, name)
+ case SessionJobKey(id, ns, name, clusterName) => unTrackSessionJob(ns, name)
+ case ClusterKey(id, ns, name) => unTrackPureCluster(ns, name)
+ case UnmanagedSessionJobKey(id, clusterNs, clusterName, jid) =>
+ unTrackUnmanagedSessionJob(clusterNs, clusterName)
+ }
+ _ <- trackedKeys.remove(key)
+ _ <- logInfo(s"Stop watching Flink resource: $key")
+ } yield ()
+ }
+
+ /** Re-evaluate all job status snapshots from caches. */
+ private def evalJobSnapshot: UIO[Unit] = {
+
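+    // Merge rule: prefer the REST status when it is strictly newer; otherwise keep the CR status,
+    // enriched with the REST-only fields (endTs, tasks).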
+ def mergeJobStatus(crStatus: Option[JobStatus], restStatus: Option[JobStatus]) =
+ (crStatus, restStatus) match {
+ case (Some(e), None) => Some(e)
+ case (None, Some(e)) => Some(e)
+ case (None, None) => None
+ case (Some(cr), Some(rest)) =>
+ Some(
+ if (rest.updatedTs > cr.updatedTs) rest
+ else cr.copy(endTs = rest.endTs, tasks = rest.tasks)
+ )
+ }
+
+ ZStream
+ .fromIterableZIO(trackedKeys.toSet)
+ .filter { key =>
+ key.isInstanceOf[ApplicationJobKey] || key.isInstanceOf[SessionJobKey] || key
+ .isInstanceOf[UnmanagedSessionJobKey]
+ }
+ // Evaluate job snapshots for each TrackKey in parallel.
+ .mapZIOParUnordered(evalJobSnapParallelism) {
+ case ApplicationJobKey(id, ns, name) =>
+ for {
+ crSnap <- deployCRSnaps.get(ns, name)
+ restJobStatusVec <- clusterJobStatusSnaps.get((ns, name))
+ crStatus = crSnap.map(_._1)
+
+ jobStatusFromCr = crSnap.flatMap(_._2)
+ jobStatusFromRest = restJobStatusVec.flatMap(_.headOption)
+ finalJobStatus = mergeJobStatus(jobStatusFromCr, jobStatusFromRest)
+
+ } yield JobSnapshot.eval(id, ns, name, crStatus, finalJobStatus)
+
+ case SessionJobKey(id, ns, name, clusterName) =>
+ for {
+ sessionJobSnap <- sessionJobCRSnaps.get(ns, name)
+ restJobStatusVec <- clusterJobStatusSnaps.get((ns, clusterName))
+ crStatus = sessionJobSnap.map(_._1)
+
+ jobStatusFromCr = sessionJobSnap.flatMap(_._2)
+ jobId = jobStatusFromCr.map(_.jobId).getOrElse("")
+ jobStatusFromRest = restJobStatusVec.flatMap(_.find(_.jobId == jobId))
+ finalJobStatus = mergeJobStatus(jobStatusFromCr, jobStatusFromRest)
+
+ } yield JobSnapshot.eval(id, ns, clusterName, crStatus, finalJobStatus)
+
+ case UnmanagedSessionJobKey(id, clusterNs, clusterName, jid) =>
+ for {
+ restJobStatusVec <- clusterJobStatusSnaps.get((clusterNs, clusterName))
+ jobStatus = restJobStatusVec.flatMap(_.find(_.jobId == jid))
+ } yield JobSnapshot.eval(id, clusterNs, clusterName, None, jobStatus)
+ }
+ // Collect result and Refresh evaluatedJobSnaps cache
+ .runCollect
+ .map(chunk => chunk.map(snap => (snap.appId, snap)).toMap)
+ .flatMap(map => evaluatedJobSnaps.set(map))
+ .unit
+ }
+
+ /** Get Flink Deployment CR spec from K8s. */
+ override def getFlinkDeploymentCrSpec(ns: String, name: String): IO[Throwable, Option[FlinkDeploymentSpec]] =
+ usingK8sClient { client =>
+ Option(
+ client
+ .resources(classOf[FlinkDeployment])
+ .inNamespace(ns)
+ .withName(name)
+ .get()
+ ).map(_.getSpec)
+ }
+
+ /** Get Flink SessionJob CR spec from K8s. */
+ override def getFlinkSessionJobCrSpec(ns: String, name: String): IO[Throwable, Option[FlinkSessionJobSpec]] = {
+ usingK8sClient { client =>
+ Option(
+ client
+ .resources(classOf[FlinkSessionJob])
+ .inNamespace(ns)
+ .withName(name)
+ .get()
+ ).map(_.getSpec)
+ }
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/RawClusterObserver.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/RawClusterObserver.scala
new file mode 100644
index 0000000000..d4a7e12b78
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/RawClusterObserver.scala
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.observer
+
+import org.apache.streampark.common.zio.ZIOExt.{OptionZIOOps, UIOOps}
+import org.apache.streampark.flink.kubernetes.v2.{FlinkMemorySizeParser, FlinkRestRequest}
+import org.apache.streampark.flink.kubernetes.v2.model._
+
+import zio.{Fiber, Schedule, UIO, ZIO}
+import zio.concurrent.ConcurrentMap
+import zio.stream.ZStream
+
+import scala.util.Try
+
+/** Observer for Flink cluster REST API. */
+case class RawClusterObserver(
+ restSvcEndpointSnaps: ConcurrentMap[(Namespace, Name), RestSvcEndpoint],
+ clusterJobStatusSnaps: ConcurrentMap[(Namespace, Name), Vector[JobStatus]],
+ clusterMetricsSnaps: ConcurrentMap[(Namespace, Name), ClusterMetrics]) {
+
+ private val jobOverviewPollFibers = ConcurrentMap.empty[(Namespace, Name), Fiber.Runtime[_, _]].runUIO
+ private val clusterMetricsPollFibers = ConcurrentMap.empty[(Namespace, Name), Fiber.Runtime[_, _]].runUIO
+
+ def watch(namespace: String, name: String): UIO[Unit] = {
+ watchJobOverviews(namespace, name) *>
+ watchClusterMetrics(namespace, name)
+ }
+
+ def unWatch(namespace: String, name: String): UIO[Unit] = {
+ unWatchJobOverviews(namespace, name) *>
+ unWatchClusterMetrics(namespace, name)
+ }
+
+ /** Monitor Flink job overview API. */
+ private def watchJobOverviews(namespace: String, name: String): UIO[Unit] = {
+
+ val procEffect = ZStream
+ // retrieve rest endpoint
+ .fromZIO(
+ restSvcEndpointSnaps
+ .get(namespace, name)
+ .flatMap {
+ case None => ZIO.fail(RestEndpointNotFound)
+ // request job overview api
+ case Some(endpoint) => FlinkRestRequest(endpoint.chooseRest).listJobOverviewInfo
+ })
+ .retry(Schedule.spaced(restRetryInterval))
+ .map(jobOverviews => jobOverviews.map(info => JobStatus.fromRest(info)))
+ .tap(jobStatuses => clusterJobStatusSnaps.put((namespace, name), jobStatuses))
+ .repeat(Schedule.spaced(restPollingInterval))
+ .runDrain
+ .forkDaemon
+
+ jobOverviewPollFibers
+ .get((namespace, name))
+ .noneOrUnitZIO {
+ procEffect.flatMap(fiber => jobOverviewPollFibers.put((namespace, name), fiber))
+ }
+ }
+
+ private def unWatchJobOverviews(namespace: String, name: String): UIO[Unit] = {
+ jobOverviewPollFibers
+ .get((namespace, name))
+ .someOrUnitZIO(fiber => fiber.interrupt)
+ }
+
+ /** Monitor Flink cluster metrics via cluster overview API and jm configuration API. */
+ private def watchClusterMetrics(namespace: String, name: String): UIO[Unit] = {
+
+ val effect = ZStream
+ // retrieve rest endpoint
+ .fromZIO(
+ restSvcEndpointSnaps
+ .get(namespace, name)
+ .flatMap {
+ case None => ZIO.fail(RestEndpointNotFound)
+ case Some(endpoint) =>
+ // request cluster overview & jobmanager config api in parallel
+ FlinkRestRequest(endpoint.chooseRest).getClusterOverview <&>
+ FlinkRestRequest(endpoint.chooseRest).getJobmanagerConfig
+ })
+ .retry(Schedule.spaced(restRetryInterval))
+ .map { case (clusterOv, jmConfigs) =>
+ val totalJmMemory = FlinkMemorySizeParser
+ .parse(jmConfigs.getOrElse("jobmanager.memory.process.size", "0b"))
+ .map(_.mebiBytes)
+ .map(e => Try(e.toInt).getOrElse(0))
+ .getOrElse(0)
+
+ val totalTmMemory = FlinkMemorySizeParser
+ .parse(jmConfigs.getOrElse("taskmanager.memory.process.size", "0b"))
+ .map(_.mebiBytes * clusterOv.taskManagers)
+ .map(e => Try(e.toInt).getOrElse(0))
+ .getOrElse(0)
+
+ ClusterMetrics(
+ totalJmMemory = totalJmMemory,
+ totalTmMemory = totalTmMemory,
+ totalTm = clusterOv.taskManagers,
+ totalSlot = clusterOv.slotsTotal,
+ availableSlot = clusterOv.slotsAvailable,
+ runningJob = clusterOv.jobsRunning,
+ cancelledJob = clusterOv.jobsFinished,
+ failedJob = clusterOv.jobsFailed
+ )
+ }
+ .tap(metrics => clusterMetricsSnaps.put((namespace, name), metrics))
+ .repeat(Schedule.spaced(restPollingInterval))
+ .runDrain
+ .forkDaemon
+
+ clusterMetricsPollFibers
+ .get((namespace, name))
+ .noneOrUnitZIO {
+ effect.flatMap(fiber => clusterMetricsPollFibers.put((namespace, name), fiber)).unit
+ }
+ }
+
+ // noinspection DuplicatedCode
+ private def unWatchClusterMetrics(namespace: String, name: String): UIO[Unit] = {
+ clusterMetricsPollFibers
+ .get((namespace, name))
+ .someOrUnitZIO(fiber => fiber.interrupt.unit)
+ }
+
+ private case object RestEndpointNotFound
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/RestSvcEndpointObserver.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/RestSvcEndpointObserver.scala
new file mode 100644
index 0000000000..ff97e2c5a9
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/RestSvcEndpointObserver.scala
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.observer
+
+import org.apache.streampark.common.zio.ZIOExt.{OptionZIOOps, UIOOps}
+import org.apache.streampark.flink.kubernetes.v2.K8sTools.{watchK8sResourceForever, K8sResourceWatcher}
+import org.apache.streampark.flink.kubernetes.v2.model.RestSvcEndpoint
+
+import io.fabric8.kubernetes.api.model.Service
+import io.fabric8.kubernetes.client.Watcher
+import zio.UIO
+import zio.concurrent.ConcurrentMap
+
+import scala.jdk.CollectionConverters._
+
+/**
+ * Observer for the Flink cluster REST service endpoint on K8s,
+ * monitoring the corresponding rest Service resource for a specified namespace and name.
+ */
+case class RestSvcEndpointObserver(restSvcEndpointSnaps: ConcurrentMap[(Namespace, Name), RestSvcEndpoint]) {
+
+ // store all hooks of listeners fibers
+ private val watchers = ConcurrentMap.empty[(Namespace, Name), K8sResourceWatcher[Service]].runUIO
+
+ def watch(namespace: String, name: String): UIO[Unit] =
+ watchers
+ .get((namespace, name))
+ .noneOrUnitZIO {
+ val watch = launchProc(namespace, name)
+ watchers.put((namespace, name), watch) *> watch.launch
+ }
+
+ def unWatch(namespace: String, name: String): UIO[Unit] =
+ watchers
+ .get((namespace, name))
+ .someOrUnitZIO { watcher =>
+ for {
+ _ <- watcher.stop
+ _ <- watchers.remove((namespace, name))
+ _ <- restSvcEndpointSnaps.remove((namespace, name))
+ } yield ()
+ }
+
+ private def launchProc(namespace: String, name: String): K8sResourceWatcher[Service] =
+ watchK8sResourceForever(client =>
+ client
+ .services()
+ .inNamespace(namespace)
+ .withName(s"$name-rest")) { stream =>
+ stream
+ .map {
+ case (Watcher.Action.DELETED, _) => None
+ case (_, svc) =>
+ val namespace = svc.getMetadata.getNamespace
+ val name = svc.getMetadata.getName
+ val clusterIP = svc.getSpec.getClusterIP
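+            // Prefer the target port of the service port 8081 when present; fall back to 8081 itself.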
+ val port = svc.getSpec.getPorts.asScala
+ .find(_.getPort == 8081)
+ .map(_.getTargetPort.getIntVal.toInt)
+ .getOrElse(8081)
+ Some(RestSvcEndpoint(namespace, name, port, clusterIP))
+ }
+ .mapZIO {
+ case None => restSvcEndpointSnaps.remove((namespace, name))
+ case Some(endpoint) => restSvcEndpointSnaps.put((namespace, name), endpoint)
+ }
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/SessionJobCRObserver.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/SessionJobCRObserver.scala
new file mode 100644
index 0000000000..73b7764735
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/SessionJobCRObserver.scala
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.observer
+
+import org.apache.streampark.common.zio.ZIOExt.{OptionZIOOps, UIOOps}
+import org.apache.streampark.flink.kubernetes.v2.K8sTools.{watchK8sResourceForever, K8sResourceWatcher}
+import org.apache.streampark.flink.kubernetes.v2.model.{JobStatus, SessionJobCRStatus}
+
+import org.apache.flink.v1beta1.FlinkSessionJob
+import zio.UIO
+import zio.concurrent.ConcurrentMap
+
+/**
+ * Observer for FlinkSessionJob K8s CRs,
+ * monitoring the status of FlinkSessionJob CRs for a specified namespace and name.
+ *
+ * See: [[org.apache.flink.v1beta1.FlinkSessionJob]]
+ */
+case class SessionJobCRObserver(
+ sessionJobCRSnaps: ConcurrentMap[(Namespace, Name), (SessionJobCRStatus, Option[JobStatus])]) {
+
+ // store all hooks of listeners fibers
+ private val watchers = ConcurrentMap.empty[(Namespace, Name), K8sResourceWatcher[FlinkSessionJob]].runUIO
+
+ def watch(namespace: String, name: String): UIO[Unit] =
+ watchers
+ .get((namespace, name))
+ .noneOrUnitZIO {
+ val watch = launchProc(namespace, name)
+ watchers.put((namespace, name), watch) *>
+ watch.launch
+ }
+
+ def unWatch(namespace: String, name: String): UIO[Unit] =
+ watchers
+ .get((namespace, name))
+ .someOrUnitZIO { watcher =>
+ watcher.stop *>
+ watchers.remove((namespace, name))
+ }
+
+ private def launchProc(namespace: String, name: String): K8sResourceWatcher[FlinkSessionJob] =
+ watchK8sResourceForever(client =>
+ client
+ .resources(classOf[FlinkSessionJob])
+ .inNamespace(namespace)
+ .withName(name)) { stream =>
+ stream
+ // eval SessionJobCR status
+ .map { case (action, cr) =>
+ SessionJobCRStatus.eval(action, cr) ->
+ Option(cr.getStatus.getJobStatus).map(JobStatus.fromSessionJobCR)
+ }
+ // update SessionJobCR status cache
+ .tap(status => sessionJobCRSnaps.put((namespace, name), status))
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/package.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/package.scala
new file mode 100644
index 0000000000..0f1e6dca01
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/observer/package.scala
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2
+
+import org.apache.streampark.common.conf.InternalConfigHolder
+import org.apache.streampark.flink.kubernetes.v2.Config._
+import org.apache.streampark.flink.kubernetes.v2.observer.AccessFlinkRestType.AccessFlinkRestType
+
+import zio.{durationLong, Duration}
+
+import scala.util.chaining.scalaUtilChainingOps
+
+/**
+ * Note: the entry point of this package is [[FlinkK8sObserver]].
+ */
+package object observer {
+
+ type Namespace = String
+ type Name = String
+ type AppId = Long
+
+ lazy val evalJobSnapParallelism: Int = InternalConfigHolder.get(EVAL_FLINK_JOB_SNAPSHOT_PARALLELISM)
+ lazy val evalJobSnapInterval: Duration = InternalConfigHolder.get[Long](EVAL_FLINK_JOB_SNAP_INTERVAL_MILLIS).millis
+
+ lazy val restPollingInterval: Duration = InternalConfigHolder.get[Long](POLL_FLINK_REST_INTERVAL).millis
+ lazy val restRetryInterval: Duration = InternalConfigHolder.get[Long](RETRY_FLINK_REST_INTERVAL).millis
+
+ val reachFlinkRestType: AccessFlinkRestType = InternalConfigHolder
+ .get[String](REACH_FLINK_REST_TYPE)
+ .pipe { plain =>
+ AccessFlinkRestType.values
+ .find(_.toString.equalsIgnoreCase(plain))
+ .getOrElse(AccessFlinkRestType.IP)
+ }
+
+ object AccessFlinkRestType extends Enumeration {
+ type AccessFlinkRestType = Value
+ val DNS, IP = Value
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/CROperator.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/CROperator.scala
new file mode 100644
index 0000000000..6cefcf5176
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/CROperator.scala
@@ -0,0 +1,247 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.operator
+
+import org.apache.streampark.flink.kubernetes.v2.{pathLastSegment, yamlMapper}
+import org.apache.streampark.flink.kubernetes.v2.K8sTools.usingK8sClient
+import org.apache.streampark.flink.kubernetes.v2.httpfs.FileMirror
+import org.apache.streampark.flink.kubernetes.v2.model.{FlinkDeploymentDef, FlinkSessionJobDef, JobDef}
+import org.apache.streampark.flink.kubernetes.v2.observer.FlinkK8sObserver
+
+import io.fabric8.kubernetes.api.model._
+import org.apache.flink.v1beta1.{FlinkDeployment, FlinkSessionJob}
+import zio.{IO, UIO, ZIO}
+import zio.stream.ZStream
+
+import java.util
+
+import scala.jdk.CollectionConverters._
+
+/**
+ * Flink Kubernetes CR operator.
+ * Responsible for applying and deleting operations on CR resources.
+ */
+sealed trait CROperator {
+
+ /** Apply FlinkDeployment CR. */
+ def applyDeployment(spec: FlinkDeploymentDef): IO[Throwable, Unit]
+
+ /** Apply FlinkSessionJob CR. */
+ def applySessionJob(spec: FlinkSessionJobDef): IO[Throwable, Unit]
+
+ /** Delete FlinkDeployment CR. */
+ def deleteDeployment(namespace: String, name: String): IO[Throwable, Unit]
+
+ /** Delete FlinkSessionJob CR. */
+ def deleteSessionJob(namespace: String, name: String): IO[Throwable, Unit]
+
+}
+
+object CROperator extends CROperator {
+
+ /** Apply FlinkDeployment CR. */
+ // noinspection DuplicatedCode
+ override def applyDeployment(spec: FlinkDeploymentDef): IO[Throwable, Unit] = {
+ lazy val mirrorSpace = s"${spec.namespace}_${spec.name}"
+ for {
+ // Generate FlinkDeployment CR
+ correctedJob <- mirrorJobJarToHttpFileServer(spec.job, mirrorSpace)
+ correctedExtJars <- mirrorExtJarsToHttpFileServer(spec.extJarPaths, mirrorSpace)
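+      // Job jars and extra jars that end up served over http are fetched into /opt/flink/lib by an
+      // injected init container (see correctPodSpec), so the job jar URI is rewritten to a local:// path.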
+ correctedPod <- correctPodSpec(
+ spec.podTemplate,
+ correctedExtJars ++ correctedJob.map(_.jarURI).filter(_.startsWith("http://")).toArray[String]
+ )
+ correctedLocalUriJob = correctedJob.map { jobDef =>
+ if (!jobDef.jarURI.startsWith("http://")) jobDef
+ else jobDef.copy("local:///opt/flink/lib/" + pathLastSegment(jobDef.jarURI))
+ }
+ correctedSpec = spec.copy(
+ job = correctedLocalUriJob,
+ extJarPaths = correctedExtJars,
+ podTemplate = correctedPod
+ )
+ flinkDeployCR = correctedSpec.toFlinkDeployment
+ // Logging CR yaml
+ _ <- ZIO
+ .attempt(yamlMapper.writeValueAsString(flinkDeployCR))
+ .catchAll(e => ZIO.succeed(e.getMessage))
+ .flatMap(yaml => ZIO.logInfo(s"Applying FlinkDeployment K8s CR: \n$yaml"))
+ .when(logFlinkCrYaml)
+
+ // Apply FlinkDeployment CR to kubernetes
+ isCrExist <- FlinkK8sObserver.getFlinkDeploymentCrSpec(spec.namespace, spec.name).map(_.isDefined)
+ _ <- usingK8sClient { client =>
+ if (isCrExist) client.resource(flinkDeployCR).update()
+ else client.resource(flinkDeployCR).create()
+ }
+ } yield ()
+ } *> ZIO.logInfo(s"Successfully apply FlinkDeployment K8s CR: namespace=${spec.namespace}, name=${spec.name}")
+
+ /** Apply FlinkSessionJob CR. */
+ // noinspection DuplicatedCode
+ def applySessionJob(spec: FlinkSessionJobDef): IO[Throwable, Unit] = {
+ lazy val mirrorSpace = s"${spec.namespace}_${spec.name}"
+ for {
+ // Generate FlinkSessionJob CR
+ correctedJob <- mirrorJobJarToHttpFileServer(Some(spec.job), mirrorSpace).map(_.get)
+ correctedSpec = spec.copy(job = correctedJob)
+ sessionJobCR = correctedSpec.toFlinkSessionJob
+ // Logging CR yaml
+ _ <- ZIO
+ .attempt(yamlMapper.writeValueAsString(sessionJobCR))
+ .catchAll(e => ZIO.succeed(e.getMessage))
+ .flatMap(yaml => ZIO.logInfo(s"Applying FlinkSessionJob K8s CR: \n$yaml"))
+ .when(logFlinkCrYaml)
+
+ // Apply FlinkSessionJob CR to kubernetes
+ isCrExist <- FlinkK8sObserver.getFlinkSessionJobCrSpec(spec.namespace, spec.name).map(_.isDefined)
+ _ <- usingK8sClient { client =>
+ if (isCrExist) client.resource(sessionJobCR).update()
+ else client.resource(sessionJobCR).create()
+ }
+ } yield ()
+  } *> ZIO.logInfo(s"Successfully applied FlinkSessionJob K8s CR: namespace=${spec.namespace}, name=${spec.name}")
+
+  // Mirror the job jar to the http file server and rewrite job.jarURI to the resulting http URL (local:// URIs are left untouched).
+ private def mirrorJobJarToHttpFileServer(job: Option[JobDef], mirrorSpace: String) = {
+ for {
+ jobJarHttpUrl <- job
+ .map(_.jarURI)
+ .filter(!_.startsWith("local://"))
+ .map(jarUri => FileMirror.mirrorAndGetHttpUrl(jarUri, mirrorSpace).map(Some(_)))
+ .getOrElse(ZIO.succeed(None))
+
+ correctedJob = jobJarHttpUrl match {
+ case Some(url) => job.map(_.copy(jarURI = url))
+ case None => job
+ }
+ } yield correctedJob
+ }
+
+  // Mirror extra jar paths to the http file server and return the corresponding http access URIs.
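+  // Jars are mirrored concurrently (up to 5 at a time) via ZStream.mapZIOPar.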
+ private def mirrorExtJarsToHttpFileServer(extJars: Array[String], mirrorSpace: String) = {
+ ZStream
+ .fromIterable(extJars)
+ .mapZIOPar(5)(path => FileMirror.mirrorAndGetHttpUrl(path, mirrorSpace))
+ .runCollect
+ .map(_.toArray)
+ }
+
+  // Inject a pod-template that loads the given jars from the http file server.
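+  // A sketch of what gets injected (see the code below):
+  //   - an init-container "userlib-loader" (busybox) that downloads each http jar via wget into /opt/flink/lib,
+  //     e.g. (illustrative) "wget http://<host>:10030/fs/<namespace>_<name>/udf.jar -O /opt/flink/lib/udf.jar"
+  //   - an emptyDir volume "flink-usrlib" shared between the init-container and the Flink containers
+  //   - per-jar volume mounts on "flink-main-container" at /opt/flink/lib/<jar-name>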
+ private def correctPodSpec(oriPod: Option[Pod], jarHttpUrls: Array[String]): UIO[Option[Pod]] = ZIO.succeed {
+ if (jarHttpUrls.isEmpty) oriPod
+ else {
+ val pod = oriPod.getOrElse(new Pod())
+
+ // handle metadata
+ val metadata = Option(pod.getMetadata).getOrElse(new ObjectMeta())
+ metadata.setName("pod-template")
+ pod.setMetadata(metadata)
+
+ val spec = Option(pod.getSpec).getOrElse(new PodSpec())
+
+ // handle initContainers
+ val initContainers: util.List[Container] = Option(spec.getInitContainers).getOrElse(new util.ArrayList())
+ val libLoaderInitContainer = new ContainerBuilder()
+ .withName("userlib-loader")
+ .withImage("busybox:1.35.0")
+ .withCommand(
+ "sh",
+ "-c",
+ jarHttpUrls.map(url => s"wget $url -O /opt/flink/lib/${pathLastSegment(url)}").mkString(" && "))
+ .withVolumeMounts(
+ new VolumeMountBuilder()
+ .withName("flink-usrlib")
+ .withMountPath("/opt/flink/lib")
+ .build
+ )
+ .build
+ initContainers.add(libLoaderInitContainer)
+ spec.setInitContainers(initContainers)
+
+ // handle containers
+ val flinkMainContainerVolMounts: util.List[VolumeMount] =
+ jarHttpUrls
+ .map(url => pathLastSegment(url))
+ .map(jarName =>
+ new VolumeMountBuilder()
+ .withName("flink-usrlib")
+ .withMountPath(s"/opt/flink/lib/$jarName")
+ .withSubPath(jarName)
+ .build)
+ .toList
+ .asJava
+
+ val containers: util.List[Container] = Option(spec.getContainers).getOrElse(new util.ArrayList())
+ containers.asScala.zipWithIndex
+ .find { case (e, _) => e.getName == "flink-main-container" }
+ .map { case (e, idx) =>
+ val volMounts = Option(e.getVolumeMounts)
+ .map { mounts =>
+ mounts.addAll(flinkMainContainerVolMounts)
+ mounts
+ }
+ .getOrElse(flinkMainContainerVolMounts)
+ e.setVolumeMounts(volMounts)
+ containers.set(idx, e)
+ }
+ .getOrElse(
+ containers.add(
+ new ContainerBuilder()
+ .withName("flink-main-container")
+ .withVolumeMounts(flinkMainContainerVolMounts)
+ .build)
+ )
+ spec.setContainers(containers)
+
+ // handle volumes
+ val volumes: util.List[Volume] = Option(spec.getVolumes).getOrElse(new util.ArrayList())
+ volumes.add(
+ new VolumeBuilder()
+ .withName("flink-usrlib")
+ .withEmptyDir(new EmptyDirVolumeSource())
+ .build
+ )
+ spec.setVolumes(volumes)
+
+ pod.setSpec(spec)
+ Some(pod)
+ }
+ }
+
+ /** Delete FlinkDeployment CR. */
+ def deleteDeployment(namespace: String, name: String): IO[Throwable, Unit] =
+ usingK8sClient { client =>
+ client
+ .resources(classOf[FlinkDeployment])
+ .inNamespace(namespace)
+ .withName(name)
+ .delete()
+ } *> ZIO.logInfo(s"Delete FlinkDeployment CR: namespace=$namespace, name=$name")
+
+ /** Delete FlinkSessionJob CR. */
+ def deleteSessionJob(namespace: String, name: String): IO[Throwable, Unit] =
+ usingK8sClient { client =>
+ client
+ .resources(classOf[FlinkSessionJob])
+ .inNamespace(namespace)
+ .withName(name)
+ .delete()
+    } *> ZIO.logInfo(s"Delete FlinkSessionJob CR: namespace=$namespace, name=$name")
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/FlinkK8sOperator.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/FlinkK8sOperator.scala
new file mode 100644
index 0000000000..b9e0eac004
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/FlinkK8sOperator.scala
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.operator
+
+import org.apache.streampark.common.zio.ZIOContainerSubscription.RefMapExtension
+import org.apache.streampark.flink.kubernetes.v2.FlinkRestRequest
+import org.apache.streampark.flink.kubernetes.v2.FlinkRestRequest.{StopJobSptReq, TriggerSptReq}
+import org.apache.streampark.flink.kubernetes.v2.model._
+import org.apache.streampark.flink.kubernetes.v2.model.TrackKey._
+import org.apache.streampark.flink.kubernetes.v2.observer.FlinkK8sObserver
+import org.apache.streampark.flink.kubernetes.v2.operator.OprErr._
+
+import zio.{durationInt, IO, Schedule, ZIO}
+import zio.stream.ZStream
+
+/**
+ * Flink Kubernetes resource operator.
+ * When deploying or deleting Flink resources, FlinkK8sOperator automatically
+ * handles the related tracking on the FlinkK8sObserver.
+ */
+sealed trait FlinkK8sOperator {
+
+ /** Directly operate Flink Kubernetes CR. */
+ val k8sCrOpr: CROperator.type = CROperator
+
+ /**
+ * Deploy a Flink cluster with the given spec.
+ *
+ * @param id Ref to [[org.apache.streampark.console.core.entity.FlinkCluster.id]]
+ * @param spec Flink cluster definition
+ */
+ def deployCluster(id: Long, spec: FlinkDeploymentDef): IO[Throwable, TrackKey.ClusterKey]
+
+ /**
+ * Deploy a Flink application mode job with the given spec.
+ *
+ * @param appId ref to [[org.apache.streampark.console.core.entity.Application.id]]
+ * @param spec Flink application mode job definition.
+ */
+ def deployApplicationJob(appId: Long, spec: FlinkDeploymentDef): IO[Throwable, TrackKey.ApplicationJobKey]
+
+ /**
+ * Deploy a Flink session job with the given spec.
+ *
+ * @param appId ref to [[org.apache.streampark.console.core.entity.Application.id]]
+ * @param spec Flink session mode job definition.
+ */
+ def deploySessionJob(appId: Long, spec: FlinkSessionJobDef): IO[Throwable, TrackKey.SessionJobKey]
+
+ /**
+ * Delete Flink Cluster, Application Job or Session Job resource.
+ *
+ * @param id Ref to [[org.apache.streampark.console.core.entity.Application.id]]
+ * or [[org.apache.streampark.console.core.entity.FlinkCluster.id]]
+ */
+ def delete(id: Long): IO[Throwable, Unit]
+
+ /**
+ * Cancel Flink job via rest api.
+ *
+ * @param appId Ref to [[org.apache.streampark.console.core.entity.Application.id]]
+ */
+ def cancelJob(appId: Long): IO[Throwable, Unit]
+
+ /**
+ * Stop Flink job with savepoint via rest api.
+ *
+ * @param appId Ref to [[org.apache.streampark.console.core.entity.Application.id]]
+ * @param savepoint Flink savepoint definition.
+ */
+ def stopJob(appId: Long, savepoint: JobSavepointDef): IO[Throwable, JobSavepointStatus]
+
+ /**
+ * Trigger flink job savepoint via rest api.
+ *
+ * @param appId Ref to [[org.apache.streampark.console.core.entity.Application.id]]
+ * @param savepoint Flink savepoint definition.
+ */
+ def triggerJobSavepoint(appId: Long, savepoint: JobSavepointDef): IO[Throwable, JobSavepointStatus]
+
+}
+
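+// Typical flow (a sketch): the deploy* methods apply the CR via CROperator and register a TrackKey
+// with FlinkK8sObserver; cancelJob / stopJob / triggerJobSavepoint resolve the target job's REST
+// endpoint and job id from the observer snapshots and then call the Flink REST API.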
+object FlinkK8sOperator extends FlinkK8sOperator {
+
+ private val obr = FlinkK8sObserver
+ private val flinkRest = FlinkRestRequest
+
+ /** Deploy a Flink cluster with the given spec. */
+ def deployCluster(id: Long, spec: FlinkDeploymentDef): IO[Throwable, TrackKey.ClusterKey] = {
+ for {
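+      // A session cluster is a FlinkDeployment without a job spec, so any job definition is dropped here.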
+ _ <- k8sCrOpr.applyDeployment(spec.copy(job = None))
+ trackKey = TrackKey.cluster(id, spec.namespace, spec.name)
+ _ <- obr.track(trackKey)
+ } yield trackKey
+ }
+
+ /** Deploy a Flink application job with the given spec. */
+ def deployApplicationJob(appId: Long, spec: FlinkDeploymentDef): IO[Throwable, TrackKey.ApplicationJobKey] = {
+ for {
+ _ <- k8sCrOpr.applyDeployment(spec)
+ trackKey = TrackKey.appJob(appId, spec.namespace, spec.name)
+ _ <- obr.track(trackKey)
+ } yield trackKey
+ }
+
+ /** Deploy a Flink session job with the given spec. */
+ def deploySessionJob(appId: Long, spec: FlinkSessionJobDef): IO[Throwable, TrackKey.SessionJobKey] = {
+ for {
+ _ <- k8sCrOpr.applySessionJob(spec)
+ trackKey = TrackKey.sessionJob(appId, spec.namespace, spec.name, spec.deploymentName)
+ _ <- obr.track(trackKey)
+ } yield trackKey
+ }
+
+ /** Delete Flink Cluster, Application Job or Session Job resource. */
+ def delete(id: Long): IO[Throwable, Unit] =
+ for {
+ trackKey <- obr.trackedKeys
+ .find(_.id == id)
+ .someOrFail(FlinkResourceNotFound(id))
+ _ <- trackKey match {
+ case ClusterKey(_, namespace, name) => k8sCrOpr.deleteDeployment(namespace, name)
+ case ApplicationJobKey(_, namespace, name) => k8sCrOpr.deleteDeployment(namespace, name)
+ case SessionJobKey(_, namespace, name, _) => k8sCrOpr.deleteSessionJob(namespace, name)
+ case _ => ZIO.fail(UnsupportedAction(s"delete resource for $trackKey"))
+ }
+ } yield ()
+
+ /** Cancel Flink job via rest api. */
+ def cancelJob(appId: Long): IO[Throwable, Unit] =
+ for {
+ hook <- retrieveFlinkJobEndpoint(appId)
+ (restEndpoint, jobId) = hook
+ _ <- flinkRest(restEndpoint.chooseRest).cancelJob(jobId)
+ } yield ()
+
+ /** Stop Flink job with savepoint via rest api. */
+ // noinspection DuplicatedCode
+ def stopJob(appId: Long, savepoint: JobSavepointDef): IO[Throwable, JobSavepointStatus] =
+ for {
+ hook <- retrieveFlinkJobEndpoint(appId)
+ (restEndpoint, jobId) = hook
+ rest = flinkRest(restEndpoint.chooseRest)
+ // submit stop job request
+ triggerId <- rest.stopJobWithSavepoint(jobId, StopJobSptReq(savepoint))
+ // watch trigger status until it's finished
+ triggerRs <- ZStream
+ .fromZIO(rest.getSavepointOperationStatus(jobId, triggerId))
+ .repeat(Schedule.spaced(100.millis))
+ .takeUntil(_.isCompleted)
+ .runLast
+ .map(_.get)
+ } yield triggerRs
+
+ /** Trigger flink job savepoint via rest api. */
+ // noinspection DuplicatedCode
+ def triggerJobSavepoint(appId: Long, savepoint: JobSavepointDef): IO[Throwable, JobSavepointStatus] =
+ for {
+ hook <- retrieveFlinkJobEndpoint(appId)
+ (restEndpoint, jobId) = hook
+ rest = flinkRest(restEndpoint.chooseRest)
+      // submit trigger savepoint request
+ triggerId <- rest.triggerSavepoint(jobId, TriggerSptReq(savepoint))
+ // watch trigger status until it's finished
+ triggerRs <- ZStream
+ .fromZIO(rest.getSavepointOperationStatus(jobId, triggerId))
+ .repeat(Schedule.spaced(100.millis))
+ .takeUntil(_.isCompleted)
+ .runLast
+ .map(_.get)
+ } yield triggerRs
+
+ private type JobId = String
+ private def retrieveFlinkJobEndpoint(appId: Long): IO[Throwable, (RestSvcEndpoint, JobId)] =
+ for {
+ // Find track key
+ trackKey <- obr.trackedKeys.find(_.id == appId).someOrFail(FlinkResourceNotFound(appId))
+
+ // Find ref flink cluster rest endpoint
+ restEpt <- obr.restSvcEndpointSnaps
+ .get((trackKey.clusterNamespace, trackKey.clusterName))
+ .someOrFail(FlinkRestEndpointNotFound(trackKey.clusterNamespace, trackKey.clusterName))
+
+ // Find flink job id
+ jobId <- trackKey match {
+ case UnmanagedSessionJobKey(_, _, _, jobId) => ZIO.succeed(jobId)
+ case _ =>
+ obr.evaluatedJobSnaps
+ .getValue(trackKey.id)
+ .map(snap => snap.flatMap(_.jobStatus).map(_.jobId))
+ .someOrFail(FlinkJobNotFound(trackKey.id))
+ }
+ } yield (restEpt, jobId)
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/OprErr.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/OprErr.scala
new file mode 100644
index 0000000000..f33623b687
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/OprErr.scala
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.operator
+
+object OprErr {
+
+ case class UnsupportedAction(msg: String) extends Exception("Unsupported action: " + msg)
+
+ case class FlinkResourceNotFound(id: Long) extends Exception(s"Flink resource not found: id=$id")
+
+ case class FlinkJobNotFound(appId: Long) extends Exception(s"Flink job not found: appId=$appId")
+
+ case class FlinkRestEndpointNotFound(namespace: String, name: String)
+ extends Exception(s"Flink cluster rest endpoint not found: namespace=$namespace, name=$name")
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/package.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/package.scala
new file mode 100644
index 0000000000..1a37f412b8
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/operator/package.scala
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2
+
+import org.apache.streampark.common.conf.InternalConfigHolder
+import org.apache.streampark.flink.kubernetes.v2.Config.LOG_FLINK_CR_YAML
+
+/**
+ * Note: the entry point of this package is [[FlinkK8sOperator]].
+ */
+package object operator {
+
+ lazy val logFlinkCrYaml: Boolean = InternalConfigHolder.get(LOG_FLINK_CR_YAML)
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/package.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/package.scala
new file mode 100644
index 0000000000..c3685f16f7
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/main/scala/org/apache/streampark/flink/kubernetes/v2/package.scala
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes
+
+import com.fasterxml.jackson.annotation.JsonInclude
+import com.fasterxml.jackson.databind.ObjectMapper
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
+import com.fasterxml.jackson.dataformat.yaml.YAMLGenerator.Feature
+
+import scala.language.implicitConversions
+import scala.util.chaining.scalaUtilChainingOps
+
+package object v2 {
+
+ val jacksonMapper: ObjectMapper = new ObjectMapper()
+
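+  // Renders objects as YAML (used when logging CR specs); the "---" document start marker and null fields are omitted.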
+ val yamlMapper: ObjectMapper =
+ new ObjectMapper(new YAMLFactory().disable(Feature.WRITE_DOC_START_MARKER))
+ .tap(_.setSerializationInclusion(JsonInclude.Include.NON_NULL))
+
+  /** Get the last segment of a path string. */
+ def pathLastSegment(path: String): String = path.split("/").last
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/resources/assets/StateMachineExample.jar b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/resources/assets/StateMachineExample.jar
new file mode 100644
index 0000000000..2a0522c9dc
Binary files /dev/null and b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/resources/assets/StateMachineExample.jar differ
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/resources/assets/flink-faker-0.5.3.jar b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/resources/assets/flink-faker-0.5.3.jar
new file mode 100644
index 0000000000..deca9142ef
Binary files /dev/null and b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/resources/assets/flink-faker-0.5.3.jar differ
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/resources/assets/quick-sql-1.0.jar b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/resources/assets/quick-sql-1.0.jar
new file mode 100644
index 0000000000..4545a0b16f
Binary files /dev/null and b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/resources/assets/quick-sql-1.0.jar differ
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/FlinkMemorySizeParserSpec.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/FlinkMemorySizeParserSpec.scala
new file mode 100644
index 0000000000..369ccbe207
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/FlinkMemorySizeParserSpec.scala
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2
+
+import org.scalatest.matchers.should.Matchers
+import org.scalatest.wordspec.AnyWordSpecLike
+
+class FlinkMemorySizeParserSpec extends AnyWordSpecLike with Matchers {
+
+ "case-1: parse flink memory size text to bytes" in {
+
+ val cases = Seq(
+ "1b" -> Some(1L),
+ "1bytes" -> Some(1L),
+ "1k" -> Some(1024L),
+ "1kb" -> Some(1024L),
+ "1kibibytes" -> Some(1024L),
+ "1m" -> Some(1024 * 1024L),
+ "1mb" -> Some(1024 * 1024L),
+ "1mebibytes" -> Some(1024 * 1024L),
+ "1g" -> Some(1024 * 1024 * 1024L),
+ "1gb" -> Some(1024 * 1024 * 1024L),
+ "1gibibytes" -> Some(1024 * 1024 * 1024L),
+ "1t" -> Some(1024 * 1024 * 1024 * 1024L),
+ "1tb" -> Some(1024 * 1024 * 1024 * 1024L),
+ "1tebibytes" -> Some(1024 * 1024 * 1024 * 1024L)
+ )
+ cases.foreach { case (in, expect) =>
+ FlinkMemorySizeParser.parse(in).map(_.bytes) shouldBe expect
+ }
+ }
+
+ "case-2: parse strange flink memory size text to bytes" in {
+
+ val cases = Seq(
+ "1 m" -> Some(1024 * 1024L),
+ " 1 m" -> Some(1024 * 1024L),
+ " 1 m " -> Some(1024 * 1024L),
+ "1 m " -> Some(1024 * 1024L),
+ " 1m " -> Some(1024 * 1024L),
+ " m " -> None,
+ "m " -> None,
+ "1024" -> None,
+ "1024 " -> None,
+ " 1024 " -> None
+ )
+ cases.foreach { case (in, expect) =>
+ FlinkMemorySizeParser.parse(in).map(_.bytes) shouldBe expect
+ }
+ }
+
+ "case-3: parse and convert memory unit" in {
+ FlinkMemorySizeParser.parse("1g").map(_.mebiBytes) shouldBe Some(1024L)
+ FlinkMemorySizeParser.parse("1g").map(_.kibiBytes) shouldBe Some(1024L * 1024L)
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/UsingEmbeddedFileServer.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/UsingEmbeddedFileServer.scala
new file mode 100644
index 0000000000..c005c1ef28
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/UsingEmbeddedFileServer.scala
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.example
+
+import org.apache.streampark.common.zio.ZIOExt.unsafeRun
+import org.apache.streampark.flink.kubernetes.v2.httpfs.{EmbeddedFileServer, FileMirror}
+
+import org.scalatest.{BeforeAndAfterAll, Ignore}
+import org.scalatest.wordspec.AnyWordSpecLike
+import zio.ZIO
+
+/**
+ * Example of using an embedded file server.
+ * Tip: comment out the @Ignore annotation to execute the example code.
+ */
+@Ignore
+class UsingEmbeddedFileServer extends AnyWordSpecLike with BeforeAndAfterAll {
+
+ "Launch embedded http file server and mirror files" in unsafeRun {
+ for {
+ _ <- EmbeddedFileServer.launch
+
+      // mirror local files to the http file server; the source can be any local path.
+ _ <- FileMirror.mirror(s"$assetPath/flink-faker-0.5.3.jar", "test")
+ _ <- FileMirror.mirror(s"$assetPath/quick-sql-1.0.jar", "test")
+
+ // print the http url that corresponds to the accessible file.
+ _ <- FileMirror.getHttpUrl("test", "flink-faker-0.5.3.jar").debug
+ _ <- FileMirror.getHttpUrl("test", "quick-sql-1.0.jar").debug
+ /* OUTPUT:
+ http://{LAN_IP}:10030/fs/test/flink-faker-0.5.3.jar
+ http://{LAN_IP}:10030/fs/test/quick-sql-1.0.jar
+ */
+ _ <- ZIO.never
+ } yield ()
+ }
+
+  "A simpler example" in unsafeRun {
+ for {
+ _ <- EmbeddedFileServer.launch
+
+ _ <- FileMirror.mirrorAndGetHttpUrl(s"$assetPath/flink-faker-0.5.3.jar", "test2").debug
+ _ <- FileMirror.mirrorAndGetHttpUrl(s"$assetPath/quick-sql-1.0.jar", "test2").debug
+ /* OUTPUT:
+ http://{LAN_IP}:10030/fs/test2/flink-faker-0.5.3.jar
+ http://{LAN_IP}:10030/fs/test2/quick-sql-1.0.jar
+ */
+ _ <- ZIO.never
+ } yield ()
+ }
+
+ override protected def beforeAll(): Unit = prepareTestAssets()
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/UsingObserver.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/UsingObserver.scala
new file mode 100644
index 0000000000..c65f8c1fdc
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/UsingObserver.scala
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.example
+
+import org.apache.streampark.common.zio.PrettyStringOps
+import org.apache.streampark.common.zio.ZIOContainerSubscription.{ConcurrentMapExtension, RefMapExtension}
+import org.apache.streampark.common.zio.ZIOExt.{unsafeRun, ZStreamOps}
+import org.apache.streampark.flink.kubernetes.v2.model.TrackKey
+import org.apache.streampark.flink.kubernetes.v2.observer.FlinkK8sObserver
+
+import org.scalatest.BeforeAndAfterAll
+import org.scalatest.wordspec.AnyWordSpecLike
+import zio.{durationInt, Console, ZIO}
+
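+/**
+ * Example of using FlinkK8sObserver.
+ * Note: these examples assume a reachable Kubernetes cluster where the referenced resources
+ * (e.g. namespace "fdev", deployment "simple-appjob") have already been deployed.
+ */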
+class UsingObserver extends AnyWordSpecLike with BeforeAndAfterAll {
+
+ "Track and get flink job snapshot." in unsafeRun {
+ for {
+ // track resource
+ _ <- ZIO.unit
+ trackId = TrackKey.appJob(233, "fdev", "simple-appjob")
+ _ <- FlinkK8sObserver.track(trackId)
+ // get job snapshot
+ _ <- ZIO.sleep(3.seconds)
+ jobSnap <- FlinkK8sObserver.evaluatedJobSnaps.getValue(trackId.id)
+ _ <- Console.printLine(jobSnap.prettyStr)
+ } yield ()
+ }
+
+ "Track and get flink cluster metrics" in unsafeRun {
+ for {
+ // track resource
+ _ <- ZIO.unit
+ trackId = TrackKey.appJob(233, "fdev", "simple-appjob")
+ _ <- FlinkK8sObserver.track(trackId)
+      // get cluster metrics snapshot
+      _           <- ZIO.sleep(3.seconds)
+      metricsSnap <- FlinkK8sObserver.clusterMetricsSnaps.get((trackId.namespace, trackId.name))
+      _           <- Console.printLine(metricsSnap.prettyStr)
+ } yield ()
+ }
+
+  "Subscribe to Flink job snapshot changes." in unsafeRun {
+ for {
+ // track resource
+ _ <- FlinkK8sObserver.track(TrackKey.sessionJob(233, "fdev", "simple-sessionjob", "simple-session"))
+ _ <- FlinkK8sObserver.track(TrackKey.appJob(234, "fdev", "simple-appjob"))
+ // subscribe job status changes
+ watchStream = FlinkK8sObserver.evaluatedJobSnaps.flatSubscribe()
+ _ <- watchStream.debugPretty.runDrain
+ } yield ()
+ }
+
+  "Only subscribe to Flink job state changes." in unsafeRun {
+ for {
+ _ <- FlinkK8sObserver.track(TrackKey.appJob(234, "fdev", "simple-appjob"))
+ _ <- FlinkK8sObserver.evaluatedJobSnaps
+ .flatSubscribe()
+ .map { case (appId, status) => (appId, status.evalState) }
+ .diffPrev
+ .debug
+ .runDrain
+ } yield ()
+ }
+
+  "Subscribe to Flink cluster metrics changes." in unsafeRun {
+ for {
+ // track resource
+ _ <- FlinkK8sObserver.track(TrackKey.sessionJob(233, "fdev", "simple-sessionjob", "simple-session"))
+ _ <- FlinkK8sObserver.track(TrackKey.appJob(234, "fdev", "simple-appjob"))
+      // subscribe cluster metrics changes
+ watchStream = FlinkK8sObserver.clusterMetricsSnaps.flatSubscribe()
+ _ <- watchStream.debugPretty.runDrain
+ } yield ()
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/UsingOperator.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/UsingOperator.scala
new file mode 100644
index 0000000000..fba938449a
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/UsingOperator.scala
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.streampark.flink.kubernetes.v2.example
+
+import org.apache.streampark.common.zio.{liftValueAsSome, PrettyStringOps}
+import org.apache.streampark.common.zio.ZIOContainerSubscription.{ConcurrentMapExtension, RefMapExtension}
+import org.apache.streampark.common.zio.ZIOExt.{unsafeRun, IOOps, ZStreamOps}
+import org.apache.streampark.flink.kubernetes.v2.httpfs.EmbeddedFileServer
+import org.apache.streampark.flink.kubernetes.v2.model._
+import org.apache.streampark.flink.kubernetes.v2.observer.FlinkK8sObserver
+import org.apache.streampark.flink.kubernetes.v2.operator.FlinkK8sOperator
+
+import org.apache.flink.v1beta1.FlinkDeploymentSpec.FlinkVersion
+import org.scalatest.{BeforeAndAfterAll, Ignore}
+import org.scalatest.wordspec.AnyWordSpecLike
+import zio.{Console, ZIO}
+
+/**
+ * Example of using FlinkK8sOperator.
+ * Tip: comment out the @Ignore annotation to execute the example code.
+ *
+ * Prerequisites for running this example:
+ * 1. There exists a locally connectable Kubernetes cluster.
+ * 2. Flink Kubernetes operator has been installed on Kubernetes.
+ * see: https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main/docs/try-flink-kubernetes-operator/quick-start/
+ * 3. A namespace named "fdev" has been created in Kubernetes.
+ */
+@Ignore
+class UsingOperator extends AnyWordSpecLike with BeforeAndAfterAll {
+
+ "Deploy a simple Flink application job to Kubernetes" in unsafeRun {
+
+ val spec = FlinkDeploymentDef(
+ name = "simple-appjob",
+ namespace = "fdev",
+ image = "flink:1.16",
+ flinkVersion = FlinkVersion.V1_16,
+ jobManager = JobManagerDef(cpu = 1, memory = "1024m"),
+ taskManager = TaskManagerDef(cpu = 1, memory = "1024m"),
+ job = JobDef(
+ jarURI = "local:///opt/flink/examples/streaming/StateMachineExample.jar",
+ parallelism = 1
+ )
+ )
+ for {
+ _ <- FlinkK8sOperator.deployApplicationJob(114514, spec)
+ // subscribe job status
+ _ <- FlinkK8sObserver.evaluatedJobSnaps.flatSubscribeValues().debugPretty.runDrain
+ } yield ()
+ }
+
+ "Deploy a simple Flink cluster to Kubernetes" in unsafeRun {
+ val spec = FlinkDeploymentDef(
+ name = "simple-session",
+ namespace = "fdev",
+ image = "flink:1.16",
+ flinkVersion = FlinkVersion.V1_16,
+ jobManager = JobManagerDef(cpu = 1, memory = "1024m"),
+ taskManager = TaskManagerDef(cpu = 1, memory = "1024m")
+ )
+ for {
+ _ <- FlinkK8sOperator.deployCluster(114515, spec)
+ _ <- FlinkK8sObserver.clusterMetricsSnaps.flatSubscribeValues().debugPretty.runDrain
+ } yield ()
+ }
+
+ "Deploy a simple Flink session mode job to Kubernetes" in unsafeRun {
+ val spec = FlinkSessionJobDef(
+ namespace = "fdev",
+ name = "simple-sessionjob",
+ deploymentName = "simple-session",
+ job = JobDef(
+ jarURI = s"$assetPath/StateMachineExample.jar",
+ parallelism = 1
+ )
+ )
+ for {
+ _ <- FlinkK8sOperator.deploySessionJob(114515, spec)
+ _ <- FlinkK8sObserver.evaluatedJobSnaps.flatSubscribeValues().debugPretty("evaluated job status").runDrain
+ } yield ()
+ }
+
+  "Deploy an application mode job with additional jar resources such as third-party dependencies or UDFs" in unsafeRun {
+ val spec = FlinkDeploymentDef(
+ name = "appjob-with-extra-jar",
+ namespace = "fdev",
+ image = "flink:1.16",
+ flinkVersion = FlinkVersion.V1_16,
+ jobManager = JobManagerDef(cpu = 1, memory = "1024m"),
+ taskManager = TaskManagerDef(cpu = 1, memory = "1024m"),
+ job = JobDef(
+ jarURI = "assets/quick-sql-1.0.jar",
+ parallelism = 1,
+ entryClass = "demo.flink.SqlFakerDataJob"
+ ),
+ extJarPaths = Array("assets/flink-faker-0.5.3.jar")
+ )
+ for {
+ _ <- FlinkK8sOperator.deployApplicationJob(114514, spec)
+ _ <- FlinkK8sObserver.evaluatedJobSnaps.flatSubscribeValues().debugPretty.runDrain
+ } yield ()
+ }
+
+  "Deploy a session mode job with additional jar resources such as third-party dependencies or UDFs." in unsafeRun {
+ val clusterSpec = FlinkDeploymentDef(
+ name = "session-with-extra-jar",
+ namespace = "fdev",
+ image = "flink:1.16",
+ flinkVersion = FlinkVersion.V1_16,
+ jobManager = JobManagerDef(cpu = 1, memory = "1024m"),
+ taskManager = TaskManagerDef(cpu = 1, memory = "1024m"),
+ extJarPaths = Array("assets/flink-faker-0.5.3.jar")
+ )
+ val jobSpec = FlinkSessionJobDef(
+ namespace = "fdev",
+ name = "sessionjob-with-extra-jar",
+ deploymentName = "session-with-extra-jar",
+ job = JobDef(
+ jarURI = "assets/quick-sql-1.0.jar",
+ parallelism = 1,
+ entryClass = "demo.flink.SqlFakerDataJob"
+ )
+ )
+ for {
+ // deploy cluster
+ _ <- FlinkK8sOperator.deployCluster(114514, clusterSpec)
+ // deploy jar
+ _ <- FlinkK8sOperator.deploySessionJob(114514, jobSpec)
+ _ <- FlinkK8sObserver.evaluatedJobSnaps.flatSubscribeValues().debugPretty.runDrain
+ } yield ()
+ }
+
+ "Deploy an application job and set up ingress resources." in unsafeRun {
+ val spec = FlinkDeploymentDef(
+ name = "appjob-with-ingress",
+ namespace = "fdev",
+ image = "flink:1.16",
+ flinkVersion = FlinkVersion.V1_16,
+ jobManager = JobManagerDef(cpu = 1, memory = "1024m"),
+ taskManager = TaskManagerDef(cpu = 1, memory = "1024m"),
+ job = JobDef(
+ jarURI = "local:///opt/flink/examples/streaming/StateMachineExample.jar",
+ parallelism = 1
+ ),
+ ingress = IngressDef.simplePathBased
+ )
+ for {
+ _ <- FlinkK8sOperator.deployApplicationJob(114514, spec)
+ _ <- FlinkK8sObserver.evaluatedJobSnaps.flatSubscribeValues().debugPretty.runDrain
+ } yield ()
+ }
+
+ "Cancel a Flink job" in unsafeRun {
+ for {
+ _ <- FlinkK8sObserver.track(TrackKey.appJob(114514, "fdev", "simple-appjob"))
+ _ <-
+ FlinkK8sObserver.evaluatedJobSnaps
+ .flatSubscribeValues()
+ .takeUntil(snap => snap.clusterNs == "fdev" && snap.clusterId == "simple-appjob" && snap.jobStatus.nonEmpty)
+ .runDrain
+
+ _ <- Console.printLine("start to cancel job.")
+ _ <- FlinkK8sOperator.cancelJob(114514)
+ _ <- Console.printLine("job cancelled")
+ _ <- ZIO.interrupt
+ } yield ()
+ }
+
+ "Stop the flink job and specify the corresponding savepoint configuration" in unsafeRun {
+ for {
+ _ <- FlinkK8sObserver.track(TrackKey.appJob(114514, "fdev", "simple-appjob"))
+ _ <-
+ FlinkK8sObserver.evaluatedJobSnaps
+ .flatSubscribeValues()
+ .takeUntil(snap => snap.clusterNs == "fdev" && snap.clusterId == "simple-appjob" && snap.jobStatus.nonEmpty)
+ .runDrain
+
+ _ <- Console.printLine("start to stop job.")
+ _ <- FlinkK8sOperator
+ .stopJob(114514, JobSavepointDef(savepointPath = "file:///opt/flink/savepoint"))
+ .map(_.prettyStr)
+ .debug("trigger status result")
+ _ <- Console.printLine("job stopped.")
+ _ <- ZIO.interrupt.ignore
+ } yield ()
+ }
+
+ "Trigger savepoint for flink job." in unsafeRun {
+ for {
+ _ <- FlinkK8sObserver.track(TrackKey.appJob(114514, "fdev", "simple-appjob"))
+ _ <-
+ FlinkK8sObserver.evaluatedJobSnaps
+ .flatSubscribeValues()
+ .takeUntil(snap => snap.clusterNs == "fdev" && snap.clusterId == "simple-appjob" && snap.jobStatus.nonEmpty)
+ .runDrain
+
+      _ <- Console.printLine("start to trigger savepoint.")
+ _ <- FlinkK8sOperator
+ .triggerJobSavepoint(114514, JobSavepointDef(savepointPath = "file:///opt/flink/savepoint"))
+ .map(_.prettyStr)
+ .debug("trigger status result")
+      _ <- Console.printLine("savepoint triggered.")
+ _ <- ZIO.interrupt.ignore
+ } yield ()
+ }
+
+ "Delete Flink cluster on Kubernetes" in unsafeRun {
+ FlinkK8sOperator.k8sCrOpr.deleteDeployment("fdev", "simple-session")
+ // or FlinkK8sOperator.delete(114514)
+ }
+
+ "Delete a Flink application mode job on Kubernetes" in unsafeRun {
+ FlinkK8sOperator.k8sCrOpr.deleteDeployment("fdev", "simple-appjob")
+ // or FlinkK8sOperator.delete(114514)
+ }
+
+ "Delete flink session mode job resources on kubernetes." in unsafeRun {
+ FlinkK8sOperator.k8sCrOpr.deleteSessionJob("fdev", "simple-sessionjob")
+ // or FlinkK8sOperator.delete(114514)
+ }
+
+ override protected def beforeAll(): Unit = {
+ prepareTestAssets()
+ EmbeddedFileServer.launch.runIO
+ }
+
+}
diff --git a/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/package.scala b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/package.scala
new file mode 100644
index 0000000000..d2ed58ec01
--- /dev/null
+++ b/streampark-flink/streampark-flink-kubernetes-v2/streampark-flink-kubernetes-engine/src/test/scala/org/apache/streampark/flink/kubernetes/v2/example/package.scala
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.streampark.flink.kubernetes.v2
+
+import org.apache.streampark.common.conf.InternalConfigHolder
+import org.apache.streampark.flink.kubernetes.v2.Config.EMBEDDED_HTTP_FILE_SERVER_LOCAL_MIRROR_DIR
+import org.apache.streampark.flink.kubernetes.v2.example.clearTestAssets
+
+package object example {
+
+ val testPath = os.pwd / "streampark_workspace"
+ val mirrorPath = testPath / "mirror"
+ val assetPath = testPath / "assets"
+
+ def prepareTestAssets() = {
+ // prepare test assets and embedded http file server local directory
+ if (!os.exists(testPath)) os.makeDir(testPath)
+ if (!os.exists(mirrorPath)) os.makeDir(mirrorPath)
+ if (os.exists(assetPath)) os.remove.all(assetPath)
+ os.copy(os.Path(getClass.getResource("/assets").getPath), assetPath)
+
+ // force set streampark system configuration
+ InternalConfigHolder.set(EMBEDDED_HTTP_FILE_SERVER_LOCAL_MIRROR_DIR, mirrorPath.toString)
+ }
+
+ def clearTestAssets() = {
+ os.remove.all(testPath)
+ }
+}
+
+/** Clear file resources during testing. */
+object CleanTestResource extends App {
+ clearTestAssets()
+}
diff --git a/streampark-flink/streampark-flink-kubernetes/pom.xml b/streampark-flink/streampark-flink-kubernetes/pom.xml
index ec72feb85f..0ee290313a 100644
--- a/streampark-flink/streampark-flink-kubernetes/pom.xml
+++ b/streampark-flink/streampark-flink-kubernetes/pom.xml
@@ -25,7 +25,7 @@
streampark-flink-kubernetes_${scala.binary.version}
- StreamPark : Flink Kubernetes Integration
+ StreamPark : Flink Kubernetes Integration (Deprecated)
5.1
diff --git a/streampark-flink/streampark-flink-kubernetes/src/main/scala/org/apache/streampark/flink/kubernetes/enums/FlinkJobState.scala b/streampark-flink/streampark-flink-kubernetes/src/main/scala/org/apache/streampark/flink/kubernetes/enums/FlinkJobState.scala
index f56065e9dc..feefe1ad9b 100644
--- a/streampark-flink/streampark-flink-kubernetes/src/main/scala/org/apache/streampark/flink/kubernetes/enums/FlinkJobState.scala
+++ b/streampark-flink/streampark-flink-kubernetes/src/main/scala/org/apache/streampark/flink/kubernetes/enums/FlinkJobState.scala
@@ -18,6 +18,7 @@
package org.apache.streampark.flink.kubernetes.enums
/** flink job status on kubernetes */
+@Deprecated
object FlinkJobState extends Enumeration {
// flink job has been submit by the streampark.
diff --git a/streampark-flink/streampark-flink-kubernetes/src/main/scala/org/apache/streampark/flink/kubernetes/model/FlinkMetricCV.scala b/streampark-flink/streampark-flink-kubernetes/src/main/scala/org/apache/streampark/flink/kubernetes/model/FlinkMetricCV.scala
index cf5e46335e..9718aba961 100644
--- a/streampark-flink/streampark-flink-kubernetes/src/main/scala/org/apache/streampark/flink/kubernetes/model/FlinkMetricCV.scala
+++ b/streampark-flink/streampark-flink-kubernetes/src/main/scala/org/apache/streampark/flink/kubernetes/model/FlinkMetricCV.scala
@@ -18,6 +18,7 @@
package org.apache.streampark.flink.kubernetes.model
/** flink cluster metric info */
+@Deprecated
case class FlinkMetricCV(
groupId: String = null,
totalJmMemory: Integer = 0,
diff --git a/streampark-shaded/pom.xml b/streampark-shaded/pom.xml
index ba70c3c6d1..b9c71630d7 100644
--- a/streampark-shaded/pom.xml
+++ b/streampark-shaded/pom.xml
@@ -32,7 +32,6 @@
streampark-shaded-slf4j
streampark-shaded-jackson
- streampark-shaded-flink-kubernetes-operator
diff --git a/streampark-shaded/streampark-shaded-flink-kubernetes-operator/pom.xml b/streampark-shaded/streampark-shaded-flink-kubernetes-operator/pom.xml
deleted file mode 100644
index 5e34360c00..0000000000
--- a/streampark-shaded/streampark-shaded-flink-kubernetes-operator/pom.xml
+++ /dev/null
@@ -1,81 +0,0 @@
-
-
-
- 4.0.0
-
- org.apache.streampark
- streampark-shaded
- 1.0.0
-
-
- streampark-shaded-flink-kubernetes-operator-api
- StreamPark : Shaded Flink K8s Operator API
-
-
-
- org.apache.flink
- flink-kubernetes-operator-api
- 1.5.0
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-shade-plugin
-
-
- package
-
- shade
-
-
- true
- ${project.basedir}/target/dependency-reduced-pom.xml
-
-
-
- org.apache.flink:*
-
-
-
-
- org.apache.flink
- ${streampark.shaded.package}.org.apache.flink
-
-
-
-
- *:*
-
- META-INF/*.SF
- META-INF/*.DSA
- META-INF/*.RSA
-
-
-
-
-
-
-
-
-
-
-