All messages handled by Master.receive
1. case ElectedLeader => ......
2. case CompleteRecovery => ......
3. case RevokedLeadership => ......
4. case WorkerDecommissioning => ......
5. case DecommissionWorkers => ......
6. case RegisterWorker => ......
7. case RegisterApplication => ......
8. case ExecutorStateChanged => ......
9. case DriverStateChanged => ......
10. case Heartbeat => ......
11. case MasterChangeAcknowledged => ......
12. case WorkerSchedulerStateResponse => ......
13. case WorkerLatestState => ......
14. case UnregisterApplication => ......
15. case CheckForWorkerTimeOut => ......
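All of these cases live in Master.receive, the one-way message handler of the Master RPC endpoint (ask-style messages go through receiveAndReply instead). Abridged shape, for orientation only:

override def receive: PartialFunction[Any, Unit] = {
  case ElectedLeader => ......
  case CompleteRecovery => completeRecovery()
  // ... one case per message listed above ...
  case CheckForWorkerTimeOut => timeOutDeadWorkers()
}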
1. ElectedLeader explained
case ElectedLeader =>
  // Read whatever the previous leader persisted (apps / drivers / workers)
  val (storedApps, storedDrivers, storedWorkers) = persistenceEngine.readPersistedData(rpcEnv)
  state = if (storedApps.isEmpty && storedDrivers.isEmpty && storedWorkers.isEmpty) {
    RecoveryState.ALIVE
  } else {
    RecoveryState.RECOVERING
  }
  logInfo("I have been elected leader! New state: " + state)
  if (state == RecoveryState.RECOVERING) {
    beginRecovery(storedApps, storedDrivers, storedWorkers)
    // Schedule CompleteRecovery after the worker timeout so unresponsive workers/apps can be dropped
    recoveryCompletionTask = forwardMessageThread.schedule(new Runnable {
      override def run(): Unit = Utils.tryLogNonFatalError {
        self.send(CompleteRecovery)
      }
    }, workerTimeoutMs, TimeUnit.MILLISECONDS)
  }
1.1 The start method (zkFactory.createPersistenceEngine(), zkFactory.createLeaderElectionAgent(this)) explained
def createPersistenceEngine(): PersistenceEngine = {
  new ZooKeeperPersistenceEngine(conf, serializer)
}
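For context, a paraphrased sketch of where this factory is used: in the Master's start-up hook, the configured recovery mode decides which persistence engine and leader-election agent to create (abridged from the standard Spark layout; exact code differs between versions):

val (persistenceEngine_, leaderElectionAgent_) = recoveryMode match {
  case "ZOOKEEPER" =>
    val zkFactory = new ZooKeeperRecoveryModeFactory(conf, serializer)
    (zkFactory.createPersistenceEngine(), zkFactory.createLeaderElectionAgent(this))
  case "FILESYSTEM" =>
    val fsFactory = new FileSystemRecoveryModeFactory(conf, serializer)
    (fsFactory.createPersistenceEngine(), fsFactory.createLeaderElectionAgent(this))
  case _ =>
    // No recovery: a no-op persistence engine, and this Master is always the leader
    (new BlackHolePersistenceEngine(), new MonarchyLeaderAgent(this))
}
persistenceEngine = persistenceEngine_
leaderElectionAgent = leaderElectionAgent_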
1.2 ZooKeeperPersistenceEngine explained
private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine with Logging {

  private val workingDir = conf.get(ZOOKEEPER_DIRECTORY).getOrElse("/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, workingDir)
  .......
}
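The part elided above is how state actually gets written and read back: every object is serialized into a child znode under workingDir, and readPersistedData simply reads every znode whose name starts with the right prefix. A rough, paraphrased sketch (method bodies abridged; see the real class for error handling):

override def persist(name: String, obj: Object): Unit = {
  serializeIntoFile(workingDir + "/" + name, obj)   // one znode per app/driver/worker
}

override def unpersist(name: String): Unit = {
  zk.delete().forPath(workingDir + "/" + name)
}

override def read[T: ClassTag](prefix: String): Seq[T] = {
  zk.getChildren.forPath(workingDir).asScala
    .filter(_.startsWith(prefix))
    .flatMap(deserializeFromFile[T])
    .toSeq
}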
private def beginRecovery(storedApps: Seq[ApplicationInfo], storedDrivers: Seq[DriverInfo],
    storedWorkers: Seq[WorkerInfo]): Unit = {
  for (app <- storedApps) {
    logInfo("Trying to recover app: " + app.id)
    try {
      registerApplication(app)
      app.state = ApplicationState.UNKNOWN
      app.driver.send(MasterChanged(self, masterWebUiUrl))
    } catch {
      case e: Exception => logInfo("App " + app.id + " had exception on reconnect")
    }
  }
  for (driver <- storedDrivers) {
    // Drivers are re-added directly; whether they are still alive is sorted out in completeRecovery()
    drivers += driver
  }
  for (worker <- storedWorkers) {
    logInfo("Trying to recover worker: " + worker.id)
    try {
      registerWorker(worker)
      worker.state = WorkerState.UNKNOWN
      worker.endpoint.send(MasterChanged(self, masterWebUiUrl))
    } catch {
      case e: Exception => logInfo("Worker " + worker.id + " had exception on reconnect")
    }
  }
}
1.3.1 beginRecovery field mutations explained
When the Master is first initialized, these collections hold no values yet.
The arguments (storedApps, storedDrivers, storedWorkers) are read from ZooKeeper, so they contain no duplicate elements,
and many of the Master's fields are HashSets and HashMaps, which also guard against duplicates (see the toy sketch after the list below).
The function mutates a lot of state through nested calls, but everything ultimately ends in a call to schedule().
Here is the full list of fields it mutates and RPC messages it sends:
Fields mutated:
  registerApplication:
    applicationMetricsSystem.registerSource(app.appSource)
    apps += app
    idToApp(app.id) = app
    endpointToApp(app.driver) = app
    addressToApp(appAddress) = app
    waitingApps += app
  beginRecovery itself (storedDrivers loop):
    drivers += driver
  registerWorker:
    workers -= w
    workers += worker
    idToWorker(worker.id) = worker
    addressToWorker(workerAddress) = worker
  removeWorker:
    idToWorker -= worker.id
    addressToWorker -= worker.endpoint.address
  relaunchDriver:
    drivers.add(newDriver)
    waitingDrivers += newDriver
  removeDriver:
    drivers -= driver
    completedDrivers += driver
    driver.state = finalState
    driver.exception = exception
    driver.worker.foreach(w => w.removeDriver(driver))
RPC messages sent:
  worker.send(MasterChanged())
  driver.send(MasterChanged())
  driver.send(ExecutorUpdated())
  driver.send(WorkerRemoved())
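A toy sketch (not Spark code; the names below are only illustrative) of why keying these registrations by id keeps recovery free of duplicates, as noted above:

import scala.collection.mutable

case class RecoveredWorker(id: String, host: String)   // stand-in for WorkerInfo

val idToWorkerToy = mutable.HashMap[String, RecoveredWorker]()
idToWorkerToy("worker-1") = RecoveredWorker("worker-1", "host-a")
idToWorkerToy("worker-1") = RecoveredWorker("worker-1", "host-a")  // re-registering overwrites, it does not duplicate
assert(idToWorkerToy.size == 1)

val appsToy = mutable.HashSet[String]()
appsToy += "app-20240101000000-0001"
appsToy += "app-20240101000000-0001"                               // a HashSet likewise ignores the duplicate
assert(appsToy.size == 1)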
1.3.1.1 schedule() explained
private def schedule(): Unit = {
  if (state != RecoveryState.ALIVE) {
    return
  }
  // Drivers are placed round-robin over a shuffled list of ALIVE workers
  val shuffledAliveWorkers = Random.shuffle(workers.toSeq.filter(_.state == WorkerState.ALIVE))
  val numWorkersAlive = shuffledAliveWorkers.size
  var curPos = 0
  for (driver <- waitingDrivers.toList) {
    var launched = false
    var isClusterIdle = true
    var numWorkersVisited = 0
    while (numWorkersVisited < numWorkersAlive && !launched) {
      val worker = shuffledAliveWorkers(curPos)
      isClusterIdle = worker.drivers.isEmpty && worker.executors.isEmpty
      numWorkersVisited += 1
      if (canLaunchDriver(worker, driver.desc)) {
        val allocated = worker.acquireResources(driver.desc.resourceReqs)
        driver.withResources(allocated)
        launchDriver(worker, driver)
        waitingDrivers -= driver
        launched = true
      }
      curPos = (curPos + 1) % numWorkersAlive
    }
    if (!launched && isClusterIdle) {
      logWarning(s"Driver ${driver.id} requires more resource than any of Workers could have.")
    }
  }
  // After drivers, hand out executors for the waiting apps
  startExecutorsOnWorkers()
}
1.3.1.1.1 launchDriver(worker, driver) explained
private def launchDriver(worker: WorkerInfo, driver: DriverInfo): Unit = {
  logInfo("Launching driver " + driver.id + " on worker " + worker.id)
  worker.addDriver(driver)
  driver.worker = Some(worker)
  worker.endpoint.send(LaunchDriver(driver.id, driver.desc, driver.resources))
  driver.state = DriverState.RUNNING
}
1.3.1.1.2 startExecutorsOnWorkers() explained
private def startExecutorsOnWorkers(): Unit = {
  for (app <- waitingApps) {
    val coresPerExecutor = app.desc.coresPerExecutor.getOrElse(1)
    if (app.coresLeft >= coresPerExecutor) {
      val usableWorkers = workers.toArray.filter(_.state == WorkerState.ALIVE)
        .filter(canLaunchExecutor(_, app.desc))
        .sortBy(_.coresFree).reverse
      val appMayHang = waitingApps.length == 1 &&
        waitingApps.head.executors.isEmpty && usableWorkers.isEmpty
      if (appMayHang) {
        logWarning(s"App ${app.id} requires more resource than any of Workers could have.")
      }
      val assignedCores = scheduleExecutorsOnWorkers(app, usableWorkers, spreadOutApps)
      for (pos <- 0 until usableWorkers.length if assignedCores(pos) > 0) {
        allocateWorkerResourceToExecutors(
          app, assignedCores(pos), app.desc.coresPerExecutor, usableWorkers(pos))
      }
    }
  }
}
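scheduleExecutorsOnWorkers (not shown above) decides how many cores each usable worker gets; the key knob is spreadOutApps, which by default spreads an app across as many workers as possible instead of packing it onto a few. A simplified toy sketch of that idea (not the real algorithm, which also checks memory, executor limits and per-executor resources; the names assignCores/freeCores are made up for illustration):

def assignCores(coresNeeded: Int, freeCores: Array[Int], spreadOut: Boolean): Array[Int] = {
  val assigned = Array.fill(freeCores.length)(0)
  var left = coresNeeded
  var pos = 0
  while (left > 0 && assigned.zip(freeCores).exists { case (a, f) => a < f }) {
    if (assigned(pos) < freeCores(pos)) {
      assigned(pos) += 1
      left -= 1
    }
    // spread out: always move to the next worker; consolidate: stay on this worker until it is full
    if (spreadOut || assigned(pos) == freeCores(pos)) {
      pos = (pos + 1) % freeCores.length
    }
  }
  assigned
}

// assignCores(6, Array(4, 4, 4), spreadOut = true)  => Array(2, 2, 2)
// assignCores(6, Array(4, 4, 4), spreadOut = false) => Array(4, 2, 0)

For example, asking for 6 cores over three workers with 4 free cores each yields (2, 2, 2) when spread out and (4, 2, 0) when consolidated.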
1.3.1.1.2.1 allocateWorkerResourceToExecutors() explained
private def allocateWorkerResourceToExecutors(app: ApplicationInfo, assignedCores: Int,
    coresPerExecutor: Option[Int], worker: WorkerInfo): Unit = {
  val numExecutors = coresPerExecutor.map { assignedCores / _ }.getOrElse(1)
  val coresToAssign = coresPerExecutor.getOrElse(assignedCores)
  for (i <- 1 to numExecutors) {
    val allocated = worker.acquireResources(app.desc.resourceReqsPerExecutor)
    val exec = app.addExecutor(worker, coresToAssign, allocated)
    launchExecutor(worker, exec)
    app.state = ApplicationState.RUNNING
  }
}
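A quick worked example of the arithmetic above (hypothetical numbers):

val assignedCores = 7
val coresPerExecutor: Option[Int] = Some(2)
val numExecutors = coresPerExecutor.map { assignedCores / _ }.getOrElse(1)  // 7 / 2 = 3 executors
val coresToAssign = coresPerExecutor.getOrElse(assignedCores)               // 2 cores each; 1 of the 7 stays unused
// With coresPerExecutor = None the app gets a single executor holding all 7 assigned cores.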
1.3.1.1.2.1.1 launchExecutor explained
private def launchExecutor(worker: WorkerInfo, exec: ExecutorDesc): Unit = {
  worker.addExecutor(exec)
  worker.endpoint.send(LaunchExecutor(masterUrl, exec.application.id, exec.id,
    exec.application.desc, exec.cores, exec.memory, exec.resources))  /* tell the worker to start the executor */
  exec.application.driver.send(
    ExecutorAdded(exec.id, worker.id, worker.hostPort, exec.cores, exec.memory))  /* tell the driver the executor was added */
}
In the end this boils down to two messages. Note that this path only requests new resources; it never tears down old ones:
worker.send(LaunchExecutor)
driver.send(ExecutorAdded)
2. CompleteRecovery explained
private def completeRecovery(): Unit = {
  // Only proceed once, and only from the RECOVERING state
  if (state != RecoveryState.RECOVERING) { return }
  state = RecoveryState.COMPLETING_RECOVERY
  // Anything that never reconnected during the recovery window gets dropped
  workers.filter(_.state == WorkerState.UNKNOWN).foreach(
    removeWorker(_, "Not responding for recovery"))
  apps.filter(_.state == ApplicationState.UNKNOWN).foreach(finishApplication)
  apps.filter(_.state == ApplicationState.WAITING).foreach(_.state = ApplicationState.RUNNING)
  // Drivers whose worker disappeared are relaunched only if they were submitted with supervise
  drivers.filter(_.worker.isEmpty).foreach { d =>
    logWarning(s"Driver ${d.id} was not found after master recovery")
    if (d.desc.supervise) {
      logWarning(s"Re-launching ${d.id}")
      relaunchDriver(d)
    } else {
      removeDriver(d.id, DriverState.ERROR, None)
      logWarning(s"Did not re-launch ${d.id} because it was not supervised")
    }
  }
  state = RecoveryState.ALIVE
  schedule()
  logInfo("Recovery complete - resuming operations!")
}
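For reference, the recovery state machine this method finishes (RecoveryState in the master package defines STANDBY, ALIVE, RECOVERING and COMPLETING_RECOVERY):

STANDBY --(ElectedLeader, persisted data found)--> RECOVERING --(CompleteRecovery)--> COMPLETING_RECOVERY --> ALIVE
STANDBY --(ElectedLeader, nothing persisted)--> ALIVE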
2.1 removeWorker explained
private def removeWorker(worker: WorkerInfo, msg: String): Unit = {
  logInfo("Removing worker " + worker.id + " on " + worker.host + ":" + worker.port)
  worker.setState(WorkerState.DEAD)
  idToWorker -= worker.id
  addressToWorker -= worker.endpoint.address
  for (exec <- worker.executors.values) {
    logInfo("Telling app of lost executor: " + exec.id)
    exec.application.driver.send(ExecutorUpdated(
      exec.id, ExecutorState.LOST, Some("worker lost"), None, Some(worker.host)))
    exec.state = ExecutorState.LOST
    exec.application.removeExecutor(exec)
  }
  for (driver <- worker.drivers.values) {
    if (driver.desc.supervise) {
      logInfo(s"Re-launching ${driver.id}")
      relaunchDriver(driver)
    } else {
      logInfo(s"Not re-launching ${driver.id} because it was not supervised")
      removeDriver(driver.id, DriverState.ERROR, None)
    }
  }
  logInfo(s"Telling app of lost worker: " + worker.id)
  apps.filterNot(completedApps.contains(_)).foreach { app =>
    app.driver.send(WorkerRemoved(worker.id, worker.host, msg))
  }
  persistenceEngine.removeWorker(worker)
  schedule()
}
2.1.1 relaunchDriver explained
private def relaunchDriver(driver: DriverInfo): Unit = {
  // Retire the old DriverInfo (final state RELAUNCHING) and queue a fresh one for scheduling
  removeDriver(driver.id, DriverState.RELAUNCHING, None)
  val newDriver = createDriver(driver.desc)
  persistenceEngine.addDriver(newDriver)
  drivers.add(newDriver)
  waitingDrivers += newDriver
  schedule()
}
2.1.1.1 removeDriver explained
private def removeDriver(driverId: String, finalState: DriverState, exception: Option[Exception]): Unit = {
  drivers.find(d => d.id == driverId) match {
    case Some(driver) =>
      logInfo(s"Removing driver: $driverId")
      drivers -= driver
      // Keep the completed-driver history bounded: trim the oldest 10% before appending
      if (completedDrivers.size >= retainedDrivers) {
        val toRemove = math.max(retainedDrivers / 10, 1)
        completedDrivers.trimStart(toRemove)
      }
      completedDrivers += driver
      persistenceEngine.removeDriver(driver)
      driver.state = finalState
      driver.exception = exception
      driver.worker.foreach(w => w.removeDriver(driver))
      schedule()
    case None =>
      logWarning(s"Asked to remove unknown driver: $driverId")
  }
}
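A worked example of the retention trim above (assuming a default of 200 retained drivers, configured via spark.deploy.retainedDrivers; check your Spark version for the exact default):

val retainedDrivers = 200                          // assumed default, set via spark.deploy.retainedDrivers
val toRemove = math.max(retainedDrivers / 10, 1)   // = 20
// Once 200 drivers have completed, the 20 oldest entries are dropped before the new one is appended,
// so completedDrivers hovers between 181 and 200 entries.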