buildAutoscaler()-->
core.NewAutoscaler(opts)
autoscaler.Start()-->
a.clusterStateRegistry.Start()-->
csr.cloudProviderNodeInstancesCache.Start(csr.interrupt)-->
cache.cloudProvider.NodeGroups() // load the node groups defined by the cloud provider
nodeGroupInstances, err := nodeGroup.Nodes()-->
cache.updateCacheEntryLocked(nodeGroup, &cloudProviderNodeInstancesCacheEntry{nodeGroupInstances, time.Now()})
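// A minimal sketch of the idea behind this instances cache (type and field names
// are illustrative assumptions, not the real cluster-autoscaler code): per node
// group it stores the instance list plus the time it was refreshed, behind a
// mutex. Assumes "sync" and "time" are imported.
type instancesCacheEntry struct {
	instances   []string  // instance IDs reported by the cloud provider
	refreshTime time.Time // when this entry was last rebuilt
}

type instancesCache struct {
	mu      sync.Mutex
	entries map[string]instancesCacheEntry // keyed by node group ID
}

func (c *instancesCache) update(groupID string, instances []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[groupID] = instancesCacheEntry{instances: instances, refreshTime: time.Now()}
}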
for { // main loop, default interval 10s (--scan-interval)
autoscaler.RunOnce(loopStart)-->
unschedulablePodLister := a.UnschedulablePodLister() // pods whose phase is neither Succeeded nor Failed
scheduledPodLister := a.ScheduledPodLister()
pdbLister := a.PodDisruptionBudgetLister()
allNodes, readyNodes, typedErr := a.obtainNodeLists() // get all nodes and the ready nodes currently in the cluster
// Update cluster resource usage metrics
coresTotal, memoryTotal := calculateCoresMemoryTotal(allNodes, currentTime)
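// Hedged sketch of the cores/memory tally over the listed nodes; the real
// helper additionally filters some nodes (e.g. ones already being deleted).
// Assumes k8s.io/api/core/v1 is imported as apiv1.
func sumCoresMemory(nodes []*apiv1.Node) (cores, memoryBytes int64) {
	for _, node := range nodes {
		cores += node.Status.Capacity.Cpu().Value()
		memoryBytes += node.Status.Capacity.Memory().Value()
	}
	return cores, memoryBytes
}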
daemonsets, err := a.ListerRegistry.DaemonSetLister().List(labels.Everything())
err = a.AutoscalingContext.CloudProvider.Refresh()
cache.cloudProvider.NodeGroups() // reload the node groups defined by the cloud provider
nodeGroupInstances, err := nodeGroup.Nodes()
a.initializeClusterSnapshot(allNodes, nonExpendableScheduledPods) // expendable (low-priority) pods are excluded
a.processors.TemplateNodeInfoProvider.Process()-->
processNode(node)-->
simulator.BuildNodeInfoForNode(node, podsForNodes) // build a NodeInfo with the pods expected to run on a freshly created node
sanitizedNodeInfo, err := utils.SanitizeNodeInfo(nodeInfo, id, ignoredTaints) // copy the node under a unique template name and drop ignored taints
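// Rough sketch of the "sanitize" step: deep-copy the node, give it a unique
// template name so simulated nodes do not collide, and drop taints the user
// asked to ignore. Names and details are illustrative, not the real
// utils.SanitizeNodeInfo. Assumes apiv1 (k8s.io/api/core/v1), "fmt" and "math/rand".
func sanitizeNode(node *apiv1.Node, groupID string, ignoredTaints map[string]bool) *apiv1.Node {
	fresh := node.DeepCopy()
	fresh.Name = fmt.Sprintf("template-node-for-%s-%d", groupID, rand.Int63())
	if fresh.Labels == nil {
		fresh.Labels = map[string]string{}
	}
	fresh.Labels["kubernetes.io/hostname"] = fresh.Name
	kept := make([]apiv1.Taint, 0, len(fresh.Spec.Taints))
	for _, t := range fresh.Spec.Taints {
		if !ignoredTaints[t.Key] {
			kept = append(kept, t)
		}
	}
	fresh.Spec.Taints = kept
	return fresh
}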
a.updateClusterState(allNodes, nodeInfosForGroups, currentTime)
ScaleUp(autoscalingContext, a.processors, a.clusterStateRegistry, unschedulablePodsToHelp, readyNodes, daemonsets, nodeInfosForGroups, a.ignoredTaints)-->
// separate out nodes that do not belong to any autoscaled node group (e.g. virtual-kubelet nodes)
nodesFromNotAutoscaledGroups, err := utils.FilterOutNodesFromNotAutoscaledGroups(nodes, context.CloudProvider)
computeScaleUpResourcesLeftLimits(context, processors, nodeGroups, nodeInfos, nodesFromNotAutoscaledGroups, resourceLimiter)-->
calculateScaleUpCoresMemoryTotal(nodeGroups, nodeInfos, nodesFromNotAutoscaledGroups) // sum the total CPU and memory across the node groups (plus not-autoscaled nodes)
calculateScaleUpCustomResourcesTotal() // sum the allocatable custom resources (e.g. GPUs) of the node groups
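// Sketch of the "resources left" bookkeeping used by the limit checks: for each
// limited resource, left = max(0, limit - total currently in the cluster).
// Plain maps stand in for the real ResourceLimiter types.
func resourcesLeft(limits, used map[string]int64) map[string]int64 {
	left := make(map[string]int64, len(limits))
	for name, limit := range limits {
		remaining := limit - used[name]
		if remaining < 0 {
			remaining = 0
		}
		left[name] = remaining
	}
	return left
}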
clusterStateRegistry.GetUpcomingNodes()-->
newNodes := ar.CurrentTarget - (readiness.Ready + readiness.Unready + readiness.LongUnregistered)
upcomingNodes = append(upcomingNodes, nodeTemplate) // append one template node per upcoming node in the group
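// The upcoming-node count per group follows directly from the formula above:
// the gap between the group's target size and the nodes the registry already
// accounts for, clamped at zero. The struct below is illustrative.
type groupReadiness struct {
	Ready, Unready, LongUnregistered int
}

func upcomingNodeCount(currentTarget int, r groupReadiness) int {
	n := currentTarget - (r.Ready + r.Unready + r.LongUnregistered)
	if n < 0 {
		return 0
	}
	return n
}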
computeScaleUpResourcesDelta(context, processors, nodeInfo, nodeGroup, resourceLimiter)
context.ExpanderStrategy.BestOption(options, nodeInfos) // pluggable expander strategies, e.g. least-waste, most-pods, price, priority, random
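// Illustrative least-waste expander: score every candidate option by the
// fraction of new CPU and memory that would sit idle after placing the pending
// pods, and pick the lowest score. The real expander.Strategy interface and its
// scoring differ; everything below is an assumption for illustration. Assumes "math".
type expansionOption struct {
	GroupID                 string
	NodeCount               int
	WastedCPU, WastedMemory float64 // fraction of the added capacity left unused
}

func leastWaste(options []expansionOption) *expansionOption {
	var best *expansionOption
	bestScore := math.MaxFloat64
	for i := range options {
		if score := options[i].WastedCPU + options[i].WastedMemory; score < bestScore {
			bestScore = score
			best = &options[i]
		}
	}
	return best
}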
processors.NodeGroupManager.CreateNodeGroup(context, bestOption.NodeGroup)
// estimate how many new nodes are needed, then cap the count by the remaining resource limits
newNodes, err = applyScaleUpResourcesLimits(context, processors, newNodes, scaleUpResourcesLeft, nodeInfo, bestOption.NodeGroup, resourceLimiter)
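// Sketch of capping the requested node count by the remaining resource budget:
// every new node consumes a fixed per-node delta, so the most constrained
// resource limits the count to floor(left / delta). Names are illustrative.
func capNewNodes(requested int, left, deltaPerNode map[string]int64) int {
	capped := requested
	for name, delta := range deltaPerNode {
		if delta <= 0 {
			continue
		}
		if fit := int(left[name] / delta); fit < capped {
			capped = fit
		}
	}
	return capped
}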
// find node groups similar to the chosen one so the scale-up can be balanced across them
processors.NodeGroupSetProcessor.FindSimilarNodeGroups(context, bestOption.NodeGroup, nodeInfos)
processors.NodeGroupSetProcessor.BalanceScaleUpBetweenGroups()
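// Greedy sketch of balancing the scale-up across similar node groups: always
// add the next node to whichever group has the smallest planned size and is not
// yet at its max. The real NodeGroupSetProcessor.BalanceScaleUpBetweenGroups is
// more involved; the struct and logic here are assumptions.
type balancedGroup struct {
	ID          string
	CurrentSize int
	MaxSize     int
	Added       int
}

func balanceScaleUp(groups []balancedGroup, newNodes int) []balancedGroup {
	for n := 0; n < newNodes; n++ {
		best := -1
		for i := range groups {
			planned := groups[i].CurrentSize + groups[i].Added
			if planned >= groups[i].MaxSize {
				continue // this group is already at its max
			}
			if best == -1 || planned < groups[best].CurrentSize+groups[best].Added {
				best = i
			}
		}
		if best == -1 {
			break // every group is full
		}
		groups[best].Added++
	}
	return groups
}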
executeScaleUp(context, clusterStateRegistry, info, gpu.GetGpuTypeForMetrics(gpuLabel, availableGPUTypes, nodeInfo.Node(), nil), now)-->
info.Group.IncreaseSize(increase)
}
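// Closing the loop: executeScaleUp asks the cloud provider to grow the chosen
// group, and the ClusterStateRegistry then tracks the request. A minimal sketch
// of the clamping that belongs around IncreaseSize (the interface below is a
// simplified stand-in, not the full real cloudprovider.NodeGroup):
type scalableGroup interface {
	TargetSize() (int, error)
	MaxSize() int
	IncreaseSize(delta int) error
}

func scaleUpGroup(g scalableGroup, increase int) error {
	target, err := g.TargetSize()
	if err != nil {
		return err
	}
	if target+increase > g.MaxSize() {
		increase = g.MaxSize() - target // never exceed the configured max size
	}
	if increase <= 0 {
		return nil
	}
	return g.IncreaseSize(increase)
}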