> cd output/build/bin
> ./version.sh
Using CATALINA_BASE:   /Users/zrss/Documents/Code/Java/apache-tomcat-7.0.88-src/output/build
Using CATALINA_HOME:   /Users/zrss/Documents/Code/Java/apache-tomcat-7.0.88-src/output/build
Using CATALINA_TMPDIR: /Users/zrss/Documents/Code/Java/apache-tomcat-7.0.88-src/output/build/temp
Using JRE_HOME:        /Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home
Using CLASSPATH:       /Users/zrss/Documents/Code/Java/apache-tomcat-7.0.88-src/output/build/bin/bootstrap.jar:/Users/zrss/Documents/Code/Java/apache-tomcat-7.0.88-src/output/build/bin/tomcat-juli.jar
Server version: Apache Tomcat/7.0.88
Server built:   Jul 6 2018 14:30:23 UTC
Server number:  7.0.88.0
OS Name:        Mac OS X
OS Version:     10.12.6
Architecture:   x86_64
JVM Version:    1.6.0_65-b14-468
JVM Vendor:     Apple Inc.
// Get all the pods.
podList, err := g.runtime.GetPods(true)

ShouldContainerBeRestarted
// Check RestartPolicy for dead container
if pod.Spec.RestartPolicy == v1.RestartPolicyNever {
	glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
	return false
}
if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {
	// Check the exit code.
	if status.ExitCode == 0 {
		glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
		return false
	}
}
return true
After minikube finishes downloading the ISO, it bootstraps the k8s cluster. Once the cluster is up, it starts several system components such as kube-addon-manager-minikube, dashboard and dns. Unfortunately, these pods stay stuck in the ContainerCreating state.
Check the events:
kubectl describe po kube-addon-manager-minikube -nkube-system
minikube cache list
gcr.io/google-containers/kube-addon-manager:v6.5
gcr.io/google_containers/pause-amd64:3.0
gcr.io/k8s-minikube/storage-provisioner:v1.8.1
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.5
k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.5
k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.5
k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.1
After caching these images, the pods under kube-system all reach the Running state.
minikube logs healthcheck error
While using minikube, the following error keeps appearing in its logs:
Jun 23 18:15:15 minikube localkube[3034]: E0623 18:15:15.392453 3034 healthcheck.go:317] Failed to start node healthz on 0: listen tcp: address 0: missing port in address
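The message comes from Go's net package: the healthz bind address is just "0", which contains no port. A minimal standalone sketch (plain Go, not localkube code) that reproduces the error:

package main

import (
	"fmt"
	"net"
)

func main() {
	// "0" is parsed as host "0" with no ":port", so the listener cannot be created.
	_, err := net.Listen("tcp", "0")
	fmt.Println(err) // listen tcp: address 0: missing port in address
}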
if volume.Spec.ClaimRef == nil {
	return false
}
if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace {
	return false
}
if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID {
	return false
}
When binding a volume and a claim, the fields of the volume and the claim are checked for mismatches:
// Check if the claim was already bound (either by controller or by user)
shouldBind := false
if volume.Name != claim.Spec.VolumeName {
	shouldBind = true
}
function handleUpgrade(req, socket, head) {
	// set to initialized when used externally
	wsInitialized = true;

	if (shouldProxy(config.context, req)) {
		var activeProxyOptions = prepareProxyRequest(req);
		proxy.ws(req, socket, head, activeProxyOptions);
		logger.info('[HPM] Upgrading to WebSocket');
	}
}
After reading the Nginx documentation, it turns out that add_header only takes effect for specific HTTP status codes:
Adds the specified field to a response header provided that the response code equals 200, 201 (1.3.10), 204, 206, 301, 302, 303, 304, 307 (1.1.16, 1.0.13), or 308 (1.13.0). The value can contain variables.
func (cache *schedulerCache) addPod(pod *v1.Pod) {
	n, ok := cache.nodes[pod.Spec.NodeName]
	if !ok {
		n = NewNodeInfo()
		cache.nodes[pod.Spec.NodeName] = n
	}
	n.addPod(pod)
}
Computing node resources
plugin/pkg/scheduler/schedulercache/node_info.go
// addPod adds pod information to this NodeInfo.
func (n *NodeInfo) addPod(pod *v1.Pod) {
	res, non0_cpu, non0_mem := calculateResource(pod)
	n.requestedResource.MilliCPU += res.MilliCPU
	n.requestedResource.Memory += res.Memory
	...
	// Consume ports when pods added.
	n.updateUsedPorts(pod, true)
	n.generation++
}
// Only nodes in the "Ready" condition with status == "True" are schedulable
nodeInformer.Informer().AddEventHandlerWithResyncPeriod(
	cache.ResourceEventHandlerFuncs{
		AddFunc:    c.addNodeToCache,
		UpdateFunc: c.updateNodeInCache,
		DeleteFunc: c.deleteNodeFromCache,
	},
	0,
)
How does the scheduler schedule a pod?
Get nodes from the nodeList (which comes from the nodeInformer)
Computing predicates
Prioritizing
Selecting host (sort by score; round-robin among hosts with the same score) — see the sketch below
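A simplified, self-contained sketch of these three steps; the names below (scheduleOne, fits, score, rrIndex) are illustrative and not the scheduler's actual API:

package sched

import (
	"errors"
	"sort"
)

type node struct {
	Name  string
	Score int
}

var rrIndex int // round-robin cursor shared across calls

// scheduleOne filters nodes with a predicate, scores the survivors, and
// picks a host; nodes tied at the top score are chosen round-robin.
func scheduleOne(nodes []node, fits func(node) bool, score func(node) int) (string, error) {
	// Computing predicates: keep only feasible nodes.
	var feasible []node
	for _, n := range nodes {
		if fits(n) {
			feasible = append(feasible, n)
		}
	}
	if len(feasible) == 0 {
		return "", errors.New("no feasible nodes")
	}

	// Prioritizing: score each feasible node, highest first.
	for i := range feasible {
		feasible[i].Score = score(feasible[i])
	}
	sort.Slice(feasible, func(i, j int) bool { return feasible[i].Score > feasible[j].Score })

	// Selecting host: round-robin among the nodes sharing the top score.
	tied := 1
	for tied < len(feasible) && feasible[tied].Score == feasible[0].Score {
		tied++
	}
	rrIndex = (rrIndex + 1) % tied
	return feasible[rrIndex].Name, nil
}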
Which predicates are there?
The important ones include:
PodFitsResources
Checks whether the current node's resources can satisfy the pod's request. Note that init containers run sequentially, so their requirement is the maximum over each resource dimension, whereas regular containers run in parallel, so their requirement is the sum over each resource dimension. The pod's overall requirement in each dimension is the larger of the init-container maximum and the regular-container sum.
// Returns a *schedulercache.Resource that covers the largest width in each
// resource dimension. Because init-containers run sequentially, we collect the
// max in each dimension iteratively. In contrast, we sum the resource vectors
// for regular containers since they run simultaneously.
//
// Example:
//
// Pod:
//   InitContainers
//     IC1:
//       CPU: 2
//       Memory: 1G
//     IC2:
//       CPU: 2
//       Memory: 3G
//   Containers
//     C1:
//       CPU: 2
//       Memory: 1G
//     C2:
//       CPU: 1
//       Memory: 1G
//
// Result: CPU: 3, Memory: 3G
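A minimal sketch of that rule; the resource type and podRequest helper below are illustrative, not the scheduler's actual types:

type resource struct {
	MilliCPU int64
	Memory   int64
}

// podRequest sums the regular containers, then takes the per-dimension max
// with every init container, matching the example above (CPU: 3, Memory: 3G).
func podRequest(initContainers, containers []resource) resource {
	var res resource
	for _, c := range containers {
		res.MilliCPU += c.MilliCPU
		res.Memory += c.Memory
	}
	for _, ic := range initContainers {
		if ic.MilliCPU > res.MilliCPU {
			res.MilliCPU = ic.MilliCPU
		}
		if ic.Memory > res.Memory {
			res.Memory = ic.Memory
		}
	}
	return res
}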
// CalculateSpreadPriority spreads pods across hosts and zones, considering pods belonging to the same service or replication controller.
// When a pod is scheduled, it looks for services, RCs or RSs that match the pod, then finds existing pods that match those selectors.
// It favors nodes that have fewer existing matching pods.
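The scoring idea can be sketched as follows; this is a rough illustration with assumed names of how "fewer matching pods" turns into a higher score, not the priority function's exact code:

// spreadScore gives a node with fewer matching pods a higher score,
// normalized against the node carrying the most matching pods.
func spreadScore(matchingOnNode, maxMatchingOnAnyNode int) int {
	const maxPriority = 10
	if maxMatchingOnAnyNode == 0 {
		return maxPriority
	}
	return maxPriority * (maxMatchingOnAnyNode - matchingOnNode) / maxMatchingOnAnyNode
}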
Page 3 (counting from 0) is a leaf page; it will be used as the root bucket.
type bucket struct {
	root     pgid   // page id of the bucket's root-level page
	sequence uint64 // monotonically incrementing, used by NextSequence()
}
m.root = bucket{root: 3}
The bucket struct represents a bucket as stored in the file.
In addition, a tx is associated with a Bucket struct:
type Bucket struct {
	*bucket
	tx       *Tx                // the associated transaction
	buckets  map[string]*Bucket // subbucket cache
	page     *page              // inline page reference
	rootNode *node              // materialized node for the root page.
	nodes    map[pgid]*node     // node cache

	// Sets the threshold for filling nodes when they split. By default,
	// the bucket will fill to 50% but it can be useful to increase this
	// amount if you know that your write workloads are mostly append-only.
	//
	// This is non-persisted across transactions so it must be set in every Tx.
	FillPercent float64
}
As you can see, it embeds the bucket struct.
When a write transaction is initialized, it takes the meta lock to block modifications to the meta page, and then copies the meta page into the transaction's own storage. In fact, opening a write transaction takes the rwlock, so write transactions never run concurrently; moreover, only write transactions modify the meta page, so the meta page copy here looks questionable and seemingly unnecessary.
The init method runs inside beginTx and is executed by both read and write transactions. So although a write transaction does not need to copy the meta page, a read transaction does, because a write transaction modifies the meta page when it commits.
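A rough sketch of this initialization, modelled after bolt's Tx.init; treat the exact field handling as an approximation rather than the authoritative source:

func (tx *Tx) init(db *DB) {
	tx.db = db
	tx.pages = nil

	// Copy the meta page since it can be changed by the writer.
	tx.meta = &meta{}
	db.meta().copy(tx.meta)

	// Copy over the root bucket.
	tx.root = newBucket(tx)
	tx.root.bucket = &bucket{}
	*tx.root.bucket = tx.meta.root

	// Increment the transaction id and add a page cache for writable transactions.
	if tx.writable {
		tx.pages = make(map[pgid]*page)
		tx.meta.txid += txid(1)
	}
}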
After the meta page has been copied, the tx's root (Bucket) is initialized and its root bucket is set to the root bucket recorded in meta. The first write transaction gets txid 2, since 0 and 1 are used when setting up the two meta pages:
for i := 0; i < 2; i++ {
	p := db.pageInBuffer(buf[:], pgid(i))
	p.id = pgid(i)
	p.flags = metaPageFlag

	// Initialize the meta page.
	m := p.meta()
	m.magic = magic
	m.version = version
	m.pageSize = uint32(db.pageSize)
	m.freelist = 2
	m.root = bucket{root: 3}
	m.pgid = 4
	m.txid = txid(i) // 0 1 txid used
	m.checksum = m.sum64()
}