TBR: oncall: update oncall dashboard.
Change-Id: I7cdab5432f57ec638b04ad2c3703fa8fcba79086
diff --git a/internal/monitoring/monitoring.go b/internal/monitoring/monitoring.go
index e3aeca5..58c93f5 100644
--- a/internal/monitoring/monitoring.go
+++ b/internal/monitoring/monitoring.go
@@ -47,6 +47,12 @@
zoneStatSuffix = "__debug/stats/system/gce/zone"
)
+// Human-readable metric names.
+const (
+ MNMounttableMountedServers = "mounttable mounted servers"
+ MNMounttableNodes = "mounttable nodes"
+)
+
// serviceMountedNames is a map from human-readable service names to their
// relative mounted names in the global mounttable.
var serviceMountedNames = map[string]string{
diff --git a/oncall/doc.go b/oncall/doc.go
index 5e9725e..9896b77 100644
--- a/oncall/doc.go
+++ b/oncall/doc.go
@@ -39,6 +39,8 @@
Listening address for the server.
-cache=
Directory to use for caching files.
+ -key=
+ The path to the service account's JSON credentials file.
-static=
Directory to use for serving static files.
diff --git a/oncall/serve.go b/oncall/serve.go
index 3c8f24e..651d750 100644
--- a/oncall/serve.go
+++ b/oncall/serve.go
@@ -6,24 +6,154 @@
import (
"bytes"
+ "encoding/json"
"fmt"
+ "html"
+ "io/ioutil"
+ "math"
"net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ cloudmonitoring "google.golang.org/api/monitoring/v3"
"v.io/jiri"
"v.io/jiri/collect"
+ "v.io/jiri/tool"
"v.io/x/devtools/internal/cache"
+ "v.io/x/devtools/internal/monitoring"
"v.io/x/lib/cmdline"
)
+const (
+ numWorkers = 64
+ resultTypeServiceLatency = "resultTypeServiceLatency"
+ resultTypeServiceQPS = "resultTypeServiceQPS"
+ resultTypeServiceCounters = "resultTypeServiceCounters"
+ resultTypeServiceMetadata = "resultTypeServiceMetadata"
+
+ getKubeDataTaskTypePod = "getKubeDataTaskTypePod"
+ getKubeDataTaskTypeNode = "getKubeDataTaskTypeNode"
+
+ logsStyle = "font-family: monospace; font-size: 12px; white-space:pre-wrap; word-wrap: break-word;"
+)
+
var (
addressFlag string
cacheFlag string
+ keyFileFlag string
staticDirFlag string
)
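+
+// podSpec holds the fields this dashboard needs from `kubectl get pods -o=json`
+// output; zone and project record which cluster a pod came from.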
+type podSpec struct {
+ Metadata struct {
+ Name string `json:"name"`
+ Uid string `json:"uid"`
+ Labels struct {
+ Service string `json:"service"`
+ Version string `json:"version"`
+ } `json:"labels"`
+ } `json:"metadata"`
+ Spec struct {
+ NodeName string `json:"nodeName"`
+ Containers []struct {
+ Name string `json:"name"`
+ } `json:"containers"`
+ } `json:"spec"`
+ Status struct {
+ Phase string `json:"phase"`
+ StartTime string `json:"startTime"`
+ } `json:"status"`
+ zone string
+ project string
+}
+
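+// nodeSpec holds the node name and GCE external instance id taken from
+// `kubectl get nodes -o=json` output.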
+type nodeSpec struct {
+ Metadata struct {
+ Name string `json:"name"`
+ } `json:"metadata"`
+ Spec struct {
+ ExternalID string `json:"externalID"`
+ } `json:"spec"`
+}
+
+type clusterLocation struct {
+ project string
+ zone string
+}
+
+// Task and result for getKubeData.
+type getKubeDataTask struct {
+ location clusterLocation
+ taskType string
+}
+type getKubeDataResult struct {
+ pods []*podSpec
+ nodes []*nodeSpec
+ taskType string
+ err error
+}
+
+// Task and result for getMetricWorker.
+type getMetricTask struct {
+ resultType string
+ md *cloudmonitoring.MetricDescriptor
+ metricName string
+ pod *podSpec
+ extraLabels map[string]string
+}
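+
+// getMetricResult is serialized to JSON as part of the /data response, so only
+// exported fields reach the dashboard.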
+type getMetricResult struct {
+ ResultType string
+ Instance string
+ Zone string
+ Project string
+ MetricName string
+ ExtraLabels map[string]string
+ CurrentValue float64
+ MinValue float64
+ MaxValue float64
+ HistoryTimestamps []int64
+ HistoryValues []float64
+ ErrMsg string
+ PodName string
+ PodUID string
+ PodNode string
+ PodStatus string
+ MainContainer string
+ ServiceVersion string
+}
+
+// Final result for the /data endpoint.
+type getDataResult struct {
+ // These fields are indexed by metric names.
+ ServiceLatency map[string][]getMetricResult
+ ServiceQPS map[string][]getMetricResult
+ ServiceCounters map[string][]getMetricResult
+ ServiceMetadata map[string][]getMetricResult
+
+ Instances map[string]string // instances -> external ids
+ Oncalls []string
+ MinTime int64
+ MaxTime int64
+}
+
+// Current service locations.
+var locations = []clusterLocation{
+ {
+ project: "vanadium-production",
+ zone: "us-central1-c",
+ },
+ {
+ project: "vanadium-auth-production",
+ zone: "us-central1-c",
+ },
+}
+
func init() {
cmdServe.Flags.StringVar(&addressFlag, "address", ":8000", "Listening address for the server.")
cmdServe.Flags.StringVar(&cacheFlag, "cache", "", "Directory to use for caching files.")
+ cmdServe.Flags.StringVar(&keyFileFlag, "key", "", "The path to the service account's JSON credentials file.")
cmdServe.Flags.StringVar(&staticDirFlag, "static", "", "Directory to use for serving static files.")
}
@@ -56,6 +186,12 @@
http.HandleFunc("/data", func(w http.ResponseWriter, r *http.Request) {
dataHandler(jirix, root, w, r)
})
+ http.HandleFunc("/logs", func(w http.ResponseWriter, r *http.Request) {
+ logsHandler(jirix, root, w, r)
+ })
+ http.HandleFunc("/cfg", func(w http.ResponseWriter, r *http.Request) {
+ cfgHandler(jirix, root, w, r)
+ })
http.HandleFunc("/pic", func(w http.ResponseWriter, r *http.Request) {
picHandler(jirix, root, w, r)
})
@@ -69,40 +205,466 @@
}
func dataHandler(jirix *jiri.X, root string, w http.ResponseWriter, r *http.Request) {
- // Get timestamp from either the "latest" file or "ts" parameter.
- r.ParseForm()
- ts := r.Form.Get("ts")
- if ts == "" {
+ s, err := monitoring.Authenticate(keyFileFlag)
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+
+ // Get start and end timestamps.
+ if err := r.ParseForm(); err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
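+ // Default to the most recent hour when "st"/"et" are not specified.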
+ now := time.Now()
+ startTimestamp := now.Unix() - 3600
+ endTimestamp := now.Unix()
+ strStartTimestamp := r.Form.Get("st")
+ if strStartTimestamp != "" {
var err error
- ts, err = readGoogleStorageFile(jirix, "latest")
+ startTimestamp, err = strconv.ParseInt(strStartTimestamp, 10, 64)
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+ }
+ strEndTimestamp := r.Form.Get("et")
+ if strEndTimestamp != "" {
+ var err error
+ endTimestamp, err = strconv.ParseInt(strEndTimestamp, 10, 64)
if err != nil {
respondWithError(jirix, err, w)
return
}
}
- cachedFile, err := cache.StoreGoogleStorageFile(jirix, root, bucketData, ts+".oncall")
+ // Get currently running pods and nodes from vanadium production clusters.
+ podsByServices, nodes, err := getKubeData(jirix)
if err != nil {
respondWithError(jirix, err, w)
return
}
- bytes, err := jirix.NewSeq().ReadFile(cachedFile)
+
+ // Create tasks for fetching metrics from GCM.
+ allTasks := []getMetricTask{}
+ mdServiceLatency, err := monitoring.GetMetric("service-latency", "vanadium-production")
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+ mdServiceQPS, err := monitoring.GetMetric("service-qps-total", "vanadium-production")
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+ mdServiceCounters, err := monitoring.GetMetric("service-counters", "vanadium-production")
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+ mdServiceMetadata, err := monitoring.GetMetric("service-metadata", "vanadium-production")
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+ for serviceName, pods := range podsByServices {
+ for _, pod := range pods {
+ allTasks = append(allTasks,
+ // Metadata.
+ getMetricTask{
+ resultType: resultTypeServiceMetadata,
+ md: mdServiceMetadata,
+ metricName: serviceName,
+ pod: pod,
+ extraLabels: map[string]string{
+ "metadata_name": "build age",
+ },
+ },
+ // QPS.
+ getMetricTask{
+ resultType: resultTypeServiceQPS,
+ md: mdServiceQPS,
+ metricName: serviceName,
+ pod: pod,
+ })
+ // Latency.
+ if serviceName == monitoring.SNIdentity {
+ for _, n := range []string{monitoring.SNMacaroon, monitoring.SNGoogleIdentity, monitoring.SNBinaryDischarger} {
+ allTasks = append(allTasks,
+ getMetricTask{
+ resultType: resultTypeServiceLatency,
+ md: mdServiceLatency,
+ metricName: n,
+ pod: pod,
+ })
+ }
+ } else {
+ allTasks = append(allTasks,
+ getMetricTask{
+ resultType: resultTypeServiceLatency,
+ md: mdServiceLatency,
+ metricName: serviceName,
+ pod: pod,
+ })
+ }
+ // Counters.
+ if serviceName == monitoring.SNMounttable {
+ allTasks = append(allTasks,
+ getMetricTask{
+ resultType: resultTypeServiceCounters,
+ md: mdServiceCounters,
+ metricName: monitoring.MNMounttableMountedServers,
+ pod: pod,
+ },
+ getMetricTask{
+ resultType: resultTypeServiceCounters,
+ md: mdServiceCounters,
+ metricName: monitoring.MNMounttableNodes,
+ pod: pod,
+ },
+ )
+ }
+ }
+ }
+
+ // Send tasks to workers.
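+ // Both channels are buffered to numTasks so senders and workers never block;
+ // workers exit once the tasks channel is closed and drained.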
+ numTasks := len(allTasks)
+ tasks := make(chan getMetricTask, numTasks)
+ taskResults := make(chan getMetricResult, numTasks)
+ for i := 0; i < numWorkers; i++ {
+ go getMetricWorker(jirix, s, time.Unix(startTimestamp, 0), time.Unix(endTimestamp, 0), tasks, taskResults)
+ }
+ for _, task := range allTasks {
+ tasks <- task
+ }
+ close(tasks)
+
+ // Process results.
+ result := getDataResult{
+ ServiceLatency: map[string][]getMetricResult{},
+ ServiceQPS: map[string][]getMetricResult{},
+ ServiceCounters: map[string][]getMetricResult{},
+ ServiceMetadata: map[string][]getMetricResult{},
+ }
+ for i := 0; i < numTasks; i++ {
+ r := <-taskResults
+ n := r.MetricName
+ switch r.ResultType {
+ case resultTypeServiceLatency:
+ result.ServiceLatency[n] = append(result.ServiceLatency[n], r)
+ case resultTypeServiceQPS:
+ result.ServiceQPS[n] = append(result.ServiceQPS[n], r)
+ case resultTypeServiceCounters:
+ result.ServiceCounters[n] = append(result.ServiceCounters[n], r)
+ case resultTypeServiceMetadata:
+ result.ServiceMetadata[n] = append(result.ServiceMetadata[n], r)
+ }
+ }
+ result.MinTime = startTimestamp
+ result.MaxTime = endTimestamp
+ result.Instances = nodes
+
+ // Get oncalls.
+ oncalls, err := getOncalls(jirix)
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+ result.Oncalls = oncalls
+
+ // Convert the result to JSON and return it.
+ b, err := json.MarshalIndent(&result, "", " ")
if err != nil {
respondWithError(jirix, err, w)
return
}
w.Header().Set("Content-Type", "application/json")
- w.Write(bytes)
+ w.Write(b)
+}
+
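+// getMetricWorker reads tasks from the tasks channel, queries the GCM time series
+// for each one over [startTime, endTime], and sends a getMetricResult (with any
+// error recorded in ErrMsg) on the results channel.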
+func getMetricWorker(jirix *jiri.X, s *cloudmonitoring.Service, startTime, endTime time.Time, tasks <-chan getMetricTask, results chan<- getMetricResult) {
+ for task := range tasks {
+ result := getMetricResult{
+ ResultType: task.resultType,
+ Instance: task.pod.Metadata.Name,
+ Zone: task.pod.zone,
+ Project: task.pod.project,
+ MetricName: task.metricName,
+ ExtraLabels: task.extraLabels,
+ CurrentValue: -1,
+ MinValue: math.MaxFloat64,
+ MaxValue: 0,
+ PodName: task.pod.Metadata.Name,
+ PodUID: task.pod.Metadata.Uid,
+ PodNode: task.pod.Spec.NodeName,
+ PodStatus: task.pod.Status.Phase,
+ MainContainer: task.pod.Spec.Containers[0].Name,
+ ServiceVersion: task.pod.Metadata.Labels.Version,
+ }
+ filters := []string{
+ fmt.Sprintf("metric.type=%q", task.md.Type),
+ fmt.Sprintf("metric.label.metric_name=%q", task.metricName),
+ fmt.Sprintf("metric.label.gce_instance=%q", task.pod.Metadata.Name),
+ fmt.Sprintf("metric.label.gce_zone=%q", task.pod.zone),
+ }
+ for labelKey, labelValue := range task.extraLabels {
+ filters = append(filters, fmt.Sprintf("metric.label.%s=%q", labelKey, labelValue))
+ }
+ nextPageToken := ""
+ points := []*cloudmonitoring.Point{}
+ timestamps := []int64{}
+ values := []float64{}
+ for {
+ resp, err := s.Projects.TimeSeries.List("projects/vanadium-production").
+ IntervalStartTime(startTime.UTC().Format(time.RFC3339)).
+ IntervalEndTime(endTime.UTC().Format(time.RFC3339)).
+ Filter(strings.Join(filters, " AND ")).
+ PageToken(nextPageToken).Do()
+ if err != nil {
+ result.ErrMsg = fmt.Sprintf("List() failed: %v", err)
+ break
+ }
+ if len(resp.TimeSeries) > 0 {
+ // We should only get one time series.
+ ts := resp.TimeSeries[0]
+ points = append(points, ts.Points...)
+ }
+ if result.ErrMsg != "" {
+ break
+ }
+ nextPageToken = resp.NextPageToken
+ if nextPageToken == "" {
+ break
+ }
+ }
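+ // Points come back from the Monitoring API newest-first, so walk them in
+ // reverse to build a chronological history; the last value is then the most
+ // recent sample.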
+ for i := len(points) - 1; i >= 0; i-- {
+ pt := points[i]
+ epochTime, err := time.Parse(time.RFC3339, pt.Interval.EndTime)
+ if err != nil {
+ result.ErrMsg = fmt.Sprintf("Parse(%s) failed: %v", pt.Interval.EndTime)
+ break
+ }
+ timestamp := epochTime.Unix()
+ timestamps = append(timestamps, timestamp)
+ value := pt.Value.DoubleValue
+ values = append(values, value)
+ result.MaxValue = math.Max(result.MaxValue, value)
+ result.MinValue = math.Min(result.MinValue, value)
+ }
+ result.HistoryTimestamps = timestamps
+ result.HistoryValues = values
+ if len(values) > 0 {
+ result.CurrentValue = values[len(values)-1]
+ }
+ results <- result
+ }
+}
+
+// getKubeData returns:
+// - pod data indexed by service name.
+// - node external IDs indexed by node name.
+func getKubeData(jirix *jiri.X) (map[string][]*podSpec, map[string]string, error) {
+ retPods := map[string][]*podSpec{}
+ retNodes := map[string]string{}
+
+ // Fetch pod and node data from all clusters: two tasks (pods and nodes) per location.
+ numTasks := len(locations) * 2
+ tasks := make(chan getKubeDataTask, numTasks)
+ taskResults := make(chan getKubeDataResult, numTasks)
+ for i := 0; i < numTasks; i++ {
+ go getKubeDataWorker(jirix, tasks, taskResults)
+ }
+ for _, loc := range locations {
+ tasks <- getKubeDataTask{
+ location: loc,
+ taskType: getKubeDataTaskTypePod,
+ }
+ tasks <- getKubeDataTask{
+ location: loc,
+ taskType: getKubeDataTaskTypeNode,
+ }
+ }
+ close(tasks)
+
+ pods := []*podSpec{}
+ nodes := []*nodeSpec{}
+ for i := 0; i < numTasks; i++ {
+ result := <-taskResults
+ switch result.taskType {
+ case getKubeDataTaskTypePod:
+ pods = append(pods, result.pods...)
+ case getKubeDataTaskTypeNode:
+ nodes = append(nodes, result.nodes...)
+ }
+ }
+
+ // Index pod data by service name.
+ for _, pod := range pods {
+ switch pod.Metadata.Labels.Service {
+ case "auth":
+ retPods[monitoring.SNIdentity] = append(retPods[monitoring.SNIdentity], pod)
+ case "mounttable":
+ retPods[monitoring.SNMounttable] = append(retPods[monitoring.SNMounttable], pod)
+ case "proxy":
+ retPods[monitoring.SNProxy] = append(retPods[monitoring.SNProxy], pod)
+ case "role":
+ retPods[monitoring.SNRole] = append(retPods[monitoring.SNRole], pod)
+ }
+ }
+ // Index node external IDs by node name.
+ for _, node := range nodes {
+ retNodes[node.Metadata.Name] = node.Spec.ExternalID
+ }
+
+ return retPods, retNodes, nil
+}
+
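+// getKubeDataWorker runs kubectl for each task and sends the parsed pod or node
+// list (or an error) back on the results channel.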
+func getKubeDataWorker(jirix *jiri.X, tasks <-chan getKubeDataTask, results chan<- getKubeDataResult) {
+ for task := range tasks {
+ kubeCtlArgs := []string{"get", "pods", "-o=json"}
+ if task.taskType == getKubeDataTaskTypeNode {
+ kubeCtlArgs = []string{"get", "nodes", "-o=json"}
+ }
+ var podItems struct {
+ Items []*podSpec `json:"items"`
+ }
+ var nodeItems struct {
+ Items []*nodeSpec `json:"items"`
+ }
+ out, err := runKubeCtl(jirix, task.location, kubeCtlArgs)
+ if err != nil {
+ results <- getKubeDataResult{
+ err: err,
+ }
+ continue
+ }
+ switch task.taskType {
+ case getKubeDataTaskTypePod:
+ if err := json.Unmarshal(out, &podItems); err != nil {
+ results <- getKubeDataResult{
+ err: fmt.Errorf("Unmarshal() failed: %v", err),
+ }
+ continue
+ }
+ for _, item := range podItems.Items {
+ item.zone = task.location.zone
+ item.project = task.location.project
+ }
+ results <- getKubeDataResult{
+ taskType: getKubeDataTaskTypePod,
+ pods: podItems.Items,
+ }
+ case getKubeDataTaskTypeNode:
+ if err := json.Unmarshal(out, &nodeItems); err != nil {
+ results <- getKubeDataResult{
+ err: fmt.Errorf("Unmarshal() failed: %v", err),
+ }
+ continue
+ }
+ results <- getKubeDataResult{
+ taskType: getKubeDataTaskTypeNode,
+ nodes: nodeItems.Items,
+ }
+ }
+ }
+}
+
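+// runKubeCtl fetches credentials for the "vanadium" cluster at the given location
+// into a temporary KUBECONFIG file, runs kubectl with the given arguments, and
+// returns kubectl's combined output.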
+func runKubeCtl(jirix *jiri.X, location clusterLocation, kubeCtlArgs []string) ([]byte, error) {
+ f, err := ioutil.TempFile("", "")
+ if err != nil {
+ return nil, err
+ }
+ defer jirix.NewSeq().RemoveAll(f.Name())
+ s := tool.NewContext(tool.ContextOpts{
+ Env: map[string]string{
+ "KUBECONFIG": f.Name(),
+ },
+ }).NewSeq()
+ var out bytes.Buffer
+ getCredsArgs := []string{
+ "container",
+ "clusters",
+ "get-credentials",
+ "vanadium",
+ "--project",
+ location.project,
+ "--zone",
+ location.zone,
+ }
+ if err := s.Run("gcloud", getCredsArgs...).Capture(&out, &out).Last("kubectl", kubeCtlArgs...); err != nil {
+ return nil, err
+ }
+ return out.Bytes(), nil
+}
+
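+// getOncalls returns the current oncall names reported by "jiri oncall" as a
+// comma-separated list.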
+func getOncalls(jirix *jiri.X) ([]string, error) {
+ s := jirix.NewSeq()
+ var out bytes.Buffer
+ if err := s.Capture(&out, nil).Last("jiri", "oncall"); err != nil {
+ return nil, err
+ }
+ return strings.Split(strings.TrimSpace(out.String()), ","), nil
+}
+
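+// logsHandler serves a container's logs, fetched via kubectl, as escaped HTML.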
+func logsHandler(jirix *jiri.X, root string, w http.ResponseWriter, r *http.Request) {
+ // Parse project, zone, pod name, and container.
+ f, err := parseForm(r, "p", "z", "d", "c")
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+ project, zone, pod, container := f["p"], f["z"], f["d"], f["c"]
+ out, err := runKubeCtl(jirix, clusterLocation{
+ project: project,
+ zone: zone,
+ }, []string{"logs", pod, container})
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/html")
+ content := fmt.Sprintf("<pre style='%s'>%s</pre>", logsStyle, html.EscapeString(string(out)))
+ w.Write([]byte(content))
+}
+
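+// cfgHandler serves a pod's full JSON configuration, fetched via kubectl, as
+// escaped HTML.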
+func cfgHandler(jirix *jiri.X, root string, w http.ResponseWriter, r *http.Request) {
+ // Parse project, zone, and pod name.
+ f, err := parseForm(r, "p", "z", "d")
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+ project, zone, pod := f["p"], f["z"], f["d"]
+ out, err := runKubeCtl(jirix, clusterLocation{
+ project: project,
+ zone: zone,
+ }, []string{"get", "pods", pod, "-o=json"})
+ if err != nil {
+ respondWithError(jirix, err, w)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/html")
+ content := fmt.Sprintf("<pre style='%s'>%s</pre>", logsStyle, html.EscapeString(string(out)))
+ w.Write([]byte(content))
}
func picHandler(jirix *jiri.X, root string, w http.ResponseWriter, r *http.Request) {
- r.ParseForm()
// Parameter "id" specifies the id of the pic.
- id := r.Form.Get("id")
- if id == "" {
- respondWithError(jirix, fmt.Errorf("parameter 'id' not found"), w)
+ f, err := parseForm(r, "id")
+ if err != nil {
+ respondWithError(jirix, err, w)
return
}
+ id := f["id"]
+
// Read picture file from Google Storage.
cachedFile, err := cache.StoreGoogleStorageFile(jirix, root, bucketPics, id+".png")
if err != nil {
@@ -123,15 +685,20 @@
w.Write(bytes)
}
-func respondWithError(jirix *jiri.X, err error, w http.ResponseWriter) {
- fmt.Fprintf(jirix.Stderr(), "%v\n", err)
- http.Error(w, "500 internal server error", http.StatusInternalServerError)
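+// parseForm parses the request form and returns the values of the given fields,
+// failing if any of them is missing or empty.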
+func parseForm(r *http.Request, fields ...string) (map[string]string, error) {
+ m := map[string]string{}
+ if err := r.ParseForm(); err != nil {
+ return nil, err
+ }
+ for _, f := range fields {
+ value := r.Form.Get(f)
+ if value == "" {
+ return nil, fmt.Errorf("parameter %q not found", f)
+ }
+ m[f] = value
+ }
+ return m, nil
}
-func readGoogleStorageFile(jirix *jiri.X, filename string) (string, error) {
- var out bytes.Buffer
- if err := jirix.NewSeq().Capture(&out, &out).Last("gsutil", "-q", "cat", bucketData+"/"+filename); err != nil {
- return "", err
- }
- return out.String(), nil
+func respondWithError(jirix *jiri.X, err error, w http.ResponseWriter) {
+ fmt.Fprintf(jirix.Stderr(), "%v\n", err)
+ http.Error(w, fmt.Sprintf("500 internal server error\n\n%v", err), http.StatusInternalServerError)
}
diff --git a/vmon/servicecounters.go b/vmon/servicecounters.go
index 1e43f04..fe32325 100644
--- a/vmon/servicecounters.go
+++ b/vmon/servicecounters.go
@@ -31,11 +31,11 @@
counters := map[string][]prodServiceCounter{
monitoring.SNMounttable: []prodServiceCounter{
prodServiceCounter{
- name: "mounttable nodes",
+ name: monitoring.MNMounttableNodes,
statSuffix: "__debug/stats/mounttable/num-nodes",
},
prodServiceCounter{
- name: "mounttable mounted servers",
+ name: monitoring.MNMounttableMountedServers,
statSuffix: "__debug/stats/mounttable/num-mounted-servers",
},
},