blob: 6cc6ddac83132e262e206cb4030d1df383cfc49f [file] [log] [blame]
// Copyright 2015 The Vanadium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file was auto-generated by the vanadium vdl tool.
// Package: interfaces
package interfaces
import (
"fmt"
"io"
"time"
"v.io/v23"
"v.io/v23/context"
"v.io/v23/i18n"
"v.io/v23/rpc"
"v.io/v23/security/access"
"v.io/v23/services/syncbase"
"v.io/v23/vdl"
vdltime "v.io/v23/vdlroot/time"
"v.io/v23/verror"
"v.io/v23/vom"
)
// Force __VDLInit (defined elsewhere in this file) to run during package
// initialization so VDL type registration happens before any other init code.
var _ = __VDLInit() // Must be first; see __VDLInit comments for details.
//////////////////////////////////////////////////
// Type definitions
// GenVector is the generation vector for any syncable entity, which maps each
// device id to its last locally known generation in the scope of that entity.
type GenVector map[uint64]uint64

// __VDLReflect exposes the VDL wire-format name of GenVector to the vdl package.
func (GenVector) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.GenVector"`
}) {
}

// VDLIsZero reports whether x is the VDL zero value (an empty or nil map).
func (x GenVector) VDLIsZero() bool {
	return len(x) == 0
}

// VDLWrite encodes x onto enc as a map of uint64 keys to uint64 elements.
func (x GenVector) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_map_1); err != nil {
		return err
	}
	if err := enc.SetLenHint(len(x)); err != nil {
		return err
	}
	for key, elem := range x {
		if err := enc.NextEntryValueUint(vdl.Uint64Type, key); err != nil {
			return err
		}
		if err := enc.WriteValueUint(vdl.Uint64Type, elem); err != nil {
			return err
		}
	}
	// NextEntry(true) signals that there are no more map entries.
	if err := enc.NextEntry(true); err != nil {
		return err
	}
	return enc.FinishValue()
}

// VDLRead decodes a GenVector from dec into x, replacing any prior contents.
func (x *GenVector) VDLRead(dec vdl.Decoder) error {
	if err := dec.StartValue(__VDLType_map_1); err != nil {
		return err
	}
	var tmpMap GenVector
	// Pre-size the map when the decoder knows the entry count up front.
	if len := dec.LenHint(); len > 0 {
		tmpMap = make(GenVector, len)
	}
	for {
		switch done, key, err := dec.NextEntryValueUint(64); {
		case err != nil:
			return err
		case done:
			// All entries consumed; publish the result and close the value.
			*x = tmpMap
			return dec.FinishValue()
		default:
			var elem uint64
			switch value, err := dec.ReadValueUint(64); {
			case err != nil:
				return err
			default:
				elem = value
			}
			// Lazily allocate when there was no length hint.
			if tmpMap == nil {
				tmpMap = make(GenVector)
			}
			tmpMap[key] = elem
		}
	}
}
// Knowledge is a mapping of syncable entities to their generation
// vectors. These syncable entities could be data prefixes relative to a
// Database id, or syncgroup oids.
type Knowledge map[string]GenVector

// __VDLReflect exposes the VDL wire-format name of Knowledge to the vdl package.
func (Knowledge) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.Knowledge"`
}) {
}

// VDLIsZero reports whether x is the VDL zero value (an empty or nil map).
func (x Knowledge) VDLIsZero() bool {
	return len(x) == 0
}

// VDLWrite encodes x onto enc as a map of string keys to GenVector elements.
func (x Knowledge) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_map_2); err != nil {
		return err
	}
	if err := enc.SetLenHint(len(x)); err != nil {
		return err
	}
	for key, elem := range x {
		if err := enc.NextEntryValueString(vdl.StringType, key); err != nil {
			return err
		}
		// Elements delegate to GenVector's own encoder.
		if err := elem.VDLWrite(enc); err != nil {
			return err
		}
	}
	// NextEntry(true) signals that there are no more map entries.
	if err := enc.NextEntry(true); err != nil {
		return err
	}
	return enc.FinishValue()
}

// VDLRead decodes a Knowledge map from dec into x, replacing any prior contents.
func (x *Knowledge) VDLRead(dec vdl.Decoder) error {
	if err := dec.StartValue(__VDLType_map_2); err != nil {
		return err
	}
	var tmpMap Knowledge
	// Pre-size the map when the decoder knows the entry count up front.
	if len := dec.LenHint(); len > 0 {
		tmpMap = make(Knowledge, len)
	}
	for {
		switch done, key, err := dec.NextEntryValueString(); {
		case err != nil:
			return err
		case done:
			*x = tmpMap
			return dec.FinishValue()
		default:
			var elem GenVector
			if err := elem.VDLRead(dec); err != nil {
				return err
			}
			// Lazily allocate when there was no length hint.
			if tmpMap == nil {
				tmpMap = make(Knowledge)
			}
			tmpMap[key] = elem
		}
	}
}
// LogRecMetadata represents the metadata of a single log record that is
// exchanged between two peers. Each log record represents a change made to an
// object in the store.
//
// TODO(hpucha): Add readset/scanset. Look into sending tx metadata only once
// per transaction.
type LogRecMetadata struct {
	// Log related information.
	Id      uint64 // device id that created the log record.
	Gen     uint64 // generation number for the log record.
	RecType byte   // type of log record.
	// Id of the object that was updated. This id is relative to Application
	// and Database names and is the store key for a particular row in a
	// collection.
	ObjId      string
	CurVers    string    // current version number of the object.
	Parents    []string  // 0, 1 or 2 parent versions that the current version is derived from.
	UpdTime    time.Time // timestamp when the update is generated.
	Delete     bool      // indicates whether the update resulted in object being deleted from the store.
	BatchId    uint64    // unique id of the Batch this update belongs to.
	BatchCount uint64    // number of objects in the Batch.
}

// __VDLReflect exposes the VDL wire-format name of LogRecMetadata to the vdl package.
func (LogRecMetadata) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.LogRecMetadata"`
}) {
}
// VDLIsZero reports whether every field of x holds its VDL zero value.
func (x LogRecMetadata) VDLIsZero() bool {
	// Fields are checked in declaration order, mirroring the wire layout.
	return x.Id == 0 &&
		x.Gen == 0 &&
		x.RecType == 0 &&
		x.ObjId == "" &&
		x.CurVers == "" &&
		len(x.Parents) == 0 &&
		x.UpdTime.IsZero() &&
		!x.Delete &&
		x.BatchId == 0 &&
		x.BatchCount == 0
}
// VDLWrite encodes x onto enc as a struct, emitting only fields that differ
// from their zero values (sparse struct encoding).
func (x LogRecMetadata) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_3); err != nil {
		return err
	}
	if x.Id != 0 {
		if err := enc.NextFieldValueUint(0, vdl.Uint64Type, x.Id); err != nil {
			return err
		}
	}
	if x.Gen != 0 {
		if err := enc.NextFieldValueUint(1, vdl.Uint64Type, x.Gen); err != nil {
			return err
		}
	}
	if x.RecType != 0 {
		if err := enc.NextFieldValueUint(2, vdl.ByteType, uint64(x.RecType)); err != nil {
			return err
		}
	}
	if x.ObjId != "" {
		if err := enc.NextFieldValueString(3, vdl.StringType, x.ObjId); err != nil {
			return err
		}
	}
	if x.CurVers != "" {
		if err := enc.NextFieldValueString(4, vdl.StringType, x.CurVers); err != nil {
			return err
		}
	}
	if len(x.Parents) != 0 {
		if err := enc.NextField(5); err != nil {
			return err
		}
		// Anonymous []string encoding is shared via a helper.
		if err := __VDLWriteAnon_list_1(enc, x.Parents); err != nil {
			return err
		}
	}
	if !x.UpdTime.IsZero() {
		if err := enc.NextField(6); err != nil {
			return err
		}
		// Convert the native time.Time to its VDL wire representation first.
		var wire vdltime.Time
		if err := vdltime.TimeFromNative(&wire, x.UpdTime); err != nil {
			return err
		}
		if err := wire.VDLWrite(enc); err != nil {
			return err
		}
	}
	if x.Delete {
		if err := enc.NextFieldValueBool(7, vdl.BoolType, x.Delete); err != nil {
			return err
		}
	}
	if x.BatchId != 0 {
		if err := enc.NextFieldValueUint(8, vdl.Uint64Type, x.BatchId); err != nil {
			return err
		}
	}
	if x.BatchCount != 0 {
		if err := enc.NextFieldValueUint(9, vdl.Uint64Type, x.BatchCount); err != nil {
			return err
		}
	}
	// NextField(-1) marks the end of the struct's fields.
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// __VDLWriteAnon_list_1 encodes an anonymous []string value onto enc.
func __VDLWriteAnon_list_1(enc vdl.Encoder, x []string) error {
	if err := enc.StartValue(__VDLType_list_4); err != nil {
		return err
	}
	if err := enc.SetLenHint(len(x)); err != nil {
		return err
	}
	for _, elem := range x {
		if err := enc.NextEntryValueString(vdl.StringType, elem); err != nil {
			return err
		}
	}
	// NextEntry(true) signals that there are no more list entries.
	if err := enc.NextEntry(true); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a LogRecMetadata struct from dec into x. x is first reset
// to the zero value, so fields absent from the wire keep their zero values.
func (x *LogRecMetadata) VDLRead(dec vdl.Decoder) error {
	*x = LogRecMetadata{}
	if err := dec.StartValue(__VDLType_struct_3); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			// -1 marks the end of the struct's fields.
			return dec.FinishValue()
		}
		// The sender's struct type may differ from ours (schema evolution);
		// remap field indices by name and skip fields we don't know about.
		if decType != __VDLType_struct_3 {
			index = __VDLType_struct_3.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			switch value, err := dec.ReadValueUint(64); {
			case err != nil:
				return err
			default:
				x.Id = value
			}
		case 1:
			switch value, err := dec.ReadValueUint(64); {
			case err != nil:
				return err
			default:
				x.Gen = value
			}
		case 2:
			switch value, err := dec.ReadValueUint(8); {
			case err != nil:
				return err
			default:
				x.RecType = byte(value)
			}
		case 3:
			switch value, err := dec.ReadValueString(); {
			case err != nil:
				return err
			default:
				x.ObjId = value
			}
		case 4:
			switch value, err := dec.ReadValueString(); {
			case err != nil:
				return err
			default:
				x.CurVers = value
			}
		case 5:
			if err := __VDLReadAnon_list_1(dec, &x.Parents); err != nil {
				return err
			}
		case 6:
			// Decode the wire time, then convert it to the native time.Time.
			var wire vdltime.Time
			if err := wire.VDLRead(dec); err != nil {
				return err
			}
			if err := vdltime.TimeToNative(wire, &x.UpdTime); err != nil {
				return err
			}
		case 7:
			switch value, err := dec.ReadValueBool(); {
			case err != nil:
				return err
			default:
				x.Delete = value
			}
		case 8:
			switch value, err := dec.ReadValueUint(64); {
			case err != nil:
				return err
			default:
				x.BatchId = value
			}
		case 9:
			switch value, err := dec.ReadValueUint(64); {
			case err != nil:
				return err
			default:
				x.BatchCount = value
			}
		}
	}
}
// __VDLReadAnon_list_1 decodes an anonymous []string value from dec into x,
// replacing any prior contents.
func __VDLReadAnon_list_1(dec vdl.Decoder, x *[]string) error {
	if err := dec.StartValue(__VDLType_list_4); err != nil {
		return err
	}
	// Pre-size the slice when a length hint is available; otherwise reset it.
	if len := dec.LenHint(); len > 0 {
		*x = make([]string, 0, len)
	} else {
		*x = nil
	}
	for {
		switch done, elem, err := dec.NextEntryValueString(); {
		case err != nil:
			return err
		case done:
			return dec.FinishValue()
		default:
			*x = append(*x, elem)
		}
	}
}
// LogRec represents the on-wire representation of an entire log record: its
// metadata and data. Value is the actual value of a store object.
type LogRec struct {
	Metadata LogRecMetadata
	Value    *vom.RawBytes // lazily-decoded store value (VDL any).
}

// __VDLReflect exposes the VDL wire-format name of LogRec to the vdl package.
func (LogRec) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.LogRec"`
}) {
}
// VDLIsZero reports whether x is the VDL zero value: zero metadata and a
// Value that is either nil or itself the zero value.
func (x LogRec) VDLIsZero() bool {
	if !x.Metadata.VDLIsZero() {
		return false
	}
	return x.Value == nil || x.Value.VDLIsZero()
}
// VDLWrite encodes x onto enc as a struct, emitting only non-zero fields.
func (x LogRec) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_6); err != nil {
		return err
	}
	if !x.Metadata.VDLIsZero() {
		if err := enc.NextField(0); err != nil {
			return err
		}
		if err := x.Metadata.VDLWrite(enc); err != nil {
			return err
		}
	}
	// Value is emitted only when present and non-zero.
	if x.Value != nil && !x.Value.VDLIsZero() {
		if err := enc.NextField(1); err != nil {
			return err
		}
		if err := x.Value.VDLWrite(enc); err != nil {
			return err
		}
	}
	// NextField(-1) marks the end of the struct's fields.
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a LogRec struct from dec into x. Value defaults to a
// RawBytes holding the zero "any" value when absent from the wire.
func (x *LogRec) VDLRead(dec vdl.Decoder) error {
	*x = LogRec{
		Value: vom.RawBytesOf(vdl.ZeroValue(vdl.AnyType)),
	}
	if err := dec.StartValue(__VDLType_struct_6); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			// -1 marks the end of the struct's fields.
			return dec.FinishValue()
		}
		// Remap field indices by name when the sender's type differs from
		// ours (schema evolution); skip fields we don't know about.
		if decType != __VDLType_struct_6 {
			index = __VDLType_struct_6.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			if err := x.Metadata.VDLRead(dec); err != nil {
				return err
			}
		case 1:
			// Replace the default with a freshly decoded RawBytes.
			x.Value = new(vom.RawBytes)
			if err := x.Value.VDLRead(dec); err != nil {
				return err
			}
		}
	}
}
// GroupId is a globally unique syncgroup ID.
// It is a hash of the syncgroup name.
type GroupId string

// __VDLReflect exposes the VDL wire-format name of GroupId to the vdl package.
func (GroupId) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.GroupId"`
}) {
}
// VDLIsZero reports whether x is the VDL zero value (the empty string).
func (x GroupId) VDLIsZero() bool {
	return len(x) == 0
}
// VDLWrite encodes x onto enc as a named string value.
func (x GroupId) VDLWrite(enc vdl.Encoder) error {
	return enc.WriteValueString(__VDLType_string_7, string(x))
}
// VDLRead decodes a string value from dec into x.
func (x *GroupId) VDLRead(dec vdl.Decoder) error {
	value, err := dec.ReadValueString()
	if err != nil {
		return err
	}
	*x = GroupId(value)
	return nil
}
// Possible states for a syncgroup.
type SyncgroupStatus int

const (
	// SyncgroupStatusPublishPending is the zero value of the enum.
	SyncgroupStatusPublishPending SyncgroupStatus = iota
	SyncgroupStatusPublishRejected
	SyncgroupStatusRunning
)

// SyncgroupStatusAll holds all labels for SyncgroupStatus.
var SyncgroupStatusAll = [...]SyncgroupStatus{SyncgroupStatusPublishPending, SyncgroupStatusPublishRejected, SyncgroupStatusRunning}
// SyncgroupStatusFromString creates a SyncgroupStatus from a string label.
// It returns a non-nil error (and x set to -1 by Set) for unknown labels.
func SyncgroupStatusFromString(label string) (SyncgroupStatus, error) {
	var status SyncgroupStatus
	err := status.Set(label)
	return status, err
}
// Set assigns the enum value named by label to x. Both the canonical
// CamelCase label and its all-lowercase form are accepted; anything else
// sets x to -1 and returns an error.
func (x *SyncgroupStatus) Set(label string) error {
	switch label {
	case "PublishPending", "publishpending":
		*x = SyncgroupStatusPublishPending
	case "PublishRejected", "publishrejected":
		*x = SyncgroupStatusPublishRejected
	case "Running", "running":
		*x = SyncgroupStatusRunning
	default:
		*x = -1
		return fmt.Errorf("unknown label %q in interfaces.SyncgroupStatus", label)
	}
	return nil
}
// String returns the canonical string label of x, or "" if x is not one of
// the declared enum values.
func (x SyncgroupStatus) String() string {
	// Labels indexed by enum value (PublishPending == 0, ...).
	labels := [...]string{"PublishPending", "PublishRejected", "Running"}
	if x < 0 || int(x) >= len(labels) {
		return ""
	}
	return labels[x]
}
// __VDLReflect exposes the VDL wire-format name and enum labels of
// SyncgroupStatus to the vdl package.
func (SyncgroupStatus) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.SyncgroupStatus"`
	Enum struct{ PublishPending, PublishRejected, Running string }
}) {
}

// VDLIsZero reports whether x is the enum's zero value (its first label).
func (x SyncgroupStatus) VDLIsZero() bool {
	return x == SyncgroupStatusPublishPending
}
// VDLWrite encodes x onto enc as an enum, using its string label.
func (x SyncgroupStatus) VDLWrite(enc vdl.Encoder) error {
	return enc.WriteValueString(__VDLType_enum_8, x.String())
}
// VDLRead decodes an enum label from dec and assigns it to x via Set.
func (x *SyncgroupStatus) VDLRead(dec vdl.Decoder) error {
	value, err := dec.ReadValueString()
	if err != nil {
		return err
	}
	return x.Set(value)
}
// SyncgroupMemberState contains information about a joiner and the internal bookkeeping
// state required for resolving conflicts on this joiner's join/leave activity.
type SyncgroupMemberState struct {
	// Timestamp of when the member last joined/left the syncgroup. This timestamp is updated
	// even when an existing member rejoins a syncgroup. Represented as Unix time.
	WhenUpdated int64
	// If set then this record indicates that this member has left the group. The SyncgroupMember
	// entry is retained after a delete so that it can be used during conflict resolution, when
	// one node indicates that the member has left the group and another says that the member
	// is still in the group.
	HasLeft bool
	// Information supplied when requesting a join.
	MemberInfo syncbase.SyncgroupMemberInfo
}

// __VDLReflect exposes the VDL wire-format name of SyncgroupMemberState to the vdl package.
func (SyncgroupMemberState) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.SyncgroupMemberState"`
}) {
}
// VDLIsZero reports whether x equals the zero SyncgroupMemberState.
func (x SyncgroupMemberState) VDLIsZero() bool {
	var zero SyncgroupMemberState
	return x == zero
}
// VDLWrite encodes x onto enc as a struct, emitting only non-zero fields.
func (x SyncgroupMemberState) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_9); err != nil {
		return err
	}
	if x.WhenUpdated != 0 {
		if err := enc.NextFieldValueInt(0, vdl.Int64Type, x.WhenUpdated); err != nil {
			return err
		}
	}
	if x.HasLeft {
		if err := enc.NextFieldValueBool(1, vdl.BoolType, x.HasLeft); err != nil {
			return err
		}
	}
	if x.MemberInfo != (syncbase.SyncgroupMemberInfo{}) {
		if err := enc.NextField(2); err != nil {
			return err
		}
		if err := x.MemberInfo.VDLWrite(enc); err != nil {
			return err
		}
	}
	// NextField(-1) marks the end of the struct's fields.
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}

// VDLRead decodes a SyncgroupMemberState struct from dec into x, resetting x
// first so fields absent from the wire keep their zero values.
func (x *SyncgroupMemberState) VDLRead(dec vdl.Decoder) error {
	*x = SyncgroupMemberState{}
	if err := dec.StartValue(__VDLType_struct_9); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			// -1 marks the end of the struct's fields.
			return dec.FinishValue()
		}
		// Remap field indices by name when the sender's type differs from
		// ours (schema evolution); skip fields we don't know about.
		if decType != __VDLType_struct_9 {
			index = __VDLType_struct_9.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			switch value, err := dec.ReadValueInt(64); {
			case err != nil:
				return err
			default:
				x.WhenUpdated = value
			}
		case 1:
			switch value, err := dec.ReadValueBool(); {
			case err != nil:
				return err
			default:
				x.HasLeft = value
			}
		case 2:
			if err := x.MemberInfo.VDLRead(dec); err != nil {
				return err
			}
		}
	}
}
// Syncgroup contains the state of a syncgroup.
type Syncgroup struct {
	Id          syncbase.Id                     // the relative syncgroup Id chosen by app
	SpecVersion string                          // version on syncgroup spec for concurrency control
	Spec        syncbase.SyncgroupSpec          // app-given specification
	Creator     string                          // Creator's Vanadium name
	DbId        syncbase.Id                     // Globally unique database id
	Status      SyncgroupStatus                 // Status of the syncgroup
	Joiners     map[string]SyncgroupMemberState // map of joiners to their metadata
}

// __VDLReflect exposes the VDL wire-format name of Syncgroup to the vdl package.
func (Syncgroup) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.Syncgroup"`
}) {
}
// VDLIsZero reports whether every field of x holds its VDL zero value.
func (x Syncgroup) VDLIsZero() bool {
	// Fields are checked in declaration order, mirroring the wire layout.
	return x.Id == (syncbase.Id{}) &&
		x.SpecVersion == "" &&
		x.Spec.VDLIsZero() &&
		x.Creator == "" &&
		x.DbId == (syncbase.Id{}) &&
		x.Status == SyncgroupStatusPublishPending &&
		len(x.Joiners) == 0
}
// VDLWrite encodes x onto enc as a struct, emitting only non-zero fields.
func (x Syncgroup) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_11); err != nil {
		return err
	}
	if x.Id != (syncbase.Id{}) {
		if err := enc.NextField(0); err != nil {
			return err
		}
		if err := x.Id.VDLWrite(enc); err != nil {
			return err
		}
	}
	if x.SpecVersion != "" {
		if err := enc.NextFieldValueString(1, vdl.StringType, x.SpecVersion); err != nil {
			return err
		}
	}
	if !x.Spec.VDLIsZero() {
		if err := enc.NextField(2); err != nil {
			return err
		}
		if err := x.Spec.VDLWrite(enc); err != nil {
			return err
		}
	}
	if x.Creator != "" {
		if err := enc.NextFieldValueString(3, vdl.StringType, x.Creator); err != nil {
			return err
		}
	}
	if x.DbId != (syncbase.Id{}) {
		if err := enc.NextField(4); err != nil {
			return err
		}
		if err := x.DbId.VDLWrite(enc); err != nil {
			return err
		}
	}
	if x.Status != SyncgroupStatusPublishPending {
		// Enums are encoded by their string label.
		if err := enc.NextFieldValueString(5, __VDLType_enum_8, x.Status.String()); err != nil {
			return err
		}
	}
	if len(x.Joiners) != 0 {
		if err := enc.NextField(6); err != nil {
			return err
		}
		if err := __VDLWriteAnon_map_2(enc, x.Joiners); err != nil {
			return err
		}
	}
	// NextField(-1) marks the end of the struct's fields.
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}

// __VDLWriteAnon_map_2 encodes an anonymous map[string]SyncgroupMemberState onto enc.
func __VDLWriteAnon_map_2(enc vdl.Encoder, x map[string]SyncgroupMemberState) error {
	if err := enc.StartValue(__VDLType_map_14); err != nil {
		return err
	}
	if err := enc.SetLenHint(len(x)); err != nil {
		return err
	}
	for key, elem := range x {
		if err := enc.NextEntryValueString(vdl.StringType, key); err != nil {
			return err
		}
		if err := elem.VDLWrite(enc); err != nil {
			return err
		}
	}
	// NextEntry(true) signals that there are no more map entries.
	if err := enc.NextEntry(true); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a Syncgroup struct from dec into x, resetting x first so
// fields absent from the wire keep their zero values.
func (x *Syncgroup) VDLRead(dec vdl.Decoder) error {
	*x = Syncgroup{}
	if err := dec.StartValue(__VDLType_struct_11); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			// -1 marks the end of the struct's fields.
			return dec.FinishValue()
		}
		// Remap field indices by name when the sender's type differs from
		// ours (schema evolution); skip fields we don't know about.
		if decType != __VDLType_struct_11 {
			index = __VDLType_struct_11.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			if err := x.Id.VDLRead(dec); err != nil {
				return err
			}
		case 1:
			switch value, err := dec.ReadValueString(); {
			case err != nil:
				return err
			default:
				x.SpecVersion = value
			}
		case 2:
			if err := x.Spec.VDLRead(dec); err != nil {
				return err
			}
		case 3:
			switch value, err := dec.ReadValueString(); {
			case err != nil:
				return err
			default:
				x.Creator = value
			}
		case 4:
			if err := x.DbId.VDLRead(dec); err != nil {
				return err
			}
		case 5:
			// Enums arrive as string labels; Set validates them.
			switch value, err := dec.ReadValueString(); {
			case err != nil:
				return err
			default:
				if err := x.Status.Set(value); err != nil {
					return err
				}
			}
		case 6:
			if err := __VDLReadAnon_map_2(dec, &x.Joiners); err != nil {
				return err
			}
		}
	}
}

// __VDLReadAnon_map_2 decodes an anonymous map[string]SyncgroupMemberState
// from dec into x, replacing any prior contents.
func __VDLReadAnon_map_2(dec vdl.Decoder, x *map[string]SyncgroupMemberState) error {
	if err := dec.StartValue(__VDLType_map_14); err != nil {
		return err
	}
	var tmpMap map[string]SyncgroupMemberState
	// Pre-size the map when the decoder knows the entry count up front.
	if len := dec.LenHint(); len > 0 {
		tmpMap = make(map[string]SyncgroupMemberState, len)
	}
	for {
		switch done, key, err := dec.NextEntryValueString(); {
		case err != nil:
			return err
		case done:
			*x = tmpMap
			return dec.FinishValue()
		default:
			var elem SyncgroupMemberState
			if err := elem.VDLRead(dec); err != nil {
				return err
			}
			// Lazily allocate when there was no length hint.
			if tmpMap == nil {
				tmpMap = make(map[string]SyncgroupMemberState)
			}
			tmpMap[key] = elem
		}
	}
}
// CollectionPerms represent the persistent, synced permissions of a Collection.
// Existence of CollectionPerms in the store determines existence of the
// Collection.
// Note: Since CollectionPerms is synced and conflict resolved, the sync
// protocol needs to be aware of it. Any potential additions to synced
// Collection metadata should be written to a separate, synced key prefix,
// written in the same transaction with CollectionPerms and incorporated into
// the sync protocol. All persistent Collection metadata should be synced;
// local-only metadata is acceptable only if optional (e.g. stats).
type CollectionPerms access.Permissions

// __VDLReflect exposes the VDL wire-format name of CollectionPerms to the vdl package.
func (CollectionPerms) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.CollectionPerms"`
}) {
}

// VDLIsZero reports whether x is the VDL zero value (an empty or nil map).
func (x CollectionPerms) VDLIsZero() bool {
	return len(x) == 0
}
// VDLWrite encodes x onto enc as a map of string tags to access lists.
func (x CollectionPerms) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_map_15); err != nil {
		return err
	}
	if err := enc.SetLenHint(len(x)); err != nil {
		return err
	}
	for key, elem := range x {
		if err := enc.NextEntryValueString(vdl.StringType, key); err != nil {
			return err
		}
		if err := elem.VDLWrite(enc); err != nil {
			return err
		}
	}
	// NextEntry(true) signals that there are no more map entries.
	if err := enc.NextEntry(true); err != nil {
		return err
	}
	return enc.FinishValue()
}

// VDLRead decodes a CollectionPerms map from dec into x, replacing any prior
// contents.
func (x *CollectionPerms) VDLRead(dec vdl.Decoder) error {
	if err := dec.StartValue(__VDLType_map_15); err != nil {
		return err
	}
	var tmpMap CollectionPerms
	// Pre-size the map when the decoder knows the entry count up front.
	if len := dec.LenHint(); len > 0 {
		tmpMap = make(CollectionPerms, len)
	}
	for {
		switch done, key, err := dec.NextEntryValueString(); {
		case err != nil:
			return err
		case done:
			*x = tmpMap
			return dec.FinishValue()
		default:
			// Map elements are access.AccessList (CollectionPerms is a named
			// access.Permissions).
			var elem access.AccessList
			if err := elem.VDLRead(dec); err != nil {
				return err
			}
			// Lazily allocate when there was no length hint.
			if tmpMap == nil {
				tmpMap = make(CollectionPerms)
			}
			tmpMap[key] = elem
		}
	}
}
// SgDeltaReq contains the initiator's genvectors for the syncgroups it is
// interested in within a database when requesting deltas for those syncgroups.
type SgDeltaReq struct {
	DbId syncbase.Id
	Gvs  Knowledge // Contains a genvector per syncgroup.
}

// __VDLReflect exposes the VDL wire-format name of SgDeltaReq to the vdl package.
func (SgDeltaReq) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.SgDeltaReq"`
}) {
}
// VDLIsZero reports whether both fields of x hold their VDL zero values.
func (x SgDeltaReq) VDLIsZero() bool {
	return x.DbId == (syncbase.Id{}) && len(x.Gvs) == 0
}
// VDLWrite encodes x onto enc as a struct, emitting only non-zero fields.
func (x SgDeltaReq) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_17); err != nil {
		return err
	}
	if x.DbId != (syncbase.Id{}) {
		if err := enc.NextField(0); err != nil {
			return err
		}
		if err := x.DbId.VDLWrite(enc); err != nil {
			return err
		}
	}
	if len(x.Gvs) != 0 {
		if err := enc.NextField(1); err != nil {
			return err
		}
		if err := x.Gvs.VDLWrite(enc); err != nil {
			return err
		}
	}
	// NextField(-1) marks the end of the struct's fields.
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}

// VDLRead decodes an SgDeltaReq struct from dec into x, resetting x first so
// fields absent from the wire keep their zero values.
func (x *SgDeltaReq) VDLRead(dec vdl.Decoder) error {
	*x = SgDeltaReq{}
	if err := dec.StartValue(__VDLType_struct_17); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			// -1 marks the end of the struct's fields.
			return dec.FinishValue()
		}
		// Remap field indices by name when the sender's type differs from
		// ours (schema evolution); skip fields we don't know about.
		if decType != __VDLType_struct_17 {
			index = __VDLType_struct_17.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			if err := x.DbId.VDLRead(dec); err != nil {
				return err
			}
		case 1:
			if err := x.Gvs.VDLRead(dec); err != nil {
				return err
			}
		}
	}
}
// DataDeltaReq contains the initiator's genvectors and the set of syncgroups it
// is interested in within a database when requesting deltas for that database.
type DataDeltaReq struct {
	DbId  syncbase.Id
	SgIds map[GroupId]struct{} // set of syncgroup ids of interest.
	Gvs   Knowledge
}

// __VDLReflect exposes the VDL wire-format name of DataDeltaReq to the vdl package.
func (DataDeltaReq) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.DataDeltaReq"`
}) {
}
// VDLIsZero reports whether every field of x holds its VDL zero value.
func (x DataDeltaReq) VDLIsZero() bool {
	return x.DbId == (syncbase.Id{}) &&
		len(x.SgIds) == 0 &&
		len(x.Gvs) == 0
}
// VDLWrite encodes x onto enc as a struct, emitting only non-zero fields.
func (x DataDeltaReq) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_18); err != nil {
		return err
	}
	if x.DbId != (syncbase.Id{}) {
		if err := enc.NextField(0); err != nil {
			return err
		}
		if err := x.DbId.VDLWrite(enc); err != nil {
			return err
		}
	}
	if len(x.SgIds) != 0 {
		if err := enc.NextField(1); err != nil {
			return err
		}
		if err := __VDLWriteAnon_set_3(enc, x.SgIds); err != nil {
			return err
		}
	}
	if len(x.Gvs) != 0 {
		if err := enc.NextField(2); err != nil {
			return err
		}
		if err := x.Gvs.VDLWrite(enc); err != nil {
			return err
		}
	}
	// NextField(-1) marks the end of the struct's fields.
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}

// __VDLWriteAnon_set_3 encodes an anonymous set[GroupId] onto enc; only the
// keys are written.
func __VDLWriteAnon_set_3(enc vdl.Encoder, x map[GroupId]struct{}) error {
	if err := enc.StartValue(__VDLType_set_19); err != nil {
		return err
	}
	if err := enc.SetLenHint(len(x)); err != nil {
		return err
	}
	for key := range x {
		if err := enc.NextEntryValueString(__VDLType_string_7, string(key)); err != nil {
			return err
		}
	}
	// NextEntry(true) signals that there are no more set entries.
	if err := enc.NextEntry(true); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a DataDeltaReq struct from dec into x, resetting x first so
// fields absent from the wire keep their zero values.
func (x *DataDeltaReq) VDLRead(dec vdl.Decoder) error {
	*x = DataDeltaReq{}
	if err := dec.StartValue(__VDLType_struct_18); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			// -1 marks the end of the struct's fields.
			return dec.FinishValue()
		}
		// Remap field indices by name when the sender's type differs from
		// ours (schema evolution); skip fields we don't know about.
		if decType != __VDLType_struct_18 {
			index = __VDLType_struct_18.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			if err := x.DbId.VDLRead(dec); err != nil {
				return err
			}
		case 1:
			if err := __VDLReadAnon_set_3(dec, &x.SgIds); err != nil {
				return err
			}
		case 2:
			if err := x.Gvs.VDLRead(dec); err != nil {
				return err
			}
		}
	}
}

// __VDLReadAnon_set_3 decodes an anonymous set[GroupId] from dec into x,
// replacing any prior contents.
func __VDLReadAnon_set_3(dec vdl.Decoder, x *map[GroupId]struct{}) error {
	if err := dec.StartValue(__VDLType_set_19); err != nil {
		return err
	}
	var tmpMap map[GroupId]struct{}
	// Pre-size the set when the decoder knows the entry count up front.
	if len := dec.LenHint(); len > 0 {
		tmpMap = make(map[GroupId]struct{}, len)
	}
	for {
		switch done, key, err := dec.NextEntryValueString(); {
		case err != nil:
			return err
		case done:
			*x = tmpMap
			return dec.FinishValue()
		default:
			// Lazily allocate when there was no length hint.
			if tmpMap == nil {
				tmpMap = make(map[GroupId]struct{})
			}
			tmpMap[GroupId(key)] = struct{}{}
		}
	}
}
type (
	// DeltaReq represents any single field of the DeltaReq union type.
	//
	// DeltaReq contains a request to sync either data or syncgroup metadata for a
	// Database.
	DeltaReq interface {
		// Index returns the field index.
		Index() int
		// Interface returns the field value as an interface.
		Interface() interface{}
		// Name returns the field name.
		Name() string
		// __VDLReflect describes the DeltaReq union type.
		__VDLReflect(__DeltaReqReflect)
		VDLIsZero() bool
		VDLWrite(vdl.Encoder) error
	}
	// DeltaReqSgs represents field Sgs of the DeltaReq union type.
	DeltaReqSgs struct{ Value SgDeltaReq }
	// DeltaReqData represents field Data of the DeltaReq union type.
	DeltaReqData struct{ Value DataDeltaReq }
	// __DeltaReqReflect describes the DeltaReq union type.
	__DeltaReqReflect struct {
		Name  string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.DeltaReq"`
		Type  DeltaReq
		Union struct {
			Sgs  DeltaReqSgs
			Data DeltaReqData
		}
	}
)

// Per-member accessors for the DeltaReq union: field index, value, and name.
func (x DeltaReqSgs) Index() int                    { return 0 }
func (x DeltaReqSgs) Interface() interface{}        { return x.Value }
func (x DeltaReqSgs) Name() string                  { return "Sgs" }
func (x DeltaReqSgs) __VDLReflect(__DeltaReqReflect) {}

func (x DeltaReqData) Index() int                    { return 1 }
func (x DeltaReqData) Interface() interface{}        { return x.Value }
func (x DeltaReqData) Name() string                  { return "Data" }
func (x DeltaReqData) __VDLReflect(__DeltaReqReflect) {}

// VDLIsZero: Sgs is the union's first field, so it is zero when its value is zero.
func (x DeltaReqSgs) VDLIsZero() bool {
	return x.Value.VDLIsZero()
}

// VDLIsZero: a non-first union member is never the union's zero value.
func (x DeltaReqData) VDLIsZero() bool {
	return false
}
// VDLWrite encodes the Sgs union member onto enc as field 0 of the union.
func (x DeltaReqSgs) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_union_20); err != nil {
		return err
	}
	if err := enc.NextField(0); err != nil {
		return err
	}
	if err := x.Value.VDLWrite(enc); err != nil {
		return err
	}
	// NextField(-1) marks the end of the union's single field.
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}

// VDLWrite encodes the Data union member onto enc as field 1 of the union.
func (x DeltaReqData) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_union_20); err != nil {
		return err
	}
	if err := enc.NextField(1); err != nil {
		return err
	}
	if err := x.Value.VDLWrite(enc); err != nil {
		return err
	}
	// NextField(-1) marks the end of the union's single field.
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLReadDeltaReq decodes a DeltaReq union from dec into x. Exactly one field
// must be present on the wire; a missing or extra field is an error.
func VDLReadDeltaReq(dec vdl.Decoder, x *DeltaReq) error {
	if err := dec.StartValue(__VDLType_union_20); err != nil {
		return err
	}
	decType := dec.Type()
	index, err := dec.NextField()
	switch {
	case err != nil:
		return err
	case index == -1:
		// A union must carry exactly one field.
		return fmt.Errorf("missing field in union %T, from %v", x, decType)
	}
	// Remap the field index by name when the sender's union type differs
	// from ours; unknown fields are an error (no safe way to skip a union).
	if decType != __VDLType_union_20 {
		name := decType.Field(index).Name
		index = __VDLType_union_20.FieldIndexByName(name)
		if index == -1 {
			return fmt.Errorf("field %q not in union %T, from %v", name, x, decType)
		}
	}
	switch index {
	case 0:
		var field DeltaReqSgs
		if err := field.Value.VDLRead(dec); err != nil {
			return err
		}
		*x = field
	case 1:
		var field DeltaReqData
		if err := field.Value.VDLRead(dec); err != nil {
			return err
		}
		*x = field
	}
	// Verify there is no second field before closing the value.
	switch index, err := dec.NextField(); {
	case err != nil:
		return err
	case index != -1:
		return fmt.Errorf("extra field %d in union %T, from %v", index, x, dec.Type())
	}
	return dec.FinishValue()
}
type (
	// DeltaResp represents any single field of the DeltaResp union type.
	//
	// DeltaResp contains the responder's genvectors or the missing log records
	// returned in response to an initiator's request for deltas for a Database.
	DeltaResp interface {
		// Index returns the field index.
		Index() int
		// Interface returns the field value as an interface.
		Interface() interface{}
		// Name returns the field name.
		Name() string
		// __VDLReflect describes the DeltaResp union type.
		__VDLReflect(__DeltaRespReflect)
		VDLIsZero() bool
		VDLWrite(vdl.Encoder) error
	}
	// DeltaRespRec represents field Rec of the DeltaResp union type.
	DeltaRespRec struct{ Value LogRec }
	// DeltaRespGvs represents field Gvs of the DeltaResp union type.
	DeltaRespGvs struct{ Value Knowledge }
	// __DeltaRespReflect describes the DeltaResp union type.
	__DeltaRespReflect struct {
		Name  string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.DeltaResp"`
		Type  DeltaResp
		Union struct {
			Rec DeltaRespRec
			Gvs DeltaRespGvs
		}
	}
)

// Per-member accessors for the DeltaResp union: field index, value, and name.
func (x DeltaRespRec) Index() int                     { return 0 }
func (x DeltaRespRec) Interface() interface{}         { return x.Value }
func (x DeltaRespRec) Name() string                   { return "Rec" }
func (x DeltaRespRec) __VDLReflect(__DeltaRespReflect) {}

func (x DeltaRespGvs) Index() int                     { return 1 }
func (x DeltaRespGvs) Interface() interface{}         { return x.Value }
func (x DeltaRespGvs) Name() string                   { return "Gvs" }
func (x DeltaRespGvs) __VDLReflect(__DeltaRespReflect) {}

// VDLIsZero: Rec is the union's first field, so it is zero when its value is zero.
func (x DeltaRespRec) VDLIsZero() bool {
	return x.Value.VDLIsZero()
}

// VDLIsZero: a non-first union member is never the union's zero value.
func (x DeltaRespGvs) VDLIsZero() bool {
	return false
}
// VDLWrite encodes the Rec union member onto enc as field 0 of the union.
func (x DeltaRespRec) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_union_21); err != nil {
		return err
	}
	if err := enc.NextField(0); err != nil {
		return err
	}
	if err := x.Value.VDLWrite(enc); err != nil {
		return err
	}
	// NextField(-1) marks the end of the union's single field.
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}

// VDLWrite encodes the Gvs union member onto enc as field 1 of the union.
func (x DeltaRespGvs) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_union_21); err != nil {
		return err
	}
	if err := enc.NextField(1); err != nil {
		return err
	}
	if err := x.Value.VDLWrite(enc); err != nil {
		return err
	}
	// NextField(-1) marks the end of the union's single field.
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLReadDeltaResp decodes a DeltaResp union from dec into x. Exactly one
// field must be present on the wire; a missing or extra field is an error.
func VDLReadDeltaResp(dec vdl.Decoder, x *DeltaResp) error {
	if err := dec.StartValue(__VDLType_union_21); err != nil {
		return err
	}
	decType := dec.Type()
	index, err := dec.NextField()
	switch {
	case err != nil:
		return err
	case index == -1:
		// A union must carry exactly one field.
		return fmt.Errorf("missing field in union %T, from %v", x, decType)
	}
	// Remap the field index by name when the sender's union type differs
	// from ours; unknown fields are an error (no safe way to skip a union).
	if decType != __VDLType_union_21 {
		name := decType.Field(index).Name
		index = __VDLType_union_21.FieldIndexByName(name)
		if index == -1 {
			return fmt.Errorf("field %q not in union %T, from %v", name, x, decType)
		}
	}
	switch index {
	case 0:
		var field DeltaRespRec
		if err := field.Value.VDLRead(dec); err != nil {
			return err
		}
		*x = field
	case 1:
		var field DeltaRespGvs
		if err := field.Value.VDLRead(dec); err != nil {
			return err
		}
		*x = field
	}
	// Verify there is no second field before closing the value.
	switch index, err := dec.NextField(); {
	case err != nil:
		return err
	case index != -1:
		return fmt.Errorf("extra field %d in union %T, from %v", index, x, dec.Type())
	}
	return dec.FinishValue()
}
// A SgPriority represents data used to decide whether to transfer blob ownership
// between two devices.
type SgPriority struct {
	DevType    int32     // device type (BlobDevTypeServer, BlobDevTypeNormal, BlobDevTypeLeaf)
	Distance   float32   // mean number of hops from a server-quality member of the syncgroup
	ServerTime time.Time // when data from a server-quality member reached this device
}

func (SgPriority) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.SgPriority"`
}) {
}

// VDLIsZero reports whether every field of x holds its zero value.
func (x SgPriority) VDLIsZero() bool {
	return x.DevType == 0 && x.Distance == 0 && x.ServerTime.IsZero()
}
// VDLWrite encodes x on enc as a VDL struct, emitting only fields that
// differ from their zero value; the field list ends with NextField(-1).
func (x SgPriority) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_22); err != nil {
		return err
	}
	if x.DevType != 0 {
		if err := enc.NextFieldValueInt(0, vdl.Int32Type, int64(x.DevType)); err != nil {
			return err
		}
	}
	if x.Distance != 0 {
		if err := enc.NextFieldValueFloat(1, vdl.Float32Type, float64(x.Distance)); err != nil {
			return err
		}
	}
	if !x.ServerTime.IsZero() {
		if err := enc.NextField(2); err != nil {
			return err
		}
		// Convert the native time.Time to its VDL wire form before writing.
		var wire vdltime.Time
		if err := vdltime.TimeFromNative(&wire, x.ServerTime); err != nil {
			return err
		}
		if err := wire.VDLWrite(enc); err != nil {
			return err
		}
	}
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a SgPriority from dec into x, first resetting x to its
// zero value. If the decoder's struct type differs from the local type,
// fields are matched by name and unrecognized fields are skipped.
func (x *SgPriority) VDLRead(dec vdl.Decoder) error {
	*x = SgPriority{}
	if err := dec.StartValue(__VDLType_struct_22); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			// -1 marks the end of the field list.
			return dec.FinishValue()
		}
		if decType != __VDLType_struct_22 {
			// Remap wire field index to the local struct by field name.
			index = __VDLType_struct_22.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			switch value, err := dec.ReadValueInt(32); {
			case err != nil:
				return err
			default:
				x.DevType = int32(value)
			}
		case 1:
			switch value, err := dec.ReadValueFloat(32); {
			case err != nil:
				return err
			default:
				x.Distance = float32(value)
			}
		case 2:
			// Decode the VDL wire time, then convert to native time.Time.
			var wire vdltime.Time
			if err := wire.VDLRead(dec); err != nil {
				return err
			}
			if err := vdltime.TimeToNative(wire, &x.ServerTime); err != nil {
				return err
			}
		}
	}
}
// A SgPriorities maps syncgroup IDs to SgPriority structures. It is sent and
// received in GetDeltas calls to allow the participants to assess who has
// higher priorities for keeping blobs.
type SgPriorities map[GroupId]SgPriority

func (SgPriorities) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.SgPriorities"`
}) {
}

// VDLIsZero reports whether the map is empty (nil or zero-length).
func (x SgPriorities) VDLIsZero() bool {
	return len(x) == 0
}
// VDLWrite encodes the map on enc, providing a length hint up front and
// terminating the entry stream with NextEntry(true).
func (x SgPriorities) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_map_23); err != nil {
		return err
	}
	if err := enc.SetLenHint(len(x)); err != nil {
		return err
	}
	for key, elem := range x {
		if err := enc.NextEntryValueString(__VDLType_string_7, string(key)); err != nil {
			return err
		}
		if err := elem.VDLWrite(enc); err != nil {
			return err
		}
	}
	if err := enc.NextEntry(true); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a SgPriorities map from dec into *x. The map is
// pre-sized from the decoder's length hint when one is available, and is
// lazily allocated otherwise so an empty wire map decodes to nil.
func (x *SgPriorities) VDLRead(dec vdl.Decoder) error {
	if err := dec.StartValue(__VDLType_map_23); err != nil {
		return err
	}
	var tmpMap SgPriorities
	if len := dec.LenHint(); len > 0 {
		tmpMap = make(SgPriorities, len)
	}
	for {
		switch done, key, err := dec.NextEntryValueString(); {
		case err != nil:
			return err
		case done:
			*x = tmpMap
			return dec.FinishValue()
		default:
			var elem SgPriority
			if err := elem.VDLRead(dec); err != nil {
				return err
			}
			if tmpMap == nil {
				tmpMap = make(SgPriorities)
			}
			tmpMap[GroupId(key)] = elem
		}
	}
}
// DeltaFinalResp contains the data returned at the end of a GetDeltas call.
type DeltaFinalResp struct {
	SgPriorities SgPriorities
}

func (DeltaFinalResp) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.DeltaFinalResp"`
}) {
}

// VDLIsZero reports whether x carries no syncgroup priorities.
func (x DeltaFinalResp) VDLIsZero() bool {
	return len(x.SgPriorities) == 0
}
// VDLWrite encodes x on enc, emitting the SgPriorities field only when it
// is non-empty; the field list ends with NextField(-1).
func (x DeltaFinalResp) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_24); err != nil {
		return err
	}
	if len(x.SgPriorities) != 0 {
		if err := enc.NextField(0); err != nil {
			return err
		}
		if err := x.SgPriorities.VDLWrite(enc); err != nil {
			return err
		}
	}
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a DeltaFinalResp from dec into x, first resetting x.
// If the decoder's struct type differs from the local type, fields are
// matched by name and unrecognized fields are skipped.
func (x *DeltaFinalResp) VDLRead(dec vdl.Decoder) error {
	*x = DeltaFinalResp{}
	if err := dec.StartValue(__VDLType_struct_24); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			// -1 marks the end of the field list.
			return dec.FinishValue()
		}
		if decType != __VDLType_struct_24 {
			index = __VDLType_struct_24.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			if err := x.SgPriorities.VDLRead(dec); err != nil {
				return err
			}
		}
	}
}
// ChunkHash contains the hash of a chunk that is part of a blob's recipe.
type ChunkHash struct {
	Hash []byte
}

func (ChunkHash) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.ChunkHash"`
}) {
}

// VDLIsZero reports whether x carries no hash bytes.
func (x ChunkHash) VDLIsZero() bool {
	return len(x.Hash) == 0
}
// VDLWrite encodes x on enc, emitting the Hash field only when non-empty;
// the field list ends with NextField(-1).
func (x ChunkHash) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_25); err != nil {
		return err
	}
	if len(x.Hash) != 0 {
		if err := enc.NextFieldValueBytes(0, __VDLType_list_26, x.Hash); err != nil {
			return err
		}
	}
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a ChunkHash from dec into x, first resetting x.
// Unrecognized fields from a compatible wire type are skipped.
func (x *ChunkHash) VDLRead(dec vdl.Decoder) error {
	*x = ChunkHash{}
	if err := dec.StartValue(__VDLType_struct_25); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			return dec.FinishValue()
		}
		if decType != __VDLType_struct_25 {
			index = __VDLType_struct_25.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			// -1: no fixed-length constraint on the byte slice.
			if err := dec.ReadValueBytes(-1, &x.Hash); err != nil {
				return err
			}
		}
	}
}
// ChunkData contains the data of a chunk.
type ChunkData struct {
	Data []byte
}

func (ChunkData) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.ChunkData"`
}) {
}

// VDLIsZero reports whether x carries no data bytes.
func (x ChunkData) VDLIsZero() bool {
	return len(x.Data) == 0
}
// VDLWrite encodes x on enc, emitting the Data field only when non-empty;
// the field list ends with NextField(-1).
func (x ChunkData) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_27); err != nil {
		return err
	}
	if len(x.Data) != 0 {
		if err := enc.NextFieldValueBytes(0, __VDLType_list_26, x.Data); err != nil {
			return err
		}
	}
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a ChunkData from dec into x, first resetting x.
// Unrecognized fields from a compatible wire type are skipped.
func (x *ChunkData) VDLRead(dec vdl.Decoder) error {
	*x = ChunkData{}
	if err := dec.StartValue(__VDLType_struct_27); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			return dec.FinishValue()
		}
		if decType != __VDLType_struct_27 {
			index = __VDLType_struct_27.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			// -1: no fixed-length constraint on the byte slice.
			if err := dec.ReadValueBytes(-1, &x.Data); err != nil {
				return err
			}
		}
	}
}
// TimeReq contains the send timestamp from the requester.
type TimeReq struct {
	SendTs time.Time
}

func (TimeReq) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.TimeReq"`
}) {
}

// VDLIsZero reports whether the send timestamp is the zero time.
func (x TimeReq) VDLIsZero() bool {
	return x.SendTs.IsZero()
}
// VDLWrite encodes x on enc, emitting SendTs only when it is non-zero;
// the field list ends with NextField(-1).
func (x TimeReq) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_28); err != nil {
		return err
	}
	if !x.SendTs.IsZero() {
		if err := enc.NextField(0); err != nil {
			return err
		}
		// Convert the native time.Time to its VDL wire form before writing.
		var wire vdltime.Time
		if err := vdltime.TimeFromNative(&wire, x.SendTs); err != nil {
			return err
		}
		if err := wire.VDLWrite(enc); err != nil {
			return err
		}
	}
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a TimeReq from dec into x, first resetting x.
// Unrecognized fields from a compatible wire type are skipped.
func (x *TimeReq) VDLRead(dec vdl.Decoder) error {
	*x = TimeReq{}
	if err := dec.StartValue(__VDLType_struct_28); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			return dec.FinishValue()
		}
		if decType != __VDLType_struct_28 {
			index = __VDLType_struct_28.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			// Decode the VDL wire time, then convert to native time.Time.
			var wire vdltime.Time
			if err := wire.VDLRead(dec); err != nil {
				return err
			}
			if err := vdltime.TimeToNative(wire, &x.SendTs); err != nil {
				return err
			}
		}
	}
}
// TimeResp contains information needed by the requester to estimate the
// difference between the two vclocks and to decide whether to incorporate the
// peer's vclock data.
type TimeResp struct {
	OrigTs time.Time // when we sent request
	RecvTs time.Time // when peer received request
	SendTs time.Time // when peer sent response
	// NTP server timestamp from the most recent NTP sync, or zero value if none.
	// Note, the NTP sync may have been performed by some peer device.
	LastNtpTs time.Time
	// Number of reboots since last NTP sync, accumulated across all hops of p2p
	// clock sync.
	NumReboots uint16
	// Number of sync hops between peer's device and its source of LastNtpTs.
	NumHops uint16
}

func (TimeResp) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.TimeResp"`
}) {
}

// VDLIsZero reports whether every field of x holds its zero value.
func (x TimeResp) VDLIsZero() bool {
	return x.OrigTs.IsZero() &&
		x.RecvTs.IsZero() &&
		x.SendTs.IsZero() &&
		x.LastNtpTs.IsZero() &&
		x.NumReboots == 0 &&
		x.NumHops == 0
}
// VDLWrite encodes x on enc as a VDL struct, emitting only fields that
// differ from their zero value. Each time.Time field is converted to its
// VDL wire form before writing; the field list ends with NextField(-1).
func (x TimeResp) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_29); err != nil {
		return err
	}
	if !x.OrigTs.IsZero() {
		if err := enc.NextField(0); err != nil {
			return err
		}
		var wire vdltime.Time
		if err := vdltime.TimeFromNative(&wire, x.OrigTs); err != nil {
			return err
		}
		if err := wire.VDLWrite(enc); err != nil {
			return err
		}
	}
	if !x.RecvTs.IsZero() {
		if err := enc.NextField(1); err != nil {
			return err
		}
		var wire vdltime.Time
		if err := vdltime.TimeFromNative(&wire, x.RecvTs); err != nil {
			return err
		}
		if err := wire.VDLWrite(enc); err != nil {
			return err
		}
	}
	if !x.SendTs.IsZero() {
		if err := enc.NextField(2); err != nil {
			return err
		}
		var wire vdltime.Time
		if err := vdltime.TimeFromNative(&wire, x.SendTs); err != nil {
			return err
		}
		if err := wire.VDLWrite(enc); err != nil {
			return err
		}
	}
	if !x.LastNtpTs.IsZero() {
		if err := enc.NextField(3); err != nil {
			return err
		}
		var wire vdltime.Time
		if err := vdltime.TimeFromNative(&wire, x.LastNtpTs); err != nil {
			return err
		}
		if err := wire.VDLWrite(enc); err != nil {
			return err
		}
	}
	if x.NumReboots != 0 {
		if err := enc.NextFieldValueUint(4, vdl.Uint16Type, uint64(x.NumReboots)); err != nil {
			return err
		}
	}
	if x.NumHops != 0 {
		if err := enc.NextFieldValueUint(5, vdl.Uint16Type, uint64(x.NumHops)); err != nil {
			return err
		}
	}
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a TimeResp from dec into x, first resetting x to its
// zero value. If the decoder's struct type differs from the local type,
// fields are matched by name and unrecognized fields are skipped. Time
// fields are decoded as VDL wire times and converted to native time.Time.
func (x *TimeResp) VDLRead(dec vdl.Decoder) error {
	*x = TimeResp{}
	if err := dec.StartValue(__VDLType_struct_29); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			return dec.FinishValue()
		}
		if decType != __VDLType_struct_29 {
			index = __VDLType_struct_29.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			var wire vdltime.Time
			if err := wire.VDLRead(dec); err != nil {
				return err
			}
			if err := vdltime.TimeToNative(wire, &x.OrigTs); err != nil {
				return err
			}
		case 1:
			var wire vdltime.Time
			if err := wire.VDLRead(dec); err != nil {
				return err
			}
			if err := vdltime.TimeToNative(wire, &x.RecvTs); err != nil {
				return err
			}
		case 2:
			var wire vdltime.Time
			if err := wire.VDLRead(dec); err != nil {
				return err
			}
			if err := vdltime.TimeToNative(wire, &x.SendTs); err != nil {
				return err
			}
		case 3:
			var wire vdltime.Time
			if err := wire.VDLRead(dec); err != nil {
				return err
			}
			if err := vdltime.TimeToNative(wire, &x.LastNtpTs); err != nil {
				return err
			}
		case 4:
			switch value, err := dec.ReadValueUint(16); {
			case err != nil:
				return err
			default:
				x.NumReboots = uint16(value)
			}
		case 5:
			switch value, err := dec.ReadValueUint(16); {
			case err != nil:
				return err
			default:
				x.NumHops = uint16(value)
			}
		}
	}
}
// A BlobSharesBySyncgroup maps syncgroup IDs to integer share numbers that a
// syncbase instance may have for a blob.
type BlobSharesBySyncgroup map[GroupId]int32

func (BlobSharesBySyncgroup) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.BlobSharesBySyncgroup"`
}) {
}

// VDLIsZero reports whether the map is empty (nil or zero-length).
func (x BlobSharesBySyncgroup) VDLIsZero() bool {
	return len(x) == 0
}
// VDLWrite encodes the map on enc, providing a length hint up front and
// terminating the entry stream with NextEntry(true).
func (x BlobSharesBySyncgroup) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_map_30); err != nil {
		return err
	}
	if err := enc.SetLenHint(len(x)); err != nil {
		return err
	}
	for key, elem := range x {
		if err := enc.NextEntryValueString(__VDLType_string_7, string(key)); err != nil {
			return err
		}
		if err := enc.WriteValueInt(vdl.Int32Type, int64(elem)); err != nil {
			return err
		}
	}
	if err := enc.NextEntry(true); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a BlobSharesBySyncgroup map from dec into *x. The map is
// pre-sized from the decoder's length hint when one is available, and is
// lazily allocated otherwise so an empty wire map decodes to nil.
func (x *BlobSharesBySyncgroup) VDLRead(dec vdl.Decoder) error {
	if err := dec.StartValue(__VDLType_map_30); err != nil {
		return err
	}
	var tmpMap BlobSharesBySyncgroup
	if len := dec.LenHint(); len > 0 {
		tmpMap = make(BlobSharesBySyncgroup, len)
	}
	for {
		switch done, key, err := dec.NextEntryValueString(); {
		case err != nil:
			return err
		case done:
			*x = tmpMap
			return dec.FinishValue()
		default:
			var elem int32
			switch value, err := dec.ReadValueInt(32); {
			case err != nil:
				return err
			default:
				elem = int32(value)
			}
			if tmpMap == nil {
				tmpMap = make(BlobSharesBySyncgroup)
			}
			tmpMap[GroupId(key)] = elem
		}
	}
}
// A LocationData is the information known about a particular location in a Signpost.
// TODO(m3b): Include mount table information to allow the location to be found.
type LocationData struct {
	WhenSeen time.Time // most recent time when blob thought to have been at location
	IsProxy  bool      // whether the location is a likely proxy to another syncgroup
	IsServer bool      // whether the location is a server that may be revealed outside its syncgroup
}

func (LocationData) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.LocationData"`
}) {
}

// VDLIsZero reports whether every field of x holds its zero value.
func (x LocationData) VDLIsZero() bool {
	return x.WhenSeen.IsZero() && !x.IsProxy && !x.IsServer
}
// VDLWrite encodes x on enc, emitting only fields that differ from their
// zero value; the field list ends with NextField(-1).
func (x LocationData) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_31); err != nil {
		return err
	}
	if !x.WhenSeen.IsZero() {
		if err := enc.NextField(0); err != nil {
			return err
		}
		// Convert the native time.Time to its VDL wire form before writing.
		var wire vdltime.Time
		if err := vdltime.TimeFromNative(&wire, x.WhenSeen); err != nil {
			return err
		}
		if err := wire.VDLWrite(enc); err != nil {
			return err
		}
	}
	if x.IsProxy {
		if err := enc.NextFieldValueBool(1, vdl.BoolType, x.IsProxy); err != nil {
			return err
		}
	}
	if x.IsServer {
		if err := enc.NextFieldValueBool(2, vdl.BoolType, x.IsServer); err != nil {
			return err
		}
	}
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a LocationData from dec into x, first resetting x.
// If the decoder's struct type differs from the local type, fields are
// matched by name and unrecognized fields are skipped.
func (x *LocationData) VDLRead(dec vdl.Decoder) error {
	*x = LocationData{}
	if err := dec.StartValue(__VDLType_struct_31); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			return dec.FinishValue()
		}
		if decType != __VDLType_struct_31 {
			index = __VDLType_struct_31.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			// Decode the VDL wire time, then convert to native time.Time.
			var wire vdltime.Time
			if err := wire.VDLRead(dec); err != nil {
				return err
			}
			if err := vdltime.TimeToNative(wire, &x.WhenSeen); err != nil {
				return err
			}
		case 1:
			switch value, err := dec.ReadValueBool(); {
			case err != nil:
				return err
			default:
				x.IsProxy = value
			}
		case 2:
			switch value, err := dec.ReadValueBool(); {
			case err != nil:
				return err
			default:
				x.IsServer = value
			}
		}
	}
}
// A PeerToLocationDataMap is a map from syncbase peer names to LocationData structures.
type PeerToLocationDataMap map[string]LocationData

func (PeerToLocationDataMap) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.PeerToLocationDataMap"`
}) {
}

// VDLIsZero reports whether the map is empty (nil or zero-length).
func (x PeerToLocationDataMap) VDLIsZero() bool {
	return len(x) == 0
}
// VDLWrite encodes the map on enc, providing a length hint up front and
// terminating the entry stream with NextEntry(true).
func (x PeerToLocationDataMap) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_map_32); err != nil {
		return err
	}
	if err := enc.SetLenHint(len(x)); err != nil {
		return err
	}
	for key, elem := range x {
		if err := enc.NextEntryValueString(vdl.StringType, key); err != nil {
			return err
		}
		if err := elem.VDLWrite(enc); err != nil {
			return err
		}
	}
	if err := enc.NextEntry(true); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a PeerToLocationDataMap from dec into *x. The map is
// pre-sized from the decoder's length hint when one is available, and is
// lazily allocated otherwise so an empty wire map decodes to nil.
func (x *PeerToLocationDataMap) VDLRead(dec vdl.Decoder) error {
	if err := dec.StartValue(__VDLType_map_32); err != nil {
		return err
	}
	var tmpMap PeerToLocationDataMap
	if len := dec.LenHint(); len > 0 {
		tmpMap = make(PeerToLocationDataMap, len)
	}
	for {
		switch done, key, err := dec.NextEntryValueString(); {
		case err != nil:
			return err
		case done:
			*x = tmpMap
			return dec.FinishValue()
		default:
			var elem LocationData
			if err := elem.VDLRead(dec); err != nil {
				return err
			}
			if tmpMap == nil {
				tmpMap = make(PeerToLocationDataMap)
			}
			tmpMap[key] = elem
		}
	}
}
// A Signpost is a hint to syncbase of the device on which a blob may be found.
// It represents the data known about a blob even when the blob itself is not
// present on the device.
type Signpost struct {
	Locations PeerToLocationDataMap // Maps name of syncbase that probably has the blob to a LocationData
	SgIds     map[GroupId]struct{}  // SyncGroups through which the BlobRef was learned.
}

func (Signpost) __VDLReflect(struct {
	Name string `vdl:"v.io/x/ref/services/syncbase/server/interfaces.Signpost"`
}) {
}

// VDLIsZero reports whether x has neither locations nor syncgroup IDs.
func (x Signpost) VDLIsZero() bool {
	return len(x.Locations) == 0 && len(x.SgIds) == 0
}
// VDLWrite encodes x on enc, emitting only non-empty fields; the SgIds set
// is written through the shared anonymous-set helper. The field list ends
// with NextField(-1).
func (x Signpost) VDLWrite(enc vdl.Encoder) error {
	if err := enc.StartValue(__VDLType_struct_33); err != nil {
		return err
	}
	if len(x.Locations) != 0 {
		if err := enc.NextField(0); err != nil {
			return err
		}
		if err := x.Locations.VDLWrite(enc); err != nil {
			return err
		}
	}
	if len(x.SgIds) != 0 {
		if err := enc.NextField(1); err != nil {
			return err
		}
		if err := __VDLWriteAnon_set_3(enc, x.SgIds); err != nil {
			return err
		}
	}
	if err := enc.NextField(-1); err != nil {
		return err
	}
	return enc.FinishValue()
}
// VDLRead decodes a Signpost from dec into x, first resetting x. The SgIds
// set is read through the shared anonymous-set helper. Unrecognized fields
// from a compatible wire type are skipped.
func (x *Signpost) VDLRead(dec vdl.Decoder) error {
	*x = Signpost{}
	if err := dec.StartValue(__VDLType_struct_33); err != nil {
		return err
	}
	decType := dec.Type()
	for {
		index, err := dec.NextField()
		switch {
		case err != nil:
			return err
		case index == -1:
			return dec.FinishValue()
		}
		if decType != __VDLType_struct_33 {
			index = __VDLType_struct_33.FieldIndexByName(decType.Field(index).Name)
			if index == -1 {
				if err := dec.SkipValue(); err != nil {
					return err
				}
				continue
			}
		}
		switch index {
		case 0:
			if err := x.Locations.VDLRead(dec); err != nil {
				return err
			}
		case 1:
			if err := __VDLReadAnon_set_3(dec, &x.SgIds); err != nil {
				return err
			}
		}
	}
}
//////////////////////////////////////////////////
// Const definitions

// NoGroupId is the empty GroupId, used when no syncgroup is identified.
const NoGroupId = GroupId("")

// NodeRec type log record adds a new node in the dag.
const NodeRec = byte(0)

// LinkRec type log record adds a new link in the dag. Link records are
// added when a conflict is resolved by picking the local or the remote
// version as the resolution of a conflict, instead of creating a new
// version.
const LinkRec = byte(1)
//////////////////////////////////////////////////
// Error definitions

// Error IDs for this package, registered with the verror runtime along with
// their English format strings. Use the NewErrXxx constructors below to
// create instances.
var (
	ErrDupSyncgroupPublish = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.DupSyncgroupPublish", verror.NoRetry, "{1:}{2:} duplicate publish on syncgroup: {3}")
	ErrConnFail            = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.ConnFail", verror.NoRetry, "{1:}{2:} connection to peer failed{:_}")
	ErrBrokenCrConnection  = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.BrokenCrConnection", verror.NoRetry, "{1:}{2:} CrConnection stream to client does not exist or is broken")
	ErrDbOffline           = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.DbOffline", verror.NoRetry, "{1:}{2:} database {3} is offline and cannot be synced{:_}")
	ErrGetTimeFailed       = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.GetTimeFailed", verror.NoRetry, "{1:}{2:} GetTime failed{:_}")
	ErrNotAdmin            = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.NotAdmin", verror.NoRetry, "{1:}{2:} not an admin of the syncgroup")
)
// NewErrDupSyncgroupPublish returns an error with the ErrDupSyncgroupPublish ID.
// sgId is the syncgroup whose publish was duplicated.
func NewErrDupSyncgroupPublish(ctx *context.T, sgId syncbase.Id) error {
	return verror.New(ErrDupSyncgroupPublish, ctx, sgId)
}

// NewErrConnFail returns an error with the ErrConnFail ID.
func NewErrConnFail(ctx *context.T) error {
	return verror.New(ErrConnFail, ctx)
}

// NewErrBrokenCrConnection returns an error with the ErrBrokenCrConnection ID.
func NewErrBrokenCrConnection(ctx *context.T) error {
	return verror.New(ErrBrokenCrConnection, ctx)
}

// NewErrDbOffline returns an error with the ErrDbOffline ID.
// dbId is the database that is offline.
func NewErrDbOffline(ctx *context.T, dbId syncbase.Id) error {
	return verror.New(ErrDbOffline, ctx, dbId)
}

// NewErrGetTimeFailed returns an error with the ErrGetTimeFailed ID.
func NewErrGetTimeFailed(ctx *context.T) error {
	return verror.New(ErrGetTimeFailed, ctx)
}

// NewErrNotAdmin returns an error with the ErrNotAdmin ID.
func NewErrNotAdmin(ctx *context.T) error {
	return verror.New(ErrNotAdmin, ctx)
}
//////////////////////////////////////////////////
// Interface definitions

// SyncClientMethods is the client interface
// containing Sync methods.
//
// Sync defines methods for data exchange between Syncbases.
// TODO(hpucha): Flesh this out further.
type SyncClientMethods interface {
	// GetTime returns metadata related to the Syncbase virtual clock, including
	// system clock values, last NTP timestamp, num reboots, etc.
	GetTime(_ *context.T, req TimeReq, initiator string, _ ...rpc.CallOpt) (TimeResp, error)
	// GetDeltas returns the responder's current generation vectors and all
	// the missing log records when compared to the initiator's generation
	// vectors for one Database for either syncgroup metadata or data.
	// The final result (in DeltaFinalResp) currently includes the
	// syncgroup priorities for blob ownership for the server.
	GetDeltas(_ *context.T, req DeltaReq, initiator string, _ ...rpc.CallOpt) (SyncGetDeltasClientCall, error)
	// PublishSyncgroup is invoked on the syncgroup name (typically served
	// by a "central" peer) to publish the syncgroup. It takes the name of
	// Syncbase doing the publishing (the publisher) and returns the name
	// of the Syncbase where the syncgroup is published (the publishee).
	// This allows the publisher and the publishee to learn of each other.
	// When a syncgroup is published, the publishee is given the syncgroup
	// metadata, its current version at the publisher, and the current
	// syncgroup generation vector. The generation vector serves as a
	// checkpoint at the time of publishing. The publishing proceeds
	// asynchronously, and the publishee learns the syncgroup history
	// through the routine p2p sync process and determines when it has
	// caught up to the level of knowledge at the time of publishing using
	// the checkpointed generation vector. Until that point, the publishee
	// locally deems the syncgroup to be in a pending state and does not
	// mutate it. Thus it locally rejects syncgroup joins or updates to
	// its spec until it is caught up on the syncgroup history.
	PublishSyncgroup(_ *context.T, publisher string, sg Syncgroup, version string, genvec GenVector, _ ...rpc.CallOpt) (string, error)
	// JoinSyncgroupAtAdmin is invoked by a prospective syncgroup member's
	// Syncbase on a syncgroup admin. It checks whether the requestor is
	// allowed to join the named syncgroup, and if so, adds the requestor to
	// the syncgroup. It returns a copy of the updated syncgroup metadata,
	// its version, and the syncgroup generation vector at the time of the
	// join. Similar to the PublishSyncgroup scenario, the joiner at that
	// point does not have the syncgroup history and locally deems it to be
	// in a pending state and does not mutate it. This means it rejects
	// local updates to the syncgroup spec or, if it were also an admin on
	// the syncgroup, it would reject syncgroup joins until it is caught up
	// on the syncgroup history through p2p sync.
	JoinSyncgroupAtAdmin(_ *context.T, dbId syncbase.Id, sgId syncbase.Id, joinerName string, myInfo syncbase.SyncgroupMemberInfo, _ ...rpc.CallOpt) (sg Syncgroup, version string, genvec GenVector, _ error)
	// HaveBlob verifies that the peer has the requested blob, and if
	// present, returns its size. Otherwise, it returns -1, and the location
	// hints (the Signpost) that the peer has for the blob, filtered to
	// include only data the caller is permitted to see:
	// + Device D reveals a syncgroup SG to the caller C iff
	//    - D is in SG, and
	//    - SG is in the Signpost, and
	//    - at least one of:
	//       - SG is not private, or
	//       - C has permission to join SG.
	// + Device D reveals a location hint L to caller C iff
	//   there is a syncgroup SG such that
	//    - D is in SG, and
	//    - SG is in the Signpost, and
	//    - L is in SG, and
	//    - at least one of:
	//       - SG is not private, or
	//       - C has permission to join SG, or
	//       - L is a blob server in SG.
	HaveBlob(_ *context.T, br syncbase.BlobRef, _ ...rpc.CallOpt) (size int64, signpost Signpost, _ error)
	// FetchBlob fetches the requested blob.
	// It returns a number of blob ownership shares that the server hopes
	// the client will accept using the AcceptedBlobOwnership() call.
	FetchBlob(_ *context.T, br syncbase.BlobRef, mySgPriorities SgPriorities, _ ...rpc.CallOpt) (SyncFetchBlobClientCall, error)
	// Methods for incremental blob transfer. The transfer starts with the
	// receiver making a FetchBlobRecipe call to the sender for a given
	// BlobRef. The sender, in turn, sends the chunk hashes of all the
	// chunks that make up the requested blob (blob recipe). The receiver
	// looks up the chunk hashes in its local blob store, and identifies the
	// missing ones. The receiver then fetches the missing chunks using a
	// FetchChunks call from the sender. Finally, the receiver finishes the
	// blob fetch by combining the chunks obtained over the network with the
	// already available local chunks as per the blob recipe.
	// callerName is the syncbase Id of the caller, expressed as a string.
	// FetchBlobRecipe returns a number of blob ownership shares that the
	// server hopes the client will accept for each syncgroup using the
	// AcceptedBlobOwnership() call.
	FetchBlobRecipe(_ *context.T, br syncbase.BlobRef, callerName string, mySgPriorities SgPriorities, _ ...rpc.CallOpt) (SyncFetchBlobRecipeClientCall, error)
	FetchChunks(*context.T, ...rpc.CallOpt) (SyncFetchChunksClientCall, error)
	// RequestTakeBlob indicates that the caller wishes the server to take
	// some blob ownership shares for various syncgroups for the specified blob.
	// If the server chooses to act on the request, it may call FetchBlob/FetchBlobRecipe,
	// and ultimately AcceptedBlobOwnership().
	// callerName is the syncbase Id of the caller, expressed as a string.
	RequestTakeBlob(_ *context.T, br syncbase.BlobRef, callerName string, shares BlobSharesBySyncgroup, _ ...rpc.CallOpt) error
	// AcceptedBlobOwnership tells the server that the client callerName (a
	// syncbase Id expressed as a string) has accepted blob ownership of a
	// specified number of shares for blob br. The server may decrement
	// its share count by up to this number. It is safe for the server to
	// decrement its share count by fewer than the number of shares another
	// device has taken responsibility for, but unsafe to decrement it by
	// more than that number. It returns a hint as to whether the
	// server is likely to keep the blob itself, plus its syncbase Id
	// expressed as a string.
	AcceptedBlobOwnership(_ *context.T, br syncbase.BlobRef, callerName string, shares BlobSharesBySyncgroup, _ ...rpc.CallOpt) (serverName string, keepingBlob bool, _ error)
}
// SyncClientStub adds universal methods to SyncClientMethods.
// It is the full client-side surface for the Sync service.
type SyncClientStub interface {
	SyncClientMethods
	rpc.UniversalServiceMethods
}
// SyncClient returns a client stub for Sync.
// name is the object name of the remote Sync service.
func SyncClient(name string) SyncClientStub {
	return implSyncClientStub{name}
}
// implSyncClientStub implements SyncClientStub by issuing RPCs against the
// object name it holds.
type implSyncClientStub struct {
	name string
}
func (c implSyncClientStub) GetTime(ctx *context.T, i0 TimeReq, i1 string, opts ...rpc.CallOpt) (o0 TimeResp, err error) {
err = v23.GetClient(ctx).Call(ctx, c.name, "GetTime", []interface{}{i0, i1}, []interface{}{&o0}, opts...)
return
}
func (c implSyncClientStub) GetDeltas(ctx *context.T, i0 DeltaReq, i1 string, opts ...rpc.CallOpt) (ocall SyncGetDeltasClientCall, err error) {
var call rpc.ClientCall
if call, err = v23.GetClient(ctx).StartCall(ctx, c.name, "GetDeltas", []interface{}{i0, i1}, opts...); err != nil {
return
}
ocall = &implSyncGetDeltasClientCall{ClientCall: call}
return
}
func (c implSyncClientStub) PublishSyncgroup(ctx *context.T, i0 string, i1 Syncgroup, i2 string, i3 GenVector, opts ...rpc.CallOpt) (o0 string, err error) {
err = v23.GetClient(ctx).Call(ctx, c.name, "PublishSyncgroup", []interface{}{i0, i1, i2, i3}, []interface{}{&o0}, opts...)
return
}
func (c implSyncClientStub) JoinSyncgroupAtAdmin(ctx *context.T, i0 syncbase.Id, i1 syncbase.Id, i2 string, i3 syncbase.SyncgroupMemberInfo, opts ...rpc.CallOpt) (o0 Syncgroup, o1 string, o2 GenVector, err error) {
err = v23.GetClient(ctx).Call(ctx, c.name, "JoinSyncgroupAtAdmin", []interface{}{i0, i1, i2, i3}, []interface{}{&o0, &o1, &o2}, opts...)
return
}
func (c implSyncClientStub) HaveBlob(ctx *context.T, i0 syncbase.BlobRef, opts ...rpc.CallOpt) (o0 int64, o1 Signpost, err error) {
err = v23.GetClient(ctx).Call(ctx, c.name, "HaveBlob", []interface{}{i0}, []interface{}{&o0, &o1}, opts...)
return
}
func (c implSyncClientStub) FetchBlob(ctx *context.T, i0 syncbase.BlobRef, i1 SgPriorities, opts ...rpc.CallOpt) (ocall SyncFetchBlobClientCall, err error) {
var call rpc.ClientCall
if call, err = v23.GetClient(ctx).StartCall(ctx, c.name, "FetchBlob", []interface{}{i0, i1}, opts...); err != nil {
return
}
ocall = &implSyncFetchBlobClientCall{ClientCall: call}
return
}
func (c implSyncClientStub) FetchBlobRecipe(ctx *context.T, i0 syncbase.BlobRef, i1 string, i2 SgPriorities, opts ...rpc.CallOpt) (ocall SyncFetchBlobRecipeClientCall, err error) {
var call rpc.ClientCall
if call, err = v23.GetClient(ctx).StartCall(ctx, c.name, "FetchBlobRecipe", []interface{}{i0, i1, i2}, opts...); err != nil {
return
}
ocall = &implSyncFetchBlobRecipeClientCall{ClientCall: call}
return
}
func (c implSyncClientStub) FetchChunks(ctx *context.T, opts ...rpc.CallOpt) (ocall SyncFetchChunksClientCall, err error) {
var call rpc.ClientCall
if call, err = v23.GetClient(ctx).StartCall(ctx, c.name, "FetchChunks", nil, opts...); err != nil {
return
}
ocall = &implSyncFetchChunksClientCall{ClientCall: call}
return
}
// RequestTakeBlob issues the unary RequestTakeBlob RPC; the method has no
// positional results, so only the call error is returned.
func (c implSyncClientStub) RequestTakeBlob(ctx *context.T, i0 syncbase.BlobRef, i1 string, i2 BlobSharesBySyncgroup, opts ...rpc.CallOpt) (err error) {
	inArgs := []interface{}{i0, i1, i2}
	return v23.GetClient(ctx).Call(ctx, c.name, "RequestTakeBlob", inArgs, nil, opts...)
}
// AcceptedBlobOwnership issues the unary AcceptedBlobOwnership RPC, decoding
// the server's name and its keeping-blob hint into the positional results.
func (c implSyncClientStub) AcceptedBlobOwnership(ctx *context.T, i0 syncbase.BlobRef, i1 string, i2 BlobSharesBySyncgroup, opts ...rpc.CallOpt) (o0 string, o1 bool, err error) {
	inArgs := []interface{}{i0, i1, i2}
	outArgs := []interface{}{&o0, &o1}
	err = v23.GetClient(ctx).Call(ctx, c.name, "AcceptedBlobOwnership", inArgs, outArgs, opts...)
	return o0, o1, err
}
// SyncGetDeltasClientStream is the client stream for Sync.GetDeltas.
type SyncGetDeltasClientStream interface {
	// RecvStream returns the receiver side of the Sync.GetDeltas client stream.
	RecvStream() interface {
		// Advance stages an item so that it may be retrieved via Value. Returns
		// true iff there is an item to retrieve. Advance must be called before
		// Value is called. May block if an item is not available.
		Advance() bool
		// Value returns the item that was staged by Advance. May panic if Advance
		// returned false or was not called. Never blocks.
		Value() DeltaResp
		// Err returns any error encountered by Advance. Never blocks.
		Err() error
	}
}

// SyncGetDeltasClientCall represents the call returned from Sync.GetDeltas.
type SyncGetDeltasClientCall interface {
	SyncGetDeltasClientStream
	// Finish blocks until the server is done, and returns the positional return
	// values for call.
	//
	// Finish returns immediately if the call has been canceled; depending on the
	// timing the output could either be an error signaling cancelation, or the
	// valid positional return values from the server.
	//
	// Calling Finish is mandatory for releasing stream resources, unless the call
	// has been canceled or any of the other methods return an error. Finish should
	// be called at most once.
	Finish() (DeltaFinalResp, error)
}

// implSyncGetDeltasClientCall wraps the raw rpc.ClientCall and caches the most
// recently received stream item (valRecv) and receive error (errRecv).
type implSyncGetDeltasClientCall struct {
	rpc.ClientCall
	valRecv DeltaResp
	errRecv error
}

// RecvStream returns the typed receive side of the GetDeltas stream.
func (c *implSyncGetDeltasClientCall) RecvStream() interface {
	Advance() bool
	Value() DeltaResp
	Err() error
} {
	return implSyncGetDeltasClientCallRecv{c}
}

// implSyncGetDeltasClientCallRecv adapts the underlying call to the
// Advance/Value/Err iterator interface.
type implSyncGetDeltasClientCallRecv struct {
	c *implSyncGetDeltasClientCall
}

// Advance receives the next DeltaResp into valRecv; returns true iff the
// receive succeeded. Any error (including io.EOF at end of stream) is cached
// in errRecv for Err to inspect.
func (c implSyncGetDeltasClientCallRecv) Advance() bool {
	c.c.errRecv = c.c.Recv(&c.c.valRecv)
	return c.c.errRecv == nil
}

// Value returns the item staged by the last successful Advance.
func (c implSyncGetDeltasClientCallRecv) Value() DeltaResp {
	return c.c.valRecv
}

// Err returns the cached receive error; io.EOF is normal end-of-stream and is
// reported as nil.
func (c implSyncGetDeltasClientCallRecv) Err() error {
	if c.c.errRecv == io.EOF {
		return nil
	}
	return c.c.errRecv
}

// Finish completes the call and decodes the single DeltaFinalResp result.
func (c *implSyncGetDeltasClientCall) Finish() (o0 DeltaFinalResp, err error) {
	err = c.ClientCall.Finish(&o0)
	return
}
// SyncFetchBlobClientStream is the client stream for Sync.FetchBlob.
type SyncFetchBlobClientStream interface {
	// RecvStream returns the receiver side of the Sync.FetchBlob client stream.
	RecvStream() interface {
		// Advance stages an item so that it may be retrieved via Value. Returns
		// true iff there is an item to retrieve. Advance must be called before
		// Value is called. May block if an item is not available.
		Advance() bool
		// Value returns the item that was staged by Advance. May panic if Advance
		// returned false or was not called. Never blocks.
		Value() []byte
		// Err returns any error encountered by Advance. Never blocks.
		Err() error
	}
}

// SyncFetchBlobClientCall represents the call returned from Sync.FetchBlob.
type SyncFetchBlobClientCall interface {
	SyncFetchBlobClientStream
	// Finish blocks until the server is done, and returns the positional return
	// values for call.
	//
	// Finish returns immediately if the call has been canceled; depending on the
	// timing the output could either be an error signaling cancelation, or the
	// valid positional return values from the server.
	//
	// Calling Finish is mandatory for releasing stream resources, unless the call
	// has been canceled or any of the other methods return an error. Finish should
	// be called at most once.
	Finish() (shares BlobSharesBySyncgroup, _ error)
}

// implSyncFetchBlobClientCall wraps the raw rpc.ClientCall and caches the most
// recently received blob chunk (valRecv) and receive error (errRecv).
type implSyncFetchBlobClientCall struct {
	rpc.ClientCall
	valRecv []byte
	errRecv error
}

// RecvStream returns the typed receive side of the FetchBlob stream.
func (c *implSyncFetchBlobClientCall) RecvStream() interface {
	Advance() bool
	Value() []byte
	Err() error
} {
	return implSyncFetchBlobClientCallRecv{c}
}

// implSyncFetchBlobClientCallRecv adapts the underlying call to the
// Advance/Value/Err iterator interface.
type implSyncFetchBlobClientCallRecv struct {
	c *implSyncFetchBlobClientCall
}

// Advance receives the next byte slice into valRecv; returns true iff the
// receive succeeded. Errors (including io.EOF) are cached for Err.
func (c implSyncFetchBlobClientCallRecv) Advance() bool {
	c.c.errRecv = c.c.Recv(&c.c.valRecv)
	return c.c.errRecv == nil
}

// Value returns the item staged by the last successful Advance.
func (c implSyncFetchBlobClientCallRecv) Value() []byte {
	return c.c.valRecv
}

// Err returns the cached receive error; io.EOF is normal end-of-stream and is
// reported as nil.
func (c implSyncFetchBlobClientCallRecv) Err() error {
	if c.c.errRecv == io.EOF {
		return nil
	}
	return c.c.errRecv
}

// Finish completes the call and decodes the BlobSharesBySyncgroup result.
func (c *implSyncFetchBlobClientCall) Finish() (o0 BlobSharesBySyncgroup, err error) {
	err = c.ClientCall.Finish(&o0)
	return
}
// SyncFetchBlobRecipeClientStream is the client stream for Sync.FetchBlobRecipe.
type SyncFetchBlobRecipeClientStream interface {
	// RecvStream returns the receiver side of the Sync.FetchBlobRecipe client stream.
	RecvStream() interface {
		// Advance stages an item so that it may be retrieved via Value. Returns
		// true iff there is an item to retrieve. Advance must be called before
		// Value is called. May block if an item is not available.
		Advance() bool
		// Value returns the item that was staged by Advance. May panic if Advance
		// returned false or was not called. Never blocks.
		Value() ChunkHash
		// Err returns any error encountered by Advance. Never blocks.
		Err() error
	}
}

// SyncFetchBlobRecipeClientCall represents the call returned from Sync.FetchBlobRecipe.
type SyncFetchBlobRecipeClientCall interface {
	SyncFetchBlobRecipeClientStream
	// Finish blocks until the server is done, and returns the positional return
	// values for call.
	//
	// Finish returns immediately if the call has been canceled; depending on the
	// timing the output could either be an error signaling cancelation, or the
	// valid positional return values from the server.
	//
	// Calling Finish is mandatory for releasing stream resources, unless the call
	// has been canceled or any of the other methods return an error. Finish should
	// be called at most once.
	Finish() (shares BlobSharesBySyncgroup, _ error)
}

// implSyncFetchBlobRecipeClientCall wraps the raw rpc.ClientCall and caches
// the most recently received ChunkHash (valRecv) and receive error (errRecv).
type implSyncFetchBlobRecipeClientCall struct {
	rpc.ClientCall
	valRecv ChunkHash
	errRecv error
}

// RecvStream returns the typed receive side of the FetchBlobRecipe stream.
func (c *implSyncFetchBlobRecipeClientCall) RecvStream() interface {
	Advance() bool
	Value() ChunkHash
	Err() error
} {
	return implSyncFetchBlobRecipeClientCallRecv{c}
}

// implSyncFetchBlobRecipeClientCallRecv adapts the underlying call to the
// Advance/Value/Err iterator interface.
type implSyncFetchBlobRecipeClientCallRecv struct {
	c *implSyncFetchBlobRecipeClientCall
}

// Advance resets valRecv to the zero ChunkHash (so fields absent from the
// next message do not retain values from the previous one), then receives the
// next item. Returns true iff the receive succeeded.
func (c implSyncFetchBlobRecipeClientCallRecv) Advance() bool {
	c.c.valRecv = ChunkHash{}
	c.c.errRecv = c.c.Recv(&c.c.valRecv)
	return c.c.errRecv == nil
}

// Value returns the item staged by the last successful Advance.
func (c implSyncFetchBlobRecipeClientCallRecv) Value() ChunkHash {
	return c.c.valRecv
}

// Err returns the cached receive error; io.EOF is normal end-of-stream and is
// reported as nil.
func (c implSyncFetchBlobRecipeClientCallRecv) Err() error {
	if c.c.errRecv == io.EOF {
		return nil
	}
	return c.c.errRecv
}

// Finish completes the call and decodes the BlobSharesBySyncgroup result.
func (c *implSyncFetchBlobRecipeClientCall) Finish() (o0 BlobSharesBySyncgroup, err error) {
	err = c.ClientCall.Finish(&o0)
	return
}
// SyncFetchChunksClientStream is the client stream for Sync.FetchChunks.
type SyncFetchChunksClientStream interface {
	// RecvStream returns the receiver side of the Sync.FetchChunks client stream.
	RecvStream() interface {
		// Advance stages an item so that it may be retrieved via Value. Returns
		// true iff there is an item to retrieve. Advance must be called before
		// Value is called. May block if an item is not available.
		Advance() bool
		// Value returns the item that was staged by Advance. May panic if Advance
		// returned false or was not called. Never blocks.
		Value() ChunkData
		// Err returns any error encountered by Advance. Never blocks.
		Err() error
	}
	// SendStream returns the send side of the Sync.FetchChunks client stream.
	SendStream() interface {
		// Send places the item onto the output stream. Returns errors
		// encountered while sending, or if Send is called after Close or
		// the stream has been canceled. Blocks if there is no buffer
		// space; will unblock when buffer space is available or after
		// the stream has been canceled.
		Send(item ChunkHash) error
		// Close indicates to the server that no more items will be sent;
		// server Recv calls will receive io.EOF after all sent items.
		// This is an optional call - e.g. a client might call Close if it
		// needs to continue receiving items from the server after it's
		// done sending. Returns errors encountered while closing, or if
		// Close is called after the stream has been canceled. Like Send,
		// blocks if there is no buffer space available.
		Close() error
	}
}

// SyncFetchChunksClientCall represents the call returned from Sync.FetchChunks.
type SyncFetchChunksClientCall interface {
	SyncFetchChunksClientStream
	// Finish performs the equivalent of SendStream().Close, then blocks until
	// the server is done, and returns the positional return values for the call.
	//
	// Finish returns immediately if the call has been canceled; depending on the
	// timing the output could either be an error signaling cancelation, or the
	// valid positional return values from the server.
	//
	// Calling Finish is mandatory for releasing stream resources, unless the call
	// has been canceled or any of the other methods return an error. Finish should
	// be called at most once.
	Finish() error
}

// implSyncFetchChunksClientCall wraps the raw rpc.ClientCall for the
// bidirectional FetchChunks stream, caching the most recently received
// ChunkData (valRecv) and receive error (errRecv).
type implSyncFetchChunksClientCall struct {
	rpc.ClientCall
	valRecv ChunkData
	errRecv error
}

// RecvStream returns the typed receive side of the FetchChunks stream.
func (c *implSyncFetchChunksClientCall) RecvStream() interface {
	Advance() bool
	Value() ChunkData
	Err() error
} {
	return implSyncFetchChunksClientCallRecv{c}
}

// implSyncFetchChunksClientCallRecv adapts the underlying call to the
// Advance/Value/Err iterator interface.
type implSyncFetchChunksClientCallRecv struct {
	c *implSyncFetchChunksClientCall
}

// Advance resets valRecv to the zero ChunkData (so fields absent from the
// next message do not retain values from the previous one), then receives the
// next item. Returns true iff the receive succeeded.
func (c implSyncFetchChunksClientCallRecv) Advance() bool {
	c.c.valRecv = ChunkData{}
	c.c.errRecv = c.c.Recv(&c.c.valRecv)
	return c.c.errRecv == nil
}

// Value returns the item staged by the last successful Advance.
func (c implSyncFetchChunksClientCallRecv) Value() ChunkData {
	return c.c.valRecv
}

// Err returns the cached receive error; io.EOF is normal end-of-stream and is
// reported as nil.
func (c implSyncFetchChunksClientCallRecv) Err() error {
	if c.c.errRecv == io.EOF {
		return nil
	}
	return c.c.errRecv
}

// SendStream returns the typed send side of the FetchChunks stream.
func (c *implSyncFetchChunksClientCall) SendStream() interface {
	Send(item ChunkHash) error
	Close() error
} {
	return implSyncFetchChunksClientCallSend{c}
}

// implSyncFetchChunksClientCallSend adapts the underlying call to the
// Send/Close sender interface.
type implSyncFetchChunksClientCallSend struct {
	c *implSyncFetchChunksClientCall
}

// Send forwards one ChunkHash request onto the stream.
func (c implSyncFetchChunksClientCallSend) Send(item ChunkHash) error {
	return c.c.Send(item)
}

// Close half-closes the send side; the server sees io.EOF after all sent items.
func (c implSyncFetchChunksClientCallSend) Close() error {
	return c.c.CloseSend()
}

// Finish completes the call; FetchChunks has no positional results.
func (c *implSyncFetchChunksClientCall) Finish() (err error) {
	err = c.ClientCall.Finish()
	return
}
// SyncServerMethods is the interface a server writer
// implements for Sync.
//
// Sync defines methods for data exchange between Syncbases.
// TODO(hpucha): Flesh this out further.
type SyncServerMethods interface {
	// GetTime returns metadata related to the Syncbase virtual clock, including
	// system clock values, last NTP timestamp, num reboots, etc.
	GetTime(_ *context.T, _ rpc.ServerCall, req TimeReq, initiator string) (TimeResp, error)
	// GetDeltas returns the responder's current generation vectors and all
	// the missing log records when compared to the initiator's generation
	// vectors for one Database for either syncgroup metadata or data.
	// The final result (in DeltaFinalResp) currently includes the
	// syncgroup priorities for blob ownership for the server.
	GetDeltas(_ *context.T, _ SyncGetDeltasServerCall, req DeltaReq, initiator string) (DeltaFinalResp, error)
	// PublishSyncgroup is invoked on the syncgroup name (typically served
	// by a "central" peer) to publish the syncgroup. It takes the name of
	// Syncbase doing the publishing (the publisher) and returns the name
	// of the Syncbase where the syncgroup is published (the publishee).
	// This allows the publisher and the publishee to learn of each other.
	// When a syncgroup is published, the publishee is given the syncgroup
	// metadata, its current version at the publisher, and the current
	// syncgroup generation vector. The generation vector serves as a
	// checkpoint at the time of publishing. The publishing proceeds
	// asynchronously, and the publishee learns the syncgroup history
	// through the routine p2p sync process and determines when it has
	// caught up to the level of knowledge at the time of publishing using
	// the checkpointed generation vector. Until that point, the publishee
	// locally deems the syncgroup to be in a pending state and does not
	// mutate it. Thus it locally rejects syncgroup joins or updates to
	// its spec until it is caught up on the syncgroup history.
	PublishSyncgroup(_ *context.T, _ rpc.ServerCall, publisher string, sg Syncgroup, version string, genvec GenVector) (string, error)
	// JoinSyncgroupAtAdmin is invoked by a prospective syncgroup member's
	// Syncbase on a syncgroup admin. It checks whether the requestor is
	// allowed to join the named syncgroup, and if so, adds the requestor to
	// the syncgroup. It returns a copy of the updated syncgroup metadata,
	// its version, and the syncgroup generation vector at the time of the
	// join. Similar to the PublishSyncgroup scenario, the joiner at that
	// point does not have the syncgroup history and locally deems it to be
	// in a pending state and does not mutate it. This means it rejects
	// local updates to the syncgroup spec or, if it were also an admin on
	// the syncgroup, it would reject syncgroup joins until it is caught up
	// on the syncgroup history through p2p sync.
	JoinSyncgroupAtAdmin(_ *context.T, _ rpc.ServerCall, dbId syncbase.Id, sgId syncbase.Id, joinerName string, myInfo syncbase.SyncgroupMemberInfo) (sg Syncgroup, version string, genvec GenVector, _ error)
	// HaveBlob verifies that the peer has the requested blob, and if
	// present, returns its size. Otherwise, it returns -1, and the location
	// hints (the Signpost) that the peer has for the blob, filtered to
	// include only data the caller is permitted to see:
	// + Device D reveals a syncgroup SG to the caller C iff
	//   - D is in SG, and
	//   - SG is in the Signpost, and
	//   - at least one of:
	//     - SG is not private, or
	//     - C has permission to join SG.
	// + Device D reveals a location hint L to caller C iff
	//   there is a syncgroup SG such that
	//   - D is in SG, and
	//   - SG is in the Signpost, and
	//   - L is in SG, and
	//   - at least one of:
	//     - SG is not private, or
	//     - C has permission to join SG, or
	//     - L is a blob server in SG.
	HaveBlob(_ *context.T, _ rpc.ServerCall, br syncbase.BlobRef) (size int64, signpost Signpost, _ error)
	// FetchBlob fetches the requested blob.
	// It returns a number of blob ownership shares that the server hopes
	// the client will accept using the AcceptedBlobOwnership() call.
	FetchBlob(_ *context.T, _ SyncFetchBlobServerCall, br syncbase.BlobRef, mySgPriorities SgPriorities) (shares BlobSharesBySyncgroup, _ error)
	// Methods for incremental blob transfer. The transfer starts with the
	// receiver making a FetchBlobRecipe call to the sender for a given
	// BlobRef. The sender, in turn, sends the chunk hashes of all the
	// chunks that make up the requested blob (blob recipe). The receiver
	// looks up the chunk hashes in its local blob store, and identifies the
	// missing ones. The receiver then fetches the missing chunks using a
	// FetchChunks call from the sender. Finally, the receiver finishes the
	// blob fetch by combining the chunks obtained over the network with the
	// already available local chunks as per the blob recipe.
	// callerName is the syncbase Id of the caller, expressed as a string.
	// FetchBlobRecipe returns a number of blob ownership shares that the
	// server hopes the client will accept for each syncgroup using the
	// AcceptedBlobOwnership() call.
	FetchBlobRecipe(_ *context.T, _ SyncFetchBlobRecipeServerCall, br syncbase.BlobRef, callerName string, mySgPriorities SgPriorities) (shares BlobSharesBySyncgroup, _ error)
	// FetchChunks streams requested chunk hashes from the client and the
	// corresponding chunk data back to it (see FetchBlobRecipe above).
	FetchChunks(*context.T, SyncFetchChunksServerCall) error
	// RequestTakeBlob indicates that the caller wishes the server to take
	// some blob ownership shares for various syncgroups for the specified blob.
	// If the server chooses to act on the request, it may call FetchBlob/FetchBlobRecipe,
	// and ultimately AcceptedBlobOwnership().
	// callerName is the syncbase Id of the caller, expressed as a string.
	RequestTakeBlob(_ *context.T, _ rpc.ServerCall, br syncbase.BlobRef, callerName string, shares BlobSharesBySyncgroup) error
	// AcceptedBlobOwnership tells the server that the client callerName (a
	// syncbase Id expressed as a string) has accepted blob ownership of a
	// specified number of shares for blob br. The server may decrement
	// its share count by up to this number. It is safe for the server to
	// decrement its share count by fewer than the number of shares another
	// device has taken responsibility for, but unsafe to decrement it by
	// more than that number. It returns a hint as to whether the
	// server is likely to keep the blob itself, plus its syncbase Id
	// expressed as a string.
	AcceptedBlobOwnership(_ *context.T, _ rpc.ServerCall, br syncbase.BlobRef, callerName string, shares BlobSharesBySyncgroup) (serverName string, keepingBlob bool, _ error)
}
// SyncServerStubMethods is the server interface containing
// Sync methods, as expected by rpc.Server.
// The only difference between this interface and SyncServerMethods
// is the streaming methods.
type SyncServerStubMethods interface {
	// GetTime returns metadata related to the Syncbase virtual clock, including
	// system clock values, last NTP timestamp, num reboots, etc.
	GetTime(_ *context.T, _ rpc.ServerCall, req TimeReq, initiator string) (TimeResp, error)
	// GetDeltas returns the responder's current generation vectors and all
	// the missing log records when compared to the initiator's generation
	// vectors for one Database for either syncgroup metadata or data.
	// The final result (in DeltaFinalResp) currently includes the
	// syncgroup priorities for blob ownership for the server.
	GetDeltas(_ *context.T, _ *SyncGetDeltasServerCallStub, req DeltaReq, initiator string) (DeltaFinalResp, error)
	// PublishSyncgroup is invoked on the syncgroup name (typically served
	// by a "central" peer) to publish the syncgroup. It takes the name of
	// Syncbase doing the publishing (the publisher) and returns the name
	// of the Syncbase where the syncgroup is published (the publishee).
	// This allows the publisher and the publishee to learn of each other.
	// When a syncgroup is published, the publishee is given the syncgroup
	// metadata, its current version at the publisher, and the current
	// syncgroup generation vector. The generation vector serves as a
	// checkpoint at the time of publishing. The publishing proceeds
	// asynchronously, and the publishee learns the syncgroup history
	// through the routine p2p sync process and determines when it has
	// caught up to the level of knowledge at the time of publishing using
	// the checkpointed generation vector. Until that point, the publishee
	// locally deems the syncgroup to be in a pending state and does not
	// mutate it. Thus it locally rejects syncgroup joins or updates to
	// its spec until it is caught up on the syncgroup history.
	PublishSyncgroup(_ *context.T, _ rpc.ServerCall, publisher string, sg Syncgroup, version string, genvec GenVector) (string, error)
	// JoinSyncgroupAtAdmin is invoked by a prospective syncgroup member's
	// Syncbase on a syncgroup admin. It checks whether the requestor is
	// allowed to join the named syncgroup, and if so, adds the requestor to
	// the syncgroup. It returns a copy of the updated syncgroup metadata,
	// its version, and the syncgroup generation vector at the time of the
	// join. Similar to the PublishSyncgroup scenario, the joiner at that
	// point does not have the syncgroup history and locally deems it to be
	// in a pending state and does not mutate it. This means it rejects
	// local updates to the syncgroup spec or, if it were also an admin on
	// the syncgroup, it would reject syncgroup joins until it is caught up
	// on the syncgroup history through p2p sync.
	JoinSyncgroupAtAdmin(_ *context.T, _ rpc.ServerCall, dbId syncbase.Id, sgId syncbase.Id, joinerName string, myInfo syncbase.SyncgroupMemberInfo) (sg Syncgroup, version string, genvec GenVector, _ error)
	// HaveBlob verifies that the peer has the requested blob, and if
	// present, returns its size. Otherwise, it returns -1, and the location
	// hints (the Signpost) that the peer has for the blob, filtered to
	// include only data the caller is permitted to see:
	// + Device D reveals a syncgroup SG to the caller C iff
	//   - D is in SG, and
	//   - SG is in the Signpost, and
	//   - at least one of:
	//     - SG is not private, or
	//     - C has permission to join SG.
	// + Device D reveals a location hint L to caller C iff
	//   there is a syncgroup SG such that
	//   - D is in SG, and
	//   - SG is in the Signpost, and
	//   - L is in SG, and
	//   - at least one of:
	//     - SG is not private, or
	//     - C has permission to join SG, or
	//     - L is a blob server in SG.
	HaveBlob(_ *context.T, _ rpc.ServerCall, br syncbase.BlobRef) (size int64, signpost Signpost, _ error)
	// FetchBlob fetches the requested blob.
	// It returns a number of blob ownership shares that the server hopes
	// the client will accept using the AcceptedBlobOwnership() call.
	FetchBlob(_ *context.T, _ *SyncFetchBlobServerCallStub, br syncbase.BlobRef, mySgPriorities SgPriorities) (shares BlobSharesBySyncgroup, _ error)
	// Methods for incremental blob transfer. The transfer starts with the
	// receiver making a FetchBlobRecipe call to the sender for a given
	// BlobRef. The sender, in turn, sends the chunk hashes of all the
	// chunks that make up the requested blob (blob recipe). The receiver
	// looks up the chunk hashes in its local blob store, and identifies the
	// missing ones. The receiver then fetches the missing chunks using a
	// FetchChunks call from the sender. Finally, the receiver finishes the
	// blob fetch by combining the chunks obtained over the network with the
	// already available local chunks as per the blob recipe.
	// callerName is the syncbase Id of the caller, expressed as a string.
	// FetchBlobRecipe returns a number of blob ownership shares that the
	// server hopes the client will accept for each syncgroup using the
	// AcceptedBlobOwnership() call.
	FetchBlobRecipe(_ *context.T, _ *SyncFetchBlobRecipeServerCallStub, br syncbase.BlobRef, callerName string, mySgPriorities SgPriorities) (shares BlobSharesBySyncgroup, _ error)
	// FetchChunks streams requested chunk hashes from the client and the
	// corresponding chunk data back to it (see FetchBlobRecipe above).
	FetchChunks(*context.T, *SyncFetchChunksServerCallStub) error
	// RequestTakeBlob indicates that the caller wishes the server to take
	// some blob ownership shares for various syncgroups for the specified blob.
	// If the server chooses to act on the request, it may call FetchBlob/FetchBlobRecipe,
	// and ultimately AcceptedBlobOwnership().
	// callerName is the syncbase Id of the caller, expressed as a string.
	RequestTakeBlob(_ *context.T, _ rpc.ServerCall, br syncbase.BlobRef, callerName string, shares BlobSharesBySyncgroup) error
	// AcceptedBlobOwnership tells the server that the client callerName (a
	// syncbase Id expressed as a string) has accepted blob ownership of a
	// specified number of shares for blob br. The server may decrement
	// its share count by up to this number. It is safe for the server to
	// decrement its share count by fewer than the number of shares another
	// device has taken responsibility for, but unsafe to decrement it by
	// more than that number. It returns a hint as to whether the
	// server is likely to keep the blob itself, plus its syncbase Id
	// expressed as a string.
	AcceptedBlobOwnership(_ *context.T, _ rpc.ServerCall, br syncbase.BlobRef, callerName string, shares BlobSharesBySyncgroup) (serverName string, keepingBlob bool, _ error)
}
// SyncServerStub adds universal methods to SyncServerStubMethods.
// It is what the rpc.Server ultimately dispatches on.
type SyncServerStub interface {
	SyncServerStubMethods
	// Describe the Sync interfaces.
	Describe__() []rpc.InterfaceDesc
}
// SyncServer returns a server stub for Sync.
// It converts an implementation of SyncServerMethods into
// an object that may be used by rpc.Server.
func SyncServer(impl SyncServerMethods) SyncServerStub {
	stub := implSyncServerStub{
		impl: impl,
	}
	// Initialize GlobState; always check the stub itself first, to handle the
	// case where the user has the Glob method defined in their VDL source.
	if gs := rpc.NewGlobState(stub); gs != nil {
		stub.gs = gs
	} else if gs := rpc.NewGlobState(impl); gs != nil {
		stub.gs = gs
	}
	return stub
}
// implSyncServerStub adapts a user-supplied SyncServerMethods implementation
// to the SyncServerStub interface, wrapping each streaming call in its
// typesafe *ServerCallStub and forwarding everything else unchanged.
type implSyncServerStub struct {
	impl SyncServerMethods
	gs   *rpc.GlobState
}

// GetTime forwards directly to the implementation.
func (s implSyncServerStub) GetTime(ctx *context.T, call rpc.ServerCall, i0 TimeReq, i1 string) (TimeResp, error) {
	return s.impl.GetTime(ctx, call, i0, i1)
}

// GetDeltas forwards to the implementation with the typed streaming call stub.
func (s implSyncServerStub) GetDeltas(ctx *context.T, call *SyncGetDeltasServerCallStub, i0 DeltaReq, i1 string) (DeltaFinalResp, error) {
	return s.impl.GetDeltas(ctx, call, i0, i1)
}

// PublishSyncgroup forwards directly to the implementation.
func (s implSyncServerStub) PublishSyncgroup(ctx *context.T, call rpc.ServerCall, i0 string, i1 Syncgroup, i2 string, i3 GenVector) (string, error) {
	return s.impl.PublishSyncgroup(ctx, call, i0, i1, i2, i3)
}

// JoinSyncgroupAtAdmin forwards directly to the implementation.
func (s implSyncServerStub) JoinSyncgroupAtAdmin(ctx *context.T, call rpc.ServerCall, i0 syncbase.Id, i1 syncbase.Id, i2 string, i3 syncbase.SyncgroupMemberInfo) (Syncgroup, string, GenVector, error) {
	return s.impl.JoinSyncgroupAtAdmin(ctx, call, i0, i1, i2, i3)
}

// HaveBlob forwards directly to the implementation.
func (s implSyncServerStub) HaveBlob(ctx *context.T, call rpc.ServerCall, i0 syncbase.BlobRef) (int64, Signpost, error) {
	return s.impl.HaveBlob(ctx, call, i0)
}

// FetchBlob forwards to the implementation with the typed streaming call stub.
func (s implSyncServerStub) FetchBlob(ctx *context.T, call *SyncFetchBlobServerCallStub, i0 syncbase.BlobRef, i1 SgPriorities) (BlobSharesBySyncgroup, error) {
	return s.impl.FetchBlob(ctx, call, i0, i1)
}

// FetchBlobRecipe forwards to the implementation with the typed streaming call stub.
func (s implSyncServerStub) FetchBlobRecipe(ctx *context.T, call *SyncFetchBlobRecipeServerCallStub, i0 syncbase.BlobRef, i1 string, i2 SgPriorities) (BlobSharesBySyncgroup, error) {
	return s.impl.FetchBlobRecipe(ctx, call, i0, i1, i2)
}

// FetchChunks forwards to the implementation with the typed streaming call stub.
func (s implSyncServerStub) FetchChunks(ctx *context.T, call *SyncFetchChunksServerCallStub) error {
	return s.impl.FetchChunks(ctx, call)
}

// RequestTakeBlob forwards directly to the implementation.
func (s implSyncServerStub) RequestTakeBlob(ctx *context.T, call rpc.ServerCall, i0 syncbase.BlobRef, i1 string, i2 BlobSharesBySyncgroup) error {
	return s.impl.RequestTakeBlob(ctx, call, i0, i1, i2)
}

// AcceptedBlobOwnership forwards directly to the implementation.
func (s implSyncServerStub) AcceptedBlobOwnership(ctx *context.T, call rpc.ServerCall, i0 syncbase.BlobRef, i1 string, i2 BlobSharesBySyncgroup) (string, bool, error) {
	return s.impl.AcceptedBlobOwnership(ctx, call, i0, i1, i2)
}

// Globber exposes the GlobState computed in SyncServer.
func (s implSyncServerStub) Globber() *rpc.GlobState {
	return s.gs
}

// Describe__ returns the interface descriptions served by this stub.
func (s implSyncServerStub) Describe__() []rpc.InterfaceDesc {
	return []rpc.InterfaceDesc{SyncDesc}
}
// SyncDesc describes the Sync interface.
var SyncDesc rpc.InterfaceDesc = descSync

// descSync hides the desc to keep godoc clean.
// NOTE: the Doc strings below are runtime metadata derived from the VDL
// source; they are surfaced to clients via Describe__ and must not be
// edited by hand.
var descSync = rpc.InterfaceDesc{
	Name:    "Sync",
	PkgPath: "v.io/x/ref/services/syncbase/server/interfaces",
	Doc:     "// Sync defines methods for data exchange between Syncbases.\n// TODO(hpucha): Flesh this out further.",
	Methods: []rpc.MethodDesc{
		{
			Name: "GetTime",
			Doc:  "// GetTime returns metadata related to the Syncbase virtual clock, including\n// system clock values, last NTP timestamp, num reboots, etc.",
			InArgs: []rpc.ArgDesc{
				{"req", ``},       // TimeReq
				{"initiator", ``}, // string
			},
			OutArgs: []rpc.ArgDesc{
				{"", ``}, // TimeResp
			},
		},
		{
			Name: "GetDeltas",
			Doc:  "// GetDeltas returns the responder's current generation vectors and all\n// the missing log records when compared to the initiator's generation\n// vectors for one Database for either syncgroup metadata or data.\n// The final result (in DeltaFinalResp) currently includes the\n// syncgroup priorities for blob ownership for the server.",
			InArgs: []rpc.ArgDesc{
				{"req", ``},       // DeltaReq
				{"initiator", ``}, // string
			},
			OutArgs: []rpc.ArgDesc{
				{"", ``}, // DeltaFinalResp
			},
			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
		},
		{
			Name: "PublishSyncgroup",
			Doc:  "// PublishSyncgroup is invoked on the syncgroup name (typically served\n// by a \"central\" peer) to publish the syncgroup. It takes the name of\n// Syncbase doing the publishing (the publisher) and returns the name\n// of the Syncbase where the syncgroup is published (the publishee).\n// This allows the publisher and the publishee to learn of each other.\n// When a syncgroup is published, the publishee is given the syncgroup\n// metadata, its current version at the publisher, and the current\n// syncgroup generation vector. The generation vector serves as a\n// checkpoint at the time of publishing. The publishing proceeds\n// asynchronously, and the publishee learns the syncgroup history\n// through the routine p2p sync process and determines when it has\n// caught up to the level of knowledge at the time of publishing using\n// the checkpointed generation vector. Until that point, the publishee\n// locally deems the syncgroup to be in a pending state and does not\n// mutate it. Thus it locally rejects syncgroup joins or updates to\n// its spec until it is caught up on the syncgroup history.",
			InArgs: []rpc.ArgDesc{
				{"publisher", ``}, // string
				{"sg", ``},        // Syncgroup
				{"version", ``},   // string
				{"genvec", ``},    // GenVector
			},
			OutArgs: []rpc.ArgDesc{
				{"", ``}, // string
			},
			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Write"))},
		},
		{
			Name: "JoinSyncgroupAtAdmin",
			Doc:  "// JoinSyncgroupAtAdmin is invoked by a prospective syncgroup member's\n// Syncbase on a syncgroup admin. It checks whether the requestor is\n// allowed to join the named syncgroup, and if so, adds the requestor to\n// the syncgroup. It returns a copy of the updated syncgroup metadata,\n// its version, and the syncgroup generation vector at the time of the\n// join. Similar to the PublishSyncgroup scenario, the joiner at that\n// point does not have the syncgroup history and locally deems it to be\n// in a pending state and does not mutate it. This means it rejects\n// local updates to the syncgroup spec or, if it were also an admin on\n// the syncgroup, it would reject syncgroup joins until it is caught up\n// on the syncgroup history through p2p sync.",
			InArgs: []rpc.ArgDesc{
				{"dbId", ``},       // syncbase.Id
				{"sgId", ``},       // syncbase.Id
				{"joinerName", ``}, // string
				{"myInfo", ``},     // syncbase.SyncgroupMemberInfo
			},
			OutArgs: []rpc.ArgDesc{
				{"sg", ``},      // Syncgroup
				{"version", ``}, // string
				{"genvec", ``},  // GenVector
			},
			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
		},
		{
			Name: "HaveBlob",
			Doc:  "// HaveBlob verifies that the peer has the requested blob, and if\n// present, returns its size. Otherwise, it returns -1, and the location\n// hints (the Signpost) that the peer has for the blob, filtered to\n// include only data the caller is permitted to see:\n// + Device D reveals a syncgroup SG to the caller C iff\n//   - D is in SG, and\n//   - SG is in the Signpost, and\n//   - at least one of:\n//     - SG is not private, or\n//     - C has permission to join SG.\n// + Device D reveals a location hint L to caller C iff\n//   there is a syncgroup SG such that\n//   - D is in SG, and\n//   - SG is in the Signpost, and\n//   - L is in SG, and\n//   - at least one of:\n//     - SG is not private, or\n//     - C has permission to join SG, or\n//     - L is a blob server in SG.",
			InArgs: []rpc.ArgDesc{
				{"br", ``}, // syncbase.BlobRef
			},
			OutArgs: []rpc.ArgDesc{
				{"size", ``},     // int64
				{"signpost", ``}, // Signpost
			},
		},
		{
			Name: "FetchBlob",
			Doc:  "// FetchBlob fetches the requested blob.\n// It returns a number of blob ownership shares that the server hopes\n// the client will accept using the AcceptedBlobOwnership() call.",
			InArgs: []rpc.ArgDesc{
				{"br", ``},             // syncbase.BlobRef
				{"mySgPriorities", ``}, // SgPriorities
			},
			OutArgs: []rpc.ArgDesc{
				{"shares", ``}, // BlobSharesBySyncgroup
			},
		},
		{
			Name: "FetchBlobRecipe",
			Doc:  "// Methods for incremental blob transfer. The transfer starts with the\n// receiver making a FetchBlobRecipe call to the sender for a given\n// BlobRef. The sender, in turn, sends the chunk hashes of all the\n// chunks that make up the requested blob (blob recipe). The receiver\n// looks up the chunk hashes in its local blob store, and identifies the\n// missing ones. The receiver then fetches the missing chunks using a\n// FetchChunks call from the sender. Finally, the receiver finishes the\n// blob fetch by combining the chunks obtained over the network with the\n// already available local chunks as per the blob recipe.\n// callerName is the syncbase Id of the caller, expressed as a string.\n// FetchBlobRecipe returns a number of blob ownership shares that the\n// server hopes the client will accept for each syncgroup using the\n// AcceptedBlobOwnership() call.",
			InArgs: []rpc.ArgDesc{
				{"br", ``},             // syncbase.BlobRef
				{"callerName", ``},     // string
				{"mySgPriorities", ``}, // SgPriorities
			},
			OutArgs: []rpc.ArgDesc{
				{"shares", ``}, // BlobSharesBySyncgroup
			},
		},
		{
			// FetchChunks is fully streaming: no fixed in/out args.
			Name: "FetchChunks",
		},
		{
			Name: "RequestTakeBlob",
			Doc:  "// RequestTakeBlob indicates that the caller wishes the server to take\n// some blob ownership shares for various syncgroups for the specified blob.\n// If the server chooses to act on the request, it may call FetchBlob/FetchBlobRecipe,\n// and ultimately AcceptedBlobOwnership().\n// callerName is the syncbase Id of the caller, expressed as a string.",
			InArgs: []rpc.ArgDesc{
				{"br", ``},         // syncbase.BlobRef
				{"callerName", ``}, // string
				{"shares", ``},     // BlobSharesBySyncgroup
			},
		},
		{
			Name: "AcceptedBlobOwnership",
			Doc:  "// AcceptedBlobOwnership tells the server that the client callerName (a\n// syncbase Id expressed as a string) has accepted blob ownership of a\n// specified number of shares for blob br. The server may decrement\n// its share count by up to this number. It is safe for the server to\n// decrement its share count by fewer than the number of shares another\n// device has taken responsibility for, but unsafe to decrement it by\n// more than that that number. It returns a hint as to whether the\n// server is likely to keep the blob itself, plus its syncbase Id\n// expressed as a string.",
			InArgs: []rpc.ArgDesc{
				{"br", ``},         // syncbase.BlobRef
				{"callerName", ``}, // string
				{"shares", ``},     // BlobSharesBySyncgroup
			},
			OutArgs: []rpc.ArgDesc{
				{"serverName", ``},  // string
				{"keepingBlob", ``}, // bool
			},
		},
	},
}
// SyncGetDeltasServerStream is the server stream for Sync.GetDeltas.
// It carries the DeltaResp items the server sends back to the initiator.
type SyncGetDeltasServerStream interface {
	// SendStream returns the send side of the Sync.GetDeltas server stream.
	SendStream() interface {
		// Send places the item onto the output stream. Returns errors encountered
		// while sending. Blocks if there is no buffer space; will unblock when
		// buffer space is available.
		Send(item DeltaResp) error
	}
}
// SyncGetDeltasServerCall represents the context passed to Sync.GetDeltas.
// It combines the generic rpc.ServerCall with the method's typed send stream.
type SyncGetDeltasServerCall interface {
	rpc.ServerCall
	SyncGetDeltasServerStream
}
// SyncGetDeltasServerCallStub is a wrapper that converts rpc.StreamServerCall into
// a typesafe stub that implements SyncGetDeltasServerCall.
type SyncGetDeltasServerCallStub struct {
	rpc.StreamServerCall
}

// Init initializes SyncGetDeltasServerCallStub from rpc.StreamServerCall.
func (s *SyncGetDeltasServerCallStub) Init(call rpc.StreamServerCall) {
	s.StreamServerCall = call
}

// SendStream returns the send side of the Sync.GetDeltas server stream.
func (s *SyncGetDeltasServerCallStub) SendStream() interface {
	Send(item DeltaResp) error
} {
	sender := implSyncGetDeltasServerCallSend{s: s}
	return sender
}
// implSyncGetDeltasServerCallSend adapts the stub's untyped Recv/Send stream
// into the typed Send(DeltaResp) interface by delegating to the stub.
type implSyncGetDeltasServerCallSend struct {
	s *SyncGetDeltasServerCallStub
}

// Send forwards item onto the underlying rpc stream.
func (c implSyncGetDeltasServerCallSend) Send(item DeltaResp) error {
	return c.s.Send(item)
}
// SyncFetchBlobServerStream is the server stream for Sync.FetchBlob.
// It carries the raw blob bytes the server sends back to the caller.
type SyncFetchBlobServerStream interface {
	// SendStream returns the send side of the Sync.FetchBlob server stream.
	SendStream() interface {
		// Send places the item onto the output stream. Returns errors encountered
		// while sending. Blocks if there is no buffer space; will unblock when
		// buffer space is available.
		Send(item []byte) error
	}
}
// SyncFetchBlobServerCall represents the context passed to Sync.FetchBlob.
// It combines the generic rpc.ServerCall with the method's typed send stream.
type SyncFetchBlobServerCall interface {
	rpc.ServerCall
	SyncFetchBlobServerStream
}
// SyncFetchBlobServerCallStub is a wrapper that converts rpc.StreamServerCall into
// a typesafe stub that implements SyncFetchBlobServerCall.
type SyncFetchBlobServerCallStub struct {
	rpc.StreamServerCall
}

// Init initializes SyncFetchBlobServerCallStub from rpc.StreamServerCall.
func (s *SyncFetchBlobServerCallStub) Init(call rpc.StreamServerCall) {
	s.StreamServerCall = call
}

// SendStream returns the send side of the Sync.FetchBlob server stream.
func (s *SyncFetchBlobServerCallStub) SendStream() interface {
	Send(item []byte) error
} {
	sender := implSyncFetchBlobServerCallSend{s: s}
	return sender
}
// implSyncFetchBlobServerCallSend adapts the stub's untyped stream into the
// typed Send([]byte) interface by delegating to the stub.
type implSyncFetchBlobServerCallSend struct {
	s *SyncFetchBlobServerCallStub
}

// Send forwards item onto the underlying rpc stream.
func (c implSyncFetchBlobServerCallSend) Send(item []byte) error {
	return c.s.Send(item)
}
// SyncFetchBlobRecipeServerStream is the server stream for Sync.FetchBlobRecipe.
// It carries the ChunkHash items that make up the requested blob's recipe.
type SyncFetchBlobRecipeServerStream interface {
	// SendStream returns the send side of the Sync.FetchBlobRecipe server stream.
	SendStream() interface {
		// Send places the item onto the output stream. Returns errors encountered
		// while sending. Blocks if there is no buffer space; will unblock when
		// buffer space is available.
		Send(item ChunkHash) error
	}
}
// SyncFetchBlobRecipeServerCall represents the context passed to Sync.FetchBlobRecipe.
// It combines the generic rpc.ServerCall with the method's typed send stream.
type SyncFetchBlobRecipeServerCall interface {
	rpc.ServerCall
	SyncFetchBlobRecipeServerStream
}
// SyncFetchBlobRecipeServerCallStub is a wrapper that converts rpc.StreamServerCall into
// a typesafe stub that implements SyncFetchBlobRecipeServerCall.
type SyncFetchBlobRecipeServerCallStub struct {
	rpc.StreamServerCall
}

// Init initializes SyncFetchBlobRecipeServerCallStub from rpc.StreamServerCall.
func (s *SyncFetchBlobRecipeServerCallStub) Init(call rpc.StreamServerCall) {
	s.StreamServerCall = call
}

// SendStream returns the send side of the Sync.FetchBlobRecipe server stream.
func (s *SyncFetchBlobRecipeServerCallStub) SendStream() interface {
	Send(item ChunkHash) error
} {
	sender := implSyncFetchBlobRecipeServerCallSend{s: s}
	return sender
}
// implSyncFetchBlobRecipeServerCallSend adapts the stub's untyped stream into
// the typed Send(ChunkHash) interface by delegating to the stub.
type implSyncFetchBlobRecipeServerCallSend struct {
	s *SyncFetchBlobRecipeServerCallStub
}

// Send forwards item onto the underlying rpc stream.
func (c implSyncFetchBlobRecipeServerCallSend) Send(item ChunkHash) error {
	return c.s.Send(item)
}
// SyncFetchChunksServerStream is the server stream for Sync.FetchChunks.
// The server receives ChunkHash requests from the client and sends back the
// corresponding ChunkData items.
type SyncFetchChunksServerStream interface {
	// RecvStream returns the receiver side of the Sync.FetchChunks server stream.
	RecvStream() interface {
		// Advance stages an item so that it may be retrieved via Value. Returns
		// true iff there is an item to retrieve. Advance must be called before
		// Value is called. May block if an item is not available.
		Advance() bool
		// Value returns the item that was staged by Advance. May panic if Advance
		// returned false or was not called. Never blocks.
		Value() ChunkHash
		// Err returns any error encountered by Advance. Never blocks.
		Err() error
	}
	// SendStream returns the send side of the Sync.FetchChunks server stream.
	SendStream() interface {
		// Send places the item onto the output stream. Returns errors encountered
		// while sending. Blocks if there is no buffer space; will unblock when
		// buffer space is available.
		Send(item ChunkData) error
	}
}
// SyncFetchChunksServerCall represents the context passed to Sync.FetchChunks.
// It combines the generic rpc.ServerCall with the method's typed recv/send streams.
type SyncFetchChunksServerCall interface {
	rpc.ServerCall
	SyncFetchChunksServerStream
}
// SyncFetchChunksServerCallStub is a wrapper that converts rpc.StreamServerCall into
// a typesafe stub that implements SyncFetchChunksServerCall.
type SyncFetchChunksServerCallStub struct {
	rpc.StreamServerCall
	valRecv ChunkHash // last item staged by RecvStream().Advance()
	errRecv error     // last error returned by Recv; io.EOF marks end of stream
}

// Init initializes SyncFetchChunksServerCallStub from rpc.StreamServerCall.
func (s *SyncFetchChunksServerCallStub) Init(call rpc.StreamServerCall) {
	s.StreamServerCall = call
}

// RecvStream returns the receiver side of the Sync.FetchChunks server stream.
func (s *SyncFetchChunksServerCallStub) RecvStream() interface {
	Advance() bool
	Value() ChunkHash
	Err() error
} {
	receiver := implSyncFetchChunksServerCallRecv{s: s}
	return receiver
}
// implSyncFetchChunksServerCallRecv adapts the stub's untyped stream into the
// typed Advance/Value/Err receiver interface, staging each item in the stub.
type implSyncFetchChunksServerCallRecv struct {
	s *SyncFetchChunksServerCallStub
}

// Advance stages the next ChunkHash from the stream into the stub; it reports
// whether an item was received without error.
func (c implSyncFetchChunksServerCallRecv) Advance() bool {
	c.s.valRecv = ChunkHash{}
	c.s.errRecv = c.s.Recv(&c.s.valRecv)
	return c.s.errRecv == nil
}

// Value returns the item staged by the last Advance.
func (c implSyncFetchChunksServerCallRecv) Value() ChunkHash {
	return c.s.valRecv
}

// Err returns any error encountered by Advance; a normal end-of-stream
// (io.EOF) is not reported as an error.
func (c implSyncFetchChunksServerCallRecv) Err() error {
	if err := c.s.errRecv; err != io.EOF {
		return err
	}
	return nil
}
// SendStream returns the send side of the Sync.FetchChunks server stream.
func (s *SyncFetchChunksServerCallStub) SendStream() interface {
	Send(item ChunkData) error
} {
	sender := implSyncFetchChunksServerCallSend{s: s}
	return sender
}
// implSyncFetchChunksServerCallSend adapts the stub's untyped stream into the
// typed Send(ChunkData) interface by delegating to the stub.
type implSyncFetchChunksServerCallSend struct {
	s *SyncFetchChunksServerCallStub
}

// Send forwards item onto the underlying rpc stream.
func (c implSyncFetchChunksServerCallSend) Send(item ChunkData) error {
	return c.s.Send(item)
}
// Hold type definitions in package-level variables, for better performance.
// Each variable caches the *vdl.Type computed once in __VDLInit; the comment
// on each line records the Go type assigned to it there.
var (
	__VDLType_map_1     *vdl.Type // GenVector
	__VDLType_map_2     *vdl.Type // Knowledge
	__VDLType_struct_3  *vdl.Type // LogRecMetadata
	__VDLType_list_4    *vdl.Type // []string
	__VDLType_struct_5  *vdl.Type // vdltime.Time
	__VDLType_struct_6  *vdl.Type // LogRec
	__VDLType_string_7  *vdl.Type // GroupId
	__VDLType_enum_8    *vdl.Type // SyncgroupStatus
	__VDLType_struct_9  *vdl.Type // SyncgroupMemberState
	__VDLType_struct_10 *vdl.Type // syncbase.SyncgroupMemberInfo
	__VDLType_struct_11 *vdl.Type // Syncgroup
	__VDLType_struct_12 *vdl.Type // syncbase.Id
	__VDLType_struct_13 *vdl.Type // syncbase.SyncgroupSpec
	__VDLType_map_14    *vdl.Type // map[string]SyncgroupMemberState
	__VDLType_map_15    *vdl.Type // CollectionPerms
	__VDLType_struct_16 *vdl.Type // access.AccessList
	__VDLType_struct_17 *vdl.Type // SgDeltaReq
	__VDLType_struct_18 *vdl.Type // DataDeltaReq
	__VDLType_set_19    *vdl.Type // map[GroupId]struct{}
	__VDLType_union_20  *vdl.Type // DeltaReq
	__VDLType_union_21  *vdl.Type // DeltaResp
	__VDLType_struct_22 *vdl.Type // SgPriority
	__VDLType_map_23    *vdl.Type // SgPriorities
	__VDLType_struct_24 *vdl.Type // DeltaFinalResp
	__VDLType_struct_25 *vdl.Type // ChunkHash
	__VDLType_list_26   *vdl.Type // []byte
	__VDLType_struct_27 *vdl.Type // ChunkData
	__VDLType_struct_28 *vdl.Type // TimeReq
	__VDLType_struct_29 *vdl.Type // TimeResp
	__VDLType_map_30    *vdl.Type // BlobSharesBySyncgroup
	__VDLType_struct_31 *vdl.Type // LocationData
	__VDLType_map_32    *vdl.Type // PeerToLocationDataMap
	__VDLType_struct_33 *vdl.Type // Signpost
)

// __VDLInitCalled guards __VDLInit so its work runs at most once.
var __VDLInitCalled bool
// __VDLInit performs vdl initialization. It is safe to call multiple times.
// If you have an init ordering issue, just insert the following line verbatim
// into your source files in this package, right after the "package foo" clause:
//
//	var _ = __VDLInit()
//
// The purpose of this function is to ensure that vdl initialization occurs in
// the right order, and very early in the init sequence. In particular, vdl
// registration and package variable initialization needs to occur before
// functions like vdl.TypeOf will work properly.
//
// This function returns a dummy value, so that it can be used to initialize the
// first var in the file, to take advantage of Go's defined init order.
func __VDLInit() struct{} {
	// Idempotence guard: later calls are no-ops.
	if __VDLInitCalled {
		return struct{}{}
	}
	__VDLInitCalled = true
	// Register types. Registration must precede the vdl.TypeOf calls below.
	vdl.Register((*GenVector)(nil))
	vdl.Register((*Knowledge)(nil))
	vdl.Register((*LogRecMetadata)(nil))
	vdl.Register((*LogRec)(nil))
	vdl.Register((*GroupId)(nil))
	vdl.Register((*SyncgroupStatus)(nil))
	vdl.Register((*SyncgroupMemberState)(nil))
	vdl.Register((*Syncgroup)(nil))
	vdl.Register((*CollectionPerms)(nil))
	vdl.Register((*SgDeltaReq)(nil))
	vdl.Register((*DataDeltaReq)(nil))
	vdl.Register((*DeltaReq)(nil))
	vdl.Register((*DeltaResp)(nil))
	vdl.Register((*SgPriority)(nil))
	vdl.Register((*SgPriorities)(nil))
	vdl.Register((*DeltaFinalResp)(nil))
	vdl.Register((*ChunkHash)(nil))
	vdl.Register((*ChunkData)(nil))
	vdl.Register((*TimeReq)(nil))
	vdl.Register((*TimeResp)(nil))
	vdl.Register((*BlobSharesBySyncgroup)(nil))
	vdl.Register((*LocationData)(nil))
	vdl.Register((*PeerToLocationDataMap)(nil))
	vdl.Register((*Signpost)(nil))
	// Initialize type definitions. .Elem() is used where the cached type is the
	// struct itself rather than a pointer to it.
	__VDLType_map_1 = vdl.TypeOf((*GenVector)(nil))
	__VDLType_map_2 = vdl.TypeOf((*Knowledge)(nil))
	__VDLType_struct_3 = vdl.TypeOf((*LogRecMetadata)(nil)).Elem()
	__VDLType_list_4 = vdl.TypeOf((*[]string)(nil))
	__VDLType_struct_5 = vdl.TypeOf((*vdltime.Time)(nil)).Elem()
	__VDLType_struct_6 = vdl.TypeOf((*LogRec)(nil)).Elem()
	__VDLType_string_7 = vdl.TypeOf((*GroupId)(nil))
	__VDLType_enum_8 = vdl.TypeOf((*SyncgroupStatus)(nil))
	__VDLType_struct_9 = vdl.TypeOf((*SyncgroupMemberState)(nil)).Elem()
	__VDLType_struct_10 = vdl.TypeOf((*syncbase.SyncgroupMemberInfo)(nil)).Elem()
	__VDLType_struct_11 = vdl.TypeOf((*Syncgroup)(nil)).Elem()
	__VDLType_struct_12 = vdl.TypeOf((*syncbase.Id)(nil)).Elem()
	__VDLType_struct_13 = vdl.TypeOf((*syncbase.SyncgroupSpec)(nil)).Elem()
	__VDLType_map_14 = vdl.TypeOf((*map[string]SyncgroupMemberState)(nil))
	__VDLType_map_15 = vdl.TypeOf((*CollectionPerms)(nil))
	__VDLType_struct_16 = vdl.TypeOf((*access.AccessList)(nil)).Elem()
	__VDLType_struct_17 = vdl.TypeOf((*SgDeltaReq)(nil)).Elem()
	__VDLType_struct_18 = vdl.TypeOf((*DataDeltaReq)(nil)).Elem()
	__VDLType_set_19 = vdl.TypeOf((*map[GroupId]struct{})(nil))
	__VDLType_union_20 = vdl.TypeOf((*DeltaReq)(nil))
	__VDLType_union_21 = vdl.TypeOf((*DeltaResp)(nil))
	__VDLType_struct_22 = vdl.TypeOf((*SgPriority)(nil)).Elem()
	__VDLType_map_23 = vdl.TypeOf((*SgPriorities)(nil))
	__VDLType_struct_24 = vdl.TypeOf((*DeltaFinalResp)(nil)).Elem()
	__VDLType_struct_25 = vdl.TypeOf((*ChunkHash)(nil)).Elem()
	__VDLType_list_26 = vdl.TypeOf((*[]byte)(nil))
	__VDLType_struct_27 = vdl.TypeOf((*ChunkData)(nil)).Elem()
	__VDLType_struct_28 = vdl.TypeOf((*TimeReq)(nil)).Elem()
	__VDLType_struct_29 = vdl.TypeOf((*TimeResp)(nil)).Elem()
	__VDLType_map_30 = vdl.TypeOf((*BlobSharesBySyncgroup)(nil))
	__VDLType_struct_31 = vdl.TypeOf((*LocationData)(nil)).Elem()
	__VDLType_map_32 = vdl.TypeOf((*PeerToLocationDataMap)(nil))
	__VDLType_struct_33 = vdl.TypeOf((*Signpost)(nil)).Elem()
	// Set error format strings. These register the English i18n messages for
	// the error IDs declared elsewhere in this package.
	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrDupSyncgroupPublish.ID), "{1:}{2:} duplicate publish on syncgroup: {3}")
	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrConnFail.ID), "{1:}{2:} connection to peer failed{:_}")
	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrBrokenCrConnection.ID), "{1:}{2:} CrConnection stream to client does not exist or is broken")
	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrDbOffline.ID), "{1:}{2:} database {3} is offline and cannot be synced{:_}")
	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrGetTimeFailed.ID), "{1:}{2:} GetTime failed{:_}")
	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrNotAdmin.ID), "{1:}{2:} not an admin of the syncgroup")
	return struct{}{}
}