Dataset Viewer (auto-converted to Parquet)

Columns:
- query_id: string (length 32)
- query: string (length 7 to 4.32k)
- positive_passages: list (length 1)
- negative_passages: list (length 88 to 101)

Example rows:
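Each passage object in the rows below carries docid, score, text, and title fields. As a convenience for anyone consuming the raw rows, here is a minimal Go sketch of types mirroring this schema; the struct and field names are assumptions inferred from the rows, not an official definition. Note that scores appear as quoted strings in the raw JSON.

```go
package schema

// Passage mirrors the objects inside positive_passages and
// negative_passages; field names are taken from the rows below.
type Passage struct {
	DocID string `json:"docid"`
	Score string `json:"score"` // scores are stored as strings, e.g. "0.8314351"
	Text  string `json:"text"`
	Title string `json:"title"`
}

// Row is a hypothetical struct for one dataset record.
type Row struct {
	QueryID          string    `json:"query_id"`
	Query            string    `json:"query"`
	PositivePassages []Passage `json:"positive_passages"`
	NegativePassages []Passage `json:"negative_passages"`
}
```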
35b72b49ef8599c9b3ef22f67e205c70
NewEmptyEncoder returns an empty encoder
[ { "docid": "01017f4f2f673aaf106587b48b654ff9", "score": "0.8314351", "text": "func NewEmptyEncoder() Encoder {\n\treturn &emptyEncoder{}\n}", "title": "" } ]
[ { "docid": "38467339a77591da8d2663aaa72f4569", "score": "0.85056394", "text": "func NewEmptyEncoder() Encoder {\r\n\treturn &emptyEncoder{}\r\n}", "title": "" }, { "docid": "66a4fe2fe2ecce9a0b5e15a8ba9be8f4", "score": "0.6745302", "text": "func DefaultEncoder() Encoder {\n\tdefaultEncoderOnce.Do(func() {\n\t\tdefaultEncoderInstance = &defaultLabelEncoder{\n\t\t\tpool: sync.Pool{\n\t\t\t\tNew: func() interface{} {\n\t\t\t\t\treturn &bytes.Buffer{}\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t})\n\treturn defaultEncoderInstance\n}", "title": "" }, { "docid": "cbf01bee923f94e3112af275ef389f53", "score": "0.6565882", "text": "func (ep *EncoderPool) New() Encoder {\n\treturn NewEncoder(nil, ep.format)\n}", "title": "" }, { "docid": "a4f84f2333736479110fc18caa93129e", "score": "0.6500416", "text": "func NewEncoder() *Encoder {\n\treturn &Encoder{cache: newCache(), regenc: make(map[reflect.Type]encoderFunc)}\n}", "title": "" }, { "docid": "349c9f2086f256f0cd6bfd40413c2624", "score": "0.6389443", "text": "func NewEncoder() *Encoder {\n\tbuffer := make([]byte, 64)\n\n\treturn &Encoder{\n\t\tbuffer: buffer[:0],\n\t\trefMap: make(map[unsafe.Pointer]_refElem, 7),\n\t}\n}", "title": "" }, { "docid": "694c634041b49813e91d27a892320d53", "score": "0.63035333", "text": "func NewEncoder(w io.Writer) *Encoder {}", "title": "" }, { "docid": "4166a48aa5f7f58cd51d15d8dd77ccc7", "score": "0.62450594", "text": "func (e *Encoder) EncodeNoOp() error {\n\treturn e.encode(NoOpMarker, func(*Encoder) error { return nil })\n}", "title": "" }, { "docid": "1b15d5f2440b8a3ea6af9881864986a4", "score": "0.6202977", "text": "func NewEncoder() *Encoder {\n\treturn &Encoder{\n\t\tPerlCompat: false,\n\t\tCompressionThreshold: 1024,\n\t\tversion: 1,\n\t}\n}", "title": "" }, { "docid": "3f142c9b3c9ed433c12308c2b3cfb0a5", "score": "0.61709267", "text": "func (en Encoder) Reset() {}", "title": "" }, { "docid": "bfaab98f84a9c2299e76fa47b932d08f", "score": "0.61592305", "text": "func NewEmptyDecoder() Decoder {\n\treturn &emptyDecoder{}\n}", "title": "" }, { "docid": "257d0bf14882a9fd9c42121890a9d7ea", "score": "0.6142079", "text": "func New(cfg *EncoderConfig) *Encoder {\n\treturn &Encoder{config: cfg}\n}", "title": "" }, { "docid": "3b503546e73ff12acd313031d88da7ad", "score": "0.6137017", "text": "func NewSingleEncoder(r EncodingRegistry, names *Names) *SingleEncoder {\n\tif names == nil {\n\t\tnames = globalNames\n\t}\n\treturn &SingleEncoder{\n\t\tregistry: r,\n\t\tnames: names,\n\t}\n}", "title": "" }, { "docid": "d5e214b816ec1e383454c2937ab49297", "score": "0.6124263", "text": "func MustNewEncoder(base Encoding) Encoder {\n\t_, ok := EncodingToStr[base]\n\tif !ok {\n\t\tpanic(\"Unsupported multibase encoding\")\n\t}\n\treturn Encoder{base}\n}", "title": "" }, { "docid": "f3cdb391b1f0d6a583aaa3f17f677d53", "score": "0.60980403", "text": "func NewEncoder(replacement byte) *Encoder {\n\tif replacement == 0 {\n\t\treplacement = byte(0x3F)\n\t}\n\treturn &Encoder{\n\t\treplacement: replacement,\n\t}\n}", "title": "" }, { "docid": "42b04bf821b94220f67835506a4721cf", "score": "0.6086417", "text": "func NewEncoder(w io.Writer) io.Writer { return &encoder{w: w} }", "title": "" }, { "docid": "dc69f6f3456adfa5b40750daeeaf3a64", "score": "0.60756004", "text": "func New() *Encoder {\n\treturn &Encoder{\n\t\tmediaType: libavutil.AvmediaTypeUnknown,\n\t}\n}", "title": "" }, { "docid": "2a1a056931ae783e26c89c1882a40b6e", "score": "0.60208595", "text": "func New() format.Encoder { return formatter }", "title": "" }, { "docid": 
"45bbdcb427c23bd659c92d3e4a4804b3", "score": "0.59850556", "text": "func NewPlain() (Encoder, error) {\n\treturn new(PlainEncoder), nil\n}", "title": "" }, { "docid": "c8dcd0d721a66a1d22e0099ffef915da", "score": "0.5982036", "text": "func NewEncoder(w io.Writer) *Encoder {\n enc := new(Encoder)\n enc.w = w\n\n return enc\n}", "title": "" }, { "docid": "241e56645156bfc0036a33e5c1ae79f2", "score": "0.5964168", "text": "func New(mode HashingMode) *Encoder {\n\treturn &Encoder{\n\t\tmode: mode,\n\t}\n}", "title": "" }, { "docid": "778b1d3a61432262145ec6d5ebd8299d", "score": "0.5963327", "text": "func NewEncoder(opts ...Option) *encoder {\n\tb := &encoder{}\n\tfor _, option := range opts {\n\t\toption(&b.opts)\n\t}\n\tb.encodeFuncMap = make(map[reflect.Kind]valueEncode)\n\treturn b\n}", "title": "" }, { "docid": "59dc146c70791602ff538535ad9e1a4d", "score": "0.59081995", "text": "func NewEncoder(options EncoderOptions) Encoder {\n\tif options.FileResolver == nil {\n\t\toptions.FileResolver = protoregistry.GlobalFiles\n\t}\n\tif options.TypeResolver == nil {\n\t\toptions.TypeResolver = protoregistry.GlobalTypes\n\t}\n\tenc := Encoder{\n\t\tscalarEncoders: map[string]FieldEncoder{\n\t\t\t\"cosmos.Dec\": cosmosDecEncoder,\n\t\t\t\"cosmos.Int\": cosmosIntEncoder,\n\t\t},\n\t\tmessageEncoders: map[string]MessageEncoder{\n\t\t\t\"key_field\": keyFieldEncoder,\n\t\t\t\"module_account\": moduleAccountEncoder,\n\t\t\t\"threshold_string\": thresholdStringEncoder,\n\t\t},\n\t\tfieldEncoders: map[string]FieldEncoder{\n\t\t\t\"legacy_coins\": nullSliceAsEmptyEncoder,\n\t\t},\n\t\tfileResolver: options.FileResolver,\n\t\ttypeResolver: options.TypeResolver,\n\t\tdoNotSortFields: options.DoNotSortFields,\n\t}\n\treturn enc\n}", "title": "" }, { "docid": "6742a08c55ea5de377cf373ec16c5b39", "score": "0.5878947", "text": "func NewEncoder(w io.Writer) Encoder {\n\treturn Encoder{w}\n}", "title": "" }, { "docid": "276ef4776cf0f2466002a2d394e6593e", "score": "0.58761734", "text": "func EmptyResponseEncoder(_ context.Context, writer http.ResponseWriter, _ interface{}) error {\n\twriter.WriteHeader(http.StatusOK)\n\treturn nil\n}", "title": "" }, { "docid": "a2f5c88326bfdde89ec78ac107e58e58", "score": "0.58735967", "text": "func NewEncoder(size int) *Encoder {\n\treturn &Encoder{make([]byte, 0, size)}\n}", "title": "" }, { "docid": "d50a5080f627f2f7285bb45347ae36f0", "score": "0.58569795", "text": "func NewEncoder(options ...EncoderOption) *Encoder {\n\te := &Encoder{\n\t\ttagAlias: \"qs\",\n\t}\n\n\t// Apply options\n\tfor _, opt := range options {\n\t\topt(e)\n\t}\n\n\te.cache = newCacheStore()\n\n\te.dataPool = &sync.Pool{New: func() interface{} {\n\t\ttagSize := 5\n\t\ttags := make([][]byte, 0, tagSize)\n\t\tfor i := 0; i < tagSize; i++ {\n\t\t\ttags = append(tags, make([]byte, 0, 56))\n\t\t}\n\t\treturn &encoder{\n\t\t\te: e,\n\t\t\ttags: tags,\n\t\t\tscope: make([]byte, 0, 64),\n\t\t}\n\t}}\n\n\treturn e\n}", "title": "" }, { "docid": "cc0594a7d65c77b060c30ba48cd4fb34", "score": "0.58067226", "text": "func NewDefault() Codec {\n\treturn New([]string{reflectcodec.DefaultTagName}, defaultMaxSliceLength)\n}", "title": "" }, { "docid": "3a418512aacf191e024fedbdb871f6e1", "score": "0.5784041", "text": "func NewEncoder(private_key []byte, defaults TransactionParams) *Encoder {\n\tpriv := signing.NewSecp256k1PrivateKey(private_key)\n\tpub := signing.NewSecp256k1Context().GetPublicKey(priv)\n\treturn &Encoder{\n\t\tprivate_key: private_key,\n\t\tpublic_key: pub.AsHex(),\n\t\tdefaults: defaults,\n\t}\n}", "title": "" }, { 
"docid": "72cf17982ca2238374fba6c2d47246c4", "score": "0.57821363", "text": "func newCodec() *Codec {\n\treturn &Codec{\n\t\trd: newReader(emptyReader),\n\t\twr: newWriter(ioutil.Discard),\n\t}\n}", "title": "" }, { "docid": "a9d52ab9c188906a68aedeb3ca668dfe", "score": "0.5766595", "text": "func (d *DummyPayload) Encode() []byte {\n\treturn nil\n}", "title": "" }, { "docid": "8b97fa3284d86ebb19b36d61f6c4a8c7", "score": "0.573791", "text": "func NewEncoder(rt reflect.Type) (*Encoder, error) {\n\tif rt == nil {\n\t\treturn nil, errors.New(\"invalid type: nil\")\n\t}\n\treturn &Encoder{typ: rt}, nil\n}", "title": "" }, { "docid": "1120ba1593dcee0c4b20289110e4eeda", "score": "0.5736089", "text": "func DefaultEncoder(ctx context.Context, res interface{}) *StandardComponents {\n\tif res, ok := res.(interface{ GetMetadata() *corev2.ObjectMeta }); ok {\n\t\treturn DefaultEncoder(ctx, res.GetMetadata())\n\t}\n\n\tcmp := StandardComponents{}\n\tif res, ok := res.(interface{ GetName() string }); ok {\n\t\tcmp.uniqueComponent = res.GetName()\n\t}\n\tif res, ok := res.(interface{ GetNamespace() string }); ok {\n\t\tcmp.namespace = res.GetNamespace()\n\t}\n\treturn &cmp\n}", "title": "" }, { "docid": "fdff4b9194b89971f3cdf35310fa1dd1", "score": "0.573544", "text": "func (e *encoder) Reset() { e.sink.Reset(0) }", "title": "" }, { "docid": "724ca511d47310d8b0641f47a7ef8dde", "score": "0.5721225", "text": "func NewEncoder() encoder.Encoder {\n\treturn cueEncoder{}\n}", "title": "" }, { "docid": "4d39505fae462a50ace9cf486e444d2c", "score": "0.5719705", "text": "func (*transcoder) NewEncoder(w io.Writer) Encoder {\n\treturn gob.NewEncoder(w)\n}", "title": "" }, { "docid": "a29eb4b9787907fca57270398bf82fef", "score": "0.5701463", "text": "func NewEncoder() *Encoder {\n\n\treturn &Encoder{\n\t\ttagName: \"form\",\n\t\tstructCache: newStructCacheMap(),\n\t}\n}", "title": "" }, { "docid": "f7cb0461e08f1f8e1f66d38959791b0f", "score": "0.56638795", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}", "title": "" }, { "docid": "f7cb0461e08f1f8e1f66d38959791b0f", "score": "0.56638795", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}", "title": "" }, { "docid": "dc737fd559c4aebe528ef35778c944e1", "score": "0.56569487", "text": "func TestOmitEmpty(t *testing.T) {\n mk := \"audi\"\n st := struct{ Omit *string `msgpack:\"omit,omitempty\"`\n Nil *string\n Make *string }{ nil, nil, &mk }\n\n //Check omitted\n buf := bytes.Buffer{}\n enc := NewEncoder(&buf)\n encodeDebug(t, enc, &buf, &st)\n t.Logf(\"%.*s\", buf.Len(), buf.Bytes())\n\n //Check not omitted\n buf.Reset()\n omit := \"NO Omit!!\"\n st.Omit = &omit\n enc = NewEncoder(&buf)\n encodeDebug(t, enc, &buf, &st)\n t.Logf(\"%.*s\", buf.Len(), buf.Bytes())\n}", "title": "" }, { "docid": "75825505fbafdbce3d872b30dbd8812d", "score": "0.5620872", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w: w}\n}", "title": "" }, { "docid": "75825505fbafdbce3d872b30dbd8812d", "score": "0.5620872", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w: w}\n}", "title": "" }, { "docid": "75825505fbafdbce3d872b30dbd8812d", "score": "0.5620872", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w: w}\n}", "title": "" }, { "docid": "75825505fbafdbce3d872b30dbd8812d", "score": "0.5620872", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w: w}\n}", "title": "" }, { "docid": "75825505fbafdbce3d872b30dbd8812d", "score": "0.5620872", "text": "func NewEncoder(w io.Writer) 
*Encoder {\n\treturn &Encoder{w: w}\n}", "title": "" }, { "docid": "75825505fbafdbce3d872b30dbd8812d", "score": "0.5620872", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w: w}\n}", "title": "" }, { "docid": "75825505fbafdbce3d872b30dbd8812d", "score": "0.5620872", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w: w}\n}", "title": "" }, { "docid": "c30414a1c74cae6c4840595e87230768", "score": "0.560439", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{\n\t\twriter: w,\n\t\tec: encodeState{},\n\t}\n}", "title": "" }, { "docid": "b8e56c87c8b9c07811a47c3712c938a4", "score": "0.558313", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{\n\t\tw: bufio.NewWriterSize(w, MinBufSize),\n\t}\n}", "title": "" }, { "docid": "7a83e206d843937019803105d3ac4c9d", "score": "0.558133", "text": "func NewMockDefaultCodec(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockDefaultCodec {\n\tmock := &MockDefaultCodec{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "title": "" }, { "docid": "4bff965c3d08b97d0b173cef5a4128dd", "score": "0.5577044", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{\n\t\tw: w,\n\t}\n}", "title": "" }, { "docid": "f63b29ec4ca665d26a7d29c94194d708", "score": "0.5561434", "text": "func (v *EmptyUnion) Encode(sw stream.Writer) error {\n\tif err := sw.WriteStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\treturn sw.WriteStructEnd()\n}", "title": "" }, { "docid": "92faa92215f961e4948f7dc76f44cad0", "score": "0.55530447", "text": "func (c *Charmap) NewEncoder() *encoding.Encoder {\n\tc.Init()\n\treturn &encoding.Encoder{\n\t\tTransformer: &cmapEncoder{\n\t\t\tbytes: c.bytes,\n\t\t\treplace: c.ReplacementChar,\n\t\t},\n\t}\n}", "title": "" }, { "docid": "215d94bb89d370adbd5606bdec62d472", "score": "0.55247355", "text": "func NewEncoder(w io.Writer, opts *EncodeOptions) *Encoder {\n\taopts := api.EncodeOptions{}\n\tif opts != nil {\n\t\taopts.TrackPointers = opts.TrackPointers\n\t\taopts.Buffer = opts.Buffer\n\t}\n\treturn &Encoder{state: api.NewEncoder(w, aopts)}\n}", "title": "" }, { "docid": "5a4ff1d29bcb3cbfeecd2f89efccf0f3", "score": "0.5522263", "text": "func NewEncoder(cfg zapcore.EncoderConfig) *Encoder {\n\treturn &Encoder{\n\t\tenc: zapcore.NewJSONEncoder(cfg),\n\t}\n}", "title": "" }, { "docid": "d7d261e0a085d3931f5d4391b2a15e58", "score": "0.5494995", "text": "func NewEncoder(w io.Writer, options ...EncoderOption) *Encoder {\n\te := &Encoder{\n\t\tw: w,\n\t\tprefix: \"root\",\n\t\tsuffix: \";\\n\",\n\t}\n\tfor _, option := range options {\n\t\toption(e)\n\t}\n\treturn e\n}", "title": "" }, { "docid": "a5ccab6ed04943edf69e3721dfb7a49d", "score": "0.5480721", "text": "func NewEncoder(w io.Writer) *Encoder {\n\tenc := encoderStatePool.Get()\n\tenc.w = w\n\treturn enc\n}", "title": "" }, { "docid": "3654da87b9301a5dcefed5bc25d3e958", "score": "0.5475694", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{\n\t\tbuff: new(bytes.Buffer),\n\t\tw: w,\n\t}\n}", "title": "" }, { "docid": "66d882885396285ad7ef441777a0316e", "score": "0.5464023", "text": "func NewMockObjectEncoder(ctrl *gomock.Controller) *MockObjectEncoder {\n\tmock := &MockObjectEncoder{ctrl: ctrl}\n\tmock.recorder = &MockObjectEncoderMockRecorder{mock}\n\treturn mock\n}", "title": "" }, { "docid": "3c7d049c81404653330f0f57fecc8b63", "score": "0.5462082", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{\n\t\tencoder: 
toml.NewEncoder(w),\n\t}\n}", "title": "" }, { "docid": "e827ed8fe17a4a8b925168e5d9c79586", "score": "0.54557943", "text": "func (ep *EncoderPool) Get() (encoder Encoder) {\n\tselect {\n\tcase encoder = <-ep.pool:\n\tdefault:\n\t\tencoder = ep.New()\n\t}\n\n\treturn\n}", "title": "" }, { "docid": "f6e8dae9b7116cdd7bcf577c84e2c099", "score": "0.5449207", "text": "func NewEncoder(target io.Writer) *Encoder {\n\treturn &Encoder{target: target}\n}", "title": "" }, { "docid": "1de75989b8929477886a72864934bc01", "score": "0.54381496", "text": "func NewEncoder(w io.Writer) *Encoder {\n\te := &Encoder{writer: newBinaryWriter(w)}\n\te.writeValType = e.writeMarker\n\treturn e\n}", "title": "" }, { "docid": "b3278c4804a6de6218bc314044150cfd", "score": "0.5429952", "text": "func NewEncoder(w io.Writer) *Encoder {\n\te := &Encoder{w: w}\n\tyaml_emitter_initialize(&e.emitter)\n\tyaml_emitter_set_output_writer(&e.emitter, e.w)\n\tyaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)\n\te.emit()\n\tyaml_document_start_event_initialize(&e.event, nil, nil, true)\n\te.emit()\n\n\treturn e\n}", "title": "" }, { "docid": "1cef66a3f18de6b857629156e01039ee", "score": "0.5419423", "text": "func NewEncoder(w io.Writer) proto.Encoder {\n\te := &encoder{\n\t\tbw: bufio.NewWriterSize(w, encoderBufferSize),\n\t}\n\treturn e\n}", "title": "" }, { "docid": "03a4509bec4325406635f106e25d5afd", "score": "0.53859895", "text": "func NewEncoder(w io.Writer, cb WriteResourceCallback, asBinary bool) *Encoder {\n\treturn &Encoder{\n\t\tw: w,\n\t\tcb: cb,\n\t\tasBinary: asBinary,\n\t}\n}", "title": "" }, { "docid": "68d22dca36935b6e3e1a728d90b47282", "score": "0.5384581", "text": "func NewEncoder(data interface{}) *Encoder {\n\treturn &Encoder{\n\t\tPathToStringConverter: PathToStringConverter,\n\t\tValueCustomProcessors: make(map[reflect.Kind]ValueCustomProcessor),\n\n\t\tinitialData: data,\n\t\tformData: make(map[string]string),\n\t}\n}", "title": "" }, { "docid": "eae00cfe9485d0a19dcf1b2fc362e1df", "score": "0.5382889", "text": "func NewEncoder(log logger.Logger, errFunc func(err error), errResponseHandler func(err error) (errorResponse error, errorCode int)) *encoder {\n\treturn &encoder{log, errFunc, errResponseHandler}\n}", "title": "" }, { "docid": "30d2d679940920d2774d2a46e025a2db", "score": "0.5367889", "text": "func NewMockEncoder(ctrl *gomock.Controller) *MockEncoder {\n\tmock := &MockEncoder{ctrl: ctrl}\n\tmock.recorder = &MockEncoderMockRecorder{mock}\n\treturn mock\n}", "title": "" }, { "docid": "3a7d5f69698b8af5074512fa77ad72d2", "score": "0.5355013", "text": "func NewEmptyRegistry() *Registry {\n\treturn &Registry{\n\t\trevTypeMap: make(map[string]*Type),\n\t\tpbdb: pb.NewDb(),\n\t}\n}", "title": "" }, { "docid": "4eabf0c57a61cebcd5b474d434a93c39", "score": "0.5337445", "text": "func (cfg *Config) NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w, cfg}\n}", "title": "" }, { "docid": "e09b1f3f042d1f05491a261ba4651d32", "score": "0.5331331", "text": "func NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{\n\t\tw: w,\n\t\theader: make([]byte, 0, 128),\n\t\tfooter: make([]byte, 0, 128),\n\t\tpair: make([]byte, 0, 128),\n\t\tfieldList: make([]*Field, 0, 16),\n\t\tprecision: time.Nanosecond,\n\t}\n}", "title": "" }, { "docid": "42fb6fc5d4087ce42256545aa6ded489", "score": "0.5323915", "text": "func NewEncoder(src image.Image) *Encoder {\n\treturn NewIm8Encoder(im8.Convert(src))\n}", "title": "" }, { "docid": "78404599c3ba711e44ff685eba13589b", "score": "0.527962", "text": "func NewEncoder(base 
Encoding) (Encoder, error) {\n\t_, ok := EncodingToStr[base]\n\tif !ok {\n\t\treturn Encoder{-1}, fmt.Errorf(\"Unsupported multibase encoding: %d\", base)\n\t}\n\treturn Encoder{base}, nil\n}", "title": "" }, { "docid": "a47101f812cca540c30c5b9d1727755c", "score": "0.52746356", "text": "func NewEncoder(w io.Writer, plugins ...plugin) *Encoder {\n\te := &Encoder{w: w, contentPrefix: contentPrefix, attributePrefix: attrPrefix}\n\tfor _, p := range plugins {\n\t\te = p.AddToEncoder(e)\n\t}\n\treturn e\n}", "title": "" }, { "docid": "bbdf086227e6d80f715dcc359c0a85a3", "score": "0.52657247", "text": "func NewEncoder(w io.Writer) sift.Encoder {\n\treturn &encoder{enc: json.NewEncoder(w)}\n}", "title": "" }, { "docid": "5572b8bcb4cd08df411f9d79cf70bf8f", "score": "0.52612436", "text": "func (e *StringEncoder) Reset() {\n\te.bytes = e.bytes[:0]\n}", "title": "" }, { "docid": "964f722d6acc3ce02a76d69c906f3464", "score": "0.5252278", "text": "func (enc *jsonEncoder) Clone() Encoder {\n\tclone := jsonPool.Get().(*jsonEncoder)\n\tclone.truncate()\n\tclone.bytes = append(clone.bytes, enc.bytes...)\n\tclone.messageF = enc.messageF\n\tclone.timeF = enc.timeF\n\tclone.levelF = enc.levelF\n\treturn clone\n}", "title": "" }, { "docid": "582d77810627d7d8679be63e08e86104", "score": "0.5251521", "text": "func NewEncoder() jsoniter.API {\n\treturn jsoniter.Config{\n\t\tEscapeHTML: true,\n\t\tSortMapKeys: true,\n\t\tValidateJsonRawMessage: true,\n\t}.Froze()\n}", "title": "" }, { "docid": "0f6d871e9c995f61305197b3152613f4", "score": "0.52468926", "text": "func NewEncoderID() EncoderID {\n\treturn EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}\n}", "title": "" }, { "docid": "5ae635fcfd885b087db6a5eec5b42e7e", "score": "0.5242053", "text": "func NewEncoder(et Type) (Encoder, error) {\n\tswitch et {\n\tcase TypeMessagePack:\n\t\treturn NewMessagePackEncoder(), nil\n\tcase TypeProtocolBuffer:\n\t\treturn NewProtocolBufferEncoder(), nil\n\tcase TypeJSON:\n\t\treturn NewJSONEncoder(), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"cannot create encoder for unknown Typee %d\", et)\n\t}\n}", "title": "" }, { "docid": "1de6d98c909047a19535882513706c1c", "score": "0.52312046", "text": "func NewEncoder() *EncoderXML {\n\treturn &EncoderXML{\n\t\tMaxBodySize: 4194304, // 4MB\n\t\tIndented: false,\n\t\tAcceptHeader: \"application/xml\",\n\t\tContentTypeHeader: \"application/xml;charset=utf-8\",\n\t}\n}", "title": "" }, { "docid": "9fe83969dfdb7315a5ed1f469c6b6c75", "score": "0.5229958", "text": "func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser {\n\treturn &encoder{enc: enc, w: w}\n}", "title": "" }, { "docid": "ebdc002ed37a743fc702a6fda2ae7a2d", "score": "0.5225841", "text": "func NewEncoder(dynTabMaxSize uint32) *Encoder {\n\tdynT := newDynamicTable(dynTabMaxSize)\n\treturn &Encoder{\n\t\tdynTab: dynT,\n\t}\n}", "title": "" }, { "docid": "c0c6527c3c852aaa6acc0da54455a1e6", "score": "0.52168036", "text": "func NewEncoder(opts EncoderOptions) (*Encoder, error) {\n\tvar e Encoder\n\tif err := opts.validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"imaging: error validating encoder options: %w\", err)\n\t}\n\tif opts.ConcurrencyLevel > 0 {\n\t\te.sem = make(chan struct{}, opts.ConcurrencyLevel)\n\t}\n\te.opts = opts\n\te.pngEncoder = &png.Encoder{}\n\treturn &e, nil\n}", "title": "" }, { "docid": "1f01ee7b882898168835bc2096b30251", "score": "0.5214224", "text": "func NewEmptyPublisher() Publisher {\n\treturn NewPublisher(func(s Subscriber) {\n\t\tvar done int32 = 
0\n\t\ts.OnSubscribe(NewSubscription(func(n int) {\n\t\t\tif atomic.CompareAndSwapInt32(&done, 0, 1) {\n\t\t\t\ts.OnComplete()\n\t\t\t}\n\t\t}, func() {}))\n\t})\n}", "title": "" }, { "docid": "8fac5408a26330389293dc743d1c1b25", "score": "0.51270664", "text": "func (e *Encoder) EncodeNull() error {\n\treturn e.encode(NullMarker, func(*Encoder) error { return nil })\n}", "title": "" }, { "docid": "dc1ac7cf9e8bbbd7cf4f9baecc2f1873", "score": "0.51140386", "text": "func (e *Encoder) Encode(input any) (any, error) {\n\treturn e.encode(reflect.ValueOf(input))\n}", "title": "" }, { "docid": "59545496910be946191803a2932b2363", "score": "0.5094511", "text": "func newFieldEncoder() *fieldEncoder {\n\tfe := encPool.Get().(*fieldEncoder)\n\tfe.fields = fe.fields[:0]\n\tfe.namespace = fe.namespace[:0]\n\treturn fe\n}", "title": "" }, { "docid": "63e5d827cc55382b12653544d9121afb", "score": "0.5087002", "text": "func Empty() Bytes {\n\treturn make([]byte, Size)\n}", "title": "" }, { "docid": "3014bbb3c1150fc024c1f03340f2587c", "score": "0.5077356", "text": "func NewEncoder(w io.Writer, options ...EncoderOption) *Encoder {\n\tenc := &Encoder{w: w, indentSize: 2}\n\tfor _, opt := range options {\n\t\topt(enc)\n\t}\n\tif enc.Colors == nil {\n\t\tenc.Colors = &Colors{\n\t\t\tKeyColor: color.New(),\n\t\t\tValueColor: color.New(),\n\t\t\tCommentColor: color.New(),\n\t\t}\n\t}\n\treturn enc\n}", "title": "" }, { "docid": "6918d8042357656d30d7cb965b2eb8cd", "score": "0.5066783", "text": "func NewEncoder(byteLen int) (*Encoder, error) {\n\tif byteLen < 1 || 8 < byteLen {\n\t\treturn nil, fmt.Errorf(\"invalid byte length: %d\", byteLen)\n\t}\n\n\treturn &Encoder{l: byteLen}, nil\n}", "title": "" }, { "docid": "7079f8ca792c910558ce9f3a101a5462", "score": "0.50655544", "text": "func (enc *Encoder) Encode(v interface{}) error {}", "title": "" }, { "docid": "3c68c4a6483300fb0785bd4cc11fd6f6", "score": "0.5064684", "text": "func NewEmptyTransformer() Transformer {\n\treturn &emptyTransformer{}\n}", "title": "" }, { "docid": "2350214c735fc7eb639526a9631178f0", "score": "0.5063244", "text": "func (t *EncoderType) Get() interface{} { return *t }", "title": "" }, { "docid": "187265c42480c3cfb307773b200664de", "score": "0.50588614", "text": "func BorrowEncoder(w io.Writer) *Encoder {\n\tenc := encPool.Get().(*Encoder)\n\tenc.w = w\n\tenc.buf = enc.buf[:0]\n\tenc.isPooled = 0\n\tenc.err = nil\n\tenc.hasKeys = false\n\tenc.keys = nil\n\treturn enc\n}", "title": "" }, { "docid": "22a63d0a92937a5fc41905cd4430c240", "score": "0.50584584", "text": "func (e *Encoder) Clone() zapcore.Encoder {\n\tencoder := e.enc.Clone()\n\treturn &Encoder{encoder}\n}", "title": "" }, { "docid": "2c4c89fba898e0c92c42838d747d9cc0", "score": "0.5051456", "text": "func MakeDefaultCodec() *codec.Codec {\n\tvar cdc = codec.New()\n\n\t// cosmos-sdk using interface to register all the modules codec.\n\tModuleBasics.RegisterCodec(cdc)\n\tbank.RegisterCodec(cdc)\n\tnameservice.RegisterCodec(cdc)\n\tsdkTypes.RegisterCodec(cdc)\n\tkyc.RegisterCodec(cdc)\n\tcodec.RegisterCrypto(cdc)\n\tfungible.RegisterCodec(cdc)\n\tnonFungible.RegisterCodec(cdc)\n\tfee.RegisterCodec(cdc)\n\tmaintenance.RegisterCodec(cdc)\n\tauth.RegisterCodec(cdc)\n\treturn cdc\n}", "title": "" }, { "docid": "4e8fe98bbed64766642ab3aaa3738a3c", "score": "0.5051161", "text": "func New() *Decoder { return &Decoder{} }", "title": "" }, { "docid": "6909283ce20ea912b24e4082d77b51da", "score": "0.50257385", "text": "func testEncoder(t *testing.T, v interface{}, err error, expected []byte) {\n\t// 
buffer is where we write the CBOR encoded values\n\tvar buffer = bytes.Buffer{}\n\t// create a new encoder writing to buffer, and encode v with it\n\tvar e = NewEncoder(&buffer).Encode(v)\n\n\tif e != err {\n\t\tt.Fatalf(\"err: %#v != %#v with %#v\", e, err, v)\n\t}\n\n\tif !bytes.Equal(buffer.Bytes(), expected) {\n\t\tt.Fatalf(\n\t\t\t\"(%#v) %#v != %#v\", v, buffer.Bytes(), expected,\n\t\t)\n\t}\n}", "title": "" }, { "docid": "81db5b3de7c04eb5028faff5c642c7cb", "score": "0.5022097", "text": "func NewEncoder() *EncoderJSON {\n\treturn &EncoderJSON{\n\t\tMaxBodySize: 2097152, // 2MB\n\t\tIndented: false,\n\t\tAcceptHeader: \"application/json\",\n\t\tContentTypeHeader: \"application/json;charset=utf-8\",\n\t}\n}", "title": "" } ]
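The positive passage above pairs the query with its one matching implementation, while the negatives are near-miss encoder constructors. A self-contained sketch of the positive pattern follows; the Encoder interface is an assumption, since the rows reference it without defining it.

```go
package main

import "fmt"

// Encoder is an assumed minimal interface; the dataset rows
// reference it but never define it.
type Encoder interface {
	Encode(v interface{}) error
}

// emptyEncoder is the no-op implementation from the positive passage.
type emptyEncoder struct{}

func (*emptyEncoder) Encode(v interface{}) error { return nil }

// NewEmptyEncoder returns an empty (no-op) encoder, as in the row above.
func NewEmptyEncoder() Encoder {
	return &emptyEncoder{}
}

func main() {
	enc := NewEmptyEncoder()
	fmt.Println(enc.Encode("anything")) // <nil>: the empty encoder ignores input
}
```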
ce44e234011513468f2cf5626e08cf5f
Init populates the metadata struct with values from the buffer
[ { "docid": "132106cfdc43b3cd5f1cddef96d45fe4", "score": "0.74086654", "text": "func (metadata *Metadata) Init(buffer *bytes.Buffer) error {\n\tif err := json.NewDecoder(buffer).Decode(&metadata); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "title": "" } ]
[ { "docid": "369c7a3e800f5dbc46711046eaf0289c", "score": "0.6446612", "text": "func (b *Buffer) Init(buf []byte) {\n b.buf = buf\n b.off = 0\n b.layer_off = 0\n}", "title": "" }, { "docid": "58236420c8217c7415b308d93bc1132d", "score": "0.5935042", "text": "func (mm *metricMetadata) initialize(fields field.Metas, fieldMaxID int32, tagKeys tag.Metas) {\n\tmm.fields = fields\n\tmm.tagKeys = tagKeys\n\tmm.fieldIDSeq.Store(fieldMaxID)\n}", "title": "" }, { "docid": "3d55bc43a29e3cdbc554cc6fa7e75511", "score": "0.5697314", "text": "func (f *packedFileReader) init() error { return f.v.init() }", "title": "" }, { "docid": "49e69de14b227ddcce5b6fc42a108800", "score": "0.56805116", "text": "func (b *BigData) Init(config []byte) error {\n\n\treturn nil\n}", "title": "" }, { "docid": "631046e6cf89e15580b43029916b9b80", "score": "0.56609505", "text": "func (this *node) init(tree *BTree) {\n\tthis.size = int(tree.header.KeySize) + /*size of offset*/ 4\n\tthis.offset = -1\n\tthis.count = 0\n\tthis.raw = make([]byte, tree.header.bufferSize(), tree.header.bufferSize()+uint32(this.size))\n\tthis.datas = this.raw[8:]\n\tcopy(this.raw[0:4], []byte{0xff, 0xff, 0xff, 0xff})\n}", "title": "" }, { "docid": "bd0473f8b9b363e6aaeb0ec7219f3f7a", "score": "0.54629594", "text": "func (target *QueueAttrs) Init(source map[string]interface{}) {\n\tif x := source[\"capacity\"]; x != nil {\n\t\tif y, ok := x.(int); ok {\n\t\t\ttarget.Capacity = &y\n\t\t}\n\t}\n\n\tif x := source[\"closed\"]; x != nil {\n\t\ttarget.Closed = true\n\t}\n\n\tif x := source[\"length\"]; x != nil {\n\t\tif y, ok := x.(int); ok {\n\t\t\ttarget.Length = &y\n\t\t}\n\t}\n\n\tif x := source[\"name\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.Name = &y\n\t\t}\n\t}\n\n\tif x := source[\"suspended\"]; x != nil {\n\t\ttarget.Suspended = true\n\t}\n\n\tif x := source[\"upload\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.Upload = &y\n\t\t}\n\t}\n}", "title": "" }, { "docid": "1ecb8de68addf4c693cef3a079253310", "score": "0.5447695", "text": "func newMetadata() *metadata {\n\tm := &metadata{\n\t\tdataMu: &sync.Mutex{},\n\n\t\tmsgPerSec: newMessageRate(1*time.Second, 30),\n\t\tmsgPerMin: newMessageRate(1*time.Minute, 15),\n\t\tmsgPerHr: newMessageRate(1*time.Hour, 10),\n\t\tmsgPerDay: newMessageRate(24*time.Hour, 7),\n\t}\n\n\tm.msgPerSec.init()\n\tm.msgPerMin.init()\n\tm.msgPerHr.init()\n\tm.msgPerDay.init()\n\n\treturn m\n}", "title": "" }, { "docid": "8d47a11149a669d370e0dfc5d0cfabbb", "score": "0.5437093", "text": "func (b *propBuf) Init(\n\tp proposer, tracker tracker.Tracker, clock *hlc.Clock, settings *cluster.Settings,\n) {\n\tb.p = p\n\tb.full.L = p.rlocker()\n\tb.clock = clock\n\tb.evalTracker = tracker\n\tb.settings = settings\n\tb.liBase = p.leaseAppliedIndex()\n}", "title": "" }, { "docid": "5f1848094eb2e7a1e761e916be37dbf2", "score": "0.53441286", "text": "func NewMetadata(key string, data []byte) Metadata {\n\treturn Metadata{\n\t\tPath: key,\n\t\tSize: len(data),\n\t\tTimestamp: time.Now().UTC(),\n\t\tHash: sha1.Sum(data),\n\t}\n}", "title": "" }, { "docid": "fccbda85bb3082adbd50c88de3e4851b", "score": "0.53148437", "text": "func (c *CRDT) Init(metadata state.Metadata) error {\n\tm, err := c.parseMetadata(metadata)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.metadata = m\n\tgo c.startServer()\n\treturn nil\n}", "title": "" }, { "docid": "625f3c1775a369c50055aeb21e84623a", "score": "0.52658725", "text": "func (m *masterParser) Init(data, origin string, ttl uint32) {\n\tm.zone = NewZoneFile(\"(data)\", 
\"\")\n\tm.lineno = 1\n\tm.origin = strings.ToLower(origin)\n\tm.ttl = ttl\n\tif m.reader == nil {\n\t\tm.reader = new(libio.Reader)\n\t}\n\tm.reader.Init([]byte(data))\n}", "title": "" }, { "docid": "088dd81554d8df27d81acfdb425bd395", "score": "0.5246775", "text": "func newMetadata(data []byte) (*metadata, error) {\n\treturn &metadata{data: data}, nil\n\t// TODO: Create the data on disk atomically.\n}", "title": "" }, { "docid": "9c7b04218383ee596a9a5c6eceb6e876", "score": "0.5191952", "text": "func InitBuffer() {\n\tblockBuffer = NewLRUCache()\n\tfileNamePos2Int = make(map[nameAndPos]int, InitSize*4)\n\tposNum = 0\n}", "title": "" }, { "docid": "d5b623c03c544ebd02215a761a17143e", "score": "0.5180514", "text": "func (target *TagAttrs) Init(source map[string]interface{}) {\n\tif x := source[\"name\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.Name = &y\n\t\t}\n\t}\n\n\tif x := source[\"parent_id\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.ParentId = &y\n\t\t}\n\t}\n\n\tif x := source[\"theme\"]; x != nil {\n\t\tif y, ok := x.(map[string]interface{}); ok {\n\t\t\ttarget.Theme = NewTagThemeAttr(y)\n\t\t}\n\t}\n}", "title": "" }, { "docid": "5ce03fdd8655e12a9aff57bc50739128", "score": "0.51781577", "text": "func (b *Base) Init(r io.ReadCloser, readNext BaseReadNext) {\n\tb.Reader = bufio.NewReader(r)\n\tb.raw = r\n\tb.rn = readNext\n}", "title": "" }, { "docid": "21bd9aa6fbab6adf61186bea7e22b6b1", "score": "0.51622593", "text": "func (f *FieldInfos) init() error {\n\tf.addField(\"\", false)\n\treturn nil\n}", "title": "" }, { "docid": "713861dc174f83d627e41ed9d192bc13", "score": "0.51510584", "text": "func (target *ChannelAttrs) Init(source map[string]interface{}) {\n\tif x := source[\"autohide\"]; x != nil {\n\t\ttarget.Autohide = true\n\t}\n\n\tif x := source[\"autosilence\"]; x != nil {\n\t\ttarget.Autosilence = true\n\t}\n\n\tif x := source[\"blacklisted_message_types\"]; x != nil {\n\t\tif y, ok := x.([]interface{}); ok {\n\t\t\ttarget.BlacklistedMessageTypes = AppendStrings(nil, y)\n\t\t}\n\t}\n\n\tif x := source[\"closed\"]; x != nil {\n\t\ttarget.Closed = true\n\t}\n\n\tif x := source[\"disclosed_since\"]; x != nil {\n\t\tif y, ok := x.(int); ok {\n\t\t\ttarget.DisclosedSince = &y\n\t\t}\n\t}\n\n\tif x := source[\"followable\"]; x != nil {\n\t\ttarget.Followable = true\n\t}\n\n\tif x := source[\"name\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.Name = &y\n\t\t}\n\t}\n\n\tif x := source[\"owner_id\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.OwnerId = &y\n\t\t}\n\t}\n\n\tif x := source[\"private\"]; x != nil {\n\t\ttarget.Private = true\n\t}\n\n\tif x := source[\"public\"]; x != nil {\n\t\ttarget.Public = true\n\t}\n\n\tif x := source[\"ratelimit\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.Ratelimit = &y\n\t\t}\n\t}\n\n\tif x := source[\"suspended\"]; x != nil {\n\t\ttarget.Suspended = true\n\t}\n\n\tif x := source[\"topic\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.Topic = &y\n\t\t}\n\t}\n\n\tif x := source[\"upload\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.Upload = &y\n\t\t}\n\t}\n\n\tif x := source[\"verified_join\"]; x != nil {\n\t\ttarget.VerifiedJoin = true\n\t}\n}", "title": "" }, { "docid": "08394150a7a36ec067a2c80b8c97b3b2", "score": "0.51455855", "text": "func createMeta(buff *bytes.Buffer, start bool, id string, from, length uint64) {\n\taddedBytes := 1\n\n\tif start {\n\t\tbuff.WriteByte(0)\n\t} else {\n\t\tbuff.WriteByte(1)\n\t}\n\n\taddedBytes 
+= len(id)\n\tbuff.WriteString(id)\n\n\taddedBytes += 8\n\tfromBytes := make([]byte, 4)\n\tfromBytes[0] = byte(from % 255)\n\tfrom /= 255\n\tfromBytes[1] = byte(from % 255)\n\tfrom /= 255\n\tfromBytes[2] = byte(from % 255)\n\tfrom /= 255\n\tfromBytes[3] = byte(from % 255)\n\tbuff.Write(fromBytes)\n\n\tlengthBytes := make([]byte, 4)\n\tlengthBytes[0] = byte(length % 255)\n\tlength /= 255\n\tlengthBytes[1] = byte(length % 255)\n\tlength /= 255\n\tlengthBytes[2] = byte(length % 255)\n\tlength /= 255\n\tlengthBytes[3] = byte(length % 255)\n\tbuff.Write(lengthBytes)\n}", "title": "" }, { "docid": "5a84d82ca23f67a6661fe3093c36d9fc", "score": "0.51368153", "text": "func (target *DialogueMemberAttrs) Init(source map[string]interface{}) {\n\tif x := source[\"audience_ended\"]; x != nil {\n\t\ttarget.AudienceEnded = true\n\t}\n\n\tif x := source[\"queue_id\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.QueueId = &y\n\t\t}\n\t}\n\n\tif x := source[\"rating\"]; x != nil {\n\t\tif y, ok := x.(int); ok {\n\t\t\ttarget.Rating = &y\n\t\t}\n\t}\n\n\tif x := source[\"writing\"]; x != nil {\n\t\ttarget.Writing = true\n\t}\n}", "title": "" }, { "docid": "2d3c5e6bf9f0315e4ba017b76f5a8da2", "score": "0.51240534", "text": "func (s *Store) Init(metadataRaw secretstores.Metadata) error {\n\tmetadata, err := s.parseSecretManagerMetadata(metadataRaw)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := s.getClient(metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to setup secretmanager client: %s\", err)\n\t}\n\n\ts.client = client\n\ts.ProjectID = metadata.ProjectID\n\n\treturn nil\n}", "title": "" }, { "docid": "66e75f57838766912a881620337edb65", "score": "0.51041925", "text": "func (target *ChannelMemberAttrs) Init(source map[string]interface{}) {\n\tif x := source[\"autohide\"]; x != nil {\n\t\ttarget.Autohide = true\n\t}\n\n\tif x := source[\"moderator\"]; x != nil {\n\t\ttarget.Moderator = true\n\t}\n\n\tif x := source[\"operator\"]; x != nil {\n\t\ttarget.Operator = true\n\t}\n\n\tif x := source[\"silenced\"]; x != nil {\n\t\ttarget.Silenced = true\n\t}\n\n\tif x := source[\"since\"]; x != nil {\n\t\tif y, ok := x.(int); ok {\n\t\t\ttarget.Since = &y\n\t\t}\n\t}\n\n\tif x := source[\"writing\"]; x != nil {\n\t\ttarget.Writing = true\n\t}\n}", "title": "" }, { "docid": "c45a09812ef2f634805fccde12a86811", "score": "0.50799084", "text": "func (b *buf) initPkt(t packetType) {\n\tb.pb.Reset()\n\tb.h.token, b.h.status = t, 0\n\tb.h.write(&b.he)\n}", "title": "" }, { "docid": "544ba5a5f75683ecf2633522f4166df3", "score": "0.50733864", "text": "func initMetadata(m *metadata, values url.Values) {\n\tfor name, values := range values {\n\t\tif matches := serverKeyRe.MatchString(name); matches {\n\t\t\tcontinue // We MUST skip variables reserved to the server\n\t\t}\n\t\t*m = append(*m, meta.NameValue{Name: name, Value: values[0]})\n\t}\n}", "title": "" }, { "docid": "a852b1a598a0d7dadc18cb73879281fb", "score": "0.5070795", "text": "func (r *ReleaseManifest) init() {\n\tif r.AppMetadata == nil {\n\t\tr.AppMetadata = map[string]*AppMetadata{}\n\t}\n}", "title": "" }, { "docid": "76372ee0dba2b472d081f27f01e45fbb", "score": "0.50634587", "text": "func (d *TimeDecoder) Init(b []byte) {\n\td.v = 0\n\td.i = 0\n\td.ts = d.ts[:0]\n\td.err = nil\n\tif len(b) > 0 {\n\t\t// Encoding type is stored in the 4 high bits of the first byte\n\t\td.encoding = b[0] >> 4\n\t}\n\td.decode(b)\n}", "title": "" }, { "docid": "ad13e36d508857fc976dcf6eae37e74d", "score": "0.5041052", "text": "func (m *Message) 
Init() {\n\tm.CreatedAt = time.Now()\n\tm.UpdatedAt = time.Now()\n}", "title": "" }, { "docid": "bfc788ff85282c10d725ff2cb574aa5c", "score": "0.5037676", "text": "func (target *QueueMemberAttrs) Init(source map[string]interface{}) {\n}", "title": "" }, { "docid": "9e6d03fa22c14f55b36c5252401de77e", "score": "0.503553", "text": "func InitMetaData(rootDir string) (*MetaData, error) {\n\tvar metaData *MetaData\n\tmetaFile := filepath.Join(rootDir, \"support\", constants.RuntimeMetaFile)\n\tif fileutils.FileExists(metaFile) {\n\t\tcontents, err := fileutils.ReadFile(metaFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmetaData, err = ParseMetaData(contents)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tmetaData = &MetaData{}\n\t}\n\n\tif metaData.Env == nil {\n\t\tmetaData.Env = map[string]string{}\n\t}\n\n\tif metaData.PathListEnv == nil {\n\t\tmetaData.PathListEnv = map[string]string{}\n\t}\n\n\tvar relInstallDir string\n\tinstallDirs := strings.Split(constants.RuntimeInstallDirs, \",\")\n\tfor _, dir := range installDirs {\n\t\tif fileutils.DirExists(filepath.Join(rootDir, dir)) {\n\t\t\trelInstallDir = dir\n\t\t}\n\t}\n\n\tif relInstallDir == \"\" {\n\t\tlogging.Debug(\"Did not find an installation directory relative to metadata file.\")\n\t}\n\n\tmetaData.InstallDir = relInstallDir\n\terr := metaData.Prepare(filepath.Join(rootDir, relInstallDir))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn metaData, nil\n}", "title": "" }, { "docid": "0680d96019bab04d43fb085a996077d9", "score": "0.5011409", "text": "func (target *IdentityAttrs) Init(source map[string]interface{}) {\n\tif x := source[\"auth\"]; x != nil {\n\t\ttarget.Auth = true\n\t}\n\n\tif x := source[\"blocked\"]; x != nil {\n\t\ttarget.Blocked = true\n\t}\n\n\tif x := source[\"pending\"]; x != nil {\n\t\ttarget.Pending = true\n\t}\n\n\tif x := source[\"protected\"]; x != nil {\n\t\ttarget.Protected = true\n\t}\n\n\tif x := source[\"public\"]; x != nil {\n\t\ttarget.Public = true\n\t}\n\n\tif x := source[\"rejected\"]; x != nil {\n\t\ttarget.Rejected = true\n\t}\n}", "title": "" }, { "docid": "d198c0e2f34215d69cb7a82ae8381778", "score": "0.4989205", "text": "func (s ConnectResponse) NewMetadata(n int32) (Metadata_List, error) {\n\tl, err := NewMetadata_List(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn Metadata_List{}, err\n\t}\n\terr = s.Struct.SetPtr(1, l.List.ToPtr())\n\treturn l, err\n}", "title": "" }, { "docid": "01efcfc6bd24b43122d141f16e3bbbf3", "score": "0.4984709", "text": "func FromMetadataInfo(pbMetadata *api.Metadata) types.MetadataInfo {\n\treturn types.MetadataInfo{\n\t\tClock: pbMetadata.Clock,\n\t\tData: pbMetadata.Data,\n\t}\n}", "title": "" }, { "docid": "e015a08e2a7dfa10cd8b3c5e4d86dd1c", "score": "0.49709362", "text": "func init() {\n\tmd := activity.NewMetadata(jsonMetadata)\n\tactivity.Register(NewActivity(md))\n}", "title": "" }, { "docid": "e015a08e2a7dfa10cd8b3c5e4d86dd1c", "score": "0.49709362", "text": "func init() {\n\tmd := activity.NewMetadata(jsonMetadata)\n\tactivity.Register(NewActivity(md))\n}", "title": "" }, { "docid": "e015a08e2a7dfa10cd8b3c5e4d86dd1c", "score": "0.49709362", "text": "func init() {\n\tmd := activity.NewMetadata(jsonMetadata)\n\tactivity.Register(NewActivity(md))\n}", "title": "" }, { "docid": "876f634ebb1c750141937120d1da0bde", "score": "0.4965589", "text": "func NewMetadata(\n\treader io.Reader,\n\tpath string,\n\text string,\n\tformat Format,\n\tencodingFormat Format,\n\twidth int,\n\theight int,\n\tversion string,\n) 
(Metadata, error) {\n\tvar created time.Time\n\tvar lat, lon float64\n\tx, err := exif.Decode(reader)\n\tif err == nil {\n\t\t// time taken\n\t\tcreatedTmp, err := x.DateTime()\n\t\tif err == nil {\n\t\t\tcreated = createdTmp\n\t\t}\n\t\t// coords taken\n\t\tlatTmp, lonTmp, err := x.LatLong()\n\t\tif err == nil {\n\t\t\tlat, lon = latTmp, lonTmp\n\t\t}\n\t}\n\treturn Metadata{\n\t\tVersion: version,\n\t\tCreated: created,\n\t\tAdded: time.Now(),\n\t\tName: strings.TrimSuffix(filepath.Base(path), ext),\n\t\tExt: ext,\n\t\tOriginalFormat: string(format),\n\t\tEncodingFormat: string(encodingFormat),\n\t\tWidth: width,\n\t\tHeight: height,\n\t\tLatitude: lat,\n\t\tLongitude: lon,\n\t}, nil\n}", "title": "" }, { "docid": "0ef895d48fcb7a7276d9fbeebe659370", "score": "0.4952293", "text": "func (m *Memstore) Init(kinds []string) error {\n\treturn nil\n}", "title": "" }, { "docid": "10afd6c85ad5c3ee07d266e0a78d35e4", "score": "0.49449408", "text": "func (b *Binding) Init(metadata bindings.Metadata) error {\n\tcfg, err := metadataToConfig(metadata.Properties, b.logger)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to parse metadata properties\")\n\t}\n\tb.config = cfg\n\n\tses, err := r.Connect(b.config.ConnectOpts)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error connecting to the database\")\n\t}\n\tb.session = ses\n\n\treturn nil\n}", "title": "" }, { "docid": "0c03faea867255e3d81ed41b8ea1ad70", "score": "0.49318174", "text": "func init() {\n\tentryFields := schema.Entry{}.Fields()\n\t_ = entryFields\n\t// entryDescContent is the schema descriptor for content field.\n\tentryDescContent := entryFields[0].Descriptor()\n\t// entry.DefaultContent holds the default value on creation for the content field.\n\tentry.DefaultContent = entryDescContent.Default.(string)\n\t// entryDescCreatedAt is the schema descriptor for created_at field.\n\tentryDescCreatedAt := entryFields[1].Descriptor()\n\t// entry.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tentry.DefaultCreatedAt = entryDescCreatedAt.Default.(func() time.Time)\n}", "title": "" }, { "docid": "209454f3385a74c80ec98bd89b9c65f8", "score": "0.49215665", "text": "func newMetaFile(r io.Reader) (metaFile, error) {\n\tm := metaFile{}\n\tr = io.LimitReader(r, 16*1024)\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn m, err\n\t}\n\tlines := bytes.Split(b, []byte(\"\\r\\n\"))\n\tfor i, line := range lines {\n\t\tif len(line) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tlineno := i + 1\n\t\tparts := bytes.SplitN(line, []byte(\":\"), 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn m, fmt.Errorf(\"line %d: expecting key:value not %q\", lineno, string(line))\n\t\t}\n\t\tkey := string(parts[0])\n\t\tval := string(parts[1])\n\t\tswitch key {\n\t\tcase \"lastModifiedDate\":\n\t\t\tt, err := time.Parse(time.RFC3339, val)\n\t\t\tif err != nil {\n\t\t\t\treturn m, fmt.Errorf(\"line %d: expecting lastModifiedDate={RFC3339} not %q\", lineno, string(line))\n\t\t\t}\n\t\t\tm.LastModifiedDate = t\n\t\tcase \"size\":\n\t\t\tv, err := strconv.Atoi(val)\n\t\t\tif err != nil {\n\t\t\t\treturn m, fmt.Errorf(\"line %d: expecting size={int} not %q\", lineno, string(line))\n\t\t\t}\n\t\t\tm.Size = v\n\t\tcase \"zipSize\":\n\t\t\tv, err := strconv.Atoi(val)\n\t\t\tif err != nil {\n\t\t\t\treturn m, fmt.Errorf(\"line %d: expecting zipSize={int} not %q\", lineno, string(line))\n\t\t\t}\n\t\t\tm.ZipSize = v\n\t\tcase \"gzSize\":\n\t\t\tv, err := strconv.Atoi(val)\n\t\t\tif err != nil {\n\t\t\t\treturn m, fmt.Errorf(\"line %d: expecting 
gzSize={int} not %q\", lineno, string(line))\n\t\t\t}\n\t\t\tm.GzSize = v\n\t\tcase \"sha256\":\n\t\t\tm.SHA256 = strings.ToUpper(val)\n\t\t}\n\t}\n\treturn m, nil\n}", "title": "" }, { "docid": "0c341ede6c75eec82565722f5431e42f", "score": "0.49107245", "text": "func (e *UserCreatedEvent) Init(data *eventstore.EventData) {\n\te.EventNumber = data.EventNumber\n\te.StreamID = data.StreamID\n\te.Timestamp = data.Timestamp\n\n\tvar eventData userCreatedEventData\n\terr := json.Unmarshal([]byte(data.Data), &eventData)\n\tif err != nil {\n\t\tfmt.Printf(\"Error parsing eventstore data: %v\", err)\n\t}\n\te.Name = eventData.Name\n\te.Age = eventData.Age\n}", "title": "" }, { "docid": "ad7c5434c5c09ceafbe820f217173ffa", "score": "0.48989382", "text": "func (s ConnectRequest) NewMetadata(n int32) (Metadata_List, error) {\n\tl, err := NewMetadata_List(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn Metadata_List{}, err\n\t}\n\terr = s.Struct.SetPtr(1, l.List.ToPtr())\n\treturn l, err\n}", "title": "" }, { "docid": "49fd23daedbfe0ccfad248be5228c6fd", "score": "0.48982114", "text": "func (af *AdaptationField) Initialize(pos int64, options options.Options) {\n\taf.pcr = 0\n\taf.pos = pos\n\taf.options = options\n\taf.buf = af.buf[0:0]\n\n\taf.adaptationFieldLength = 0\n\taf.discontinuityIndicator = 0\n\taf.randomAccessIndicator = 0\n\taf.elementaryStreamPriorityIndicator = 0\n\taf.pcrFlag = 0\n\taf.oPcrFlag = 0\n\taf.splicingPointFlag = 0\n\taf.transportPrivateDataFlag = 0\n\taf.adaptationFieldExtensionFlag = 0\n\taf.programClockReferenceBase = 0\n\taf.programClockReferenceExtension = 0\n\taf.originalProgramClockReferenceBase = 0\n\taf.originalProgramClockReferenceExtension = 0\n\taf.spliceCountdown = 0\n\taf.transportPrivateDataLength = 0\n\taf.privateDataByte = af.privateDataByte[0:0]\n\taf.adaptationFieldExtensionLength = 0\n\taf.ltwFlag = 0\n\taf.piecewiseRateFlag = 0\n\taf.seamlessSpliceFlag = 0\n\taf.ltwValidFlag = 0\n\taf.ltwOffset = 0\n\taf.piecewiseRate = 0\n\taf.spliceType = 0\n\taf.dtsNextAu = 0\n}", "title": "" }, { "docid": "02e2fa0a5e4b03456306327d02be01b0", "score": "0.487894", "text": "func (rc *ReadCloser) init(r *tar.Reader) error {\n\tdefer rc.Close()\n\n\trc.File = make([]*tar.Header, 0, 10)\n\tfor {\n\t\th, err := r.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trc.File = append(rc.File, h)\n\t}\n\treturn nil\n}", "title": "" }, { "docid": "bcebcb90f779480b6d346f3a968d5acc", "score": "0.48655573", "text": "func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {\n\tp.init(typ, name, tag, f, true)\n}", "title": "" }, { "docid": "153f6fc2d0c1079cd8e6b1311e111b9c", "score": "0.48509154", "text": "func (t *twitterInput) Init(metadata bindings.Metadata) error {\n\treturn t.parseMetadata(metadata)\n}", "title": "" }, { "docid": "0c3b32ba0d7c93d48945fdca47f36762", "score": "0.48503655", "text": "func (_this *RootBuilder) Init(session *Session, dstType reflect.Type, opts *options.BuilderOptions) {\n\t_this.dstType = dstType\n\t_this.context = context(opts,\n\t\tsession.opts.CustomBinaryBuildFunction,\n\t\tsession.opts.CustomTextBuildFunction,\n\t\t_this.referenceFiller.NotifyMarker,\n\t\t_this.referenceFiller.NotifyReference,\n\t\tsession.GetBuilderGeneratorForType)\n\t_this.object = reflect.New(dstType).Elem()\n\t_this.chunkedData = make([]byte, 0, 128)\n\n\tgenerator := session.GetBuilderGeneratorForType(dstType)\n\t_this.context.StackBuilder(newTopLevelBuilder(_this, 
generator))\n\t_this.referenceFiller.Init()\n}", "title": "" }, { "docid": "48295d92a0e6fb9bb03ad2b2aad790de", "score": "0.4843311", "text": "func (s *StatFHelper) Init(comm *Communicator, node string) {\r\n\ts.node = node\r\n\ts.lStatInfo = list.New()\r\n\ts.lStatInfoFromServer = list.New()\r\n\ts.mlock = new(sync.Mutex)\r\n\ts.mStatInfo = make(map[statf.StatMicMsgHead]statf.StatMicMsgBody)\r\n\ts.mStatCount = make(map[statf.StatMicMsgHead]int)\r\n\ts.comm = comm\r\n\ts.sf = new(statf.StatF)\r\n\ts.comm.StringToProxy(s.node, s.sf)\r\n}", "title": "" }, { "docid": "faa1df9476dc31f51d2445f23e321867", "score": "0.48322544", "text": "func NewMetadata(src tag.Metadata, picture string, hash string, path string) Metadata {\n\tvar dest Metadata\n\n\tdest.Format = string(src.Format())\n\tdest.FileType = string(src.FileType())\n\tdest.Title = src.Title()\n\tdest.Album = src.Album()\n\tdest.Artist = src.Artist()\n\tdest.AlbumArtist = src.AlbumArtist()\n\tdest.Composer = src.Composer()\n\tdest.Genre = src.Genre()\n\tdest.Year = src.Year()\n\n\tdest.Track, dest.TotalTracks = src.Track()\n\tdest.Disc, dest.TotalDisks = src.Disc()\n\n\tdest.Comment = src.Comment()\n\n\tdest.Picture = picture\n\tdest.Hash = hash\n\tdest.Path = path\n\n\treturn dest\n}", "title": "" }, { "docid": "72e7a9a795386e51e5bedd5064a15ec9", "score": "0.48251548", "text": "func (target *RealmAttrs) Init(source map[string]interface{}) {\n\tif x := source[\"name\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.Name = &y\n\t\t}\n\t}\n\n\tif x := source[\"owner_account\"]; x != nil {\n\t\tif y, ok := x.(map[string]interface{}); ok {\n\t\t\ttarget.OwnerAccount = NewRealmOwnerAccountAttr(y)\n\t\t}\n\t}\n\n\tif x := source[\"owner_id\"]; x != nil {\n\t\tif y, ok := x.(string); ok {\n\t\t\ttarget.OwnerId = &y\n\t\t}\n\t}\n\n\tif x := source[\"suspended\"]; x != nil {\n\t\ttarget.Suspended = true\n\t}\n\n\tif x := source[\"theme\"]; x != nil {\n\t\tif y, ok := x.(map[string]interface{}); ok {\n\t\t\ttarget.Theme = NewRealmThemeAttr(y)\n\t\t}\n\t}\n}", "title": "" }, { "docid": "cf003b2e660076052be16b4165be9e05", "score": "0.4824898", "text": "func (o *Object) setMetadata(info os.FileInfo) {\n\to.modTime = info.ModTime()\n\to.size = info.Size()\n\to.mode = info.Mode()\n}", "title": "" }, { "docid": "17a779926b6dde994e1cc7baedb2299f", "score": "0.48229578", "text": "func (iom *IOMetric0) Init() {\n\tif iom.LatencyNsHist == nil {\n\t\tiom.LatencyNsHist = make([]uint64, IOMetric0NumLatencyHistBuckets)\n\t}\n\tif iom.LatencyNsMaxHist == nil {\n\t\tiom.LatencyNsMaxHist = make([]uint64, IOMetric0NumMaxLatencyHistBuckets)\n\t}\n\tif iom.SizeBytesHist == nil {\n\t\tiom.SizeBytesHist = make([]uint64, IOMetric0NumSizeHistBuckets)\n\t}\n}", "title": "" }, { "docid": "b64bd98dff9a3ecf47b9cdca22c4ed5d", "score": "0.4811062", "text": "func init() {\n\ttSequence = 0\n\tdata = make(map[int64]*Topic)\n\tdescSorted = []*Topic{}\n\n}", "title": "" }, { "docid": "39ffab2c52d69df139e2f592627a33cb", "score": "0.4801694", "text": "func MetadataFromBytes(b []byte) (*Metadata, error) {\n\tpbd := new(pb.Data)\n\terr := proto.Unmarshal(b, pbd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif pbd.GetType() != pb.Data_Metadata {\n\t\treturn nil, errors.New(\"incorrect node type\")\n\t}\n\n\tpbm := new(pb.Metadata)\n\terr = proto.Unmarshal(pbd.Data, pbm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmd := new(Metadata)\n\tmd.MimeType = pbm.GetMimeType()\n\treturn md, nil\n}", "title": "" }, { "docid": "642c76c271ab896f1b3922cdcdd310e5", "score": 
"0.479341", "text": "func (ps *privateStorage) init() {\n\tps.datumCtx = tree.MakeFmtCtx(&ps.keyBuf.Buffer, tree.FmtSimple)\n\tps.privatesMap = make(map[privateKey]PrivateID)\n\tps.privates = make([]interface{}, 1)\n}", "title": "" }, { "docid": "b679eb57cc5efb657023a605c12af910", "score": "0.4781889", "text": "func (*InitializeResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{1}\n}", "title": "" }, { "docid": "6fcf11f26dd7d9ca0ef11e7c9d8c4174", "score": "0.4776816", "text": "func New(mimetype string, data []byte) *Object {\n\tsum := md5.New()\n\tsum.Write(data)\n\toid := objectid.New(sum)\n\n\tmd := metadata.New(int64(len(data)), mimetype, oid)\n\treturn &Object{\n\t\tMetadata: *md,\n\t\tData: data,\n\t}\n}", "title": "" }, { "docid": "2b5950d00a627c42e137e4e107699d49", "score": "0.47737184", "text": "func New() *metadata {\n\tmd := make(metadata)\n\n\treturn &md\n}", "title": "" }, { "docid": "2715b0ca280fcce28f53b2dafbd8ecad", "score": "0.47663125", "text": "func (b *BufferStack) Init() {\n\tfields := strings.Fields(config.General.Initial_Command)\n\tif len(fields) > 0 {\n\t\taccept := b.handleCommand(fields[0], fields[1:])\n\t\tif !accept {\n\t\t\tinvalidCommand(fields[0])\n\t\t}\n\t\tif len(b.buffers) == 0 {\n\t\t\tb.Push(NewSearchBuffer(\"\", STMessages))\n\t\t}\n\t}\n}", "title": "" }, { "docid": "783a0d407de37727eb1752238501bd46", "score": "0.47628826", "text": "func (t *Tag) TagInit(typeKey string) {\n\tif t.TagType == \"\" {\n\t\tt.TagType = typeKey\n\t}\n\n\t// Set the full ID (but only if it not )\n\tif t.TagID == \"\" {\n\t\tt.TagID = NewID().String()\n\t}\n}", "title": "" }, { "docid": "b5bc315561b314553dde1d182f1ee8ab", "score": "0.47586593", "text": "func (meta *NativeMetadataResponse) Build(b *fb.Builder) fb.UOffsetT {\n\tinputOffsets := make([]fb.UOffsetT, len(meta.Inputs))\n\tfor i := 0; i < len(meta.Inputs); i++ {\n\t\tio := meta.Inputs[i]\n\t\tgraphpipefb.IOMetadataStartShapeVector(b, len(io.Shape))\n\t\tfor j := len(io.Shape) - 1; j >= 0; j-- {\n\t\t\tb.PrependInt64(io.Shape[j])\n\t\t}\n\t\tendShape := b.EndVector(len(io.Shape))\n\t\tname := b.CreateString(io.Name)\n\t\tdesc := b.CreateString(io.Description)\n\t\tgraphpipefb.IOMetadataStart(b)\n\t\tgraphpipefb.IOMetadataAddShape(b, endShape)\n\t\tgraphpipefb.IOMetadataAddName(b, name)\n\t\tgraphpipefb.IOMetadataAddType(b, io.Type)\n\t\tgraphpipefb.IOMetadataAddDescription(b, desc)\n\t\tinputOffsets[i] = graphpipefb.IOMetadataEnd(b)\n\t}\n\toutputOffsets := make([]fb.UOffsetT, len(meta.Outputs))\n\n\tfor i := 0; i < len(meta.Outputs); i++ {\n\t\tio := meta.Outputs[i]\n\t\tgraphpipefb.IOMetadataStartShapeVector(b, len(io.Shape))\n\t\tfor j := len(io.Shape) - 1; j >= 0; j-- {\n\t\t\tb.PrependInt64(io.Shape[j])\n\t\t}\n\t\tendShape := b.EndVector(len(io.Shape))\n\t\tname := b.CreateString(io.Name)\n\t\tdesc := b.CreateString(io.Description)\n\t\tgraphpipefb.IOMetadataStart(b)\n\t\tgraphpipefb.IOMetadataAddShape(b, endShape)\n\t\tgraphpipefb.IOMetadataAddName(b, name)\n\t\tgraphpipefb.IOMetadataAddType(b, io.Type)\n\t\tgraphpipefb.IOMetadataAddDescription(b, desc)\n\t\toutputOffsets[i] = graphpipefb.IOMetadataEnd(b)\n\t}\n\n\tgraphpipefb.MetadataResponseStartInputsVector(b, len(meta.Inputs))\n\tfor i := len(meta.Inputs) - 1; i >= 0; i-- {\n\t\tb.PrependUOffsetT(inputOffsets[i])\n\t}\n\tinputs := b.EndVector(len(meta.Inputs))\n\n\tgraphpipefb.MetadataResponseStartOutputsVector(b, len(meta.Outputs))\n\tfor i := len(meta.Outputs) - 1; i >= 0; i-- 
{\n\t\tb.PrependUOffsetT(outputOffsets[i])\n\t}\n\toutputs := b.EndVector(len(meta.Outputs))\n\n\tdesc := b.CreateString(meta.Description)\n\tversion := b.CreateString(meta.Version)\n\tserver := b.CreateString(meta.Server)\n\tname := b.CreateString(meta.Name)\n\tgraphpipefb.MetadataResponseStart(b)\n\tgraphpipefb.MetadataResponseAddDescription(b, desc)\n\tgraphpipefb.MetadataResponseAddVersion(b, version)\n\tgraphpipefb.MetadataResponseAddName(b, name)\n\tgraphpipefb.MetadataResponseAddServer(b, server)\n\tgraphpipefb.MetadataResponseAddInputs(b, inputs)\n\tgraphpipefb.MetadataResponseAddOutputs(b, outputs)\n\treturn graphpipefb.MetadataResponseEnd(b)\n}", "title": "" }, { "docid": "d2b396448524c894b9122e7bac0c5e55", "score": "0.4758566", "text": "func (s *Reader) newMetadataReader(offset int64) (*metadataReader, error) {\n\tvar br metadataReader\n\tbr.s = s\n\tbr.offset = offset\n\terr := br.parseMetadata()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = br.readNextDataBlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &br, nil\n}", "title": "" }, { "docid": "650e358e2ae00370ffc461a3ef106d77", "score": "0.4752007", "text": "func (osInfoParser *osInfoParser) Init() error {\n\n\tvar err error\n\n\tif _, err := os.Stat(osReleaseFile); os.IsNotExist(err) {\n\t\treturn errors.Wrapf(err, \"Could not find os-release file %q\", osReleaseFile)\n\t}\n\n\tosInfoParser.reader, err = os.Open(osReleaseFile)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to open os-release file %q\", osReleaseFile)\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "a781a387f2c575686e397e9ca4cdacd7", "score": "0.47443154", "text": "func (b *PeerDependencyBuilder) Metadata(value interface{}) *PeerDependencyBuilder {\n\tb.metadata = value\n\tb.bitmap_ |= 16\n\treturn b\n}", "title": "" }, { "docid": "b52097fb9fa602d5ffa0500580b76fb3", "score": "0.47378284", "text": "func (e *Entry) ParseMeta(buf []byte) error {\n\te.Meta = NewMetaData().WithCrc(binary.LittleEndian.Uint32(buf[0:4])).\n\t\tWithTimeStamp(binary.LittleEndian.Uint64(buf[4:12])).WithKeySize(binary.LittleEndian.Uint32(buf[12:16])).\n\t\tWithValueSize(binary.LittleEndian.Uint32(buf[16:20])).WithFlag(binary.LittleEndian.Uint16(buf[20:22])).\n\t\tWithTTL(binary.LittleEndian.Uint32(buf[22:26])).WithBucketSize(binary.LittleEndian.Uint32(buf[26:30])).\n\t\tWithStatus(binary.LittleEndian.Uint16(buf[30:32])).WithDs(binary.LittleEndian.Uint16(buf[32:34])).\n\t\tWithTxID(binary.LittleEndian.Uint64(buf[34:42]))\n\treturn nil\n}", "title": "" }, { "docid": "e92aa9e6a7d8a5b39f7d116de4b341a3", "score": "0.47247902", "text": "func (it *replicatedCmdBufSlice) init(buf *replicatedCmdBuf) {\n\t*it = replicatedCmdBufSlice{\n\t\thead: replicatedCmdBufPtr{idx: 0, buf: buf, node: buf.head},\n\t\ttail: replicatedCmdBufPtr{idx: buf.len, buf: buf, node: buf.tail},\n\t}\n}", "title": "" }, { "docid": "7d1f96ccdcdc80833ad571f4fffa9b68", "score": "0.47241923", "text": "func (s *Song) FillMetadata() {\n\ts.Metadata = strings.TrimSpace(s.Metadata)\n\tif !s.HasTrack() {\n\t\treturn\n\t}\n\n\tif s.Title != \"\" && s.Artist != \"\" {\n\t\ts.Metadata = fmt.Sprintf(\"%s - %s\", s.Artist, s.Title)\n\t} else if s.Title != \"\" && s.Metadata == \"\" {\n\t\ts.Metadata = s.Title\n\t}\n}", "title": "" }, { "docid": "cd8012a480a5a36f388f037009b0f2bc", "score": "0.47192562", "text": "func decodeInit(input io.Reader) (version int16, headers []Header, err error) {\n\terr = binary.Read(input, binary.BigEndian, &version)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor {\n\t\tvar 
header Header\n\t\theader.Key, err = readInt32Slice(input)\n\t\tif err != nil{\n\t\t\tif err == io.EOF { // not really an error\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\theader.Value, err = readInt32Slice(input)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\n\t\theaders = append(headers, header)\n\t}\n}", "title": "" }, { "docid": "20fa7e3ee31dc4e43627505bbaf4cef1", "score": "0.47149655", "text": "func InitData(){\n\n /*\n Assuming AdSlot is to be unique across system having\n multiple bid of different ads on it.\n */\n\n AdData = AdDataType{\n AdData:map[int]*AdSlot{\n 10:{10.5,true,\"12345\"},\n 11:{11, true, \"12346\"},\n 12:{12,false,\"12347\"},\n },\n }\n // adding each request in the list to have a unique id for each request.\n AdData.RequestList = make([]BiddingRequest,0)\n}", "title": "" }, { "docid": "a945b3148ee5553a8a72275f8b6d679a", "score": "0.46984997", "text": "func (w *Writer) init() error {\n\tif w.e != nil {\n\t\tpanic(\"w.e expected to be nil\")\n\t}\n\tvar err error\n\tif err = w.Properties.Verify(); err != nil {\n\t\treturn err\n\t}\n\tif !(MinDictCap <= w.DictCap && int64(w.DictCap) <= MaxDictCap) {\n\t\treturn errors.New(\"lzma.Writer: DictCap out of range\")\n\t}\n\tif w.Size < 0 {\n\t\tw.EOSMarker = true\n\t}\n\tif !(maxMatchLen <= w.BufSize) {\n\t\treturn errors.New(\n\t\t\t\"lzma.Writer: lookahead buffer size too small\")\n\t}\n\n\tstate := newState(w.Properties)\n\tdict, err := newEncoderDict(w.DictCap, w.BufSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar flags encoderFlags\n\tif w.EOSMarker {\n\t\tflags = eosMarker\n\t}\n\tif w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {\n\t\treturn err\n\t}\n\n\terr = w.writeHeader()\n\treturn err\n}", "title": "" }, { "docid": "b92c380f6087c151e1a2ca5ea038adba", "score": "0.4688774", "text": "func NewStruct(fields []*Var, tags []string) *Struct", "title": "" }, { "docid": "0ad6c0843dee9023f68c7331cd8782d9", "score": "0.46847296", "text": "func (m *Memory) Init() {}", "title": "" }, { "docid": "a1375d96d6ed272ed15eaf583575672d", "score": "0.46840343", "text": "func (*Init) Descriptor() ([]byte, []int) {\n\treturn file_eventpb_eventpb_proto_rawDescGZIP(), []int{1}\n}", "title": "" }, { "docid": "41351b8dcc5745c42828ecf4b28fd048", "score": "0.46779716", "text": "func New(b []byte) Buffer {\n\treturn newInternal(b)\n}", "title": "" }, { "docid": "07c2e54e6e25776ad6a96fc9bce29843", "score": "0.46777385", "text": "func (b *Binding) Init(metadata bindings.Metadata) error {\n\tb.name = metadata.Name\n\ts, f := metadata.Properties[\"schedule\"]\n\tif !f || s == \"\" {\n\t\treturn fmt.Errorf(\"schedule not set\")\n\t}\n\t_, err := b.parser.Parse(s)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid schedule format: %s\", s)\n\t}\n\tb.schedule = s\n\n\tb.resetContext()\n\n\treturn nil\n}", "title": "" }, { "docid": "60fc3b6af22c11c853ba6d3843062f5a", "score": "0.4677505", "text": "func (c *Column) Init(name, tag string) error {\n\t(*c).Name = name\n\n\t// auto-detect foreign key\n\tif len(name) > 2 && name[len(name)-2:] == \"ID\" {\n\t\t(*c).IsForeign = true\n\t\ttbl := strings.ToLower((*c).Name[:len((*c).Name)-2])\n\t\t(*c).ForeignKey = fmt.Sprintf(\"REFERENCES %s(id)\", tbl)\n\t}\n\n\t// parse attributes\n\tattributes := strings.Split(tag, \"&\")\n\tfor _, attr := range attributes {\n\t\tpair := strings.Split(attr, \"=\")\n\t\tif len(pair) != 2 {\n\t\t\treturn fmt.Errorf(\"Malformed tag: '%s'\", attr)\n\t\t}\n\n\t\tswitch strings.ToLower(pair[0]) {\n\t\tcase \"columntype\":\n\t\t\t(*c).Type = 
pair[1]\n\t\tcase \"primary\":\n\t\t\tif pair[1] == \"true\" {\n\t\t\t\t(*c).IsPrimary = true\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Unknown attribute: '%s'\", pair[0])\n\t\t}\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "555f0ee70dd0cb86e9bad0bf114966b9", "score": "0.46649474", "text": "func (o *Object) setMetaData(info *api.MediaItem) {\n\to.url = info.BaseURL\n\to.id = info.ID\n\to.bytes = -1 // FIXME\n\to.mimeType = info.MimeType\n\to.modTime = info.MediaMetadata.CreationTime\n}", "title": "" }, { "docid": "de4ec3ca3a66ed6181557c5af697f586", "score": "0.46644524", "text": "func (d *dictDecoder) init(r io.Reader) error {\n\tbuf := make([]byte, 1)\n\tif _, err := io.ReadFull(r, buf); err != nil {\n\t\treturn err\n\t}\n\tw := int(buf[0])\n\tif w < 0 || w > 32 {\n\t\treturn errors.Errorf(\"invalid bitwidth %d\", w)\n\t}\n\tif w >= 0 {\n\t\td.keys = newHybridDecoder(w)\n\t\treturn d.keys.init(r)\n\t}\n\n\treturn errors.New(\"bit width zero with non-empty dictionary\")\n}", "title": "" }, { "docid": "e32317dc2489dcda55ba0d45fd64f8c8", "score": "0.46597275", "text": "func (m *metricProcessRuntimeMemstatsBuckHashSys) init() {\n\tm.data.SetName(\"process.runtime.memstats.buck_hash_sys\")\n\tm.data.SetDescription(\"Bytes of memory in profiling bucket hash tables.\")\n\tm.data.SetUnit(\"By\")\n\tm.data.SetEmptySum()\n\tm.data.Sum().SetIsMonotonic(false)\n\tm.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)\n}", "title": "" }, { "docid": "ecfdfdd72cdeb64cf38541bb6e275efc", "score": "0.4658285", "text": "func Init(r reader) IRSDK {\n\tif r == nil {\n\t\tvar err error\n\t\tr, err = shm.Open(fileMapName, fileMapSize)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tsdk := IRSDK{r: r, lastValidData: 0}\n\twinevents.OpenEvent(dataValidEventName)\n\tinitIRSDK(&sdk)\n\treturn sdk\n}", "title": "" }, { "docid": "e489fea6bd826ea5234b63bfe2876707", "score": "0.4639159", "text": "func (rc *ReporterCache) Init() {\r\n\trc.Data = make(map[string]*entity.TrafficRecord)\r\n}", "title": "" }, { "docid": "89050111e62a28e55fcfb745b562c7de", "score": "0.46303847", "text": "func (b *Binding) Init(metadata bindings.Metadata) error {\n\ts, f := metadata.Properties[\"schedule\"]\n\tif !f || s == \"\" {\n\t\treturn fmt.Errorf(\"schedule not set\")\n\t}\n\t_, err := b.parser.Parse(s)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid schedule format: %s\", s)\n\t}\n\tb.schedule = s\n\n\treturn nil\n}", "title": "" }, { "docid": "ee862cb41a1c33138445fad0098b436a", "score": "0.46280193", "text": "func NewMetadata_List(s *capnp.Segment, sz int32) (Metadata_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz)\n\treturn Metadata_List{l}, err\n}", "title": "" }, { "docid": "a8a6b3427990c5a9a7ca990106e7bf1d", "score": "0.46241707", "text": "func parseMetadataBlock(r network.MultiReader, fmm fieldMetadataMap) error {\n\tvar offset int32\n\terr := r.Read(&offset)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tendpos := r.Pos() + int(offset)\n\n\terr = r.Align()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar headerSize int16\n\terr = r.Read(&headerSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar flags int16\n\terr = r.Read(&flags)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = r.Seek(16)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor r.Pos() < endpos {\n\t\tfm, err := parseFieldMetadata(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmm[int(fm.header.metadataID)] = fm\n\t}\n\treturn nil\n}", 
"title": "" }, { "docid": "65756540a86eb962cdc9f28c3453387e", "score": "0.4621096", "text": "func init() {\n\t//md := activity.NewMetadata(jsonMetadata)\n\t//activity.Register(NewActivity(md))\n\tactivityLog.SetLogLevel(logger.InfoLevel)\n\t//act := NewActivity(getActivityMetadata())\n\n}", "title": "" }, { "docid": "744b14a43e1a6d545e2836cc6b5b814e", "score": "0.46105865", "text": "func InitTrack() Data {\n\treturn &Track{}\n}", "title": "" }, { "docid": "7f3b60d2d8425daef1a0f810fddf1ca5", "score": "0.4600563", "text": "func Binit(fd *os.File) *buf {\n\treturn &buf{\n\t\trdline: 0,\n\t\toffset: 0,\n\t\tbbuf: make([]byte, startBufSize),\n\t\trdr: bufio.NewReaderSize(fd, startBufSize),\n\t\tfd: fd,\n\t}\n}", "title": "" }, { "docid": "b6e4e7d3780fae4487e8a266d5005677", "score": "0.4599774", "text": "func NewBuffer(bs []byte) *Buffer {\n\tbuf := proto.NewBuffer(bs)\n\tbuf.SetDeterministic(true)\n\treturn &Buffer{Buffer: buf}\n}", "title": "" }, { "docid": "43569539b64669e0cc2f83115d833544", "score": "0.45981556", "text": "func FromBytes(bytes []byte, ext Extension) (*Metadata, error) {\n\tif ext == nil {\n\t\text = &extension{}\n\t}\n\tmetadata := &Metadata{Extension: ext}\n\n\tif err := json.Unmarshal(bytes, metadata); err != nil {\n\t\treturn nil, err\n\t}\n\twebTestFiles, err := normalizeWebTestFiles(metadata.WebTestFiles)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetadata.WebTestFiles = webTestFiles\n\n\tif metadata.Extension != nil {\n\t\tif err := metadata.Extension.Normalize(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn metadata, nil\n}", "title": "" }, { "docid": "9b4304aee78c33ee7d6c73236f9f4c66", "score": "0.45949674", "text": "func InitData(k string, s string) {\n\tkey = k\n\tsecret = s\n}", "title": "" }, { "docid": "d43af18a1bcd9b0e40f7737d1ea12e99", "score": "0.45946693", "text": "func (fw *FieldsWriter) init(dirPath string, segment string, fn *FieldInfos) error {\n\tfw.fieldInfos = fn\n\n\tfilePath := path.Join(dirPath, segment+FileSuffix[\"fieldData\"])\n\n\tfieldsData, err := CreateFile(filePath, false, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfw.fieldsData = fieldsData\n\n\tfilePath = path.Join(dirPath, segment+FileSuffix[\"fieldIndex\"])\n\tfieldsIndex, err := CreateFile(filePath, false, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfw.fieldsIndex = fieldsIndex\n\treturn nil\n}", "title": "" }, { "docid": "9d7f9ea4fc62e752cd0e28ded5ebdc17", "score": "0.459264", "text": "func (r *storeMMapReader) initialize() error {\n\tbuf := r.readBytes(r.len - sstFileFooterSize)\n\tif (len(buf)) != sstFileFooterSize-1 {\n\t\treturn fmt.Errorf(\"read sstfile:%s footer error\", r.path)\n\t}\n\t// validate magic-number\n\tif uint64Func(buf[9:]) != magicNumberOffsetFile {\n\t\treturn fmt.Errorf(\"verify magic-number of sstfile:%s failure\", r.path)\n\t}\n\tposOfOffset := int(binary.LittleEndian.Uint32(buf[:4]))\n\tposOfKeys := int(binary.LittleEndian.Uint32(buf[4:8]))\n\tif err := encoding.BitmapUnmarshal(r.keys, r.readBytes(posOfKeys)); err != nil {\n\t\treturn fmt.Errorf(\"unmarshal keys data from file[%s] error:%s\", r.path, err)\n\t}\n\toffset := r.readBytes(posOfOffset)\n\tr.offsets = encoding.NewFixedOffsetDecoder(offset)\n\n\tif r.offsets.Size() != int(r.keys.GetCardinality()) {\n\t\treturn fmt.Errorf(\"num. of keys != num. 
of offsets in file[%s]\", r.path)\n\t}\n\treturn nil\n}", "title": "" }, { "docid": "ffda4e617925b3d05c622b0dcb365c11", "score": "0.459195", "text": "func (m *RedisMetaConstructor) Init() error {\n\tuserData, err := internal.GetSentinelUserData()\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.meta = BackupMeta{\n\t\tPermanent: m.permanent,\n\t\tUser: userData,\n\t\tStartTime: utility.TimeNowCrossPlatformLocal(),\n\t}\n\treturn nil\n}", "title": "" }, { "docid": "902982d7f5b4b224fa416e6824a9d7b0", "score": "0.45869544", "text": "func (c *Collector) Init(ctx context.Context) (err error) { return nil }", "title": "" }, { "docid": "7f2615bc4f30333df87a8bdc29f78729", "score": "0.45834032", "text": "func NewFromBLOB(in io.Reader, chunkSize int64) (res *Manifest, err error) {\n\tres = &Manifest{}\n\terr = res.ParseBlob(in, chunkSize)\n\treturn\n}", "title": "" }, { "docid": "d3afd000003fbf37ac293ade995e7367", "score": "0.45810506", "text": "func InitImmutable(bucket *ibmcloudv1alpha1.Bucket) map[string]string {\n\tanno := make(map[string]string)\n\n\tanno[\"Resiliency\"] = bucket.Spec.Resiliency\n\tanno[\"Location\"] = bucket.Spec.Location\n\tanno[\"BucketType\"] = bucket.Spec.BucketType\n\tanno[\"StorageClass\"] = bucket.Spec.StorageClass\n\tbindingFromStr, err := json.Marshal(bucket.Spec.BindingFrom)\n\tif err == nil {\n\t\tanno[\"BindingFrom\"] = string(bindingFromStr)\n\t}\n\n\tAPIKeyStr, err := json.Marshal(bucket.Spec.APIKey)\n\tif err == nil {\n\t\tanno[\"APIKey\"] = string(APIKeyStr)\n\t}\n\n\tResourceInstanceIDStr, err := json.Marshal(bucket.Spec.ResourceInstanceID)\n\tif err == nil {\n\t\tanno[\"ResourceInstanceID\"] = string(ResourceInstanceIDStr)\n\t}\n\n\tEndpointsStr, err := json.Marshal(bucket.Spec.Endpoints)\n\tif err == nil {\n\t\tanno[\"Endpoints\"] = string(EndpointsStr)\n\t}\n\n\tanno[\"Bindonly\"] = strconv.FormatBool(bucket.Spec.BindOnly)\n\tlog.Info(bucket.ObjectMeta.Name, \"Inside InitImmutable\", anno)\n\treturn anno\n}", "title": "" }, { "docid": "77a925aaf74406be3a64a5f99ecd71a5", "score": "0.45803595", "text": "func (this *Help) Init(name, note string) (rv *Help) {\n\tparams := &Help{}\n\t*this = append(*this,\n\t\tyaml.MapItem{Key: name, Value: note},\n\t\tyaml.MapItem{Key: \"params\", Value: params})\n\treturn params\n}", "title": "" }, { "docid": "9c5255c7dc63fc0f8162c8121e62777f", "score": "0.45801723", "text": "func (m *metricProcessRuntimeMemstatsStackSys) init() {\n\tm.data.SetName(\"process.runtime.memstats.stack_sys\")\n\tm.data.SetDescription(\"Bytes of stack memory obtained from the OS.\")\n\tm.data.SetUnit(\"By\")\n\tm.data.SetEmptySum()\n\tm.data.Sum().SetIsMonotonic(false)\n\tm.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)\n}", "title": "" }, { "docid": "4ae95f5dc83091aaf5b306e10b3fc503", "score": "0.4578259", "text": "func FromReader(format dataset.DataFormat, data io.Reader) (st *dataset.Structure, n int, err error) {\n\tst = &dataset.Structure{\n\t\tFormat: format.String(),\n\t}\n\tst.Schema, n, err = Schema(st, data)\n\treturn\n}", "title": "" }, { "docid": "8d0a6ded982fc95059a3116cfcdae63d", "score": "0.45771107", "text": "func Init(conf Config) error {\n\tvar err error\n\tcodec, err = conf.BuildCodec()\n\treturn err\n}", "title": "" }, { "docid": "2076116ff83ae134375c6855019c0c03", "score": "0.45766714", "text": "func (e *MD) Init(l int) { e.ETLs = make(ETLs, l) }", "title": "" } ]
56a4c5dc26c3a456bcaeb23f1f4527ac
SyncUpdate triggers update on Orchestrator object and updates the cache
[ { "docid": "b37b3ed99bdee90110331ff681ea6273", "score": "0.67417103", "text": "func (api *orchestratorAPI) SyncUpdate(obj *orchestration.Orchestrator) error {\n\tif api.ct.objResolver != nil {\n\t\tlog.Fatal(\"Cannot use Sync update when object resolver is enabled on ctkit\")\n\t}\n\tnewObj := obj\n\tvar writeErr error\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\tnewObj, writeErr = apicl.OrchestratorV1().Orchestrator().Update(context.Background(), obj)\n\t}\n\n\tif writeErr == nil {\n\t\tapi.ct.handleOrchestratorEvent(&kvstore.WatchEvent{Object: newObj, Type: kvstore.Updated})\n\t}\n\n\treturn writeErr\n}", "title": "" } ]
[ { "docid": "2c44bb490b2548a75cc31ba76e93af78", "score": "0.6084702", "text": "func (api *orchestratorAPI) Update(obj *orchestration.Orchestrator) error {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = apicl.OrchestratorV1().Orchestrator().Update(context.Background(), obj)\n\t\treturn err\n\t}\n\n\tapi.ct.handleOrchestratorEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Updated})\n\treturn nil\n}", "title": "" }, { "docid": "18b6632e24c629bc9fa874e93a1425ea", "score": "0.6046163", "text": "func (service *InMemoryService) Update() {\n\tservice.backend.RequestRead(service)\n}", "title": "" }, { "docid": "53d11a858b836ab7f6f49bc4722f4422", "score": "0.59180516", "text": "func (o *Client) Update(exec boil.Executor, whitelist ...string) error {\n\tcurrTime := time.Now().In(boil.GetLocation())\n\n\to.UpdatedAt = currTime\n\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(exec); err != nil {\n\t\treturn err\n\t}\n\tkey := makeCacheKey(whitelist, nil)\n\tclientUpdateCacheMut.RLock()\n\tcache, cached := clientUpdateCache[key]\n\tclientUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := strmangle.UpdateColumnSet(\n\t\t\tclientColumns,\n\t\t\tclientPrimaryKeyColumns,\n\t\t\twhitelist,\n\t\t)\n\n\t\tif len(whitelist) == 0 {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn errors.New(\"models: unable to update clients, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"clients\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, clientPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(clientType, clientMapping, append(wl, clientPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\t_, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to update clients row\")\n\t}\n\n\tif !cached {\n\t\tclientUpdateCacheMut.Lock()\n\t\tclientUpdateCache[key] = cache\n\t\tclientUpdateCacheMut.Unlock()\n\t}\n\n\treturn o.doAfterUpdateHooks(exec)\n}", "title": "" }, { "docid": "a6fc52a2a16d941b0b79ef5edc080b87", "score": "0.5874966", "text": "func (mpc *MockPolicyCache) Update(dataChngEv datasync.ChangeEvent) error {\n\treturn nil\n}", "title": "" }, { "docid": "0639826b0dc0419f4a90fa24004de403", "score": "0.58375883", "text": "func (c *Consistent) Update() {\r\n\tc.update()\r\n}", "title": "" }, { "docid": "33237242aa818c3507ef224dbe755a99", "score": "0.57989293", "text": "func (o *Owner) Update(exec boil.Executor, whitelist ...string) error {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(exec); err != nil {\n\t\treturn err\n\t}\n\tkey := makeCacheKey(whitelist, nil)\n\townerUpdateCacheMut.RLock()\n\tcache, cached := ownerUpdateCache[key]\n\townerUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := strmangle.UpdateColumnSet(ownerColumns, ownerPrimaryKeyColumns, whitelist)\n\t\tif len(whitelist) == 0 {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn errors.New(\"models: unable to update 
owner, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"owner\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, ownerPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(ownerType, ownerMapping, append(wl, ownerPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\t_, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to update owner row\")\n\t}\n\n\tif !cached {\n\t\townerUpdateCacheMut.Lock()\n\t\townerUpdateCache[key] = cache\n\t\townerUpdateCacheMut.Unlock()\n\t}\n\n\treturn o.doAfterUpdateHooks(exec)\n}", "title": "" }, { "docid": "d8e149245f878cd96d49623078358b78", "score": "0.5732546", "text": "func (o *Task) Update(exec boil.Executor, whitelist ...string) error {\n\tcurrTime := time.Now().In(boil.GetLocation())\n\n\to.UpdatedAt.Time = currTime\n\to.UpdatedAt.Valid = true\n\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(exec); err != nil {\n\t\treturn err\n\t}\n\tkey := makeCacheKey(whitelist, nil)\n\ttaskUpdateCacheMut.RLock()\n\tcache, cached := taskUpdateCache[key]\n\ttaskUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := strmangle.UpdateColumnSet(\n\t\t\ttaskColumns,\n\t\t\ttaskPrimaryKeyColumns,\n\t\t\twhitelist,\n\t\t)\n\n\t\tif len(whitelist) == 0 {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn errors.New(\"models: unable to update tasks, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"tasks\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, taskPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(taskType, taskMapping, append(wl, taskPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\t_, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to update tasks row\")\n\t}\n\n\tif !cached {\n\t\ttaskUpdateCacheMut.Lock()\n\t\ttaskUpdateCache[key] = cache\n\t\ttaskUpdateCacheMut.Unlock()\n\t}\n\n\treturn o.doAfterUpdateHooks(exec)\n}", "title": "" }, { "docid": "7d864039e97b79755b68aeed4a87940f", "score": "0.5721028", "text": "func (p *Manager) Sync(key string) error {\n\t// split the namespace and name from cache\n\tns, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error splitting namespace/key from obj %s: %v\", key, err)\n\t}\n\n\tresource, err := p.lister.PostgresDBs(ns).Get(name)\n\tif err != nil {\n\t\t// Create a secret and see if it was pre-existing\n\t\tlog.Warnf(\"resource %s does not exist: %v\", key, err)\n\t\tlog.Warnf(\"delete processing item: %s/%s\", ns, name)\n\t\treturn p.Delete(ns, name)\n\t}\n\n\tlog.Infof(\"sync processing item: %s/%s\", ns, name)\n\n\t// deep copy to not change the cache\n\tnewDbInterface, _ := scheme.Scheme.DeepCopy(resource)\n\tobj := 
newDbInterface.(*v1alpha1.PostgresDB)\n\tinstanceID := fmt.Sprintf(\"%s-%s\", name, obj.GetUID())\n\tpgdb := p.newDB(instanceID, ns, resource)\n\n\treturn pgdb.Save()\n}", "title": "" }, { "docid": "9e365a4374e37e85c867c90e81dc3530", "score": "0.570042", "text": "func (self *OvsDriver) Update(context interface{}, tableUpdates libovsdb.TableUpdates) {\n self.PopulateCache(tableUpdates)\n}", "title": "" }, { "docid": "0f0421c9ea8a7f68cec2ba86980d3a48", "score": "0.569869", "text": "func (c *ipamCache) sync() error {\n\tres, err := c.etcdAPI.Get(context.Background(), CALICO_IPAM, &etcd.GetOptions{Recursive: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar index uint64\n\tindex = res.Index\n\tfor _, node := range res.Node.Nodes {\n\t\tif node.ModifiedIndex > index {\n\t\t\tindex = node.ModifiedIndex\n\t\t}\n\t\tif err = c.syncsubr(node); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tc.ready = true\n\tc.readyCond.Broadcast()\n\n\twatcher := c.etcdAPI.Watcher(CALICO_IPAM, &etcd.WatcherOptions{Recursive: true, AfterIndex: index})\n\tfor {\n\t\tres, err := watcher.Next(context.Background())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdel := false\n\t\tnode := res.Node\n\t\tswitch res.Action {\n\t\tcase \"set\", \"create\", \"update\", \"compareAndSwap\":\n\t\tcase \"delete\":\n\t\t\tdel = true\n\t\t\tnode = res.PrevNode\n\t\tdefault:\n\t\t\tlog.Printf(\"unhandled action: %s\", res.Action)\n\t\t\tcontinue\n\t\t}\n\t\tif err = c.update(node, del); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "title": "" }, { "docid": "91227a03540a54b3fba4b9a050d73808", "score": "0.5694654", "text": "func (e *EPaxosRMWHandler) sync() {\n if !e.R.Durable {\n return\n }\n\n e.R.StableStore.Sync()\n}", "title": "" }, { "docid": "9544a7ee30daf724311d19997fcce9f8", "score": "0.56926984", "text": "func (c mockClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error {\n\tc.calls[\"Update\"] = append(c.calls[\"Update\"], mockFuncCall{\n\t\tctx: ctx,\n\t\tobj: obj,\n\t})\n\tc.state[getStateKey(obj)] = obj\n\treturn nil\n}", "title": "" }, { "docid": "a802d8afac2a2ddc4e88465c0e5c2140", "score": "0.5677224", "text": "func (r *Repository) Sync() error {\n\tcacheDir := util.ReplaceHome(constants.CacheDir)\n\tcachePath := filepath.Join(cacheDir, r.ID)\n\n\terr := r.Load()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Read repomd.xml\n\trepomdURL := fmt.Sprintf(\"%s/repodata/repomd.xml\", r.BaseURL)\n\tresp, err := http.Get(repomdURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\trmd := RepoMd{}\n\n\terr = xml.Unmarshal(body, &rmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check update\n\tif rmd.Revision > r.Revision {\n\t\terr = r.ClearCache()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar cacheFiles []string\n\n\t\tfor _, item := range rmd.Items {\n\t\t\titemURL := fmt.Sprintf(\"%s/%s\", r.BaseURL, item.Location.Href)\n\t\t\tfname, err := util.Download(itemURL, cachePath, constants.CachePerm)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcacheFiles = append(cacheFiles, fname)\n\t\t}\n\n\t\tr.CacheFiles = cacheFiles\n\t\tr.Revision = rmd.Revision\n\n\t\terr = r.LoadCache()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn r.Save()\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "7c328b7d3c4c1abbc415dd55cf487c0d", "score": "0.5657252", "text": "func (r *CachedRepo) Sync() error {\n\treturn r.Remote.Sync(r.LocalDir)\n}", "title": "" }, { "docid": 
"6cbcff249165895c64e0a977aaddf563", "score": "0.56441957", "text": "func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.ModeSet) (gcSizeChange int64, err error) {\n\titem := addressToItem(addr)\n\n\t// need to get access timestamp here as it is not\n\t// provided by the access function, and it is not\n\t// a property of a chunk provided to Accessor.Put.\n\n\ti, err := db.retrievalDataIndex.Get(item)\n\tif err != nil {\n\t\tif errors.Is(err, leveldb.ErrNotFound) {\n\t\t\t// chunk is not found,\n\t\t\t// no need to update gc index\n\t\t\t// just delete from the push index\n\t\t\t// if it is there\n\t\t\terr = db.pushIndex.DeleteInBatch(batch, item)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn 0, err\n\t}\n\titem.StoreTimestamp = i.StoreTimestamp\n\titem.BinID = i.BinID\n\n\tswitch mode {\n\tcase storage.ModeSetSyncPull:\n\t\t// if we are setting a chunk for pullsync we expect it to be in the index\n\t\t// if it has a tag - we increment it and set the index item to _not_ contain the tag reference\n\t\t// this prevents duplicate increments\n\t\ti, err := db.pullIndex.Get(item)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, leveldb.ErrNotFound) {\n\t\t\t\t// we handle this error internally, since this is an internal inconsistency of the indices\n\t\t\t\t// if we return the error here - it means that for example, in stream protocol peers which we sync\n\t\t\t\t// to would be dropped. this is possible when the chunk is put with ModePutRequest and ModeSetSyncPull is\n\t\t\t\t// called on the same chunk (which should not happen)\n\t\t\t\tdb.logger.Debugf(\"localstore: chunk with address %s not found in pull index\", addr)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn 0, err\n\t\t}\n\n\t\tif db.tags != nil && i.Tag != 0 {\n\t\t\tt, err := db.tags.Get(i.Tag)\n\n\t\t\t// increment if and only if tag is anonymous\n\t\t\tif err == nil && t.Anonymous {\n\t\t\t\t// since pull sync does not guarantee that\n\t\t\t\t// a chunk has reached its NN, we can only mark\n\t\t\t\t// it as Sent\n\t\t\t\tt.Inc(tags.StateSent)\n\n\t\t\t\t// setting the tag to zero makes sure that\n\t\t\t\t// we don't increment the same tag twice when syncing\n\t\t\t\t// the same chunk to different peers\n\t\t\t\titem.Tag = 0\n\n\t\t\t\terr = db.pullIndex.PutInBatch(batch, item)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase storage.ModeSetSyncPush:\n\t\ti, err := db.pushIndex.Get(item)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, leveldb.ErrNotFound) {\n\t\t\t\t// we handle this error internally, since this is an internal inconsistency of the indices\n\t\t\t\t// this error can happen if the chunk is put with ModePutRequest or ModePutSync\n\t\t\t\t// but this function is called with ModeSetSyncPush\n\t\t\t\tdb.logger.Debugf(\"localstore: chunk with address %s not found in push index\", addr)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn 0, err\n\t\t}\n\t\tif db.tags != nil && i.Tag != 0 {\n\t\t\tt, err := db.tags.Get(i.Tag)\n\t\t\tif err != nil {\n\t\t\t\t// we cannot break or return here since the function needs to\n\t\t\t\t// run to end from db.pushIndex.DeleteInBatch\n\t\t\t\tdb.logger.Errorf(\"localstore: get tags on push sync set uid %d: %v\", i.Tag, err)\n\t\t\t} else {\n\t\t\t\t// setting a chunk for push sync assumes the tag is not anonymous\n\t\t\t\tif t.Anonymous {\n\t\t\t\t\treturn 0, errors.New(\"got an anonymous chunk in push sync index\")\n\t\t\t\t}\n\n\t\t\t\tt.Inc(tags.StateSynced)\n\t\t\t}\n\t\t}\n\n\t\terr = 
db.pushIndex.DeleteInBatch(batch, item)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\ti, err = db.retrievalAccessIndex.Get(item)\n\tswitch {\n\tcase err == nil:\n\t\titem.AccessTimestamp = i.AccessTimestamp\n\t\terr = db.gcIndex.DeleteInBatch(batch, item)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tgcSizeChange--\n\tcase errors.Is(err, leveldb.ErrNotFound):\n\t\t// the chunk is not accessed before\n\tdefault:\n\t\treturn 0, err\n\t}\n\titem.AccessTimestamp = now()\n\terr = db.retrievalAccessIndex.PutInBatch(batch, item)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Add in gcIndex only if this chunk is not pinned\n\tok, err := db.pinIndex.Has(item)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif !ok {\n\t\terr = db.gcIndex.PutInBatch(batch, item)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tgcSizeChange++\n\t}\n\n\treturn gcSizeChange, nil\n}", "title": "" }, { "docid": "9a5fe86197842ad64fb2c8b645801fee", "score": "0.5630241", "text": "func (s *Storage) sync() {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tfor id, mm := range s.cache {\n\t\tfor label, cache := range mm {\n\t\t\ts.writerCh <- &Entity{\n\t\t\t\tID: id,\n\t\t\t\tLabel: label,\n\t\t\t\tCount: cache.flush(),\n\t\t\t\tattempt: 0,\n\t\t\t}\n\t\t}\n\t}\n}", "title": "" }, { "docid": "328110ee9162862f67a17ce6da2a5bfe", "score": "0.562731", "text": "func (c client) Update(ctx context.Context, in k8sclient.Object, opt ...k8sclient.UpdateOption) error {\n\tstart := time.Now()\n\tdefer Metrics.Update(c, in, float64(time.Since(start)/nanoToMilli))\n\n\tobj, err := c.downConvert(ctx, in)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.Client.Update(ctx, obj, opt...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif Settings.EnableCachedClient {\n\t\texpectedRV, err := getResourceVersion(obj)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.waitForPopulatedCache(obj, expectedRV)\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "942ff08943cc8eef98b16d088453cbf6", "score": "0.56255597", "text": "func (this *SyncRequest) update(log logger.LogContext, initiator resources.Object) (bool, error) {\n\tthis.lock.Lock()\n\tdefer this.lock.Unlock()\n\n\tif this.resourceVersion == initiator.GetResourceVersion() {\n\t\tif len(this.syncPoints) == 0 {\n\t\t\tlog.Infof(\"synchronization %s(%s) for %s(%s) done\", this.name, this.resource, initiator.ClusterKey(), this.resourceVersion)\n\t\t\treturn true, nil\n\t\t}\n\t\tlog.Infof(\"synchronization %s(%s) for %s(%s) still pending\", this.name, this.resource, initiator.ClusterKey(), this.resourceVersion)\n\t\treturn false, nil\n\t}\n\tif this.resourceVersion == \"\" {\n\t\tlog.Infof(\"synchronizing %s(%s) for %s(%s)\", this.name, this.resource, initiator.ClusterKey(), initiator.GetResourceVersion())\n\t} else {\n\t\tlog.Infof(\"resynchronizing %s(%s) for %s(%s->%s)\", this.name, this.resource, initiator.ClusterKey(), this.resourceVersion, initiator.GetResourceVersion())\n\t}\n\tthis.resourceVersion = initiator.GetResourceVersion()\n\treconcilers := this.controller.mappings.Get(this.cluster, this.resource.GroupKind())\n\tif len(reconcilers) == 0 {\n\t\treturn false, fmt.Errorf(\"no reconcilers found for resource %s in %s\", this.resource, this.cluster)\n\t}\n\tlist, err := this.controller.ClusterHandler(this.cluster).resources[this.resource].List()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tthis.syncPoints = SyncPoints{}\n\tif len(list) == 0 {\n\t\tlog.Infof(\" no %s found for sync -> done\", this.resource)\n\t\treturn true, nil\n\t}\n\tif len(reconcilers) 
== 1 {\n\t\tfor _, o := range list {\n\t\t\tthis.syncPoints[o.ObjectName()] = nil\n\t\t}\n\t} else {\n\t\tfor _, o := range list {\n\t\t\tthis.syncPoints[o.ObjectName()] = reconcilers.Copy()\n\t\t}\n\t}\n\treturn false, this._requestReconcilations(log)\n}", "title": "" }, { "docid": "0c89db9c9e1ab6e14167d74ea9e4f10d", "score": "0.5624002", "text": "func (api *MetadataAPI) Sync(stopCh <-chan struct{}) {\n\tapi.sharedInformers.Start(stopCh)\n\n\twaitForCacheSync(api.syncChecks)\n}", "title": "" }, { "docid": "ea252e259fe947deda5f097367fe8764", "score": "0.56224203", "text": "func (r *ReconcileMobileSecurityServiceDB) update(obj runtime.Object, reqLogger logr.Logger) error {\n\terr := r.client.Update(context.TODO(), obj)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Failed to update Object\", \"obj:\", obj)\n\t\treturn err\n\t}\n\treqLogger.Info(\"Object updated\", \"obj:\", obj)\n\treturn nil\n}", "title": "" }, { "docid": "8ef5c2a3a00a87d28677d51de64a062b", "score": "0.5618591", "text": "func (c *cache) Update(ctx context.Context, key string, obj runtime.Object, cs ...kvstore.Cmp) error {\n\tvar ref apiintf.Requirement\n\tvar refs apiintf.RequirementSet\n\trc, err := apiutils.GetRequirements(ctx)\n\tif err == nil {\n\t\tif rc != nil {\n\t\t\trefs = rc.(apiintf.RequirementSet)\n\t\t\tif errs := refs.Check(ctx); len(errs) != 0 {\n\t\t\t\treturn fmt.Errorf(\"requirement not met [%v]\", errs)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tref = c.getRefRequirements(ctx, key, apiintf.UpdateOper, obj)\n\t\tif ref != nil {\n\t\t\tif errs := ref.Check(ctx); len(errs) > 0 {\n\t\t\t\treturn fmt.Errorf(\"requirement not met [%v]\", errs)\n\t\t\t}\n\t\t}\n\t}\n\tdefer c.RUnlock()\n\tc.RLock()\n\tif !c.active {\n\t\treturn errorCacheInactive\n\t}\n\t// perform KV store Update.\n\tstart := time.Now()\n\tc.logger.DebugLog(\"oper\", \"update\", \"msg\", \"called\")\n\tk := c.pool.GetFromPool().(kvstore.Interface)\n\tkvtime := time.Now()\n\terr = k.Update(ctx, key, obj, cs...)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Object update failed: %s\", kvstore.ErrorDesc(err))\n\t}\n\thdr.Record(\"kvstore.Update\", time.Since(kvtime))\n\t_, v := apiutils.MustGetObjectMetaVersion(obj)\n\tc.logger.DebugLog(\"oper\", \"update\", \"msg\", \"kvstore success, updating cache\")\n\tc.store.Set(key, v, obj, c.getCbFunc(kvstore.Updated))\n\tif refs != nil {\n\t\trefs.Finalize(ctx)\n\t} else if ref != nil {\n\t\tref.Finalize(ctx)\n\t}\n\thdr.Record(\"cache.Update\", time.Since(start))\n\tupdateOps.Add(1)\n\treturn nil\n}", "title": "" }, { "docid": "7fc343493afec536e3e26648720bc877", "score": "0.56008315", "text": "func (c *Controller) updateOrchestrator(oldObj, newObj interface{}) {\n\t_, ok := oldObj.(*netappv1.TridentOrchestrator)\n\tif !ok {\n\t\tLog().Errorf(\"'%s' controller expected '%s' CR; got '%v'\", ControllerName, CRDName, oldObj)\n\t\treturn\n\t}\n\n\tnewCR, ok := newObj.(*netappv1.TridentOrchestrator)\n\tif !ok {\n\t\tLog().Errorf(\"'%s' controller expected '%s' CR; got '%v'\", ControllerName, CRDName, newObj)\n\t\treturn\n\t}\n\n\tif !newCR.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\tLog().WithFields(LogFields{\n\t\t\t\"name\": newCR.Name,\n\t\t\t\"deletionTimestamp\": newCR.ObjectMeta.DeletionTimestamp,\n\t\t}).Infof(\"'%s' CR is being deleted, not updated.\", CRDName)\n\t\treturn\n\t}\n\n\tvar key string\n\tvar err error\n\n\tif key, err = cache.MetaNamespaceKeyFunc(newObj); err != nil {\n\t\tLog().Error(err)\n\t\treturn\n\t}\n\n\tLog().WithFields(LogFields{\n\t\t\"CR\": newCR.Name,\n\t\t\"CRD\": 
CRDName,\n\t}).Infof(\"CR updated.\")\n\n\tkeyItem := KeyItem{\n\t\tkeyDetails: key,\n\t\tresourceType: ResourceTridentOrchestratorCR,\n\t}\n\n\tc.workqueue.Add(keyItem)\n}", "title": "" }, { "docid": "d3ecd2add7965b69dbf091d0d6b77c90", "score": "0.5578875", "text": "func (o *GroupsInvestor) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tif !boil.TimestampsAreSkipped(ctx) {\n\t\tcurrTime := time.Now().In(boil.GetLocation())\n\n\t\to.UpdatedAt = currTime\n\t}\n\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\tgroupsInvestorUpdateCacheMut.RLock()\n\tcache, cached := groupsInvestorUpdateCache[key]\n\tgroupsInvestorUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tgroupsInvestorAllColumns,\n\t\t\tgroupsInvestorPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"dbmodel: unable to update groups_investors, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"groups_investors\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 0, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 0, groupsInvestorPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(groupsInvestorType, groupsInvestorMapping, append(wl, groupsInvestorPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"dbmodel: unable to update groups_investors row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"dbmodel: failed to get rows affected by update for groups_investors\")\n\t}\n\n\tif !cached {\n\t\tgroupsInvestorUpdateCacheMut.Lock()\n\t\tgroupsInvestorUpdateCache[key] = cache\n\t\tgroupsInvestorUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(ctx, exec)\n}", "title": "" }, { "docid": "1b040af2bc004f4436b93d1060b65472", "score": "0.55767244", "text": "func (fl *dummyLock) Update(ctx context.Context, ler Record) error {\n\treturn nil\n}", "title": "" }, { "docid": "424bce6610a7f147a16af5aaa162b8b4", "score": "0.5563645", "text": "func (r *Replica) sync() {\n if !r.Durable {\n return\n }\n\n r.StableStore.Sync()\n}", "title": "" }, { "docid": "d3476076f68efeb4f55dfb850fd7fbcf", "score": "0.55474705", "text": "func (o *StatNotifier) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\tstatNotifierUpdateCacheMut.RLock()\n\tcache, cached := statNotifierUpdateCache[key]\n\tstatNotifierUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tstatNotifierAllColumns,\n\t\t\tstatNotifierPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"model2: 
unable to update stat_notifier, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE `stat_notifier` SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"`\", \"`\", 0, wl),\n\t\t\tstrmangle.WhereClause(\"`\", \"`\", 0, statNotifierPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(statNotifierType, statNotifierMapping, append(wl, statNotifierPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"model2: unable to update stat_notifier row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"model2: failed to get rows affected by update for stat_notifier\")\n\t}\n\n\tif !cached {\n\t\tstatNotifierUpdateCacheMut.Lock()\n\t\tstatNotifierUpdateCache[key] = cache\n\t\tstatNotifierUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(ctx, exec)\n}", "title": "" }, { "docid": "35c272a50d8ad13ac0dfe904a21e4a9d", "score": "0.55440784", "text": "func (p *policy) Sync(add []cache.Container, del []cache.Container) error {\n\tlog.Debug(\"synchronizing state...\")\n\tfor _, c := range del {\n\t\tp.ReleaseResources(c)\n\t}\n\tfor _, c := range add {\n\t\tp.AllocateResources(c)\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "65ca0f45632aa6869331a2e1eda6d133", "score": "0.55410165", "text": "func TestUpdate(t *testing.T) {\n\tec := GetCacheWithBigCache(t)\n\tresource := &customResource{\n\t\tcounter: 0,\n\t}\n\tec.AddResource(\"getUser\", resource)\n\tb, err := ec.Provide(\"getUser\", \"2\", \"3\")\n\tif err != nil {\n\t\tt.Fatalf(\"error while providing %v\", err)\n\t}\n\tif string(b) != \"getUser:2-3\" {\n\t\tt.Fatalf(\"%s != %s\", string(b), \"getUser:2-3\")\n\t}\n\n\terr = ec.Set([]byte(\"updated\"), \"getUser\", \"2\", \"3\")\n\tif err != nil {\n\t\tt.Fatalf(\"error while setting value %v\", err)\n\t}\n\tb, err = ec.Provide(\"getUser\", \"2\", \"3\")\n\tif err != nil {\n\t\tt.Fatalf(\"error while providing %v\", err)\n\t}\n\n\tif string(b) != \"updated\" {\n\t\tt.Fatalf(\"%s != %s\", string(b), \"getUser:2-3\")\n\t}\n\n}", "title": "" }, { "docid": "8ef2ca476ccf91717a5c09a346a30ce9", "score": "0.5538283", "text": "func (s *ObjectSyncer) Sync(ctx context.Context) (SyncResult, error) {\n\tvar err error\n\n\tresult := SyncResult{}\n\tlog := logf.FromContext(ctx, \"syncer\", s.Name)\n\tkey := client.ObjectKeyFromObject(s.Obj)\n\n\tresult.Operation, err = controllerutil.CreateOrUpdate(ctx, s.Client, s.Obj, s.mutateFn())\n\n\t// check deep diff\n\tdiff := deep.Equal(redact(s.previousObject), redact(s.Obj))\n\n\t// don't pass to user error for owner deletion, just don't create the object\n\t//nolint: gocritic\n\tif errors.Is(err, ErrOwnerDeleted) {\n\t\tlog.Info(string(result.Operation), \"key\", key, \"kind\", objectType(s.Obj, s.Client), \"error\", err)\n\t\terr = nil\n\t} else if errors.Is(err, ErrIgnore) {\n\t\tlog.V(1).Info(\"syncer skipped\", \"key\", key, \"kind\", objectType(s.Obj, s.Client), \"error\", err)\n\t\terr = nil\n\t} else if err != nil {\n\t\tresult.SetEventData(eventWarning, basicEventReason(s.Name, err),\n\t\t\tfmt.Sprintf(\"%s %s failed syncing: %s\", 
objectType(s.Obj, s.Client), key, err))\n\t\tlog.Error(err, string(result.Operation), \"key\", key, \"kind\", objectType(s.Obj, s.Client), \"diff\", diff)\n\t} else {\n\t\tresult.SetEventData(eventNormal, basicEventReason(s.Name, err),\n\t\t\tfmt.Sprintf(\"%s %s %s successfully\", objectType(s.Obj, s.Client), key, result.Operation))\n\t\tlog.V(1).Info(string(result.Operation), \"key\", key, \"kind\", objectType(s.Obj, s.Client), \"diff\", diff)\n\t}\n\n\treturn result, err\n}", "title": "" }, { "docid": "3f0226748a5362465977b112f84e59bf", "score": "0.549901", "text": "func syncStoreImpl(storage Storage, overriddenDataSchema *schema.BatchHeader, objects []map[string]interface{}, timeIntervalValue string, cacheTable bool) error {\n\tif len(objects) == 0 {\n\t\treturn nil\n\t}\n\n\tadapter, tableHelper := storage.getAdapters()\n\n\tflatDataPerTable, err := processData(storage, overriddenDataSchema, objects, timeIntervalValue)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdeleteConditions := adapters.DeleteByTimeChunkCondition(timeIntervalValue)\n\n\tfor _, flatData := range flatDataPerTable {\n\t\ttable := tableHelper.MapTableSchema(flatData.BatchHeader)\n\n\t\tdbSchema, err := tableHelper.EnsureTable(storage.ID(), table, cacheTable)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstart := time.Now()\n\t\tif err = adapter.BulkUpdate(dbSchema, flatData.GetPayload(), deleteConditions); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogging.Debugf(\"[%s] Inserted [%d] rows in [%.2f] seconds\", storage.ID(), flatData.GetPayloadLen(), time.Now().Sub(start).Seconds())\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "7d90e60d2d92c990b116338bb8a12fb", "score": "0.54956853", "text": "func (sw *storageWrapper) Update(key string, obj runtime.Object) error {\n\tvar buf bytes.Buffer\n\tif err := sw.backendSerializer.Encode(obj, &buf); err != nil {\n\t\tklog.Errorf(\"failed to encode object in update for %s, %v\", key, err)\n\t\treturn err\n\t}\n\n\tif err := sw.store.Update(key, buf.Bytes()); err != nil {\n\t\treturn err\n\t}\n\n\tif isCacheKey(key) {\n\t\tsw.Lock()\n\t\tsw.cache[key] = obj\n\t\tsw.Unlock()\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "73b36519b8497ef5f9dde0317ddf7a3c", "score": "0.5492433", "text": "func (w *Watcher) Update() error {\n\t// begin transaction\n\ttx, err := w.client.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\trev := 0\n\tval, err := tx.Get(goctx.Background(), []byte(w.keyName))\n\tif err != nil {\n\t\tif err != kv.ErrNotExist {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\trev, err = strconv.Atoi(string(val))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Println(\"Get revision: \", rev)\n\t\trev += 1\n\t}\n\tlog.Printf(\"set revision %d\\n\", rev)\n\tif err = tx.Set([]byte(w.keyName), []byte(strconv.Itoa(rev))); err != nil {\n\t\treturn err\n\t}\n\tw.Lock()\n\tdefer w.Unlock()\n\tif err = tx.Commit(goctx.Background()); err != nil {\n\t\treturn err\n\t}\n\n\tw.formerValue = []byte(strconv.Itoa(rev))\n\treturn err\n}", "title": "" }, { "docid": "ed8fd55cf3547923ea89ae2ecaf5945e", "score": "0.54840183", "text": "func (m *argov2manager) Sync(ctx context.Context) error {\n\treturn nil\n}", "title": "" }, { "docid": "8736bcf3615b6344d05466ef90c85dba", "score": "0.5475562", "text": "func (s *store) sync(ctx context.Context) error {\n\tresp, err := s.apiClient.ListEvents(ctx, &pipedservice.ListEventsRequest{\n\t\tFrom: s.milestone,\n\t\tOrder: pipedservice.ListOrder_ASC,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list events: %w\", 
err)\n\t}\n\tif len(resp.Events) == 0 {\n\t\treturn nil\n\t}\n\n\t// Eliminate events that have duplicated key.\n\tfiltered := make(map[string]*model.Event, len(resp.Events))\n\tfor _, e := range resp.Events {\n\t\tfiltered[e.EventKey] = e\n\t}\n\t// Make the cache up-to-date.\n\ts.mu.Lock()\n\tfor key, event := range filtered {\n\t\tcached, ok := s.latestEvents[key]\n\t\tif ok && cached.CreatedAt > event.CreatedAt {\n\t\t\tcontinue\n\t\t}\n\t\ts.latestEvents[key] = event\n\t}\n\ts.mu.Unlock()\n\n\t// Set the latest one within the result as the next time's \"from\".\n\ts.milestone = resp.Events[len(resp.Events)-1].CreatedAt + 1\n\treturn nil\n}", "title": "" }, { "docid": "6c6ba02742c520f7d6c7aa8fdec7627c", "score": "0.54718727", "text": "func (o *ServiceEndpoint) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\tserviceEndpointUpdateCacheMut.RLock()\n\tcache, cached := serviceEndpointUpdateCache[key]\n\tserviceEndpointUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tserviceEndpointAllColumns,\n\t\t\tserviceEndpointPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"models: unable to update service_endpoint, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"service_endpoint\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, serviceEndpointPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(serviceEndpointType, serviceEndpointMapping, append(wl, serviceEndpointPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update service_endpoint row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by update for service_endpoint\")\n\t}\n\n\tif !cached {\n\t\tserviceEndpointUpdateCacheMut.Lock()\n\t\tserviceEndpointUpdateCache[key] = cache\n\t\tserviceEndpointUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(ctx, exec)\n}", "title": "" }, { "docid": "5ac81caf623e7058514370e0c2664e8c", "score": "0.5469756", "text": "func (this *Cache) Sync(f func(CacheGetter, CacheSetter)) {\n\tthis.mu.Lock()\n\tf(this.Get, this.set)\n\tthis.mu.Unlock()\n}", "title": "" }, { "docid": "6263741e69157de1305d6a3b81a5ed90", "score": "0.5464428", "text": "func (o *ObjectCache) Update(resourceIdent ResourceIdent, object client.Object) error {\n\tif _, ok := o.data[resourceIdent]; !ok {\n\t\treturn fmt.Errorf(\"object cache not found, cannot update\")\n\t}\n\n\tnn, err := getNamespacedNameFromRuntime(object)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, ok := o.data[resourceIdent][nn]; !ok {\n\t\treturn fmt.Errorf(\"object not found in cache, cannot update\")\n\t}\n\n\tvar gvk, obGVK 
schema.GroupVersionKind\n\tif gvk, err = utils.GetKindFromObj(o.scheme, resourceIdent.GetType()); err != nil {\n\t\treturn err\n\t}\n\n\tif obGVK, err = utils.GetKindFromObj(o.scheme, object); err != nil {\n\t\treturn err\n\t}\n\n\tif gvk != obGVK {\n\t\treturn fmt.Errorf(\"create: resourceIdent type does not match runtime object [%s] [%s] [%s]\", nn, gvk, obGVK)\n\t}\n\n\to.data[resourceIdent][nn].Object = object.DeepCopyObject().(client.Object)\n\n\tif clowder_config.LoadedConfig.DebugOptions.Cache.Update {\n\t\tvar jsonData []byte\n\t\tjsonData, _ = json.MarshalIndent(o.data[resourceIdent][nn].Object, \"\", \" \")\n\t\tif object.GetObjectKind().GroupVersionKind() == secretCompare {\n\t\t\to.log.Info(\"UPDATE resource \", \"namespace\", nn.Namespace, \"name\", nn.Name, \"provider\", resourceIdent.GetProvider(), \"purpose\", resourceIdent.GetPurpose(), \"kind\", object.GetObjectKind().GroupVersionKind().Kind, \"diff\", \"hidden\")\n\t\t} else {\n\t\t\to.log.Info(\"UPDATE resource \", \"namespace\", nn.Namespace, \"name\", nn.Name, \"provider\", resourceIdent.GetProvider(), \"purpose\", resourceIdent.GetPurpose(), \"kind\", object.GetObjectKind().GroupVersionKind().Kind, \"diff\", string(jsonData))\n\t\t}\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "0d3be52db473ded4c9cedf9cff319aee", "score": "0.5461235", "text": "func (c *LokiCore) Sync() error {\n\treturn nil\n}", "title": "" }, { "docid": "1c9ab3ebebb8643d72969ef23b64b827", "score": "0.54505247", "text": "func (o *CustomCommand) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {\n\tif !boil.TimestampsAreSkipped(ctx) {\n\t\tcurrTime := time.Now().In(boil.GetLocation())\n\n\t\to.UpdatedAt = currTime\n\t}\n\n\tvar err error\n\tkey := makeCacheKey(columns, nil)\n\tcustomCommandUpdateCacheMut.RLock()\n\tcache, cached := customCommandUpdateCache[key]\n\tcustomCommandUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tcustomCommandAllColumns,\n\t\t\tcustomCommandPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn errors.New(\"models: unable to update custom_commands, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"custom_commands\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, customCommandPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(customCommandType, customCommandMapping, append(wl, customCommandPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\t_, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to update custom_commands row\")\n\t}\n\n\tif !cached {\n\t\tcustomCommandUpdateCacheMut.Lock()\n\t\tcustomCommandUpdateCache[key] = cache\n\t\tcustomCommandUpdateCacheMut.Unlock()\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "dd66b6322cf28db530e417469bcb5313", "score": "0.54495215", "text": "func (o *Metric) Update(exec boil.Executor, whitelist ...string) error {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(exec); err != nil {\n\t\treturn 
err\n\t}\n\tkey := makeCacheKey(whitelist, nil)\n\tmetricUpdateCacheMut.RLock()\n\tcache, cached := metricUpdateCache[key]\n\tmetricUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := strmangle.UpdateColumnSet(metricColumns, metricPrimaryKeyColumns, whitelist)\n\n\t\tif len(whitelist) == 0 {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn errors.New(\"public: unable to update metric, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"metric\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, metricPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(metricType, metricMapping, append(wl, metricPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\t_, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"public: unable to update metric row\")\n\t}\n\n\tif !cached {\n\t\tmetricUpdateCacheMut.Lock()\n\t\tmetricUpdateCache[key] = cache\n\t\tmetricUpdateCacheMut.Unlock()\n\t}\n\n\treturn o.doAfterUpdateHooks(exec)\n}", "title": "" }, { "docid": "7d2c458ca971af161d3d8ec2e67e421c", "score": "0.5442696", "text": "func (cache *Cache) Sync(state Writer) error {\n\tcache.Lock()\n\tdefer cache.Unlock()\n\tvar hashes ProposalHashArray\n\tfor hash := range cache.proposals {\n\t\thashes = append(hashes, hash)\n\t}\n\tsort.Stable(hashes)\n\n\t// Update or delete proposals\n\tfor _, hash := range hashes {\n\t\tproposalInfo := cache.proposals[hash]\n\t\tproposalInfo.RLock()\n\t\tif proposalInfo.removed {\n\t\t\terr := state.RemoveProposal(hash[:])\n\t\t\tif err != nil {\n\t\t\t\tproposalInfo.RUnlock()\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if proposalInfo.updated {\n\t\t\terr := state.UpdateProposal(hash[:], proposalInfo.ballot)\n\t\t\tif err != nil {\n\t\t\t\tproposalInfo.RUnlock()\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tproposalInfo.RUnlock()\n\t}\n\treturn nil\n}", "title": "" }, { "docid": "9c1e4abb342fa9ccc4f6ebb5612fd645", "score": "0.54335815", "text": "func (o *Cache) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\tcacheUpdateCacheMut.RLock()\n\tcache, cached := cacheUpdateCache[key]\n\tcacheUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tcacheAllColumns,\n\t\t\tcachePrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"models: unable to update caches, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"caches\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 0, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 0, cachePrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(cacheType, cacheMapping, append(wl, cachePrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), 
cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update caches row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by update for caches\")\n\t}\n\n\tif !cached {\n\t\tcacheUpdateCacheMut.Lock()\n\t\tcacheUpdateCache[key] = cache\n\t\tcacheUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(ctx, exec)\n}", "title": "" }, { "docid": "fdfcb278fd4e6a9e58388d8eb4d197b5", "score": "0.5429538", "text": "func (c *Core) Sync() error {\n\treturn nil\n}", "title": "" }, { "docid": "d48686415e278d2dc91b23f431fbedf8", "score": "0.54253614", "text": "func (_MetaData *MetaDataTransactor) Update(opts *bind.TransactOpts, _data string) (*types.Transaction, error) {\n\treturn _MetaData.contract.Transact(opts, \"update\", _data)\n}", "title": "" }, { "docid": "77078f0f295fca64a2a32cb9c66ea003", "score": "0.54248786", "text": "func (o *CommandInfo) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {\n\tif !boil.TimestampsAreSkipped(ctx) {\n\t\tcurrTime := time.Now().In(boil.GetLocation())\n\n\t\to.UpdatedAt = currTime\n\t}\n\n\tvar err error\n\tkey := makeCacheKey(columns, nil)\n\tcommandInfoUpdateCacheMut.RLock()\n\tcache, cached := commandInfoUpdateCache[key]\n\tcommandInfoUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tcommandInfoAllColumns,\n\t\t\tcommandInfoPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn errors.New(\"models: unable to update command_infos, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"command_infos\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, commandInfoPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(commandInfoType, commandInfoMapping, append(wl, commandInfoPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\t_, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to update command_infos row\")\n\t}\n\n\tif !cached {\n\t\tcommandInfoUpdateCacheMut.Lock()\n\t\tcommandInfoUpdateCache[key] = cache\n\t\tcommandInfoUpdateCacheMut.Unlock()\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "f3c1078f2f85b4a0907a6412571b93a5", "score": "0.5420794", "text": "func (c *Cache) Update(ctx context.Context) error {\n\tstatus, err := c.DB.Get(ctx, c.roller)\n\tif err == datastore.ErrNoSuchEntity || status == nil {\n\t\t// This will occur the first time the roller starts,\n\t\t// before it sets the status for the first time. Ignore.\n\t\tsklog.Warningf(\"Unable to find AutoRollStatus for %s. 
Is this the first startup for this roller?\", c.roller)\n\t\tstatus = &AutoRollStatus{}\n\t} else if err != nil {\n\t\treturn err\n\t}\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\tc.status = status\n\treturn nil\n}", "title": "" }, { "docid": "c3295c5a8d6d42dd9711be5deab19433", "score": "0.54179406", "text": "func (o *GameOperation) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tkey := makeCacheKey(columns, nil)\n\tgameOperationUpdateCacheMut.RLock()\n\tcache, cached := gameOperationUpdateCache[key]\n\tgameOperationUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tgameOperationColumns,\n\t\t\tgameOperationPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"models: unable to update game_operations, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"game_operations\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, gameOperationPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(gameOperationType, gameOperationMapping, append(wl, gameOperationPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update game_operations row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by update for game_operations\")\n\t}\n\n\tif !cached {\n\t\tgameOperationUpdateCacheMut.Lock()\n\t\tgameOperationUpdateCache[key] = cache\n\t\tgameOperationUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, nil\n}", "title": "" }, { "docid": "cefafedf5a6e01ecde0c61f4e05f80a3", "score": "0.54134977", "text": "func (*Core) Sync() error {\n\treturn nil\n}", "title": "" }, { "docid": "7aa10af68d093e3655130906c84aff57", "score": "0.5412201", "text": "func (ctrler CtrlDefReactor) OnOrchestratorUpdate(oldObj *Orchestrator, newObj *orchestration.Orchestrator) error {\n\tlog.Info(\"OnOrchestratorUpdate is not implemented\")\n\treturn nil\n}", "title": "" }, { "docid": "303372e4e4cddc13b3e9abf29f7dcb1f", "score": "0.5404804", "text": "func (r *Replica) sync() {\n\tif !r.Durable {\n\t\treturn\n\t}\n\n\tr.StableStore.Sync()\n}", "title": "" }, { "docid": "85115398f76388e2305ebe7661cf57ad", "score": "0.5380892", "text": "func (bc *BypassCache) Update(force bool) {\n\n\t// Lock the cache object.\n\tbc.CacheLock.Lock()\n\tdefer bc.CacheLock.Unlock()\n\n\t// Freeze transaction time to start of method.\n\tnow := time.Now()\n\n\t// Do not update cache if we're not forced to\n\t// and if the update is not due yet.\n\tif !force && !isStale(bc.LastUpdated, now) {\n\t\treturn\n\t}\n\n\t// Call out to the unit and update object.\n\tif gb, err := libcomfo.GetBypass(comfoConn); err == nil {\n\t\tbc.Bypass = gb\n\t\tbc.LastUpdated = now\n\t} else {\n\t\tlog.Printf(\"BypassCache.Update() - Error updating bypass cache: %s\", err)\n\t}\n}", "title": "" }, { "docid": 
"b2efac8975cdb1e942538759230c58d4", "score": "0.5377913", "text": "func (w *HotCache) Update(item *HotPeerStat) {\n\tswitch item.Kind {\n\tcase WriteFlow:\n\t\tw.writeFlow.Update(item)\n\tcase ReadFlow:\n\t\tw.readFlow.Update(item)\n\t}\n\n\tif item.IsNeedDelete() {\n\t\tw.incMetrics(\"remove_item\", item.StoreID, item.Kind)\n\t} else if item.IsNew() {\n\t\tw.incMetrics(\"add_item\", item.StoreID, item.Kind)\n\t} else {\n\t\tw.incMetrics(\"update_item\", item.StoreID, item.Kind)\n\t}\n}", "title": "" }, { "docid": "97abb29f0e4c22b5d8d0f213140a9b12", "score": "0.5369719", "text": "func (m *manager) Sync(ctx context.Context) error {\n\t// TODO: We're now persisting releases as secrets. To support seamless upgrades, we\n\t// need to sync the release status from the CR to the persistent storage backend.\n\t// Once we release the storage backend migration, this function (and comment)\n\t// can be removed.\n\tif err := m.syncReleaseStatus(*m.status); err != nil {\n\t\treturn fmt.Errorf(\"failed to sync release status to storage backend: %s\", err)\n\t}\n\n\t// Get release history for this release name\n\treleases, err := m.storageBackend.History(m.releaseName)\n\tif err != nil && !notFoundErr(err) {\n\t\treturn fmt.Errorf(\"failed to retrieve release history: %s\", err)\n\t}\n\n\t// Cleanup non-deployed release versions. If all release versions are\n\t// non-deployed, this will ensure that failed installations are correctly\n\t// retried.\n\tfor _, rel := range releases {\n\t\tif rel.GetInfo().GetStatus().GetCode() != rpb.Status_DEPLOYED {\n\t\t\t_, err := m.storageBackend.Delete(rel.GetName(), rel.GetVersion())\n\t\t\tif err != nil && !notFoundErr(err) {\n\t\t\t\treturn fmt.Errorf(\"failed to delete stale release version: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Load the chart and config based on the current state of the custom resource.\n\tchart, config, err := m.loadChartAndConfig()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to load chart and config: %s\", err)\n\t}\n\tm.chart = chart\n\tm.config = config\n\n\t// Load the most recently deployed release from the storage backend.\n\tdeployedRelease, err := m.getDeployedRelease()\n\tif err == ErrNotFound {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get deployed release: %s\", err)\n\t}\n\tm.deployedRelease = deployedRelease\n\tm.isInstalled = true\n\n\t// Get the next candidate release to determine if an update is necessary.\n\tcandidateRelease, err := m.getCandidateRelease(ctx, m.tiller, m.releaseName, chart, config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get candidate release: %s\", err)\n\t}\n\tif deployedRelease.GetManifest() != candidateRelease.GetManifest() {\n\t\tm.isUpdateRequired = true\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "246fe2b7e8fa759afd6d03da29249427", "score": "0.5366401", "text": "func (ri *MemCacheRepoImpl) Update(_ context.Context) error {\n\treturn nil\n}", "title": "" }, { "docid": "d2c1f3e29f6b0e287fc7c2c760fea015", "score": "0.53613096", "text": "func (cache *BlockCache) Sync() {\n\n\t// Determine order for storage updates\n\t// The address comes first so it'll be grouped.\n\tstorageKeys := make([]Tuple256, 0, len(cache.storages))\n\tfor keyTuple := range cache.storages {\n\t\tstorageKeys = append(storageKeys, keyTuple)\n\t}\n\tTuple256Slice(storageKeys).Sort()\n\n\t// Update storage for all account/key.\n\t// Later we'll iterate over all the users and save storage + update storage root.\n\tvar (\n\t\tcurAddr Word256\n\t\tcurAcc 
*acm.Account\n\t\tcurAccRemoved bool\n\t\tcurStorage merkle.Tree\n\t)\n\tfor _, storageKey := range storageKeys {\n\t\taddr, key := Tuple256Split(storageKey)\n\t\tif addr != curAddr || curAcc == nil {\n\t\t\tacc, storage, removed, _ := cache.accounts[string(addr.Postfix(20))].unpack()\n\t\t\tif !removed && storage == nil {\n\t\t\t\tstorage = makeStorage(cache.db, acc.StorageRoot)\n\t\t\t}\n\t\t\tcurAddr = addr\n\t\t\tcurAcc = acc\n\t\t\tcurAccRemoved = removed\n\t\t\tcurStorage = storage\n\t\t}\n\t\tif curAccRemoved {\n\t\t\tcontinue\n\t\t}\n\t\tvalue, dirty := cache.storages[storageKey].unpack()\n\t\tif !dirty {\n\t\t\tcontinue\n\t\t}\n\t\tif value.IsZero() {\n\t\t\tcurStorage.Remove(key.Bytes())\n\t\t} else {\n\t\t\tcurStorage.Set(key.Bytes(), value.Bytes())\n\t\t\tcache.accounts[string(addr.Postfix(20))] = accountInfo{curAcc, curStorage, false, true}\n\t\t}\n\t}\n\n\t// Determine order for accounts\n\taddrStrs := []string{}\n\tfor addrStr := range cache.accounts {\n\t\taddrStrs = append(addrStrs, addrStr)\n\t}\n\tsort.Strings(addrStrs)\n\n\t// Update or delete accounts.\n\tfor _, addrStr := range addrStrs {\n\t\tacc, storage, removed, dirty := cache.accounts[addrStr].unpack()\n\t\tif removed {\n\t\t\tremoved := cache.backend.RemoveAccount([]byte(addrStr))\n\t\t\tif !removed {\n\t\t\t\tPanicCrisis(Fmt(\"Could not remove account to be removed: %X\", acc.Address))\n\t\t\t}\n\t\t} else {\n\t\t\tif acc == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif storage != nil {\n\t\t\t\tnewStorageRoot := storage.Save()\n\t\t\t\tif !bytes.Equal(newStorageRoot, acc.StorageRoot) {\n\t\t\t\t\tacc.StorageRoot = newStorageRoot\n\t\t\t\t\tdirty = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif dirty {\n\t\t\t\tcache.backend.UpdateAccount(acc)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Determine order for names\n\t// note names may be of any length less than some limit\n\tnameStrs := []string{}\n\tfor nameStr := range cache.names {\n\t\tnameStrs = append(nameStrs, nameStr)\n\t}\n\tsort.Strings(nameStrs)\n\n\t// Update or delete names.\n\tfor _, nameStr := range nameStrs {\n\t\tentry, removed, dirty := cache.names[nameStr].unpack()\n\t\tif removed {\n\t\t\tremoved := cache.backend.RemoveNameRegEntry(nameStr)\n\t\t\tif !removed {\n\t\t\t\tPanicCrisis(Fmt(\"Could not remove namereg entry to be removed: %s\", nameStr))\n\t\t\t}\n\t\t} else {\n\t\t\tif entry == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif dirty {\n\t\t\t\tcache.backend.UpdateNameRegEntry(entry)\n\t\t\t}\n\t\t}\n\t}\n\n}", "title": "" }, { "docid": "538951ffffe3b07a0ce1a3e1607d88c7", "score": "0.5356838", "text": "func (rm *RsrcManager) Update(item reconciler.Object) error {\n\tobj := item.Obj.(*Object)\n\td := obj.Redis\n\t_, err := rm.service.Projects.Locations.Instances.Patch(obj.Parent+\"/instances/\"+obj.InstanceID, d).UpdateMask(\"displayName,labels,memorySizeGb,redisConfigs\").Do()\n\treturn err\n}", "title": "" }, { "docid": "5ec86cf8ecbc4f32638eab7ce757da05", "score": "0.5355945", "text": "func (h *item) Sync(c echo.Context) error {\n\t// Filter params\n\tvar params service.SyncParams\n\tif err := c.Bind(&params); err != nil {\n\t\treturn c.JSON(http.StatusBadRequest, sferror.New(\"Could not get syncing params.\"))\n\t}\n\tparams.UserAgent = c.Request().UserAgent()\n\tparams.Session = currentSession(c)\n\n\tsync := service.NewSync(h.db, currentUser(c), params)\n\tif err := sync.Execute(); err != nil {\n\t\treturn err\n\t}\n\n\treturn c.JSON(http.StatusOK, sync)\n}", "title": "" }, { "docid": "f9510efff512478f0f99ea32f0acae04", "score": "0.53349435", "text": "func (c 
*Core) Sync() error {\n\tc.client.Flush(c.sentryFlushTimeout)\n\treturn nil\n}", "title": "" }, { "docid": "8fc5548d06e34ce491860ff4627d8e56", "score": "0.5330499", "text": "func (i *VCSInstaller) sync(repo vcs.Repo) error {\n\n\tif _, err := os.Stat(repo.LocalPath()); os.IsNotExist(err) {\n\t\treturn repo.Get()\n\t}\n\treturn repo.Update()\n}", "title": "" }, { "docid": "15a0ccbcad13be790dc21bc38282775d", "score": "0.5324411", "text": "func (o *Gateway) Update(exec boil.Executor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tkey := makeCacheKey(columns, nil)\n\tgatewayUpdateCacheMut.RLock()\n\tcache, cached := gatewayUpdateCache[key]\n\tgatewayUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tgatewayAllColumns,\n\t\t\tgatewayPrimaryKeyColumns,\n\t\t)\n\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"models: unable to update gateways, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"gateways\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, gatewayPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(gatewayType, gatewayMapping, append(wl, gatewayPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update gateways row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by update for gateways\")\n\t}\n\n\tif !cached {\n\t\tgatewayUpdateCacheMut.Lock()\n\t\tgatewayUpdateCache[key] = cache\n\t\tgatewayUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, nil\n}", "title": "" }, { "docid": "ea62dbeb9009b2c49d58727165464826", "score": "0.5322398", "text": "func (rc *reconciler) sync() {\n\tdefer rc.updateLastSyncTime()\n\trc.syncStates(rc.kubeletPodsDir)\n}", "title": "" }, { "docid": "ea62dbeb9009b2c49d58727165464826", "score": "0.5322398", "text": "func (rc *reconciler) sync() {\n\tdefer rc.updateLastSyncTime()\n\trc.syncStates(rc.kubeletPodsDir)\n}", "title": "" }, { "docid": "803f80e9feb56caae1e8207ac19a0705", "score": "0.5321314", "text": "func (ce *cacheEntry) update(value json.RawMessage) {\n\tce.mu.Lock()\n\tdefer ce.mu.Unlock()\n\n\tce.value = value\n\tce.err = nil\n\n\t// If it was done before, set it to done.\n\tselect {\n\tcase <-ce.done:\n\tdefault:\n\t\tclose(ce.done)\n\t}\n}", "title": "" }, { "docid": "5811f1c693559a098650fd85d71f9410", "score": "0.531911", "text": "func (d *Datastore) Sync(ctx context.Context, prefix ds.Key) error {\n\treturn nil\n}", "title": "" }, { "docid": "66396ae178e5940741652caf303e8128", "score": "0.5314755", "text": "func (o *CMFGift) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\tcmfGiftUpdateCacheMut.RLock()\n\tcache, cached := cmfGiftUpdateCache[key]\n\tcmfGiftUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := 
columns.UpdateColumnSet(\n\t\t\tcmfGiftAllColumns,\n\t\t\tcmfGiftPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"models: unable to update cmf_gift, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE `cmf_gift` SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"`\", \"`\", 0, wl),\n\t\t\tstrmangle.WhereClause(\"`\", \"`\", 0, cmfGiftPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(cmfGiftType, cmfGiftMapping, append(wl, cmfGiftPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update cmf_gift row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by update for cmf_gift\")\n\t}\n\n\tif !cached {\n\t\tcmfGiftUpdateCacheMut.Lock()\n\t\tcmfGiftUpdateCache[key] = cache\n\t\tcmfGiftUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(ctx, exec)\n}", "title": "" }, { "docid": "124e6ecc95cce98350bb053f42757212", "score": "0.53102285", "text": "func IncrSync(name string) {\n\tIncrDeltaSync(name, 1)\n}", "title": "" }, { "docid": "1f8dc51537307bcd4de453ef917a5b41", "score": "0.5300945", "text": "func (c *k8scache) UpdateStatus(client.Object) {}", "title": "" }, { "docid": "4e450f86597d898ef4aa96b05c7babe9", "score": "0.52950144", "text": "func (o *Repository) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\trepositoryUpdateCacheMut.RLock()\n\tcache, cached := repositoryUpdateCache[key]\n\trepositoryUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\trepositoryColumns,\n\t\t\trepositoryPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"models: unable to update repositories, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"repositories\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, repositoryPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(repositoryType, repositoryMapping, append(wl, repositoryPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update repositories row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: 
failed to get rows affected by update for repositories\")\n\t}\n\n\tif !cached {\n\t\trepositoryUpdateCacheMut.Lock()\n\t\trepositoryUpdateCache[key] = cache\n\t\trepositoryUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(ctx, exec)\n}", "title": "" }, { "docid": "c98a29f28c399db7ff98b69ba9b7e03d", "score": "0.5294496", "text": "func (o *Goauth) Update(exec boil.Executor, whitelist ...string) error {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(exec); err != nil {\n\t\treturn err\n\t}\n\tkey := makeCacheKey(whitelist, nil)\n\tgoauthUpdateCacheMut.RLock()\n\tcache, cached := goauthUpdateCache[key]\n\tgoauthUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := strmangle.UpdateColumnSet(goauthColumns, goauthPrimaryKeyColumns, whitelist)\n\t\tif len(whitelist) == 0 {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn errors.New(\"models: unable to update goauth, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"goauth\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, goauthPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(goauthType, goauthMapping, append(wl, goauthPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\t_, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to update goauth row\")\n\t}\n\n\tif !cached {\n\t\tgoauthUpdateCacheMut.Lock()\n\t\tgoauthUpdateCache[key] = cache\n\t\tgoauthUpdateCacheMut.Unlock()\n\t}\n\n\treturn o.doAfterUpdateHooks(exec)\n}", "title": "" }, { "docid": "f385ed0931ec2e6044cf1eb4e72e7265", "score": "0.52799594", "text": "func (s *podStorage) Sync() {\n\ts.updateLock.Lock()\n\tdefer s.updateLock.Unlock()\n\ts.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*v1.Pod), Op: kubetypes.SET, Source: kubetypes.AllSource}\n}", "title": "" }, { "docid": "29d244e5262df2384c104721bf09eead", "score": "0.5270341", "text": "func (c *KubeClient) Sync(exit <-chan struct{}) {\n\tif !cache.WaitForCacheSync(exit, c.PodInformer.HasSynced) {\n\t\tklog.Error(\"pod cache could not be synchronized\")\n\t}\n}", "title": "" }, { "docid": "6fcdebed3dca66168d4d6297357138ea", "score": "0.5269193", "text": "func (ac *AuthorizationCache) synchronize() {\n\t// if none of our internal reflectors changed, then we can skip reviewing the cache\n\tskip, currentState := ac.skip.SkipSynchronize(ac.lastState, ac.lastSyncResourceVersioner, ac.policyLastSyncResourceVersioner)\n\tif skip {\n\t\treturn\n\t}\n\n\t// by default, we update our current caches and do an incremental change\n\tuserSubjectRecordStore := ac.userSubjectRecordStore\n\tgroupSubjectRecordStore := ac.groupSubjectRecordStore\n\treviewRecordStore := ac.reviewRecordStore\n\n\t// if there was a global change that forced complete invalidation, we rebuild our cache and do a fast swap at end\n\tinvalidateCache := ac.invalidateCache()\n\tif invalidateCache {\n\t\tuserSubjectRecordStore = cache.NewStore(subjectRecordKeyFn)\n\t\tgroupSubjectRecordStore = cache.NewStore(subjectRecordKeyFn)\n\t\treviewRecordStore = cache.NewStore(reviewRecordKeyFn)\n\t}\n\n\t// iterate over caches and synchronize our 
three caches\n\tnewKnownNamespaces := ac.synchronizeNamespaces(userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore)\n\tac.synchronizePolicies(userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore)\n\tac.synchronizePolicyBindings(userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore)\n\tac.purgeDeletedNamespaces(ac.allKnownNamespaces, newKnownNamespaces, userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore)\n\n\t// if we did a full rebuild, now we swap the fully rebuilt cache\n\tif invalidateCache {\n\t\tac.userSubjectRecordStore = userSubjectRecordStore\n\t\tac.groupSubjectRecordStore = groupSubjectRecordStore\n\t\tac.reviewRecordStore = reviewRecordStore\n\t}\n\tac.allKnownNamespaces = newKnownNamespaces\n\n\t// we were able to update our cache since this last observation period\n\tac.lastState = currentState\n}", "title": "" }, { "docid": "642e8211e260d48001bd61019ab13294", "score": "0.5267145", "text": "func (_e *Manager_Expecter) Update(policy interface{}) *Manager_Update_Call {\n\treturn &Manager_Update_Call{Call: _e.mock.On(\"Update\", policy)}\n}", "title": "" }, { "docid": "c6d40686e73925447597a50bec0443ce", "score": "0.52646726", "text": "func (c mockStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error {\n\tc.calls = append(c.calls, mockFuncCall{\n\t\tctx: ctx,\n\t\tobj: obj,\n\t})\n\treturn c.err\n}", "title": "" }, { "docid": "3d713b6ba58864a7d61238c22fa1c2f7", "score": "0.5261428", "text": "func (c MockClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {\n\t// Check for induced errors\n\tif value, ok := c.InduceErrorKind[splcommon.MockClientInduceErrorUpdate]; ok && value != nil {\n\t\treturn value\n\t}\n\tc.Calls[\"Update\"] = append(c.Calls[\"Update\"], MockFuncCall{\n\t\tCTX: ctx,\n\t\tObj: obj,\n\t})\n\tc.State[getStateKey(obj)] = obj\n\treturn nil\n}", "title": "" }, { "docid": "34300d49b694c5ddff23e60ad10c09e9", "score": "0.5258888", "text": "func (h *UsecaseEvent) Update(ctx context.Context, data interface{}) error {\n\n\tif data == nil {\n\t\treturn errors.New(\"Data can't be null\")\n\t}\n\n\tctx, cancel := context.WithTimeout(ctx, h.contextTimeout)\n\tdefer cancel()\n\n\t// Manage version\n\tdata.(models.Model).SetVersion(data.(models.Model).GetVersion() + 1)\n\n\terr := h.ElasticRepo.Update(ctx, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Update data successfully\")\n\n\treturn nil\n}", "title": "" }, { "docid": "3586a40ed21903caaa8086023f1a0f30", "score": "0.525708", "text": "func (o *Ogp) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\togpUpdateCacheMut.RLock()\n\tcache, cached := ogpUpdateCache[key]\n\togpUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\togpAllColumns,\n\t\t\togpPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"models: unable to update ogp, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE `ogp` SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"`\", \"`\", 0, wl),\n\t\t\tstrmangle.WhereClause(\"`\", \"`\", 0, ogpPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(ogpType, ogpMapping, 
append(wl, ogpPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update ogp row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by update for ogp\")\n\t}\n\n\tif !cached {\n\t\togpUpdateCacheMut.Lock()\n\t\togpUpdateCache[key] = cache\n\t\togpUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(ctx, exec)\n}", "title": "" }, { "docid": "75df2dbe5959b05cdcbce37addf600ad", "score": "0.5255351", "text": "func (t *TransactionMetadata) Update(objectstorage.StorableObject) {\n\tpanic(\"updates disabled\")\n}", "title": "" }, { "docid": "2f4f47b9de8f3311c2db5987d25439e7", "score": "0.52514154", "text": "func (p *ScopedBBoltPersister) Sync() error {\n\treturn p.db.Update(func(tx *bbolt.Tx) error {\n\t\toffsetBucket, err := tx.CreateBucketIfNotExists(OffsetsBucket)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbucket, err := offsetBucket.CreateBucketIfNotExists(p.scope)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tp.cacheMux.Lock()\n\t\tfor k, v := range p.cache {\n\t\t\terr := bucket.Put([]byte(k), v)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tp.cacheMux.Unlock()\n\n\t\treturn nil\n\t})\n}", "title": "" }, { "docid": "79e1d4bea9ba422784621f635039350b", "score": "0.52514124", "text": "func (o *Accountdatum) Update(exec boil.Executor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\taccountdatumUpdateCacheMut.RLock()\n\tcache, cached := accountdatumUpdateCache[key]\n\taccountdatumUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\taccountdatumColumns,\n\t\t\taccountdatumPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"stellarcore: unable to update accountdata, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"accountdata\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, accountdatumPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(accountdatumType, accountdatumMapping, append(wl, accountdatumPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\tvar result sql.Result\n\tresult, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"stellarcore: unable to update accountdata row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"stellarcore: failed to get rows affected by update for accountdata\")\n\t}\n\n\tif !cached 
{\n\t\taccountdatumUpdateCacheMut.Lock()\n\t\taccountdatumUpdateCache[key] = cache\n\t\taccountdatumUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(exec)\n}", "title": "" }, { "docid": "80928d66dc4b6f20cff6389b35c504c2", "score": "0.52509296", "text": "func (m *ManagerMock) Update(ctx context.Context, r *hub.Repository) error {\n\targs := m.Called(ctx, r)\n\treturn args.Error(0)\n}", "title": "" }, { "docid": "34da94135343712e8ab89e2324f51bd6", "score": "0.5247027", "text": "func (c *taskCache) Update(ctx context.Context) error {\n\tctx, span := trace.StartSpan(ctx, \"taskcache_Update\")\n\tdefer span.End()\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\tc.modMtx.Lock()\n\tdefer c.modMtx.Unlock()\n\tc.expireTasks()\n\tfor _, t := range c.modified {\n\t\tif c.timeWindow.TestTime(t.Repo, t.Created) {\n\t\t\tc.insertOrUpdateTask(t)\n\t\t}\n\t}\n\tc.modified = map[string]*types.Task{}\n\treturn nil\n}", "title": "" }, { "docid": "56b54fb9e1b5c50d27893dcc451461f0", "score": "0.5242797", "text": "func (o *Claim) Update(exec boil.Executor, columns boil.Columns) error {\n\tvar err error\n\tkey := makeCacheKey(columns, nil)\n\tclaimUpdateCacheMut.RLock()\n\tcache, cached := claimUpdateCache[key]\n\tclaimUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tclaimAllColumns,\n\t\t\tclaimPrimaryKeyColumns,\n\t\t)\n\n\t\tif len(wl) == 0 {\n\t\t\treturn errors.New(\"model: unable to update claim, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE `claim` SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"`\", \"`\", 0, wl),\n\t\t\tstrmangle.WhereClause(\"`\", \"`\", 0, claimPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(claimType, claimMapping, append(wl, claimPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\t_, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"model: unable to update claim row\")\n\t}\n\n\tif !cached {\n\t\tclaimUpdateCacheMut.Lock()\n\t\tclaimUpdateCache[key] = cache\n\t\tclaimUpdateCacheMut.Unlock()\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "9455dfb00b203ad2bf2b1cddf2684cc4", "score": "0.5242754", "text": "func (o *Blob) Update(exec boil.Executor, columns boil.Columns) (int64, error) {\n\tcurrTime := time.Now().In(boil.GetLocation())\n\n\to.UpdatedAt = currTime\n\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\tblobUpdateCacheMut.RLock()\n\tcache, cached := blobUpdateCache[key]\n\tblobUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tblobAllColumns,\n\t\t\tblobPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"db: unable to update blobs, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"blobs\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, blobPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(blobType, blobMapping, append(wl, blobPrimaryKeyColumns...))\n\t\tif err != nil 
{\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"db: unable to update blobs row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"db: failed to get rows affected by update for blobs\")\n\t}\n\n\tif !cached {\n\t\tblobUpdateCacheMut.Lock()\n\t\tblobUpdateCache[key] = cache\n\t\tblobUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(exec)\n}", "title": "" }, { "docid": "45ca383184cdfc1e6274399ef3bbc1b8", "score": "0.52398854", "text": "func (this *Context) Update(spec *ar.ApicurioRegistry) {\n\tthis.configuration.Update(spec)\n\n\tspecEntry := NewResourceCacheEntry(spec.Name, spec)\n\tthis.resourceCache.Set(RC_KEY_SPEC, specEntry)\n}", "title": "" }, { "docid": "bf48272b9ac20fecac52db0b73a5ebaf", "score": "0.5233055", "text": "func (evc *Cache) Sync(publisher Publisher) error {\n\tvar err error\n\tfor _, mi := range evc.events {\n\t\tpublishErr := publisher.Publish(mi.ctx, mi.message, mi.tags)\n\t\t// Capture first by try to sync the rest\n\t\tif publishErr != nil && err == nil {\n\t\t\terr = publishErr\n\t\t}\n\t}\n\treturn err\n}", "title": "" }, { "docid": "1bdbd79ff0bca607a755e68cbd52b458", "score": "0.5232458", "text": "func (e *Entity) ApplyUpdate(reader *bitread.BitReader) {\n\te.Called(reader)\n}", "title": "" }, { "docid": "d915d4f794e7cdf74688a866c3cde583", "score": "0.5231846", "text": "func (o *Institution) Update(exec boil.Executor, whitelist ...string) error {\n\tcurrTime := time.Now().In(boil.GetLocation())\n\n\to.UpdatedAt = currTime\n\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(exec); err != nil {\n\t\treturn err\n\t}\n\tkey := makeCacheKey(whitelist, nil)\n\tinstitutionUpdateCacheMut.RLock()\n\tcache, cached := institutionUpdateCache[key]\n\tinstitutionUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := strmangle.UpdateColumnSet(\n\t\t\tinstitutionColumns,\n\t\t\tinstitutionPrimaryKeyColumns,\n\t\t\twhitelist,\n\t\t)\n\n\t\tif len(whitelist) == 0 {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn errors.New(\"models: unable to update institution, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"instruments\\\".\\\"institution\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, institutionPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(institutionType, institutionMapping, append(wl, institutionPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\t_, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to update institution row\")\n\t}\n\n\tif !cached {\n\t\tinstitutionUpdateCacheMut.Lock()\n\t\tinstitutionUpdateCache[key] = cache\n\t\tinstitutionUpdateCacheMut.Unlock()\n\t}\n\n\treturn o.doAfterUpdateHooks(exec)\n}", "title": "" }, { "docid": 
"39a3b689fc0b08e8426f1053db10dc9d", "score": "0.5231431", "text": "func (w *HotSpotCache) Update(key uint64, item *core.RegionStat, kind FlowKind) {\n\tswitch kind {\n\tcase WriteFlow:\n\t\tif item == nil {\n\t\t\tw.writeFlow.Remove(key)\n\t\t} else {\n\t\t\tw.writeFlow.Put(key, item)\n\t\t\tw.incMetrics(\"update_item\", kind)\n\t\t}\n\tcase ReadFlow:\n\t\tif item == nil {\n\t\t\tw.readFlow.Remove(key)\n\t\t} else {\n\t\t\tw.readFlow.Put(key, item)\n\t\t\tw.incMetrics(\"update_item\", kind)\n\t\t}\n\t}\n}", "title": "" }, { "docid": "5825a40342fac14b0a6ef2fa99648b4b", "score": "0.522997", "text": "func (cmd *AccUpdateCmd) Execute(args []string) error {\n\t_, _, err := updateDetailsCache()\n\treturn err\n}", "title": "" }, { "docid": "8eb129b7962701ae4da575368961b110", "score": "0.5215953", "text": "func (c *Client) commit(data *Data) error {\n\n\tdata.Index++\n\n\t// try to write to disk before updating in memory\n\tif c.node != nil {\n\t\tif err := snapshot(c.node.Path, data); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// update in memory\n\tc.cacheData = data\n\n\t// close channels to signal changes\n\tclose(c.changed)\n\tc.changed = make(chan struct{})\n\n\treturn nil\n}", "title": "" }, { "docid": "2764c6b36cb6a559e53d31bea863b544", "score": "0.5215111", "text": "func (s *lockBasedTxSimulator) ExecuteUpdate(query string) error {\n\treturn errors.New(\"Not supported\")\n}", "title": "" }, { "docid": "85feb3159057c4ccc0d1f4d281711e7b", "score": "0.52133805", "text": "func (s *LockBasedTxSimulator) ExecuteUpdate(query string) error {\n\treturn errors.New(\"Not supported by KV data model\")\n}", "title": "" }, { "docid": "3cfa094018a6e343136981fef03b46dd", "score": "0.5210557", "text": "func (s *static) Sync(add []cache.Container, del []cache.Container) error {\n\ts.Debug(\"synchronizing state...\")\n\tfor _, c := range del {\n\t\ts.ReleaseResources(c)\n\t}\n\tfor _, c := range add {\n\t\ts.AllocateResources(c)\n\t}\n\n\treturn nil\n}", "title": "" }, { "docid": "f3bca6393d787df5ed806fdce44a4f44", "score": "0.5204667", "text": "func (fc *FanCache) Update(force bool) {\n\n\t// Lock the cache object\n\tfc.CacheLock.Lock()\n\tdefer fc.CacheLock.Unlock()\n\n\t// Freeze transaction time to start of method\n\tnow := time.Now()\n\n\t// Do not update cache if we're not forced to\n\t// and if the update is not due yet\n\tif !force && !isStale(fc.LastUpdated, now) {\n\t\treturn\n\t}\n\n\t// Call out to the unit and update object\n\tif gf, err := libcomfo.GetFans(comfoConn); err == nil {\n\t\tfc.Fans = gf\n\t\tfc.LastUpdated = now\n\t} else {\n\t\tlog.Printf(\"FanCache.Update() - Error updating fan cache: %s\", err)\n\t}\n}", "title": "" }, { "docid": "8b90b788ea9ed3a902557e3dac47faa5", "score": "0.52015865", "text": "func (t *Transaction) Update(objectstorage.StorableObject) {\n\tpanic(\"updates disabled\")\n}", "title": "" }, { "docid": "e26d3834169be6616c082b41c824b57d", "score": "0.5199432", "text": "func (rmc *ReactiveMockClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {\n\tfun := getReactorFuncForObject(obj, &rmc.ReactorFuncs)\n\tif fun != nil {\n\t\treturn fun()\n\t}\n\n\treturn rmc.FakeClient.Update(ctx, obj, opts...)\n}", "title": "" }, { "docid": "b50794047f38edeee1b5ce47d70e4326", "score": "0.519718", "text": "func (c *CRDBManager) Update(policy ladon.Policy) error {\n\n\treturn nil\n}", "title": "" }, { "docid": "3f8bc84fb7ba7d60d1c0ebd49d609607", "score": "0.5192841", "text": "func (o *Datum) Update(exec boil.Executor, whitelist ...string) 
error {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(exec); err != nil {\n\t\treturn err\n\t}\n\tkey := makeCacheKey(whitelist, nil)\n\tdatumUpdateCacheMut.RLock()\n\tcache, cached := datumUpdateCache[key]\n\tdatumUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := strmangle.UpdateColumnSet(\n\t\t\tdatumColumns,\n\t\t\tdatumPrimaryKeyColumns,\n\t\t\twhitelist,\n\t\t)\n\n\t\tif len(whitelist) == 0 {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn errors.New(\"models: unable to update data, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"data\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, datumPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(datumType, datumMapping, append(wl, datumPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\t_, err = exec.Exec(cache.query, values...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to update data row\")\n\t}\n\n\tif !cached {\n\t\tdatumUpdateCacheMut.Lock()\n\t\tdatumUpdateCache[key] = cache\n\t\tdatumUpdateCacheMut.Unlock()\n\t}\n\n\treturn o.doAfterUpdateHooks(exec)\n}", "title": "" }, { "docid": "449ced9e9146d49bd66e53e147c60815", "score": "0.51911604", "text": "func (o *Oracle) updateCache(d dao.DAO) error {\n\torc, _ := o.Module.Load().(services.Oracle)\n\tif orc == nil {\n\t\treturn nil\n\t}\n\n\treqs := o.newRequests\n\to.newRequests = make(map[uint64]*state.OracleRequest)\n\tfor id := range reqs {\n\t\tkey := makeRequestKey(id)\n\t\tif si := d.GetStorageItem(o.ID, key); si == nil { // tx has failed\n\t\t\tdelete(reqs, id)\n\t\t}\n\t}\n\torc.AddRequests(reqs)\n\treturn nil\n}", "title": "" } ]
01e48f2cb76abc64371fa530fb3a0a2b
Inherit inherits previous generation of EurekaServiceRegistry.
[{"docid":"bbab7759d23ccad8c9e5bbac02ed32b7","score":"0.657721","text":"func (eureka *EurekaServiceR(...TRUNCATED)
[{"docid":"ae0c18695e1419c19a87730cafb12de2","score":"0.55394065","text":"func (sr *ServiceRegistry)(...TRUNCATED)
ee6d6c59b28361eaa4ffb0c05066549f
UUIDv5Val creates a UUID v5 string based on the given inputs. Return value is a big.Int
[{"docid":"bdcfdc9f5de81cc7a980e2c0f367c4ce","score":"0.8887028","text":"func UUIDv5Val(inputs ...st(...TRUNCATED)
[{"docid":"e2f929082cfd98df4bc6c1546b48f7a9","score":"0.78646064","text":"func UUIDv5(inputs ...stri(...TRUNCATED)
e88ffae39ed9e4e8175ad29d6b7c02c2
Where returns a new SchemaSlice whose elements return true for func. See:
[{"docid":"a2843a6ba3e4806297aa8b270e9f1726","score":"0.7655883","text":"func (rcv SchemaSlice) Wher(...TRUNCATED)
[{"docid":"2bdeb1b329840bca4c6ad03a78492d0e","score":"0.5590401","text":"func (rcv PlatformSlice) Wh(...TRUNCATED)
7a86926a2f642cb79cc248e44fb7de2d
A reference to frontend IP addresses.
[{"docid":"69ae72c648b919da837b10c59c1d8417","score":"0.0","text":"func (o InboundNatPoolOutput) Fro(...TRUNCATED)
[{"docid":"e2203f24da477df1beaf874e8821d71b","score":"0.65561885","text":"func (o DeploymentFrontend(...TRUNCATED)
48d250c036dc71b7837e5c4ff396dffc
getStatementCategory returns the proto.BL_ category for a SQL statement.
[{"docid":"44c2c8fa1226be605d04ab921181e1b5","score":"0.7585344","text":"func getStatementCategory(s(...TRUNCATED)
[{"docid":"15e23cd09b1c779a2fd4c2bf29e436e3","score":"0.5986222","text":"func (s *Swift) GetCategory(...TRUNCATED)
e75811b9207449488b7028f3759bc47a
"Connectorspecific properties required when using Slack. See Generic Connector Profile Properties fo(...TRUNCATED)
[{"docid":"071262a246174aec75adc8e96629dcbc","score":"0.5846951","text":"func (o ConnectorProfileCon(...TRUNCATED)
[{"docid":"a42c2a39f4527b767ee5da69a53c3d50","score":"0.56841123","text":"func (o ConnectorProfileCo(...TRUNCATED)
d59ec7d22dea69d9bc007606192c0e1c
Send sends a new line to the terminal, as if a user typed it
[{"docid":"88f3be8bdb7cca52143d01a3fe1ae19e","score":"0.66261685","text":"func (cp *ConsoleProcess) (...TRUNCATED)
[{"docid":"1926cc12c475a0f855845c34ee81c555","score":"0.6657518","text":"func (cp *ConsoleProcess) S(...TRUNCATED)