Address review comments and add more tests
Yuri Shkuro
6 years ago
42 | 42 | instancer = consul.NewInstancer(sdclient, logger, consulService, consulTags, passingOnly) |
43 | 43 | endpoints profilesvc.Endpoints |
44 | 44 | ) |
45 | // TODO: thought experiment | |
46 | mapping := []struct { | |
47 | factory func(s profilesvc.Service) endpoint.Endpoint | |
48 | endpoint *endpoint.Endpoint | |
49 | }{ | |
50 | { | |
51 | factory: profilesvc.MakePostProfileEndpoint, | |
52 | endpoint: &endpoints.PostProfileEndpoint, | |
53 | }, | |
54 | { | |
55 | factory: profilesvc.MakeGetProfileEndpoint, | |
56 | endpoint: &endpoints.GetProfileEndpoint, | |
57 | }, | |
58 | } | |
59 | for _, m := range mapping { | |
60 | factory := factoryFor(m.factory) | |
61 | endpointer := sd.NewEndpointer(instancer, factory, logger) | |
62 | balancer := lb.NewRoundRobin(endpointer) | |
63 | retry := lb.Retry(retryMax, retryTimeout, balancer) | |
64 | *m.endpoint = retry | |
65 | } | |
66 | // TODO: why not 2 lines per endpoint registration above instead of 7 lines per endpoint below? | |
67 | 45 | { |
68 | 46 | factory := factoryFor(profilesvc.MakePostProfileEndpoint) |
69 | 47 | endpointer := sd.NewEndpointer(instancer, factory, logger) |
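Both the table-driven loop above and the per-endpoint blocks below funnel through factoryFor, which does not appear in this hunk. A sketch of what such a helper typically looks like, assuming a per-instance profilesvc client constructor along the lines of client.New (the constructor and the captured logger are assumptions, not part of this diff):

    func factoryFor(makeEndpoint func(profilesvc.Service) endpoint.Endpoint) sd.Factory {
    	return func(instance string) (endpoint.Endpoint, io.Closer, error) {
    		// client.New is a hypothetical per-instance client constructor.
    		service, err := client.New(instance, logger) // logger captured from the enclosing scope
    		if err != nil {
    			return nil, nil, err
    		}
    		return makeEndpoint(service), nil, nil
    	}
    }

With that helper in place, the mapping table needs only a factory/endpoint pair per method, which is the point of the thought experiment.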
21 | 21 | endpoints []endpoint.Endpoint |
22 | 22 | logger log.Logger |
23 | 23 | invalidateDeadline time.Time |
24 | timeNow func() time.Time | |
24 | 25 | } |
25 | 26 | |
26 | 27 | type endpointCloser struct { |
35 | 36 | factory: factory, |
36 | 37 | cache: map[string]endpointCloser{}, |
37 | 38 | logger: logger, |
39 | timeNow: time.Now, | |
38 | 40 | } |
39 | 41 | } |
40 | 42 | |
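The new timeNow field is a test seam: the constructor wires in time.Now for production, and tests substitute a deterministic clock. Reduced to its essentials (the names here are hypothetical):

    type clocked struct {
    	timeNow func() time.Time // defaults to time.Now; tests override it
    }

    func newClocked() *clocked {
    	return &clocked{timeNow: time.Now}
    }

    // In a test:
    //   c := newClocked()
    //   fixed := time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC)
    //   c.timeNow = func() time.Time { return fixed }

TestCacheErrorAndTimeout below uses exactly this override to step time forward without sleeping.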
46 | 48 | c.mtx.Lock() |
47 | 49 | defer c.mtx.Unlock() |
48 | 50 | |
51 | // Happy path. | |
49 | 52 | if event.Err == nil { |
50 | 53 | c.updateCache(event.Instances) |
51 | c.invalidateDeadline = time.Time{} | |
52 | 54 | c.err = nil |
53 | } | |
54 | ||
55 | c.logger.Log("err", event.Err) | |
56 | ||
57 | if c.options.invalidateOnErrorTimeout == nil { | |
58 | // keep returning the last known endpoints on error | |
59 | 55 | return |
60 | 56 | } |
61 | 57 | |
58 | // Sad path. Something's gone wrong in sd. | |
59 | c.logger.Log("err", event.Err) | |
60 | if c.options.invalidateOnErrorTimeout == nil { | |
61 | return // keep returning the last known endpoints on error | |
62 | } | |
63 | if c.err != nil { | |
64 | return // already in the error state, do nothing & keep original error | |
65 | } | |
62 | 66 | c.err = event.Err |
63 | ||
64 | if !c.invalidateDeadline.IsZero() { | |
65 | // already in the error state, do nothing | |
66 | return | |
67 | } | |
68 | 67 | // set new deadline to invalidate Endpoints unless non-error Event is received |
69 | c.invalidateDeadline = time.Now().Add(*c.options.invalidateOnErrorTimeout) | |
68 | c.invalidateDeadline = c.timeNow().Add(*c.options.invalidateOnErrorTimeout) | |
70 | 69 | return |
71 | 70 | } |
72 | 71 | |
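Callers opt in to the invalidation behavior through an Endpointer option; a sketch of the wiring, assuming the option constructor matches the InvalidateOnError name used later in this diff:

    // The exact signature of InvalidateOnError is an assumption here.
    endpointer := sd.NewEndpointer(instancer, factory, logger,
    	sd.InvalidateOnError(5*time.Second))

Without the option, invalidateOnErrorTimeout stays nil and the cache keeps serving the last known endpoints on error indefinitely.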
120 | 119 | func (c *endpointCache) Endpoints() ([]endpoint.Endpoint, error) { |
121 | 120 | c.mtx.RLock() |
122 | 121 | |
123 | if c.err == nil || time.Now().Before(c.invalidateDeadline) { | |
122 | if c.err == nil || c.timeNow().Before(c.invalidateDeadline) { | |
124 | 123 | defer c.mtx.RUnlock() |
125 | 124 | return c.endpoints, nil |
126 | 125 | } |
129 | 128 | c.mtx.Lock() |
130 | 129 | defer c.mtx.Unlock() |
131 | 130 | |
131 | // Re-check due to a race between RUnlock() and Lock(). | |
132 | if c.err == nil || c.timeNow().Before(c.invalidateDeadline) { | |
133 | return c.endpoints, nil | |
134 | } | |
135 | ||
132 | 136 | c.updateCache(nil) // close any remaining active endpoints |
133 | ||
134 | 137 | return nil, c.err |
135 | 138 | } |
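The re-check under the write lock is necessary because sync.RWMutex offers no atomic upgrade: between RUnlock() and Lock(), another goroutine may call Update with a healthy event and clear the error. The shape of the pattern in isolation (valid() stands in for the err/deadline check above):

    c.mtx.RLock()
    if c.valid() { // fast path under the read lock
    	defer c.mtx.RUnlock()
    	return c.endpoints, nil
    }
    c.mtx.RUnlock() // release before acquiring the write lock

    c.mtx.Lock()
    defer c.mtx.Unlock()
    if c.valid() { // re-check: state may have changed in the gap
    	return c.endpoints, nil
    }
    c.updateCache(nil) // tear down any remaining endpoints
    return nil, c.err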
42 | 42 | } |
43 | 43 | assertEndpointsLen(t, cache, 2) |
44 | 44 | |
45 | // Error, should continue returning old endpoints | |
46 | cache.Update(Event{Err: errors.New("sd error")}) | |
47 | select { | |
48 | case <-ca: | |
49 | t.Errorf("endpoint a closed, not good") | |
50 | case <-cb: | |
51 | t.Errorf("endpoint b closed, not good") | |
52 | case <-time.After(time.Millisecond): | |
53 | t.Logf("no closures yet, good") | |
54 | } | |
55 | assertEndpointsLen(t, cache, 2) | |
56 | ||
45 | 57 | // Delete b |
46 | 58 | go cache.Update(Event{Instances: []string{"a"}}) |
47 | 59 | select { |
66 | 78 | assertEndpointsLen(t, cache, 0) |
67 | 79 | } |
68 | 80 | |
81 | func TestCacheErrorAndTimeout(t *testing.T) { | |
82 | var ( | |
83 | ca = make(closer) | |
84 | cb = make(closer) | |
85 | c = map[string]io.Closer{"a": ca, "b": cb} | |
86 | f = func(instance string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, c[instance], nil } | |
87 | timeOut = 100 * time.Millisecond | |
88 | cache = newEndpointCache(f, log.NewNopLogger(), endpointerOptions{invalidateOnErrorTimeout: &timeOut}) | |
89 | ) | |
90 | ||
91 | timeNow := time.Now() | |
92 | cache.timeNow = func() time.Time { return timeNow } | |
93 | ||
94 | // Populate | |
95 | cache.Update(Event{Instances: []string{"a"}}) | |
96 | select { | |
97 | case <-ca: | |
98 | t.Errorf("endpoint a closed, not good") | |
99 | case <-time.After(time.Millisecond): | |
100 | t.Logf("no closures yet, good") | |
101 | } | |
102 | assertEndpointsLen(t, cache, 1) | |
103 | ||
104 | // Send error, keep time still. | |
105 | cache.Update(Event{Err: errors.New("sd error")}) | |
106 | select { | |
107 | case <-ca: | |
108 | t.Errorf("endpoint a closed, not good") | |
109 | case <-time.After(time.Millisecond): | |
110 | t.Logf("no closures yet, good") | |
111 | } | |
112 | assertEndpointsLen(t, cache, 1) | |
113 | ||
114 | // Move the time, but less than the timeout | |
115 | timeNow = timeNow.Add(timeOut / 2) | |
116 | assertEndpointsLen(t, cache, 1) | |
117 | select { | |
118 | case <-ca: | |
119 | t.Errorf("endpoint a closed, not good") | |
120 | case <-time.After(time.Millisecond): | |
121 | t.Logf("no closures yet, good") | |
122 | } | |
123 | ||
124 | // Move the time past the timeout | |
125 | timeNow = timeNow.Add(timeOut) | |
126 | assertEndpointsError(t, cache, "sd error") | |
127 | select { | |
128 | case <-ca: | |
129 | t.Logf("endpoint a closed, good") | |
130 | case <-time.After(time.Millisecond): | |
131 | t.Errorf("didn't close the deleted instance in time") | |
132 | } | |
133 | ||
134 | // Send another error | |
135 | cache.Update(Event{Err: errors.New("another sd error")}) | |
136 | assertEndpointsError(t, cache, "sd error") // expect original error | |
137 | } | |
138 | ||
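The test drives time by hand through the timeNow override. The same idea generalizes to a small fake-clock helper, sketched here (hypothetical, not part of this PR):

    type fakeClock struct{ t time.Time }

    func (f *fakeClock) Now() time.Time          { return f.t }
    func (f *fakeClock) Advance(d time.Duration) { f.t = f.t.Add(d) }

    // Wiring: cache.timeNow = clock.Now, after which clock.Advance(timeOut)
    // replaces the timeNow reassignments above, again with no sleeping.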
69 | 139 | func TestBadFactory(t *testing.T) { |
70 | 140 | cache := newEndpointCache(func(string) (endpoint.Endpoint, io.Closer, error) { |
71 | 141 | return nil, nil, errors.New("bad factory") |
86 | 156 | } |
87 | 157 | } |
88 | 158 | |
159 | func assertEndpointsError(t *testing.T, cache *endpointCache, wantErr string) { | |
160 | endpoints, err := cache.Endpoints() | |
161 | if err == nil { | |
162 | t.Errorf("expecting error, not good") | |
163 | return | |
164 | } | |
165 | if want, have := wantErr, err.Error(); want != have { | |
166 | t.Errorf("want %s, have %s", want, have) | |
167 | return | |
168 | } | |
169 | if want, have := 0, len(endpoints); want != have { | |
170 | t.Errorf("want %d, have %d", want, have) | |
171 | } | |
172 | } | |
173 | ||
89 | 174 | type closer chan struct{} |
90 | 175 | |
91 | 176 | func (c closer) Close() error { close(c); return nil } |
14 | 14 | |
15 | 15 | // Instancer yields instances for a service in Consul. |
16 | 16 | type Instancer struct { |
17 | instance.Cache | |
17 | cache *instance.Cache | |
18 | 18 | client Client |
19 | 19 | logger log.Logger |
20 | 20 | service string |
28 | 28 | // are present. |
29 | 29 | func NewInstancer(client Client, logger log.Logger, service string, tags []string, passingOnly bool) *Instancer { |
30 | 30 | s := &Instancer{ |
31 | Cache: *instance.NewCache(), | |
31 | cache: instance.NewCache(), | |
32 | 32 | client: client, |
33 | 33 | logger: log.With(logger, "service", service, "tags", fmt.Sprint(tags)), |
34 | 34 | service: service, |
44 | 44 | s.logger.Log("err", err) |
45 | 45 | } |
46 | 46 | |
47 | s.Update(sd.Event{Instances: instances, Err: err}) | |
47 | s.cache.Update(sd.Event{Instances: instances, Err: err}) | |
48 | 48 | go s.loop(index) |
49 | 49 | return s |
50 | 50 | } |
66 | 66 | return // stopped via quitc |
67 | 67 | case err != nil: |
68 | 68 | s.logger.Log("err", err) |
69 | s.Update(sd.Event{Err: err}) | |
69 | s.cache.Update(sd.Event{Err: err}) | |
70 | 70 | default: |
71 | s.Update(sd.Event{Instances: instances}) | |
71 | s.cache.Update(sd.Event{Instances: instances}) | |
72 | 72 | } |
73 | 73 | } |
74 | 74 | } |
122 | 122 | } |
123 | 123 | } |
124 | 124 | |
125 | // Register implements Instancer. | |
126 | func (s *Instancer) Register(ch chan<- sd.Event) { | |
127 | s.cache.Register(ch) | |
128 | } | |
129 | ||
130 | // Deregister implements Instancer. | |
131 | func (s *Instancer) Deregister(ch chan<- sd.Event) { | |
132 | s.cache.Deregister(ch) | |
133 | } | |
134 | ||
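Replacing the embedded instance.Cache with a private cache field is deliberate: embedding exported every Cache method (Update, State, and so on) on the public Instancer type, whereas explicit delegation narrows the surface to what the sd.Instancer contract requires. The same change is applied to the dnssrv, etcd, eureka, and zk instancers later in this diff. For reference, the interface these two methods (plus Stop) satisfy, as defined in package sd:

    type Instancer interface {
    	Register(chan<- Event)
    	Deregister(chan<- Event)
    	Stop()
    }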
125 | 135 | func filterEntries(entries []*consul.ServiceEntry, tags ...string) []*consul.ServiceEntry { |
126 | 136 | var es []*consul.ServiceEntry |
127 | 137 |
68 | 68 | s := NewInstancer(client, logger, "search", []string{"api"}, true) |
69 | 69 | defer s.Stop() |
70 | 70 | |
71 | state := s.State() | |
71 | state := s.cache.State() | |
72 | 72 | if want, have := 2, len(state.Instances); want != have { |
73 | 73 | t.Errorf("want %d, have %d", want, have) |
74 | 74 | } |
83 | 83 | s := NewInstancer(client, logger, "feed", []string{}, true) |
84 | 84 | defer s.Stop() |
85 | 85 | |
86 | state := s.State() | |
86 | state := s.cache.State() | |
87 | 87 | if want, have := 0, len(state.Instances); want != have { |
88 | 88 | t.Fatalf("want %d, have %d", want, have) |
89 | 89 | } |
98 | 98 | s := NewInstancer(client, logger, "search", []string{"api", "v2"}, true) |
99 | 99 | defer s.Stop() |
100 | 100 | |
101 | state := s.State() | |
101 | state := s.cache.State() | |
102 | 102 | if want, have := 1, len(state.Instances); want != have { |
103 | 103 | t.Fatalf("want %d, have %d", want, have) |
104 | 104 | } |
108 | 108 | s := NewInstancer(newTestClient(consulState), log.NewNopLogger(), "search", []string{"db"}, true) |
109 | 109 | defer s.Stop() |
110 | 110 | |
111 | state := s.State() | |
111 | state := s.cache.State() | |
112 | 112 | if want, have := 1, len(state.Instances); want != have { |
113 | 113 | t.Fatalf("want %d, have %d", want, have) |
114 | 114 | } |
12 | 12 | // Instancer yields instances from the named DNS SRV record. The name is |
13 | 13 | // resolved on a fixed schedule. Priorities and weights are ignored. |
14 | 14 | type Instancer struct { |
15 | instance.Cache | |
15 | cache *instance.Cache | |
16 | 16 | name string |
17 | 17 | logger log.Logger |
18 | 18 | quit chan struct{} |
37 | 37 | logger log.Logger, |
38 | 38 | ) *Instancer { |
39 | 39 | p := &Instancer{ |
40 | Cache: *instance.NewCache(), | |
40 | cache: instance.NewCache(), | |
41 | 41 | name: name, |
42 | 42 | logger: logger, |
43 | 43 | quit: make(chan struct{}), |
49 | 49 | } else { |
50 | 50 | logger.Log("name", name, "err", err) |
51 | 51 | } |
52 | p.Update(sd.Event{Instances: instances, Err: err}) | |
52 | p.cache.Update(sd.Event{Instances: instances, Err: err}) | |
53 | 53 | |
54 | 54 | go p.loop(refresh, lookup) |
55 | 55 | return p |
68 | 68 | instances, err := p.resolve(lookup) |
69 | 69 | if err != nil { |
70 | 70 | p.logger.Log("name", p.name, "err", err) |
71 | p.Update(sd.Event{Err: err}) | |
71 | p.cache.Update(sd.Event{Err: err}) | |
72 | 72 | continue // don't replace potentially-good with bad |
73 | 73 | } |
74 | p.Update(sd.Event{Instances: instances}) | |
74 | p.cache.Update(sd.Event{Instances: instances}) | |
75 | 75 | |
76 | 76 | case <-p.quit: |
77 | 77 | return |
90 | 90 | } |
91 | 91 | return instances, nil |
92 | 92 | } |
93 | ||
94 | // Register implements Instancer. | |
95 | func (s *Instancer) Register(ch chan<- sd.Event) { | |
96 | s.cache.Register(ch) | |
97 | } | |
98 | ||
99 | // Deregister implements Instancer. | |
100 | func (s *Instancer) Deregister(ch chan<- sd.Event) { | |
101 | s.cache.Deregister(ch) | |
102 | } |
31 | 31 | defer instancer.Stop() |
32 | 32 | |
33 | 33 | // First lookup, empty |
34 | state := instancer.State() | |
34 | state := instancer.cache.State() | |
35 | 35 | if state.Err != nil { |
36 | 36 | t.Error(state.Err) |
37 | 37 | } |
55 | 55 | // TODO(pb): solve by running the read through the loop goroutine. |
56 | 56 | time.Sleep(100 * time.Millisecond) |
57 | 57 | |
58 | state = instancer.State() | |
58 | state = instancer.cache.State() | |
59 | 59 | if state.Err != nil { |
60 | 60 | t.Error(state.Err) |
61 | 61 | } |
23 | 23 | // NewEndpointer creates an Endpointer that subscribes to updates from Instancer src |
24 | 24 | // and uses factory f to create Endpoints. If src notifies of an error, the Endpointer |
25 | 25 | // keeps returning previously created Endpoints assuming they are still good, unless |
26 | // this behavior is disabled with ResetOnError option. | |
27 | func NewEndpointer(src Instancer, f Factory, logger log.Logger, options ...EndpointerOption) Endpointer { | |
26 | // this behavior is disabled via the InvalidateOnError option. | |
27 | func NewEndpointer(src Instancer, f Factory, logger log.Logger, options ...EndpointerOption) *DefaultEndpointer { | |
28 | 28 | opts := endpointerOptions{} |
29 | 29 | for _, opt := range options { |
30 | 30 | opt(&opts) |
31 | 31 | } |
32 | se := &simpleEndpointer{ | |
33 | endpointCache: *newEndpointCache(f, logger, opts), | |
34 | instancer: src, | |
35 | ch: make(chan Event), | |
32 | se := &DefaultEndpointer{ | |
33 | cache: newEndpointCache(f, logger, opts), | |
34 | instancer: src, | |
35 | ch: make(chan Event), | |
36 | 36 | } |
37 | 37 | go se.receive() |
38 | 38 | src.Register(se.ch) |
59 | 59 | invalidateOnErrorTimeout *time.Duration |
60 | 60 | } |
61 | 61 | |
62 | type simpleEndpointer struct { | |
63 | endpointCache | |
64 | ||
62 | // DefaultEndpointer implements the Endpointer interface. | |
63 | // When created with the NewEndpointer function, it automatically registers | |
64 | // as a subscriber to events from the Instancer and maintains a list | |
65 | // of active Endpoints. | |
66 | type DefaultEndpointer struct { | |
67 | cache *endpointCache | |
65 | 68 | instancer Instancer |
66 | 69 | ch chan Event |
67 | 70 | } |
68 | 71 | |
69 | func (se *simpleEndpointer) receive() { | |
70 | for event := range se.ch { | |
71 | se.Update(event) | |
72 | func (de *DefaultEndpointer) receive() { | |
73 | for event := range de.ch { | |
74 | de.cache.Update(event) | |
72 | 75 | } |
73 | 76 | } |
74 | 77 | |
75 | func (se *simpleEndpointer) Close() { | |
76 | se.instancer.Deregister(se.ch) | |
77 | close(se.ch) | |
78 | // Close deregisters the DefaultEndpointer from the Instancer and stops the internal goroutine. | |
79 | func (de *DefaultEndpointer) Close() { | |
80 | de.instancer.Deregister(de.ch) | |
81 | close(de.ch) | |
78 | 82 | } |
83 | ||
84 | // Endpoints implements Endpointer. | |
85 | func (de *DefaultEndpointer) Endpoints() ([]endpoint.Endpoint, error) { | |
86 | return de.cache.Endpoints() | |
87 | } |
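End to end, a DefaultEndpointer is wired much like the profilesvc hunk at the top of this diff (retryMax and retryTimeout are placeholders):

    endpointer := sd.NewEndpointer(instancer, factory, logger)
    defer endpointer.Close() // deregister from the Instancer, stop receive()

    balancer := lb.NewRoundRobin(endpointer)
    retry := lb.Retry(retryMax, retryTimeout, balancer)

Returning the concrete *DefaultEndpointer from NewEndpointer, rather than the Endpointer interface, is what makes the deferred Close available to callers.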
8 | 8 | // Instancer yields instances stored in a certain etcd keyspace. Any kind of |
9 | 9 | // change in that keyspace is watched and will update the subscribers.
10 | 10 | type Instancer struct { |
11 | instance.Cache | |
11 | cache *instance.Cache | |
12 | 12 | client Client |
13 | 13 | prefix string |
14 | 14 | logger log.Logger |
21 | 21 | s := &Instancer{ |
22 | 22 | client: c, |
23 | 23 | prefix: prefix, |
24 | Cache: *instance.NewCache(), | |
24 | cache: instance.NewCache(), | |
25 | 25 | logger: logger, |
26 | 26 | quitc: make(chan struct{}), |
27 | 27 | } |
32 | 32 | } else { |
33 | 33 | logger.Log("prefix", s.prefix, "err", err) |
34 | 34 | } |
35 | s.Update(sd.Event{Instances: instances, Err: err}) | |
35 | s.cache.Update(sd.Event{Instances: instances, Err: err}) | |
36 | 36 | |
37 | 37 | go s.loop() |
38 | 38 | return s, nil |
47 | 47 | instances, err := s.client.GetEntries(s.prefix) |
48 | 48 | if err != nil { |
49 | 49 | s.logger.Log("msg", "failed to retrieve entries", "err", err) |
50 | s.Update(sd.Event{Err: err}) | |
50 | s.cache.Update(sd.Event{Err: err}) | |
51 | 51 | continue |
52 | 52 | } |
53 | s.Update(sd.Event{Instances: instances}) | |
53 | s.cache.Update(sd.Event{Instances: instances}) | |
54 | 54 | |
55 | 55 | case <-s.quitc: |
56 | 56 | return |
62 | 62 | func (s *Instancer) Stop() { |
63 | 63 | close(s.quitc) |
64 | 64 | } |
65 | ||
66 | // Register implements Instancer. | |
67 | func (s *Instancer) Register(ch chan<- sd.Event) { | |
68 | s.cache.Register(ch) | |
69 | } | |
70 | ||
71 | // Deregister implements Instancer. | |
72 | func (s *Instancer) Deregister(ch chan<- sd.Event) { | |
73 | s.cache.Deregister(ch) | |
74 | } |
35 | 35 | } |
36 | 36 | defer s.Stop() |
37 | 37 | |
38 | if state := s.State(); state.Err != nil { | |
38 | if state := s.cache.State(); state.Err != nil { | |
39 | 39 | t.Fatal(state.Err) |
40 | 40 | } |
41 | 41 | } |
12 | 12 | // Instancer yields instances stored in the Eureka registry for the given app. |
13 | 13 | // Changes in that app are watched and will update the subscribers. |
14 | 14 | type Instancer struct { |
15 | instance.Cache | |
15 | cache *instance.Cache | |
16 | 16 | conn fargoConnection |
17 | 17 | app string |
18 | 18 | logger log.Logger |
25 | 25 | logger = log.With(logger, "app", app) |
26 | 26 | |
27 | 27 | s := &Instancer{ |
28 | Cache: *instance.NewCache(), | |
28 | cache: instance.NewCache(), | |
29 | 29 | conn: conn, |
30 | 30 | app: app, |
31 | 31 | logger: logger, |
39 | 39 | s.logger.Log("during", "getInstances", "err", err) |
40 | 40 | } |
41 | 41 | |
42 | s.Update(sd.Event{Instances: instances, Err: err}) | |
42 | s.cache.Update(sd.Event{Instances: instances, Err: err}) | |
43 | 43 | go s.loop() |
44 | 44 | return s |
45 | 45 | } |
65 | 65 | case update := <-updatec: |
66 | 66 | if update.Err != nil { |
67 | 67 | s.logger.Log("during", "Update", "err", update.Err) |
68 | s.Update(sd.Event{Err: update.Err}) | |
68 | s.cache.Update(sd.Event{Err: update.Err}) | |
69 | 69 | continue |
70 | 70 | } |
71 | 71 | instances := convertFargoAppToInstances(update.App) |
72 | 72 | s.logger.Log("instances", len(instances)) |
73 | s.Update(sd.Event{Instances: instances}) | |
73 | s.cache.Update(sd.Event{Instances: instances}) | |
74 | 74 | |
75 | 75 | case q := <-s.quitc: |
76 | 76 | close(q) |
94 | 94 | } |
95 | 95 | return instances |
96 | 96 | } |
97 | ||
98 | // Register implements Instancer. | |
99 | func (s *Instancer) Register(ch chan<- sd.Event) { | |
100 | s.cache.Register(ch) | |
101 | } | |
102 | ||
103 | // Deregister implements Instancer. | |
104 | func (s *Instancer) Deregister(ch chan<- sd.Event) { | |
105 | s.cache.Deregister(ch) | |
106 | } |
20 | 20 | instancer := NewInstancer(connection, appNameTest, loggerTest) |
21 | 21 | defer instancer.Stop() |
22 | 22 | |
23 | state := instancer.State() | |
23 | state := instancer.cache.State() | |
24 | 24 | if state.Err != nil { |
25 | 25 | t.Fatal(state.Err) |
26 | 26 | } |
40 | 40 | instancer := NewInstancer(connection, appNameTest, loggerTest) |
41 | 41 | defer instancer.Stop() |
42 | 42 | |
43 | state := instancer.State() | |
43 | state := instancer.cache.State() | |
44 | 44 | if want, have := 1, len(state.Instances); want != have { |
45 | 45 | t.Errorf("want %d, have %d", want, have) |
46 | 46 | } |
47 | 47 | |
48 | 48 | time.Sleep(50 * time.Millisecond) |
49 | 49 | |
50 | state = instancer.State() | |
50 | state = instancer.cache.State() | |
51 | 51 | if want, have := 2, len(state.Instances); want != have { |
52 | 52 | t.Errorf("want %v, have %v", want, have) |
53 | 53 | } |
64 | 64 | instancer := NewInstancer(connection, appNameTest, loggerTest) |
65 | 65 | defer instancer.Stop() |
66 | 66 | |
67 | state := instancer.State() | |
67 | state := instancer.cache.State() | |
68 | 68 | if state.Err == nil { |
69 | 69 | t.Fatal("expecting error") |
70 | 70 | } |
84 | 84 | instancer := NewInstancer(connection, appNameTest, loggerTest) |
85 | 85 | defer instancer.Stop() |
86 | 86 | |
87 | state := instancer.State() | |
87 | state := instancer.cache.State() | |
88 | 88 | if state.Err != nil { |
89 | 89 | t.Error(state.Err) |
90 | 90 | } |
94 | 94 | |
95 | 95 | time.Sleep(50 * time.Millisecond) |
96 | 96 | |
97 | state = instancer.State() | |
97 | state = instancer.cache.State() | |
98 | 98 | if state.Err == nil { |
99 | 99 | t.Fatal("expecting error") |
100 | 100 | } |
10 | 10 | // Instancer yields instances stored in a certain ZooKeeper path. Any kind of
11 | 11 | // change in that path is watched and will update the subscribers. |
12 | 12 | type Instancer struct { |
13 | instance.Cache | |
13 | cache *instance.Cache | |
14 | 14 | client Client |
15 | 15 | path string |
16 | 16 | logger log.Logger |
21 | 21 | // the given path for changes and update the Instancer endpoints. |
22 | 22 | func NewInstancer(c Client, path string, logger log.Logger) (*Instancer, error) { |
23 | 23 | s := &Instancer{ |
24 | Cache: *instance.NewCache(), | |
24 | cache: instance.NewCache(), | |
25 | 25 | client: c, |
26 | 26 | path: path, |
27 | 27 | logger: logger, |
36 | 36 | instances, eventc, err := s.client.GetEntries(s.path) |
37 | 37 | if err != nil { |
38 | 38 | logger.Log("path", s.path, "msg", "failed to retrieve entries", "err", err) |
39 | // TODO why zk constructor exits when other implementations continue? | |
39 | // other implementations continue here, but we exit because we don't know if eventc is valid | |
40 | 40 | return nil, err |
41 | 41 | } |
42 | 42 | logger.Log("path", s.path, "instances", len(instances)) |
43 | s.Update(sd.Event{Instances: instances}) | |
43 | s.cache.Update(sd.Event{Instances: instances}) | |
44 | 44 | |
45 | 45 | go s.loop(eventc) |
46 | 46 | |
61 | 61 | instances, eventc, err = s.client.GetEntries(s.path) |
62 | 62 | if err != nil { |
63 | 63 | s.logger.Log("path", s.path, "msg", "failed to retrieve entries", "err", err) |
64 | s.Update(sd.Event{Err: err}) | |
64 | s.cache.Update(sd.Event{Err: err}) | |
65 | 65 | continue |
66 | 66 | } |
67 | 67 | s.logger.Log("path", s.path, "instances", len(instances)) |
68 | s.Update(sd.Event{Instances: instances}) | |
68 | s.cache.Update(sd.Event{Instances: instances}) | |
69 | 69 | |
70 | 70 | case <-s.quitc: |
71 | 71 | return |
77 | 77 | func (s *Instancer) Stop() { |
78 | 78 | close(s.quitc) |
79 | 79 | } |
80 | ||
81 | // Register implements Instancer. | |
82 | func (s *Instancer) Register(ch chan<- sd.Event) { | |
83 | s.cache.Register(ch) | |
84 | } | |
85 | ||
86 | // Deregister implements Instancer. | |
87 | func (s *Instancer) Deregister(ch chan<- sd.Event) { | |
88 | s.cache.Deregister(ch) | |
89 | } |