Merge pull request #276 from go-kit/sd-v2
package sd (take 2)
Peter Bourgon
7 years ago
8 | 8 | // Endpoint is the fundamental building block of servers and clients. |
9 | 9 | // It represents a single RPC method. |
10 | 10 | type Endpoint func(ctx context.Context, request interface{}) (response interface{}, err error) |
11 | ||
12 | // Nop is an endpoint that does nothing and returns a nil error. | |
13 | // Useful for tests. | |
14 | func Nop(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } | |
11 | 15 | |
12 | 16 | // Middleware is a chainable behavior modifier for endpoints. |
13 | 17 | type Middleware func(Endpoint) Endpoint |
74 | 74 | |
75 | 75 | type uppercaseResponse struct { |
76 | 76 | V string `json:"v"` |
77 | Err string `json:"err,omitempty"` // errors don't define JSON marshaling | |
77 | Err string `json:"err,omitempty"` // errors don't JSON-marshal, so we use a string | |
78 | 78 | } |
79 | 79 | |
80 | 80 | type countRequest struct { |
97 | 97 | An endpoint represents a single RPC. |
98 | 98 | That is, a single method in our service interface. |
99 | 99 | We'll write simple adapters to convert each of our service's methods into an endpoint. |
100 | Each adapter takes a StringService, and returns an endpoint that corresponds to one of the methods. | |
100 | 101 | |
101 | 102 | ```go |
102 | 103 | import ( |
280 | 281 | which wraps an existing StringService, and performs the extra logging duties. |
281 | 282 | |
282 | 283 | ```go |
283 | type loggingMiddleware struct{ | |
284 | type loggingMiddleware struct { | |
284 | 285 | logger log.Logger |
285 | StringService | |
286 | next StringService | |
286 | 287 | } |
287 | 288 | |
288 | 289 | func (mw loggingMiddleware) Uppercase(s string) (output string, err error) { |
296 | 297 | ) |
297 | 298 | }(time.Now()) |
298 | 299 | |
299 | output, err = mw.StringService.Uppercase(s) | |
300 | output, err = mw.next.Uppercase(s) | |
300 | 301 | return |
301 | 302 | } |
302 | 303 | |
310 | 311 | ) |
311 | 312 | }(time.Now()) |
312 | 313 | |
313 | n = mw.StringService.Count(s) | |
314 | n = mw.next.Count(s) | |
314 | 315 | return |
315 | 316 | } |
316 | 317 | ``` |
328 | 329 | func main() { |
329 | 330 | logger := log.NewLogfmtLogger(os.Stderr) |
330 | 331 | |
331 | svc := stringService{} | |
332 | var svc StringService | |
333 | 	svc = stringService{} | |
332 | 334 | svc = loggingMiddleware{logger, svc} |
335 | ||
336 | // ... | |
333 | 337 | |
334 | 338 | uppercaseHandler := httptransport.NewServer( |
335 | 339 | // ... |
363 | 367 | requestCount metrics.Counter |
364 | 368 | requestLatency metrics.TimeHistogram |
365 | 369 | countResult metrics.Histogram |
366 | StringService | |
370 | next StringService | |
367 | 371 | } |
368 | 372 | |
369 | 373 | func (mw instrumentingMiddleware) Uppercase(s string) (output string, err error) { |
374 | 378 | mw.requestLatency.With(methodField).With(errorField).Observe(time.Since(begin)) |
375 | 379 | }(time.Now()) |
376 | 380 | |
377 | output, err = mw.StringService.Uppercase(s) | |
381 | output, err = mw.next.Uppercase(s) | |
378 | 382 | return |
379 | 383 | } |
380 | 384 | |
387 | 391 | mw.countResult.Observe(int64(n)) |
388 | 392 | }(time.Now()) |
389 | 393 | |
390 | n = mw.StringService.Count(s) | |
394 | n = mw.next.Count(s) | |
391 | 395 | return |
392 | 396 | } |
393 | 397 | ``` |
415 | 419 | // ... |
416 | 420 | }, []string{})) |
417 | 421 | |
418 | svc := stringService{} | |
422 | var svc StringService | |
423 | svc = stringService{} | |
419 | 424 | svc = loggingMiddleware{logger, svc} |
420 | 425 | svc = instrumentingMiddleware{requestCount, requestLatency, countResult, svc} |
421 | 426 | |
422 | uppercaseHandler := httptransport.NewServer( | |
423 | // ... | |
424 | makeUppercaseEndpoint(svc), | |
425 | // ... | |
426 | ) | |
427 | ||
428 | countHandler := httptransport.NewServer( | |
429 | // ... | |
430 | makeCountEndpoint(svc), | |
431 | // ... | |
432 | ) | |
427 | // ... | |
433 | 428 | |
434 | 429 | http.Handle("/metrics", stdprometheus.Handler()) |
435 | 430 | } |
466 | 461 | **This is where Go kit shines**. |
467 | 462 | We provide transport middlewares to solve many of the problems that come up. |
468 | 463 | |
469 | Let's implement the proxying middleware as a ServiceMiddleware. | |
470 | We'll only proxy one method, Uppercase. | |
464 | Let's say that we want to have our string service call out to a _different_ string service | |
465 | to satisfy the Uppercase method. | |
466 | In effect, proxying the request to another service. | |
467 | Let's implement the proxying middleware as a ServiceMiddleware, same as a logging or instrumenting middleware. | |
471 | 468 | |
472 | 469 | ```go |
473 | 470 | // proxymw implements StringService, forwarding Uppercase requests to the |
474 | 471 | // provided endpoint, and serving all other (i.e. Count) requests via the |
475 | // embedded StringService. | |
472 | // next StringService. | |
476 | 473 | type proxymw struct { |
477 | context.Context | |
478 | StringService // Serve most requests via this embedded service... | |
479 | UppercaseEndpoint endpoint.Endpoint // ...except Uppercase, which gets served by this endpoint | |
474 | ctx context.Context | |
475 | next StringService // Serve most requests via this service... | |
476 | uppercase endpoint.Endpoint // ...except Uppercase, which gets served by this endpoint | |
480 | 477 | } |
481 | 478 | ``` |
482 | 479 | |
488 | 485 | |
489 | 486 | ```go |
490 | 487 | func (mw proxymw) Uppercase(s string) (string, error) { |
491 | response, err := mw.UppercaseEndpoint(mw.Context, uppercaseRequest{S: s}) | |
488 | 	response, err := mw.uppercase(mw.ctx, uppercaseRequest{S: s}) | |
492 | 489 | if err != nil { |
493 | 490 | return "", err |
494 | 491 | } |
532 | 529 | And if any of those instances start to behave badly, we want to deal with that, without affecting our own service's reliability. |
533 | 530 | |
534 | 531 | Go kit offers adapters to different service discovery systems, to get up-to-date sets of instances, exposed as individual endpoints. |
535 | Those adapters are called publishers. | |
536 | ||
537 | ```go | |
538 | type Publisher interface { | |
532 | Those adapters are called subscribers. | |
533 | ||
534 | ```go | |
535 | type Subscriber interface { | |
539 | 536 | Endpoints() ([]endpoint.Endpoint, error) |
540 | 537 | } |
541 | 538 | ``` |
542 | 539 | |
543 | Internally, publishers use a provided factory function to convert each discovered host:port string to a usable endpoint. | |
540 | Internally, subscribers use a provided factory function to convert each discovered instance string (typically host:port) to a usable endpoint. | |
544 | 541 | |
545 | 542 | ```go |
546 | 543 | type Factory func(instance string) (endpoint.Endpoint, error) |
550 | 547 | But it's important to put some safety middleware, like circuit breakers and rate limiters, into your factory, too. |
551 | 548 | |
552 | 549 | ```go |
553 | func factory(ctx context.Context, maxQPS int) loadbalancer.Factory { | |
554 | return func(instance string) (endpoint.Endpoint, error) { | |
555 | var e endpoint.Endpoint | |
556 | e = makeUppercaseProxy(ctx, instance) | |
557 | e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e) | |
558 | e = kitratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(maxQPS), int64(maxQPS)))(e) | |
559 | return e, nil | |
560 | } | |
550 | var e endpoint.Endpoint | |
551 | e = makeUppercaseProxy(ctx, instance) | |
552 | e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e) | |
553 | e = kitratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(maxQPS), int64(maxQPS)))(e) | |
561 | 554 | } |
562 | 555 | ``` |
563 | 556 | |
564 | 557 | Now that we've got a set of endpoints, we need to choose one. |
565 | Load balancers wrap publishers, and select one endpoint from many. | |
558 | Load balancers wrap subscribers, and select one endpoint from many. | |
566 | 559 | Go kit provides a couple of basic load balancers, and it's easy to write your own if you want more advanced heuristics. |
567 | 560 | |
568 | 561 | ```go |
569 | type LoadBalancer interface { | |
562 | type Balancer interface { | |
570 | 563 | Endpoint() (endpoint.Endpoint, error) |
571 | 564 | } |
572 | 565 | ``` |
577 | 570 | The retry strategy will retry failed requests until either the max attempts or timeout has been reached. |
578 | 571 | |
579 | 572 | ```go |
580 | func Retry(max int, timeout time.Duration, lb LoadBalancer) endpoint.Endpoint | |
573 | func Retry(max int, timeout time.Duration, lb Balancer) endpoint.Endpoint | |
581 | 574 | ``` |
582 | 575 | |
583 | 576 | Let's wire up our final proxying middleware. |
584 | 577 | For simplicity, we'll assume the user will specify multiple comma-separated instance endpoints with a flag. |
585 | 578 | |
586 | 579 | ```go |
587 | func proxyingMiddleware(proxyList string, ctx context.Context, logger log.Logger) ServiceMiddleware { | |
580 | func proxyingMiddleware(instances string, ctx context.Context, logger log.Logger) ServiceMiddleware { | |
581 | // If instances is empty, don't proxy. | |
582 | if instances == "" { | |
583 | logger.Log("proxy_to", "none") | |
584 | return func(next StringService) StringService { return next } | |
585 | } | |
586 | ||
587 | // Set some parameters for our client. | |
588 | var ( | |
589 | qps = 100 // beyond which we will return an error | |
590 | maxAttempts = 3 // per request, before giving up | |
591 | maxTime = 250 * time.Millisecond // wallclock time, before giving up | |
592 | ) | |
593 | ||
594 | // Otherwise, construct an endpoint for each instance in the list, and add | |
595 | // it to a fixed set of endpoints. In a real service, rather than doing this | |
596 | // by hand, you'd probably use package sd's support for your service | |
597 | // discovery system. | |
598 | var ( | |
599 | instanceList = split(instances) | |
600 | subscriber sd.FixedSubscriber | |
601 | ) | |
602 | logger.Log("proxy_to", fmt.Sprint(instanceList)) | |
603 | for _, instance := range instanceList { | |
604 | var e endpoint.Endpoint | |
605 | e = makeUppercaseProxy(ctx, instance) | |
606 | e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e) | |
607 | e = kitratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(qps), int64(qps)))(e) | |
608 | subscriber = append(subscriber, e) | |
609 | } | |
610 | ||
611 | // Now, build a single, retrying, load-balancing endpoint out of all of | |
612 | // those individual endpoints. | |
613 | balancer := lb.NewRoundRobin(subscriber) | |
614 | retry := lb.Retry(maxAttempts, maxTime, balancer) | |
615 | ||
616 | // And finally, return the ServiceMiddleware, implemented by proxymw. | |
588 | 617 | return func(next StringService) StringService { |
589 | var ( | |
590 | qps = 100 // max to each instance | |
591 | publisher = static.NewPublisher(split(proxyList), factory(ctx, qps), logger) | |
592 | lb = loadbalancer.NewRoundRobin(publisher) | |
593 | maxAttempts = 3 | |
594 | maxTime = 100 * time.Millisecond | |
595 | endpoint = loadbalancer.Retry(maxAttempts, maxTime, lb) | |
596 | ) | |
597 | return proxymw{ctx, endpoint, next} | |
618 | return proxymw{ctx, next, retry} | |
598 | 619 | } |
599 | 620 | } |
600 | 621 | ``` |
666 | 687 | |
667 | 688 | It's possible to use Go kit to create a client package to your service, to make consuming your service easier from other Go programs. |
668 | 689 | Effectively, your client package will provide an implementation of your service interface, which invokes a remote service instance using a specific transport. |
669 | See [package addsvc/client](https://github.com/go-kit/kit/tree/master/examples/addsvc/client) for an example. | |
690 | See [package addsvc/client](https://github.com/go-kit/kit/tree/master/examples/addsvc/client) | |
691 | or [package profilesvc/client](https://github.com/go-kit/kit/tree/master/examples/profilesvc/client) | |
692 | for examples. | |
670 | 693 | |
671 | 694 | ## Other examples |
672 | 695 | |
673 | 696 | ### addsvc |
674 | 697 | |
675 | [addsvc](https://github.com/go-kit/kit/blob/master/examples/addsvc) was the original example application. | |
698 | [addsvc](https://github.com/go-kit/kit/blob/master/examples/addsvc) is the original example service. | |
676 | 699 | It exposes a set of operations over **all supported transports**. |
677 | 700 | It's fully logged, instrumented, and uses Zipkin request tracing. |
678 | 701 | It also demonstrates how to create and use client packages. |
0 | package main | |
1 | ||
2 | import ( | |
3 | "golang.org/x/net/context" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | "github.com/go-kit/kit/examples/addsvc/server" | |
7 | "github.com/go-kit/kit/log" | |
8 | ) | |
9 | ||
// newClient returns an AddService that's backed by the provided endpoints.
// The returned implementation invokes sumEndpoint / concatEndpoint for the
// corresponding service methods, using ctx for every call and logger to
// report transport errors.
func newClient(ctx context.Context, sumEndpoint endpoint.Endpoint, concatEndpoint endpoint.Endpoint, logger log.Logger) server.AddService {
	return client{
		Context: ctx,
		Logger:  logger,
		sum:     sumEndpoint,
		concat:  concatEndpoint,
	}
}
19 | ||
// client implements server.AddService by forwarding each method call to a
// remote endpoint.
type client struct {
	context.Context // passed to every endpoint invocation
	log.Logger      // used to report transport errors (the interface has no error returns)
	sum    endpoint.Endpoint // serves Sum
	concat endpoint.Endpoint // serves Concat
}
26 | ||
27 | // TODO(pb): If your service interface methods don't return an error, we have | |
28 | // no way to signal problems with a service client. If they don't take a | |
29 | // context, we have to provide a global context for any transport that | |
30 | // requires one, effectively making your service a black box to any context- | |
31 | // specific information. So, we should make some recommendations: | |
32 | // | |
33 | // - To get started, a simple service interface is probably fine. | |
34 | // | |
35 | // - To properly deal with transport errors, every method on your service | |
36 | // should return an error. This is probably important. | |
37 | // | |
38 | // - To properly deal with context information, every method on your service | |
39 | // can take a context as its first argument. This may or may not be | |
40 | // important. | |
41 | ||
42 | func (c client) Sum(a, b int) int { | |
43 | request := server.SumRequest{ | |
44 | A: a, | |
45 | B: b, | |
46 | } | |
47 | reply, err := c.sum(c.Context, request) | |
48 | if err != nil { | |
49 | c.Logger.Log("err", err) // Without an error return parameter, we can't do anything else... | |
50 | return 0 | |
51 | } | |
52 | ||
53 | r := reply.(server.SumResponse) | |
54 | return r.V | |
55 | } | |
56 | ||
57 | func (c client) Concat(a, b string) string { | |
58 | request := server.ConcatRequest{ | |
59 | A: a, | |
60 | B: b, | |
61 | } | |
62 | reply, err := c.concat(c.Context, request) | |
63 | if err != nil { | |
64 | c.Logger.Log("err", err) // Without an error return parameter, we can't do anything else... | |
65 | return "" | |
66 | } | |
67 | ||
68 | r := reply.(server.ConcatResponse) | |
69 | return r.V | |
70 | } |
0 | // Package grpc provides a gRPC client for the add service. | |
1 | package grpc | |
2 | ||
3 | import ( | |
4 | "time" | |
5 | ||
6 | jujuratelimit "github.com/juju/ratelimit" | |
7 | stdopentracing "github.com/opentracing/opentracing-go" | |
8 | "github.com/sony/gobreaker" | |
9 | "google.golang.org/grpc" | |
10 | ||
11 | "github.com/go-kit/kit/circuitbreaker" | |
12 | "github.com/go-kit/kit/endpoint" | |
13 | "github.com/go-kit/kit/examples/addsvc" | |
14 | "github.com/go-kit/kit/examples/addsvc/pb" | |
15 | "github.com/go-kit/kit/log" | |
16 | "github.com/go-kit/kit/ratelimit" | |
17 | "github.com/go-kit/kit/tracing/opentracing" | |
18 | grpctransport "github.com/go-kit/kit/transport/grpc" | |
19 | ) | |
20 | ||
21 | // New returns an AddService backed by a gRPC client connection. It is the | |
22 | // responsibility of the caller to dial, and later close, the connection. | |
23 | func New(conn *grpc.ClientConn, tracer stdopentracing.Tracer, logger log.Logger) addsvc.Service { | |
24 | // We construct a single ratelimiter middleware, to limit the total outgoing | |
25 | // QPS from this client to all methods on the remote instance. We also | |
26 | // construct per-endpoint circuitbreaker middlewares to demonstrate how | |
27 | // that's done, although they could easily be combined into a single breaker | |
28 | // for the entire remote instance, too. | |
29 | ||
30 | limiter := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(100, 100)) | |
31 | ||
32 | var sumEndpoint endpoint.Endpoint | |
33 | { | |
34 | sumEndpoint = grpctransport.NewClient( | |
35 | conn, | |
36 | "Add", | |
37 | "Sum", | |
38 | addsvc.EncodeGRPCSumRequest, | |
39 | addsvc.DecodeGRPCSumResponse, | |
40 | pb.SumReply{}, | |
41 | grpctransport.SetClientBefore(opentracing.FromGRPCRequest(tracer, "Sum", logger)), | |
42 | ).Endpoint() | |
43 | sumEndpoint = opentracing.TraceClient(tracer, "Sum")(sumEndpoint) | |
44 | sumEndpoint = limiter(sumEndpoint) | |
45 | sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ | |
46 | Name: "Sum", | |
47 | Timeout: 30 * time.Second, | |
48 | }))(sumEndpoint) | |
49 | } | |
50 | ||
51 | var concatEndpoint endpoint.Endpoint | |
52 | { | |
53 | concatEndpoint = grpctransport.NewClient( | |
54 | conn, | |
55 | "Add", | |
56 | "Concat", | |
57 | addsvc.EncodeGRPCConcatRequest, | |
58 | addsvc.DecodeGRPCConcatResponse, | |
59 | pb.ConcatReply{}, | |
60 | grpctransport.SetClientBefore(opentracing.FromGRPCRequest(tracer, "Concat", logger)), | |
61 | ).Endpoint() | |
62 | concatEndpoint = opentracing.TraceClient(tracer, "Concat")(concatEndpoint) | |
63 | concatEndpoint = limiter(concatEndpoint) | |
64 | sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ | |
65 | Name: "Concat", | |
66 | Timeout: 30 * time.Second, | |
67 | }))(sumEndpoint) | |
68 | } | |
69 | ||
70 | return addsvc.Endpoints{ | |
71 | SumEndpoint: sumEndpoint, | |
72 | ConcatEndpoint: concatEndpoint, | |
73 | } | |
74 | } |
0 | package grpc | |
1 | ||
2 | import ( | |
3 | "golang.org/x/net/context" | |
4 | ||
5 | "github.com/go-kit/kit/examples/addsvc/pb" | |
6 | "github.com/go-kit/kit/examples/addsvc/server" | |
7 | ) | |
8 | ||
9 | func encodeSumRequest(ctx context.Context, request interface{}) (interface{}, error) { | |
10 | req := request.(server.SumRequest) | |
11 | return &pb.SumRequest{ | |
12 | A: int64(req.A), | |
13 | B: int64(req.B), | |
14 | }, nil | |
15 | } | |
16 | ||
17 | func encodeConcatRequest(ctx context.Context, request interface{}) (interface{}, error) { | |
18 | req := request.(server.ConcatRequest) | |
19 | return &pb.ConcatRequest{ | |
20 | A: req.A, | |
21 | B: req.B, | |
22 | }, nil | |
23 | } | |
24 | ||
25 | func decodeSumResponse(ctx context.Context, response interface{}) (interface{}, error) { | |
26 | resp := response.(*pb.SumReply) | |
27 | return server.SumResponse{ | |
28 | V: int(resp.V), | |
29 | }, nil | |
30 | } | |
31 | ||
32 | func decodeConcatResponse(ctx context.Context, response interface{}) (interface{}, error) { | |
33 | resp := response.(*pb.ConcatReply) | |
34 | return server.ConcatResponse{ | |
35 | V: resp.V, | |
36 | }, nil | |
37 | } |
0 | package grpc | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | ||
5 | kitot "github.com/go-kit/kit/tracing/opentracing" | |
6 | "github.com/opentracing/opentracing-go" | |
7 | "google.golang.org/grpc" | |
8 | ||
9 | "github.com/go-kit/kit/endpoint" | |
10 | "github.com/go-kit/kit/examples/addsvc/pb" | |
11 | "github.com/go-kit/kit/loadbalancer" | |
12 | "github.com/go-kit/kit/log" | |
13 | grpctransport "github.com/go-kit/kit/transport/grpc" | |
14 | ) | |
15 | ||
16 | // MakeSumEndpointFactory returns a loadbalancer.Factory that transforms GRPC | |
17 | // host:port strings into Endpoints that call the Sum method on a GRPC server | |
18 | // at that address. | |
19 | func MakeSumEndpointFactory(tracer opentracing.Tracer, tracingLogger log.Logger) loadbalancer.Factory { | |
20 | return func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
21 | cc, err := grpc.Dial(instance, grpc.WithInsecure()) | |
22 | return grpctransport.NewClient( | |
23 | cc, | |
24 | "Add", | |
25 | "Sum", | |
26 | encodeSumRequest, | |
27 | decodeSumResponse, | |
28 | pb.SumReply{}, | |
29 | grpctransport.SetClientBefore(kitot.ToGRPCRequest(tracer, tracingLogger)), | |
30 | ).Endpoint(), cc, err | |
31 | } | |
32 | } | |
33 | ||
34 | // MakeConcatEndpointFactory returns a loadbalancer.Factory that transforms | |
35 | // GRPC host:port strings into Endpoints that call the Concat method on a GRPC | |
36 | // server at that address. | |
37 | func MakeConcatEndpointFactory(tracer opentracing.Tracer, tracingLogger log.Logger) loadbalancer.Factory { | |
38 | return func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
39 | cc, err := grpc.Dial(instance, grpc.WithInsecure()) | |
40 | return grpctransport.NewClient( | |
41 | cc, | |
42 | "Add", | |
43 | "Concat", | |
44 | encodeConcatRequest, | |
45 | decodeConcatResponse, | |
46 | pb.ConcatReply{}, | |
47 | grpctransport.SetClientBefore(kitot.ToGRPCRequest(tracer, tracingLogger)), | |
48 | ).Endpoint(), cc, err | |
49 | } | |
50 | } |
0 | // Package http provides an HTTP client for the add service. | |
1 | package http | |
2 | ||
3 | import ( | |
4 | "net/url" | |
5 | "strings" | |
6 | "time" | |
7 | ||
8 | jujuratelimit "github.com/juju/ratelimit" | |
9 | stdopentracing "github.com/opentracing/opentracing-go" | |
10 | "github.com/sony/gobreaker" | |
11 | ||
12 | "github.com/go-kit/kit/circuitbreaker" | |
13 | "github.com/go-kit/kit/endpoint" | |
14 | "github.com/go-kit/kit/examples/addsvc" | |
15 | "github.com/go-kit/kit/log" | |
16 | "github.com/go-kit/kit/ratelimit" | |
17 | "github.com/go-kit/kit/tracing/opentracing" | |
18 | httptransport "github.com/go-kit/kit/transport/http" | |
19 | ) | |
20 | ||
21 | // New returns an AddService backed by an HTTP server living at the remote | |
22 | // instance. We expect instance to come from a service discovery system, so | |
23 | // likely of the form "host:port". | |
24 | func New(instance string, tracer stdopentracing.Tracer, logger log.Logger) (addsvc.Service, error) { | |
25 | if !strings.HasPrefix(instance, "http") { | |
26 | instance = "http://" + instance | |
27 | } | |
28 | u, err := url.Parse(instance) | |
29 | if err != nil { | |
30 | return nil, err | |
31 | } | |
32 | ||
33 | // We construct a single ratelimiter middleware, to limit the total outgoing | |
34 | // QPS from this client to all methods on the remote instance. We also | |
35 | // construct per-endpoint circuitbreaker middlewares to demonstrate how | |
36 | // that's done, although they could easily be combined into a single breaker | |
37 | // for the entire remote instance, too. | |
38 | ||
39 | limiter := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(100, 100)) | |
40 | ||
41 | var sumEndpoint endpoint.Endpoint | |
42 | { | |
43 | sumEndpoint = httptransport.NewClient( | |
44 | "POST", | |
45 | copyURL(u, "/sum"), | |
46 | addsvc.EncodeHTTPGenericRequest, | |
47 | addsvc.DecodeHTTPSumResponse, | |
48 | httptransport.SetClientBefore(opentracing.FromHTTPRequest(tracer, "Sum", logger)), | |
49 | ).Endpoint() | |
50 | sumEndpoint = opentracing.TraceClient(tracer, "Sum")(sumEndpoint) | |
51 | sumEndpoint = limiter(sumEndpoint) | |
52 | sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ | |
53 | Name: "Sum", | |
54 | Timeout: 30 * time.Second, | |
55 | }))(sumEndpoint) | |
56 | } | |
57 | ||
58 | var concatEndpoint endpoint.Endpoint | |
59 | { | |
60 | concatEndpoint = httptransport.NewClient( | |
61 | "POST", | |
62 | copyURL(u, "/concat"), | |
63 | addsvc.EncodeHTTPGenericRequest, | |
64 | addsvc.DecodeHTTPConcatResponse, | |
65 | httptransport.SetClientBefore(opentracing.FromHTTPRequest(tracer, "Concat", logger)), | |
66 | ).Endpoint() | |
67 | concatEndpoint = opentracing.TraceClient(tracer, "Concat")(concatEndpoint) | |
68 | concatEndpoint = limiter(concatEndpoint) | |
69 | sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ | |
70 | Name: "Concat", | |
71 | Timeout: 30 * time.Second, | |
72 | }))(sumEndpoint) | |
73 | } | |
74 | ||
75 | return addsvc.Endpoints{ | |
76 | SumEndpoint: sumEndpoint, | |
77 | ConcatEndpoint: concatEndpoint, | |
78 | }, nil | |
79 | } | |
80 | ||
81 | func copyURL(base *url.URL, path string) *url.URL { | |
82 | next := *base | |
83 | next.Path = path | |
84 | return &next | |
85 | } |
0 | package httpjson | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | "net/url" | |
5 | ||
6 | "github.com/opentracing/opentracing-go" | |
7 | ||
8 | "github.com/go-kit/kit/endpoint" | |
9 | "github.com/go-kit/kit/examples/addsvc/server" | |
10 | "github.com/go-kit/kit/loadbalancer" | |
11 | "github.com/go-kit/kit/log" | |
12 | kitot "github.com/go-kit/kit/tracing/opentracing" | |
13 | httptransport "github.com/go-kit/kit/transport/http" | |
14 | ) | |
15 | ||
16 | // MakeSumEndpointFactory generates a Factory that transforms an http url into | |
17 | // an Endpoint. | |
18 | // | |
19 | // The path of the url is reset to /sum. | |
20 | func MakeSumEndpointFactory(tracer opentracing.Tracer, tracingLogger log.Logger) loadbalancer.Factory { | |
21 | return func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
22 | sumURL, err := url.Parse(instance) | |
23 | if err != nil { | |
24 | return nil, nil, err | |
25 | } | |
26 | sumURL.Path = "/sum" | |
27 | ||
28 | client := httptransport.NewClient( | |
29 | "GET", | |
30 | sumURL, | |
31 | server.EncodeSumRequest, | |
32 | server.DecodeSumResponse, | |
33 | httptransport.SetClient(nil), | |
34 | httptransport.SetClientBefore(kitot.ToHTTPRequest(tracer, tracingLogger)), | |
35 | ) | |
36 | ||
37 | return client.Endpoint(), nil, nil | |
38 | } | |
39 | } | |
40 | ||
41 | // MakeConcatEndpointFactory generates a Factory that transforms an http url | |
42 | // into an Endpoint. | |
43 | // | |
44 | // The path of the url is reset to /concat. | |
45 | func MakeConcatEndpointFactory(tracer opentracing.Tracer, tracingLogger log.Logger) loadbalancer.Factory { | |
46 | return func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
47 | concatURL, err := url.Parse(instance) | |
48 | if err != nil { | |
49 | return nil, nil, err | |
50 | } | |
51 | concatURL.Path = "/concat" | |
52 | ||
53 | client := httptransport.NewClient( | |
54 | "GET", | |
55 | concatURL, | |
56 | server.EncodeConcatRequest, | |
57 | server.DecodeConcatResponse, | |
58 | httptransport.SetClient(nil), | |
59 | httptransport.SetClientBefore(kitot.ToHTTPRequest(tracer, tracingLogger)), | |
60 | ) | |
61 | ||
62 | return client.Endpoint(), nil, nil | |
63 | } | |
64 | } |
0 | package main | |
1 | ||
2 | import ( | |
3 | "flag" | |
4 | "fmt" | |
5 | "os" | |
6 | "path/filepath" | |
7 | "strconv" | |
8 | "strings" | |
9 | "time" | |
10 | ||
11 | "github.com/lightstep/lightstep-tracer-go" | |
12 | "github.com/opentracing/opentracing-go" | |
13 | zipkin "github.com/openzipkin/zipkin-go-opentracing" | |
14 | appdashot "github.com/sourcegraph/appdash/opentracing" | |
15 | "golang.org/x/net/context" | |
16 | "sourcegraph.com/sourcegraph/appdash" | |
17 | ||
18 | "github.com/go-kit/kit/endpoint" | |
19 | grpcclient "github.com/go-kit/kit/examples/addsvc/client/grpc" | |
20 | httpjsonclient "github.com/go-kit/kit/examples/addsvc/client/httpjson" | |
21 | netrpcclient "github.com/go-kit/kit/examples/addsvc/client/netrpc" | |
22 | thriftclient "github.com/go-kit/kit/examples/addsvc/client/thrift" | |
23 | "github.com/go-kit/kit/loadbalancer" | |
24 | "github.com/go-kit/kit/loadbalancer/static" | |
25 | "github.com/go-kit/kit/log" | |
26 | kitot "github.com/go-kit/kit/tracing/opentracing" | |
27 | ) | |
28 | ||
29 | func main() { | |
30 | var ( | |
31 | transport = flag.String("transport", "httpjson", "httpjson, grpc, netrpc, thrift") | |
32 | httpAddrs = flag.String("http.addrs", "localhost:8001", "Comma-separated list of addresses for HTTP (JSON) servers") | |
33 | grpcAddrs = flag.String("grpc.addrs", "localhost:8002", "Comma-separated list of addresses for gRPC servers") | |
34 | netrpcAddrs = flag.String("netrpc.addrs", "localhost:8003", "Comma-separated list of addresses for net/rpc servers") | |
35 | thriftAddrs = flag.String("thrift.addrs", "localhost:8004", "Comma-separated list of addresses for Thrift servers") | |
36 | thriftProtocol = flag.String("thrift.protocol", "binary", "binary, compact, json, simplejson") | |
37 | thriftBufferSize = flag.Int("thrift.buffer.size", 0, "0 for unbuffered") | |
38 | thriftFramed = flag.Bool("thrift.framed", false, "true to enable framing") | |
39 | ||
40 | // Three OpenTracing backends (to demonstrate how they can be interchanged): | |
41 | zipkinAddr = flag.String("zipkin.kafka.addr", "", "Enable Zipkin tracing via a Kafka Collector host:port") | |
42 | appdashAddr = flag.String("appdash.addr", "", "Enable Appdash tracing via an Appdash server host:port") | |
43 | lightstepAccessToken = flag.String("lightstep.token", "", "Enable LightStep tracing via a LightStep access token") | |
44 | ) | |
45 | flag.Parse() | |
46 | if len(os.Args) < 4 { | |
47 | fmt.Fprintf(os.Stderr, "\n%s [flags] method arg1 arg2\n\n", filepath.Base(os.Args[0])) | |
48 | flag.Usage() | |
49 | os.Exit(1) | |
50 | } | |
51 | ||
52 | randomSeed := time.Now().UnixNano() | |
53 | ||
54 | root := context.Background() | |
55 | method, s1, s2 := flag.Arg(0), flag.Arg(1), flag.Arg(2) | |
56 | ||
57 | var logger log.Logger | |
58 | logger = log.NewLogfmtLogger(os.Stdout) | |
59 | logger = log.NewContext(logger).With("caller", log.DefaultCaller) | |
60 | logger = log.NewContext(logger).With("transport", *transport) | |
61 | tracingLogger := log.NewContext(logger).With("component", "tracing") | |
62 | ||
63 | // Set up OpenTracing | |
64 | var tracer opentracing.Tracer | |
65 | { | |
66 | switch { | |
67 | case *appdashAddr != "" && *lightstepAccessToken == "" && *zipkinAddr == "": | |
68 | tracer = appdashot.NewTracer(appdash.NewRemoteCollector(*appdashAddr)) | |
69 | case *appdashAddr == "" && *lightstepAccessToken != "" && *zipkinAddr == "": | |
70 | tracer = lightstep.NewTracer(lightstep.Options{ | |
71 | AccessToken: *lightstepAccessToken, | |
72 | }) | |
73 | defer lightstep.FlushLightStepTracer(tracer) | |
74 | case *appdashAddr == "" && *lightstepAccessToken == "" && *zipkinAddr != "": | |
75 | collector, err := zipkin.NewKafkaCollector( | |
76 | strings.Split(*zipkinAddr, ","), | |
77 | zipkin.KafkaLogger(tracingLogger), | |
78 | ) | |
79 | if err != nil { | |
80 | tracingLogger.Log("err", "unable to create kafka collector", "fatal", err) | |
81 | os.Exit(1) | |
82 | } | |
83 | tracer, err = zipkin.NewTracer( | |
84 | zipkin.NewRecorder(collector, false, "localhost:8000", "addsvc-client"), | |
85 | ) | |
86 | if err != nil { | |
87 | tracingLogger.Log("err", "unable to create zipkin tracer", "fatal", err) | |
88 | os.Exit(1) | |
89 | } | |
90 | case *appdashAddr == "" && *lightstepAccessToken == "" && *zipkinAddr == "": | |
91 | tracer = opentracing.GlobalTracer() // no-op | |
92 | default: | |
93 | tracingLogger.Log("fatal", "specify a single -appdash.addr, -lightstep.access.token or -zipkin.kafka.addr") | |
94 | os.Exit(1) | |
95 | } | |
96 | } | |
97 | ||
98 | var ( | |
99 | instances []string | |
100 | sumFactory, concatFactory loadbalancer.Factory | |
101 | ) | |
102 | ||
103 | switch *transport { | |
104 | case "grpc": | |
105 | instances = strings.Split(*grpcAddrs, ",") | |
106 | sumFactory = grpcclient.MakeSumEndpointFactory(tracer, tracingLogger) | |
107 | concatFactory = grpcclient.MakeConcatEndpointFactory(tracer, tracingLogger) | |
108 | ||
109 | case "httpjson": | |
110 | instances = strings.Split(*httpAddrs, ",") | |
111 | for i, rawurl := range instances { | |
112 | if !strings.HasPrefix("http", rawurl) { | |
113 | instances[i] = "http://" + rawurl | |
114 | } | |
115 | } | |
116 | sumFactory = httpjsonclient.MakeSumEndpointFactory(tracer, tracingLogger) | |
117 | concatFactory = httpjsonclient.MakeConcatEndpointFactory(tracer, tracingLogger) | |
118 | ||
119 | case "netrpc": | |
120 | instances = strings.Split(*netrpcAddrs, ",") | |
121 | sumFactory = netrpcclient.SumEndpointFactory | |
122 | concatFactory = netrpcclient.ConcatEndpointFactory | |
123 | ||
124 | case "thrift": | |
125 | instances = strings.Split(*thriftAddrs, ",") | |
126 | thriftClient := thriftclient.New(*thriftProtocol, *thriftBufferSize, *thriftFramed, logger) | |
127 | sumFactory = thriftClient.SumEndpoint | |
128 | concatFactory = thriftClient.ConcatEndpoint | |
129 | ||
130 | default: | |
131 | logger.Log("err", "invalid transport") | |
132 | os.Exit(1) | |
133 | } | |
134 | ||
135 | sum := buildEndpoint(tracer, "sum", instances, sumFactory, randomSeed, logger) | |
136 | concat := buildEndpoint(tracer, "concat", instances, concatFactory, randomSeed, logger) | |
137 | ||
138 | svc := newClient(root, sum, concat, logger) | |
139 | ||
140 | begin := time.Now() | |
141 | switch method { | |
142 | case "sum": | |
143 | a, _ := strconv.Atoi(s1) | |
144 | b, _ := strconv.Atoi(s2) | |
145 | v := svc.Sum(a, b) | |
146 | logger.Log("method", "sum", "a", a, "b", b, "v", v, "took", time.Since(begin)) | |
147 | ||
148 | case "concat": | |
149 | a, b := s1, s2 | |
150 | v := svc.Concat(a, b) | |
151 | logger.Log("method", "concat", "a", a, "b", b, "v", v, "took", time.Since(begin)) | |
152 | ||
153 | default: | |
154 | logger.Log("err", "invalid method "+method) | |
155 | os.Exit(1) | |
156 | } | |
157 | // wait for collector | |
158 | time.Sleep(2 * time.Second) | |
159 | } | |
160 | ||
161 | func buildEndpoint(tracer opentracing.Tracer, operationName string, instances []string, factory loadbalancer.Factory, seed int64, logger log.Logger) endpoint.Endpoint { | |
162 | publisher := static.NewPublisher(instances, factory, logger) | |
163 | random := loadbalancer.NewRandom(publisher, seed) | |
164 | endpoint := loadbalancer.Retry(10, 10*time.Second, random) | |
165 | return kitot.TraceClient(tracer, operationName)(endpoint) | |
166 | } |
0 | package netrpc | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | "net/rpc" | |
5 | ||
6 | "golang.org/x/net/context" | |
7 | ||
8 | "github.com/go-kit/kit/endpoint" | |
9 | "github.com/go-kit/kit/examples/addsvc/server" | |
10 | ) | |
11 | ||
12 | // SumEndpointFactory transforms host:port strings into Endpoints. | |
13 | func SumEndpointFactory(instance string) (endpoint.Endpoint, io.Closer, error) { | |
14 | client, err := rpc.DialHTTP("tcp", instance) | |
15 | if err != nil { | |
16 | return nil, nil, err | |
17 | } | |
18 | ||
19 | return func(ctx context.Context, request interface{}) (interface{}, error) { | |
20 | var reply server.SumResponse | |
21 | if err := client.Call("addsvc.Sum", request.(server.SumRequest), &reply); err != nil { | |
22 | return server.SumResponse{}, err | |
23 | } | |
24 | return reply, nil | |
25 | }, client, nil | |
26 | } | |
27 | ||
28 | // ConcatEndpointFactory transforms host:port strings into Endpoints. | |
29 | func ConcatEndpointFactory(instance string) (endpoint.Endpoint, io.Closer, error) { | |
30 | client, err := rpc.DialHTTP("tcp", instance) | |
31 | if err != nil { | |
32 | return nil, nil, err | |
33 | } | |
34 | ||
35 | return func(ctx context.Context, request interface{}) (interface{}, error) { | |
36 | var reply server.ConcatResponse | |
37 | if err := client.Call("addsvc.Concat", request.(server.ConcatRequest), &reply); err != nil { | |
38 | return server.ConcatResponse{}, err | |
39 | } | |
40 | return reply, nil | |
41 | }, client, nil | |
42 | } |
0 | // Package thrift provides a Thrift client for the add service. | |
0 | 1 | package thrift |
1 | 2 | |
2 | 3 | import ( |
3 | "io" | |
4 | "time" | |
4 | 5 | |
5 | "github.com/apache/thrift/lib/go/thrift" | |
6 | jujuratelimit "github.com/juju/ratelimit" | |
7 | "github.com/sony/gobreaker" | |
8 | ||
9 | "github.com/go-kit/kit/circuitbreaker" | |
6 | 10 | "github.com/go-kit/kit/endpoint" |
7 | "github.com/go-kit/kit/examples/addsvc/server" | |
8 | thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/add" | |
9 | "github.com/go-kit/kit/log" | |
10 | "golang.org/x/net/context" | |
11 | "github.com/go-kit/kit/examples/addsvc" | |
12 | thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" | |
13 | "github.com/go-kit/kit/ratelimit" | |
11 | 14 | ) |
12 | 15 | |
13 | // New returns a stateful factory for Sum and Concat Endpoints | |
14 | func New(protocol string, bufferSize int, framed bool, logger log.Logger) client { | |
15 | var protocolFactory thrift.TProtocolFactory | |
16 | switch protocol { | |
17 | case "compact": | |
18 | protocolFactory = thrift.NewTCompactProtocolFactory() | |
19 | case "simplejson": | |
20 | protocolFactory = thrift.NewTSimpleJSONProtocolFactory() | |
21 | case "json": | |
22 | protocolFactory = thrift.NewTJSONProtocolFactory() | |
23 | case "binary", "": | |
24 | protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() | |
25 | default: | |
26 | panic("invalid protocol") | |
16 | // New returns an AddService backed by a Thrift server described by the provided | |
17 | // client. The caller is responsible for constructing the client, and eventually | |
18 | // closing the underlying transport. | |
19 | func New(client *thriftadd.AddServiceClient) addsvc.Service { | |
20 | // We construct a single ratelimiter middleware, to limit the total outgoing | |
21 | // QPS from this client to all methods on the remote instance. We also | |
22 | // construct per-endpoint circuitbreaker middlewares to demonstrate how | |
23 | // that's done, although they could easily be combined into a single breaker | |
24 | // for the entire remote instance, too. | |
25 | ||
26 | limiter := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(100, 100)) | |
27 | ||
28 | // Thrift does not currently have tracer bindings, so we skip tracing. | |
29 | ||
30 | var sumEndpoint endpoint.Endpoint | |
31 | { | |
32 | sumEndpoint = addsvc.MakeThriftSumEndpoint(client) | |
33 | sumEndpoint = limiter(sumEndpoint) | |
34 | sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ | |
35 | Name: "Sum", | |
36 | Timeout: 30 * time.Second, | |
37 | }))(sumEndpoint) | |
27 | 38 | } |
28 | 39 | |
29 | var transportFactory thrift.TTransportFactory | |
30 | if bufferSize > 0 { | |
31 | transportFactory = thrift.NewTBufferedTransportFactory(bufferSize) | |
32 | } else { | |
33 | transportFactory = thrift.NewTTransportFactory() | |
34 | } | |
35 | if framed { | |
36 | transportFactory = thrift.NewTFramedTransportFactory(transportFactory) | |
40 | var concatEndpoint endpoint.Endpoint | |
41 | { | |
42 | concatEndpoint = addsvc.MakeThriftConcatEndpoint(client) | |
43 | concatEndpoint = limiter(concatEndpoint) | |
44 | sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ | |
45 | Name: "Concat", | |
46 | Timeout: 30 * time.Second, | |
47 | }))(sumEndpoint) | |
37 | 48 | } |
38 | 49 | |
39 | return client{transportFactory, protocolFactory, logger} | |
50 | return addsvc.Endpoints{ | |
51 | SumEndpoint: addsvc.MakeThriftSumEndpoint(client), | |
52 | ConcatEndpoint: addsvc.MakeThriftConcatEndpoint(client), | |
53 | } | |
40 | 54 | } |
41 | ||
// client bundles the Thrift transport/protocol factories and a logger needed
// to build per-instance Sum and Concat endpoints. The fields are embedded, so
// their methods (GetTransport, GetProtocol, Log) are promoted onto client.
type client struct {
	thrift.TTransportFactory
	thrift.TProtocolFactory
	log.Logger
}
47 | ||
48 | // SumEndpointFactory transforms host:port strings into Endpoints. | |
49 | func (c client) SumEndpoint(instance string) (endpoint.Endpoint, io.Closer, error) { | |
50 | transportSocket, err := thrift.NewTSocket(instance) | |
51 | if err != nil { | |
52 | c.Logger.Log("during", "thrift.NewTSocket", "err", err) | |
53 | return nil, nil, err | |
54 | } | |
55 | trans := c.TTransportFactory.GetTransport(transportSocket) | |
56 | ||
57 | if err := trans.Open(); err != nil { | |
58 | c.Logger.Log("during", "thrift transport.Open", "err", err) | |
59 | return nil, nil, err | |
60 | } | |
61 | cli := thriftadd.NewAddServiceClientFactory(trans, c.TProtocolFactory) | |
62 | ||
63 | return func(ctx context.Context, request interface{}) (interface{}, error) { | |
64 | sumRequest := request.(server.SumRequest) | |
65 | reply, err := cli.Sum(int64(sumRequest.A), int64(sumRequest.B)) | |
66 | if err != nil { | |
67 | return server.SumResponse{}, err | |
68 | } | |
69 | return server.SumResponse{V: int(reply.Value)}, nil | |
70 | }, trans, nil | |
71 | } | |
72 | ||
73 | // ConcatEndpointFactory transforms host:port strings into Endpoints. | |
74 | func (c client) ConcatEndpoint(instance string) (endpoint.Endpoint, io.Closer, error) { | |
75 | transportSocket, err := thrift.NewTSocket(instance) | |
76 | if err != nil { | |
77 | c.Logger.Log("during", "thrift.NewTSocket", "err", err) | |
78 | return nil, nil, err | |
79 | } | |
80 | trans := c.TTransportFactory.GetTransport(transportSocket) | |
81 | ||
82 | if err := trans.Open(); err != nil { | |
83 | c.Logger.Log("during", "thrift transport.Open", "err", err) | |
84 | return nil, nil, err | |
85 | } | |
86 | cli := thriftadd.NewAddServiceClientFactory(trans, c.TProtocolFactory) | |
87 | ||
88 | return func(ctx context.Context, request interface{}) (interface{}, error) { | |
89 | concatRequest := request.(server.ConcatRequest) | |
90 | reply, err := cli.Concat(concatRequest.A, concatRequest.B) | |
91 | if err != nil { | |
92 | return server.ConcatResponse{}, err | |
93 | } | |
94 | return server.ConcatResponse{V: reply.Value}, nil | |
95 | }, trans, nil | |
96 | } |
0 | package main | |
1 | ||
2 | import ( | |
3 | "flag" | |
4 | "fmt" | |
5 | "os" | |
6 | "strconv" | |
7 | "strings" | |
8 | "time" | |
9 | ||
10 | "github.com/apache/thrift/lib/go/thrift" | |
11 | "github.com/lightstep/lightstep-tracer-go" | |
12 | stdopentracing "github.com/opentracing/opentracing-go" | |
13 | zipkin "github.com/openzipkin/zipkin-go-opentracing" | |
14 | appdashot "github.com/sourcegraph/appdash/opentracing" | |
15 | "golang.org/x/net/context" | |
16 | "google.golang.org/grpc" | |
17 | "sourcegraph.com/sourcegraph/appdash" | |
18 | ||
19 | "github.com/go-kit/kit/examples/addsvc" | |
20 | grpcclient "github.com/go-kit/kit/examples/addsvc/client/grpc" | |
21 | httpclient "github.com/go-kit/kit/examples/addsvc/client/http" | |
22 | thriftclient "github.com/go-kit/kit/examples/addsvc/client/thrift" | |
23 | thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" | |
24 | "github.com/go-kit/kit/log" | |
25 | ) | |
26 | ||
27 | func main() { | |
28 | // The addcli presumes no service discovery system, and expects users to | |
29 | // provide the direct address of an addsvc. This presumption is reflected in | |
30 | // the addcli binary and the the client packages: the -transport.addr flags | |
31 | // and various client constructors both expect host:port strings. For an | |
32 | // example service with a client built on top of a service discovery system, | |
33 | // see profilesvc. | |
34 | ||
35 | var ( | |
36 | httpAddr = flag.String("http.addr", "", "HTTP address of addsvc") | |
37 | grpcAddr = flag.String("grpc.addr", "", "gRPC (HTTP) address of addsvc") | |
38 | thriftAddr = flag.String("thrift.addr", "", "Thrift address of addsvc") | |
39 | thriftProtocol = flag.String("thrift.protocol", "binary", "binary, compact, json, simplejson") | |
40 | thriftBufferSize = flag.Int("thrift.buffer.size", 0, "0 for unbuffered") | |
41 | thriftFramed = flag.Bool("thrift.framed", false, "true to enable framing") | |
42 | zipkinAddr = flag.String("zipkin.addr", "", "Enable Zipkin tracing via a Kafka Collector host:port") | |
43 | appdashAddr = flag.String("appdash.addr", "", "Enable Appdash tracing via an Appdash server host:port") | |
44 | lightstepToken = flag.String("lightstep.token", "", "Enable LightStep tracing via a LightStep access token") | |
45 | method = flag.String("method", "sum", "sum, concat") | |
46 | ) | |
47 | flag.Parse() | |
48 | ||
49 | if len(flag.Args()) != 2 { | |
50 | fmt.Fprintf(os.Stderr, "usage: addcli [flags] <a> <b>\n") | |
51 | os.Exit(1) | |
52 | } | |
53 | ||
54 | // This is a demonstration client, which supports multiple tracers. | |
55 | // Your clients will probably just use one tracer. | |
56 | var tracer stdopentracing.Tracer | |
57 | { | |
58 | if *zipkinAddr != "" { | |
59 | collector, err := zipkin.NewKafkaCollector( | |
60 | strings.Split(*zipkinAddr, ","), | |
61 | zipkin.KafkaLogger(log.NewNopLogger()), | |
62 | ) | |
63 | if err != nil { | |
64 | fmt.Fprintf(os.Stderr, "%v\n", err) | |
65 | os.Exit(1) | |
66 | } | |
67 | tracer, err = zipkin.NewTracer( | |
68 | zipkin.NewRecorder(collector, false, "localhost:8000", "addcli"), | |
69 | ) | |
70 | if err != nil { | |
71 | fmt.Fprintf(os.Stderr, "%v\n", err) | |
72 | os.Exit(1) | |
73 | } | |
74 | } else if *appdashAddr != "" { | |
75 | tracer = appdashot.NewTracer(appdash.NewRemoteCollector(*appdashAddr)) | |
76 | } else if *lightstepToken != "" { | |
77 | tracer = lightstep.NewTracer(lightstep.Options{ | |
78 | AccessToken: *lightstepToken, | |
79 | }) | |
80 | defer lightstep.FlushLightStepTracer(tracer) | |
81 | } else { | |
82 | tracer = stdopentracing.GlobalTracer() // no-op | |
83 | } | |
84 | } | |
85 | ||
86 | // This is a demonstration client, which supports multiple transports. | |
87 | // Your clients will probably just define and stick with 1 transport. | |
88 | ||
89 | var ( | |
90 | service addsvc.Service | |
91 | err error | |
92 | ) | |
93 | if *httpAddr != "" { | |
94 | service, err = httpclient.New(*httpAddr, tracer, log.NewNopLogger()) | |
95 | } else if *grpcAddr != "" { | |
96 | conn, err := grpc.Dial(*grpcAddr, grpc.WithInsecure(), grpc.WithTimeout(time.Second)) | |
97 | if err != nil { | |
98 | fmt.Fprintf(os.Stderr, "error: %v", err) | |
99 | os.Exit(1) | |
100 | } | |
101 | defer conn.Close() | |
102 | service = grpcclient.New(conn, tracer, log.NewNopLogger()) | |
103 | } else if *thriftAddr != "" { | |
104 | // It's necessary to do all of this construction in the func main, | |
105 | // because (among other reasons) we need to control the lifecycle of the | |
106 | // Thrift transport, i.e. close it eventually. | |
107 | var protocolFactory thrift.TProtocolFactory | |
108 | switch *thriftProtocol { | |
109 | case "compact": | |
110 | protocolFactory = thrift.NewTCompactProtocolFactory() | |
111 | case "simplejson": | |
112 | protocolFactory = thrift.NewTSimpleJSONProtocolFactory() | |
113 | case "json": | |
114 | protocolFactory = thrift.NewTJSONProtocolFactory() | |
115 | case "binary", "": | |
116 | protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() | |
117 | default: | |
118 | fmt.Fprintf(os.Stderr, "error: invalid protocol %q\n", *thriftProtocol) | |
119 | os.Exit(1) | |
120 | } | |
121 | var transportFactory thrift.TTransportFactory | |
122 | if *thriftBufferSize > 0 { | |
123 | transportFactory = thrift.NewTBufferedTransportFactory(*thriftBufferSize) | |
124 | } else { | |
125 | transportFactory = thrift.NewTTransportFactory() | |
126 | } | |
127 | if *thriftFramed { | |
128 | transportFactory = thrift.NewTFramedTransportFactory(transportFactory) | |
129 | } | |
130 | transportSocket, err := thrift.NewTSocket(*thriftAddr) | |
131 | if err != nil { | |
132 | fmt.Fprintf(os.Stderr, "error: %v\n", err) | |
133 | os.Exit(1) | |
134 | } | |
135 | transport := transportFactory.GetTransport(transportSocket) | |
136 | if err := transport.Open(); err != nil { | |
137 | fmt.Fprintf(os.Stderr, "error: %v\n", err) | |
138 | os.Exit(1) | |
139 | } | |
140 | defer transport.Close() | |
141 | client := thriftadd.NewAddServiceClientFactory(transport, protocolFactory) | |
142 | service = thriftclient.New(client) | |
143 | } else { | |
144 | fmt.Fprintf(os.Stderr, "error: no remote address specified\n") | |
145 | os.Exit(1) | |
146 | } | |
147 | if err != nil { | |
148 | fmt.Fprintf(os.Stderr, "error: %v\n", err) | |
149 | os.Exit(1) | |
150 | } | |
151 | ||
152 | switch *method { | |
153 | case "sum": | |
154 | a, _ := strconv.ParseInt(flag.Args()[0], 10, 64) | |
155 | b, _ := strconv.ParseInt(flag.Args()[1], 10, 64) | |
156 | v, err := service.Sum(context.Background(), int(a), int(b)) | |
157 | if err != nil { | |
158 | fmt.Fprintf(os.Stderr, "error: %v\n", err) | |
159 | os.Exit(1) | |
160 | } | |
161 | fmt.Fprintf(os.Stdout, "%d + %d = %d\n", a, b, v) | |
162 | ||
163 | case "concat": | |
164 | a := flag.Args()[0] | |
165 | b := flag.Args()[1] | |
166 | v, err := service.Concat(context.Background(), a, b) | |
167 | if err != nil { | |
168 | fmt.Fprintf(os.Stderr, "error: %v\n", err) | |
169 | os.Exit(1) | |
170 | } | |
171 | fmt.Fprintf(os.Stdout, "%q + %q = %q\n", a, b, v) | |
172 | ||
173 | default: | |
174 | fmt.Fprintf(os.Stderr, "error: invalid method %q\n", method) | |
175 | os.Exit(1) | |
176 | } | |
177 | } |
0 | package main | |
1 | ||
2 | import ( | |
3 | "flag" | |
4 | "fmt" | |
5 | "net" | |
6 | "net/http" | |
7 | "net/http/pprof" | |
8 | "os" | |
9 | "os/signal" | |
10 | "strings" | |
11 | "syscall" | |
12 | "time" | |
13 | ||
14 | "github.com/apache/thrift/lib/go/thrift" | |
15 | lightstep "github.com/lightstep/lightstep-tracer-go" | |
16 | stdopentracing "github.com/opentracing/opentracing-go" | |
17 | zipkin "github.com/openzipkin/zipkin-go-opentracing" | |
18 | stdprometheus "github.com/prometheus/client_golang/prometheus" | |
19 | appdashot "github.com/sourcegraph/appdash/opentracing" | |
20 | "golang.org/x/net/context" | |
21 | "google.golang.org/grpc" | |
22 | "sourcegraph.com/sourcegraph/appdash" | |
23 | ||
24 | "github.com/go-kit/kit/endpoint" | |
25 | "github.com/go-kit/kit/examples/addsvc" | |
26 | "github.com/go-kit/kit/examples/addsvc/pb" | |
27 | thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" | |
28 | "github.com/go-kit/kit/log" | |
29 | "github.com/go-kit/kit/metrics" | |
30 | "github.com/go-kit/kit/metrics/prometheus" | |
31 | "github.com/go-kit/kit/tracing/opentracing" | |
32 | ) | |
33 | ||
// main wires up the addsvc server: logging, metrics, tracing, the business
// service, per-method endpoints, and four concurrent listeners (debug, HTTP,
// gRPC, Thrift). The process exits when any listener fails or on SIGINT/TERM.
func main() {
	var (
		debugAddr        = flag.String("debug.addr", ":8080", "Debug and metrics listen address")
		httpAddr         = flag.String("http.addr", ":8081", "HTTP listen address")
		grpcAddr         = flag.String("grpc.addr", ":8082", "gRPC (HTTP) listen address")
		thriftAddr       = flag.String("thrift.addr", ":8083", "Thrift listen address")
		thriftProtocol   = flag.String("thrift.protocol", "binary", "binary, compact, json, simplejson")
		thriftBufferSize = flag.Int("thrift.buffer.size", 0, "0 for unbuffered")
		thriftFramed     = flag.Bool("thrift.framed", false, "true to enable framing")
		zipkinAddr       = flag.String("zipkin.addr", "", "Enable Zipkin tracing via a Kafka server host:port")
		appdashAddr      = flag.String("appdash.addr", "", "Enable Appdash tracing via an Appdash server host:port")
		lightstepToken   = flag.String("lightstep.token", "", "Enable LightStep tracing via a LightStep access token")
	)
	flag.Parse()

	// Logging domain.
	var logger log.Logger
	{
		logger = log.NewLogfmtLogger(os.Stdout)
		logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC)
		logger = log.NewContext(logger).With("caller", log.DefaultCaller)
	}
	logger.Log("msg", "hello")
	defer logger.Log("msg", "goodbye")

	// Metrics domain.
	var ints, chars metrics.Counter
	{
		// Business level metrics.
		ints = prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "addsvc",
			Name:      "integers_summed",
			Help:      "Total count of integers summed via the Sum method.",
		}, []string{})
		chars = prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "addsvc",
			Name:      "characters_concatenated",
			Help:      "Total count of characters concatenated via the Concat method.",
		}, []string{})
	}
	var duration metrics.TimeHistogram
	{
		// Transport level metrics.
		duration = metrics.NewTimeHistogram(time.Nanosecond, prometheus.NewSummary(stdprometheus.SummaryOpts{
			Namespace: "addsvc",
			Name:      "request_duration_ns",
			Help:      "Request duration in nanoseconds.",
		}, []string{"method", "success"}))
	}

	// Tracing domain. Exactly one tracer is chosen; the flags are assumed to
	// be mutually exclusive, with zipkin taking precedence if several are set.
	var tracer stdopentracing.Tracer
	{
		if *zipkinAddr != "" {
			logger := log.NewContext(logger).With("tracer", "Zipkin")
			logger.Log("addr", *zipkinAddr)
			collector, err := zipkin.NewKafkaCollector(
				strings.Split(*zipkinAddr, ","),
				zipkin.KafkaLogger(logger),
			)
			if err != nil {
				logger.Log("err", err)
				os.Exit(1)
			}
			tracer, err = zipkin.NewTracer(
				zipkin.NewRecorder(collector, false, "localhost:80", "addsvc"),
			)
			if err != nil {
				logger.Log("err", err)
				os.Exit(1)
			}
		} else if *appdashAddr != "" {
			logger := log.NewContext(logger).With("tracer", "Appdash")
			logger.Log("addr", *appdashAddr)
			tracer = appdashot.NewTracer(appdash.NewRemoteCollector(*appdashAddr))
		} else if *lightstepToken != "" {
			logger := log.NewContext(logger).With("tracer", "LightStep")
			logger.Log() // probably don't want to print out the token :)
			tracer = lightstep.NewTracer(lightstep.Options{
				AccessToken: *lightstepToken,
			})
			defer lightstep.FlushLightStepTracer(tracer)
		} else {
			logger := log.NewContext(logger).With("tracer", "none")
			logger.Log()
			tracer = stdopentracing.GlobalTracer() // no-op
		}
	}

	// Business domain: the core service, wrapped in logging and
	// instrumentation middlewares (innermost first).
	var service addsvc.Service
	{
		service = addsvc.NewBasicService()
		service = addsvc.ServiceLoggingMiddleware(logger)(service)
		service = addsvc.ServiceInstrumentingMiddleware(ints, chars)(service)
	}

	// Endpoint domain: each method gets tracing, instrumentation, and logging
	// middlewares layered over the bare endpoint.
	var sumEndpoint endpoint.Endpoint
	{
		sumDuration := duration.With(metrics.Field{Key: "method", Value: "Sum"})
		sumLogger := log.NewContext(logger).With("method", "Sum")

		sumEndpoint = addsvc.MakeSumEndpoint(service)
		sumEndpoint = opentracing.TraceServer(tracer, "Sum")(sumEndpoint)
		sumEndpoint = addsvc.EndpointInstrumentingMiddleware(sumDuration)(sumEndpoint)
		sumEndpoint = addsvc.EndpointLoggingMiddleware(sumLogger)(sumEndpoint)
	}
	var concatEndpoint endpoint.Endpoint
	{
		concatDuration := duration.With(metrics.Field{Key: "method", Value: "Concat"})
		concatLogger := log.NewContext(logger).With("method", "Concat")

		concatEndpoint = addsvc.MakeConcatEndpoint(service)
		concatEndpoint = opentracing.TraceServer(tracer, "Concat")(concatEndpoint)
		concatEndpoint = addsvc.EndpointInstrumentingMiddleware(concatDuration)(concatEndpoint)
		concatEndpoint = addsvc.EndpointLoggingMiddleware(concatLogger)(concatEndpoint)
	}
	endpoints := addsvc.Endpoints{
		SumEndpoint:    sumEndpoint,
		ConcatEndpoint: concatEndpoint,
	}

	// Mechanical domain. errc funnels the first fatal error (or the interrupt
	// signal, formatted as an error) from any goroutine to the final receive
	// below, which terminates the process.
	errc := make(chan error)
	ctx := context.Background()

	// Interrupt handler.
	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		errc <- fmt.Errorf("%s", <-c)
	}()

	// Debug listener: pprof endpoints and the Prometheus scrape handler.
	go func() {
		logger := log.NewContext(logger).With("transport", "debug")

		m := http.NewServeMux()
		m.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
		m.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
		m.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
		m.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
		m.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
		m.Handle("/metrics", stdprometheus.Handler())

		logger.Log("addr", *debugAddr)
		errc <- http.ListenAndServe(*debugAddr, m)
	}()

	// HTTP transport.
	go func() {
		logger := log.NewContext(logger).With("transport", "HTTP")
		h := addsvc.MakeHTTPHandler(ctx, endpoints, tracer, logger)
		logger.Log("addr", *httpAddr)
		errc <- http.ListenAndServe(*httpAddr, h)
	}()

	// gRPC transport.
	go func() {
		logger := log.NewContext(logger).With("transport", "gRPC")

		ln, err := net.Listen("tcp", *grpcAddr)
		if err != nil {
			errc <- err
			return
		}

		srv := addsvc.MakeGRPCServer(ctx, endpoints, tracer, logger)
		s := grpc.NewServer()
		pb.RegisterAddServer(s, srv)

		logger.Log("addr", *grpcAddr)
		errc <- s.Serve(ln)
	}()

	// Thrift transport.
	go func() {
		logger := log.NewContext(logger).With("transport", "Thrift")

		var protocolFactory thrift.TProtocolFactory
		switch *thriftProtocol {
		case "binary":
			protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
		case "compact":
			protocolFactory = thrift.NewTCompactProtocolFactory()
		case "json":
			protocolFactory = thrift.NewTJSONProtocolFactory()
		case "simplejson":
			protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
		default:
			errc <- fmt.Errorf("invalid Thrift protocol %q", *thriftProtocol)
			return
		}

		var transportFactory thrift.TTransportFactory
		if *thriftBufferSize > 0 {
			transportFactory = thrift.NewTBufferedTransportFactory(*thriftBufferSize)
		} else {
			transportFactory = thrift.NewTTransportFactory()
		}
		if *thriftFramed {
			transportFactory = thrift.NewTFramedTransportFactory(transportFactory)
		}

		transport, err := thrift.NewTServerSocket(*thriftAddr)
		if err != nil {
			errc <- err
			return
		}

		logger.Log("addr", *thriftAddr)
		errc <- thrift.NewTSimpleServer4(
			thriftadd.NewAddServiceProcessor(addsvc.MakeThriftHandler(ctx, endpoints)),
			transport,
			transportFactory,
			protocolFactory,
		).Serve()
	}()

	// Run! Block until the first error (or signal) arrives on errc.
	logger.Log("exit", <-errc)
}
0 | // Package addsvc implements the business and transport logic for an example | |
1 | // service that can sum integers and concatenate strings. | |
2 | // | |
3 | // A client library is available in the client subdirectory. A server binary is | |
4 | // available in cmd/addsrv. An example client binary is available in cmd/addcli. | |
5 | package addsvc |
0 | package main | |
1 | ||
2 | import ( | |
3 | "golang.org/x/net/context" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | "github.com/go-kit/kit/examples/addsvc/server" | |
7 | ) | |
8 | ||
9 | func makeSumEndpoint(svc server.AddService) endpoint.Endpoint { | |
10 | return func(ctx context.Context, request interface{}) (interface{}, error) { | |
11 | req := request.(*server.SumRequest) | |
12 | v := svc.Sum(req.A, req.B) | |
13 | return server.SumResponse{V: v}, nil | |
14 | } | |
15 | } | |
16 | ||
17 | func makeConcatEndpoint(svc server.AddService) endpoint.Endpoint { | |
18 | return func(ctx context.Context, request interface{}) (interface{}, error) { | |
19 | req := request.(*server.ConcatRequest) | |
20 | v := svc.Concat(req.A, req.B) | |
21 | return server.ConcatResponse{V: v}, nil | |
22 | } | |
23 | } |
0 | package addsvc | |
1 | ||
2 | // This file contains methods to make individual endpoints from services, | |
3 | // request and response types to serve those endpoints, as well as encoders and | |
4 | // decoders for those types, for all of our supported transport serialization | |
5 | // formats. It also includes endpoint middlewares. | |
6 | ||
7 | import ( | |
8 | "fmt" | |
9 | "time" | |
10 | ||
11 | "golang.org/x/net/context" | |
12 | ||
13 | "github.com/go-kit/kit/endpoint" | |
14 | "github.com/go-kit/kit/log" | |
15 | "github.com/go-kit/kit/metrics" | |
16 | ) | |
17 | ||
// Endpoints collects all of the endpoints that compose an add service. It's
// meant to be used as a helper struct, to collect all of the endpoints into a
// single parameter.
//
// In a server, it's useful for functions that need to operate on a per-endpoint
// basis. For example, you might pass an Endpoints to a function that produces
// an http.Handler, with each method (endpoint) wired up to a specific path. (It
// is probably a mistake in design to invoke the Service methods on the
// Endpoints struct in a server.)
//
// In a client, it's useful to collect individually constructed endpoints into a
// single type that implements the Service interface. For example, you might
// construct individual endpoints using transport/http.NewClient, combine them
// into an Endpoints, and return it to the caller as a Service.
type Endpoints struct {
	SumEndpoint    endpoint.Endpoint // takes sumRequest, returns sumResponse
	ConcatEndpoint endpoint.Endpoint // takes concatRequest, returns concatResponse
}
36 | ||
37 | // Sum implements Service. Primarily useful in a client. | |
38 | func (e Endpoints) Sum(ctx context.Context, a, b int) (int, error) { | |
39 | request := sumRequest{A: a, B: b} | |
40 | response, err := e.SumEndpoint(ctx, request) | |
41 | if err != nil { | |
42 | return 0, err | |
43 | } | |
44 | return response.(sumResponse).V, nil | |
45 | } | |
46 | ||
47 | // Concat implements Service. Primarily useful in a client. | |
48 | func (e Endpoints) Concat(ctx context.Context, a, b string) (string, error) { | |
49 | request := concatRequest{A: a, B: b} | |
50 | response, err := e.ConcatEndpoint(ctx, request) | |
51 | if err != nil { | |
52 | return "", err | |
53 | } | |
54 | return response.(concatResponse).V, err | |
55 | } | |
56 | ||
57 | // MakeSumEndpoint returns an endpoint that invokes Sum on the service. | |
58 | // Primarily useful in a server. | |
59 | func MakeSumEndpoint(s Service) endpoint.Endpoint { | |
60 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
61 | sumReq := request.(sumRequest) | |
62 | v, err := s.Sum(ctx, sumReq.A, sumReq.B) | |
63 | if err != nil { | |
64 | return nil, err | |
65 | } | |
66 | return sumResponse{ | |
67 | V: v, | |
68 | }, nil | |
69 | } | |
70 | } | |
71 | ||
72 | // MakeConcatEndpoint returns an endpoint that invokes Concat on the service. | |
73 | // Primarily useful in a server. | |
74 | func MakeConcatEndpoint(s Service) endpoint.Endpoint { | |
75 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
76 | concatReq := request.(concatRequest) | |
77 | v, err := s.Concat(ctx, concatReq.A, concatReq.B) | |
78 | if err != nil { | |
79 | return nil, err | |
80 | } | |
81 | return concatResponse{ | |
82 | V: v, | |
83 | }, nil | |
84 | } | |
85 | } | |
86 | ||
87 | // EndpointInstrumentingMiddleware returns an endpoint middleware that records | |
88 | // the duration of each invocation to the passed histogram. The middleware adds | |
89 | // a single field: "success", which is "true" if no error is returned, and | |
90 | // "false" otherwise. | |
91 | func EndpointInstrumentingMiddleware(duration metrics.TimeHistogram) endpoint.Middleware { | |
92 | return func(next endpoint.Endpoint) endpoint.Endpoint { | |
93 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
94 | ||
95 | defer func(begin time.Time) { | |
96 | f := metrics.Field{Key: "success", Value: fmt.Sprint(err == nil)} | |
97 | duration.With(f).Observe(time.Since(begin)) | |
98 | }(time.Now()) | |
99 | return next(ctx, request) | |
100 | ||
101 | } | |
102 | } | |
103 | } | |
104 | ||
105 | // EndpointLoggingMiddleware returns an endpoint middleware that logs the | |
106 | // duration of each invocation, and the resulting error, if any. | |
107 | func EndpointLoggingMiddleware(logger log.Logger) endpoint.Middleware { | |
108 | return func(next endpoint.Endpoint) endpoint.Endpoint { | |
109 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
110 | ||
111 | defer func(begin time.Time) { | |
112 | logger.Log("error", err, "took", time.Since(begin)) | |
113 | }(time.Now()) | |
114 | return next(ctx, request) | |
115 | ||
116 | } | |
117 | } | |
118 | } | |
119 | ||
// These types are unexported because they only exist to serve the endpoint
// domain, which is totally encapsulated in this package. They are otherwise
// opaque to all callers.

// sumRequest carries the two integer operands for the Sum method.
type sumRequest struct{ A, B int }

// sumResponse carries the integer result of the Sum method.
type sumResponse struct{ V int }

// concatRequest carries the two string operands for the Concat method.
type concatRequest struct{ A, B string }

// concatResponse carries the string result of the Concat method.
type concatResponse struct{ V string }
0 | package main | |
1 | ||
2 | import ( | |
3 | "golang.org/x/net/context" | |
4 | ||
5 | "github.com/opentracing/opentracing-go" | |
6 | ||
7 | "github.com/go-kit/kit/examples/addsvc/pb" | |
8 | "github.com/go-kit/kit/examples/addsvc/server" | |
9 | servergrpc "github.com/go-kit/kit/examples/addsvc/server/grpc" | |
10 | "github.com/go-kit/kit/log" | |
11 | kitot "github.com/go-kit/kit/tracing/opentracing" | |
12 | "github.com/go-kit/kit/transport/grpc" | |
13 | ) | |
14 | ||
// grpcBinding adapts the add service to the pb.AddServer interface by holding
// one go-kit grpc.Handler per RPC method.
type grpcBinding struct {
	sum, concat grpc.Handler
}
18 | ||
19 | func newGRPCBinding(ctx context.Context, tracer opentracing.Tracer, svc server.AddService, tracingLogger log.Logger) grpcBinding { | |
20 | return grpcBinding{ | |
21 | sum: grpc.NewServer( | |
22 | ctx, | |
23 | kitot.TraceServer(tracer, "sum")(makeSumEndpoint(svc)), | |
24 | servergrpc.DecodeSumRequest, | |
25 | servergrpc.EncodeSumResponse, | |
26 | grpc.ServerBefore(kitot.FromGRPCRequest(tracer, "", tracingLogger)), | |
27 | ), | |
28 | concat: grpc.NewServer( | |
29 | ctx, | |
30 | kitot.TraceServer(tracer, "concat")(makeConcatEndpoint(svc)), | |
31 | servergrpc.DecodeConcatRequest, | |
32 | servergrpc.EncodeConcatResponse, | |
33 | grpc.ServerBefore(kitot.FromGRPCRequest(tracer, "", tracingLogger)), | |
34 | ), | |
35 | } | |
36 | } | |
37 | ||
38 | func (b grpcBinding) Sum(ctx context.Context, req *pb.SumRequest) (*pb.SumReply, error) { | |
39 | _, resp, err := b.sum.ServeGRPC(ctx, req) | |
40 | return resp.(*pb.SumReply), err | |
41 | } | |
42 | ||
43 | func (b grpcBinding) Concat(ctx context.Context, req *pb.ConcatRequest) (*pb.ConcatReply, error) { | |
44 | _, resp, err := b.concat.ServeGRPC(ctx, req) | |
45 | return resp.(*pb.ConcatReply), err | |
46 | } |
0 | package main | |
1 | ||
2 | import ( | |
3 | "flag" | |
4 | "fmt" | |
5 | stdlog "log" | |
6 | "math/rand" | |
7 | "net" | |
8 | "net/http" | |
9 | "net/rpc" | |
10 | "os" | |
11 | "os/signal" | |
12 | "strings" | |
13 | "syscall" | |
14 | "time" | |
15 | ||
16 | "github.com/apache/thrift/lib/go/thrift" | |
17 | "github.com/lightstep/lightstep-tracer-go" | |
18 | "github.com/opentracing/opentracing-go" | |
19 | zipkin "github.com/openzipkin/zipkin-go-opentracing" | |
20 | stdprometheus "github.com/prometheus/client_golang/prometheus" | |
21 | appdashot "github.com/sourcegraph/appdash/opentracing" | |
22 | "golang.org/x/net/context" | |
23 | "google.golang.org/grpc" | |
24 | "sourcegraph.com/sourcegraph/appdash" | |
25 | ||
26 | "github.com/go-kit/kit/endpoint" | |
27 | "github.com/go-kit/kit/examples/addsvc/pb" | |
28 | "github.com/go-kit/kit/examples/addsvc/server" | |
29 | thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/add" | |
30 | "github.com/go-kit/kit/log" | |
31 | "github.com/go-kit/kit/metrics" | |
32 | "github.com/go-kit/kit/metrics/expvar" | |
33 | "github.com/go-kit/kit/metrics/prometheus" | |
34 | kitot "github.com/go-kit/kit/tracing/opentracing" | |
35 | httptransport "github.com/go-kit/kit/transport/http" | |
36 | ) | |
37 | ||
// main wires the addsvc business logic to four transports (HTTP/JSON, gRPC,
// net/rpc, Thrift) plus a debug/instrumentation server, structured logging,
// request metrics, and an optional OpenTracing backend. It blocks until the
// first transport fails or the process receives an interrupt signal.
func main() {
	// Flag domain. Note that gRPC transitively registers flags via its import
	// of glog. So, we define a new flag set, to keep those domains distinct.
	fs := flag.NewFlagSet("", flag.ExitOnError)
	var (
		debugAddr        = fs.String("debug.addr", ":8000", "Address for HTTP debug/instrumentation server")
		httpAddr         = fs.String("http.addr", ":8001", "Address for HTTP (JSON) server")
		grpcAddr         = fs.String("grpc.addr", ":8002", "Address for gRPC server")
		netrpcAddr       = fs.String("netrpc.addr", ":8003", "Address for net/rpc server")
		thriftAddr       = fs.String("thrift.addr", ":8004", "Address for Thrift server")
		thriftProtocol   = fs.String("thrift.protocol", "binary", "binary, compact, json, simplejson")
		thriftBufferSize = fs.Int("thrift.buffer.size", 0, "0 for unbuffered")
		thriftFramed     = fs.Bool("thrift.framed", false, "true to enable framing")

		// Supported OpenTracing backends
		zipkinAddr           = fs.String("zipkin.kafka.addr", "", "Enable Zipkin tracing via a Kafka server host:port")
		appdashAddr          = fs.String("appdash.addr", "", "Enable Appdash tracing via an Appdash server host:port")
		lightstepAccessToken = fs.String("lightstep.token", "", "Enable LightStep tracing via a LightStep access token")
	)
	flag.Usage = fs.Usage // only show our flags
	if err := fs.Parse(os.Args[1:]); err != nil {
		fmt.Fprintf(os.Stderr, "%v", err)
		os.Exit(1)
	}

	// package log
	var logger log.Logger
	{
		logger = log.NewLogfmtLogger(os.Stderr)
		logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC).With("caller", log.DefaultCaller)
		stdlog.SetFlags(0)                             // flags are handled by Go kit's logger
		stdlog.SetOutput(log.NewStdlibAdapter(logger)) // redirect anything using stdlib log to us
	}

	// package metrics
	// One logical histogram fanned out to both expvar and Prometheus.
	var requestDuration metrics.TimeHistogram
	{
		requestDuration = metrics.NewTimeHistogram(time.Nanosecond, metrics.NewMultiHistogram(
			"request_duration_ns",
			expvar.NewHistogram("request_duration_ns", 0, 5e9, 1, 50, 95, 99),
			prometheus.NewSummary(stdprometheus.SummaryOpts{
				Namespace: "myorg",
				Subsystem: "addsvc",
				Name:      "duration_ns",
				Help:      "Request duration in nanoseconds.",
			}, []string{"method"}),
		))
	}

	// Set up OpenTracing. At most one backend may be selected; with none
	// selected, the no-op global tracer is used. More than one is fatal.
	var tracer opentracing.Tracer
	{
		switch {
		case *appdashAddr != "" && *lightstepAccessToken == "" && *zipkinAddr == "":
			tracer = appdashot.NewTracer(appdash.NewRemoteCollector(*appdashAddr))
		case *appdashAddr == "" && *lightstepAccessToken != "" && *zipkinAddr == "":
			tracer = lightstep.NewTracer(lightstep.Options{
				AccessToken: *lightstepAccessToken,
			})
			// Flush buffered spans on the way out of main.
			defer lightstep.FlushLightStepTracer(tracer)
		case *appdashAddr == "" && *lightstepAccessToken == "" && *zipkinAddr != "":
			collector, err := zipkin.NewKafkaCollector(
				strings.Split(*zipkinAddr, ","),
				zipkin.KafkaLogger(logger),
			)
			if err != nil {
				logger.Log("err", "unable to create collector", "fatal", err)
				os.Exit(1)
			}
			tracer, err = zipkin.NewTracer(
				zipkin.NewRecorder(collector, false, "localhost:80", "addsvc"),
			)
			if err != nil {
				logger.Log("err", "unable to create zipkin tracer", "fatal", err)
				os.Exit(1)
			}
		case *appdashAddr == "" && *lightstepAccessToken == "" && *zipkinAddr == "":
			tracer = opentracing.GlobalTracer() // no-op
		default:
			logger.Log("fatal", "specify a single -appdash.addr, -lightstep.access.token or -zipkin.kafka.addr")
			os.Exit(1)
		}
	}

	// Business domain: the pure service wrapped in logging and then
	// instrumenting middlewares (outermost runs first).
	var svc server.AddService
	{
		svc = pureAddService{}
		svc = loggingMiddleware{svc, logger}
		svc = instrumentingMiddleware{svc, requestDuration}
	}

	// Mechanical stuff. Every transport goroutine below sends its terminal
	// error to errc; main blocks on the first one received.
	rand.Seed(time.Now().UnixNano())
	root := context.Background()
	errc := make(chan error)

	go func() {
		errc <- interrupt()
	}()

	// Debug/instrumentation
	go func() {
		transportLogger := log.NewContext(logger).With("transport", "debug")
		transportLogger.Log("addr", *debugAddr)
		errc <- http.ListenAndServe(*debugAddr, nil) // DefaultServeMux
	}()

	// Transport: HTTP/JSON
	go func() {
		var (
			transportLogger = log.NewContext(logger).With("transport", "HTTP/JSON")
			tracingLogger   = log.NewContext(transportLogger).With("component", "tracing")
			mux             = http.NewServeMux()
			sum, concat     endpoint.Endpoint
		)

		sum = makeSumEndpoint(svc)
		sum = kitot.TraceServer(tracer, "sum")(sum)
		mux.Handle("/sum", httptransport.NewServer(
			root,
			sum,
			server.DecodeSumRequest,
			server.EncodeSumResponse,
			httptransport.ServerErrorLogger(transportLogger),
			httptransport.ServerBefore(kitot.FromHTTPRequest(tracer, "sum", tracingLogger)),
		))

		concat = makeConcatEndpoint(svc)
		concat = kitot.TraceServer(tracer, "concat")(concat)
		mux.Handle("/concat", httptransport.NewServer(
			root,
			concat,
			server.DecodeConcatRequest,
			server.EncodeConcatResponse,
			httptransport.ServerErrorLogger(transportLogger),
			httptransport.ServerBefore(kitot.FromHTTPRequest(tracer, "concat", tracingLogger)),
		))

		transportLogger.Log("addr", *httpAddr)
		errc <- http.ListenAndServe(*httpAddr, mux)
	}()

	// Transport: gRPC
	go func() {
		transportLogger := log.NewContext(logger).With("transport", "gRPC")
		tracingLogger := log.NewContext(transportLogger).With("component", "tracing")
		ln, err := net.Listen("tcp", *grpcAddr)
		if err != nil {
			errc <- err
			return
		}
		s := grpc.NewServer() // uses its own, internal context
		pb.RegisterAddServer(s, newGRPCBinding(root, tracer, svc, tracingLogger))
		transportLogger.Log("addr", *grpcAddr)
		errc <- s.Serve(ln)
	}()

	// Transport: net/rpc
	go func() {
		transportLogger := log.NewContext(logger).With("transport", "net/rpc")
		s := rpc.NewServer()
		if err := s.RegisterName("addsvc", netrpcBinding{svc}); err != nil {
			errc <- err
			return
		}
		s.HandleHTTP(rpc.DefaultRPCPath, rpc.DefaultDebugPath)
		transportLogger.Log("addr", *netrpcAddr)
		errc <- http.ListenAndServe(*netrpcAddr, s)
	}()

	// Transport: Thrift
	go func() {
		// Protocol and transport factories are selected from flags before the
		// server socket is opened, so a bad flag fails fast.
		var protocolFactory thrift.TProtocolFactory
		switch *thriftProtocol {
		case "binary":
			protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
		case "compact":
			protocolFactory = thrift.NewTCompactProtocolFactory()
		case "json":
			protocolFactory = thrift.NewTJSONProtocolFactory()
		case "simplejson":
			protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
		default:
			errc <- fmt.Errorf("invalid Thrift protocol %q", *thriftProtocol)
			return
		}
		var transportFactory thrift.TTransportFactory
		if *thriftBufferSize > 0 {
			transportFactory = thrift.NewTBufferedTransportFactory(*thriftBufferSize)
		} else {
			transportFactory = thrift.NewTTransportFactory()
		}
		if *thriftFramed {
			transportFactory = thrift.NewTFramedTransportFactory(transportFactory)
		}
		transport, err := thrift.NewTServerSocket(*thriftAddr)
		if err != nil {
			errc <- err
			return
		}
		transportLogger := log.NewContext(logger).With("transport", "thrift")
		transportLogger.Log("addr", *thriftAddr)
		errc <- thrift.NewTSimpleServer4(
			thriftadd.NewAddServiceProcessor(thriftBinding{svc}),
			transport,
			transportFactory,
			protocolFactory,
		).Serve()
	}()

	// Block until the first fatal event: a transport error or a signal.
	logger.Log("fatal", <-errc)
}
251 | ||
// interrupt blocks until the process receives SIGINT or SIGTERM, then returns
// the signal wrapped in an error. main runs it in a goroutine so a signal
// unblocks the select-over-errc shutdown path.
func interrupt() error {
	// The channel must be buffered: os/signal.Notify does not block sending,
	// so a signal delivered while no receiver is ready would be dropped on an
	// unbuffered channel.
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	return fmt.Errorf("%s", <-c)
}
0 | package main | |
1 | ||
2 | import ( | |
3 | "github.com/go-kit/kit/examples/addsvc/server" | |
4 | ) | |
5 | ||
// netrpcBinding adapts an AddService to the stdlib net/rpc package, which
// requires methods of the form Method(args T, reply *U) error.
type netrpcBinding struct {
	server.AddService
}
9 | ||
10 | func (b netrpcBinding) Sum(request server.SumRequest, response *server.SumResponse) error { | |
11 | v := b.AddService.Sum(request.A, request.B) | |
12 | (*response) = server.SumResponse{V: v} | |
13 | return nil | |
14 | } | |
15 | ||
16 | func (b netrpcBinding) Concat(request server.ConcatRequest, response *server.ConcatResponse) error { | |
17 | v := b.AddService.Concat(request.A, request.B) | |
18 | (*response) = server.ConcatResponse{V: v} | |
19 | return nil | |
20 | } |
0 | // Code generated by protoc-gen-go. | |
1 | // source: add.proto | |
2 | // DO NOT EDIT! | |
3 | ||
4 | /* | |
5 | Package pb is a generated protocol buffer package. | |
6 | ||
7 | It is generated from these files: | |
8 | add.proto | |
9 | ||
10 | It has these top-level messages: | |
11 | SumRequest | |
12 | SumReply | |
13 | ConcatRequest | |
14 | ConcatReply | |
15 | */ | |
16 | package pb | |
17 | ||
18 | import proto "github.com/golang/protobuf/proto" | |
19 | import fmt "fmt" | |
20 | import math "math" | |
21 | ||
22 | import ( | |
23 | context "golang.org/x/net/context" | |
24 | grpc "google.golang.org/grpc" | |
25 | ) | |
26 | ||
27 | // Reference imports to suppress errors if they are not otherwise used. | |
28 | var _ = proto.Marshal | |
29 | var _ = fmt.Errorf | |
30 | var _ = math.Inf | |
31 | ||
32 | // This is a compile-time assertion to ensure that this generated file | |
33 | // is compatible with the proto package it is being compiled against. | |
34 | const _ = proto.ProtoPackageIsVersion1 | |
35 | ||
36 | // The sum request contains two parameters. | |
37 | type SumRequest struct { | |
38 | A int64 `protobuf:"varint,1,opt,name=a" json:"a,omitempty"` | |
39 | B int64 `protobuf:"varint,2,opt,name=b" json:"b,omitempty"` | |
40 | } | |
41 | ||
42 | func (m *SumRequest) Reset() { *m = SumRequest{} } | |
43 | func (m *SumRequest) String() string { return proto.CompactTextString(m) } | |
44 | func (*SumRequest) ProtoMessage() {} | |
45 | func (*SumRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } | |
46 | ||
47 | // The sum response contains the result of the calculation. | |
48 | type SumReply struct { | |
49 | V int64 `protobuf:"varint,1,opt,name=v" json:"v,omitempty"` | |
50 | } | |
51 | ||
52 | func (m *SumReply) Reset() { *m = SumReply{} } | |
53 | func (m *SumReply) String() string { return proto.CompactTextString(m) } | |
54 | func (*SumReply) ProtoMessage() {} | |
55 | func (*SumReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } | |
56 | ||
57 | // The Concat request contains two parameters. | |
58 | type ConcatRequest struct { | |
59 | A string `protobuf:"bytes,1,opt,name=a" json:"a,omitempty"` | |
60 | B string `protobuf:"bytes,2,opt,name=b" json:"b,omitempty"` | |
61 | } | |
62 | ||
63 | func (m *ConcatRequest) Reset() { *m = ConcatRequest{} } | |
64 | func (m *ConcatRequest) String() string { return proto.CompactTextString(m) } | |
65 | func (*ConcatRequest) ProtoMessage() {} | |
66 | func (*ConcatRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } | |
67 | ||
68 | // The Concat response contains the result of the concatenation. | |
69 | type ConcatReply struct { | |
70 | V string `protobuf:"bytes,1,opt,name=v" json:"v,omitempty"` | |
71 | } | |
72 | ||
73 | func (m *ConcatReply) Reset() { *m = ConcatReply{} } | |
74 | func (m *ConcatReply) String() string { return proto.CompactTextString(m) } | |
75 | func (*ConcatReply) ProtoMessage() {} | |
76 | func (*ConcatReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } | |
77 | ||
78 | func init() { | |
79 | proto.RegisterType((*SumRequest)(nil), "pb.SumRequest") | |
80 | proto.RegisterType((*SumReply)(nil), "pb.SumReply") | |
81 | proto.RegisterType((*ConcatRequest)(nil), "pb.ConcatRequest") | |
82 | proto.RegisterType((*ConcatReply)(nil), "pb.ConcatReply") | |
83 | } | |
84 | ||
85 | // Reference imports to suppress errors if they are not otherwise used. | |
86 | var _ context.Context | |
87 | var _ grpc.ClientConn | |
88 | ||
89 | // This is a compile-time assertion to ensure that this generated file | |
90 | // is compatible with the grpc package it is being compiled against. | |
91 | const _ = grpc.SupportPackageIsVersion2 | |
92 | ||
93 | // Client API for Add service | |
94 | ||
95 | type AddClient interface { | |
96 | // Sums two integers. | |
97 | Sum(ctx context.Context, in *SumRequest, opts ...grpc.CallOption) (*SumReply, error) | |
98 | // Concatenates two strings | |
99 | Concat(ctx context.Context, in *ConcatRequest, opts ...grpc.CallOption) (*ConcatReply, error) | |
100 | } | |
101 | ||
102 | type addClient struct { | |
103 | cc *grpc.ClientConn | |
104 | } | |
105 | ||
106 | func NewAddClient(cc *grpc.ClientConn) AddClient { | |
107 | return &addClient{cc} | |
108 | } | |
109 | ||
110 | func (c *addClient) Sum(ctx context.Context, in *SumRequest, opts ...grpc.CallOption) (*SumReply, error) { | |
111 | out := new(SumReply) | |
112 | err := grpc.Invoke(ctx, "/pb.Add/Sum", in, out, c.cc, opts...) | |
113 | if err != nil { | |
114 | return nil, err | |
115 | } | |
116 | return out, nil | |
117 | } | |
118 | ||
119 | func (c *addClient) Concat(ctx context.Context, in *ConcatRequest, opts ...grpc.CallOption) (*ConcatReply, error) { | |
120 | out := new(ConcatReply) | |
121 | err := grpc.Invoke(ctx, "/pb.Add/Concat", in, out, c.cc, opts...) | |
122 | if err != nil { | |
123 | return nil, err | |
124 | } | |
125 | return out, nil | |
126 | } | |
127 | ||
128 | // Server API for Add service | |
129 | ||
130 | type AddServer interface { | |
131 | // Sums two integers. | |
132 | Sum(context.Context, *SumRequest) (*SumReply, error) | |
133 | // Concatenates two strings | |
134 | Concat(context.Context, *ConcatRequest) (*ConcatReply, error) | |
135 | } | |
136 | ||
137 | func RegisterAddServer(s *grpc.Server, srv AddServer) { | |
138 | s.RegisterService(&_Add_serviceDesc, srv) | |
139 | } | |
140 | ||
141 | func _Add_Sum_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |
142 | in := new(SumRequest) | |
143 | if err := dec(in); err != nil { | |
144 | return nil, err | |
145 | } | |
146 | if interceptor == nil { | |
147 | return srv.(AddServer).Sum(ctx, in) | |
148 | } | |
149 | info := &grpc.UnaryServerInfo{ | |
150 | Server: srv, | |
151 | FullMethod: "/pb.Add/Sum", | |
152 | } | |
153 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |
154 | return srv.(AddServer).Sum(ctx, req.(*SumRequest)) | |
155 | } | |
156 | return interceptor(ctx, in, info, handler) | |
157 | } | |
158 | ||
159 | func _Add_Concat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |
160 | in := new(ConcatRequest) | |
161 | if err := dec(in); err != nil { | |
162 | return nil, err | |
163 | } | |
164 | if interceptor == nil { | |
165 | return srv.(AddServer).Concat(ctx, in) | |
166 | } | |
167 | info := &grpc.UnaryServerInfo{ | |
168 | Server: srv, | |
169 | FullMethod: "/pb.Add/Concat", | |
170 | } | |
171 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |
172 | return srv.(AddServer).Concat(ctx, req.(*ConcatRequest)) | |
173 | } | |
174 | return interceptor(ctx, in, info, handler) | |
175 | } | |
176 | ||
177 | var _Add_serviceDesc = grpc.ServiceDesc{ | |
178 | ServiceName: "pb.Add", | |
179 | HandlerType: (*AddServer)(nil), | |
180 | Methods: []grpc.MethodDesc{ | |
181 | { | |
182 | MethodName: "Sum", | |
183 | Handler: _Add_Sum_Handler, | |
184 | }, | |
185 | { | |
186 | MethodName: "Concat", | |
187 | Handler: _Add_Concat_Handler, | |
188 | }, | |
189 | }, | |
190 | Streams: []grpc.StreamDesc{}, | |
191 | } | |
192 | ||
193 | var fileDescriptor0 = []byte{ | |
194 | // 171 bytes of a gzipped FileDescriptorProto | |
195 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0x4c, 0x49, 0xd1, | |
196 | 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2a, 0x48, 0x52, 0xd2, 0xe0, 0xe2, 0x0a, 0x2e, 0xcd, | |
197 | 0x0d, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0xe2, 0xe1, 0x62, 0x4c, 0x94, 0x60, 0x54, 0x60, | |
198 | 0xd4, 0x60, 0x0e, 0x62, 0x4c, 0x04, 0xf1, 0x92, 0x24, 0x98, 0x20, 0xbc, 0x24, 0x25, 0x09, 0x2e, | |
199 | 0x0e, 0xb0, 0xca, 0x82, 0x9c, 0x4a, 0x90, 0x4c, 0x19, 0x4c, 0x5d, 0x99, 0x92, 0x36, 0x17, 0xaf, | |
200 | 0x73, 0x7e, 0x5e, 0x72, 0x62, 0x09, 0x86, 0x31, 0x9c, 0x28, 0xc6, 0x70, 0x82, 0x8c, 0x91, 0xe6, | |
201 | 0xe2, 0x86, 0x29, 0x46, 0x31, 0x09, 0x28, 0x59, 0x66, 0x14, 0xc3, 0xc5, 0xec, 0x98, 0x92, 0x22, | |
202 | 0xa4, 0xca, 0xc5, 0x0c, 0xb4, 0x4a, 0x88, 0x4f, 0xaf, 0x20, 0x49, 0x0f, 0xe1, 0x3a, 0x29, 0x1e, | |
203 | 0x38, 0x1f, 0xa8, 0x53, 0x89, 0x41, 0x48, 0x8f, 0x8b, 0x0d, 0x62, 0x94, 0x90, 0x20, 0x48, 0x06, | |
204 | 0xc5, 0x0d, 0x52, 0xfc, 0xc8, 0x42, 0x60, 0xf5, 0x49, 0x6c, 0x60, 0x6f, 0x1b, 0x03, 0x02, 0x00, | |
205 | 0x00, 0xff, 0xff, 0xb4, 0xc9, 0xe7, 0x58, 0x03, 0x01, 0x00, 0x00, | |
206 | } |
syntax = "proto3";

package pb;

// The Add service definition.
service Add {
	// Sums two integers.
	rpc Sum (SumRequest) returns (SumReply) {}

	// Concatenates two strings
	rpc Concat (ConcatRequest) returns (ConcatReply) {}
}

// The sum request contains two parameters.
message SumRequest {
	int64 a = 1; // first operand
	int64 b = 2; // second operand
}

// The sum response contains the result of the calculation.
message SumReply {
	int64 v = 1; // the computed sum
}

// The Concat request contains two parameters.
message ConcatRequest {
	string a = 1; // first operand
	string b = 2; // second operand
}

// The Concat response contains the result of the concatenation.
message ConcatReply {
	string v = 1; // the concatenated string
}
0 | // Code generated by protoc-gen-go. | |
1 | // source: addsvc.proto | |
2 | // DO NOT EDIT! | |
3 | ||
4 | /* | |
5 | Package pb is a generated protocol buffer package. | |
6 | ||
7 | It is generated from these files: | |
8 | addsvc.proto | |
9 | ||
10 | It has these top-level messages: | |
11 | SumRequest | |
12 | SumReply | |
13 | ConcatRequest | |
14 | ConcatReply | |
15 | */ | |
16 | package pb | |
17 | ||
18 | import proto "github.com/golang/protobuf/proto" | |
19 | import fmt "fmt" | |
20 | import math "math" | |
21 | ||
22 | import ( | |
23 | context "golang.org/x/net/context" | |
24 | grpc "google.golang.org/grpc" | |
25 | ) | |
26 | ||
27 | // Reference imports to suppress errors if they are not otherwise used. | |
28 | var _ = proto.Marshal | |
29 | var _ = fmt.Errorf | |
30 | var _ = math.Inf | |
31 | ||
32 | // This is a compile-time assertion to ensure that this generated file | |
33 | // is compatible with the proto package it is being compiled against. | |
34 | const _ = proto.ProtoPackageIsVersion1 | |
35 | ||
36 | // The sum request contains two parameters. | |
37 | type SumRequest struct { | |
38 | A int64 `protobuf:"varint,1,opt,name=a" json:"a,omitempty"` | |
39 | B int64 `protobuf:"varint,2,opt,name=b" json:"b,omitempty"` | |
40 | } | |
41 | ||
42 | func (m *SumRequest) Reset() { *m = SumRequest{} } | |
43 | func (m *SumRequest) String() string { return proto.CompactTextString(m) } | |
44 | func (*SumRequest) ProtoMessage() {} | |
45 | func (*SumRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } | |
46 | ||
47 | // The sum response contains the result of the calculation. | |
48 | type SumReply struct { | |
49 | V int64 `protobuf:"varint,1,opt,name=v" json:"v,omitempty"` | |
50 | } | |
51 | ||
52 | func (m *SumReply) Reset() { *m = SumReply{} } | |
53 | func (m *SumReply) String() string { return proto.CompactTextString(m) } | |
54 | func (*SumReply) ProtoMessage() {} | |
55 | func (*SumReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } | |
56 | ||
57 | // The Concat request contains two parameters. | |
58 | type ConcatRequest struct { | |
59 | A string `protobuf:"bytes,1,opt,name=a" json:"a,omitempty"` | |
60 | B string `protobuf:"bytes,2,opt,name=b" json:"b,omitempty"` | |
61 | } | |
62 | ||
63 | func (m *ConcatRequest) Reset() { *m = ConcatRequest{} } | |
64 | func (m *ConcatRequest) String() string { return proto.CompactTextString(m) } | |
65 | func (*ConcatRequest) ProtoMessage() {} | |
66 | func (*ConcatRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } | |
67 | ||
68 | // The Concat response contains the result of the concatenation. | |
69 | type ConcatReply struct { | |
70 | V string `protobuf:"bytes,1,opt,name=v" json:"v,omitempty"` | |
71 | } | |
72 | ||
73 | func (m *ConcatReply) Reset() { *m = ConcatReply{} } | |
74 | func (m *ConcatReply) String() string { return proto.CompactTextString(m) } | |
75 | func (*ConcatReply) ProtoMessage() {} | |
76 | func (*ConcatReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } | |
77 | ||
78 | func init() { | |
79 | proto.RegisterType((*SumRequest)(nil), "pb.SumRequest") | |
80 | proto.RegisterType((*SumReply)(nil), "pb.SumReply") | |
81 | proto.RegisterType((*ConcatRequest)(nil), "pb.ConcatRequest") | |
82 | proto.RegisterType((*ConcatReply)(nil), "pb.ConcatReply") | |
83 | } | |
84 | ||
85 | // Reference imports to suppress errors if they are not otherwise used. | |
86 | var _ context.Context | |
87 | var _ grpc.ClientConn | |
88 | ||
89 | // This is a compile-time assertion to ensure that this generated file | |
90 | // is compatible with the grpc package it is being compiled against. | |
91 | const _ = grpc.SupportPackageIsVersion2 | |
92 | ||
93 | // Client API for Add service | |
94 | ||
95 | type AddClient interface { | |
96 | // Sums two integers. | |
97 | Sum(ctx context.Context, in *SumRequest, opts ...grpc.CallOption) (*SumReply, error) | |
98 | // Concatenates two strings | |
99 | Concat(ctx context.Context, in *ConcatRequest, opts ...grpc.CallOption) (*ConcatReply, error) | |
100 | } | |
101 | ||
102 | type addClient struct { | |
103 | cc *grpc.ClientConn | |
104 | } | |
105 | ||
106 | func NewAddClient(cc *grpc.ClientConn) AddClient { | |
107 | return &addClient{cc} | |
108 | } | |
109 | ||
110 | func (c *addClient) Sum(ctx context.Context, in *SumRequest, opts ...grpc.CallOption) (*SumReply, error) { | |
111 | out := new(SumReply) | |
112 | err := grpc.Invoke(ctx, "/pb.Add/Sum", in, out, c.cc, opts...) | |
113 | if err != nil { | |
114 | return nil, err | |
115 | } | |
116 | return out, nil | |
117 | } | |
118 | ||
119 | func (c *addClient) Concat(ctx context.Context, in *ConcatRequest, opts ...grpc.CallOption) (*ConcatReply, error) { | |
120 | out := new(ConcatReply) | |
121 | err := grpc.Invoke(ctx, "/pb.Add/Concat", in, out, c.cc, opts...) | |
122 | if err != nil { | |
123 | return nil, err | |
124 | } | |
125 | return out, nil | |
126 | } | |
127 | ||
128 | // Server API for Add service | |
129 | ||
130 | type AddServer interface { | |
131 | // Sums two integers. | |
132 | Sum(context.Context, *SumRequest) (*SumReply, error) | |
133 | // Concatenates two strings | |
134 | Concat(context.Context, *ConcatRequest) (*ConcatReply, error) | |
135 | } | |
136 | ||
137 | func RegisterAddServer(s *grpc.Server, srv AddServer) { | |
138 | s.RegisterService(&_Add_serviceDesc, srv) | |
139 | } | |
140 | ||
141 | func _Add_Sum_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |
142 | in := new(SumRequest) | |
143 | if err := dec(in); err != nil { | |
144 | return nil, err | |
145 | } | |
146 | if interceptor == nil { | |
147 | return srv.(AddServer).Sum(ctx, in) | |
148 | } | |
149 | info := &grpc.UnaryServerInfo{ | |
150 | Server: srv, | |
151 | FullMethod: "/pb.Add/Sum", | |
152 | } | |
153 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |
154 | return srv.(AddServer).Sum(ctx, req.(*SumRequest)) | |
155 | } | |
156 | return interceptor(ctx, in, info, handler) | |
157 | } | |
158 | ||
159 | func _Add_Concat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |
160 | in := new(ConcatRequest) | |
161 | if err := dec(in); err != nil { | |
162 | return nil, err | |
163 | } | |
164 | if interceptor == nil { | |
165 | return srv.(AddServer).Concat(ctx, in) | |
166 | } | |
167 | info := &grpc.UnaryServerInfo{ | |
168 | Server: srv, | |
169 | FullMethod: "/pb.Add/Concat", | |
170 | } | |
171 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |
172 | return srv.(AddServer).Concat(ctx, req.(*ConcatRequest)) | |
173 | } | |
174 | return interceptor(ctx, in, info, handler) | |
175 | } | |
176 | ||
177 | var _Add_serviceDesc = grpc.ServiceDesc{ | |
178 | ServiceName: "pb.Add", | |
179 | HandlerType: (*AddServer)(nil), | |
180 | Methods: []grpc.MethodDesc{ | |
181 | { | |
182 | MethodName: "Sum", | |
183 | Handler: _Add_Sum_Handler, | |
184 | }, | |
185 | { | |
186 | MethodName: "Concat", | |
187 | Handler: _Add_Concat_Handler, | |
188 | }, | |
189 | }, | |
190 | Streams: []grpc.StreamDesc{}, | |
191 | } | |
192 | ||
// fileDescriptor0 holds the gzipped FileDescriptorProto for this package,
// embedded by protoc-gen-go and consumed by the proto runtime for reflection.
// Machine-generated binary data: never edit by hand.
var fileDescriptor0 = []byte{
	// 174 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0x4c, 0x49, 0x29,
	0x2e, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2a, 0x48, 0x52, 0xd2, 0xe0, 0xe2,
	0x0a, 0x2e, 0xcd, 0x0d, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0xe2, 0xe1, 0x62, 0x4c, 0x94,
	0x60, 0x54, 0x60, 0xd4, 0x60, 0x0e, 0x62, 0x4c, 0x04, 0xf1, 0x92, 0x24, 0x98, 0x20, 0xbc, 0x24,
	0x25, 0x09, 0x2e, 0x0e, 0xb0, 0xca, 0x82, 0x9c, 0x4a, 0x90, 0x4c, 0x19, 0x4c, 0x5d, 0x99, 0x92,
	0x36, 0x17, 0xaf, 0x73, 0x7e, 0x5e, 0x72, 0x62, 0x09, 0x86, 0x31, 0x9c, 0x28, 0xc6, 0x70, 0x82,
	0x8c, 0x91, 0xe6, 0xe2, 0x86, 0x29, 0x46, 0x31, 0x09, 0x28, 0x59, 0x66, 0x14, 0xc3, 0xc5, 0xec,
	0x98, 0x92, 0x22, 0xa4, 0xca, 0xc5, 0x0c, 0xb4, 0x4a, 0x88, 0x4f, 0xaf, 0x20, 0x49, 0x0f, 0xe1,
	0x3a, 0x29, 0x1e, 0x38, 0x1f, 0xa8, 0x53, 0x89, 0x41, 0x48, 0x8f, 0x8b, 0x0d, 0x62, 0x94, 0x90,
	0x20, 0x48, 0x06, 0xc5, 0x0d, 0x52, 0xfc, 0xc8, 0x42, 0x60, 0xf5, 0x49, 0x6c, 0x60, 0x6f, 0x1b,
	0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x2c, 0x12, 0xb4, 0x06, 0x01, 0x00, 0x00,
}
0 | syntax = "proto3"; | |
1 | ||
2 | package pb; | |
3 | ||
// The Add service definition: two unary RPCs over simple scalar payloads.
service Add {
  // Sum sums two integers, returning the result in SumReply.
  rpc Sum (SumRequest) returns (SumReply) {}

  // Concat concatenates two strings, returning the result in ConcatReply.
  rpc Concat (ConcatRequest) returns (ConcatReply) {}
}

// The Sum request contains the two addends.
message SumRequest {
  int64 a = 1;
  int64 b = 2;
}

// The Sum response contains the result of the calculation.
message SumReply {
  int64 v = 1;
}

// The Concat request contains the two strings to join, in order.
message ConcatRequest {
  string a = 1;
  string b = 2;
}

// The Concat response contains the result of the concatenation.
message ConcatReply {
  string v = 1;
}
10 | 10 | # See also |
11 | 11 | # https://github.com/grpc/grpc-go/tree/master/examples |
12 | 12 | |
13 | protoc add.proto --go_out=plugins=grpc:. | |
13 | protoc addsvc.proto --go_out=plugins=grpc:. |
0 | package server | |
1 | ||
2 | import ( | |
3 | "bytes" | |
4 | "encoding/json" | |
5 | "io/ioutil" | |
6 | "net/http" | |
7 | ||
8 | "golang.org/x/net/context" | |
9 | ) | |
10 | ||
11 | // DecodeSumRequest decodes the request from the provided HTTP request, simply | |
12 | // by JSON decoding from the request body. It's designed to be used in | |
13 | // transport/http.Server. | |
14 | func DecodeSumRequest(_ context.Context, r *http.Request) (interface{}, error) { | |
15 | var request SumRequest | |
16 | err := json.NewDecoder(r.Body).Decode(&request) | |
17 | return &request, err | |
18 | } | |
19 | ||
20 | // EncodeSumResponse encodes the response to the provided HTTP response | |
21 | // writer, simply by JSON encoding to the writer. It's designed to be used in | |
22 | // transport/http.Server. | |
23 | func EncodeSumResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { | |
24 | return json.NewEncoder(w).Encode(response) | |
25 | } | |
26 | ||
27 | // DecodeConcatRequest decodes the request from the provided HTTP request, | |
28 | // simply by JSON decoding from the request body. It's designed to be used in | |
29 | // transport/http.Server. | |
30 | func DecodeConcatRequest(_ context.Context, r *http.Request) (interface{}, error) { | |
31 | var request ConcatRequest | |
32 | err := json.NewDecoder(r.Body).Decode(&request) | |
33 | return &request, err | |
34 | } | |
35 | ||
36 | // EncodeConcatResponse encodes the response to the provided HTTP response | |
37 | // writer, simply by JSON encoding to the writer. It's designed to be used in | |
38 | // transport/http.Server. | |
39 | func EncodeConcatResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { | |
40 | return json.NewEncoder(w).Encode(response) | |
41 | } | |
42 | ||
43 | // EncodeSumRequest encodes the request to the provided HTTP request, simply | |
44 | // by JSON encoding to the request body. It's designed to be used in | |
45 | // transport/http.Client. | |
46 | func EncodeSumRequest(_ context.Context, r *http.Request, request interface{}) error { | |
47 | var buf bytes.Buffer | |
48 | if err := json.NewEncoder(&buf).Encode(request); err != nil { | |
49 | return err | |
50 | } | |
51 | r.Body = ioutil.NopCloser(&buf) | |
52 | return nil | |
53 | } | |
54 | ||
55 | // DecodeSumResponse decodes the response from the provided HTTP response, | |
56 | // simply by JSON decoding from the response body. It's designed to be used in | |
57 | // transport/http.Client. | |
58 | func DecodeSumResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
59 | var response SumResponse | |
60 | err := json.NewDecoder(resp.Body).Decode(&response) | |
61 | return response, err | |
62 | } | |
63 | ||
64 | // EncodeConcatRequest encodes the request to the provided HTTP request, | |
65 | // simply by JSON encoding to the request body. It's designed to be used in | |
66 | // transport/http.Client. | |
67 | func EncodeConcatRequest(_ context.Context, r *http.Request, request interface{}) error { | |
68 | var buf bytes.Buffer | |
69 | if err := json.NewEncoder(&buf).Encode(request); err != nil { | |
70 | return err | |
71 | } | |
72 | r.Body = ioutil.NopCloser(&buf) | |
73 | return nil | |
74 | } | |
75 | ||
76 | // DecodeConcatResponse decodes the response from the provided HTTP response, | |
77 | // simply by JSON decoding from the response body. It's designed to be used in | |
78 | // transport/http.Client. | |
79 | func DecodeConcatResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
80 | var response ConcatResponse | |
81 | err := json.NewDecoder(resp.Body).Decode(&response) | |
82 | return response, err | |
83 | } |
0 | package grpc | |
1 | ||
2 | import ( | |
3 | "golang.org/x/net/context" | |
4 | ||
5 | "github.com/go-kit/kit/examples/addsvc/pb" | |
6 | "github.com/go-kit/kit/examples/addsvc/server" | |
7 | ) | |
8 | ||
9 | func DecodeSumRequest(ctx context.Context, req interface{}) (interface{}, error) { | |
10 | sumRequest := req.(*pb.SumRequest) | |
11 | ||
12 | return &server.SumRequest{ | |
13 | A: int(sumRequest.A), | |
14 | B: int(sumRequest.B), | |
15 | }, nil | |
16 | } | |
17 | ||
18 | func DecodeConcatRequest(ctx context.Context, req interface{}) (interface{}, error) { | |
19 | concatRequest := req.(*pb.ConcatRequest) | |
20 | ||
21 | return &server.ConcatRequest{ | |
22 | A: concatRequest.A, | |
23 | B: concatRequest.B, | |
24 | }, nil | |
25 | } | |
26 | ||
27 | func EncodeSumResponse(ctx context.Context, resp interface{}) (interface{}, error) { | |
28 | domainResponse := resp.(server.SumResponse) | |
29 | ||
30 | return &pb.SumReply{ | |
31 | V: int64(domainResponse.V), | |
32 | }, nil | |
33 | } | |
34 | ||
35 | func EncodeConcatResponse(ctx context.Context, resp interface{}) (interface{}, error) { | |
36 | domainResponse := resp.(server.ConcatResponse) | |
37 | ||
38 | return &pb.ConcatReply{ | |
39 | V: domainResponse.V, | |
40 | }, nil | |
41 | } |
0 | package server | |
1 | ||
// SumRequest is the business domain type for a Sum method request.
// The JSON tags serve the HTTP transport's request/response codecs.
type SumRequest struct {
	A int `json:"a"`
	B int `json:"b"`
}

// SumResponse is the business domain type for a Sum method response.
type SumResponse struct {
	V int `json:"v"`
}

// ConcatRequest is the business domain type for a Concat method request.
// The JSON tags serve the HTTP transport's request/response codecs.
type ConcatRequest struct {
	A string `json:"a"`
	B string `json:"b"`
}

// ConcatResponse is the business domain type for a Concat method response.
type ConcatResponse struct {
	V string `json:"v"`
}
0 | package server | |
1 | ||
// AddService is the abstract representation of this service.
type AddService interface {
	// Sum returns the sum of two integers.
	Sum(a, b int) int
	// Concat returns the concatenation of two strings.
	Concat(a, b string) string
}
0 | package main | |
0 | package addsvc | |
1 | ||
2 | // This file contains the Service definition, and a basic service | |
3 | // implementation. It also includes service middlewares. | |
1 | 4 | |
2 | 5 | import ( |
6 | "errors" | |
3 | 7 | "time" |
4 | 8 | |
5 | "github.com/go-kit/kit/examples/addsvc/server" | |
9 | "golang.org/x/net/context" | |
10 | ||
6 | 11 | "github.com/go-kit/kit/log" |
7 | 12 | "github.com/go-kit/kit/metrics" |
8 | 13 | ) |
9 | 14 | |
10 | type pureAddService struct{} | |
11 | ||
12 | func (pureAddService) Sum(a, b int) int { return a + b } | |
13 | ||
14 | func (pureAddService) Concat(a, b string) string { return a + b } | |
15 | ||
16 | type loggingMiddleware struct { | |
17 | server.AddService | |
18 | log.Logger | |
// Service describes a service that adds things together.
type Service interface {
	// Sum returns a+b, or an error for disallowed inputs.
	Sum(ctx context.Context, a, b int) (int, error)
	// Concat returns a+b, or an error for disallowed inputs.
	Concat(ctx context.Context, a, b string) (string, error)
}
20 | 20 | |
21 | func (m loggingMiddleware) Sum(a, b int) (v int) { | |
var (
	// ErrTwoZeroes is an arbitrary business rule for the Sum method.
	ErrTwoZeroes = errors.New("can't sum two zeroes")

	// ErrIntOverflow protects the Sum method against 32-bit overflow.
	ErrIntOverflow = errors.New("integer overflow")

	// ErrMaxSizeExceeded protects the Concat method.
	ErrMaxSizeExceeded = errors.New("result exceeds maximum size")
)
31 | ||
// NewBasicService returns a naïve, stateless implementation of Service.
func NewBasicService() Service {
	return basicService{}
}

// basicService is the plain business-logic implementation of Service;
// it carries no state.
type basicService struct{}
38 | ||
const (
	intMax = 1<<31 - 1     // 32-bit max; Sum rejects results beyond this even on 64-bit platforms
	intMin = -(intMax + 1) // 32-bit min
	maxLen = 102400        // maximum length, in bytes, of a Concat result
)
44 | ||
45 | // Sum implements Service. | |
46 | func (s basicService) Sum(_ context.Context, a, b int) (int, error) { | |
47 | if a == 0 && b == 0 { | |
48 | return 0, ErrTwoZeroes | |
49 | } | |
50 | if (b > 0 && a > (intMax-b)) || (b < 0 && a < (intMin-b)) { | |
51 | return 0, ErrIntOverflow | |
52 | } | |
53 | return a + b, nil | |
54 | } | |
55 | ||
56 | // Concat implements Service. | |
57 | func (s basicService) Concat(_ context.Context, a, b string) (string, error) { | |
58 | if len(a)+len(b) > maxLen { | |
59 | return "", ErrMaxSizeExceeded | |
60 | } | |
61 | return a + b, nil | |
62 | } | |
63 | ||
64 | // Middleware describes a service (as opposed to endpoint) middleware. | |
65 | type Middleware func(Service) Service | |
66 | ||
67 | // ServiceLoggingMiddleware returns a service middleware that logs the | |
68 | // parameters and result of each method invocation. | |
69 | func ServiceLoggingMiddleware(logger log.Logger) Middleware { | |
70 | return func(next Service) Service { | |
71 | return serviceLoggingMiddleware{ | |
72 | logger: logger, | |
73 | next: next, | |
74 | } | |
75 | } | |
76 | } | |
77 | ||
// serviceLoggingMiddleware wraps a Service and logs each invocation
// before delegating to next.
type serviceLoggingMiddleware struct {
	logger log.Logger
	next   Service
}
82 | ||
83 | func (mw serviceLoggingMiddleware) Sum(ctx context.Context, a, b int) (v int, err error) { | |
22 | 84 | defer func(begin time.Time) { |
23 | m.Logger.Log( | |
24 | "method", "sum", | |
25 | "a", a, | |
26 | "b", b, | |
27 | "v", v, | |
85 | mw.logger.Log( | |
86 | "method", "Sum", | |
87 | "a", a, "b", b, "result", v, "error", err, | |
28 | 88 | "took", time.Since(begin), |
29 | 89 | ) |
30 | 90 | }(time.Now()) |
31 | v = m.AddService.Sum(a, b) | |
32 | return | |
91 | return mw.next.Sum(ctx, a, b) | |
33 | 92 | } |
34 | 93 | |
35 | func (m loggingMiddleware) Concat(a, b string) (v string) { | |
94 | func (mw serviceLoggingMiddleware) Concat(ctx context.Context, a, b string) (v string, err error) { | |
36 | 95 | defer func(begin time.Time) { |
37 | m.Logger.Log( | |
38 | "method", "concat", | |
39 | "a", a, | |
40 | "b", b, | |
41 | "v", v, | |
96 | mw.logger.Log( | |
97 | "method", "Concat", | |
98 | "a", a, "b", b, "result", v, "error", err, | |
42 | 99 | "took", time.Since(begin), |
43 | 100 | ) |
44 | 101 | }(time.Now()) |
45 | v = m.AddService.Concat(a, b) | |
46 | return | |
102 | return mw.next.Concat(ctx, a, b) | |
47 | 103 | } |
48 | 104 | |
49 | type instrumentingMiddleware struct { | |
50 | server.AddService | |
51 | requestDuration metrics.TimeHistogram | |
105 | // ServiceInstrumentingMiddleware returns a service middleware that instruments | |
106 | // the number of integers summed and characters concatenated over the lifetime of | |
107 | // the service. | |
108 | func ServiceInstrumentingMiddleware(ints, chars metrics.Counter) Middleware { | |
109 | return func(next Service) Service { | |
110 | return serviceInstrumentingMiddleware{ | |
111 | ints: ints, | |
112 | chars: chars, | |
113 | next: next, | |
114 | } | |
115 | } | |
52 | 116 | } |
53 | 117 | |
54 | func (m instrumentingMiddleware) Sum(a, b int) (v int) { | |
55 | defer func(begin time.Time) { | |
56 | methodField := metrics.Field{Key: "method", Value: "sum"} | |
57 | m.requestDuration.With(methodField).Observe(time.Since(begin)) | |
58 | }(time.Now()) | |
59 | v = m.AddService.Sum(a, b) | |
60 | return | |
// serviceInstrumentingMiddleware wraps a Service and accumulates counters
// from each invocation before returning the delegate's result.
type serviceInstrumentingMiddleware struct {
	ints  metrics.Counter // running total of integers summed
	chars metrics.Counter // running total of characters concatenated
	next  Service
}
62 | 123 | |
63 | func (m instrumentingMiddleware) Concat(a, b string) (v string) { | |
64 | defer func(begin time.Time) { | |
65 | methodField := metrics.Field{Key: "method", Value: "concat"} | |
66 | m.requestDuration.With(methodField).Observe(time.Since(begin)) | |
67 | }(time.Now()) | |
68 | v = m.AddService.Concat(a, b) | |
69 | return | |
124 | func (mw serviceInstrumentingMiddleware) Sum(ctx context.Context, a, b int) (int, error) { | |
125 | v, err := mw.next.Sum(ctx, a, b) | |
126 | mw.ints.Add(uint64(v)) | |
127 | return v, err | |
70 | 128 | } |
129 | ||
130 | func (mw serviceInstrumentingMiddleware) Concat(ctx context.Context, a, b string) (string, error) { | |
131 | v, err := mw.next.Concat(ctx, a, b) | |
132 | mw.chars.Add(uint64(len(v))) | |
133 | return v, err | |
134 | } |
// SumReply carries the result of a Sum call.
struct SumReply {
	1: i64 value
}

// ConcatReply carries the result of a Concat call.
struct ConcatReply {
	1: string value
}

// AddService exposes the Sum and Concat RPCs over Thrift.
service AddService {
	SumReply Sum(1: i64 a, 2: i64 b)
	ConcatReply Concat(1: string a, 2: string b)
}
// SumReply carries the result of a Sum call.
struct SumReply {
	1: i64 value
}

// ConcatReply carries the result of a Concat call.
struct ConcatReply {
	1: string value
}

// AddService exposes the Sum and Concat RPCs over Thrift.
service AddService {
	SumReply Sum(1: i64 a, 2: i64 b)
	ConcatReply Concat(1: string a, 2: string b)
}
1 | 1 | |
2 | 2 | # See also https://thrift.apache.org/tutorial/go |
3 | 3 | |
4 | thrift -r --gen "go:package_prefix=github.com/go-kit/kit/examples/addsvc/thrift/gen-go/,thrift_import=github.com/apache/thrift/lib/go/thrift" add.thrift | |
4 | thrift -r --gen "go:package_prefix=github.com/go-kit/kit/examples/addsvc/thrift/gen-go/,thrift_import=github.com/apache/thrift/lib/go/thrift" addsvc.thrift |
0 | // Autogenerated by Thrift Compiler (0.9.3) | |
1 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING | |
2 | ||
3 | package main | |
4 | ||
5 | import ( | |
6 | "flag" | |
7 | "fmt" | |
8 | "github.com/apache/thrift/lib/go/thrift" | |
9 | "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/add" | |
10 | "math" | |
11 | "net" | |
12 | "net/url" | |
13 | "os" | |
14 | "strconv" | |
15 | "strings" | |
16 | ) | |
17 | ||
// Usage prints command-line help for this Thrift-generated client,
// including the two callable remote functions, then exits with status 0.
// Machine-generated by the Thrift compiler: do not edit by hand.
func Usage() {
	fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
	flag.PrintDefaults()
	fmt.Fprintln(os.Stderr, "\nFunctions:")
	fmt.Fprintln(os.Stderr, "  SumReply Sum(i64 a, i64 b)")
	fmt.Fprintln(os.Stderr, "  ConcatReply Concat(string a, string b)")
	fmt.Fprintln(os.Stderr)
	os.Exit(0)
}
27 | ||
28 | func main() { | |
29 | flag.Usage = Usage | |
30 | var host string | |
31 | var port int | |
32 | var protocol string | |
33 | var urlString string | |
34 | var framed bool | |
35 | var useHttp bool | |
36 | var parsedUrl url.URL | |
37 | var trans thrift.TTransport | |
38 | _ = strconv.Atoi | |
39 | _ = math.Abs | |
40 | flag.Usage = Usage | |
41 | flag.StringVar(&host, "h", "localhost", "Specify host and port") | |
42 | flag.IntVar(&port, "p", 9090, "Specify port") | |
43 | flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") | |
44 | flag.StringVar(&urlString, "u", "", "Specify the url") | |
45 | flag.BoolVar(&framed, "framed", false, "Use framed transport") | |
46 | flag.BoolVar(&useHttp, "http", false, "Use http") | |
47 | flag.Parse() | |
48 | ||
49 | if len(urlString) > 0 { | |
50 | parsedUrl, err := url.Parse(urlString) | |
51 | if err != nil { | |
52 | fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) | |
53 | flag.Usage() | |
54 | } | |
55 | host = parsedUrl.Host | |
56 | useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" | |
57 | } else if useHttp { | |
58 | _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) | |
59 | if err != nil { | |
60 | fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) | |
61 | flag.Usage() | |
62 | } | |
63 | } | |
64 | ||
65 | cmd := flag.Arg(0) | |
66 | var err error | |
67 | if useHttp { | |
68 | trans, err = thrift.NewTHttpClient(parsedUrl.String()) | |
69 | } else { | |
70 | portStr := fmt.Sprint(port) | |
71 | if strings.Contains(host, ":") { | |
72 | host, portStr, err = net.SplitHostPort(host) | |
73 | if err != nil { | |
74 | fmt.Fprintln(os.Stderr, "error with host:", err) | |
75 | os.Exit(1) | |
76 | } | |
77 | } | |
78 | trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) | |
79 | if err != nil { | |
80 | fmt.Fprintln(os.Stderr, "error resolving address:", err) | |
81 | os.Exit(1) | |
82 | } | |
83 | if framed { | |
84 | trans = thrift.NewTFramedTransport(trans) | |
85 | } | |
86 | } | |
87 | if err != nil { | |
88 | fmt.Fprintln(os.Stderr, "Error creating transport", err) | |
89 | os.Exit(1) | |
90 | } | |
91 | defer trans.Close() | |
92 | var protocolFactory thrift.TProtocolFactory | |
93 | switch protocol { | |
94 | case "compact": | |
95 | protocolFactory = thrift.NewTCompactProtocolFactory() | |
96 | break | |
97 | case "simplejson": | |
98 | protocolFactory = thrift.NewTSimpleJSONProtocolFactory() | |
99 | break | |
100 | case "json": | |
101 | protocolFactory = thrift.NewTJSONProtocolFactory() | |
102 | break | |
103 | case "binary", "": | |
104 | protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() | |
105 | break | |
106 | default: | |
107 | fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) | |
108 | Usage() | |
109 | os.Exit(1) | |
110 | } | |
111 | client := add.NewAddServiceClientFactory(trans, protocolFactory) | |
112 | if err := trans.Open(); err != nil { | |
113 | fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) | |
114 | os.Exit(1) | |
115 | } | |
116 | ||
117 | switch cmd { | |
118 | case "Sum": | |
119 | if flag.NArg()-1 != 2 { | |
120 | fmt.Fprintln(os.Stderr, "Sum requires 2 args") | |
121 | flag.Usage() | |
122 | } | |
123 | argvalue0, err6 := (strconv.ParseInt(flag.Arg(1), 10, 64)) | |
124 | if err6 != nil { | |
125 | Usage() | |
126 | return | |
127 | } | |
128 | value0 := argvalue0 | |
129 | argvalue1, err7 := (strconv.ParseInt(flag.Arg(2), 10, 64)) | |
130 | if err7 != nil { | |
131 | Usage() | |
132 | return | |
133 | } | |
134 | value1 := argvalue1 | |
135 | fmt.Print(client.Sum(value0, value1)) | |
136 | fmt.Print("\n") | |
137 | break | |
138 | case "Concat": | |
139 | if flag.NArg()-1 != 2 { | |
140 | fmt.Fprintln(os.Stderr, "Concat requires 2 args") | |
141 | flag.Usage() | |
142 | } | |
143 | argvalue0 := flag.Arg(1) | |
144 | value0 := argvalue0 | |
145 | argvalue1 := flag.Arg(2) | |
146 | value1 := argvalue1 | |
147 | fmt.Print(client.Concat(value0, value1)) | |
148 | fmt.Print("\n") | |
149 | break | |
150 | case "": | |
151 | Usage() | |
152 | break | |
153 | default: | |
154 | fmt.Fprintln(os.Stderr, "Invalid function ", cmd) | |
155 | } | |
156 | } |
0 | // Autogenerated by Thrift Compiler (0.9.3) | |
1 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING | |
2 | ||
3 | package add | |
4 | ||
5 | import ( | |
6 | "bytes" | |
7 | "fmt" | |
8 | "github.com/apache/thrift/lib/go/thrift" | |
9 | ) | |
10 | ||
11 | // (needed to ensure safety because of naive import list construction.) | |
12 | var _ = thrift.ZERO | |
13 | var _ = fmt.Printf | |
14 | var _ = bytes.Equal | |
15 | ||
16 | type AddService interface { | |
17 | // Parameters: | |
18 | // - A | |
19 | // - B | |
20 | Sum(a int64, b int64) (r *SumReply, err error) | |
21 | // Parameters: | |
22 | // - A | |
23 | // - B | |
24 | Concat(a string, b string) (r *ConcatReply, err error) | |
25 | } | |
26 | ||
27 | type AddServiceClient struct { | |
28 | Transport thrift.TTransport | |
29 | ProtocolFactory thrift.TProtocolFactory | |
30 | InputProtocol thrift.TProtocol | |
31 | OutputProtocol thrift.TProtocol | |
32 | SeqId int32 | |
33 | } | |
34 | ||
35 | func NewAddServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AddServiceClient { | |
36 | return &AddServiceClient{Transport: t, | |
37 | ProtocolFactory: f, | |
38 | InputProtocol: f.GetProtocol(t), | |
39 | OutputProtocol: f.GetProtocol(t), | |
40 | SeqId: 0, | |
41 | } | |
42 | } | |
43 | ||
44 | func NewAddServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AddServiceClient { | |
45 | return &AddServiceClient{Transport: t, | |
46 | ProtocolFactory: nil, | |
47 | InputProtocol: iprot, | |
48 | OutputProtocol: oprot, | |
49 | SeqId: 0, | |
50 | } | |
51 | } | |
52 | ||
53 | // Parameters: | |
54 | // - A | |
55 | // - B | |
56 | func (p *AddServiceClient) Sum(a int64, b int64) (r *SumReply, err error) { | |
57 | if err = p.sendSum(a, b); err != nil { | |
58 | return | |
59 | } | |
60 | return p.recvSum() | |
61 | } | |
62 | ||
63 | func (p *AddServiceClient) sendSum(a int64, b int64) (err error) { | |
64 | oprot := p.OutputProtocol | |
65 | if oprot == nil { | |
66 | oprot = p.ProtocolFactory.GetProtocol(p.Transport) | |
67 | p.OutputProtocol = oprot | |
68 | } | |
69 | p.SeqId++ | |
70 | if err = oprot.WriteMessageBegin("Sum", thrift.CALL, p.SeqId); err != nil { | |
71 | return | |
72 | } | |
73 | args := AddServiceSumArgs{ | |
74 | A: a, | |
75 | B: b, | |
76 | } | |
77 | if err = args.Write(oprot); err != nil { | |
78 | return | |
79 | } | |
80 | if err = oprot.WriteMessageEnd(); err != nil { | |
81 | return | |
82 | } | |
83 | return oprot.Flush() | |
84 | } | |
85 | ||
86 | func (p *AddServiceClient) recvSum() (value *SumReply, err error) { | |
87 | iprot := p.InputProtocol | |
88 | if iprot == nil { | |
89 | iprot = p.ProtocolFactory.GetProtocol(p.Transport) | |
90 | p.InputProtocol = iprot | |
91 | } | |
92 | method, mTypeId, seqId, err := iprot.ReadMessageBegin() | |
93 | if err != nil { | |
94 | return | |
95 | } | |
96 | if method != "Sum" { | |
97 | err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "Sum failed: wrong method name") | |
98 | return | |
99 | } | |
100 | if p.SeqId != seqId { | |
101 | err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "Sum failed: out of sequence response") | |
102 | return | |
103 | } | |
104 | if mTypeId == thrift.EXCEPTION { | |
105 | error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") | |
106 | var error1 error | |
107 | error1, err = error0.Read(iprot) | |
108 | if err != nil { | |
109 | return | |
110 | } | |
111 | if err = iprot.ReadMessageEnd(); err != nil { | |
112 | return | |
113 | } | |
114 | err = error1 | |
115 | return | |
116 | } | |
117 | if mTypeId != thrift.REPLY { | |
118 | err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "Sum failed: invalid message type") | |
119 | return | |
120 | } | |
121 | result := AddServiceSumResult{} | |
122 | if err = result.Read(iprot); err != nil { | |
123 | return | |
124 | } | |
125 | if err = iprot.ReadMessageEnd(); err != nil { | |
126 | return | |
127 | } | |
128 | value = result.GetSuccess() | |
129 | return | |
130 | } | |
131 | ||
132 | // Parameters: | |
133 | // - A | |
134 | // - B | |
135 | func (p *AddServiceClient) Concat(a string, b string) (r *ConcatReply, err error) { | |
136 | if err = p.sendConcat(a, b); err != nil { | |
137 | return | |
138 | } | |
139 | return p.recvConcat() | |
140 | } | |
141 | ||
142 | func (p *AddServiceClient) sendConcat(a string, b string) (err error) { | |
143 | oprot := p.OutputProtocol | |
144 | if oprot == nil { | |
145 | oprot = p.ProtocolFactory.GetProtocol(p.Transport) | |
146 | p.OutputProtocol = oprot | |
147 | } | |
148 | p.SeqId++ | |
149 | if err = oprot.WriteMessageBegin("Concat", thrift.CALL, p.SeqId); err != nil { | |
150 | return | |
151 | } | |
152 | args := AddServiceConcatArgs{ | |
153 | A: a, | |
154 | B: b, | |
155 | } | |
156 | if err = args.Write(oprot); err != nil { | |
157 | return | |
158 | } | |
159 | if err = oprot.WriteMessageEnd(); err != nil { | |
160 | return | |
161 | } | |
162 | return oprot.Flush() | |
163 | } | |
164 | ||
165 | func (p *AddServiceClient) recvConcat() (value *ConcatReply, err error) { | |
166 | iprot := p.InputProtocol | |
167 | if iprot == nil { | |
168 | iprot = p.ProtocolFactory.GetProtocol(p.Transport) | |
169 | p.InputProtocol = iprot | |
170 | } | |
171 | method, mTypeId, seqId, err := iprot.ReadMessageBegin() | |
172 | if err != nil { | |
173 | return | |
174 | } | |
175 | if method != "Concat" { | |
176 | err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "Concat failed: wrong method name") | |
177 | return | |
178 | } | |
179 | if p.SeqId != seqId { | |
180 | err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "Concat failed: out of sequence response") | |
181 | return | |
182 | } | |
183 | if mTypeId == thrift.EXCEPTION { | |
184 | error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") | |
185 | var error3 error | |
186 | error3, err = error2.Read(iprot) | |
187 | if err != nil { | |
188 | return | |
189 | } | |
190 | if err = iprot.ReadMessageEnd(); err != nil { | |
191 | return | |
192 | } | |
193 | err = error3 | |
194 | return | |
195 | } | |
196 | if mTypeId != thrift.REPLY { | |
197 | err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "Concat failed: invalid message type") | |
198 | return | |
199 | } | |
200 | result := AddServiceConcatResult{} | |
201 | if err = result.Read(iprot); err != nil { | |
202 | return | |
203 | } | |
204 | if err = iprot.ReadMessageEnd(); err != nil { | |
205 | return | |
206 | } | |
207 | value = result.GetSuccess() | |
208 | return | |
209 | } | |
210 | ||
211 | type AddServiceProcessor struct { | |
212 | processorMap map[string]thrift.TProcessorFunction | |
213 | handler AddService | |
214 | } | |
215 | ||
// AddToProcessorMap registers processor under key (the Thrift method name).
// Machine-generated by the Thrift compiler: do not edit by hand.
func (p *AddServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
	p.processorMap[key] = processor
}
219 | ||
// GetProcessorFunction looks up the processor registered for key; ok is
// false when no method with that name has been registered.
// Machine-generated by the Thrift compiler: do not edit by hand.
func (p *AddServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
	processor, ok = p.processorMap[key]
	return processor, ok
}
224 | ||
// ProcessorMap exposes the full method-name-to-processor table.
// Machine-generated by the Thrift compiler: do not edit by hand.
func (p *AddServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
	return p.processorMap
}
228 | ||
// NewAddServiceProcessor builds a processor that dispatches the Sum and
// Concat Thrift methods to the given handler.
// Machine-generated by the Thrift compiler: do not edit by hand.
func NewAddServiceProcessor(handler AddService) *AddServiceProcessor {

	self4 := &AddServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
	self4.processorMap["Sum"] = &addServiceProcessorSum{handler: handler}
	self4.processorMap["Concat"] = &addServiceProcessorConcat{handler: handler}
	return self4
}
236 | ||
237 | func (p *AddServiceProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { | |
238 | name, _, seqId, err := iprot.ReadMessageBegin() | |
239 | if err != nil { | |
240 | return false, err | |
241 | } | |
242 | if processor, ok := p.GetProcessorFunction(name); ok { | |
243 | return processor.Process(seqId, iprot, oprot) | |
244 | } | |
245 | iprot.Skip(thrift.STRUCT) | |
246 | iprot.ReadMessageEnd() | |
247 | x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) | |
248 | oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) | |
249 | x5.Write(oprot) | |
250 | oprot.WriteMessageEnd() | |
251 | oprot.Flush() | |
252 | return false, x5 | |
253 | ||
254 | } | |
255 | ||
256 | type addServiceProcessorSum struct { | |
257 | handler AddService | |
258 | } | |
259 | ||
260 | func (p *addServiceProcessorSum) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { | |
261 | args := AddServiceSumArgs{} | |
262 | if err = args.Read(iprot); err != nil { | |
263 | iprot.ReadMessageEnd() | |
264 | x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) | |
265 | oprot.WriteMessageBegin("Sum", thrift.EXCEPTION, seqId) | |
266 | x.Write(oprot) | |
267 | oprot.WriteMessageEnd() | |
268 | oprot.Flush() | |
269 | return false, err | |
270 | } | |
271 | ||
272 | iprot.ReadMessageEnd() | |
273 | result := AddServiceSumResult{} | |
274 | var retval *SumReply | |
275 | var err2 error | |
276 | if retval, err2 = p.handler.Sum(args.A, args.B); err2 != nil { | |
277 | x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Sum: "+err2.Error()) | |
278 | oprot.WriteMessageBegin("Sum", thrift.EXCEPTION, seqId) | |
279 | x.Write(oprot) | |
280 | oprot.WriteMessageEnd() | |
281 | oprot.Flush() | |
282 | return true, err2 | |
283 | } else { | |
284 | result.Success = retval | |
285 | } | |
286 | if err2 = oprot.WriteMessageBegin("Sum", thrift.REPLY, seqId); err2 != nil { | |
287 | err = err2 | |
288 | } | |
289 | if err2 = result.Write(oprot); err == nil && err2 != nil { | |
290 | err = err2 | |
291 | } | |
292 | if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { | |
293 | err = err2 | |
294 | } | |
295 | if err2 = oprot.Flush(); err == nil && err2 != nil { | |
296 | err = err2 | |
297 | } | |
298 | if err != nil { | |
299 | return | |
300 | } | |
301 | return true, err | |
302 | } | |
303 | ||
304 | type addServiceProcessorConcat struct { | |
305 | handler AddService | |
306 | } | |
307 | ||
308 | func (p *addServiceProcessorConcat) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { | |
309 | args := AddServiceConcatArgs{} | |
310 | if err = args.Read(iprot); err != nil { | |
311 | iprot.ReadMessageEnd() | |
312 | x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) | |
313 | oprot.WriteMessageBegin("Concat", thrift.EXCEPTION, seqId) | |
314 | x.Write(oprot) | |
315 | oprot.WriteMessageEnd() | |
316 | oprot.Flush() | |
317 | return false, err | |
318 | } | |
319 | ||
320 | iprot.ReadMessageEnd() | |
321 | result := AddServiceConcatResult{} | |
322 | var retval *ConcatReply | |
323 | var err2 error | |
324 | if retval, err2 = p.handler.Concat(args.A, args.B); err2 != nil { | |
325 | x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Concat: "+err2.Error()) | |
326 | oprot.WriteMessageBegin("Concat", thrift.EXCEPTION, seqId) | |
327 | x.Write(oprot) | |
328 | oprot.WriteMessageEnd() | |
329 | oprot.Flush() | |
330 | return true, err2 | |
331 | } else { | |
332 | result.Success = retval | |
333 | } | |
334 | if err2 = oprot.WriteMessageBegin("Concat", thrift.REPLY, seqId); err2 != nil { | |
335 | err = err2 | |
336 | } | |
337 | if err2 = result.Write(oprot); err == nil && err2 != nil { | |
338 | err = err2 | |
339 | } | |
340 | if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { | |
341 | err = err2 | |
342 | } | |
343 | if err2 = oprot.Flush(); err == nil && err2 != nil { | |
344 | err = err2 | |
345 | } | |
346 | if err != nil { | |
347 | return | |
348 | } | |
349 | return true, err | |
350 | } | |
351 | ||
// HELPER FUNCTIONS AND STRUCTURES

// AddServiceSumArgs is the generated wire-format carrier for the arguments
// of AddService.Sum (thrift struct "Sum_args").
//
// Attributes:
//  - A
//  - B
type AddServiceSumArgs struct {
	A int64 `thrift:"a,1" json:"a"`
	B int64 `thrift:"b,2" json:"b"`
}

// NewAddServiceSumArgs returns a zero-valued AddServiceSumArgs.
func NewAddServiceSumArgs() *AddServiceSumArgs {
	return &AddServiceSumArgs{}
}

// GetA returns field a (id 1).
func (p *AddServiceSumArgs) GetA() int64 {
	return p.A
}

// GetB returns field b (id 2).
func (p *AddServiceSumArgs) GetB() int64 {
	return p.B
}

// Read deserializes the struct from iprot. Field ids not present in the IDL
// are skipped, keeping the struct wire-compatible with newer peers.
func (p *AddServiceSumArgs) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
		case 2:
			if err := p.readField2(iprot); err != nil {
				return err
			}
		default:
			// Unknown field: skip it rather than fail.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField1 decodes field a (id 1, I64).
func (p *AddServiceSumArgs) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI64(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.A = v
	}
	return nil
}

// readField2 decodes field b (id 2, I64).
func (p *AddServiceSumArgs) readField2(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI64(); err != nil {
		return thrift.PrependError("error reading field 2: ", err)
	} else {
		p.B = v
	}
	return nil
}

// Write serializes the struct to oprot as "Sum_args".
func (p *AddServiceSumArgs) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("Sum_args"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := p.writeField2(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field a (id 1, I64).
func (p *AddServiceSumArgs) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("a", thrift.I64, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:a: ", p), err)
	}
	if err := oprot.WriteI64(int64(p.A)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.a (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:a: ", p), err)
	}
	return err
}

// writeField2 encodes field b (id 2, I64).
func (p *AddServiceSumArgs) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("b", thrift.I64, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:b: ", p), err)
	}
	if err := oprot.WriteI64(int64(p.B)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.b (2) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:b: ", p), err)
	}
	return err
}

// String implements fmt.Stringer for debugging.
func (p *AddServiceSumArgs) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AddServiceSumArgs(%+v)", *p)
}
479 | ||
// AddServiceSumResult is the generated wire-format carrier for the result
// of AddService.Sum (thrift struct "Sum_result"). Success is optional field
// id 0; nil means the server produced no success value.
//
// Attributes:
//  - Success
type AddServiceSumResult struct {
	Success *SumReply `thrift:"success,0" json:"success,omitempty"`
}

// NewAddServiceSumResult returns a zero-valued AddServiceSumResult.
func NewAddServiceSumResult() *AddServiceSumResult {
	return &AddServiceSumResult{}
}

// AddServiceSumResult_Success_DEFAULT is what GetSuccess returns when
// Success is unset (nil for struct-typed fields).
var AddServiceSumResult_Success_DEFAULT *SumReply

// GetSuccess returns the success payload, or the default (nil) when unset.
func (p *AddServiceSumResult) GetSuccess() *SumReply {
	if !p.IsSetSuccess() {
		return AddServiceSumResult_Success_DEFAULT
	}
	return p.Success
}

// IsSetSuccess reports whether a success payload is present.
func (p *AddServiceSumResult) IsSetSuccess() bool {
	return p.Success != nil
}

// Read deserializes the result from iprot, skipping unknown fields.
func (p *AddServiceSumResult) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 0:
			if err := p.readField0(iprot); err != nil {
				return err
			}
		default:
			// Unknown field: skip it rather than fail.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField0 decodes the success field (id 0, STRUCT) into a fresh SumReply.
func (p *AddServiceSumResult) readField0(iprot thrift.TProtocol) error {
	p.Success = &SumReply{}
	if err := p.Success.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
	}
	return nil
}

// Write serializes the result to oprot as "Sum_result".
func (p *AddServiceSumResult) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("Sum_result"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField0(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField0 encodes the success field (id 0, STRUCT) only when it is set.
func (p *AddServiceSumResult) writeField0(oprot thrift.TProtocol) (err error) {
	if p.IsSetSuccess() {
		if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
		}
		if err := p.Success.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
		}
	}
	return err
}

// String implements fmt.Stringer for debugging.
func (p *AddServiceSumResult) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AddServiceSumResult(%+v)", *p)
}
580 | ||
// AddServiceConcatArgs is the generated wire-format carrier for the
// arguments of AddService.Concat (thrift struct "Concat_args").
//
// Attributes:
//  - A
//  - B
type AddServiceConcatArgs struct {
	A string `thrift:"a,1" json:"a"`
	B string `thrift:"b,2" json:"b"`
}

// NewAddServiceConcatArgs returns a zero-valued AddServiceConcatArgs.
func NewAddServiceConcatArgs() *AddServiceConcatArgs {
	return &AddServiceConcatArgs{}
}

// GetA returns field a (id 1).
func (p *AddServiceConcatArgs) GetA() string {
	return p.A
}

// GetB returns field b (id 2).
func (p *AddServiceConcatArgs) GetB() string {
	return p.B
}

// Read deserializes the struct from iprot, skipping unknown fields for
// wire compatibility with newer peers.
func (p *AddServiceConcatArgs) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
		case 2:
			if err := p.readField2(iprot); err != nil {
				return err
			}
		default:
			// Unknown field: skip it rather than fail.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField1 decodes field a (id 1, STRING).
func (p *AddServiceConcatArgs) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.A = v
	}
	return nil
}

// readField2 decodes field b (id 2, STRING).
func (p *AddServiceConcatArgs) readField2(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 2: ", err)
	} else {
		p.B = v
	}
	return nil
}

// Write serializes the struct to oprot as "Concat_args".
func (p *AddServiceConcatArgs) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("Concat_args"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := p.writeField2(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field a (id 1, STRING).
func (p *AddServiceConcatArgs) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("a", thrift.STRING, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:a: ", p), err)
	}
	if err := oprot.WriteString(string(p.A)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.a (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:a: ", p), err)
	}
	return err
}

// writeField2 encodes field b (id 2, STRING).
func (p *AddServiceConcatArgs) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("b", thrift.STRING, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:b: ", p), err)
	}
	if err := oprot.WriteString(string(p.B)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.b (2) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:b: ", p), err)
	}
	return err
}

// String implements fmt.Stringer for debugging.
func (p *AddServiceConcatArgs) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AddServiceConcatArgs(%+v)", *p)
}
706 | ||
// AddServiceConcatResult is the generated wire-format carrier for the
// result of AddService.Concat (thrift struct "Concat_result"). Success is
// optional field id 0; nil means the server produced no success value.
//
// Attributes:
//  - Success
type AddServiceConcatResult struct {
	Success *ConcatReply `thrift:"success,0" json:"success,omitempty"`
}

// NewAddServiceConcatResult returns a zero-valued AddServiceConcatResult.
func NewAddServiceConcatResult() *AddServiceConcatResult {
	return &AddServiceConcatResult{}
}

// AddServiceConcatResult_Success_DEFAULT is what GetSuccess returns when
// Success is unset (nil for struct-typed fields).
var AddServiceConcatResult_Success_DEFAULT *ConcatReply

// GetSuccess returns the success payload, or the default (nil) when unset.
func (p *AddServiceConcatResult) GetSuccess() *ConcatReply {
	if !p.IsSetSuccess() {
		return AddServiceConcatResult_Success_DEFAULT
	}
	return p.Success
}

// IsSetSuccess reports whether a success payload is present.
func (p *AddServiceConcatResult) IsSetSuccess() bool {
	return p.Success != nil
}

// Read deserializes the result from iprot, skipping unknown fields.
func (p *AddServiceConcatResult) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 0:
			if err := p.readField0(iprot); err != nil {
				return err
			}
		default:
			// Unknown field: skip it rather than fail.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField0 decodes the success field (id 0, STRUCT) into a fresh ConcatReply.
func (p *AddServiceConcatResult) readField0(iprot thrift.TProtocol) error {
	p.Success = &ConcatReply{}
	if err := p.Success.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
	}
	return nil
}

// Write serializes the result to oprot as "Concat_result".
func (p *AddServiceConcatResult) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("Concat_result"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField0(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField0 encodes the success field (id 0, STRUCT) only when it is set.
func (p *AddServiceConcatResult) writeField0(oprot thrift.TProtocol) (err error) {
	if p.IsSetSuccess() {
		if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
		}
		if err := p.Success.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
		}
	}
	return err
}

// String implements fmt.Stringer for debugging.
func (p *AddServiceConcatResult) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AddServiceConcatResult(%+v)", *p)
}
0 | // Autogenerated by Thrift Compiler (0.9.3) | |
1 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING | |
2 | ||
3 | package add | |
4 | ||
5 | import ( | |
6 | "bytes" | |
7 | "fmt" | |
8 | "github.com/apache/thrift/lib/go/thrift" | |
9 | ) | |
10 | ||
11 | // (needed to ensure safety because of naive import list construction.) | |
12 | var _ = thrift.ZERO | |
13 | var _ = fmt.Printf | |
14 | var _ = bytes.Equal | |
15 | ||
// init is intentionally empty: the thrift compiler always emits it in the
// constants file so IDL-declared constants have a registration point.
func init() {
}
0 | // Autogenerated by Thrift Compiler (0.9.3) | |
1 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING | |
2 | ||
3 | package add | |
4 | ||
5 | import ( | |
6 | "bytes" | |
7 | "fmt" | |
8 | "github.com/apache/thrift/lib/go/thrift" | |
9 | ) | |
10 | ||
11 | // (needed to ensure safety because of naive import list construction.) | |
12 | var _ = thrift.ZERO | |
13 | var _ = fmt.Printf | |
14 | var _ = bytes.Equal | |
15 | ||
16 | var GoUnusedProtection__ int | |
17 | ||
// SumReply is the generated response struct for AddService.Sum.
//
// Attributes:
//  - Value
type SumReply struct {
	Value int64 `thrift:"value,1" json:"value"`
}

// NewSumReply returns a zero-valued SumReply.
func NewSumReply() *SumReply {
	return &SumReply{}
}

// GetValue returns field value (id 1).
func (p *SumReply) GetValue() int64 {
	return p.Value
}

// Read deserializes the struct from iprot, skipping unknown fields for
// wire compatibility with newer peers.
func (p *SumReply) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
		default:
			// Unknown field: skip it rather than fail.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField1 decodes field value (id 1, I64).
func (p *SumReply) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI64(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.Value = v
	}
	return nil
}

// Write serializes the struct to oprot as "SumReply".
func (p *SumReply) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("SumReply"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field value (id 1, I64).
func (p *SumReply) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("value", thrift.I64, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err)
	}
	if err := oprot.WriteI64(int64(p.Value)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err)
	}
	return err
}

// String implements fmt.Stringer for debugging.
func (p *SumReply) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("SumReply(%+v)", *p)
}
108 | ||
// ConcatReply is the generated response struct for AddService.Concat.
//
// Attributes:
//  - Value
type ConcatReply struct {
	Value string `thrift:"value,1" json:"value"`
}

// NewConcatReply returns a zero-valued ConcatReply.
func NewConcatReply() *ConcatReply {
	return &ConcatReply{}
}

// GetValue returns field value (id 1).
func (p *ConcatReply) GetValue() string {
	return p.Value
}

// Read deserializes the struct from iprot, skipping unknown fields for
// wire compatibility with newer peers.
func (p *ConcatReply) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
		default:
			// Unknown field: skip it rather than fail.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField1 decodes field value (id 1, STRING).
func (p *ConcatReply) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.Value = v
	}
	return nil
}

// Write serializes the struct to oprot as "ConcatReply".
func (p *ConcatReply) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("ConcatReply"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field value (id 1, STRING).
func (p *ConcatReply) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("value", thrift.STRING, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err)
	}
	if err := oprot.WriteString(string(p.Value)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err)
	}
	return err
}

// String implements fmt.Stringer for debugging.
func (p *ConcatReply) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("ConcatReply(%+v)", *p)
}
0 | // Autogenerated by Thrift Compiler (0.9.3) | |
1 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING | |
2 | ||
3 | package main | |
4 | ||
5 | import ( | |
6 | "flag" | |
7 | "fmt" | |
8 | "github.com/apache/thrift/lib/go/thrift" | |
9 | "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" | |
10 | "math" | |
11 | "net" | |
12 | "net/url" | |
13 | "os" | |
14 | "strconv" | |
15 | "strings" | |
16 | ) | |
17 | ||
// Usage prints the CLI usage banner and the callable service functions to
// stderr, then terminates the process with exit status 0. It is installed
// as flag.Usage by main.
func Usage() {
	fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
	flag.PrintDefaults()
	fmt.Fprintln(os.Stderr, "\nFunctions:")
	fmt.Fprintln(os.Stderr, "  SumReply Sum(i64 a, i64 b)")
	fmt.Fprintln(os.Stderr, "  ConcatReply Concat(string a, string b)")
	fmt.Fprintln(os.Stderr)
	os.Exit(0)
}
27 | ||
28 | func main() { | |
29 | flag.Usage = Usage | |
30 | var host string | |
31 | var port int | |
32 | var protocol string | |
33 | var urlString string | |
34 | var framed bool | |
35 | var useHttp bool | |
36 | var parsedUrl url.URL | |
37 | var trans thrift.TTransport | |
38 | _ = strconv.Atoi | |
39 | _ = math.Abs | |
40 | flag.Usage = Usage | |
41 | flag.StringVar(&host, "h", "localhost", "Specify host and port") | |
42 | flag.IntVar(&port, "p", 9090, "Specify port") | |
43 | flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") | |
44 | flag.StringVar(&urlString, "u", "", "Specify the url") | |
45 | flag.BoolVar(&framed, "framed", false, "Use framed transport") | |
46 | flag.BoolVar(&useHttp, "http", false, "Use http") | |
47 | flag.Parse() | |
48 | ||
49 | if len(urlString) > 0 { | |
50 | parsedUrl, err := url.Parse(urlString) | |
51 | if err != nil { | |
52 | fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) | |
53 | flag.Usage() | |
54 | } | |
55 | host = parsedUrl.Host | |
56 | useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" | |
57 | } else if useHttp { | |
58 | _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) | |
59 | if err != nil { | |
60 | fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) | |
61 | flag.Usage() | |
62 | } | |
63 | } | |
64 | ||
65 | cmd := flag.Arg(0) | |
66 | var err error | |
67 | if useHttp { | |
68 | trans, err = thrift.NewTHttpClient(parsedUrl.String()) | |
69 | } else { | |
70 | portStr := fmt.Sprint(port) | |
71 | if strings.Contains(host, ":") { | |
72 | host, portStr, err = net.SplitHostPort(host) | |
73 | if err != nil { | |
74 | fmt.Fprintln(os.Stderr, "error with host:", err) | |
75 | os.Exit(1) | |
76 | } | |
77 | } | |
78 | trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) | |
79 | if err != nil { | |
80 | fmt.Fprintln(os.Stderr, "error resolving address:", err) | |
81 | os.Exit(1) | |
82 | } | |
83 | if framed { | |
84 | trans = thrift.NewTFramedTransport(trans) | |
85 | } | |
86 | } | |
87 | if err != nil { | |
88 | fmt.Fprintln(os.Stderr, "Error creating transport", err) | |
89 | os.Exit(1) | |
90 | } | |
91 | defer trans.Close() | |
92 | var protocolFactory thrift.TProtocolFactory | |
93 | switch protocol { | |
94 | case "compact": | |
95 | protocolFactory = thrift.NewTCompactProtocolFactory() | |
96 | break | |
97 | case "simplejson": | |
98 | protocolFactory = thrift.NewTSimpleJSONProtocolFactory() | |
99 | break | |
100 | case "json": | |
101 | protocolFactory = thrift.NewTJSONProtocolFactory() | |
102 | break | |
103 | case "binary", "": | |
104 | protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() | |
105 | break | |
106 | default: | |
107 | fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) | |
108 | Usage() | |
109 | os.Exit(1) | |
110 | } | |
111 | client := addsvc.NewAddServiceClientFactory(trans, protocolFactory) | |
112 | if err := trans.Open(); err != nil { | |
113 | fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) | |
114 | os.Exit(1) | |
115 | } | |
116 | ||
117 | switch cmd { | |
118 | case "Sum": | |
119 | if flag.NArg()-1 != 2 { | |
120 | fmt.Fprintln(os.Stderr, "Sum requires 2 args") | |
121 | flag.Usage() | |
122 | } | |
123 | argvalue0, err6 := (strconv.ParseInt(flag.Arg(1), 10, 64)) | |
124 | if err6 != nil { | |
125 | Usage() | |
126 | return | |
127 | } | |
128 | value0 := argvalue0 | |
129 | argvalue1, err7 := (strconv.ParseInt(flag.Arg(2), 10, 64)) | |
130 | if err7 != nil { | |
131 | Usage() | |
132 | return | |
133 | } | |
134 | value1 := argvalue1 | |
135 | fmt.Print(client.Sum(value0, value1)) | |
136 | fmt.Print("\n") | |
137 | break | |
138 | case "Concat": | |
139 | if flag.NArg()-1 != 2 { | |
140 | fmt.Fprintln(os.Stderr, "Concat requires 2 args") | |
141 | flag.Usage() | |
142 | } | |
143 | argvalue0 := flag.Arg(1) | |
144 | value0 := argvalue0 | |
145 | argvalue1 := flag.Arg(2) | |
146 | value1 := argvalue1 | |
147 | fmt.Print(client.Concat(value0, value1)) | |
148 | fmt.Print("\n") | |
149 | break | |
150 | case "": | |
151 | Usage() | |
152 | break | |
153 | default: | |
154 | fmt.Fprintln(os.Stderr, "Invalid function ", cmd) | |
155 | } | |
156 | } |
0 | // Autogenerated by Thrift Compiler (0.9.3) | |
1 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING | |
2 | ||
3 | package addsvc | |
4 | ||
5 | import ( | |
6 | "bytes" | |
7 | "fmt" | |
8 | "github.com/apache/thrift/lib/go/thrift" | |
9 | ) | |
10 | ||
11 | // (needed to ensure safety because of naive import list construction.) | |
12 | var _ = thrift.ZERO | |
13 | var _ = fmt.Printf | |
14 | var _ = bytes.Equal | |
15 | ||
// AddService is the generated interface for the AddService thrift service;
// AddServiceClient implements it on the client side and server handlers
// implement it on the server side.
type AddService interface {
	// Parameters:
	//  - A
	//  - B
	Sum(a int64, b int64) (r *SumReply, err error)
	// Parameters:
	//  - A
	//  - B
	Concat(a string, b string) (r *ConcatReply, err error)
}

// AddServiceClient is the generated RPC client for AddService. It is not
// safe for concurrent use by multiple goroutines: SeqId and the protocol
// fields are mutated on every call.
type AddServiceClient struct {
	Transport       thrift.TTransport
	ProtocolFactory thrift.TProtocolFactory
	InputProtocol   thrift.TProtocol
	OutputProtocol  thrift.TProtocol
	SeqId           int32
}

// NewAddServiceClientFactory builds a client whose input and output
// protocols are both created from factory f over transport t.
func NewAddServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AddServiceClient {
	return &AddServiceClient{Transport: t,
		ProtocolFactory: f,
		InputProtocol:   f.GetProtocol(t),
		OutputProtocol:  f.GetProtocol(t),
		SeqId:           0,
	}
}

// NewAddServiceClientProtocol builds a client with explicit input and
// output protocols and no protocol factory.
func NewAddServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AddServiceClient {
	return &AddServiceClient{Transport: t,
		ProtocolFactory: nil,
		InputProtocol:   iprot,
		OutputProtocol:  oprot,
		SeqId:           0,
	}
}
52 | ||
// Sum performs the Sum RPC: it writes the request to the server and then
// blocks reading the reply.
//
// Parameters:
//  - A
//  - B
func (p *AddServiceClient) Sum(a int64, b int64) (r *SumReply, err error) {
	if err = p.sendSum(a, b); err != nil {
		return
	}
	return p.recvSum()
}

// sendSum writes the "Sum" CALL envelope and arguments, then flushes the
// output protocol. The protocol is created lazily from the factory when the
// client was built without explicit protocols.
func (p *AddServiceClient) sendSum(a int64, b int64) (err error) {
	oprot := p.OutputProtocol
	if oprot == nil {
		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
		p.OutputProtocol = oprot
	}
	// Each outgoing call gets a fresh sequence id; recvSum verifies the
	// reply carries the same id.
	p.SeqId++
	if err = oprot.WriteMessageBegin("Sum", thrift.CALL, p.SeqId); err != nil {
		return
	}
	args := AddServiceSumArgs{
		A: a,
		B: b,
	}
	if err = args.Write(oprot); err != nil {
		return
	}
	if err = oprot.WriteMessageEnd(); err != nil {
		return
	}
	return oprot.Flush()
}

// recvSum reads one reply message for Sum: it validates the method name and
// sequence id, decodes a server-side application exception if present, and
// otherwise extracts the success payload (nil when the server set none).
func (p *AddServiceClient) recvSum() (value *SumReply, err error) {
	iprot := p.InputProtocol
	if iprot == nil {
		iprot = p.ProtocolFactory.GetProtocol(p.Transport)
		p.InputProtocol = iprot
	}
	method, mTypeId, seqId, err := iprot.ReadMessageBegin()
	if err != nil {
		return
	}
	if method != "Sum" {
		err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "Sum failed: wrong method name")
		return
	}
	if p.SeqId != seqId {
		err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "Sum failed: out of sequence response")
		return
	}
	if mTypeId == thrift.EXCEPTION {
		// The server reported an application exception: decode it and
		// return it as the call error.
		error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
		var error1 error
		error1, err = error0.Read(iprot)
		if err != nil {
			return
		}
		if err = iprot.ReadMessageEnd(); err != nil {
			return
		}
		err = error1
		return
	}
	if mTypeId != thrift.REPLY {
		err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "Sum failed: invalid message type")
		return
	}
	result := AddServiceSumResult{}
	if err = result.Read(iprot); err != nil {
		return
	}
	if err = iprot.ReadMessageEnd(); err != nil {
		return
	}
	value = result.GetSuccess()
	return
}
131 | ||
132 | // Parameters: | |
133 | // - A | |
134 | // - B | |
135 | func (p *AddServiceClient) Concat(a string, b string) (r *ConcatReply, err error) { | |
136 | if err = p.sendConcat(a, b); err != nil { | |
137 | return | |
138 | } | |
139 | return p.recvConcat() | |
140 | } | |
141 | ||
// sendConcat writes a Concat call message (sequence id, method name,
// packed arguments) to the output protocol and flushes the transport.
func (p *AddServiceClient) sendConcat(a string, b string) (err error) {
	oprot := p.OutputProtocol
	if oprot == nil {
		// Lazily build the output protocol on first use and cache it.
		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
		p.OutputProtocol = oprot
	}
	// New sequence id for this call; recvConcat matches it on the reply.
	p.SeqId++
	if err = oprot.WriteMessageBegin("Concat", thrift.CALL, p.SeqId); err != nil {
		return
	}
	args := AddServiceConcatArgs{
		A: a,
		B: b,
	}
	if err = args.Write(oprot); err != nil {
		return
	}
	if err = oprot.WriteMessageEnd(); err != nil {
		return
	}
	return oprot.Flush()
}
164 | ||
// recvConcat reads the reply to a Concat call: it validates the method
// name, sequence id, and message type, surfaces a server-side exception
// as an error, and otherwise decodes and returns the result payload.
func (p *AddServiceClient) recvConcat() (value *ConcatReply, err error) {
	iprot := p.InputProtocol
	if iprot == nil {
		// Lazily build the input protocol on first use and cache it.
		iprot = p.ProtocolFactory.GetProtocol(p.Transport)
		p.InputProtocol = iprot
	}
	method, mTypeId, seqId, err := iprot.ReadMessageBegin()
	if err != nil {
		return
	}
	if method != "Concat" {
		err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "Concat failed: wrong method name")
		return
	}
	if p.SeqId != seqId {
		// Reply does not match the sequence id issued by sendConcat.
		err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "Concat failed: out of sequence response")
		return
	}
	if mTypeId == thrift.EXCEPTION {
		// Server reported an application exception; decode it and return
		// it as this call's error.
		error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
		var error3 error
		error3, err = error2.Read(iprot)
		if err != nil {
			return
		}
		if err = iprot.ReadMessageEnd(); err != nil {
			return
		}
		err = error3
		return
	}
	if mTypeId != thrift.REPLY {
		err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "Concat failed: invalid message type")
		return
	}
	result := AddServiceConcatResult{}
	if err = result.Read(iprot); err != nil {
		return
	}
	if err = iprot.ReadMessageEnd(); err != nil {
		return
	}
	value = result.GetSuccess()
	return
}
210 | ||
// AddServiceProcessor dispatches incoming Thrift messages to the
// AddService handler via a per-method-name processor map.
type AddServiceProcessor struct {
	processorMap map[string]thrift.TProcessorFunction // method name -> processor function
	handler      AddService                           // user-supplied service implementation
}
215 | ||
// AddToProcessorMap registers (or replaces) the processor function for
// the given method name.
func (p *AddServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
	p.processorMap[key] = processor
}
219 | ||
220 | func (p *AddServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { | |
221 | processor, ok = p.processorMap[key] | |
222 | return processor, ok | |
223 | } | |
224 | ||
// ProcessorMap exposes the internal method-name-to-processor map.
func (p *AddServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
	return p.processorMap
}
228 | ||
229 | func NewAddServiceProcessor(handler AddService) *AddServiceProcessor { | |
230 | ||
231 | self4 := &AddServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} | |
232 | self4.processorMap["Sum"] = &addServiceProcessorSum{handler: handler} | |
233 | self4.processorMap["Concat"] = &addServiceProcessorConcat{handler: handler} | |
234 | return self4 | |
235 | } | |
236 | ||
// Process reads one incoming message and dispatches it to the processor
// function registered for its method name. Unknown methods are answered
// with an UNKNOWN_METHOD application exception.
func (p *AddServiceProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	name, _, seqId, err := iprot.ReadMessageBegin()
	if err != nil {
		return false, err
	}
	if processor, ok := p.GetProcessorFunction(name); ok {
		return processor.Process(seqId, iprot, oprot)
	}
	// No handler for this method: drain the request payload and reply
	// with an exception. Write errors are deliberately ignored here; the
	// exception itself is returned to the caller.
	iprot.Skip(thrift.STRUCT)
	iprot.ReadMessageEnd()
	x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
	x5.Write(oprot)
	oprot.WriteMessageEnd()
	oprot.Flush()
	return false, x5

}
255 | ||
// addServiceProcessorSum handles incoming Sum requests on behalf of the
// wrapped AddService handler.
type addServiceProcessorSum struct {
	handler AddService
}
259 | ||
// Process decodes the Sum arguments, invokes the handler, and writes the
// reply (or an appropriate exception) back on the output protocol.
func (p *addServiceProcessorSum) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := AddServiceSumArgs{}
	if err = args.Read(iprot); err != nil {
		// Malformed request: answer with a PROTOCOL_ERROR exception.
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("Sum", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush()
		return false, err
	}

	iprot.ReadMessageEnd()
	result := AddServiceSumResult{}
	var retval *SumReply
	var err2 error
	if retval, err2 = p.handler.Sum(args.A, args.B); err2 != nil {
		// Handler failure: answer with an INTERNAL_ERROR exception.
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Sum: "+err2.Error())
		oprot.WriteMessageBegin("Sum", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush()
		return true, err2
	} else {
		result.Success = retval
	}
	// Write the reply; only the first write error (if any) is kept in err.
	if err2 = oprot.WriteMessageBegin("Sum", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}
303 | ||
// addServiceProcessorConcat handles incoming Concat requests on behalf
// of the wrapped AddService handler.
type addServiceProcessorConcat struct {
	handler AddService
}
307 | ||
// Process decodes the Concat arguments, invokes the handler, and writes
// the reply (or an appropriate exception) back on the output protocol.
func (p *addServiceProcessorConcat) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := AddServiceConcatArgs{}
	if err = args.Read(iprot); err != nil {
		// Malformed request: answer with a PROTOCOL_ERROR exception.
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("Concat", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush()
		return false, err
	}

	iprot.ReadMessageEnd()
	result := AddServiceConcatResult{}
	var retval *ConcatReply
	var err2 error
	if retval, err2 = p.handler.Concat(args.A, args.B); err2 != nil {
		// Handler failure: answer with an INTERNAL_ERROR exception.
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Concat: "+err2.Error())
		oprot.WriteMessageBegin("Concat", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush()
		return true, err2
	} else {
		result.Success = retval
	}
	// Write the reply; only the first write error (if any) is kept in err.
	if err2 = oprot.WriteMessageBegin("Concat", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}
351 | ||
352 | // HELPER FUNCTIONS AND STRUCTURES | |
353 | ||
// AddServiceSumArgs is the generated wire representation of the Sum
// method's argument list.
//
// Attributes:
// - A
// - B
type AddServiceSumArgs struct {
	A int64 `thrift:"a,1" json:"a"`
	B int64 `thrift:"b,2" json:"b"`
}

// NewAddServiceSumArgs returns a zero-valued argument struct.
func NewAddServiceSumArgs() *AddServiceSumArgs {
	return &AddServiceSumArgs{}
}

// GetA returns the first operand.
func (p *AddServiceSumArgs) GetA() int64 {
	return p.A
}

// GetB returns the second operand.
func (p *AddServiceSumArgs) GetB() int64 {
	return p.B
}

// Read decodes the struct from the protocol, skipping unknown fields for
// forward compatibility.
func (p *AddServiceSumArgs) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
		case 2:
			if err := p.readField2(iprot); err != nil {
				return err
			}
		default:
			// Unknown field id: skip its payload.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField1 decodes field 1 (a) as an i64.
func (p *AddServiceSumArgs) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI64(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.A = v
	}
	return nil
}

// readField2 decodes field 2 (b) as an i64.
func (p *AddServiceSumArgs) readField2(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI64(); err != nil {
		return thrift.PrependError("error reading field 2: ", err)
	} else {
		p.B = v
	}
	return nil
}

// Write encodes both fields, the field-stop marker, and the struct end
// onto the protocol.
func (p *AddServiceSumArgs) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("Sum_args"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := p.writeField2(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field 1 (a) as an i64.
func (p *AddServiceSumArgs) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("a", thrift.I64, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:a: ", p), err)
	}
	if err := oprot.WriteI64(int64(p.A)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.a (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:a: ", p), err)
	}
	return err
}

// writeField2 encodes field 2 (b) as an i64.
func (p *AddServiceSumArgs) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("b", thrift.I64, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:b: ", p), err)
	}
	if err := oprot.WriteI64(int64(p.B)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.b (2) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:b: ", p), err)
	}
	return err
}

// String implements fmt.Stringer; safe to call on a nil receiver.
func (p *AddServiceSumArgs) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AddServiceSumArgs(%+v)", *p)
}
479 | ||
// AddServiceSumResult is the generated wire representation of the Sum
// method's result; Success is nil until a reply has been decoded.
//
// Attributes:
// - Success
type AddServiceSumResult struct {
	Success *SumReply `thrift:"success,0" json:"success,omitempty"`
}

// NewAddServiceSumResult returns a zero-valued result struct.
func NewAddServiceSumResult() *AddServiceSumResult {
	return &AddServiceSumResult{}
}

// AddServiceSumResult_Success_DEFAULT is the (nil) default returned by
// GetSuccess when no success value has been set.
var AddServiceSumResult_Success_DEFAULT *SumReply

// GetSuccess returns the decoded reply, or the nil default if unset.
func (p *AddServiceSumResult) GetSuccess() *SumReply {
	if !p.IsSetSuccess() {
		return AddServiceSumResult_Success_DEFAULT
	}
	return p.Success
}

// IsSetSuccess reports whether a success value has been set.
func (p *AddServiceSumResult) IsSetSuccess() bool {
	return p.Success != nil
}

// Read decodes the struct from the protocol, skipping unknown fields for
// forward compatibility.
func (p *AddServiceSumResult) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 0:
			if err := p.readField0(iprot); err != nil {
				return err
			}
		default:
			// Unknown field id: skip its payload.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField0 decodes field 0 (success) as an embedded SumReply struct.
func (p *AddServiceSumResult) readField0(iprot thrift.TProtocol) error {
	p.Success = &SumReply{}
	if err := p.Success.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
	}
	return nil
}

// Write encodes the (optional) success field, the field-stop marker, and
// the struct end onto the protocol.
func (p *AddServiceSumResult) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("Sum_result"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField0(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField0 encodes field 0 (success) only when it is set.
func (p *AddServiceSumResult) writeField0(oprot thrift.TProtocol) (err error) {
	if p.IsSetSuccess() {
		if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
		}
		if err := p.Success.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
		}
	}
	return err
}

// String implements fmt.Stringer; safe to call on a nil receiver.
func (p *AddServiceSumResult) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AddServiceSumResult(%+v)", *p)
}
580 | ||
// AddServiceConcatArgs is the generated wire representation of the
// Concat method's argument list.
//
// Attributes:
// - A
// - B
type AddServiceConcatArgs struct {
	A string `thrift:"a,1" json:"a"`
	B string `thrift:"b,2" json:"b"`
}

// NewAddServiceConcatArgs returns a zero-valued argument struct.
func NewAddServiceConcatArgs() *AddServiceConcatArgs {
	return &AddServiceConcatArgs{}
}

// GetA returns the first string operand.
func (p *AddServiceConcatArgs) GetA() string {
	return p.A
}

// GetB returns the second string operand.
func (p *AddServiceConcatArgs) GetB() string {
	return p.B
}

// Read decodes the struct from the protocol, skipping unknown fields for
// forward compatibility.
func (p *AddServiceConcatArgs) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
		case 2:
			if err := p.readField2(iprot); err != nil {
				return err
			}
		default:
			// Unknown field id: skip its payload.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField1 decodes field 1 (a) as a string.
func (p *AddServiceConcatArgs) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.A = v
	}
	return nil
}

// readField2 decodes field 2 (b) as a string.
func (p *AddServiceConcatArgs) readField2(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 2: ", err)
	} else {
		p.B = v
	}
	return nil
}

// Write encodes both fields, the field-stop marker, and the struct end
// onto the protocol.
func (p *AddServiceConcatArgs) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("Concat_args"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := p.writeField2(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field 1 (a) as a string.
func (p *AddServiceConcatArgs) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("a", thrift.STRING, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:a: ", p), err)
	}
	if err := oprot.WriteString(string(p.A)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.a (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:a: ", p), err)
	}
	return err
}

// writeField2 encodes field 2 (b) as a string.
func (p *AddServiceConcatArgs) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("b", thrift.STRING, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:b: ", p), err)
	}
	if err := oprot.WriteString(string(p.B)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.b (2) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:b: ", p), err)
	}
	return err
}

// String implements fmt.Stringer; safe to call on a nil receiver.
func (p *AddServiceConcatArgs) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AddServiceConcatArgs(%+v)", *p)
}
706 | ||
// AddServiceConcatResult is the generated wire representation of the
// Concat method's result; Success is nil until a reply has been decoded.
//
// Attributes:
// - Success
type AddServiceConcatResult struct {
	Success *ConcatReply `thrift:"success,0" json:"success,omitempty"`
}

// NewAddServiceConcatResult returns a zero-valued result struct.
func NewAddServiceConcatResult() *AddServiceConcatResult {
	return &AddServiceConcatResult{}
}

// AddServiceConcatResult_Success_DEFAULT is the (nil) default returned
// by GetSuccess when no success value has been set.
var AddServiceConcatResult_Success_DEFAULT *ConcatReply

// GetSuccess returns the decoded reply, or the nil default if unset.
func (p *AddServiceConcatResult) GetSuccess() *ConcatReply {
	if !p.IsSetSuccess() {
		return AddServiceConcatResult_Success_DEFAULT
	}
	return p.Success
}

// IsSetSuccess reports whether a success value has been set.
func (p *AddServiceConcatResult) IsSetSuccess() bool {
	return p.Success != nil
}

// Read decodes the struct from the protocol, skipping unknown fields for
// forward compatibility.
func (p *AddServiceConcatResult) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 0:
			if err := p.readField0(iprot); err != nil {
				return err
			}
		default:
			// Unknown field id: skip its payload.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField0 decodes field 0 (success) as an embedded ConcatReply struct.
func (p *AddServiceConcatResult) readField0(iprot thrift.TProtocol) error {
	p.Success = &ConcatReply{}
	if err := p.Success.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
	}
	return nil
}

// Write encodes the (optional) success field, the field-stop marker, and
// the struct end onto the protocol.
func (p *AddServiceConcatResult) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("Concat_result"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField0(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField0 encodes field 0 (success) only when it is set.
func (p *AddServiceConcatResult) writeField0(oprot thrift.TProtocol) (err error) {
	if p.IsSetSuccess() {
		if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
		}
		if err := p.Success.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
		}
	}
	return err
}

// String implements fmt.Stringer; safe to call on a nil receiver.
func (p *AddServiceConcatResult) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AddServiceConcatResult(%+v)", *p)
}
0 | // Autogenerated by Thrift Compiler (0.9.3) | |
1 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING | |
2 | ||
3 | package addsvc | |
4 | ||
5 | import ( | |
6 | "bytes" | |
7 | "fmt" | |
8 | "github.com/apache/thrift/lib/go/thrift" | |
9 | ) | |
10 | ||
// (needed to ensure safety because of naive import list construction.)
// The blank assignments below keep the generated imports referenced even
// when no constant in this file uses them.
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal

// init is intentionally empty; the Thrift compiler always emits it.
func init() {
}
0 | // Autogenerated by Thrift Compiler (0.9.3) | |
1 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING | |
2 | ||
3 | package addsvc | |
4 | ||
5 | import ( | |
6 | "bytes" | |
7 | "fmt" | |
8 | "github.com/apache/thrift/lib/go/thrift" | |
9 | ) | |
10 | ||
// (needed to ensure safety because of naive import list construction.)
// The blank assignments below keep the generated imports referenced even
// when no type in this file uses them.
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal

// GoUnusedProtection__ is a Thrift-compiler artifact; presumably it lets
// other generated files reference this package so its import is never
// "unused" — TODO confirm against the generator's conventions.
var GoUnusedProtection__ int
17 | ||
// SumReply is the generated wire representation of the Sum RPC's reply,
// carrying the single i64 result value.
//
// Attributes:
// - Value
type SumReply struct {
	Value int64 `thrift:"value,1" json:"value"`
}

// NewSumReply returns a zero-valued reply struct.
func NewSumReply() *SumReply {
	return &SumReply{}
}

// GetValue returns the reply's result value.
func (p *SumReply) GetValue() int64 {
	return p.Value
}

// Read decodes the struct from the protocol, skipping unknown fields for
// forward compatibility.
func (p *SumReply) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
		default:
			// Unknown field id: skip its payload.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField1 decodes field 1 (value) as an i64.
func (p *SumReply) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI64(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.Value = v
	}
	return nil
}

// Write encodes the value field, the field-stop marker, and the struct
// end onto the protocol.
func (p *SumReply) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("SumReply"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field 1 (value) as an i64.
func (p *SumReply) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("value", thrift.I64, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err)
	}
	if err := oprot.WriteI64(int64(p.Value)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err)
	}
	return err
}

// String implements fmt.Stringer; safe to call on a nil receiver.
func (p *SumReply) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("SumReply(%+v)", *p)
}
108 | ||
// ConcatReply is the generated wire representation of the Concat RPC's
// reply, carrying the single string result value.
//
// Attributes:
// - Value
type ConcatReply struct {
	Value string `thrift:"value,1" json:"value"`
}

// NewConcatReply returns a zero-valued reply struct.
func NewConcatReply() *ConcatReply {
	return &ConcatReply{}
}

// GetValue returns the reply's result value.
func (p *ConcatReply) GetValue() string {
	return p.Value
}

// Read decodes the struct from the protocol, skipping unknown fields for
// forward compatibility.
func (p *ConcatReply) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
		default:
			// Unknown field id: skip its payload.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// readField1 decodes field 1 (value) as a string.
func (p *ConcatReply) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.Value = v
	}
	return nil
}

// Write encodes the value field, the field-stop marker, and the struct
// end onto the protocol.
func (p *ConcatReply) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("ConcatReply"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field 1 (value) as a string.
func (p *ConcatReply) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("value", thrift.STRING, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err)
	}
	if err := oprot.WriteString(string(p.Value)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err)
	}
	return err
}

// String implements fmt.Stringer; safe to call on a nil receiver.
func (p *ConcatReply) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("ConcatReply(%+v)", *p)
}
0 | package main | |
1 | ||
2 | import ( | |
3 | "github.com/go-kit/kit/examples/addsvc/server" | |
4 | thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/add" | |
5 | ) | |
6 | ||
// thriftBinding adapts a server.AddService to the Thrift-generated service
// interface: the service is embedded, and the methods below translate between
// the generated wire types and the domain types.
type thriftBinding struct {
	server.AddService
}
10 | ||
11 | func (tb thriftBinding) Sum(a, b int64) (*thriftadd.SumReply, error) { | |
12 | v := tb.AddService.Sum(int(a), int(b)) | |
13 | return &thriftadd.SumReply{Value: int64(v)}, nil | |
14 | } | |
15 | ||
16 | func (tb thriftBinding) Concat(a, b string) (*thriftadd.ConcatReply, error) { | |
17 | v := tb.AddService.Concat(a, b) | |
18 | return &thriftadd.ConcatReply{Value: v}, nil | |
19 | } |
0 | package addsvc | |
1 | ||
2 | // This file provides server-side bindings for the gRPC transport. | |
3 | // It utilizes the transport/grpc.Server. | |
4 | ||
5 | import ( | |
6 | stdopentracing "github.com/opentracing/opentracing-go" | |
7 | "golang.org/x/net/context" | |
8 | ||
9 | "github.com/go-kit/kit/examples/addsvc/pb" | |
10 | "github.com/go-kit/kit/log" | |
11 | "github.com/go-kit/kit/tracing/opentracing" | |
12 | grpctransport "github.com/go-kit/kit/transport/grpc" | |
13 | ) | |
14 | ||
// MakeGRPCServer makes a set of endpoints available as a gRPC AddServer.
func MakeGRPCServer(ctx context.Context, endpoints Endpoints, tracer stdopentracing.Tracer, logger log.Logger) pb.AddServer {
	// Options shared by every method. Each NewServer call below appends its
	// own per-method tracing option; since this literal's len == cap, each
	// append allocates a fresh backing array, so the two calls don't alias.
	options := []grpctransport.ServerOption{
		grpctransport.ServerErrorLogger(logger),
	}
	// Each handler pairs one endpoint with its request decoder and response
	// encoder, plus a ServerBefore that extracts the incoming trace context.
	return &grpcServer{
		sum: grpctransport.NewServer(
			ctx,
			endpoints.SumEndpoint,
			DecodeGRPCSumRequest,
			EncodeGRPCSumResponse,
			append(options, grpctransport.ServerBefore(opentracing.FromGRPCRequest(tracer, "Sum", logger)))...,
		),
		concat: grpctransport.NewServer(
			ctx,
			endpoints.ConcatEndpoint,
			DecodeGRPCConcatRequest,
			EncodeGRPCConcatResponse,
			append(options, grpctransport.ServerBefore(opentracing.FromGRPCRequest(tracer, "Concat", logger)))...,
		),
	}
}
37 | ||
// grpcServer implements pb.AddServer by dispatching each gRPC method to a
// go-kit transport/grpc.Handler.
type grpcServer struct {
	sum    grpctransport.Handler
	concat grpctransport.Handler
}
42 | ||
43 | func (s *grpcServer) Sum(ctx context.Context, req *pb.SumRequest) (*pb.SumReply, error) { | |
44 | _, rep, err := s.sum.ServeGRPC(ctx, req) | |
45 | return rep.(*pb.SumReply), err | |
46 | } | |
47 | ||
48 | func (s *grpcServer) Concat(ctx context.Context, req *pb.ConcatRequest) (*pb.ConcatReply, error) { | |
49 | _, rep, err := s.concat.ServeGRPC(ctx, req) | |
50 | return rep.(*pb.ConcatReply), err | |
51 | } | |
52 | ||
53 | // DecodeGRPCSumRequest is a transport/grpc.DecodeRequestFunc that converts a | |
54 | // gRPC sum request to a user-domain sum request. Primarily useful in a server. | |
55 | func DecodeGRPCSumRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { | |
56 | req := grpcReq.(*pb.SumRequest) | |
57 | return sumRequest{A: int(req.A), B: int(req.B)}, nil | |
58 | } | |
59 | ||
60 | // DecodeGRPCConcatRequest is a transport/grpc.DecodeRequestFunc that converts a | |
61 | // gRPC concat request to a user-domain concat request. Primarily useful in a | |
62 | // server. | |
63 | func DecodeGRPCConcatRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { | |
64 | req := grpcReq.(*pb.ConcatRequest) | |
65 | return concatRequest{A: req.A, B: req.B}, nil | |
66 | } | |
67 | ||
68 | // DecodeGRPCSumResponse is a transport/grpc.DecodeResponseFunc that converts a | |
69 | // gRPC sum reply to a user-domain sum response. Primarily useful in a client. | |
70 | func DecodeGRPCSumResponse(_ context.Context, grpcReply interface{}) (interface{}, error) { | |
71 | reply := grpcReply.(*pb.SumReply) | |
72 | return sumResponse{V: int(reply.V)}, nil | |
73 | } | |
74 | ||
75 | // DecodeGRPCConcatResponse is a transport/grpc.DecodeResponseFunc that converts | |
76 | // a gRPC concat reply to a user-domain concat response. Primarily useful in a | |
77 | // client. | |
78 | func DecodeGRPCConcatResponse(_ context.Context, grpcReply interface{}) (interface{}, error) { | |
79 | reply := grpcReply.(*pb.ConcatReply) | |
80 | return concatResponse{V: reply.V}, nil | |
81 | } | |
82 | ||
83 | // EncodeGRPCSumResponse is a transport/grpc.EncodeResponseFunc that converts a | |
84 | // user-domain sum response to a gRPC sum reply. Primarily useful in a server. | |
85 | func EncodeGRPCSumResponse(_ context.Context, response interface{}) (interface{}, error) { | |
86 | resp := response.(sumResponse) | |
87 | return &pb.SumReply{V: int64(resp.V)}, nil | |
88 | } | |
89 | ||
90 | // EncodeGRPCConcatResponse is a transport/grpc.EncodeResponseFunc that converts | |
91 | // a user-domain concat response to a gRPC concat reply. Primarily useful in a | |
92 | // server. | |
93 | func EncodeGRPCConcatResponse(_ context.Context, response interface{}) (interface{}, error) { | |
94 | resp := response.(concatResponse) | |
95 | return &pb.ConcatReply{V: resp.V}, nil | |
96 | } | |
97 | ||
98 | // EncodeGRPCSumRequest is a transport/grpc.EncodeRequestFunc that converts a | |
99 | // user-domain sum request to a gRPC sum request. Primarily useful in a client. | |
100 | func EncodeGRPCSumRequest(_ context.Context, request interface{}) (interface{}, error) { | |
101 | req := request.(sumRequest) | |
102 | return &pb.SumRequest{A: int64(req.A), B: int64(req.B)}, nil | |
103 | } | |
104 | ||
105 | // EncodeGRPCConcatRequest is a transport/grpc.EncodeRequestFunc that converts a | |
106 | // user-domain concat request to a gRPC concat request. Primarily useful in a | |
107 | // client. | |
108 | func EncodeGRPCConcatRequest(_ context.Context, request interface{}) (interface{}, error) { | |
109 | req := request.(concatRequest) | |
110 | return &pb.ConcatRequest{A: req.A, B: req.B}, nil | |
111 | } |
0 | package addsvc | |
1 | ||
2 | // This file provides server-side bindings for the HTTP transport. | |
3 | // It utilizes the transport/http.Server. | |
4 | ||
5 | import ( | |
6 | "bytes" | |
7 | "encoding/json" | |
8 | "errors" | |
9 | "io/ioutil" | |
10 | "net/http" | |
11 | ||
12 | stdopentracing "github.com/opentracing/opentracing-go" | |
13 | "golang.org/x/net/context" | |
14 | ||
15 | "github.com/go-kit/kit/log" | |
16 | "github.com/go-kit/kit/tracing/opentracing" | |
17 | httptransport "github.com/go-kit/kit/transport/http" | |
18 | ) | |
19 | ||
// MakeHTTPHandler returns a handler that makes a set of endpoints available
// on predefined paths.
func MakeHTTPHandler(ctx context.Context, endpoints Endpoints, tracer stdopentracing.Tracer, logger log.Logger) http.Handler {
	// Options shared by every route; each route appends its own per-method
	// tracing ServerBefore. The literal's len == cap, so each append below
	// copies rather than sharing a backing array.
	options := []httptransport.ServerOption{
		httptransport.ServerErrorEncoder(errorEncoder),
		httptransport.ServerErrorLogger(logger),
	}
	m := http.NewServeMux()
	m.Handle("/sum", httptransport.NewServer(
		ctx,
		endpoints.SumEndpoint,
		DecodeHTTPSumRequest,
		EncodeHTTPGenericResponse,
		append(options, httptransport.ServerBefore(opentracing.FromHTTPRequest(tracer, "Sum", logger)))...,
	))
	m.Handle("/concat", httptransport.NewServer(
		ctx,
		endpoints.ConcatEndpoint,
		DecodeHTTPConcatRequest,
		EncodeHTTPGenericResponse,
		append(options, httptransport.ServerBefore(opentracing.FromHTTPRequest(tracer, "Concat", logger)))...,
	))
	return m
}
44 | ||
// errorEncoder writes errors to the HTTP response as a JSON errorWrapper with
// an appropriate status code: decode failures and the service's known
// input-validation errors map to 400, everything else to 500.
func errorEncoder(_ context.Context, err error, w http.ResponseWriter) {
	code := http.StatusInternalServerError
	msg := err.Error()

	// transport/http wraps codec and endpoint errors in httptransport.Error,
	// tagging each with the domain in which it occurred.
	if e, ok := err.(httptransport.Error); ok {
		msg = e.Err.Error()
		switch e.Domain {
		case httptransport.DomainDecode:
			code = http.StatusBadRequest

		case httptransport.DomainDo:
			switch e.Err {
			case ErrTwoZeroes, ErrMaxSizeExceeded, ErrIntOverflow:
				// Business-rule violations are the client's fault.
				code = http.StatusBadRequest
			}
		}
	}

	w.WriteHeader(code)
	json.NewEncoder(w).Encode(errorWrapper{Error: msg}) // encode error unrecoverable here: headers already sent
}
66 | ||
67 | func errorDecoder(r *http.Response) error { | |
68 | var w errorWrapper | |
69 | if err := json.NewDecoder(r.Body).Decode(&w); err != nil { | |
70 | return err | |
71 | } | |
72 | return errors.New(w.Error) | |
73 | } | |
74 | ||
// errorWrapper is the JSON envelope used to transport error messages over
// HTTP, e.g. {"error": "..."}.
type errorWrapper struct {
	Error string `json:"error"`
}
78 | ||
79 | // DecodeHTTPSumRequest is a transport/http.DecodeRequestFunc that decodes a | |
80 | // JSON-encoded sum request from the HTTP request body. Primarily useful in a | |
81 | // server. | |
82 | func DecodeHTTPSumRequest(_ context.Context, r *http.Request) (interface{}, error) { | |
83 | var req sumRequest | |
84 | err := json.NewDecoder(r.Body).Decode(&req) | |
85 | return req, err | |
86 | } | |
87 | ||
88 | // DecodeHTTPConcatRequest is a transport/http.DecodeRequestFunc that decodes a | |
89 | // JSON-encoded concat request from the HTTP request body. Primarily useful in a | |
90 | // server. | |
91 | func DecodeHTTPConcatRequest(_ context.Context, r *http.Request) (interface{}, error) { | |
92 | var req concatRequest | |
93 | err := json.NewDecoder(r.Body).Decode(&req) | |
94 | return req, err | |
95 | } | |
96 | ||
97 | // DecodeHTTPSumResponse is a transport/http.DecodeResponseFunc that decodes a | |
98 | // JSON-encoded sum response from the HTTP response body. If the response has a | |
99 | // non-200 status code, we will interpret that as an error and attempt to decode | |
100 | // the specific error message from the response body. Primarily useful in a | |
101 | // client. | |
102 | func DecodeHTTPSumResponse(_ context.Context, r *http.Response) (interface{}, error) { | |
103 | if r.StatusCode != http.StatusOK { | |
104 | return nil, errorDecoder(r) | |
105 | } | |
106 | var resp sumResponse | |
107 | err := json.NewDecoder(r.Body).Decode(&resp) | |
108 | return resp, err | |
109 | } | |
110 | ||
111 | // DecodeHTTPConcatResponse is a transport/http.DecodeResponseFunc that decodes | |
112 | // a JSON-encoded concat response from the HTTP response body. If the response | |
113 | // has a non-200 status code, we will interpret that as an error and attempt to | |
114 | // decode the specific error message from the response body. Primarily useful in | |
115 | // a client. | |
116 | func DecodeHTTPConcatResponse(_ context.Context, r *http.Response) (interface{}, error) { | |
117 | if r.StatusCode != http.StatusOK { | |
118 | return nil, errorDecoder(r) | |
119 | } | |
120 | var resp concatResponse | |
121 | err := json.NewDecoder(r.Body).Decode(&resp) | |
122 | return resp, err | |
123 | } | |
124 | ||
125 | // EncodeHTTPGenericRequest is a transport/http.EncodeRequestFunc that | |
126 | // JSON-encodes any request to the request body. Primarily useful in a client. | |
127 | func EncodeHTTPGenericRequest(_ context.Context, r *http.Request, request interface{}) error { | |
128 | var buf bytes.Buffer | |
129 | if err := json.NewEncoder(&buf).Encode(request); err != nil { | |
130 | return err | |
131 | } | |
132 | r.Body = ioutil.NopCloser(&buf) | |
133 | return nil | |
134 | } | |
135 | ||
136 | // EncodeHTTPGenericResponse is a transport/http.EncodeResponseFunc that encodes | |
137 | // the response as JSON to the response writer. Primarily useful in a server. | |
138 | func EncodeHTTPGenericResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { | |
139 | return json.NewEncoder(w).Encode(response) | |
140 | } |
0 | package addsvc | |
1 | ||
2 | // This file provides server-side bindings for the Thrift transport. | |
3 | // | |
4 | // This file also provides endpoint constructors that utilize a Thrift client, | |
5 | // for use in client packages, because package transport/thrift doesn't exist | |
6 | // yet. See https://github.com/go-kit/kit/issues/184. | |
7 | ||
8 | import ( | |
9 | "golang.org/x/net/context" | |
10 | ||
11 | "github.com/go-kit/kit/endpoint" | |
12 | thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" | |
13 | ) | |
14 | ||
15 | // MakeThriftHandler makes a set of endpoints available as a Thrift service. | |
16 | func MakeThriftHandler(ctx context.Context, e Endpoints) thriftadd.AddService { | |
17 | return &thriftServer{ | |
18 | ctx: ctx, | |
19 | sum: e.SumEndpoint, | |
20 | concat: e.ConcatEndpoint, | |
21 | } | |
22 | } | |
23 | ||
// thriftServer adapts Go kit endpoints to the Thrift-generated AddService
// interface. The context lives in the struct — normally an anti-pattern —
// because the generated Thrift method signatures have no ctx parameter to
// thread it through.
type thriftServer struct {
	ctx    context.Context
	sum    endpoint.Endpoint
	concat endpoint.Endpoint
}
29 | ||
30 | func (s *thriftServer) Sum(a int64, b int64) (*thriftadd.SumReply, error) { | |
31 | request := sumRequest{A: int(a), B: int(b)} | |
32 | response, err := s.sum(s.ctx, request) | |
33 | if err != nil { | |
34 | return nil, err | |
35 | } | |
36 | resp := response.(sumResponse) | |
37 | return &thriftadd.SumReply{Value: int64(resp.V)}, nil | |
38 | } | |
39 | ||
40 | func (s *thriftServer) Concat(a string, b string) (*thriftadd.ConcatReply, error) { | |
41 | request := concatRequest{A: a, B: b} | |
42 | response, err := s.concat(s.ctx, request) | |
43 | if err != nil { | |
44 | return nil, err | |
45 | } | |
46 | resp := response.(concatResponse) | |
47 | return &thriftadd.ConcatReply{Value: resp.V}, nil | |
48 | } | |
49 | ||
50 | // MakeThriftSumEndpoint returns an endpoint that invokes the passed Thrift client. | |
51 | // Useful only in clients, and only until a proper transport/thrift.Client exists. | |
52 | func MakeThriftSumEndpoint(client *thriftadd.AddServiceClient) endpoint.Endpoint { | |
53 | return func(ctx context.Context, request interface{}) (interface{}, error) { | |
54 | req := request.(sumRequest) | |
55 | reply, err := client.Sum(int64(req.A), int64(req.B)) | |
56 | if err != nil { | |
57 | return nil, err | |
58 | } | |
59 | return sumResponse{V: int(reply.Value)}, nil | |
60 | } | |
61 | } | |
62 | ||
63 | // MakeThriftConcatEndpoint returns an endpoint that invokes the passed Thrift | |
64 | // client. Useful only in clients, and only until a proper | |
65 | // transport/thrift.Client exists. | |
66 | func MakeThriftConcatEndpoint(client *thriftadd.AddServiceClient) endpoint.Endpoint { | |
67 | return func(ctx context.Context, request interface{}) (interface{}, error) { | |
68 | req := request.(concatRequest) | |
69 | reply, err := client.Concat(req.A, req.B) | |
70 | if err != nil { | |
71 | return nil, err | |
72 | } | |
73 | return concatResponse{V: reply.Value}, nil | |
74 | } | |
75 | } |
0 | 0 | package main |
1 | 1 | |
2 | 2 | import ( |
3 | "bytes" | |
3 | 4 | "encoding/json" |
4 | 5 | "flag" |
5 | 6 | "fmt" |
6 | 7 | "io" |
7 | 8 | "io/ioutil" |
8 | stdlog "log" | |
9 | 9 | "net/http" |
10 | 10 | "net/url" |
11 | 11 | "os" |
16 | 16 | |
17 | 17 | "github.com/gorilla/mux" |
18 | 18 | "github.com/hashicorp/consul/api" |
19 | "github.com/opentracing/opentracing-go" | |
19 | stdopentracing "github.com/opentracing/opentracing-go" | |
20 | 20 | "golang.org/x/net/context" |
21 | 21 | |
22 | 22 | "github.com/go-kit/kit/endpoint" |
23 | "github.com/go-kit/kit/examples/addsvc/client/grpc" | |
24 | "github.com/go-kit/kit/examples/addsvc/server" | |
25 | "github.com/go-kit/kit/loadbalancer" | |
26 | "github.com/go-kit/kit/loadbalancer/consul" | |
23 | "github.com/go-kit/kit/examples/addsvc" | |
24 | addsvcgrpcclient "github.com/go-kit/kit/examples/addsvc/client/grpc" | |
27 | 25 | "github.com/go-kit/kit/log" |
26 | "github.com/go-kit/kit/sd" | |
27 | consulsd "github.com/go-kit/kit/sd/consul" | |
28 | "github.com/go-kit/kit/sd/lb" | |
28 | 29 | httptransport "github.com/go-kit/kit/transport/http" |
30 | "google.golang.org/grpc" | |
29 | 31 | ) |
30 | 32 | |
31 | 33 | func main() { |
37 | 39 | ) |
38 | 40 | flag.Parse() |
39 | 41 | |
40 | // Log domain | |
41 | logger := log.NewLogfmtLogger(os.Stderr) | |
42 | logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC).With("caller", log.DefaultCaller) | |
43 | stdlog.SetFlags(0) // flags are handled by Go kit's logger | |
44 | stdlog.SetOutput(log.NewStdlibAdapter(logger)) // redirect anything using stdlib log to us | |
42 | // Logging domain. | |
43 | var logger log.Logger | |
44 | { | |
45 | logger = log.NewLogfmtLogger(os.Stderr) | |
46 | logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC) | |
47 | logger = log.NewContext(logger).With("caller", log.DefaultCaller) | |
48 | } | |
45 | 49 | |
46 | 50 | // Service discovery domain. In this example we use Consul. |
47 | consulConfig := api.DefaultConfig() | |
48 | if len(*consulAddr) > 0 { | |
49 | consulConfig.Address = *consulAddr | |
50 | } | |
51 | consulClient, err := api.NewClient(consulConfig) | |
52 | if err != nil { | |
53 | logger.Log("err", err) | |
54 | os.Exit(1) | |
55 | } | |
56 | discoveryClient := consul.NewClient(consulClient) | |
57 | ||
58 | // Context domain. | |
51 | var client consulsd.Client | |
52 | { | |
53 | consulConfig := api.DefaultConfig() | |
54 | if len(*consulAddr) > 0 { | |
55 | consulConfig.Address = *consulAddr | |
56 | } | |
57 | consulClient, err := api.NewClient(consulConfig) | |
58 | if err != nil { | |
59 | logger.Log("err", err) | |
60 | os.Exit(1) | |
61 | } | |
62 | client = consulsd.NewClient(consulClient) | |
63 | } | |
64 | ||
65 | // Transport domain. | |
66 | tracer := stdopentracing.GlobalTracer() // no-op | |
59 | 67 | ctx := context.Background() |
60 | ||
61 | // Set up our routes. | |
62 | // | |
63 | // Each Consul service name maps to multiple instances of that service. We | |
64 | // connect to each instance according to its pre-determined transport: in this | |
65 | // case, we choose to access addsvc via its gRPC client, and stringsvc over | |
66 | // plain transport/http (it has no client package). | |
67 | // | |
68 | // Each service instance implements multiple methods, and we want to map each | |
69 | // method to a unique path on the API gateway. So, we define that path and its | |
70 | // corresponding factory function, which takes an instance string and returns an | |
71 | // endpoint.Endpoint for the specific method. | |
72 | // | |
73 | // Finally, we mount that path + endpoint handler into the router. | |
74 | 68 | r := mux.NewRouter() |
75 | for consulName, methods := range map[string][]struct { | |
76 | path string | |
77 | factory loadbalancer.Factory | |
78 | }{ | |
79 | "addsvc": { | |
80 | {path: "/api/addsvc/concat", factory: grpc.MakeConcatEndpointFactory(opentracing.GlobalTracer(), nil)}, | |
81 | {path: "/api/addsvc/sum", factory: grpc.MakeSumEndpointFactory(opentracing.GlobalTracer(), nil)}, | |
82 | }, | |
83 | "stringsvc": { | |
84 | {path: "/api/stringsvc/uppercase", factory: httpFactory(ctx, "GET", "uppercase/")}, | |
85 | {path: "/api/stringsvc/concat", factory: httpFactory(ctx, "GET", "concat/")}, | |
86 | }, | |
87 | } { | |
88 | for _, method := range methods { | |
89 | publisher, err := consul.NewPublisher(discoveryClient, method.factory, logger, consulName) | |
90 | if err != nil { | |
91 | logger.Log("service", consulName, "path", method.path, "err", err) | |
92 | continue | |
93 | } | |
94 | lb := loadbalancer.NewRoundRobin(publisher) | |
95 | e := loadbalancer.Retry(*retryMax, *retryTimeout, lb) | |
96 | h := makeHandler(ctx, e, logger) | |
97 | r.HandleFunc(method.path, h) | |
98 | } | |
99 | } | |
100 | ||
101 | // Mechanical stuff. | |
69 | ||
70 | // Now we begin installing the routes. Each route corresponds to a single | |
71 | // method: sum, concat, uppercase, and count. | |
72 | ||
73 | // addsvc routes. | |
74 | { | |
75 | // Each method gets constructed with a factory. Factories take an | |
76 | // instance string, and return a specific endpoint. In the factory we | |
77 | // dial the instance string we get from Consul, and then leverage an | |
78 | // addsvc client package to construct a complete service. We can then | |
79 | // leverage the addsvc.Make{Sum,Concat}Endpoint constructors to convert | |
80 | // the complete service to specific endpoint. | |
81 | ||
82 | var ( | |
83 | tags = []string{} | |
84 | passingOnly = true | |
85 | endpoints = addsvc.Endpoints{} | |
86 | ) | |
87 | { | |
88 | factory := addsvcFactory(addsvc.MakeSumEndpoint, tracer, logger) | |
89 | subscriber := consulsd.NewSubscriber(client, factory, logger, "addsvc", tags, passingOnly) | |
90 | balancer := lb.NewRoundRobin(subscriber) | |
91 | retry := lb.Retry(*retryMax, *retryTimeout, balancer) | |
92 | endpoints.SumEndpoint = retry | |
93 | } | |
94 | { | |
95 | factory := addsvcFactory(addsvc.MakeConcatEndpoint, tracer, logger) | |
96 | subscriber := consulsd.NewSubscriber(client, factory, logger, "addsvc", tags, passingOnly) | |
97 | balancer := lb.NewRoundRobin(subscriber) | |
98 | retry := lb.Retry(*retryMax, *retryTimeout, balancer) | |
99 | endpoints.ConcatEndpoint = retry | |
100 | } | |
101 | ||
102 | // Here we leverage the fact that addsvc comes with a constructor for an | |
103 | // HTTP handler, and just install it under a particular path prefix in | |
104 | // our router. | |
105 | ||
106 | r.PathPrefix("addsvc/").Handler(addsvc.MakeHTTPHandler(ctx, endpoints, tracer, logger)) | |
107 | } | |
108 | ||
109 | // stringsvc routes. | |
110 | { | |
111 | // addsvc had lots of nice importable Go packages we could leverage. | |
112 | // With stringsvc we are not so fortunate, it just has some endpoints | |
113 | // that we assume will exist. So we have to write that logic here. This | |
114 | // is by design, so you can see two totally different methods of | |
115 | // proxying to a remote service. | |
116 | ||
117 | var ( | |
118 | tags = []string{} | |
119 | passingOnly = true | |
120 | uppercase endpoint.Endpoint | |
121 | count endpoint.Endpoint | |
122 | ) | |
123 | { | |
124 | factory := stringsvcFactory(ctx, "GET", "/uppercase") | |
125 | subscriber := consulsd.NewSubscriber(client, factory, logger, "stringsvc", tags, passingOnly) | |
126 | balancer := lb.NewRoundRobin(subscriber) | |
127 | retry := lb.Retry(*retryMax, *retryTimeout, balancer) | |
128 | uppercase = retry | |
129 | } | |
130 | { | |
131 | factory := stringsvcFactory(ctx, "GET", "/count") | |
132 | subscriber := consulsd.NewSubscriber(client, factory, logger, "stringsvc", tags, passingOnly) | |
133 | balancer := lb.NewRoundRobin(subscriber) | |
134 | retry := lb.Retry(*retryMax, *retryTimeout, balancer) | |
135 | count = retry | |
136 | } | |
137 | ||
138 | // We can use the transport/http.Server to act as our handler, all we | |
139 | // have to do provide it with the encode and decode functions for our | |
140 | // stringsvc methods. | |
141 | ||
142 | r.Handle("/stringsvc/uppercase", httptransport.NewServer(ctx, uppercase, decodeUppercaseRequest, encodeJSONResponse)) | |
143 | r.Handle("/stringsvc/count", httptransport.NewServer(ctx, count, decodeCountRequest, encodeJSONResponse)) | |
144 | } | |
145 | ||
146 | // Interrupt handler. | |
102 | 147 | errc := make(chan error) |
103 | 148 | go func() { |
104 | errc <- interrupt() | |
149 | c := make(chan os.Signal) | |
150 | signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) | |
151 | errc <- fmt.Errorf("%s", <-c) | |
105 | 152 | }() |
153 | ||
154 | // HTTP transport. | |
106 | 155 | go func() { |
107 | logger.Log("transport", "http", "addr", *httpAddr) | |
156 | logger.Log("transport", "HTTP", "addr", *httpAddr) | |
108 | 157 | errc <- http.ListenAndServe(*httpAddr, r) |
109 | 158 | }() |
110 | logger.Log("err", <-errc) | |
111 | } | |
112 | ||
113 | func makeHandler(ctx context.Context, e endpoint.Endpoint, logger log.Logger) http.HandlerFunc { | |
114 | return func(w http.ResponseWriter, r *http.Request) { | |
115 | resp, err := e(ctx, r.Body) | |
159 | ||
160 | // Run! | |
161 | logger.Log("exit", <-errc) | |
162 | } | |
163 | ||
164 | func addsvcFactory(makeEndpoint func(addsvc.Service) endpoint.Endpoint, tracer stdopentracing.Tracer, logger log.Logger) sd.Factory { | |
165 | return func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
166 | // We could just as easily use the HTTP or Thrift client package to make | |
167 | // the connection to addsvc. We've chosen gRPC arbitrarily. Note that | |
168 | // the transport is an implementation detail: it doesn't leak out of | |
169 | // this function. Nice! | |
170 | ||
171 | conn, err := grpc.Dial(instance, grpc.WithInsecure()) | |
116 | 172 | if err != nil { |
117 | logger.Log("err", err) | |
118 | http.Error(w, err.Error(), http.StatusInternalServerError) | |
119 | return | |
120 | } | |
121 | b, ok := resp.([]byte) | |
122 | if !ok { | |
123 | logger.Log("err", "endpoint response is not of type []byte") | |
124 | http.Error(w, err.Error(), http.StatusInternalServerError) | |
125 | return | |
126 | } | |
127 | _, err = w.Write(b) | |
128 | if err != nil { | |
129 | logger.Log("err", err) | |
130 | return | |
131 | } | |
132 | } | |
133 | } | |
134 | ||
135 | func makeSumEndpoint(svc server.AddService) endpoint.Endpoint { | |
136 | return func(ctx context.Context, request interface{}) (interface{}, error) { | |
137 | r := request.(io.Reader) | |
138 | var req server.SumRequest | |
139 | if err := json.NewDecoder(r).Decode(&req); err != nil { | |
140 | return nil, err | |
141 | } | |
142 | v := svc.Sum(req.A, req.B) | |
143 | return json.Marshal(v) | |
144 | } | |
145 | } | |
146 | ||
147 | func makeConcatEndpoint(svc server.AddService) endpoint.Endpoint { | |
148 | return func(ctx context.Context, request interface{}) (interface{}, error) { | |
149 | r := request.(io.Reader) | |
150 | var req server.ConcatRequest | |
151 | if err := json.NewDecoder(r).Decode(&req); err != nil { | |
152 | return nil, err | |
153 | } | |
154 | v := svc.Concat(req.A, req.B) | |
155 | return json.Marshal(v) | |
156 | } | |
157 | } | |
158 | ||
159 | func httpFactory(ctx context.Context, method, path string) loadbalancer.Factory { | |
173 | return nil, nil, err | |
174 | } | |
175 | service := addsvcgrpcclient.New(conn, tracer, logger) | |
176 | endpoint := makeEndpoint(service) | |
177 | ||
178 | // Notice that the addsvc gRPC client converts the connection to a | |
179 | // complete addsvc, and we just throw away everything except the method | |
180 | // we're interested in. A smarter factory would mux multiple methods | |
181 | // over the same connection. But that would require more work to manage | |
182 | // the returned io.Closer, e.g. reference counting. Since this is for | |
183 | // the purposes of demonstration, we'll just keep it simple. | |
184 | ||
185 | return endpoint, conn, nil | |
186 | } | |
187 | } | |
188 | ||
189 | func stringsvcFactory(ctx context.Context, method, path string) sd.Factory { | |
160 | 190 | return func(instance string) (endpoint.Endpoint, io.Closer, error) { |
161 | var e endpoint.Endpoint | |
162 | 191 | if !strings.HasPrefix(instance, "http") { |
163 | 192 | instance = "http://" + instance |
164 | 193 | } |
165 | u, err := url.Parse(instance) | |
194 | tgt, err := url.Parse(instance) | |
166 | 195 | if err != nil { |
167 | 196 | return nil, nil, err |
168 | 197 | } |
169 | u.Path = path | |
170 | ||
171 | e = httptransport.NewClient(method, u, passEncode, passDecode).Endpoint() | |
172 | return e, nil, nil | |
173 | } | |
174 | } | |
175 | ||
176 | func passEncode(_ context.Context, r *http.Request, request interface{}) error { | |
177 | r.Body = request.(io.ReadCloser) | |
198 | tgt.Path = path | |
199 | ||
200 | // Since stringsvc doesn't have any kind of package we can import, or | |
201 | // any formal spec, we are forced to just assert where the endpoints | |
202 | // live, and write our own code to encode and decode requests and | |
203 | // responses. Ideally, if you write the service, you will want to | |
204 | // provide stronger guarantees to your clients. | |
205 | ||
206 | var ( | |
207 | enc httptransport.EncodeRequestFunc | |
208 | dec httptransport.DecodeResponseFunc | |
209 | ) | |
210 | switch path { | |
211 | case "/uppercase": | |
212 | enc, dec = encodeJSONRequest, decodeUppercaseResponse | |
213 | case "/count": | |
214 | enc, dec = encodeJSONRequest, decodeCountResponse | |
215 | default: | |
216 | return nil, nil, fmt.Errorf("unknown stringsvc path %q", path) | |
217 | } | |
218 | ||
219 | return httptransport.NewClient(method, tgt, enc, dec).Endpoint(), nil, nil | |
220 | } | |
221 | } | |
222 | ||
223 | func encodeJSONRequest(_ context.Context, req *http.Request, request interface{}) error { | |
224 | // Both uppercase and count requests are encoded in the same way: | |
225 | // simple JSON serialization to the request body. | |
226 | var buf bytes.Buffer | |
227 | if err := json.NewEncoder(&buf).Encode(request); err != nil { | |
228 | return err | |
229 | } | |
230 | req.Body = ioutil.NopCloser(&buf) | |
178 | 231 | return nil |
179 | 232 | } |
180 | 233 | |
181 | func passDecode(_ context.Context, r *http.Response) (interface{}, error) { | |
182 | return ioutil.ReadAll(r.Body) | |
183 | } | |
184 | ||
185 | func interrupt() error { | |
186 | c := make(chan os.Signal) | |
187 | signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) | |
188 | return fmt.Errorf("%s", <-c) | |
189 | } | |
234 | func encodeJSONResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { | |
235 | w.Header().Set("Content-Type", "application/json; charset=utf-8") | |
236 | return json.NewEncoder(w).Encode(response) | |
237 | } | |
238 | ||
239 | // I've just copied these functions from stringsvc3/transport.go, inlining the | |
240 | // struct definitions. | |
241 | ||
242 | func decodeUppercaseResponse(ctx context.Context, resp *http.Response) (interface{}, error) { | |
243 | var response struct { | |
244 | V string `json:"v"` | |
245 | Err string `json:"err,omitempty"` | |
246 | } | |
247 | if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { | |
248 | return nil, err | |
249 | } | |
250 | return response, nil | |
251 | } | |
252 | ||
253 | func decodeCountResponse(ctx context.Context, resp *http.Response) (interface{}, error) { | |
254 | var response struct { | |
255 | V int `json:"v"` | |
256 | } | |
257 | if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { | |
258 | return nil, err | |
259 | } | |
260 | return response, nil | |
261 | } | |
262 | ||
263 | func decodeUppercaseRequest(ctx context.Context, req *http.Request) (interface{}, error) { | |
264 | var request struct { | |
265 | S string `json:"s"` | |
266 | } | |
267 | if err := json.NewDecoder(req.Body).Decode(&request); err != nil { | |
268 | return nil, err | |
269 | } | |
270 | return request, nil | |
271 | } | |
272 | ||
273 | func decodeCountRequest(ctx context.Context, req *http.Request) (interface{}, error) { | |
274 | var request struct { | |
275 | S string `json:"s"` | |
276 | } | |
277 | if err := json.NewDecoder(req.Body).Decode(&request); err != nil { | |
278 | return nil, err | |
279 | } | |
280 | return request, nil | |
281 | } |
0 | // Package client provides a profilesvc client based on a predefined Consul | |
1 | // service name and relevant tags. Users must only provide the address of a | |
2 | // Consul server. | |
3 | package client | |
4 | ||
5 | import ( | |
6 | "io" | |
7 | "time" | |
8 | ||
9 | consulapi "github.com/hashicorp/consul/api" | |
10 | ||
11 | "github.com/go-kit/kit/endpoint" | |
12 | "github.com/go-kit/kit/examples/profilesvc" | |
13 | "github.com/go-kit/kit/log" | |
14 | "github.com/go-kit/kit/sd" | |
15 | "github.com/go-kit/kit/sd/consul" | |
16 | "github.com/go-kit/kit/sd/lb" | |
17 | ) | |
18 | ||
19 | // New returns a service that's load-balanced over instances of profilesvc found | |
20 | // in the provided Consul server. The mechanism of looking up profilesvc | |
21 | // instances in Consul is hard-coded into the client. | |
22 | func New(consulAddr string, logger log.Logger) (profilesvc.Service, error) { | |
23 | apiclient, err := consulapi.NewClient(&consulapi.Config{ | |
24 | Address: consulAddr, | |
25 | }) | |
26 | if err != nil { | |
27 | return nil, err | |
28 | } | |
29 | ||
30 | // As the implementer of profilesvc, we declare and enforce these | |
31 | // parameters for all of the profilesvc consumers. | |
32 | var ( | |
33 | consulService = "profilesvc" | |
34 | consulTags = []string{"prod"} | |
35 | passingOnly = true | |
36 | retryMax = 3 | |
37 | retryTimeout = 500 * time.Millisecond | |
38 | ) | |
39 | ||
40 | var ( | |
41 | sdclient = consul.NewClient(apiclient) | |
42 | endpoints profilesvc.Endpoints | |
43 | ) | |
44 | { | |
45 | factory := factoryFor(profilesvc.MakePostProfileEndpoint) | |
46 | subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) | |
47 | balancer := lb.NewRoundRobin(subscriber) | |
48 | retry := lb.Retry(retryMax, retryTimeout, balancer) | |
49 | endpoints.PostProfileEndpoint = retry | |
50 | } | |
51 | { | |
52 | factory := factoryFor(profilesvc.MakeGetProfileEndpoint) | |
53 | subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) | |
54 | balancer := lb.NewRoundRobin(subscriber) | |
55 | retry := lb.Retry(retryMax, retryTimeout, balancer) | |
56 | endpoints.GetProfileEndpoint = retry | |
57 | } | |
58 | { | |
59 | factory := factoryFor(profilesvc.MakePutProfileEndpoint) | |
60 | subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) | |
61 | balancer := lb.NewRoundRobin(subscriber) | |
62 | retry := lb.Retry(retryMax, retryTimeout, balancer) | |
63 | endpoints.PutProfileEndpoint = retry | |
64 | } | |
65 | { | |
66 | factory := factoryFor(profilesvc.MakePatchProfileEndpoint) | |
67 | subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) | |
68 | balancer := lb.NewRoundRobin(subscriber) | |
69 | retry := lb.Retry(retryMax, retryTimeout, balancer) | |
70 | endpoints.PatchProfileEndpoint = retry | |
71 | } | |
72 | { | |
73 | factory := factoryFor(profilesvc.MakeDeleteProfileEndpoint) | |
74 | subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) | |
75 | balancer := lb.NewRoundRobin(subscriber) | |
76 | retry := lb.Retry(retryMax, retryTimeout, balancer) | |
77 | endpoints.DeleteProfileEndpoint = retry | |
78 | } | |
79 | { | |
80 | factory := factoryFor(profilesvc.MakeGetAddressesEndpoint) | |
81 | subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) | |
82 | balancer := lb.NewRoundRobin(subscriber) | |
83 | retry := lb.Retry(retryMax, retryTimeout, balancer) | |
84 | endpoints.GetAddressesEndpoint = retry | |
85 | } | |
86 | { | |
87 | factory := factoryFor(profilesvc.MakeGetAddressEndpoint) | |
88 | subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) | |
89 | balancer := lb.NewRoundRobin(subscriber) | |
90 | retry := lb.Retry(retryMax, retryTimeout, balancer) | |
91 | endpoints.GetAddressEndpoint = retry | |
92 | } | |
93 | { | |
94 | factory := factoryFor(profilesvc.MakePostAddressEndpoint) | |
95 | subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) | |
96 | balancer := lb.NewRoundRobin(subscriber) | |
97 | retry := lb.Retry(retryMax, retryTimeout, balancer) | |
98 | endpoints.PostAddressEndpoint = retry | |
99 | } | |
100 | { | |
101 | factory := factoryFor(profilesvc.MakeDeleteAddressEndpoint) | |
102 | subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) | |
103 | balancer := lb.NewRoundRobin(subscriber) | |
104 | retry := lb.Retry(retryMax, retryTimeout, balancer) | |
105 | endpoints.DeleteAddressEndpoint = retry | |
106 | } | |
107 | ||
108 | return endpoints, nil | |
109 | } | |
110 | ||
111 | func factoryFor(makeEndpoint func(profilesvc.Service) endpoint.Endpoint) sd.Factory { | |
112 | return func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
113 | service, err := profilesvc.MakeClientEndpoints(instance) | |
114 | if err != nil { | |
115 | return nil, nil, err | |
116 | } | |
117 | return makeEndpoint(service), nil, nil | |
118 | } | |
119 | } |
0 | package main | |
1 | ||
2 | import ( | |
3 | "flag" | |
4 | "fmt" | |
5 | "net/http" | |
6 | "os" | |
7 | "os/signal" | |
8 | "syscall" | |
9 | ||
10 | "golang.org/x/net/context" | |
11 | ||
12 | "github.com/go-kit/kit/examples/profilesvc" | |
13 | "github.com/go-kit/kit/log" | |
14 | ) | |
15 | ||
16 | func main() { | |
17 | var ( | |
18 | httpAddr = flag.String("http.addr", ":8080", "HTTP listen address") | |
19 | ) | |
20 | flag.Parse() | |
21 | ||
22 | var logger log.Logger | |
23 | { | |
24 | logger = log.NewLogfmtLogger(os.Stderr) | |
25 | logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC) | |
26 | logger = log.NewContext(logger).With("caller", log.DefaultCaller) | |
27 | } | |
28 | ||
29 | var ctx context.Context | |
30 | { | |
31 | ctx = context.Background() | |
32 | } | |
33 | ||
34 | var s profilesvc.Service | |
35 | { | |
36 | s = profilesvc.NewInmemService() | |
37 | s = profilesvc.LoggingMiddleware(logger)(s) | |
38 | } | |
39 | ||
40 | var h http.Handler | |
41 | { | |
42 | h = profilesvc.MakeHTTPHandler(ctx, s, log.NewContext(logger).With("component", "HTTP")) | |
43 | } | |
44 | ||
45 | errs := make(chan error) | |
46 | go func() { | |
47 | c := make(chan os.Signal) | |
48 | signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) | |
49 | errs <- fmt.Errorf("%s", <-c) | |
50 | }() | |
51 | ||
52 | go func() { | |
53 | logger.Log("transport", "HTTP", "addr", *httpAddr) | |
54 | errs <- http.ListenAndServe(*httpAddr, h) | |
55 | }() | |
56 | ||
57 | logger.Log("exit", <-errs) | |
58 | } |
0 | package main | |
0 | package profilesvc | |
1 | 1 | |
2 | 2 | import ( |
3 | "net/url" | |
4 | "strings" | |
5 | ||
6 | "golang.org/x/net/context" | |
7 | ||
3 | 8 | "github.com/go-kit/kit/endpoint" |
4 | "golang.org/x/net/context" | |
9 | httptransport "github.com/go-kit/kit/transport/http" | |
5 | 10 | ) |
6 | 11 | |
7 | type endpoints struct { | |
8 | postProfileEndpoint endpoint.Endpoint | |
9 | getProfileEndpoint endpoint.Endpoint | |
10 | putProfileEndpoint endpoint.Endpoint | |
11 | patchProfileEndpoint endpoint.Endpoint | |
12 | deleteProfileEndpoint endpoint.Endpoint | |
13 | getAddressesEndpoint endpoint.Endpoint | |
14 | getAddressEndpoint endpoint.Endpoint | |
15 | postAddressEndpoint endpoint.Endpoint | |
16 | deleteAddressEndpoint endpoint.Endpoint | |
17 | } | |
18 | ||
19 | func makeEndpoints(s ProfileService) endpoints { | |
20 | return endpoints{ | |
21 | postProfileEndpoint: makePostProfileEndpoint(s), | |
22 | getProfileEndpoint: makeGetProfileEndpoint(s), | |
23 | putProfileEndpoint: makePutProfileEndpoint(s), | |
24 | patchProfileEndpoint: makePatchProfileEndpoint(s), | |
25 | deleteProfileEndpoint: makeDeleteProfileEndpoint(s), | |
26 | getAddressesEndpoint: makeGetAddressesEndpoint(s), | |
27 | getAddressEndpoint: makeGetAddressEndpoint(s), | |
28 | postAddressEndpoint: makePostAddressEndpoint(s), | |
29 | deleteAddressEndpoint: makeDeleteAddressEndpoint(s), | |
30 | } | |
31 | } | |
32 | ||
33 | type postProfileRequest struct { | |
34 | Profile Profile | |
35 | } | |
36 | ||
37 | type postProfileResponse struct { | |
38 | Err error `json:"err,omitempty"` | |
39 | } | |
40 | ||
41 | func (r postProfileResponse) error() error { return r.Err } | |
42 | ||
43 | // Regarding errors returned from service (business logic) methods, we have two | |
44 | // options. We could return the error via the endpoint itself. That makes | |
45 | // certain things a little bit easier, like providing non-200 HTTP responses to | |
46 | // the client. But Go kit assumes that endpoint errors are (or may be treated | |
47 | // as) transport-domain errors. For example, an endpoint error will count | |
48 | // against a circuit breaker error count. Therefore, it's almost certainly | |
49 | // better to return service (business logic) errors in the response object. This | |
50 | // means we have to do a bit more work in the HTTP response encoder to detect | |
51 | // e.g. a not-found error and provide a proper HTTP status code. That work is | |
52 | // done with the errorer interface, in transport.go. | |
53 | ||
54 | func makePostProfileEndpoint(s ProfileService) endpoint.Endpoint { | |
// Endpoints collects all of the endpoints that compose a profile service. It's
// meant to be used as a helper struct, to collect all of the endpoints into a
// single parameter.
//
// In a server, it's useful for functions that need to operate on a per-endpoint
// basis. For example, you might pass an Endpoints to a function that produces
// an http.Handler, with each method (endpoint) wired up to a specific path. (It
// is probably a mistake in design to invoke the Service methods on the
// Endpoints struct in a server.)
//
// In a client, it's useful to collect individually constructed endpoints into a
// single type that implements the Service interface. For example, you might
// construct individual endpoints using transport/http.NewClient, combine them
// into an Endpoints, and return it to the caller as a Service.
type Endpoints struct {
	// Profile CRUD endpoints, one per Service method.
	PostProfileEndpoint   endpoint.Endpoint
	GetProfileEndpoint    endpoint.Endpoint
	PutProfileEndpoint    endpoint.Endpoint
	PatchProfileEndpoint  endpoint.Endpoint
	DeleteProfileEndpoint endpoint.Endpoint
	// Address sub-resource endpoints, one per Service method.
	GetAddressesEndpoint  endpoint.Endpoint
	GetAddressEndpoint    endpoint.Endpoint
	PostAddressEndpoint   endpoint.Endpoint
	DeleteAddressEndpoint endpoint.Endpoint
}
37 | ||
38 | // MakeServerEndpoints returns an Endpoints struct where each endpoint invokes | |
39 | // the corresponding method on the provided service. Useful in a profilesvc | |
40 | // server. | |
41 | func MakeServerEndpoints(s Service) Endpoints { | |
42 | return Endpoints{ | |
43 | PostProfileEndpoint: MakePostProfileEndpoint(s), | |
44 | GetProfileEndpoint: MakeGetProfileEndpoint(s), | |
45 | PutProfileEndpoint: MakePutProfileEndpoint(s), | |
46 | PatchProfileEndpoint: MakePatchProfileEndpoint(s), | |
47 | DeleteProfileEndpoint: MakeDeleteProfileEndpoint(s), | |
48 | GetAddressesEndpoint: MakeGetAddressesEndpoint(s), | |
49 | GetAddressEndpoint: MakeGetAddressEndpoint(s), | |
50 | PostAddressEndpoint: MakePostAddressEndpoint(s), | |
51 | DeleteAddressEndpoint: MakeDeleteAddressEndpoint(s), | |
52 | } | |
53 | } | |
54 | ||
55 | // MakeClientEndpoints returns an Endpoints struct where each endpoint invokes | |
56 | // the corresponding method on the remote instance, via a transport/http.Client. | |
57 | // Useful in a profilesvc client. | |
58 | func MakeClientEndpoints(instance string) (Endpoints, error) { | |
59 | if !strings.HasPrefix(instance, "http") { | |
60 | instance = "http://" + instance | |
61 | } | |
62 | tgt, err := url.Parse(instance) | |
63 | if err != nil { | |
64 | return Endpoints{}, err | |
65 | } | |
66 | tgt.Path = "" | |
67 | ||
68 | options := []httptransport.ClientOption{} | |
69 | ||
70 | // Note that the request encoders need to modify the request URL, changing | |
71 | // the path and method. That's fine: we simply need to provide specific | |
72 | // encoders for each endpoint. | |
73 | ||
74 | return Endpoints{ | |
75 | PostProfileEndpoint: httptransport.NewClient("POST", tgt, encodePostProfileRequest, decodePostProfileResponse, options...).Endpoint(), | |
76 | GetProfileEndpoint: httptransport.NewClient("GET", tgt, encodeGetProfileRequest, decodeGetProfileResponse, options...).Endpoint(), | |
77 | PutProfileEndpoint: httptransport.NewClient("PUT", tgt, encodePutProfileRequest, decodePutProfileResponse, options...).Endpoint(), | |
78 | PatchProfileEndpoint: httptransport.NewClient("PATCH", tgt, encodePatchProfileRequest, decodePatchProfileResponse, options...).Endpoint(), | |
79 | DeleteProfileEndpoint: httptransport.NewClient("DELETE", tgt, encodeDeleteProfileRequest, decodeDeleteProfileResponse, options...).Endpoint(), | |
80 | GetAddressesEndpoint: httptransport.NewClient("GET", tgt, encodeGetAddressesRequest, decodeGetAddressesResponse, options...).Endpoint(), | |
81 | GetAddressEndpoint: httptransport.NewClient("GET", tgt, encodeGetAddressRequest, decodeGetAddressResponse, options...).Endpoint(), | |
82 | PostAddressEndpoint: httptransport.NewClient("POST", tgt, encodePostAddressRequest, decodePostAddressResponse, options...).Endpoint(), | |
83 | DeleteAddressEndpoint: httptransport.NewClient("DELETE", tgt, encodeDeleteAddressRequest, decodeDeleteAddressResponse, options...).Endpoint(), | |
84 | }, nil | |
85 | } | |
86 | ||
87 | // PostProfile implements Service. Primarily useful in a client. | |
88 | func (e Endpoints) PostProfile(ctx context.Context, p Profile) error { | |
89 | request := postProfileRequest{Profile: p} | |
90 | response, err := e.PostProfileEndpoint(ctx, request) | |
91 | if err != nil { | |
92 | return err | |
93 | } | |
94 | resp := response.(postProfileResponse) | |
95 | return resp.Err | |
96 | } | |
97 | ||
98 | // GetProfile implements Service. Primarily useful in a client. | |
99 | func (e Endpoints) GetProfile(ctx context.Context, id string) (Profile, error) { | |
100 | request := getProfileRequest{ID: id} | |
101 | response, err := e.GetProfileEndpoint(ctx, request) | |
102 | if err != nil { | |
103 | return Profile{}, err | |
104 | } | |
105 | resp := response.(getProfileResponse) | |
106 | return resp.Profile, resp.Err | |
107 | } | |
108 | ||
109 | // PutProfile implements Service. Primarily useful in a client. | |
110 | func (e Endpoints) PutProfile(ctx context.Context, id string, p Profile) error { | |
111 | request := putProfileRequest{ID: id, Profile: p} | |
112 | response, err := e.PutProfileEndpoint(ctx, request) | |
113 | if err != nil { | |
114 | return err | |
115 | } | |
116 | resp := response.(putProfileResponse) | |
117 | return resp.Err | |
118 | } | |
119 | ||
120 | // PatchProfile implements Service. Primarily useful in a client. | |
121 | func (e Endpoints) PatchProfile(ctx context.Context, id string, p Profile) error { | |
122 | request := patchProfileRequest{ID: id, Profile: p} | |
123 | response, err := e.PatchProfileEndpoint(ctx, request) | |
124 | if err != nil { | |
125 | return err | |
126 | } | |
127 | resp := response.(patchProfileResponse) | |
128 | return resp.Err | |
129 | } | |
130 | ||
131 | // DeleteProfile implements Service. Primarily useful in a client. | |
132 | func (e Endpoints) DeleteProfile(ctx context.Context, id string) error { | |
133 | request := deleteProfileRequest{ID: id} | |
134 | response, err := e.DeleteProfileEndpoint(ctx, request) | |
135 | if err != nil { | |
136 | return err | |
137 | } | |
138 | resp := response.(deleteProfileResponse) | |
139 | return resp.Err | |
140 | } | |
141 | ||
142 | // GetAddresses implements Service. Primarily useful in a client. | |
143 | func (e Endpoints) GetAddresses(ctx context.Context, profileID string) ([]Address, error) { | |
144 | request := getAddressesRequest{ProfileID: profileID} | |
145 | response, err := e.GetAddressesEndpoint(ctx, request) | |
146 | if err != nil { | |
147 | return nil, err | |
148 | } | |
149 | resp := response.(getAddressesResponse) | |
150 | return resp.Addresses, resp.Err | |
151 | } | |
152 | ||
153 | // GetAddress implements Service. Primarily useful in a client. | |
154 | func (e Endpoints) GetAddress(ctx context.Context, profileID string, addressID string) (Address, error) { | |
155 | request := getAddressRequest{ProfileID: profileID, AddressID: addressID} | |
156 | response, err := e.GetAddressEndpoint(ctx, request) | |
157 | if err != nil { | |
158 | return Address{}, err | |
159 | } | |
160 | resp := response.(getAddressResponse) | |
161 | return resp.Address, resp.Err | |
162 | } | |
163 | ||
164 | // PostAddress implements Service. Primarily useful in a client. | |
165 | func (e Endpoints) PostAddress(ctx context.Context, profileID string, a Address) error { | |
166 | request := postAddressRequest{ProfileID: profileID, Address: a} | |
167 | response, err := e.PostAddressEndpoint(ctx, request) | |
168 | if err != nil { | |
169 | return err | |
170 | } | |
171 | resp := response.(postAddressResponse) | |
172 | return resp.Err | |
173 | } | |
174 | ||
175 | // DeleteAddress implements Service. Primarily useful in a client. | |
176 | func (e Endpoints) DeleteAddress(ctx context.Context, profileID string, addressID string) error { | |
177 | request := deleteAddressRequest{ProfileID: profileID, AddressID: addressID} | |
178 | response, err := e.DeleteAddressEndpoint(ctx, request) | |
179 | if err != nil { | |
180 | return err | |
181 | } | |
182 | resp := response.(deleteAddressResponse) | |
183 | return resp.Err | |
184 | } | |
185 | ||
186 | // MakePostProfileEndpoint returns an endpoint via the passed service. | |
187 | // Primarily useful in a server. | |
188 | func MakePostProfileEndpoint(s Service) endpoint.Endpoint { | |
55 | 189 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { |
56 | 190 | req := request.(postProfileRequest) |
57 | 191 | e := s.PostProfile(ctx, req.Profile) |
59 | 193 | } |
60 | 194 | } |
61 | 195 | |
196 | // MakeGetProfileEndpoint returns an endpoint via the passed service. | |
197 | // Primarily useful in a server. | |
198 | func MakeGetProfileEndpoint(s Service) endpoint.Endpoint { | |
199 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
200 | req := request.(getProfileRequest) | |
201 | p, e := s.GetProfile(ctx, req.ID) | |
202 | return getProfileResponse{Profile: p, Err: e}, nil | |
203 | } | |
204 | } | |
205 | ||
206 | // MakePutProfileEndpoint returns an endpoint via the passed service. | |
207 | // Primarily useful in a server. | |
208 | func MakePutProfileEndpoint(s Service) endpoint.Endpoint { | |
209 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
210 | req := request.(putProfileRequest) | |
211 | e := s.PutProfile(ctx, req.ID, req.Profile) | |
212 | return putProfileResponse{Err: e}, nil | |
213 | } | |
214 | } | |
215 | ||
216 | // MakePatchProfileEndpoint returns an endpoint via the passed service. | |
217 | // Primarily useful in a server. | |
218 | func MakePatchProfileEndpoint(s Service) endpoint.Endpoint { | |
219 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
220 | req := request.(patchProfileRequest) | |
221 | e := s.PatchProfile(ctx, req.ID, req.Profile) | |
222 | return patchProfileResponse{Err: e}, nil | |
223 | } | |
224 | } | |
225 | ||
226 | // MakeDeleteProfileEndpoint returns an endpoint via the passed service. | |
227 | // Primarily useful in a server. | |
228 | func MakeDeleteProfileEndpoint(s Service) endpoint.Endpoint { | |
229 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
230 | req := request.(deleteProfileRequest) | |
231 | e := s.DeleteProfile(ctx, req.ID) | |
232 | return deleteProfileResponse{Err: e}, nil | |
233 | } | |
234 | } | |
235 | ||
236 | // MakeGetAddressesEndpoint returns an endpoint via the passed service. | |
237 | // Primarily useful in a server. | |
238 | func MakeGetAddressesEndpoint(s Service) endpoint.Endpoint { | |
239 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
240 | req := request.(getAddressesRequest) | |
241 | a, e := s.GetAddresses(ctx, req.ProfileID) | |
242 | return getAddressesResponse{Addresses: a, Err: e}, nil | |
243 | } | |
244 | } | |
245 | ||
246 | // MakeGetAddressEndpoint returns an endpoint via the passed service. | |
247 | // Primarily useful in a server. | |
248 | func MakeGetAddressEndpoint(s Service) endpoint.Endpoint { | |
249 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
250 | req := request.(getAddressRequest) | |
251 | a, e := s.GetAddress(ctx, req.ProfileID, req.AddressID) | |
252 | return getAddressResponse{Address: a, Err: e}, nil | |
253 | } | |
254 | } | |
255 | ||
256 | // MakePostAddressEndpoint returns an endpoint via the passed service. | |
257 | // Primarily useful in a server. | |
258 | func MakePostAddressEndpoint(s Service) endpoint.Endpoint { | |
259 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
260 | req := request.(postAddressRequest) | |
261 | e := s.PostAddress(ctx, req.ProfileID, req.Address) | |
262 | return postAddressResponse{Err: e}, nil | |
263 | } | |
264 | } | |
265 | ||
266 | // MakeDeleteAddressEndpoint returns an endpoint via the passed service. | |
267 | // Primarily useful in a server. | |
268 | func MakeDeleteAddressEndpoint(s Service) endpoint.Endpoint { | |
269 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
270 | req := request.(deleteAddressRequest) | |
271 | e := s.DeleteAddress(ctx, req.ProfileID, req.AddressID) | |
272 | return deleteAddressResponse{Err: e}, nil | |
273 | } | |
274 | } | |
275 | ||
276 | // We have two options to return errors from the business logic. | |
277 | // | |
278 | // We could return the error via the endpoint itself. That makes certain things | |
279 | // a little bit easier, like providing non-200 HTTP responses to the client. But | |
280 | // Go kit assumes that endpoint errors are (or may be treated as) | |
281 | // transport-domain errors. For example, an endpoint error will count against a | |
282 | // circuit breaker error count. | |
283 | // | |
284 | // Therefore, it's often better to return service (business logic) errors in the | |
285 | // response object. This means we have to do a bit more work in the HTTP | |
286 | // response encoder to detect e.g. a not-found error and provide a proper HTTP | |
287 | // status code. That work is done with the errorer interface, in transport.go. | |
288 | // Response types that may contain business-logic errors implement that | |
289 | // interface. | |
290 | ||
// postProfileRequest collects the argument to Service.PostProfile.
type postProfileRequest struct {
	Profile Profile
}

// postProfileResponse carries the business-logic error, if any, from
// Service.PostProfile. Err is typed error (not string) because this response
// is inspected in-process; the transport encoder handles serialization.
type postProfileResponse struct {
	Err error `json:"err,omitempty"`
}

// error implements the errorer interface (see transport.go), letting the HTTP
// response encoder surface business-logic errors as non-200 status codes.
func (r postProfileResponse) error() error { return r.Err }
300 | ||
62 | 301 | type getProfileRequest struct { |
63 | 302 | ID string |
64 | 303 | } |
70 | 309 | |
71 | 310 | func (r getProfileResponse) error() error { return r.Err } |
72 | 311 | |
73 | func makeGetProfileEndpoint(s ProfileService) endpoint.Endpoint { | |
74 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
75 | req := request.(getProfileRequest) | |
76 | p, e := s.GetProfile(ctx, req.ID) | |
77 | return getProfileResponse{Profile: p, Err: e}, nil | |
78 | } | |
79 | } | |
80 | ||
81 | 312 | type putProfileRequest struct { |
82 | 313 | ID string |
83 | 314 | Profile Profile |
89 | 320 | |
90 | 321 | func (r putProfileResponse) error() error { return nil } |
91 | 322 | |
92 | func makePutProfileEndpoint(s ProfileService) endpoint.Endpoint { | |
93 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
94 | req := request.(putProfileRequest) | |
95 | e := s.PutProfile(ctx, req.ID, req.Profile) | |
96 | return putProfileResponse{Err: e}, nil | |
97 | } | |
98 | } | |
99 | ||
100 | 323 | type patchProfileRequest struct { |
101 | 324 | ID string |
102 | 325 | Profile Profile |
108 | 331 | |
109 | 332 | func (r patchProfileResponse) error() error { return r.Err } |
110 | 333 | |
111 | func makePatchProfileEndpoint(s ProfileService) endpoint.Endpoint { | |
112 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
113 | req := request.(patchProfileRequest) | |
114 | e := s.PatchProfile(ctx, req.ID, req.Profile) | |
115 | return patchProfileResponse{Err: e}, nil | |
116 | } | |
117 | } | |
118 | ||
119 | 334 | type deleteProfileRequest struct { |
120 | 335 | ID string |
121 | 336 | } |
125 | 340 | } |
126 | 341 | |
127 | 342 | func (r deleteProfileResponse) error() error { return r.Err } |
128 | ||
129 | func makeDeleteProfileEndpoint(s ProfileService) endpoint.Endpoint { | |
130 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
131 | req := request.(deleteProfileRequest) | |
132 | e := s.DeleteProfile(ctx, req.ID) | |
133 | return deleteProfileResponse{Err: e}, nil | |
134 | } | |
135 | } | |
136 | 343 | |
137 | 344 | type getAddressesRequest struct { |
138 | 345 | ProfileID string |
145 | 352 | |
146 | 353 | func (r getAddressesResponse) error() error { return r.Err } |
147 | 354 | |
148 | func makeGetAddressesEndpoint(s ProfileService) endpoint.Endpoint { | |
149 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
150 | req := request.(getAddressesRequest) | |
151 | a, e := s.GetAddresses(ctx, req.ProfileID) | |
152 | return getAddressesResponse{Addresses: a, Err: e}, nil | |
153 | } | |
154 | } | |
155 | ||
156 | 355 | type getAddressRequest struct { |
157 | 356 | ProfileID string |
158 | 357 | AddressID string |
165 | 364 | |
166 | 365 | func (r getAddressResponse) error() error { return r.Err } |
167 | 366 | |
168 | func makeGetAddressEndpoint(s ProfileService) endpoint.Endpoint { | |
169 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
170 | req := request.(getAddressRequest) | |
171 | a, e := s.GetAddress(ctx, req.ProfileID, req.AddressID) | |
172 | return getAddressResponse{Address: a, Err: e}, nil | |
173 | } | |
174 | } | |
175 | ||
176 | 367 | type postAddressRequest struct { |
177 | 368 | ProfileID string |
178 | 369 | Address Address |
184 | 375 | |
185 | 376 | func (r postAddressResponse) error() error { return r.Err } |
186 | 377 | |
187 | func makePostAddressEndpoint(s ProfileService) endpoint.Endpoint { | |
188 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
189 | req := request.(postAddressRequest) | |
190 | e := s.PostAddress(ctx, req.ProfileID, req.Address) | |
191 | return postAddressResponse{Err: e}, nil | |
192 | } | |
193 | } | |
194 | ||
195 | 378 | type deleteAddressRequest struct { |
196 | 379 | ProfileID string |
197 | 380 | AddressID string |
202 | 385 | } |
203 | 386 | |
204 | 387 | func (r deleteAddressResponse) error() error { return r.Err } |
205 | ||
206 | func makeDeleteAddressEndpoint(s ProfileService) endpoint.Endpoint { | |
207 | return func(ctx context.Context, request interface{}) (response interface{}, err error) { | |
208 | req := request.(deleteAddressRequest) | |
209 | e := s.DeleteAddress(ctx, req.ProfileID, req.AddressID) | |
210 | return deleteAddressResponse{Err: e}, nil | |
211 | } | |
212 | } |
0 | package main | |
1 | ||
2 | import ( | |
3 | "flag" | |
4 | "fmt" | |
5 | "net/http" | |
6 | "os" | |
7 | "os/signal" | |
8 | "syscall" | |
9 | ||
10 | "golang.org/x/net/context" | |
11 | ||
12 | "github.com/go-kit/kit/log" | |
13 | ) | |
14 | ||
15 | func main() { | |
16 | var ( | |
17 | httpAddr = flag.String("http.addr", ":8080", "HTTP listen address") | |
18 | ) | |
19 | flag.Parse() | |
20 | ||
21 | var logger log.Logger | |
22 | { | |
23 | logger = log.NewLogfmtLogger(os.Stderr) | |
24 | logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC) | |
25 | logger = log.NewContext(logger).With("caller", log.DefaultCaller) | |
26 | } | |
27 | ||
28 | var ctx context.Context | |
29 | { | |
30 | ctx = context.Background() | |
31 | } | |
32 | ||
33 | var s ProfileService | |
34 | { | |
35 | s = newInmemService() | |
36 | s = loggingMiddleware{s, log.NewContext(logger).With("component", "svc")} | |
37 | } | |
38 | ||
39 | var h http.Handler | |
40 | { | |
41 | h = makeHandler(ctx, s, log.NewContext(logger).With("component", "http")) | |
42 | } | |
43 | ||
44 | errs := make(chan error, 2) | |
45 | go func() { | |
46 | logger.Log("transport", "http", "address", *httpAddr, "msg", "listening") | |
47 | errs <- http.ListenAndServe(*httpAddr, h) | |
48 | }() | |
49 | go func() { | |
50 | c := make(chan os.Signal) | |
51 | signal.Notify(c, syscall.SIGINT) | |
52 | errs <- fmt.Errorf("%s", <-c) | |
53 | }() | |
54 | ||
55 | logger.Log("terminated", <-errs) | |
56 | } |
0 | package main | |
0 | package profilesvc | |
1 | 1 | |
2 | 2 | import ( |
3 | 3 | "time" |
7 | 7 | "github.com/go-kit/kit/log" |
8 | 8 | ) |
9 | 9 | |
10 | // Middleware describes a service (as opposed to endpoint) middleware. | |
11 | type Middleware func(Service) Service | |
12 | ||
// LoggingMiddleware returns a service Middleware that wraps the next Service
// in a loggingMiddleware, recording activity to the provided logger.
// Exported functions require a doc comment per Go convention.
func LoggingMiddleware(logger log.Logger) Middleware {
	return func(next Service) Service {
		return &loggingMiddleware{
			next:   next,
			logger: logger,
		}
	}
}
21 | ||
10 | 22 | type loggingMiddleware struct { |
11 | next ProfileService | |
23 | next Service | |
12 | 24 | logger log.Logger |
13 | 25 | } |
14 | 26 |
0 | package main | |
0 | package profilesvc | |
1 | 1 | |
2 | 2 | import ( |
3 | 3 | "errors" |
6 | 6 | "golang.org/x/net/context" |
7 | 7 | ) |
8 | 8 | |
9 | // ProfileService is a simple CRUD interface for user profiles. | |
10 | type ProfileService interface { | |
9 | // Service is a simple CRUD interface for user profiles. | |
10 | type Service interface { | |
11 | 11 | PostProfile(ctx context.Context, p Profile) error |
12 | 12 | GetProfile(ctx context.Context, id string) (Profile, error) |
13 | 13 | PutProfile(ctx context.Context, id string, p Profile) error |
35 | 35 | } |
36 | 36 | |
37 | 37 | var ( |
38 | errInconsistentIDs = errors.New("inconsistent IDs") | |
39 | errAlreadyExists = errors.New("already exists") | |
40 | errNotFound = errors.New("not found") | |
38 | ErrInconsistentIDs = errors.New("inconsistent IDs") | |
39 | ErrAlreadyExists = errors.New("already exists") | |
40 | ErrNotFound = errors.New("not found") | |
41 | 41 | ) |
42 | 42 | |
43 | 43 | type inmemService struct { |
45 | 45 | m map[string]Profile |
46 | 46 | } |
47 | 47 | |
48 | func newInmemService() ProfileService { | |
48 | func NewInmemService() Service { | |
49 | 49 | return &inmemService{ |
50 | 50 | m: map[string]Profile{}, |
51 | 51 | } |
55 | 55 | s.mtx.Lock() |
56 | 56 | defer s.mtx.Unlock() |
57 | 57 | if _, ok := s.m[p.ID]; ok { |
58 | return errAlreadyExists // POST = create, don't overwrite | |
58 | return ErrAlreadyExists // POST = create, don't overwrite | |
59 | 59 | } |
60 | 60 | s.m[p.ID] = p |
61 | 61 | return nil |
66 | 66 | defer s.mtx.RUnlock() |
67 | 67 | p, ok := s.m[id] |
68 | 68 | if !ok { |
69 | return Profile{}, errNotFound | |
69 | return Profile{}, ErrNotFound | |
70 | 70 | } |
71 | 71 | return p, nil |
72 | 72 | } |
73 | 73 | |
74 | 74 | func (s *inmemService) PutProfile(ctx context.Context, id string, p Profile) error { |
75 | 75 | if id != p.ID { |
76 | return errInconsistentIDs | |
76 | return ErrInconsistentIDs | |
77 | 77 | } |
78 | 78 | s.mtx.Lock() |
79 | 79 | defer s.mtx.Unlock() |
83 | 83 | |
84 | 84 | func (s *inmemService) PatchProfile(ctx context.Context, id string, p Profile) error { |
85 | 85 | if p.ID != "" && id != p.ID { |
86 | return errInconsistentIDs | |
86 | return ErrInconsistentIDs | |
87 | 87 | } |
88 | 88 | |
89 | 89 | s.mtx.Lock() |
91 | 91 | |
92 | 92 | existing, ok := s.m[id] |
93 | 93 | if !ok { |
94 | return errNotFound // PATCH = update existing, don't create | |
94 | return ErrNotFound // PATCH = update existing, don't create | |
95 | 95 | } |
96 | 96 | |
97 | 97 | // We assume that it's not possible to PATCH the ID, and that it's not |
114 | 114 | s.mtx.Lock() |
115 | 115 | defer s.mtx.Unlock() |
116 | 116 | if _, ok := s.m[id]; !ok { |
117 | return errNotFound | |
117 | return ErrNotFound | |
118 | 118 | } |
119 | 119 | delete(s.m, id) |
120 | 120 | return nil |
125 | 125 | defer s.mtx.RUnlock() |
126 | 126 | p, ok := s.m[profileID] |
127 | 127 | if !ok { |
128 | return []Address{}, errNotFound | |
128 | return []Address{}, ErrNotFound | |
129 | 129 | } |
130 | 130 | return p.Addresses, nil |
131 | 131 | } |
135 | 135 | defer s.mtx.RUnlock() |
136 | 136 | p, ok := s.m[profileID] |
137 | 137 | if !ok { |
138 | return Address{}, errNotFound | |
138 | return Address{}, ErrNotFound | |
139 | 139 | } |
140 | 140 | for _, address := range p.Addresses { |
141 | 141 | if address.ID == addressID { |
142 | 142 | return address, nil |
143 | 143 | } |
144 | 144 | } |
145 | return Address{}, errNotFound | |
145 | return Address{}, ErrNotFound | |
146 | 146 | } |
147 | 147 | |
148 | 148 | func (s *inmemService) PostAddress(ctx context.Context, profileID string, a Address) error { |
150 | 150 | defer s.mtx.Unlock() |
151 | 151 | p, ok := s.m[profileID] |
152 | 152 | if !ok { |
153 | return errNotFound | |
153 | return ErrNotFound | |
154 | 154 | } |
155 | 155 | for _, address := range p.Addresses { |
156 | 156 | if address.ID == a.ID { |
157 | return errAlreadyExists | |
157 | return ErrAlreadyExists | |
158 | 158 | } |
159 | 159 | } |
160 | 160 | p.Addresses = append(p.Addresses, a) |
167 | 167 | defer s.mtx.Unlock() |
168 | 168 | p, ok := s.m[profileID] |
169 | 169 | if !ok { |
170 | return errNotFound | |
170 | return ErrNotFound | |
171 | 171 | } |
172 | 172 | newAddresses := make([]Address, 0, len(p.Addresses)) |
173 | 173 | for _, address := range p.Addresses { |
177 | 177 | newAddresses = append(newAddresses, address) |
178 | 178 | } |
179 | 179 | if len(newAddresses) == len(p.Addresses) { |
180 | return errNotFound | |
180 | return ErrNotFound | |
181 | 181 | } |
182 | 182 | p.Addresses = newAddresses |
183 | 183 | s.m[profileID] = p |
0 | package main | |
0 | package profilesvc | |
1 | ||
2 | // The profilesvc is just over HTTP, so we just have a single transport.go. | |
1 | 3 | |
2 | 4 | import ( |
5 | "bytes" | |
3 | 6 | "encoding/json" |
4 | 7 | "errors" |
5 | stdhttp "net/http" | |
8 | "io/ioutil" | |
9 | "net/http" | |
6 | 10 | |
7 | 11 | "github.com/gorilla/mux" |
8 | 12 | "golang.org/x/net/context" |
9 | 13 | |
10 | kitlog "github.com/go-kit/kit/log" | |
11 | kithttp "github.com/go-kit/kit/transport/http" | |
14 | "net/url" | |
15 | ||
16 | "github.com/go-kit/kit/log" | |
17 | httptransport "github.com/go-kit/kit/transport/http" | |
12 | 18 | ) |
13 | 19 | |
14 | 20 | var ( |
15 | errBadRouting = errors.New("inconsistent mapping between route and handler (programmer error)") | |
21 | // ErrBadRouting is returned when an expected path variable is missing. | |
22 | // It always indicates programmer error. | |
23 | ErrBadRouting = errors.New("inconsistent mapping between route and handler (programmer error)") | |
16 | 24 | ) |
17 | 25 | |
18 | func makeHandler(ctx context.Context, s ProfileService, logger kitlog.Logger) stdhttp.Handler { | |
19 | e := makeEndpoints(s) | |
26 | // MakeHTTPHandler mounts all of the service endpoints into an http.Handler. | |
27 | // Useful in a profilesvc server. | |
28 | func MakeHTTPHandler(ctx context.Context, s Service, logger log.Logger) http.Handler { | |
20 | 29 | r := mux.NewRouter() |
21 | ||
22 | commonOptions := []kithttp.ServerOption{ | |
23 | kithttp.ServerErrorLogger(logger), | |
24 | kithttp.ServerErrorEncoder(encodeError), | |
30 | e := MakeServerEndpoints(s) | |
31 | options := []httptransport.ServerOption{ | |
32 | httptransport.ServerErrorLogger(logger), | |
33 | httptransport.ServerErrorEncoder(encodeError), | |
25 | 34 | } |
26 | 35 | |
27 | 36 | // POST /profiles adds another profile |
34 | 43 | // POST /profiles/:id/addresses add a new address |
35 | 44 | // DELETE /profiles/:id/addresses/:addressID remove an address |
36 | 45 | |
37 | r.Methods("POST").Path("/profiles/").Handler(kithttp.NewServer( | |
38 | ctx, | |
39 | e.postProfileEndpoint, | |
46 | r.Methods("POST").Path("/profiles/").Handler(httptransport.NewServer( | |
47 | ctx, | |
48 | e.PostProfileEndpoint, | |
40 | 49 | decodePostProfileRequest, |
41 | 50 | encodeResponse, |
42 | commonOptions..., | |
43 | )) | |
44 | r.Methods("GET").Path("/profiles/{id}").Handler(kithttp.NewServer( | |
45 | ctx, | |
46 | e.getProfileEndpoint, | |
51 | options..., | |
52 | )) | |
53 | r.Methods("GET").Path("/profiles/{id}").Handler(httptransport.NewServer( | |
54 | ctx, | |
55 | e.GetProfileEndpoint, | |
47 | 56 | decodeGetProfileRequest, |
48 | 57 | encodeResponse, |
49 | commonOptions..., | |
50 | )) | |
51 | r.Methods("PUT").Path("/profiles/{id}").Handler(kithttp.NewServer( | |
52 | ctx, | |
53 | e.putProfileEndpoint, | |
58 | options..., | |
59 | )) | |
60 | r.Methods("PUT").Path("/profiles/{id}").Handler(httptransport.NewServer( | |
61 | ctx, | |
62 | e.PutProfileEndpoint, | |
54 | 63 | decodePutProfileRequest, |
55 | 64 | encodeResponse, |
56 | commonOptions..., | |
57 | )) | |
58 | r.Methods("PATCH").Path("/profiles/{id}").Handler(kithttp.NewServer( | |
59 | ctx, | |
60 | e.patchProfileEndpoint, | |
65 | options..., | |
66 | )) | |
67 | r.Methods("PATCH").Path("/profiles/{id}").Handler(httptransport.NewServer( | |
68 | ctx, | |
69 | e.PatchProfileEndpoint, | |
61 | 70 | decodePatchProfileRequest, |
62 | 71 | encodeResponse, |
63 | commonOptions..., | |
64 | )) | |
65 | r.Methods("DELETE").Path("/profiles/{id}").Handler(kithttp.NewServer( | |
66 | ctx, | |
67 | e.deleteProfileEndpoint, | |
72 | options..., | |
73 | )) | |
74 | r.Methods("DELETE").Path("/profiles/{id}").Handler(httptransport.NewServer( | |
75 | ctx, | |
76 | e.DeleteProfileEndpoint, | |
68 | 77 | decodeDeleteProfileRequest, |
69 | 78 | encodeResponse, |
70 | commonOptions..., | |
71 | )) | |
72 | r.Methods("GET").Path("/profiles/{id}/addresses/").Handler(kithttp.NewServer( | |
73 | ctx, | |
74 | e.getAddressesEndpoint, | |
79 | options..., | |
80 | )) | |
81 | r.Methods("GET").Path("/profiles/{id}/addresses/").Handler(httptransport.NewServer( | |
82 | ctx, | |
83 | e.GetAddressesEndpoint, | |
75 | 84 | decodeGetAddressesRequest, |
76 | 85 | encodeResponse, |
77 | commonOptions..., | |
78 | )) | |
79 | r.Methods("GET").Path("/profiles/{id}/addresses/{addressID}").Handler(kithttp.NewServer( | |
80 | ctx, | |
81 | e.getAddressEndpoint, | |
86 | options..., | |
87 | )) | |
88 | r.Methods("GET").Path("/profiles/{id}/addresses/{addressID}").Handler(httptransport.NewServer( | |
89 | ctx, | |
90 | e.GetAddressEndpoint, | |
82 | 91 | decodeGetAddressRequest, |
83 | 92 | encodeResponse, |
84 | commonOptions..., | |
85 | )) | |
86 | r.Methods("POST").Path("/profiles/{id}/addresses/").Handler(kithttp.NewServer( | |
87 | ctx, | |
88 | e.postAddressEndpoint, | |
93 | options..., | |
94 | )) | |
95 | r.Methods("POST").Path("/profiles/{id}/addresses/").Handler(httptransport.NewServer( | |
96 | ctx, | |
97 | e.PostAddressEndpoint, | |
89 | 98 | decodePostAddressRequest, |
90 | 99 | encodeResponse, |
91 | commonOptions..., | |
92 | )) | |
93 | r.Methods("DELETE").Path("/profiles/{id}/addresses/{addressID}").Handler(kithttp.NewServer( | |
94 | ctx, | |
95 | e.deleteAddressEndpoint, | |
100 | options..., | |
101 | )) | |
102 | r.Methods("DELETE").Path("/profiles/{id}/addresses/{addressID}").Handler(httptransport.NewServer( | |
103 | ctx, | |
104 | e.DeleteAddressEndpoint, | |
96 | 105 | decodeDeleteAddressRequest, |
97 | 106 | encodeResponse, |
98 | commonOptions..., | |
107 | options..., | |
99 | 108 | )) |
100 | 109 | return r |
101 | 110 | } |
102 | 111 | |
103 | func decodePostProfileRequest(_ context.Context, r *stdhttp.Request) (request interface{}, err error) { | |
112 | func decodePostProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { | |
104 | 113 | var req postProfileRequest |
105 | 114 | if e := json.NewDecoder(r.Body).Decode(&req.Profile); e != nil { |
106 | 115 | return nil, e |
108 | 117 | return req, nil |
109 | 118 | } |
110 | 119 | |
111 | func decodeGetProfileRequest(_ context.Context, r *stdhttp.Request) (request interface{}, err error) { | |
112 | vars := mux.Vars(r) | |
113 | id, ok := vars["id"] | |
114 | if !ok { | |
115 | return nil, errBadRouting | |
120 | func decodeGetProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { | |
121 | vars := mux.Vars(r) | |
122 | id, ok := vars["id"] | |
123 | if !ok { | |
124 | return nil, ErrBadRouting | |
116 | 125 | } |
117 | 126 | return getProfileRequest{ID: id}, nil |
118 | 127 | } |
119 | 128 | |
120 | func decodePutProfileRequest(_ context.Context, r *stdhttp.Request) (request interface{}, err error) { | |
121 | vars := mux.Vars(r) | |
122 | id, ok := vars["id"] | |
123 | if !ok { | |
124 | return nil, errBadRouting | |
129 | func decodePutProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { | |
130 | vars := mux.Vars(r) | |
131 | id, ok := vars["id"] | |
132 | if !ok { | |
133 | return nil, ErrBadRouting | |
125 | 134 | } |
126 | 135 | var profile Profile |
127 | 136 | if err := json.NewDecoder(r.Body).Decode(&profile); err != nil { |
133 | 142 | }, nil |
134 | 143 | } |
135 | 144 | |
136 | func decodePatchProfileRequest(_ context.Context, r *stdhttp.Request) (request interface{}, err error) { | |
137 | vars := mux.Vars(r) | |
138 | id, ok := vars["id"] | |
139 | if !ok { | |
140 | return nil, errBadRouting | |
145 | func decodePatchProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { | |
146 | vars := mux.Vars(r) | |
147 | id, ok := vars["id"] | |
148 | if !ok { | |
149 | return nil, ErrBadRouting | |
141 | 150 | } |
142 | 151 | var profile Profile |
143 | 152 | if err := json.NewDecoder(r.Body).Decode(&profile); err != nil { |
149 | 158 | }, nil |
150 | 159 | } |
151 | 160 | |
152 | func decodeDeleteProfileRequest(_ context.Context, r *stdhttp.Request) (request interface{}, err error) { | |
153 | vars := mux.Vars(r) | |
154 | id, ok := vars["id"] | |
155 | if !ok { | |
156 | return nil, errBadRouting | |
161 | func decodeDeleteProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { | |
162 | vars := mux.Vars(r) | |
163 | id, ok := vars["id"] | |
164 | if !ok { | |
165 | return nil, ErrBadRouting | |
157 | 166 | } |
158 | 167 | return deleteProfileRequest{ID: id}, nil |
159 | 168 | } |
160 | 169 | |
161 | func decodeGetAddressesRequest(_ context.Context, r *stdhttp.Request) (request interface{}, err error) { | |
162 | vars := mux.Vars(r) | |
163 | id, ok := vars["id"] | |
164 | if !ok { | |
165 | return nil, errBadRouting | |
170 | func decodeGetAddressesRequest(_ context.Context, r *http.Request) (request interface{}, err error) { | |
171 | vars := mux.Vars(r) | |
172 | id, ok := vars["id"] | |
173 | if !ok { | |
174 | return nil, ErrBadRouting | |
166 | 175 | } |
167 | 176 | return getAddressesRequest{ProfileID: id}, nil |
168 | 177 | } |
169 | 178 | |
170 | func decodeGetAddressRequest(_ context.Context, r *stdhttp.Request) (request interface{}, err error) { | |
171 | vars := mux.Vars(r) | |
172 | id, ok := vars["id"] | |
173 | if !ok { | |
174 | return nil, errBadRouting | |
179 | func decodeGetAddressRequest(_ context.Context, r *http.Request) (request interface{}, err error) { | |
180 | vars := mux.Vars(r) | |
181 | id, ok := vars["id"] | |
182 | if !ok { | |
183 | return nil, ErrBadRouting | |
175 | 184 | } |
176 | 185 | addressID, ok := vars["addressID"] |
177 | 186 | if !ok { |
178 | return nil, errBadRouting | |
187 | return nil, ErrBadRouting | |
179 | 188 | } |
180 | 189 | return getAddressRequest{ |
181 | 190 | ProfileID: id, |
183 | 192 | }, nil |
184 | 193 | } |
185 | 194 | |
186 | func decodePostAddressRequest(_ context.Context, r *stdhttp.Request) (request interface{}, err error) { | |
187 | vars := mux.Vars(r) | |
188 | id, ok := vars["id"] | |
189 | if !ok { | |
190 | return nil, errBadRouting | |
195 | func decodePostAddressRequest(_ context.Context, r *http.Request) (request interface{}, err error) { | |
196 | vars := mux.Vars(r) | |
197 | id, ok := vars["id"] | |
198 | if !ok { | |
199 | return nil, ErrBadRouting | |
191 | 200 | } |
192 | 201 | var address Address |
193 | 202 | if err := json.NewDecoder(r.Body).Decode(&address); err != nil { |
199 | 208 | }, nil |
200 | 209 | } |
201 | 210 | |
202 | func decodeDeleteAddressRequest(_ context.Context, r *stdhttp.Request) (request interface{}, err error) { | |
203 | vars := mux.Vars(r) | |
204 | id, ok := vars["id"] | |
205 | if !ok { | |
206 | return nil, errBadRouting | |
211 | func decodeDeleteAddressRequest(_ context.Context, r *http.Request) (request interface{}, err error) { | |
212 | vars := mux.Vars(r) | |
213 | id, ok := vars["id"] | |
214 | if !ok { | |
215 | return nil, ErrBadRouting | |
207 | 216 | } |
208 | 217 | addressID, ok := vars["addressID"] |
209 | 218 | if !ok { |
210 | return nil, errBadRouting | |
219 | return nil, ErrBadRouting | |
211 | 220 | } |
212 | 221 | return deleteAddressRequest{ |
213 | 222 | ProfileID: id, |
215 | 224 | }, nil |
216 | 225 | } |
217 | 226 | |
218 | // errorer is implemented by all concrete response types. It allows us to | |
219 | // change the HTTP response code without needing to trigger an endpoint | |
220 | // (transport-level) error. For more information, read the big comment in | |
221 | // endpoints.go. | |
227 | func encodePostProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { | |
228 | // r.Methods("POST").Path("/profiles/") | |
229 | req.Method, req.URL.Path = "POST", url.QueryEscape("/profiles/") | |
230 | return encodeRequest(ctx, req, request) | |
231 | } | |
232 | ||
233 | func encodeGetProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { | |
234 | // r.Methods("GET").Path("/profiles/{id}") | |
235 | r := request.(getProfileRequest) | |
236 | req.Method, req.URL.Path = "GET", url.QueryEscape("/profiles/"+r.ID) | |
237 | return encodeRequest(ctx, req, request) | |
238 | } | |
239 | ||
240 | func encodePutProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { | |
241 | // r.Methods("PUT").Path("/profiles/{id}") | |
242 | r := request.(putProfileRequest) | |
243 | req.Method, req.URL.Path = "PUT", url.QueryEscape("/profiles/"+r.ID) | |
244 | return encodeRequest(ctx, req, request) | |
245 | } | |
246 | ||
247 | func encodePatchProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { | |
248 | // r.Methods("PATCH").Path("/profiles/{id}") | |
249 | r := request.(patchProfileRequest) | |
250 | req.Method, req.URL.Path = "PATCH", url.QueryEscape("/profiles/"+r.ID) | |
251 | return encodeRequest(ctx, req, request) | |
252 | } | |
253 | ||
254 | func encodeDeleteProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { | |
255 | // r.Methods("DELETE").Path("/profiles/{id}") | |
256 | r := request.(deleteProfileRequest) | |
257 | req.Method, req.URL.Path = "DELETE", url.QueryEscape("/profiles/"+r.ID) | |
258 | return encodeRequest(ctx, req, request) | |
259 | } | |
260 | ||
261 | func encodeGetAddressesRequest(ctx context.Context, req *http.Request, request interface{}) error { | |
262 | // r.Methods("GET").Path("/profiles/{id}/addresses/") | |
263 | r := request.(getAddressesRequest) | |
264 | req.Method, req.URL.Path = "GET", url.QueryEscape("/profiles/"+r.ProfileID+"/addresses/") | |
265 | return encodeRequest(ctx, req, request) | |
266 | } | |
267 | ||
268 | func encodeGetAddressRequest(ctx context.Context, req *http.Request, request interface{}) error { | |
269 | // r.Methods("GET").Path("/profiles/{id}/addresses/{addressID}") | |
270 | r := request.(getAddressRequest) | |
271 | req.Method, req.URL.Path = "GET", url.QueryEscape("/profiles/"+r.ProfileID+"/addresses/"+r.AddressID) | |
272 | return encodeRequest(ctx, req, request) | |
273 | } | |
274 | ||
275 | func encodePostAddressRequest(ctx context.Context, req *http.Request, request interface{}) error { | |
276 | // r.Methods("POST").Path("/profiles/{id}/addresses/") | |
277 | r := request.(postAddressRequest) | |
278 | req.Method, req.URL.Path = "POST", url.QueryEscape("/profiles/"+r.ProfileID+"/addresses/") | |
279 | return encodeRequest(ctx, req, request) | |
280 | } | |
281 | ||
282 | func encodeDeleteAddressRequest(ctx context.Context, req *http.Request, request interface{}) error { | |
283 | // r.Methods("DELETE").Path("/profiles/{id}/addresses/{addressID}") | |
284 | r := request.(deleteAddressRequest) | |
285 | req.Method, req.URL.Path = "DELETE", url.QueryEscape("/profiles/"+r.ProfileID+"/addresses/"+r.AddressID) | |
286 | return encodeRequest(ctx, req, request) | |
287 | } | |
288 | ||
289 | func decodePostProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
290 | var response postProfileResponse | |
291 | err := json.NewDecoder(resp.Body).Decode(&response) | |
292 | return response, err | |
293 | } | |
294 | ||
295 | func decodeGetProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
296 | var response getProfileResponse | |
297 | err := json.NewDecoder(resp.Body).Decode(&response) | |
298 | return response, err | |
299 | } | |
300 | ||
301 | func decodePutProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
302 | var response putProfileResponse | |
303 | err := json.NewDecoder(resp.Body).Decode(&response) | |
304 | return response, err | |
305 | } | |
306 | ||
307 | func decodePatchProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
308 | var response patchProfileResponse | |
309 | err := json.NewDecoder(resp.Body).Decode(&response) | |
310 | return response, err | |
311 | } | |
312 | ||
313 | func decodeDeleteProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
314 | var response deleteProfileResponse | |
315 | err := json.NewDecoder(resp.Body).Decode(&response) | |
316 | return response, err | |
317 | } | |
318 | ||
319 | func decodeGetAddressesResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
320 | var response getAddressesResponse | |
321 | err := json.NewDecoder(resp.Body).Decode(&response) | |
322 | return response, err | |
323 | } | |
324 | ||
325 | func decodeGetAddressResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
326 | var response getAddressResponse | |
327 | err := json.NewDecoder(resp.Body).Decode(&response) | |
328 | return response, err | |
329 | } | |
330 | ||
331 | func decodePostAddressResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
332 | var response postAddressResponse | |
333 | err := json.NewDecoder(resp.Body).Decode(&response) | |
334 | return response, err | |
335 | } | |
336 | ||
337 | func decodeDeleteAddressResponse(_ context.Context, resp *http.Response) (interface{}, error) { | |
338 | var response deleteAddressResponse | |
339 | err := json.NewDecoder(resp.Body).Decode(&response) | |
340 | return response, err | |
341 | } | |
342 | ||
343 | // errorer is implemented by all concrete response types that may contain | |
344 | // errors. It allows us to change the HTTP response code without needing to | |
345 | // trigger an endpoint (transport-level) error. For more information, read the | |
346 | // big comment in endpoints.go. | |
222 | 347 | type errorer interface { |
223 | 348 | error() error |
224 | 349 | } |
225 | 350 | |
226 | 351 | // encodeResponse is the common method to encode all response types to the |
227 | // client. I chose to do it this way because I didn't know if something more | |
228 | // specific was necessary. It's certainly possible to specialize on a | |
229 | // per-response (per-method) basis. | |
230 | func encodeResponse(ctx context.Context, w stdhttp.ResponseWriter, response interface{}) error { | |
352 | // client. I chose to do it this way because, since we're using JSON, there's no | |
353 | // reason to provide anything more specific. It's certainly possible to | |
354 | // specialize on a per-response (per-method) basis. | |
355 | func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error { | |
231 | 356 | if e, ok := response.(errorer); ok && e.error() != nil { |
232 | 357 | // Not a Go kit transport error, but a business-logic error. |
233 | 358 | // Provide those as HTTP errors. |
234 | 359 | encodeError(ctx, e.error(), w) |
235 | 360 | return nil |
236 | 361 | } |
362 | w.Header().Set("Content-Type", "application/json; charset=utf-8") | |
237 | 363 | return json.NewEncoder(w).Encode(response) |
238 | 364 | } |
239 | 365 | |
240 | func encodeError(_ context.Context, err error, w stdhttp.ResponseWriter) { | |
366 | // encodeRequest likewise JSON-encodes the request to the HTTP request body. | |
367 | // Don't use it directly as a transport/http.Client EncodeRequestFunc: | |
368 | // profilesvc endpoints require mutating the HTTP method and request path. | |
369 | func encodeRequest(_ context.Context, req *http.Request, request interface{}) error { | |
370 | var buf bytes.Buffer | |
371 | err := json.NewEncoder(&buf).Encode(request) | |
372 | if err != nil { | |
373 | return err | |
374 | } | |
375 | req.Body = ioutil.NopCloser(&buf) | |
376 | return nil | |
377 | } | |
378 | ||
379 | func encodeError(_ context.Context, err error, w http.ResponseWriter) { | |
241 | 380 | if err == nil { |
242 | 381 | panic("encodeError with nil error") |
243 | 382 | } |
383 | w.Header().Set("Content-Type", "application/json; charset=utf-8") | |
244 | 384 | w.WriteHeader(codeFrom(err)) |
245 | 385 | json.NewEncoder(w).Encode(map[string]interface{}{ |
246 | 386 | "error": err.Error(), |
249 | 389 | |
250 | 390 | func codeFrom(err error) int { |
251 | 391 | switch err { |
252 | case errNotFound: | |
253 | return stdhttp.StatusNotFound | |
254 | case errAlreadyExists, errInconsistentIDs: | |
255 | return stdhttp.StatusBadRequest | |
392 | case ErrNotFound: | |
393 | return http.StatusNotFound | |
394 | case ErrAlreadyExists, ErrInconsistentIDs: | |
395 | return http.StatusBadRequest | |
256 | 396 | default: |
257 | if e, ok := err.(kithttp.Error); ok { | |
397 | if e, ok := err.(httptransport.Error); ok { | |
258 | 398 | switch e.Domain { |
259 | case kithttp.DomainDecode: | |
260 | return stdhttp.StatusBadRequest | |
261 | case kithttp.DomainDo: | |
262 | return stdhttp.StatusServiceUnavailable | |
399 | case httptransport.DomainDecode: | |
400 | return http.StatusBadRequest | |
401 | case httptransport.DomainDo: | |
402 | return http.StatusServiceUnavailable | |
263 | 403 | default: |
264 | return stdhttp.StatusInternalServerError | |
404 | return http.StatusInternalServerError | |
265 | 405 | } |
266 | 406 | } |
267 | return stdhttp.StatusInternalServerError | |
268 | } | |
269 | } | |
407 | return http.StatusInternalServerError | |
408 | } | |
409 | } |
10 | 10 | requestCount metrics.Counter |
11 | 11 | requestLatency metrics.TimeHistogram |
12 | 12 | countResult metrics.Histogram |
13 | StringService | |
13 | next StringService | |
14 | 14 | } |
15 | 15 | |
16 | 16 | func (mw instrumentingMiddleware) Uppercase(s string) (output string, err error) { |
21 | 21 | mw.requestLatency.With(methodField).With(errorField).Observe(time.Since(begin)) |
22 | 22 | }(time.Now()) |
23 | 23 | |
24 | output, err = mw.StringService.Uppercase(s) | |
24 | output, err = mw.next.Uppercase(s) | |
25 | 25 | return |
26 | 26 | } |
27 | 27 | |
34 | 34 | mw.countResult.Observe(int64(n)) |
35 | 35 | }(time.Now()) |
36 | 36 | |
37 | n = mw.StringService.Count(s) | |
37 | n = mw.next.Count(s) | |
38 | 38 | return |
39 | 39 | } |
7 | 7 | |
8 | 8 | type loggingMiddleware struct { |
9 | 9 | logger log.Logger |
10 | StringService | |
10 | next StringService | |
11 | 11 | } |
12 | 12 | |
13 | 13 | func (mw loggingMiddleware) Uppercase(s string) (output string, err error) { |
21 | 21 | ) |
22 | 22 | }(time.Now()) |
23 | 23 | |
24 | output, err = mw.StringService.Uppercase(s) | |
24 | output, err = mw.next.Uppercase(s) | |
25 | 25 | return |
26 | 26 | } |
27 | 27 | |
35 | 35 | ) |
36 | 36 | }(time.Now()) |
37 | 37 | |
38 | n = mw.StringService.Count(s) | |
38 | n = mw.next.Count(s) | |
39 | 39 | return |
40 | 40 | } |
2 | 2 | import ( |
3 | 3 | "errors" |
4 | 4 | "fmt" |
5 | "io" | |
6 | 5 | "net/url" |
7 | 6 | "strings" |
8 | 7 | "time" |
13 | 12 | |
14 | 13 | "github.com/go-kit/kit/circuitbreaker" |
15 | 14 | "github.com/go-kit/kit/endpoint" |
16 | "github.com/go-kit/kit/loadbalancer" | |
17 | "github.com/go-kit/kit/loadbalancer/static" | |
18 | 15 | "github.com/go-kit/kit/log" |
19 | kitratelimit "github.com/go-kit/kit/ratelimit" | |
16 | "github.com/go-kit/kit/ratelimit" | |
17 | "github.com/go-kit/kit/sd" | |
18 | "github.com/go-kit/kit/sd/lb" | |
20 | 19 | httptransport "github.com/go-kit/kit/transport/http" |
21 | 20 | ) |
22 | 21 | |
23 | func proxyingMiddleware(proxyList string, ctx context.Context, logger log.Logger) ServiceMiddleware { | |
24 | if proxyList == "" { | |
22 | func proxyingMiddleware(instances string, ctx context.Context, logger log.Logger) ServiceMiddleware { | |
23 | // If instances is empty, don't proxy. | |
24 | if instances == "" { | |
25 | 25 | logger.Log("proxy_to", "none") |
26 | 26 | return func(next StringService) StringService { return next } |
27 | 27 | } |
28 | proxies := split(proxyList) | |
29 | logger.Log("proxy_to", fmt.Sprint(proxies)) | |
30 | 28 | |
29 | // Set some parameters for our client. | |
30 | var ( | |
31 | qps = 100 // beyond which we will return an error | |
32 | maxAttempts = 3 // per request, before giving up | |
33 | maxTime = 250 * time.Millisecond // wallclock time, before giving up | |
34 | ) | |
35 | ||
36 | // Otherwise, construct an endpoint for each instance in the list, and add | |
37 | // it to a fixed set of endpoints. In a real service, rather than doing this | |
38 | // by hand, you'd probably use package sd's support for your service | |
39 | // discovery system. | |
40 | var ( | |
41 | instanceList = split(instances) | |
42 | subscriber sd.FixedSubscriber | |
43 | ) | |
44 | logger.Log("proxy_to", fmt.Sprint(instanceList)) | |
45 | for _, instance := range instanceList { | |
46 | var e endpoint.Endpoint | |
47 | e = makeUppercaseProxy(ctx, instance) | |
48 | e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e) | |
49 | e = ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(qps), int64(qps)))(e) | |
50 | subscriber = append(subscriber, e) | |
51 | } | |
52 | ||
53 | // Now, build a single, retrying, load-balancing endpoint out of all of | |
54 | // those individual endpoints. | |
55 | balancer := lb.NewRoundRobin(subscriber) | |
56 | retry := lb.Retry(maxAttempts, maxTime, balancer) | |
57 | ||
58 | // And finally, return the ServiceMiddleware, implemented by proxymw. | |
31 | 59 | return func(next StringService) StringService { |
32 | var ( | |
33 | qps = 100 // max to each instance | |
34 | publisher = static.NewPublisher(proxies, factory(ctx, qps), logger) | |
35 | lb = loadbalancer.NewRoundRobin(publisher) | |
36 | maxAttempts = 3 | |
37 | maxTime = 100 * time.Millisecond | |
38 | endpoint = loadbalancer.Retry(maxAttempts, maxTime, lb) | |
39 | ) | |
40 | return proxymw{ctx, endpoint, next} | |
60 | return proxymw{ctx, next, retry} | |
41 | 61 | } |
42 | 62 | } |
43 | 63 | |
44 | 64 | // proxymw implements StringService, forwarding Uppercase requests to the |
45 | 65 | // provided endpoint, and serving all other (i.e. Count) requests via the |
46 | // embedded StringService. | |
66 | // next StringService. | |
47 | 67 | type proxymw struct { |
48 | context.Context | |
49 | UppercaseEndpoint endpoint.Endpoint | |
50 | StringService | |
68 | ctx context.Context | |
69 | next StringService // Serve most requests via this service... | |
70 | uppercase endpoint.Endpoint // ...except Uppercase, which gets served by this endpoint | |
71 | } | |
72 | ||
73 | func (mw proxymw) Count(s string) int { | |
74 | return mw.next.Count(s) | |
51 | 75 | } |
52 | 76 | |
53 | 77 | func (mw proxymw) Uppercase(s string) (string, error) { |
54 | response, err := mw.UppercaseEndpoint(mw.Context, uppercaseRequest{S: s}) | |
78 | response, err := mw.uppercase(mw.ctx, uppercaseRequest{S: s}) | |
55 | 79 | if err != nil { |
56 | 80 | return "", err |
57 | 81 | } |
61 | 85 | return resp.V, errors.New(resp.Err) |
62 | 86 | } |
63 | 87 | return resp.V, nil |
64 | } | |
65 | ||
66 | func factory(ctx context.Context, qps int) loadbalancer.Factory { | |
67 | return func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
68 | var e endpoint.Endpoint | |
69 | e = makeUppercaseProxy(ctx, instance) | |
70 | e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e) | |
71 | e = kitratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(qps), int64(qps)))(e) | |
72 | return e, nil, nil | |
73 | } | |
74 | 88 | } |
75 | 89 | |
76 | 90 | func makeUppercaseProxy(ctx context.Context, instance string) endpoint.Endpoint { |
0 | # package loadbalancer | |
1 | ||
2 | `package loadbalancer` provides a client-side load balancer abstraction. | |
3 | ||
4 | A publisher is responsible for emitting the most recent set of endpoints for a | |
5 | single logical service. Publishers exist for static endpoints, and endpoints | |
6 | discovered via periodic DNS SRV lookups on a single logical name. Consul and | |
7 | etcd publishers are planned. | |
8 | ||
9 | Different load balancers are implemented on top of publishers. Go kit | |
10 | currently provides random and round-robin load balancers. Smarter behaviors, | |
11 | e.g. load balancing based on underlying endpoint priority/weight, is planned. | |
12 | ||
13 | ## Rationale | |
14 | ||
15 | TODO | |
16 | ||
17 | ## Usage | |
18 | ||
19 | In your client, construct a publisher for a specific remote service, and pass | |
20 | it to a load balancer. Then, request an endpoint from the load balancer | |
21 | whenever you need to make a request to that remote service. | |
22 | ||
23 | ```go | |
24 | import ( | |
25 | "github.com/go-kit/kit/loadbalancer" | |
26 | "github.com/go-kit/kit/loadbalancer/dnssrv" | |
27 | ) | |
28 | ||
29 | func main() { | |
30 | // Construct a load balancer for foosvc, which gets foosvc instances by | |
31 | // polling a specific DNS SRV name. | |
32 | p, err := dnssrv.NewPublisher("foosvc.internal.domain", 5*time.Second, fooFactory, logger) | |
33 | if err != nil { | |
34 | panic(err) | |
35 | } | |
36 | ||
37 | lb := loadbalancer.NewRoundRobin(p) | |
38 | ||
39 | // Get a new endpoint from the load balancer. | |
40 | endpoint, err := lb.Endpoint() | |
41 | if err != nil { | |
42 | panic(err) | |
43 | } | |
44 | ||
45 | // Use the endpoint to make a request. | |
46 | response, err := endpoint(ctx, request) | |
47 | } | |
48 | ||
49 | func fooFactory(instance string) (endpoint.Endpoint, error) { | |
50 | // Convert an instance (host:port) to an endpoint, via a defined transport binding. | |
51 | } | |
52 | ``` | |
53 | ||
54 | It's also possible to wrap a load balancer with a retry strategy, so that it | |
55 | can be used as an endpoint directly. This may make load balancers more | |
56 | convenient to use, at the cost of fine-grained control of failures. | |
57 | ||
58 | ```go | |
59 | func main() { | |
60 | p := dnssrv.NewPublisher("foosvc.internal.domain", 5*time.Second, fooFactory, logger) | |
61 | lb := loadbalancer.NewRoundRobin(p) | |
62 | endpoint := loadbalancer.Retry(3, 5*time.Seconds, lb) | |
63 | ||
64 | response, err := endpoint(ctx, request) // requests will be automatically load balanced | |
65 | } | |
66 | ``` |
0 | package consul | |
1 | ||
2 | import consul "github.com/hashicorp/consul/api" | |
3 | ||
// Client is a wrapper around the Consul API.
type Client interface {
	// Service returns the healthy entries for the given service, filtered
	// by a single tag (pass "" to skip tag filtering).
	Service(service string, tag string, queryOpts *consul.QueryOptions) ([]*consul.ServiceEntry, *consul.QueryMeta, error)
}

// client implements Client on top of the official Consul API client.
type client struct {
	consul *consul.Client
}

// NewClient returns an implementation of the Client interface, expecting a
// fully set-up Consul client.
func NewClient(c *consul.Client) Client {
	return &client{
		consul: c,
	}
}
20 | ||
// Service returns the list of healthy entries for a given service filtered
// by tag. It implements the Client interface.
func (c *client) Service(
	service string,
	tag string,
	opts *consul.QueryOptions,
) ([]*consul.ServiceEntry, *consul.QueryMeta, error) {
	// passingOnly=true: only entries whose health checks are passing.
	return c.consul.Health().Service(service, tag, true, opts)
}
0 | package consul | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "strings" | |
5 | ||
6 | consul "github.com/hashicorp/consul/api" | |
7 | ||
8 | "github.com/go-kit/kit/endpoint" | |
9 | "github.com/go-kit/kit/loadbalancer" | |
10 | "github.com/go-kit/kit/log" | |
11 | ) | |
12 | ||
13 | const defaultIndex = 0 | |
14 | ||
15 | // Publisher yields endpoints for a service in Consul. Updates to the service | |
16 | // are watched and will update the Publisher endpoints. | |
17 | type Publisher struct { | |
18 | cache *loadbalancer.EndpointCache | |
19 | client Client | |
20 | logger log.Logger | |
21 | service string | |
22 | tags []string | |
23 | endpointsc chan []endpoint.Endpoint | |
24 | quitc chan struct{} | |
25 | } | |
26 | ||
// NewPublisher returns a Consul publisher which returns Endpoints for the
// requested service. It only returns instances for which all of the passed
// tags are present.
//
// The initial instance set is fetched synchronously. If that fetch fails,
// the error is logged and the publisher starts empty; the background loop
// retries. As written, the returned error is always nil.
func NewPublisher(
	client Client,
	factory loadbalancer.Factory,
	logger log.Logger,
	service string,
	tags ...string,
) (*Publisher, error) {
	p := &Publisher{
		cache:   loadbalancer.NewEndpointCache(factory, logger),
		client:  client,
		logger:  logger,
		service: service,
		tags:    tags,
		quitc:   make(chan struct{}),
	}

	instances, index, err := p.getInstances(defaultIndex)
	if err == nil {
		logger.Log("service", service, "tags", strings.Join(tags, ", "), "instances", len(instances))
	} else {
		logger.Log("service", service, "tags", strings.Join(tags, ", "), "err", err)
	}
	p.cache.Replace(instances) // instances is nil on error, i.e. start empty

	// Background blocking-query loop; terminated by Stop.
	go p.loop(index)

	return p, nil
}
58 | ||
// Endpoints implements the Publisher interface, returning the current set
// of cached endpoints. The error is always nil (see EndpointCache).
func (p *Publisher) Endpoints() ([]endpoint.Endpoint, error) {
	return p.cache.Endpoints()
}
63 | ||
// Stop terminates the publisher's background watch loop. It must be called
// at most once.
func (p *Publisher) Stop() {
	close(p.quitc)
}
68 | ||
69 | func (p *Publisher) loop(lastIndex uint64) { | |
70 | var ( | |
71 | errc = make(chan error, 1) | |
72 | resc = make(chan response, 1) | |
73 | ) | |
74 | ||
75 | for { | |
76 | go func() { | |
77 | instances, index, err := p.getInstances(lastIndex) | |
78 | if err != nil { | |
79 | errc <- err | |
80 | return | |
81 | } | |
82 | resc <- response{ | |
83 | index: index, | |
84 | instances: instances, | |
85 | } | |
86 | }() | |
87 | ||
88 | select { | |
89 | case err := <-errc: | |
90 | p.logger.Log("service", p.service, "err", err) | |
91 | case res := <-resc: | |
92 | p.cache.Replace(res.instances) | |
93 | lastIndex = res.index | |
94 | case <-p.quitc: | |
95 | return | |
96 | } | |
97 | } | |
98 | } | |
99 | ||
100 | func (p *Publisher) getInstances(lastIndex uint64) ([]string, uint64, error) { | |
101 | tag := "" | |
102 | ||
103 | if len(p.tags) > 0 { | |
104 | tag = p.tags[0] | |
105 | } | |
106 | ||
107 | entries, meta, err := p.client.Service( | |
108 | p.service, | |
109 | tag, | |
110 | &consul.QueryOptions{ | |
111 | WaitIndex: lastIndex, | |
112 | }, | |
113 | ) | |
114 | if err != nil { | |
115 | return nil, 0, err | |
116 | } | |
117 | ||
118 | // If more than one tag is passed we need to filter it in the publisher until | |
119 | // Consul supports multiple tags[0]. | |
120 | // | |
121 | // [0] https://github.com/hashicorp/consul/issues/294 | |
122 | if len(p.tags) > 1 { | |
123 | entries = filterEntries(entries, p.tags[1:]...) | |
124 | } | |
125 | ||
126 | return makeInstances(entries), meta.LastIndex, nil | |
127 | } | |
128 | ||
// response is used as a container to transport instances as well as the
// updated blocking-query index between the worker goroutine and loop.
type response struct {
	index     uint64
	instances []string
}
135 | ||
136 | func filterEntries(entries []*consul.ServiceEntry, tags ...string) []*consul.ServiceEntry { | |
137 | var es []*consul.ServiceEntry | |
138 | ||
139 | ENTRIES: | |
140 | for _, entry := range entries { | |
141 | ts := make(map[string]struct{}, len(entry.Service.Tags)) | |
142 | ||
143 | for _, tag := range entry.Service.Tags { | |
144 | ts[tag] = struct{}{} | |
145 | } | |
146 | ||
147 | for _, tag := range tags { | |
148 | if _, ok := ts[tag]; !ok { | |
149 | continue ENTRIES | |
150 | } | |
151 | } | |
152 | ||
153 | es = append(es, entry) | |
154 | } | |
155 | ||
156 | return es | |
157 | } | |
158 | ||
159 | func makeInstances(entries []*consul.ServiceEntry) []string { | |
160 | instances := make([]string, len(entries)) | |
161 | ||
162 | for i, entry := range entries { | |
163 | addr := entry.Node.Address | |
164 | ||
165 | if entry.Service.Address != "" { | |
166 | addr = entry.Service.Address | |
167 | } | |
168 | ||
169 | instances[i] = fmt.Sprintf("%s:%d", addr, entry.Service.Port) | |
170 | } | |
171 | ||
172 | return instances | |
173 | } |
0 | package consul | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | "testing" | |
5 | ||
6 | consul "github.com/hashicorp/consul/api" | |
7 | "golang.org/x/net/context" | |
8 | ||
9 | "github.com/go-kit/kit/endpoint" | |
10 | "github.com/go-kit/kit/log" | |
11 | ) | |
12 | ||
// consulState is the fixture backing all tests below: three entries for the
// "search" service — two "api" instances (v1, v2) and one "db" instance
// whose Service.Address overrides its Node.Address.
var consulState = []*consul.ServiceEntry{
	{
		Node: &consul.Node{
			Address: "10.0.0.0",
			Node:    "app00.local",
		},
		Service: &consul.AgentService{
			ID:      "search-api-0",
			Port:    8000,
			Service: "search",
			Tags: []string{
				"api",
				"v1",
			},
		},
	},
	{
		Node: &consul.Node{
			Address: "10.0.0.1",
			Node:    "app01.local",
		},
		Service: &consul.AgentService{
			ID:      "search-api-1",
			Port:    8001,
			Service: "search",
			Tags: []string{
				"api",
				"v2",
			},
		},
	},
	{
		Node: &consul.Node{
			Address: "10.0.0.1",
			Node:    "app01.local",
		},
		Service: &consul.AgentService{
			// Service-level address overrides the node address.
			Address: "10.0.0.10",
			ID:      "search-db-0",
			Port:    9000,
			Service: "search",
			Tags: []string{
				"db",
			},
		},
	},
}
60 | ||
// TestPublisher verifies that filtering by one tag ("api") yields the two
// matching instances from the fixture.
func TestPublisher(t *testing.T) {
	var (
		logger = log.NewNopLogger()
		client = newTestClient(consulState)
	)

	p, err := NewPublisher(client, testFactory, logger, "search", "api")
	if err != nil {
		t.Fatalf("publisher setup failed: %s", err)
	}
	defer p.Stop()

	eps, err := p.Endpoints()
	if err != nil {
		t.Fatalf("endpoints failed: %s", err)
	}

	if have, want := len(eps), 2; have != want {
		t.Errorf("have %v, want %v", have, want)
	}
}
82 | ||
// TestPublisherNoService verifies that an unknown service name yields an
// empty endpoint set rather than an error.
func TestPublisherNoService(t *testing.T) {
	var (
		logger = log.NewNopLogger()
		client = newTestClient(consulState)
	)

	p, err := NewPublisher(client, testFactory, logger, "feed")
	if err != nil {
		t.Fatalf("publisher setup failed: %s", err)
	}
	defer p.Stop()

	eps, err := p.Endpoints()
	if err != nil {
		t.Fatalf("endpoints failed: %s", err)
	}

	if have, want := len(eps), 0; have != want {
		t.Fatalf("have %v, want %v", have, want)
	}
}
104 | ||
// TestPublisherWithTags verifies client-side filtering for the second and
// subsequent tags: only the "api"+"v2" instance matches.
func TestPublisherWithTags(t *testing.T) {
	var (
		logger = log.NewNopLogger()
		client = newTestClient(consulState)
	)

	p, err := NewPublisher(client, testFactory, logger, "search", "api", "v2")
	if err != nil {
		t.Fatalf("publisher setup failed: %s", err)
	}
	defer p.Stop()

	eps, err := p.Endpoints()
	if err != nil {
		t.Fatalf("endpoints failed: %s", err)
	}

	if have, want := len(eps), 1; have != want {
		t.Fatalf("have %v, want %v", have, want)
	}
}
126 | ||
// TestPublisherAddressOverride verifies that a non-empty Service.Address
// takes precedence over the Node.Address when forming instance strings;
// testFactory echoes the instance string so we can assert on it.
func TestPublisherAddressOverride(t *testing.T) {
	var (
		ctx    = context.Background()
		logger = log.NewNopLogger()
		client = newTestClient(consulState)
	)

	p, err := NewPublisher(client, testFactory, logger, "search", "db")
	if err != nil {
		t.Fatalf("publisher setup failed: %s", err)
	}
	defer p.Stop()

	eps, err := p.Endpoints()
	if err != nil {
		t.Fatalf("endpoints failed: %s", err)
	}

	if have, want := len(eps), 1; have != want {
		t.Fatalf("have %v, want %v", have, want)
	}

	ins, err := eps[0](ctx, struct{}{})
	if err != nil {
		t.Fatal(err)
	}

	if have, want := ins.(string), "10.0.0.10:9000"; have != want {
		t.Errorf("have %#v, want %#v", have, want)
	}
}
158 | ||
// testClient is an in-memory Client fake backed by a fixed set of entries.
type testClient struct {
	entries []*consul.ServiceEntry
}
162 | ||
163 | func newTestClient(entries []*consul.ServiceEntry) Client { | |
164 | if entries == nil { | |
165 | entries = []*consul.ServiceEntry{} | |
166 | } | |
167 | ||
168 | return &testClient{ | |
169 | entries: entries, | |
170 | } | |
171 | } | |
172 | ||
173 | func (c *testClient) Service( | |
174 | service string, | |
175 | tag string, | |
176 | opts *consul.QueryOptions, | |
177 | ) ([]*consul.ServiceEntry, *consul.QueryMeta, error) { | |
178 | es := []*consul.ServiceEntry{} | |
179 | ||
180 | for _, e := range c.entries { | |
181 | if e.Service.Service != service { | |
182 | continue | |
183 | } | |
184 | if tag != "" { | |
185 | tagMap := map[string]struct{}{} | |
186 | ||
187 | for _, t := range e.Service.Tags { | |
188 | tagMap[t] = struct{}{} | |
189 | } | |
190 | ||
191 | if _, ok := tagMap[tag]; !ok { | |
192 | continue | |
193 | } | |
194 | } | |
195 | ||
196 | es = append(es, e) | |
197 | } | |
198 | ||
199 | return es, &consul.QueryMeta{}, nil | |
200 | } | |
201 | ||
// testFactory returns an endpoint that echoes its instance string, letting
// tests observe which instance an endpoint was built from. Never fails and
// needs no closer.
func testFactory(ins string) (endpoint.Endpoint, io.Closer, error) {
	return func(context.Context, interface{}) (interface{}, error) {
		return ins, nil
	}, nil, nil
}
0 | package dnssrv | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "net" | |
5 | "time" | |
6 | ||
7 | "github.com/go-kit/kit/endpoint" | |
8 | "github.com/go-kit/kit/loadbalancer" | |
9 | "github.com/go-kit/kit/log" | |
10 | ) | |
11 | ||
// Publisher yields endpoints taken from the named DNS SRV record. The name is
// resolved on a fixed schedule. Priorities and weights are ignored.
type Publisher struct {
	name   string
	cache  *loadbalancer.EndpointCache
	logger log.Logger
	quit   chan struct{} // closed by Stop to end the refresh loop
}
20 | ||
// NewPublisher returns a DNS SRV publisher. The name is resolved
// synchronously as part of construction; if that initial resolution fails,
// the failure is logged and the publisher starts with an empty endpoint set
// (no error is returned — the refresh loop retries every ttl). The factory
// is used to convert a host:port to a usable endpoint. The logger is used
// to report DNS and factory errors.
func NewPublisher(
	name string,
	ttl time.Duration,
	factory loadbalancer.Factory,
	logger log.Logger,
) *Publisher {
	return NewPublisherDetailed(name, time.NewTicker(ttl), net.LookupSRV, factory, logger)
}
34 | ||
// NewPublisherDetailed is the same as NewPublisher, but allows users to provide
// an explicit lookup refresh ticker instead of a TTL, and specify the function
// used to perform lookups instead of using net.LookupSRV.
func NewPublisherDetailed(
	name string,
	refreshTicker *time.Ticker,
	lookupSRV func(service, proto, name string) (cname string, addrs []*net.SRV, err error),
	factory loadbalancer.Factory,
	logger log.Logger,
) *Publisher {
	p := &Publisher{
		name:   name,
		cache:  loadbalancer.NewEndpointCache(factory, logger),
		logger: logger,
		quit:   make(chan struct{}),
	}

	// Resolve once, synchronously, so the publisher is primed before return.
	// A failure is logged but not fatal: the refresh loop will retry.
	instances, err := p.resolve(lookupSRV)
	if err == nil {
		logger.Log("name", name, "instances", len(instances))
	} else {
		logger.Log("name", name, "err", err)
	}
	p.cache.Replace(instances)

	go p.loop(refreshTicker, lookupSRV)
	return p
}
63 | ||
// Stop terminates the publisher's refresh loop. It must be called at most
// once.
func (p *Publisher) Stop() {
	close(p.quit)
}
68 | ||
69 | func (p *Publisher) loop( | |
70 | refreshTicker *time.Ticker, | |
71 | lookupSRV func(service, proto, name string) (cname string, addrs []*net.SRV, err error), | |
72 | ) { | |
73 | defer refreshTicker.Stop() | |
74 | for { | |
75 | select { | |
76 | case <-refreshTicker.C: | |
77 | instances, err := p.resolve(lookupSRV) | |
78 | if err != nil { | |
79 | p.logger.Log(p.name, err) | |
80 | continue // don't replace potentially-good with bad | |
81 | } | |
82 | p.cache.Replace(instances) | |
83 | ||
84 | case <-p.quit: | |
85 | return | |
86 | } | |
87 | } | |
88 | } | |
89 | ||
// Endpoints implements the Publisher interface, returning the current set
// of cached endpoints. The error is always nil (see EndpointCache).
func (p *Publisher) Endpoints() ([]endpoint.Endpoint, error) {
	return p.cache.Endpoints()
}
94 | ||
95 | func (p *Publisher) resolve(lookupSRV func(service, proto, name string) (cname string, addrs []*net.SRV, err error)) ([]string, error) { | |
96 | _, addrs, err := lookupSRV("", "", p.name) | |
97 | if err != nil { | |
98 | return []string{}, err | |
99 | } | |
100 | instances := make([]string, len(addrs)) | |
101 | for i, addr := range addrs { | |
102 | instances[i] = net.JoinHostPort(addr.Target, fmt.Sprint(addr.Port)) | |
103 | } | |
104 | return instances, nil | |
105 | } |
0 | package dnssrv | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "io" | |
5 | "net" | |
6 | "sync/atomic" | |
7 | "testing" | |
8 | "time" | |
9 | ||
10 | "golang.org/x/net/context" | |
11 | ||
12 | "github.com/go-kit/kit/endpoint" | |
13 | "github.com/go-kit/kit/log" | |
14 | ) | |
15 | ||
// TestPublisher is a smoke test: construction with a trivial factory should
// produce a publisher whose Endpoints call does not error.
func TestPublisher(t *testing.T) {
	var (
		name    = "foo"
		ttl     = time.Second
		e       = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
		factory = func(string) (endpoint.Endpoint, io.Closer, error) { return e, nil, nil }
		logger  = log.NewNopLogger()
	)

	p := NewPublisher(name, ttl, factory, logger)
	defer p.Stop()

	if _, err := p.Endpoints(); err != nil {
		t.Fatal(err)
	}
}
32 | ||
// TestBadLookup verifies that a failing SRV lookup leaves the publisher
// with zero endpoints, and that exactly one (synchronous, constructor-time)
// lookup was attempted.
func TestBadLookup(t *testing.T) {
	var (
		name      = "some-name"
		ticker    = time.NewTicker(time.Second)
		lookups   = uint32(0)
		lookupSRV = func(string, string, string) (string, []*net.SRV, error) {
			atomic.AddUint32(&lookups, 1)
			return "", nil, errors.New("kaboom")
		}
		e       = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
		factory = func(string) (endpoint.Endpoint, io.Closer, error) { return e, nil, nil }
		logger  = log.NewNopLogger()
	)

	p := NewPublisherDetailed(name, ticker, lookupSRV, factory, logger)
	defer p.Stop()

	endpoints, err := p.Endpoints()
	if err != nil {
		t.Error(err)
	}
	if want, have := 0, len(endpoints); want != have {
		t.Errorf("want %d, have %d", want, have)
	}
	if want, have := uint32(1), atomic.LoadUint32(&lookups); want != have {
		t.Errorf("want %d, have %d", want, have)
	}
}
61 | ||
62 | func TestBadFactory(t *testing.T) { | |
63 | var ( | |
64 | name = "some-name" | |
65 | ticker = time.NewTicker(time.Second) | |
66 | addr = &net.SRV{Target: "foo", Port: 1234} | |
67 | addrs = []*net.SRV{addr} | |
68 | lookupSRV = func(a, b, c string) (string, []*net.SRV, error) { return "", addrs, nil } | |
69 | creates = uint32(0) | |
70 | factory = func(s string) (endpoint.Endpoint, io.Closer, error) { | |
71 | atomic.AddUint32(&creates, 1) | |
72 | return nil, nil, errors.New("kaboom") | |
73 | } | |
74 | logger = log.NewNopLogger() | |
75 | ) | |
76 | ||
77 | p := NewPublisherDetailed(name, ticker, lookupSRV, factory, logger) | |
78 | defer p.Stop() | |
79 | ||
80 | endpoints, err := p.Endpoints() | |
81 | if err != nil { | |
82 | t.Error(err) | |
83 | } | |
84 | if want, have := 0, len(endpoints); want != have { | |
85 | t.Errorf("want %q, have %q", want, have) | |
86 | } | |
87 | if want, have := uint32(1), atomic.LoadUint32(&creates); want != have { | |
88 | t.Errorf("want %d, have %d", want, have) | |
89 | } | |
90 | } | |
91 | ||
// TestRefreshWithChange should verify that a tick whose lookup returns a
// different record set replaces the cached endpoints. Not yet implemented.
func TestRefreshWithChange(t *testing.T) {
	t.Skip("TODO")
}
95 | ||
96 | func TestRefreshNoChange(t *testing.T) { | |
97 | var ( | |
98 | addr = &net.SRV{Target: "my-target", Port: 5678} | |
99 | addrs = []*net.SRV{addr} | |
100 | name = "my-name" | |
101 | ticker = time.NewTicker(time.Second) | |
102 | lookups = uint32(0) | |
103 | lookupSRV = func(string, string, string) (string, []*net.SRV, error) { | |
104 | atomic.AddUint32(&lookups, 1) | |
105 | return "", addrs, nil | |
106 | } | |
107 | e = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } | |
108 | factory = func(string) (endpoint.Endpoint, io.Closer, error) { return e, nil, nil } | |
109 | logger = log.NewNopLogger() | |
110 | ) | |
111 | ||
112 | ticker.Stop() | |
113 | tickc := make(chan time.Time) | |
114 | ticker.C = tickc | |
115 | ||
116 | p := NewPublisherDetailed(name, ticker, lookupSRV, factory, logger) | |
117 | defer p.Stop() | |
118 | ||
119 | if want, have := uint32(1), atomic.LoadUint32(&lookups); want != have { | |
120 | t.Errorf("want %d, have %d", want, have) | |
121 | } | |
122 | ||
123 | tickc <- time.Now() | |
124 | ||
125 | if want, have := uint32(2), atomic.LoadUint32(&lookups); want != have { | |
126 | t.Errorf("want %d, have %d", want, have) | |
127 | } | |
128 | } | |
129 | ||
// TestRefreshResolveError should verify that a failed re-resolve keeps the
// previously cached endpoints (the loop's "don't replace" branch). Not yet
// implemented.
func TestRefreshResolveError(t *testing.T) {
	t.Skip("TODO")
}
0 | package loadbalancer | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | "sort" | |
5 | "sync" | |
6 | "sync/atomic" | |
7 | ||
8 | "github.com/go-kit/kit/endpoint" | |
9 | "github.com/go-kit/kit/log" | |
10 | ) | |
11 | ||
// EndpointCache caches endpoints that need to be deallocated when they're no
// longer useful. Clients update the cache by providing a current set of
// instance strings. The cache converts each instance string to an endpoint
// and a closer via the factory function.
//
// Instance strings are assumed to be unique and are used as keys. Endpoints
// that were in the previous set of instances and are not in the current set
// are considered invalid and closed.
//
// EndpointCache is designed to be used in your publisher implementation.
type EndpointCache struct {
	mtx    sync.Mutex                // serializes Replace; guards m
	f      Factory                   // converts instance string -> endpoint + closer
	m      map[string]endpointCloser // current instance -> endpoint mapping
	cache  atomic.Value              // holds []endpoint.Endpoint, read lock-free by Endpoints
	logger log.Logger
}
29 | ||
30 | // NewEndpointCache produces a new EndpointCache, ready for use. Instance | |
31 | // strings will be converted to endpoints via the provided factory function. | |
32 | // The logger is used to log errors. | |
33 | func NewEndpointCache(f Factory, logger log.Logger) *EndpointCache { | |
34 | endpointCache := &EndpointCache{ | |
35 | f: f, | |
36 | m: map[string]endpointCloser{}, | |
37 | logger: log.NewContext(logger).With("component", "Endpoint Cache"), | |
38 | } | |
39 | ||
40 | endpointCache.cache.Store(make([]endpoint.Endpoint, 0)) | |
41 | ||
42 | return endpointCache | |
43 | } | |
44 | ||
// endpointCloser pairs an endpoint with the (possibly nil) closer that
// releases its resources when the instance disappears.
type endpointCloser struct {
	endpoint.Endpoint
	io.Closer
}
49 | ||
// Replace replaces the current set of endpoints with endpoints manufactured
// by the passed instances. If the same instance exists in both the existing
// and new sets, it's left untouched.
func (t *EndpointCache) Replace(instances []string) {
	t.mtx.Lock()
	defer t.mtx.Unlock()

	// Produce the current set of endpoints.
	oldMap := t.m
	t.m = make(map[string]endpointCloser, len(instances))
	for _, instance := range instances {
		// If it already exists, just copy it over.
		if ec, ok := oldMap[instance]; ok {
			t.m[instance] = ec
			delete(oldMap, instance) // survivors must not be closed below
			continue
		}

		// If it doesn't exist, create it. A factory failure is logged and
		// the instance is skipped; a later Replace may succeed.
		endpoint, closer, err := t.f(instance)
		if err != nil {
			t.logger.Log("instance", instance, "err", err)
			continue
		}
		t.m[instance] = endpointCloser{endpoint, closer}
	}

	// Publish the new endpoint slice, then release the dropped instances.
	t.refreshCache()

	// Close any leftover endpoints.
	for _, ec := range oldMap {
		if ec.Closer != nil {
			ec.Closer.Close()
		}
	}
}
86 | ||
87 | func (t *EndpointCache) refreshCache() { | |
88 | var ( | |
89 | length = len(t.m) | |
90 | instances = make([]string, 0, length) | |
91 | newCache = make([]endpoint.Endpoint, 0, length) | |
92 | ) | |
93 | ||
94 | for instance, _ := range t.m { | |
95 | instances = append(instances, instance) | |
96 | } | |
97 | // Sort the instances for ensuring that Endpoints are returned into the same order if no modified. | |
98 | sort.Strings(instances) | |
99 | ||
100 | for _, instance := range instances { | |
101 | newCache = append(newCache, t.m[instance].Endpoint) | |
102 | } | |
103 | ||
104 | t.cache.Store(newCache) | |
105 | } | |
106 | ||
// Endpoints returns the current set of endpoints, ordered by instance
// string (see refreshCache). Satisfies the Publisher interface. The error
// is always nil.
func (t *EndpointCache) Endpoints() ([]endpoint.Endpoint, error) {
	return t.cache.Load().([]endpoint.Endpoint), nil
}
0 | package loadbalancer_test | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | "testing" | |
5 | "time" | |
6 | ||
7 | "golang.org/x/net/context" | |
8 | ||
9 | "github.com/go-kit/kit/endpoint" | |
10 | "github.com/go-kit/kit/loadbalancer" | |
11 | "github.com/go-kit/kit/log" | |
12 | ) | |
13 | ||
// TestEndpointCache exercises Replace's lifecycle: endpoints survive a
// no-op Replace, and dropped instances have their closers invoked. Closure
// is observed via channel-backed closers with a small timeout.
func TestEndpointCache(t *testing.T) {
	var (
		e  = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
		ca = make(closer)
		cb = make(closer)
		c  = map[string]io.Closer{"a": ca, "b": cb}
		f  = func(s string) (endpoint.Endpoint, io.Closer, error) { return e, c[s], nil }
		ec = loadbalancer.NewEndpointCache(f, log.NewNopLogger())
	)

	// Populate
	ec.Replace([]string{"a", "b"})
	select {
	case <-ca:
		t.Errorf("endpoint a closed, not good")
	case <-cb:
		t.Errorf("endpoint b closed, not good")
	case <-time.After(time.Millisecond):
		t.Logf("no closures yet, good")
	}

	// Duplicate, should be no-op
	ec.Replace([]string{"a", "b"})
	select {
	case <-ca:
		t.Errorf("endpoint a closed, not good")
	case <-cb:
		t.Errorf("endpoint b closed, not good")
	case <-time.After(time.Millisecond):
		t.Logf("no closures yet, good")
	}

	// Delete b
	go ec.Replace([]string{"a"})
	select {
	case <-ca:
		t.Errorf("endpoint a closed, not good")
	case <-cb:
		t.Logf("endpoint b closed, good")
	case <-time.After(time.Millisecond):
		t.Errorf("didn't close the deleted instance in time")
	}

	// Delete a; "" becomes a fresh instance whose closer is nil (not in c).
	go ec.Replace([]string{""})
	select {
	// case <-cb: will succeed, as it's closed
	case <-ca:
		t.Logf("endpoint a closed, good")
	case <-time.After(time.Millisecond):
		t.Errorf("didn't close the deleted instance in time")
	}
}
67 | ||
// closer is a channel-backed io.Closer: Close closes the channel, letting
// tests observe endpoint eviction by receiving on it.
type closer chan struct{}

func (c closer) Close() error { close(c); return nil }
71 | ||
// BenchmarkEndpoints measures concurrent lock-free reads of the endpoint
// cache after a single Replace.
func BenchmarkEndpoints(b *testing.B) {
	var (
		e  = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
		ca = make(closer)
		cb = make(closer)
		c  = map[string]io.Closer{"a": ca, "b": cb}
		f  = func(s string) (endpoint.Endpoint, io.Closer, error) { return e, c[s], nil }
		ec = loadbalancer.NewEndpointCache(f, log.NewNopLogger())
	)

	b.ReportAllocs()

	ec.Replace([]string{"a", "b"})

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			ec.Endpoints()
		}
	})
}
0 | package etcd | |
1 | ||
2 | import ( | |
3 | "crypto/tls" | |
4 | "crypto/x509" | |
5 | "io/ioutil" | |
6 | "net" | |
7 | "net/http" | |
8 | "time" | |
9 | ||
10 | etcd "github.com/coreos/etcd/client" | |
11 | "golang.org/x/net/context" | |
12 | ) | |
13 | ||
// Client is a wrapper around the etcd client.
type Client interface {
	// GetEntries queries the given prefix in etcd and returns a set of entries.
	GetEntries(prefix string) ([]string, error)
	// WatchPrefix starts watching every change for the given prefix in etcd.
	// When a change is detected it will populate responseChan with an
	// *etcd.Response.
	WatchPrefix(prefix string, responseChan chan *etcd.Response)
}
22 | ||
// client implements Client on top of the etcd KeysAPI, using a single
// context for all operations.
type client struct {
	keysAPI etcd.KeysAPI
	ctx     context.Context
}
27 | ||
// ClientOptions holds optional connection settings for NewClient. When both
// Cert and Key are set, the client connects over TLS using CaCert as the
// root CA bundle.
type ClientOptions struct {
	Cert                    string
	Key                     string
	CaCert                  string
	DialTimeout             time.Duration
	DialKeepAline           time.Duration // NOTE(review): misspelling of "DialKeepAlive", but exported — renaming would break callers.
	HeaderTimeoutPerRequest time.Duration
}
36 | ||
37 | // NewClient returns an *etcd.Client with a connection to the named machines. | |
38 | // It will return an error if a connection to the cluster cannot be made. | |
39 | // The parameter machines needs to be a full URL with schemas. | |
40 | // e.g. "http://localhost:2379" will work, but "localhost:2379" will not. | |
41 | func NewClient(ctx context.Context, machines []string, options *ClientOptions) (Client, error) { | |
42 | var ( | |
43 | c etcd.KeysAPI | |
44 | err error | |
45 | caCertCt []byte | |
46 | tlsCert tls.Certificate | |
47 | ) | |
48 | if options == nil { | |
49 | options = &ClientOptions{} | |
50 | } | |
51 | ||
52 | if options.Cert != "" && options.Key != "" { | |
53 | tlsCert, err = tls.LoadX509KeyPair(options.Cert, options.Key) | |
54 | if err != nil { | |
55 | return nil, err | |
56 | } | |
57 | ||
58 | caCertCt, err = ioutil.ReadFile(options.CaCert) | |
59 | if err != nil { | |
60 | return nil, err | |
61 | } | |
62 | caCertPool := x509.NewCertPool() | |
63 | caCertPool.AppendCertsFromPEM(caCertCt) | |
64 | ||
65 | tlsConfig := &tls.Config{ | |
66 | Certificates: []tls.Certificate{tlsCert}, | |
67 | RootCAs: caCertPool, | |
68 | } | |
69 | ||
70 | transport := &http.Transport{ | |
71 | TLSClientConfig: tlsConfig, | |
72 | Dial: func(network, addr string) (net.Conn, error) { | |
73 | dial := &net.Dialer{ | |
74 | Timeout: options.DialTimeout, | |
75 | KeepAlive: options.DialKeepAline, | |
76 | } | |
77 | return dial.Dial(network, addr) | |
78 | }, | |
79 | } | |
80 | ||
81 | cfg := etcd.Config{ | |
82 | Endpoints: machines, | |
83 | Transport: transport, | |
84 | HeaderTimeoutPerRequest: options.HeaderTimeoutPerRequest, | |
85 | } | |
86 | ce, err := etcd.New(cfg) | |
87 | if err != nil { | |
88 | return nil, err | |
89 | } | |
90 | c = etcd.NewKeysAPI(ce) | |
91 | } else { | |
92 | cfg := etcd.Config{ | |
93 | Endpoints: machines, | |
94 | Transport: etcd.DefaultTransport, | |
95 | HeaderTimeoutPerRequest: options.HeaderTimeoutPerRequest, | |
96 | } | |
97 | ce, err := etcd.New(cfg) | |
98 | if err != nil { | |
99 | return nil, err | |
100 | } | |
101 | c = etcd.NewKeysAPI(ce) | |
102 | } | |
103 | return &client{c, ctx}, nil | |
104 | } | |
105 | ||
106 | // GetEntries implements the etcd Client interface. | |
107 | func (c *client) GetEntries(key string) ([]string, error) { | |
108 | resp, err := c.keysAPI.Get(c.ctx, key, &etcd.GetOptions{Recursive: true}) | |
109 | if err != nil { | |
110 | return nil, err | |
111 | } | |
112 | ||
113 | entries := make([]string, len(resp.Node.Nodes)) | |
114 | for i, node := range resp.Node.Nodes { | |
115 | entries[i] = node.Value | |
116 | } | |
117 | return entries, nil | |
118 | } | |
119 | ||
// WatchPrefix implements the etcd Client interface. It blocks, forwarding
// every observed response to responseChan. On a watch error it returns
// silently — the error is not surfaced to the caller.
func (c *client) WatchPrefix(prefix string, responseChan chan *etcd.Response) {
	watch := c.keysAPI.Watcher(prefix, &etcd.WatcherOptions{AfterIndex: 0, Recursive: true})
	for {
		res, err := watch.Next(c.ctx)
		if err != nil {
			return
		}
		responseChan <- res
	}
}
0 | package etcd | |
1 | ||
2 | import ( | |
3 | etcd "github.com/coreos/etcd/client" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | "github.com/go-kit/kit/loadbalancer" | |
7 | "github.com/go-kit/kit/log" | |
8 | ) | |
9 | ||
// Publisher yields endpoints stored in a certain etcd keyspace. Any kind of
// change in that keyspace is watched and will update the Publisher endpoints.
type Publisher struct {
	client Client
	prefix string
	cache  *loadbalancer.EndpointCache
	logger log.Logger
	quit   chan struct{} // closed by Stop to end the watch loop
}
19 | ||
// NewPublisher returns an etcd publisher. Etcd will start watching the given
// prefix for changes and update the Publisher endpoints.
//
// The initial entries are fetched synchronously; on failure the error is
// logged and the publisher starts empty. As written, the returned error is
// always nil.
func NewPublisher(c Client, prefix string, f loadbalancer.Factory, logger log.Logger) (*Publisher, error) {
	p := &Publisher{
		client: c,
		prefix: prefix,
		cache:  loadbalancer.NewEndpointCache(f, logger),
		logger: logger,
		quit:   make(chan struct{}),
	}

	instances, err := p.client.GetEntries(p.prefix)
	if err == nil {
		logger.Log("prefix", p.prefix, "instances", len(instances))
	} else {
		logger.Log("prefix", p.prefix, "err", err)
	}
	p.cache.Replace(instances)

	go p.loop()
	return p, nil
}
42 | ||
// loop watches the prefix and refreshes the endpoint cache on every change
// notification until Stop is called.
func (p *Publisher) loop() {
	responseChan := make(chan *etcd.Response)
	go p.client.WatchPrefix(p.prefix, responseChan)
	for {
		select {
		case <-responseChan:
			// The response content is discarded: re-list the whole prefix
			// so the cache reflects the complete current state.
			instances, err := p.client.GetEntries(p.prefix)
			if err != nil {
				p.logger.Log("msg", "failed to retrieve entries", "err", err)
				continue
			}
			p.cache.Replace(instances)

		case <-p.quit:
			return
		}
	}
}
61 | ||
// Endpoints implements the Publisher interface. It returns the current set of
// endpoints held by the underlying cache.
func (p *Publisher) Endpoints() ([]endpoint.Endpoint, error) {
	return p.cache.Endpoints()
}
66 | ||
// Stop terminates the Publisher by closing the quit channel, which ends the
// watch loop. Stop must be called at most once.
func (p *Publisher) Stop() {
	close(p.quit)
}
0 | package etcd_test | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "io" | |
5 | "testing" | |
6 | ||
7 | stdetcd "github.com/coreos/etcd/client" | |
8 | "golang.org/x/net/context" | |
9 | ||
10 | "github.com/go-kit/kit/endpoint" | |
11 | kitetcd "github.com/go-kit/kit/loadbalancer/etcd" | |
12 | "github.com/go-kit/kit/log" | |
13 | ) | |
14 | ||
// Shared test fixtures: a fake etcd node tree with two instances under /foo,
// and a canned Response wrapping it.
var (
	node = &stdetcd.Node{
		Key: "/foo",
		Nodes: []*stdetcd.Node{
			{Key: "/foo/1", Value: "1:1"},
			{Key: "/foo/2", Value: "1:2"},
		},
	}
	fakeResponse = &stdetcd.Response{
		Node: node,
	}
)
27 | ||
28 | func TestPublisher(t *testing.T) { | |
29 | var ( | |
30 | logger = log.NewNopLogger() | |
31 | e = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } | |
32 | ) | |
33 | ||
34 | factory := func(string) (endpoint.Endpoint, io.Closer, error) { | |
35 | return e, nil, nil | |
36 | } | |
37 | ||
38 | client := &fakeClient{ | |
39 | responses: map[string]*stdetcd.Response{"/foo": fakeResponse}, | |
40 | } | |
41 | ||
42 | p, err := kitetcd.NewPublisher(client, "/foo", factory, logger) | |
43 | if err != nil { | |
44 | t.Fatalf("failed to create new publisher: %v", err) | |
45 | } | |
46 | defer p.Stop() | |
47 | ||
48 | if _, err := p.Endpoints(); err != nil { | |
49 | t.Fatal(err) | |
50 | } | |
51 | } | |
52 | ||
53 | func TestBadFactory(t *testing.T) { | |
54 | logger := log.NewNopLogger() | |
55 | ||
56 | factory := func(string) (endpoint.Endpoint, io.Closer, error) { | |
57 | return nil, nil, errors.New("kaboom") | |
58 | } | |
59 | ||
60 | client := &fakeClient{ | |
61 | responses: map[string]*stdetcd.Response{"/foo": fakeResponse}, | |
62 | } | |
63 | ||
64 | p, err := kitetcd.NewPublisher(client, "/foo", factory, logger) | |
65 | if err != nil { | |
66 | t.Fatalf("failed to create new publisher: %v", err) | |
67 | } | |
68 | defer p.Stop() | |
69 | ||
70 | endpoints, err := p.Endpoints() | |
71 | if err != nil { | |
72 | t.Fatal(err) | |
73 | } | |
74 | ||
75 | if want, have := 0, len(endpoints); want != have { | |
76 | t.Errorf("want %q, have %q", want, have) | |
77 | } | |
78 | } | |
79 | ||
// fakeClient is an in-memory stand-in for the etcd Client interface, serving
// canned responses keyed by prefix.
type fakeClient struct {
	responses map[string]*stdetcd.Response
}
83 | ||
84 | func (c *fakeClient) GetEntries(prefix string) ([]string, error) { | |
85 | response, ok := c.responses[prefix] | |
86 | if !ok { | |
87 | return nil, errors.New("key not exist") | |
88 | } | |
89 | ||
90 | entries := make([]string, len(response.Node.Nodes)) | |
91 | for i, node := range response.Node.Nodes { | |
92 | entries[i] = node.Value | |
93 | } | |
94 | return entries, nil | |
95 | } | |
96 | ||
97 | func (c *fakeClient) WatchPrefix(prefix string, responseChan chan *stdetcd.Response) {} |
0 | package loadbalancer | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | ) | |
7 | ||
// Factory is a function that converts an instance string, e.g. a host:port,
// to a usable endpoint. Factories are used by load balancers to convert
// instances returned by Publishers (typically host:port strings) into
// endpoints. Users are expected to provide their own factory functions that
// assume specific transports, or can deduce transports by parsing the
// instance string. The returned io.Closer, when non-nil, releases any
// resources held by the endpoint; either may be nil on error.
type Factory func(instance string) (endpoint.Endpoint, io.Closer, error)
0 | package fixed | |
1 | ||
2 | import ( | |
3 | "sync" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | ) | |
7 | ||
// Publisher yields the same set of fixed endpoints. The set may be swapped
// wholesale via Replace; reads and writes are guarded by mtx.
type Publisher struct {
	mtx       sync.RWMutex
	endpoints []endpoint.Endpoint
}
13 | ||
14 | // NewPublisher returns a fixed endpoint Publisher. | |
15 | func NewPublisher(endpoints []endpoint.Endpoint) *Publisher { | |
16 | return &Publisher{ | |
17 | endpoints: endpoints, | |
18 | } | |
19 | } | |
20 | ||
21 | // Endpoints implements the Publisher interface. | |
22 | func (p *Publisher) Endpoints() ([]endpoint.Endpoint, error) { | |
23 | p.mtx.RLock() | |
24 | defer p.mtx.RUnlock() | |
25 | return p.endpoints, nil | |
26 | } | |
27 | ||
28 | // Replace is a utility method to swap out the underlying endpoints of an | |
29 | // existing fixed publisher. It's useful mostly for testing. | |
30 | func (p *Publisher) Replace(endpoints []endpoint.Endpoint) { | |
31 | p.mtx.Lock() | |
32 | defer p.mtx.Unlock() | |
33 | p.endpoints = endpoints | |
34 | } |
0 | package fixed_test | |
1 | ||
2 | import ( | |
3 | "reflect" | |
4 | "testing" | |
5 | ||
6 | "golang.org/x/net/context" | |
7 | ||
8 | "github.com/go-kit/kit/endpoint" | |
9 | "github.com/go-kit/kit/loadbalancer/fixed" | |
10 | ) | |
11 | ||
12 | func TestFixed(t *testing.T) { | |
13 | var ( | |
14 | e1 = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } | |
15 | e2 = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } | |
16 | endpoints = []endpoint.Endpoint{e1, e2} | |
17 | ) | |
18 | p := fixed.NewPublisher(endpoints) | |
19 | have, err := p.Endpoints() | |
20 | if err != nil { | |
21 | t.Fatal(err) | |
22 | } | |
23 | if want := endpoints; !reflect.DeepEqual(want, have) { | |
24 | t.Fatalf("want %#+v, have %#+v", want, have) | |
25 | } | |
26 | } | |
27 | ||
28 | func TestFixedReplace(t *testing.T) { | |
29 | p := fixed.NewPublisher([]endpoint.Endpoint{ | |
30 | func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }, | |
31 | }) | |
32 | have, err := p.Endpoints() | |
33 | if err != nil { | |
34 | t.Fatal(err) | |
35 | } | |
36 | if want, have := 1, len(have); want != have { | |
37 | t.Fatalf("want %d, have %d", want, have) | |
38 | } | |
39 | p.Replace([]endpoint.Endpoint{}) | |
40 | have, err = p.Endpoints() | |
41 | if err != nil { | |
42 | t.Fatal(err) | |
43 | } | |
44 | if want, have := 0, len(have); want != have { | |
45 | t.Fatalf("want %d, have %d", want, have) | |
46 | } | |
47 | } |
0 | package loadbalancer | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | ) | |
7 | ||
// LoadBalancer describes something that can yield endpoints for a remote
// service method. Each call to Endpoint may return a different endpoint,
// depending on the balancing strategy.
type LoadBalancer interface {
	Endpoint() (endpoint.Endpoint, error)
}
13 | ||
// ErrNoEndpoints is returned when a load balancer (or one of its components)
// has no endpoints to return. In a request lifecycle, this is usually a fatal
// error. Compare against it directly: err == ErrNoEndpoints.
var ErrNoEndpoints = errors.New("no endpoints available")
0 | package loadbalancer | |
1 | ||
2 | import "github.com/go-kit/kit/endpoint" | |
3 | ||
// Publisher describes something that provides a set of identical endpoints.
// Different publisher implementations exist for different kinds of service
// discovery systems. Load balancers consume Publishers and choose among the
// returned endpoints.
type Publisher interface {
	Endpoints() ([]endpoint.Endpoint, error)
}
0 | package loadbalancer | |
1 | ||
2 | import ( | |
3 | "math/rand" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | ) | |
7 | ||
// Random is a completely stateless load balancer that chooses a random
// endpoint to return each time.
type Random struct {
	p Publisher  // source of the current endpoint set
	r *rand.Rand // seeded PRNG; not safe for concurrent use without external sync
}
14 | ||
15 | // NewRandom returns a new Random load balancer. | |
16 | func NewRandom(p Publisher, seed int64) *Random { | |
17 | return &Random{ | |
18 | p: p, | |
19 | r: rand.New(rand.NewSource(seed)), | |
20 | } | |
21 | } | |
22 | ||
23 | // Endpoint implements the LoadBalancer interface. | |
24 | func (r *Random) Endpoint() (endpoint.Endpoint, error) { | |
25 | endpoints, err := r.p.Endpoints() | |
26 | if err != nil { | |
27 | return nil, err | |
28 | } | |
29 | if len(endpoints) <= 0 { | |
30 | return nil, ErrNoEndpoints | |
31 | } | |
32 | return endpoints[r.r.Intn(len(endpoints))], nil | |
33 | } |
0 | package loadbalancer_test | |
1 | ||
2 | import ( | |
3 | "math" | |
4 | "testing" | |
5 | ||
6 | "golang.org/x/net/context" | |
7 | ||
8 | "github.com/go-kit/kit/endpoint" | |
9 | "github.com/go-kit/kit/loadbalancer" | |
10 | "github.com/go-kit/kit/loadbalancer/fixed" | |
11 | ) | |
12 | ||
13 | func TestRandomDistribution(t *testing.T) { | |
14 | var ( | |
15 | n = 3 | |
16 | endpoints = make([]endpoint.Endpoint, n) | |
17 | counts = make([]int, n) | |
18 | seed = int64(123) | |
19 | ctx = context.Background() | |
20 | iterations = 100000 | |
21 | want = iterations / n | |
22 | tolerance = want / 100 // 1% | |
23 | ) | |
24 | ||
25 | for i := 0; i < n; i++ { | |
26 | i0 := i | |
27 | endpoints[i] = func(context.Context, interface{}) (interface{}, error) { counts[i0]++; return struct{}{}, nil } | |
28 | } | |
29 | ||
30 | lb := loadbalancer.NewRandom(fixed.NewPublisher(endpoints), seed) | |
31 | ||
32 | for i := 0; i < iterations; i++ { | |
33 | e, err := lb.Endpoint() | |
34 | if err != nil { | |
35 | t.Fatal(err) | |
36 | } | |
37 | if _, err := e(ctx, struct{}{}); err != nil { | |
38 | t.Error(err) | |
39 | } | |
40 | } | |
41 | ||
42 | for i, have := range counts { | |
43 | if math.Abs(float64(want-have)) > float64(tolerance) { | |
44 | t.Errorf("%d: want %d, have %d", i, want, have) | |
45 | } | |
46 | } | |
47 | } | |
48 | ||
// TestRandomBadPublisher should verify that publisher errors propagate
// through Random.Endpoint; not yet implemented.
func TestRandomBadPublisher(t *testing.T) {
	t.Skip("TODO")
}
52 | ||
53 | func TestRandomNoEndpoints(t *testing.T) { | |
54 | lb := loadbalancer.NewRandom(fixed.NewPublisher([]endpoint.Endpoint{}), 123) | |
55 | _, have := lb.Endpoint() | |
56 | if want := loadbalancer.ErrNoEndpoints; want != have { | |
57 | t.Errorf("want %q, have %q", want, have) | |
58 | } | |
59 | } |
0 | package loadbalancer | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "strings" | |
5 | "time" | |
6 | ||
7 | "golang.org/x/net/context" | |
8 | ||
9 | "github.com/go-kit/kit/endpoint" | |
10 | ) | |
11 | ||
// Retry wraps the load balancer to make it behave like a simple endpoint.
// Requests to the endpoint will be automatically load balanced via the load
// balancer. Requests that return errors will be retried until they succeed,
// up to max times, or until the timeout is elapsed, whichever comes first.
// On final failure, the returned error aggregates every per-attempt error.
// Panics if lb is nil, since there is no sensible fallback.
func Retry(max int, timeout time.Duration, lb LoadBalancer) endpoint.Endpoint {
	if lb == nil {
		panic("nil LoadBalancer")
	}

	return func(ctx context.Context, request interface{}) (interface{}, error) {
		var (
			// newctx bounds all attempts by a single shared deadline.
			newctx, cancel = context.WithTimeout(ctx, timeout)
			// Capacity 1 so a straggling attempt's single send does not
			// block forever after we have already returned on timeout.
			responses = make(chan interface{}, 1)
			errs      = make(chan error, 1)
			// a accumulates the error string of each failed attempt.
			a = []string{}
		)
		defer cancel()
		for i := 1; i <= max; i++ {
			// Each attempt picks a fresh endpoint from the balancer and
			// reports exactly one outcome on responses or errs.
			go func() {
				e, err := lb.Endpoint()
				if err != nil {
					errs <- err
					return
				}
				response, err := e(newctx, request)
				if err != nil {
					errs <- err
					return
				}
				responses <- response
			}()

			select {
			case <-newctx.Done():
				return nil, newctx.Err()
			case response := <-responses:
				return response, nil
			case err := <-errs:
				// Record the failure and try the next endpoint.
				a = append(a, err.Error())
				continue
			}
		}
		return nil, fmt.Errorf("retry attempts exceeded (%s)", strings.Join(a, "; "))
	}
}
0 | package loadbalancer_test | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "testing" | |
5 | "time" | |
6 | ||
7 | "golang.org/x/net/context" | |
8 | ||
9 | "github.com/go-kit/kit/endpoint" | |
10 | "github.com/go-kit/kit/loadbalancer" | |
11 | "github.com/go-kit/kit/loadbalancer/fixed" | |
12 | ) | |
13 | ||
14 | func TestRetryMaxTotalFail(t *testing.T) { | |
15 | var ( | |
16 | endpoints = []endpoint.Endpoint{} // no endpoints | |
17 | p = fixed.NewPublisher(endpoints) | |
18 | lb = loadbalancer.NewRoundRobin(p) | |
19 | retry = loadbalancer.Retry(999, time.Second, lb) // lots of retries | |
20 | ctx = context.Background() | |
21 | ) | |
22 | if _, err := retry(ctx, struct{}{}); err == nil { | |
23 | t.Errorf("expected error, got none") // should fail | |
24 | } | |
25 | } | |
26 | ||
27 | func TestRetryMaxPartialFail(t *testing.T) { | |
28 | var ( | |
29 | endpoints = []endpoint.Endpoint{ | |
30 | func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error one") }, | |
31 | func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error two") }, | |
32 | func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil /* OK */ }, | |
33 | } | |
34 | retries = len(endpoints) - 1 // not quite enough retries | |
35 | p = fixed.NewPublisher(endpoints) | |
36 | lb = loadbalancer.NewRoundRobin(p) | |
37 | ctx = context.Background() | |
38 | ) | |
39 | if _, err := loadbalancer.Retry(retries, time.Second, lb)(ctx, struct{}{}); err == nil { | |
40 | t.Errorf("expected error, got none") | |
41 | } | |
42 | } | |
43 | ||
44 | func TestRetryMaxSuccess(t *testing.T) { | |
45 | var ( | |
46 | endpoints = []endpoint.Endpoint{ | |
47 | func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error one") }, | |
48 | func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error two") }, | |
49 | func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil /* OK */ }, | |
50 | } | |
51 | retries = len(endpoints) // exactly enough retries | |
52 | p = fixed.NewPublisher(endpoints) | |
53 | lb = loadbalancer.NewRoundRobin(p) | |
54 | ctx = context.Background() | |
55 | ) | |
56 | if _, err := loadbalancer.Retry(retries, time.Second, lb)(ctx, struct{}{}); err != nil { | |
57 | t.Error(err) | |
58 | } | |
59 | } | |
60 | ||
61 | func TestRetryTimeout(t *testing.T) { | |
62 | var ( | |
63 | step = make(chan struct{}) | |
64 | e = func(context.Context, interface{}) (interface{}, error) { <-step; return struct{}{}, nil } | |
65 | timeout = time.Millisecond | |
66 | retry = loadbalancer.Retry(999, timeout, loadbalancer.NewRoundRobin(fixed.NewPublisher([]endpoint.Endpoint{e}))) | |
67 | errs = make(chan error, 1) | |
68 | invoke = func() { _, err := retry(context.Background(), struct{}{}); errs <- err } | |
69 | ) | |
70 | ||
71 | go func() { step <- struct{}{} }() // queue up a flush of the endpoint | |
72 | invoke() // invoke the endpoint and trigger the flush | |
73 | if err := <-errs; err != nil { // that should succeed | |
74 | t.Error(err) | |
75 | } | |
76 | ||
77 | go func() { time.Sleep(10 * timeout); step <- struct{}{} }() // a delayed flush | |
78 | invoke() // invoke the endpoint | |
79 | if err := <-errs; err != context.DeadlineExceeded { // that should not succeed | |
80 | t.Errorf("wanted %v, got none", context.DeadlineExceeded) | |
81 | } | |
82 | } |
0 | package loadbalancer | |
1 | ||
2 | import ( | |
3 | "sync/atomic" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | ) | |
7 | ||
// RoundRobin is a simple load balancer that returns each of the published
// endpoints in sequence.
type RoundRobin struct {
	p       Publisher
	counter uint64 // monotonically increasing pick index, accessed atomically
}
14 | ||
15 | // NewRoundRobin returns a new RoundRobin load balancer. | |
16 | func NewRoundRobin(p Publisher) *RoundRobin { | |
17 | return &RoundRobin{ | |
18 | p: p, | |
19 | counter: 0, | |
20 | } | |
21 | } | |
22 | ||
23 | // Endpoint implements the LoadBalancer interface. | |
24 | func (rr *RoundRobin) Endpoint() (endpoint.Endpoint, error) { | |
25 | endpoints, err := rr.p.Endpoints() | |
26 | if err != nil { | |
27 | return nil, err | |
28 | } | |
29 | if len(endpoints) <= 0 { | |
30 | return nil, ErrNoEndpoints | |
31 | } | |
32 | var old uint64 | |
33 | for { | |
34 | old = atomic.LoadUint64(&rr.counter) | |
35 | if atomic.CompareAndSwapUint64(&rr.counter, old, old+1) { | |
36 | break | |
37 | } | |
38 | } | |
39 | return endpoints[old%uint64(len(endpoints))], nil | |
40 | } |
0 | package loadbalancer_test | |
1 | ||
2 | import ( | |
3 | "reflect" | |
4 | "testing" | |
5 | ||
6 | "github.com/go-kit/kit/endpoint" | |
7 | "github.com/go-kit/kit/loadbalancer" | |
8 | "github.com/go-kit/kit/loadbalancer/fixed" | |
9 | "golang.org/x/net/context" | |
10 | ) | |
11 | ||
12 | func TestRoundRobinDistribution(t *testing.T) { | |
13 | var ( | |
14 | ctx = context.Background() | |
15 | counts = []int{0, 0, 0} | |
16 | endpoints = []endpoint.Endpoint{ | |
17 | func(context.Context, interface{}) (interface{}, error) { counts[0]++; return struct{}{}, nil }, | |
18 | func(context.Context, interface{}) (interface{}, error) { counts[1]++; return struct{}{}, nil }, | |
19 | func(context.Context, interface{}) (interface{}, error) { counts[2]++; return struct{}{}, nil }, | |
20 | } | |
21 | ) | |
22 | ||
23 | lb := loadbalancer.NewRoundRobin(fixed.NewPublisher(endpoints)) | |
24 | ||
25 | for i, want := range [][]int{ | |
26 | {1, 0, 0}, | |
27 | {1, 1, 0}, | |
28 | {1, 1, 1}, | |
29 | {2, 1, 1}, | |
30 | {2, 2, 1}, | |
31 | {2, 2, 2}, | |
32 | {3, 2, 2}, | |
33 | } { | |
34 | e, err := lb.Endpoint() | |
35 | if err != nil { | |
36 | t.Fatal(err) | |
37 | } | |
38 | if _, err := e(ctx, struct{}{}); err != nil { | |
39 | t.Error(err) | |
40 | } | |
41 | if have := counts; !reflect.DeepEqual(want, have) { | |
42 | t.Fatalf("%d: want %v, have %v", i, want, have) | |
43 | } | |
44 | ||
45 | } | |
46 | } | |
47 | ||
// TestRoundRobinBadPublisher should verify that publisher errors propagate
// through RoundRobin.Endpoint; not yet implemented.
func TestRoundRobinBadPublisher(t *testing.T) {
	t.Skip("TODO")
}
0 | package static | |
1 | ||
2 | import ( | |
3 | "github.com/go-kit/kit/endpoint" | |
4 | "github.com/go-kit/kit/loadbalancer" | |
5 | "github.com/go-kit/kit/loadbalancer/fixed" | |
6 | "github.com/go-kit/kit/log" | |
7 | ) | |
8 | ||
// Publisher yields a set of static endpoints as produced by the passed
// factory. It is a thin wrapper around a fixed.Publisher.
type Publisher struct{ publisher *fixed.Publisher }
11 | ||
12 | // NewPublisher returns a static endpoint Publisher. | |
13 | func NewPublisher(instances []string, factory loadbalancer.Factory, logger log.Logger) Publisher { | |
14 | logger = log.NewContext(logger).With("component", "Static Publisher") | |
15 | endpoints := []endpoint.Endpoint{} | |
16 | for _, instance := range instances { | |
17 | e, _, err := factory(instance) // never close | |
18 | if err != nil { | |
19 | logger.Log("instance", instance, "err", err) | |
20 | continue | |
21 | } | |
22 | endpoints = append(endpoints, e) | |
23 | } | |
24 | return Publisher{publisher: fixed.NewPublisher(endpoints)} | |
25 | } | |
26 | ||
// Endpoints implements Publisher by delegating to the wrapped fixed
// publisher.
func (p Publisher) Endpoints() ([]endpoint.Endpoint, error) {
	return p.publisher.Endpoints()
}
0 | package static_test | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "io" | |
5 | "testing" | |
6 | ||
7 | "golang.org/x/net/context" | |
8 | ||
9 | "github.com/go-kit/kit/endpoint" | |
10 | "github.com/go-kit/kit/loadbalancer/static" | |
11 | "github.com/go-kit/kit/log" | |
12 | ) | |
13 | ||
14 | func TestStatic(t *testing.T) { | |
15 | var ( | |
16 | instances = []string{"foo", "bar", "baz"} | |
17 | endpoints = map[string]endpoint.Endpoint{ | |
18 | "foo": func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }, | |
19 | "bar": func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }, | |
20 | "baz": func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }, | |
21 | } | |
22 | factory = func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
23 | if e, ok := endpoints[instance]; ok { | |
24 | return e, nil, nil | |
25 | } | |
26 | return nil, nil, fmt.Errorf("%s: not found", instance) | |
27 | } | |
28 | ) | |
29 | p := static.NewPublisher(instances, factory, log.NewNopLogger()) | |
30 | have, err := p.Endpoints() | |
31 | if err != nil { | |
32 | t.Fatal(err) | |
33 | } | |
34 | want := []endpoint.Endpoint{endpoints["foo"], endpoints["bar"], endpoints["baz"]} | |
35 | if fmt.Sprint(want) != fmt.Sprint(have) { | |
36 | t.Fatalf("want %v, have %v", want, have) | |
37 | } | |
38 | } |
0 | package zk | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "net" | |
5 | "strings" | |
6 | "time" | |
7 | ||
8 | "github.com/samuel/go-zookeeper/zk" | |
9 | ||
10 | "github.com/go-kit/kit/log" | |
11 | ) | |
12 | ||
var (
	// DefaultACL is the default ACL to use for creating znodes.
	DefaultACL = zk.WorldACL(zk.PermAll)
	// ErrInvalidCredentials is returned by the Credentials Option when the
	// user or password is empty.
	ErrInvalidCredentials = errors.New("invalid credentials provided")
	// ErrClientClosed is returned by operations on a client that has been
	// Stopped.
	ErrClientClosed = errors.New("client service closed")
)
19 | ||
// Default timeouts, overridable via the ConnectTimeout and SessionTimeout
// Options.
const (
	// DefaultConnectTimeout is the default timeout to establish a connection to
	// a ZooKeeper node.
	DefaultConnectTimeout = 2 * time.Second
	// DefaultSessionTimeout is the default timeout to keep the current
	// ZooKeeper session alive during a temporary disconnect.
	DefaultSessionTimeout = 5 * time.Second
)
28 | ||
// Client is a wrapper around a lower level ZooKeeper client implementation.
type Client interface {
	// GetEntries should query the provided path in ZooKeeper, place a watch on
	// it and retrieve data from its current child nodes.
	GetEntries(path string) ([]string, <-chan zk.Event, error)
	// CreateParentNodes should try to create the path in case it does not exist
	// yet on ZooKeeper.
	CreateParentNodes(path string) error
	// Stop should properly shut down the client implementation.
	Stop()
}
40 | ||
// clientConfig collects every tunable accepted by NewClient; each field is
// set through an Option function.
type clientConfig struct {
	logger          log.Logger
	acl             []zk.ACL       // ACL applied to nodes created by CreateParentNodes
	credentials     []byte         // "user:pass" for digest auth; empty means no auth
	connectTimeout  time.Duration
	sessionTimeout  time.Duration
	rootNodePayload [][]byte       // per-level payloads for CreateParentNodes
	eventHandler    func(zk.Event) // callback for connection events
}
50 | ||
// Option functions enable friendly APIs. Each Option mutates the clientConfig
// and may reject invalid values by returning an error.
type Option func(*clientConfig) error
53 | ||
// client implements Client by embedding the underlying *zk.Conn and its
// configuration.
type client struct {
	*zk.Conn
	clientConfig
	// active is cleared by Stop; NOTE(review): it is read and written
	// without synchronization — confirm callers never race Stop against
	// other methods.
	active bool
	quit   chan struct{} // closed by Stop to end the event-forwarding goroutine
}
60 | ||
// ACL returns an Option specifying a non-default ACL for creating parent
// nodes. The returned Option never fails.
func ACL(acl []zk.ACL) Option {
	return func(c *clientConfig) error {
		c.acl = acl
		return nil
	}
}
68 | ||
69 | // Credentials returns an Option specifying a user/password combination which | |
70 | // the client will use to authenticate itself with. | |
71 | func Credentials(user, pass string) Option { | |
72 | return func(c *clientConfig) error { | |
73 | if user == "" || pass == "" { | |
74 | return ErrInvalidCredentials | |
75 | } | |
76 | c.credentials = []byte(user + ":" + pass) | |
77 | return nil | |
78 | } | |
79 | } | |
80 | ||
81 | // ConnectTimeout returns an Option specifying a non-default connection timeout | |
82 | // when we try to establish a connection to a ZooKeeper server. | |
83 | func ConnectTimeout(t time.Duration) Option { | |
84 | return func(c *clientConfig) error { | |
85 | if t.Seconds() < 1 { | |
86 | return errors.New("invalid connect timeout (minimum value is 1 second)") | |
87 | } | |
88 | c.connectTimeout = t | |
89 | return nil | |
90 | } | |
91 | } | |
92 | ||
93 | // SessionTimeout returns an Option specifying a non-default session timeout. | |
94 | func SessionTimeout(t time.Duration) Option { | |
95 | return func(c *clientConfig) error { | |
96 | if t.Seconds() < 1 { | |
97 | return errors.New("invalid session timeout (minimum value is 1 second)") | |
98 | } | |
99 | c.sessionTimeout = t | |
100 | return nil | |
101 | } | |
102 | } | |
103 | ||
// Payload returns an Option specifying non-default data values for each znode
// created by CreateParentNodes. payload[i] is used for the (i+1)-th path
// level; deeper levels fall back to empty data. The returned Option never
// fails.
func Payload(payload [][]byte) Option {
	return func(c *clientConfig) error {
		c.rootNodePayload = payload
		return nil
	}
}
112 | ||
// EventHandler returns an Option specifying a callback function to handle
// incoming zk.Event payloads (ZooKeeper connection events). It replaces the
// default handler, which logs each event. The returned Option never fails.
func EventHandler(handler func(zk.Event)) Option {
	return func(c *clientConfig) error {
		c.eventHandler = handler
		return nil
	}
}
121 | ||
// NewClient returns a ZooKeeper client with a connection to the server cluster.
// It will return an error if the server cluster cannot be resolved. It also
// starts a goroutine that forwards connection events to the configured
// eventHandler until Stop is called.
func NewClient(servers []string, logger log.Logger, options ...Option) (Client, error) {
	// By default, every connection event is logged.
	defaultEventHandler := func(event zk.Event) {
		logger.Log("eventtype", event.Type.String(), "server", event.Server, "state", event.State.String(), "err", event.Err)
	}
	config := clientConfig{
		acl:            DefaultACL,
		connectTimeout: DefaultConnectTimeout,
		sessionTimeout: DefaultSessionTimeout,
		eventHandler:   defaultEventHandler,
		logger:         logger,
	}
	// Apply caller Options; the first invalid one aborts construction.
	for _, option := range options {
		if err := option(&config); err != nil {
			return nil, err
		}
	}
	// dialer overrides the default ZooKeeper library Dialer so we can configure
	// the connectTimeout. The current library has a hardcoded value of 1 second
	// and there are reports of race conditions, due to slow DNS resolvers and
	// other network latency issues.
	dialer := func(network, address string, _ time.Duration) (net.Conn, error) {
		return net.DialTimeout(network, address, config.connectTimeout)
	}
	conn, eventc, err := zk.Connect(servers, config.sessionTimeout, withLogger(logger), zk.WithDialer(dialer))

	if err != nil {
		return nil, err
	}

	if len(config.credentials) > 0 {
		err = conn.AddAuth("digest", config.credentials)
		if err != nil {
			// NOTE(review): conn is not closed on this path — confirm
			// whether the connection should be torn down here.
			return nil, err
		}
	}

	c := &client{conn, config, true, make(chan struct{})}

	// Start listening for incoming Event payloads and callback the set
	// eventHandler. The goroutine exits when Stop closes c.quit.
	go func() {
		for {
			select {
			case event := <-eventc:
				config.eventHandler(event)
			case <-c.quit:
				return
			}
		}
	}()
	return c, nil
}
176 | ||
177 | // CreateParentNodes implements the ZooKeeper Client interface. | |
178 | func (c *client) CreateParentNodes(path string) error { | |
179 | if !c.active { | |
180 | return ErrClientClosed | |
181 | } | |
182 | if path[0] != '/' { | |
183 | return zk.ErrInvalidPath | |
184 | } | |
185 | payload := []byte("") | |
186 | pathString := "" | |
187 | pathNodes := strings.Split(path, "/") | |
188 | for i := 1; i < len(pathNodes); i++ { | |
189 | if i <= len(c.rootNodePayload) { | |
190 | payload = c.rootNodePayload[i-1] | |
191 | } else { | |
192 | payload = []byte("") | |
193 | } | |
194 | pathString += "/" + pathNodes[i] | |
195 | _, err := c.Create(pathString, payload, 0, c.acl) | |
196 | // not being able to create the node because it exists or not having | |
197 | // sufficient rights is not an issue. It is ok for the node to already | |
198 | // exist and/or us to only have read rights | |
199 | if err != nil && err != zk.ErrNodeExists && err != zk.ErrNoAuth { | |
200 | return err | |
201 | } | |
202 | } | |
203 | return nil | |
204 | } | |
205 | ||
206 | // GetEntries implements the ZooKeeper Client interface. | |
207 | func (c *client) GetEntries(path string) ([]string, <-chan zk.Event, error) { | |
208 | // retrieve list of child nodes for given path and add watch to path | |
209 | znodes, _, eventc, err := c.ChildrenW(path) | |
210 | ||
211 | if err != nil { | |
212 | return nil, eventc, err | |
213 | } | |
214 | ||
215 | var resp []string | |
216 | for _, znode := range znodes { | |
217 | // retrieve payload for child znode and add to response array | |
218 | if data, _, err := c.Get(path + "/" + znode); err == nil { | |
219 | resp = append(resp, string(data)) | |
220 | } | |
221 | } | |
222 | return resp, eventc, nil | |
223 | } | |
224 | ||
// Stop implements the ZooKeeper Client interface. It marks the client
// inactive, ends the event-forwarding goroutine, and closes the underlying
// connection.
// NOTE(review): active is written without synchronization — confirm no other
// goroutine reads it concurrently with Stop.
func (c *client) Stop() {
	c.active = false
	close(c.quit)
	c.Close()
}
0 | package zk | |
1 | ||
2 | import ( | |
3 | "bytes" | |
4 | "testing" | |
5 | "time" | |
6 | ||
7 | stdzk "github.com/samuel/go-zookeeper/zk" | |
8 | ||
9 | "github.com/go-kit/kit/log" | |
10 | ) | |
11 | ||
// TestNewClient exercises NewClient both with an unresolvable host (expecting
// an error) and with every Option set, verifying each Option landed in the
// clientConfig and that the event handler fires.
func TestNewClient(t *testing.T) {
	var (
		acl            = stdzk.WorldACL(stdzk.PermRead)
		connectTimeout = 3 * time.Second
		sessionTimeout = 20 * time.Second
		payload        = [][]byte{[]byte("Payload"), []byte("Test")}
	)

	// An unresolvable host must fail construction.
	c, err := NewClient(
		[]string{"FailThisInvalidHost!!!"},
		log.NewNopLogger(),
	)
	if err == nil {
		t.Errorf("expected error, got nil")
	}

	// NOTE(review): hasFired is written from the event-handler goroutine and
	// read here without synchronization — confirm this is race-free in
	// practice (the close of calledEventHandler is the actual signal).
	hasFired := false
	calledEventHandler := make(chan struct{})
	eventHandler := func(event stdzk.Event) {
		if !hasFired {
			// test is successful if this function has fired at least once
			hasFired = true
			close(calledEventHandler)
		}
	}

	c, err = NewClient(
		[]string{"localhost"},
		log.NewNopLogger(),
		ACL(acl),
		ConnectTimeout(connectTimeout),
		SessionTimeout(sessionTimeout),
		Payload(payload),
		EventHandler(eventHandler),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Stop()

	// Peek inside the concrete type to verify each Option was applied.
	clientImpl, ok := c.(*client)
	if !ok {
		t.Fatal("retrieved incorrect Client implementation")
	}
	if want, have := acl, clientImpl.acl; want[0] != have[0] {
		t.Errorf("want %+v, have %+v", want, have)
	}
	if want, have := connectTimeout, clientImpl.connectTimeout; want != have {
		t.Errorf("want %d, have %d", want, have)
	}
	if want, have := sessionTimeout, clientImpl.sessionTimeout; want != have {
		t.Errorf("want %d, have %d", want, have)
	}
	if want, have := payload, clientImpl.rootNodePayload; bytes.Compare(want[0], have[0]) != 0 || bytes.Compare(want[1], have[1]) != 0 {
		t.Errorf("want %s, have %s", want, have)
	}

	// The default connection attempt should produce at least one event.
	select {
	case <-calledEventHandler:
	case <-time.After(100 * time.Millisecond):
		t.Errorf("event handler never called")
	}
}
75 | ||
76 | func TestOptions(t *testing.T) { | |
77 | _, err := NewClient([]string{"localhost"}, log.NewNopLogger(), Credentials("valid", "credentials")) | |
78 | if err != nil && err != stdzk.ErrNoServer { | |
79 | t.Errorf("unexpected error: %v", err) | |
80 | } | |
81 | ||
82 | _, err = NewClient([]string{"localhost"}, log.NewNopLogger(), Credentials("nopass", "")) | |
83 | if want, have := err, ErrInvalidCredentials; want != have { | |
84 | t.Errorf("want %v, have %v", want, have) | |
85 | } | |
86 | ||
87 | _, err = NewClient([]string{"localhost"}, log.NewNopLogger(), ConnectTimeout(0)) | |
88 | if err == nil { | |
89 | t.Errorf("expected connect timeout error") | |
90 | } | |
91 | ||
92 | _, err = NewClient([]string{"localhost"}, log.NewNopLogger(), SessionTimeout(0)) | |
93 | if err == nil { | |
94 | t.Errorf("expected connect timeout error") | |
95 | } | |
96 | } | |
97 | ||
// TestCreateParentNodes exercises client and publisher construction against
// an unreachable ZooKeeper address (localhost:65500), checking the error
// returned at each stage of the client lifecycle. The steps are strictly
// order-dependent: the client is stopped partway through, after which all
// operations must fail with ErrClientClosed.
func TestCreateParentNodes(t *testing.T) {
	payload := [][]byte{[]byte("Payload"), []byte("Test")}

	// Client construction itself succeeds; connections are made lazily.
	c, err := NewClient([]string{"localhost:65500"}, log.NewNopLogger())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if c == nil {
		t.Fatal("expected new Client, got nil")
	}

	// With no server reachable, publisher creation surfaces ErrNoServer.
	p, err := NewPublisher(c, "/validpath", newFactory(""), log.NewNopLogger())
	if err != stdzk.ErrNoServer {
		t.Errorf("unexpected error: %v", err)
	}
	if p != nil {
		t.Error("expected failed new Publisher")
	}

	// A path without a leading slash is invalid before any I/O happens.
	p, err = NewPublisher(c, "invalidpath", newFactory(""), log.NewNopLogger())
	if err != stdzk.ErrInvalidPath {
		t.Errorf("unexpected error: %v", err)
	}
	_, _, err = c.GetEntries("/validpath")
	if err != stdzk.ErrNoServer {
		t.Errorf("unexpected error: %v", err)
	}

	// From here on the client is closed; everything must return
	// ErrClientClosed rather than attempting network I/O.
	c.Stop()

	err = c.CreateParentNodes("/validpath")
	if err != ErrClientClosed {
		t.Errorf("unexpected error: %v", err)
	}

	p, err = NewPublisher(c, "/validpath", newFactory(""), log.NewNopLogger())
	if err != ErrClientClosed {
		t.Errorf("unexpected error: %v", err)
	}
	if p != nil {
		t.Error("expected failed new Publisher")
	}

	// A fresh client with a payload behaves like the first one: construction
	// succeeds, but publishing fails with ErrNoServer.
	c, err = NewClient([]string{"localhost:65500"}, log.NewNopLogger(), Payload(payload))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if c == nil {
		t.Fatal("expected new Client, got nil")
	}

	p, err = NewPublisher(c, "/validpath", newFactory(""), log.NewNopLogger())
	if err != stdzk.ErrNoServer {
		t.Errorf("unexpected error: %v", err)
	}
	if p != nil {
		t.Error("expected failed new Publisher")
	}
}
0 | // +build integration | |
1 | ||
2 | package zk | |
3 | ||
4 | import ( | |
5 | "bytes" | |
6 | "flag" | |
7 | "fmt" | |
8 | "os" | |
9 | "testing" | |
10 | "time" | |
11 | ||
12 | stdzk "github.com/samuel/go-zookeeper/zk" | |
13 | ) | |
14 | ||
15 | var ( | |
16 | host []string | |
17 | ) | |
18 | ||
19 | func TestMain(m *testing.M) { | |
20 | flag.Parse() | |
21 | ||
22 | fmt.Println("Starting ZooKeeper server...") | |
23 | ||
24 | ts, err := stdzk.StartTestCluster(1, nil, nil) | |
25 | if err != nil { | |
26 | fmt.Printf("ZooKeeper server error: %v\n", err) | |
27 | os.Exit(1) | |
28 | } | |
29 | ||
30 | host = []string{fmt.Sprintf("localhost:%d", ts.Servers[0].Port)} | |
31 | code := m.Run() | |
32 | ||
33 | ts.Stop() | |
34 | os.Exit(code) | |
35 | } | |
36 | ||
37 | func TestCreateParentNodesOnServer(t *testing.T) { | |
38 | payload := [][]byte{[]byte("Payload"), []byte("Test")} | |
39 | c1, err := NewClient(host, logger, Payload(payload)) | |
40 | if err != nil { | |
41 | t.Fatalf("Connect returned error: %v", err) | |
42 | } | |
43 | if c1 == nil { | |
44 | t.Fatal("Expected pointer to client, got nil") | |
45 | } | |
46 | defer c1.Stop() | |
47 | ||
48 | p, err := NewPublisher(c1, path, newFactory(""), logger) | |
49 | if err != nil { | |
50 | t.Fatalf("Unable to create Publisher: %v", err) | |
51 | } | |
52 | defer p.Stop() | |
53 | ||
54 | endpoints, err := p.Endpoints() | |
55 | if err != nil { | |
56 | t.Fatal(err) | |
57 | } | |
58 | if want, have := 0, len(endpoints); want != have { | |
59 | t.Errorf("want %d, have %d", want, have) | |
60 | } | |
61 | ||
62 | c2, err := NewClient(host, logger) | |
63 | if err != nil { | |
64 | t.Fatalf("Connect returned error: %v", err) | |
65 | } | |
66 | defer c2.Stop() | |
67 | data, _, err := c2.(*client).Get(path) | |
68 | if err != nil { | |
69 | t.Fatal(err) | |
70 | } | |
71 | // test Client implementation of CreateParentNodes. It should have created | |
72 | // our payload | |
73 | if bytes.Compare(data, payload[1]) != 0 { | |
74 | t.Errorf("want %s, have %s", payload[1], data) | |
75 | } | |
76 | ||
77 | } | |
78 | ||
79 | func TestCreateBadParentNodesOnServer(t *testing.T) { | |
80 | c, _ := NewClient(host, logger) | |
81 | defer c.Stop() | |
82 | ||
83 | _, err := NewPublisher(c, "invalid/path", newFactory(""), logger) | |
84 | ||
85 | if want, have := stdzk.ErrInvalidPath, err; want != have { | |
86 | t.Errorf("want %v, have %v", want, have) | |
87 | } | |
88 | } | |
89 | ||
90 | func TestCredentials1(t *testing.T) { | |
91 | acl := stdzk.DigestACL(stdzk.PermAll, "user", "secret") | |
92 | c, _ := NewClient(host, logger, ACL(acl), Credentials("user", "secret")) | |
93 | defer c.Stop() | |
94 | ||
95 | _, err := NewPublisher(c, "/acl-issue-test", newFactory(""), logger) | |
96 | ||
97 | if err != nil { | |
98 | t.Fatal(err) | |
99 | } | |
100 | } | |
101 | ||
102 | func TestCredentials2(t *testing.T) { | |
103 | acl := stdzk.DigestACL(stdzk.PermAll, "user", "secret") | |
104 | c, _ := NewClient(host, logger, ACL(acl)) | |
105 | defer c.Stop() | |
106 | ||
107 | _, err := NewPublisher(c, "/acl-issue-test", newFactory(""), logger) | |
108 | ||
109 | if err != stdzk.ErrNoAuth { | |
110 | t.Errorf("want %v, have %v", stdzk.ErrNoAuth, err) | |
111 | } | |
112 | } | |
113 | ||
114 | func TestConnection(t *testing.T) { | |
115 | c, _ := NewClient(host, logger) | |
116 | c.Stop() | |
117 | ||
118 | _, err := NewPublisher(c, "/acl-issue-test", newFactory(""), logger) | |
119 | ||
120 | if err != ErrClientClosed { | |
121 | t.Errorf("want %v, have %v", ErrClientClosed, err) | |
122 | } | |
123 | } | |
124 | ||
125 | func TestGetEntriesOnServer(t *testing.T) { | |
126 | var instancePayload = "protocol://hostname:port/routing" | |
127 | ||
128 | c1, err := NewClient(host, logger) | |
129 | if err != nil { | |
130 | t.Fatalf("Connect returned error: %v", err) | |
131 | } | |
132 | ||
133 | defer c1.Stop() | |
134 | ||
135 | c2, err := NewClient(host, logger) | |
136 | p, err := NewPublisher(c2, path, newFactory(""), logger) | |
137 | if err != nil { | |
138 | t.Fatal(err) | |
139 | } | |
140 | defer c2.Stop() | |
141 | ||
142 | c2impl, _ := c2.(*client) | |
143 | _, err = c2impl.Create( | |
144 | path+"/instance1", | |
145 | []byte(instancePayload), | |
146 | stdzk.FlagEphemeral|stdzk.FlagSequence, | |
147 | stdzk.WorldACL(stdzk.PermAll), | |
148 | ) | |
149 | if err != nil { | |
150 | t.Fatalf("Unable to create test ephemeral znode 1: %v", err) | |
151 | } | |
152 | _, err = c2impl.Create( | |
153 | path+"/instance2", | |
154 | []byte(instancePayload+"2"), | |
155 | stdzk.FlagEphemeral|stdzk.FlagSequence, | |
156 | stdzk.WorldACL(stdzk.PermAll), | |
157 | ) | |
158 | if err != nil { | |
159 | t.Fatalf("Unable to create test ephemeral znode 2: %v", err) | |
160 | } | |
161 | ||
162 | time.Sleep(50 * time.Millisecond) | |
163 | ||
164 | endpoints, err := p.Endpoints() | |
165 | if err != nil { | |
166 | t.Fatal(err) | |
167 | } | |
168 | if want, have := 2, len(endpoints); want != have { | |
169 | t.Errorf("want %d, have %d", want, have) | |
170 | } | |
171 | } | |
172 | ||
173 | func TestGetEntriesPayloadOnServer(t *testing.T) { | |
174 | c, err := NewClient(host, logger) | |
175 | if err != nil { | |
176 | t.Fatalf("Connect returned error: %v", err) | |
177 | } | |
178 | _, eventc, err := c.GetEntries(path) | |
179 | if err != nil { | |
180 | t.Fatal(err) | |
181 | } | |
182 | _, err = c.(*client).Create( | |
183 | path+"/instance3", | |
184 | []byte("just some payload"), | |
185 | stdzk.FlagEphemeral|stdzk.FlagSequence, | |
186 | stdzk.WorldACL(stdzk.PermAll), | |
187 | ) | |
188 | if err != nil { | |
189 | t.Fatalf("Unable to create test ephemeral znode: %v", err) | |
190 | } | |
191 | select { | |
192 | case event := <-eventc: | |
193 | if want, have := stdzk.EventNodeChildrenChanged.String(), event.Type.String(); want != have { | |
194 | t.Errorf("want %s, have %s", want, have) | |
195 | } | |
196 | case <-time.After(20 * time.Millisecond): | |
197 | t.Errorf("expected incoming watch event, timeout occurred") | |
198 | } | |
199 | ||
200 | } |
0 | package zk | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | ||
5 | "github.com/samuel/go-zookeeper/zk" | |
6 | ||
7 | "github.com/go-kit/kit/log" | |
8 | ) | |
9 | ||
// wrapLogger wraps a go-kit logger so we can use it as the logging service for
// the ZooKeeper library (which expects a Printf method to be available).
type wrapLogger struct {
	log.Logger
}

// Printf satisfies the ZooKeeper library's logger contract by formatting the
// message and forwarding it to the embedded go-kit logger under the "msg"
// key. The error returned by Log is deliberately ignored: logging here is
// best-effort.
func (logger wrapLogger) Printf(str string, vars ...interface{}) {
	logger.Log("msg", fmt.Sprintf(str, vars...))
}
19 | ||
20 | // withLogger replaces the ZooKeeper library's default logging service for our | |
21 | // own go-kit logger | |
22 | func withLogger(logger log.Logger) func(c *zk.Conn) { | |
23 | return func(c *zk.Conn) { | |
24 | c.SetLogger(wrapLogger{logger}) | |
25 | } | |
26 | } |
0 | package zk | |
1 | ||
2 | import ( | |
3 | "github.com/go-kit/kit/endpoint" | |
4 | "github.com/go-kit/kit/loadbalancer" | |
5 | "github.com/go-kit/kit/log" | |
6 | "github.com/samuel/go-zookeeper/zk" | |
7 | ) | |
8 | ||
// Publisher yields endpoints stored in a certain ZooKeeper path. Any kind of
// change in that path is watched and will update the Publisher endpoints.
type Publisher struct {
	client Client                        // ZooKeeper client used for node retrieval and watches
	path   string                        // znode path whose children represent service instances
	cache  *loadbalancer.EndpointCache   // endpoint cache refreshed on every path update
	logger log.Logger                    // destination for retrieval errors and instance counts
	quit   chan struct{}                 // closed by Stop to terminate the watch loop
}
18 | ||
19 | // NewPublisher returns a ZooKeeper publisher. ZooKeeper will start watching the | |
20 | // given path for changes and update the Publisher endpoints. | |
21 | func NewPublisher(c Client, path string, f loadbalancer.Factory, logger log.Logger) (*Publisher, error) { | |
22 | p := &Publisher{ | |
23 | client: c, | |
24 | path: path, | |
25 | cache: loadbalancer.NewEndpointCache(f, logger), | |
26 | logger: logger, | |
27 | quit: make(chan struct{}), | |
28 | } | |
29 | ||
30 | err := p.client.CreateParentNodes(p.path) | |
31 | if err != nil { | |
32 | return nil, err | |
33 | } | |
34 | ||
35 | // initial node retrieval and cache fill | |
36 | instances, eventc, err := p.client.GetEntries(p.path) | |
37 | if err != nil { | |
38 | logger.Log("path", p.path, "msg", "failed to retrieve entries", "err", err) | |
39 | return nil, err | |
40 | } | |
41 | logger.Log("path", p.path, "instances", len(instances)) | |
42 | p.cache.Replace(instances) | |
43 | ||
44 | // handle incoming path updates | |
45 | go p.loop(eventc) | |
46 | ||
47 | return p, nil | |
48 | } | |
49 | ||
// loop consumes watch events for the publisher's path and refreshes the
// endpoint cache on every change. It runs in its own goroutine until the
// quit channel is closed by Stop. Note that eventc is reassigned on every
// successful GetEntries call: ZooKeeper watches are one-time triggers, so a
// fresh watch channel is installed each iteration.
func (p *Publisher) loop(eventc <-chan zk.Event) {
	var (
		instances []string
		err       error
	)
	for {
		select {
		case <-eventc:
			// We received a path update notification. Call GetEntries to
			// retrieve child node data and set a new watch, as zk watches
			// are one-time triggers.
			instances, eventc, err = p.client.GetEntries(p.path)
			if err != nil {
				// Keep the previous cache contents and wait for the next
				// event rather than tearing the publisher down.
				p.logger.Log("path", p.path, "msg", "failed to retrieve entries", "err", err)
				continue
			}
			p.logger.Log("path", p.path, "instances", len(instances))
			p.cache.Replace(instances)
		case <-p.quit:
			return
		}
	}
}
73 | ||
// Endpoints implements the Publisher interface. It returns the endpoints
// currently held by the underlying cache.
func (p *Publisher) Endpoints() ([]endpoint.Endpoint, error) {
	return p.cache.Endpoints()
}
78 | ||
// Stop terminates the Publisher by closing the quit channel, which ends the
// watch loop goroutine. It must be called at most once; a second call would
// panic on the double close.
func (p *Publisher) Stop() {
	close(p.quit)
}
0 | package zk | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | "time" | |
5 | ) | |
6 | ||
7 | func TestPublisher(t *testing.T) { | |
8 | client := newFakeClient() | |
9 | ||
10 | p, err := NewPublisher(client, path, newFactory(""), logger) | |
11 | if err != nil { | |
12 | t.Fatalf("failed to create new publisher: %v", err) | |
13 | } | |
14 | defer p.Stop() | |
15 | ||
16 | if _, err := p.Endpoints(); err != nil { | |
17 | t.Fatal(err) | |
18 | } | |
19 | } | |
20 | ||
21 | func TestBadFactory(t *testing.T) { | |
22 | client := newFakeClient() | |
23 | ||
24 | p, err := NewPublisher(client, path, newFactory("kaboom"), logger) | |
25 | if err != nil { | |
26 | t.Fatalf("failed to create new publisher: %v", err) | |
27 | } | |
28 | defer p.Stop() | |
29 | ||
30 | // instance1 came online | |
31 | client.AddService(path+"/instance1", "kaboom") | |
32 | ||
33 | // instance2 came online | |
34 | client.AddService(path+"/instance2", "zookeeper_node_data") | |
35 | ||
36 | if err = asyncTest(100*time.Millisecond, 1, p); err != nil { | |
37 | t.Error(err) | |
38 | } | |
39 | } | |
40 | ||
// TestServiceUpdate drives the publisher through a sequence of service
// arrivals, a watch error, and departures, asserting the endpoint count
// after each transition. The steps are order-dependent: each assertion
// depends on the cumulative state built up by the previous ones.
func TestServiceUpdate(t *testing.T) {
	client := newFakeClient()

	p, err := NewPublisher(client, path, newFactory(""), logger)
	if err != nil {
		t.Fatalf("failed to create new publisher: %v", err)
	}
	defer p.Stop()

	endpoints, err := p.Endpoints()
	if err != nil {
		t.Fatal(err)
	}

	// No services registered yet.
	if want, have := 0, len(endpoints); want != have {
		t.Errorf("want %d, have %d", want, have)
	}

	// instance1 came online
	client.AddService(path+"/instance1", "zookeeper_node_data")

	// instance2 came online
	client.AddService(path+"/instance2", "zookeeper_node_data2")

	// we should have 2 instances
	if err = asyncTest(100*time.Millisecond, 2, p); err != nil {
		t.Error(err)
	}

	// watch triggers an error... the publisher should log it and keep its
	// previous endpoint set rather than shutting down.
	client.SendErrorOnWatch()

	// test if error was consumed
	if err = client.ErrorIsConsumed(100 * time.Millisecond); err != nil {
		t.Error(err)
	}

	// instance3 came online
	client.AddService(path+"/instance3", "zookeeper_node_data3")

	// we should have 3 instances
	if err = asyncTest(100*time.Millisecond, 3, p); err != nil {
		t.Error(err)
	}

	// instance1 goes offline
	client.RemoveService(path + "/instance1")

	// instance2 goes offline
	client.RemoveService(path + "/instance2")

	// we should have 1 instance
	if err = asyncTest(100*time.Millisecond, 1, p); err != nil {
		t.Error(err)
	}
}
97 | ||
98 | func TestBadPublisherCreate(t *testing.T) { | |
99 | client := newFakeClient() | |
100 | client.SendErrorOnWatch() | |
101 | p, err := NewPublisher(client, path, newFactory(""), logger) | |
102 | if err == nil { | |
103 | t.Error("expected error on new publisher") | |
104 | } | |
105 | if p != nil { | |
106 | t.Error("expected publisher not to be created") | |
107 | } | |
108 | p, err = NewPublisher(client, "BadPath", newFactory(""), logger) | |
109 | if err == nil { | |
110 | t.Error("expected error on new publisher") | |
111 | } | |
112 | if p != nil { | |
113 | t.Error("expected publisher not to be created") | |
114 | } | |
115 | } |
0 | package zk | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "fmt" | |
5 | "io" | |
6 | "sync" | |
7 | "time" | |
8 | ||
9 | "github.com/samuel/go-zookeeper/zk" | |
10 | "golang.org/x/net/context" | |
11 | ||
12 | "github.com/go-kit/kit/endpoint" | |
13 | "github.com/go-kit/kit/loadbalancer" | |
14 | "github.com/go-kit/kit/log" | |
15 | ) | |
16 | ||
var (
	// path is the znode used by all tests in this package.
	path = "/gokit.test/service.name"
	// e is the no-op endpoint handed out by the test factory.
	e = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
	// logger discards all output.
	logger = log.NewNopLogger()
)
22 | ||
// fakeClient is an in-memory Client implementation for tests. It stores
// service data in a map and signals watchers through a buffered event
// channel. All state is guarded by mtx.
type fakeClient struct {
	mtx       sync.Mutex
	ch        chan zk.Event     // shared watch channel delivered by GetEntries
	responses map[string]string // node path -> payload
	result    bool              // false arms an error for the next GetEntries call
}
29 | ||
30 | func newFakeClient() *fakeClient { | |
31 | return &fakeClient{ | |
32 | ch: make(chan zk.Event, 5), | |
33 | responses: make(map[string]string), | |
34 | result: true, | |
35 | } | |
36 | } | |
37 | ||
38 | func (c *fakeClient) CreateParentNodes(path string) error { | |
39 | if path == "BadPath" { | |
40 | return errors.New("Dummy Error") | |
41 | } | |
42 | return nil | |
43 | } | |
44 | ||
45 | func (c *fakeClient) GetEntries(path string) ([]string, <-chan zk.Event, error) { | |
46 | c.mtx.Lock() | |
47 | defer c.mtx.Unlock() | |
48 | if c.result == false { | |
49 | c.result = true | |
50 | return []string{}, c.ch, errors.New("Dummy Error") | |
51 | } | |
52 | responses := []string{} | |
53 | for _, data := range c.responses { | |
54 | responses = append(responses, data) | |
55 | } | |
56 | return responses, c.ch, nil | |
57 | } | |
58 | ||
// AddService registers data under node and emits a watch event so the
// publisher under test refreshes its cache. The (buffered) send happens
// under the lock to keep map updates and events ordered.
func (c *fakeClient) AddService(node, data string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.responses[node] = data
	c.ch <- zk.Event{}
}
65 | ||
// RemoveService deletes node from the registered services and emits a watch
// event so the publisher under test refreshes its cache.
func (c *fakeClient) RemoveService(node string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	delete(c.responses, node)
	c.ch <- zk.Event{}
}
72 | ||
// SendErrorOnWatch arms an error for the next GetEntries call and emits a
// watch event to trigger that call.
func (c *fakeClient) SendErrorOnWatch() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.result = false
	c.ch <- zk.Event{}
}
79 | ||
80 | func (c *fakeClient) ErrorIsConsumed(t time.Duration) error { | |
81 | timeout := time.After(t) | |
82 | for { | |
83 | select { | |
84 | case <-timeout: | |
85 | return fmt.Errorf("expected error not consumed after timeout %s", t.String()) | |
86 | default: | |
87 | c.mtx.Lock() | |
88 | if c.result == false { | |
89 | c.mtx.Unlock() | |
90 | return nil | |
91 | } | |
92 | c.mtx.Unlock() | |
93 | } | |
94 | } | |
95 | } | |
96 | ||
// Stop implements Client. The fake holds no external resources, so it is a no-op.
func (c *fakeClient) Stop() {}
98 | ||
99 | func newFactory(fakeError string) loadbalancer.Factory { | |
100 | return func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
101 | if fakeError == instance { | |
102 | return nil, nil, errors.New(fakeError) | |
103 | } | |
104 | return e, nil, nil | |
105 | } | |
106 | } | |
107 | ||
108 | func asyncTest(timeout time.Duration, want int, p *Publisher) (err error) { | |
109 | var endpoints []endpoint.Endpoint | |
110 | // want can never be -1 | |
111 | have := -1 | |
112 | t := time.After(timeout) | |
113 | for { | |
114 | select { | |
115 | case <-t: | |
116 | return fmt.Errorf("want %d, have %d after timeout %s", want, have, timeout.String()) | |
117 | default: | |
118 | endpoints, err = p.Endpoints() | |
119 | have = len(endpoints) | |
120 | if err != nil || want == have { | |
121 | return | |
122 | } | |
123 | time.Sleep(time.Millisecond) | |
124 | } | |
125 | } | |
126 | } |
0 | package cache | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | "testing" | |
5 | ||
6 | "github.com/go-kit/kit/endpoint" | |
7 | "github.com/go-kit/kit/log" | |
8 | ) | |
9 | ||
10 | func BenchmarkEndpoints(b *testing.B) { | |
11 | var ( | |
12 | ca = make(closer) | |
13 | cb = make(closer) | |
14 | cmap = map[string]io.Closer{"a": ca, "b": cb} | |
15 | factory = func(instance string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, cmap[instance], nil } | |
16 | c = New(factory, log.NewNopLogger()) | |
17 | ) | |
18 | ||
19 | b.ReportAllocs() | |
20 | ||
21 | c.Update([]string{"a", "b"}) | |
22 | ||
23 | b.RunParallel(func(pb *testing.PB) { | |
24 | for pb.Next() { | |
25 | c.Endpoints() | |
26 | } | |
27 | }) | |
28 | } |
0 | package cache | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | "sort" | |
5 | "sync" | |
6 | "sync/atomic" | |
7 | ||
8 | "github.com/go-kit/kit/endpoint" | |
9 | "github.com/go-kit/kit/log" | |
10 | "github.com/go-kit/kit/sd" | |
11 | ) | |
12 | ||
// Cache collects the most recent set of endpoints from a service discovery
// system via a subscriber, and makes them available to consumers. Cache is
// meant to be embedded inside of a concrete subscriber, and can serve Service
// invocations directly.
type Cache struct {
	mtx     sync.RWMutex               // guards cache during Update
	factory sd.Factory                 // builds an endpoint+closer per instance string
	cache   map[string]endpointCloser  // instance string -> live endpoint and its closer
	slice   atomic.Value               // []endpoint.Endpoint, read lock-free by Endpoints
	logger  log.Logger                 // destination for per-instance factory errors
}

// endpointCloser pairs an endpoint with the closer that releases its
// underlying resources when the instance disappears.
type endpointCloser struct {
	endpoint.Endpoint
	io.Closer
}
29 | ||
30 | // New returns a new, empty endpoint cache. | |
31 | func New(factory sd.Factory, logger log.Logger) *Cache { | |
32 | return &Cache{ | |
33 | factory: factory, | |
34 | cache: map[string]endpointCloser{}, | |
35 | logger: logger, | |
36 | } | |
37 | } | |
38 | ||
// Update should be invoked by clients with a complete set of current instance
// strings whenever that set changes. The cache manufactures new endpoints via
// the factory, closes old endpoints when they disappear, and persists existing
// endpoints if they survive through an update.
func (c *Cache) Update(instances []string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	// Deterministic order (for later).
	sort.Strings(instances)

	// Produce the current set of services. Surviving entries are MOVED from
	// c.cache into the new map (note the delete below), so that whatever
	// remains in c.cache afterwards is exactly the set to close.
	cache := make(map[string]endpointCloser, len(instances))
	for _, instance := range instances {
		// If it already exists, just copy it over.
		if sc, ok := c.cache[instance]; ok {
			cache[instance] = sc
			delete(c.cache, instance)
			continue
		}

		// If it doesn't exist, create it.
		service, closer, err := c.factory(instance)
		if err != nil {
			// Log and skip: a failing instance is simply absent from the
			// resulting endpoint set.
			c.logger.Log("instance", instance, "err", err)
			continue
		}
		cache[instance] = endpointCloser{service, closer}
	}

	// Close any leftover endpoints (instances no longer present).
	for _, sc := range c.cache {
		if sc.Closer != nil {
			sc.Closer.Close()
		}
	}

	// Populate the slice of endpoints in the (sorted) instance order.
	slice := make([]endpoint.Endpoint, 0, len(cache))
	for _, instance := range instances {
		// A bad factory may mean an instance is not present.
		if _, ok := cache[instance]; !ok {
			continue
		}
		slice = append(slice, cache[instance].Endpoint)
	}

	// Swap and trigger GC for old copies. The atomic store lets Endpoints
	// read without taking the mutex.
	c.slice.Store(slice)
	c.cache = cache
}
90 | ||
91 | // Endpoints yields the current set of (presumably identical) endpoints, ordered | |
92 | // lexicographically by the corresponding instance string. | |
93 | func (c *Cache) Endpoints() []endpoint.Endpoint { | |
94 | return c.slice.Load().([]endpoint.Endpoint) | |
95 | } |
0 | package cache | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "io" | |
5 | "testing" | |
6 | "time" | |
7 | ||
8 | "github.com/go-kit/kit/endpoint" | |
9 | "github.com/go-kit/kit/log" | |
10 | ) | |
11 | ||
// TestCache walks the cache through populate, no-op re-populate, and two
// deletions, observing via the closer channels that exactly the evicted
// instances get closed. The deletion steps run Update in a goroutine and
// block on the corresponding closer channel.
// NOTE(review): after the select unblocks, Update may not have stored the
// new slice yet (Close happens before Store), so the length assertions that
// follow look racy — confirm before relying on this test under -race.
func TestCache(t *testing.T) {
	var (
		ca = make(closer)
		cb = make(closer)
		c  = map[string]io.Closer{"a": ca, "b": cb}
		f  = func(instance string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, c[instance], nil }
		cache = New(f, log.NewNopLogger())
	)

	// Populate
	cache.Update([]string{"a", "b"})
	select {
	case <-ca:
		t.Errorf("endpoint a closed, not good")
	case <-cb:
		t.Errorf("endpoint b closed, not good")
	case <-time.After(time.Millisecond):
		t.Logf("no closures yet, good")
	}
	if want, have := 2, len(cache.Endpoints()); want != have {
		t.Errorf("want %d, have %d", want, have)
	}

	// Duplicate, should be no-op
	cache.Update([]string{"a", "b"})
	select {
	case <-ca:
		t.Errorf("endpoint a closed, not good")
	case <-cb:
		t.Errorf("endpoint b closed, not good")
	case <-time.After(time.Millisecond):
		t.Logf("no closures yet, good")
	}
	if want, have := 2, len(cache.Endpoints()); want != have {
		t.Errorf("want %d, have %d", want, have)
	}

	// Delete b
	go cache.Update([]string{"a"})
	select {
	case <-ca:
		t.Errorf("endpoint a closed, not good")
	case <-cb:
		t.Logf("endpoint b closed, good")
	case <-time.After(time.Second):
		t.Errorf("didn't close the deleted instance in time")
	}
	if want, have := 1, len(cache.Endpoints()); want != have {
		t.Errorf("want %d, have %d", want, have)
	}

	// Delete a
	go cache.Update([]string{})
	select {
	// case <-cb: will succeed, as it's closed
	case <-ca:
		t.Logf("endpoint a closed, good")
	case <-time.After(time.Second):
		t.Errorf("didn't close the deleted instance in time")
	}
	if want, have := 0, len(cache.Endpoints()); want != have {
		t.Errorf("want %d, have %d", want, have)
	}
}
76 | ||
77 | func TestBadFactory(t *testing.T) { | |
78 | cache := New(func(string) (endpoint.Endpoint, io.Closer, error) { | |
79 | return nil, nil, errors.New("bad factory") | |
80 | }, log.NewNopLogger()) | |
81 | ||
82 | cache.Update([]string{"foo:1234", "bar:5678"}) | |
83 | if want, have := 0, len(cache.Endpoints()); want != have { | |
84 | t.Errorf("want %d, have %d", want, have) | |
85 | } | |
86 | } | |
87 | ||
// closer is a signaling channel whose Close method closes it, letting tests
// observe exactly when the cache closes an evicted endpoint.
type closer chan struct{}

// Close implements io.Closer by closing the underlying channel.
func (c closer) Close() error {
	close(c)
	return nil
}
0 | package consul | |
1 | ||
2 | import consul "github.com/hashicorp/consul/api" | |
3 | ||
// Client is a wrapper around the Consul API.
type Client interface {
	// Register a service with the local agent.
	Register(r *consul.AgentServiceRegistration) error

	// Deregister a service with the local agent.
	Deregister(r *consul.AgentServiceRegistration) error

	// Service returns the entries for the given service, optionally filtered
	// by tag and, when passingOnly is true, restricted to instances whose
	// health checks pass.
	Service(service, tag string, passingOnly bool, queryOpts *consul.QueryOptions) ([]*consul.ServiceEntry, *consul.QueryMeta, error)
}
15 | ||
// client adapts a concrete Consul API client to the Client interface.
type client struct {
	consul *consul.Client
}
19 | ||
20 | // NewClient returns an implementation of the Client interface, wrapping a | |
21 | // concrete Consul client. | |
22 | func NewClient(c *consul.Client) Client { | |
23 | return &client{consul: c} | |
24 | } | |
25 | ||
// Register implements Client by delegating to the local agent's service
// registration endpoint.
func (c *client) Register(r *consul.AgentServiceRegistration) error {
	return c.consul.Agent().ServiceRegister(r)
}
29 | ||
// Deregister implements Client. Only r.ID is used to identify the service
// being deregistered.
func (c *client) Deregister(r *consul.AgentServiceRegistration) error {
	return c.consul.Agent().ServiceDeregister(r.ID)
}
33 | ||
// Service implements Client by delegating to the Consul health endpoint,
// which yields service entries together with their health-check status.
func (c *client) Service(service, tag string, passingOnly bool, queryOpts *consul.QueryOptions) ([]*consul.ServiceEntry, *consul.QueryMeta, error) {
	return c.consul.Health().Service(service, tag, passingOnly, queryOpts)
}
0 | package consul | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "io" | |
5 | "reflect" | |
6 | "testing" | |
7 | ||
8 | stdconsul "github.com/hashicorp/consul/api" | |
9 | "golang.org/x/net/context" | |
10 | ||
11 | "github.com/go-kit/kit/endpoint" | |
12 | ) | |
13 | ||
// TestClientRegistration drives the in-memory test client through the full
// register/deregister cycle: the service count must go 0 -> 1 -> 0, and
// duplicate registrations or deregistrations of absent services must error.
func TestClientRegistration(t *testing.T) {
	c := newTestClient(nil)

	// Initially empty.
	services, _, err := c.Service(testRegistration.Name, "", true, &stdconsul.QueryOptions{})
	if err != nil {
		t.Error(err)
	}
	if want, have := 0, len(services); want != have {
		t.Errorf("want %d, have %d", want, have)
	}

	if err := c.Register(testRegistration); err != nil {
		t.Error(err)
	}

	// Registering the exact same service again is a duplicate.
	if err := c.Register(testRegistration); err == nil {
		t.Errorf("want error, have %v", err)
	}

	services, _, err = c.Service(testRegistration.Name, "", true, &stdconsul.QueryOptions{})
	if err != nil {
		t.Error(err)
	}
	if want, have := 1, len(services); want != have {
		t.Errorf("want %d, have %d", want, have)
	}

	if err := c.Deregister(testRegistration); err != nil {
		t.Error(err)
	}

	// Deregistering a service that is no longer present must fail.
	if err := c.Deregister(testRegistration); err == nil {
		t.Errorf("want error, have %v", err)
	}

	services, _, err = c.Service(testRegistration.Name, "", true, &stdconsul.QueryOptions{})
	if err != nil {
		t.Error(err)
	}
	if want, have := 0, len(services); want != have {
		t.Errorf("want %d, have %d", want, have)
	}
}
57 | ||
// testClient is an in-memory Client stub backed by a slice of service entries.
type testClient struct {
	entries []*stdconsul.ServiceEntry
}
61 | ||
62 | func newTestClient(entries []*stdconsul.ServiceEntry) *testClient { | |
63 | return &testClient{ | |
64 | entries: entries, | |
65 | } | |
66 | } | |
67 | ||
// Compile-time assertion that testClient satisfies the Client interface.
var _ Client = &testClient{}
69 | ||
70 | func (c *testClient) Service(service, tag string, _ bool, opts *stdconsul.QueryOptions) ([]*stdconsul.ServiceEntry, *stdconsul.QueryMeta, error) { | |
71 | var results []*stdconsul.ServiceEntry | |
72 | ||
73 | for _, entry := range c.entries { | |
74 | if entry.Service.Service != service { | |
75 | continue | |
76 | } | |
77 | if tag != "" { | |
78 | tagMap := map[string]struct{}{} | |
79 | ||
80 | for _, t := range entry.Service.Tags { | |
81 | tagMap[t] = struct{}{} | |
82 | } | |
83 | ||
84 | if _, ok := tagMap[tag]; !ok { | |
85 | continue | |
86 | } | |
87 | } | |
88 | ||
89 | results = append(results, entry) | |
90 | } | |
91 | ||
92 | return results, &stdconsul.QueryMeta{}, nil | |
93 | } | |
94 | ||
95 | func (c *testClient) Register(r *stdconsul.AgentServiceRegistration) error { | |
96 | toAdd := registration2entry(r) | |
97 | ||
98 | for _, entry := range c.entries { | |
99 | if reflect.DeepEqual(*entry, *toAdd) { | |
100 | return errors.New("duplicate") | |
101 | } | |
102 | } | |
103 | ||
104 | c.entries = append(c.entries, toAdd) | |
105 | return nil | |
106 | } | |
107 | ||
108 | func (c *testClient) Deregister(r *stdconsul.AgentServiceRegistration) error { | |
109 | toDelete := registration2entry(r) | |
110 | ||
111 | var newEntries []*stdconsul.ServiceEntry | |
112 | for _, entry := range c.entries { | |
113 | if reflect.DeepEqual(*entry, *toDelete) { | |
114 | continue | |
115 | } | |
116 | newEntries = append(newEntries, entry) | |
117 | } | |
118 | if len(newEntries) == len(c.entries) { | |
119 | return errors.New("not found") | |
120 | } | |
121 | ||
122 | c.entries = newEntries | |
123 | return nil | |
124 | } | |
125 | ||
// registration2entry converts an agent registration into the ServiceEntry
// shape returned by Service queries, using a fixed dummy node name. Health
// checks are not modeled by the stub.
func registration2entry(r *stdconsul.AgentServiceRegistration) *stdconsul.ServiceEntry {
	return &stdconsul.ServiceEntry{
		Node: &stdconsul.Node{
			Node:    "some-node",
			Address: r.Address,
		},
		Service: &stdconsul.AgentService{
			ID:      r.ID,
			Service: r.Name,
			Tags:    r.Tags,
			Port:    r.Port,
			Address: r.Address,
		},
		// Checks ignored
	}
}
142 | ||
143 | func testFactory(instance string) (endpoint.Endpoint, io.Closer, error) { | |
144 | return func(context.Context, interface{}) (interface{}, error) { | |
145 | return instance, nil | |
146 | }, nil, nil | |
147 | } | |
148 | ||
// testRegistration is the canonical service registration used throughout the
// client tests.
var testRegistration = &stdconsul.AgentServiceRegistration{
	ID:      "my-id",
	Name:    "my-name",
	Tags:    []string{"my-tag-1", "my-tag-2"},
	Port:    12345,
	Address: "my-address",
}
0 | // +build integration | |
1 | ||
2 | package consul | |
3 | ||
4 | import ( | |
5 | "io" | |
6 | "os" | |
7 | "testing" | |
8 | "time" | |
9 | ||
10 | "github.com/go-kit/kit/log" | |
11 | "github.com/go-kit/kit/service" | |
12 | stdconsul "github.com/hashicorp/consul/api" | |
13 | ) | |
14 | ||
// TestIntegration exercises register/subscribe round-trips against a real
// Consul, addressed via the CONSUL_ADDRESS environment variable. It is built
// only under the "integration" build tag.
func TestIntegration(t *testing.T) {
	// Connect to Consul.
	// docker run -p 8500:8500 progrium/consul -server -bootstrap
	consulAddr := os.Getenv("CONSUL_ADDRESS")
	if consulAddr == "" {
		t.Fatal("CONSUL_ADDRESS is not set")
	}
	stdClient, err := stdconsul.NewClient(&stdconsul.Config{
		Address: consulAddr,
	})
	if err != nil {
		t.Fatal(err)
	}
	client := NewClient(stdClient)
	logger := log.NewLogfmtLogger(os.Stderr)

	// Produce a fake service registration.
	r := &stdconsul.AgentServiceRegistration{
		ID:                "my-service-ID",
		Name:              "my-service-name",
		Tags:              []string{"alpha", "beta"},
		Port:              12345,
		Address:           "my-address",
		EnableTagOverride: false,
		// skipping check(s)
	}

	// Build a subscriber on r.Name + r.Tags.
	// NOTE(review): this test appears to predate the sd API in this package:
	// the factory returns service.Service, but the package's NewSubscriber
	// takes an sd.Factory (returning endpoint.Endpoint), returns a single
	// value, and exposes Endpoints(), not Services(). Confirm this file still
	// compiles under the integration build tag and update it to the sd API.
	factory := func(instance string) (service.Service, io.Closer, error) {
		t.Logf("factory invoked for %q", instance)
		return service.Fixed{}, nil, nil
	}
	subscriber, err := NewSubscriber(
		client,
		factory,
		log.NewContext(logger).With("component", "subscriber"),
		r.Name,
		r.Tags,
		true,
	)
	if err != nil {
		t.Fatal(err)
	}

	// Give the initial (non-blocking) fetch a moment to settle.
	time.Sleep(time.Second)

	// Before we publish, we should have no services.
	services, err := subscriber.Services()
	if err != nil {
		t.Error(err)
	}
	if want, have := 0, len(services); want != have {
		t.Errorf("want %d, have %d", want, have)
	}

	// Build a registrar for r.
	registrar := NewRegistrar(client, r, log.NewContext(logger).With("component", "registrar"))
	registrar.Register()
	defer registrar.Deregister()

	// Allow the registration to propagate and the watch to observe it.
	time.Sleep(time.Second)

	// Now we should have one active service.
	services, err = subscriber.Services()
	if err != nil {
		t.Error(err)
	}
	if want, have := 1, len(services); want != have {
		t.Errorf("want %d, have %d", want, have)
	}
}
0 | package consul | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | ||
5 | stdconsul "github.com/hashicorp/consul/api" | |
6 | ||
7 | "github.com/go-kit/kit/log" | |
8 | ) | |
9 | ||
// Registrar registers service instance liveness information to Consul.
type Registrar struct {
	client       Client                              // Consul agent wrapper used for (de)registration
	registration *stdconsul.AgentServiceRegistration // what to register and deregister
	logger       log.Logger                          // pre-annotated with service, tags, and address
}
16 | ||
17 | // NewRegistrar returns a Consul Registrar acting on the provided catalog | |
18 | // registration. | |
19 | func NewRegistrar(client Client, r *stdconsul.AgentServiceRegistration, logger log.Logger) *Registrar { | |
20 | return &Registrar{ | |
21 | client: client, | |
22 | registration: r, | |
23 | logger: log.NewContext(logger).With("service", r.Name, "tags", fmt.Sprint(r.Tags), "address", r.Address), | |
24 | } | |
25 | } | |
26 | ||
27 | // Register implements sd.Registrar interface. | |
28 | func (p *Registrar) Register() { | |
29 | if err := p.client.Register(p.registration); err != nil { | |
30 | p.logger.Log("err", err) | |
31 | } else { | |
32 | p.logger.Log("action", "register") | |
33 | } | |
34 | } | |
35 | ||
36 | // Deregister implements sd.Registrar interface. | |
37 | func (p *Registrar) Deregister() { | |
38 | if err := p.client.Deregister(p.registration); err != nil { | |
39 | p.logger.Log("err", err) | |
40 | } else { | |
41 | p.logger.Log("action", "deregister") | |
42 | } | |
43 | } |
0 | package consul | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | ||
5 | stdconsul "github.com/hashicorp/consul/api" | |
6 | ||
7 | "github.com/go-kit/kit/log" | |
8 | ) | |
9 | ||
10 | func TestRegistrar(t *testing.T) { | |
11 | client := newTestClient([]*stdconsul.ServiceEntry{}) | |
12 | p := NewRegistrar(client, testRegistration, log.NewNopLogger()) | |
13 | if want, have := 0, len(client.entries); want != have { | |
14 | t.Errorf("want %d, have %d", want, have) | |
15 | } | |
16 | ||
17 | p.Register() | |
18 | if want, have := 1, len(client.entries); want != have { | |
19 | t.Errorf("want %d, have %d", want, have) | |
20 | } | |
21 | ||
22 | p.Deregister() | |
23 | if want, have := 0, len(client.entries); want != have { | |
24 | t.Errorf("want %d, have %d", want, have) | |
25 | } | |
26 | } |
0 | package consul | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "io" | |
5 | ||
6 | consul "github.com/hashicorp/consul/api" | |
7 | ||
8 | "github.com/go-kit/kit/endpoint" | |
9 | "github.com/go-kit/kit/log" | |
10 | "github.com/go-kit/kit/sd" | |
11 | "github.com/go-kit/kit/sd/cache" | |
12 | ) | |
13 | ||
14 | const defaultIndex = 0 | |
15 | ||
16 | // Subscriber yields endpoints for a service in Consul. Updates to the service | |
17 | // are watched and will update the Subscriber endpoints. | |
18 | type Subscriber struct { | |
19 | cache *cache.Cache | |
20 | client Client | |
21 | logger log.Logger | |
22 | service string | |
23 | tags []string | |
24 | passingOnly bool | |
25 | endpointsc chan []endpoint.Endpoint | |
26 | quitc chan struct{} | |
27 | } | |
28 | ||
29 | var _ sd.Subscriber = &Subscriber{} | |
30 | ||
31 | // NewSubscriber returns a Consul subscriber which returns endpoints for the | |
32 | // requested service. It only returns instances for which all of the passed tags | |
33 | // are present. | |
34 | func NewSubscriber(client Client, factory sd.Factory, logger log.Logger, service string, tags []string, passingOnly bool) *Subscriber { | |
35 | s := &Subscriber{ | |
36 | cache: cache.New(factory, logger), | |
37 | client: client, | |
38 | logger: log.NewContext(logger).With("service", service, "tags", fmt.Sprint(tags)), | |
39 | service: service, | |
40 | tags: tags, | |
41 | passingOnly: passingOnly, | |
42 | quitc: make(chan struct{}), | |
43 | } | |
44 | ||
45 | instances, index, err := s.getInstances(defaultIndex, nil) | |
46 | if err == nil { | |
47 | s.logger.Log("instances", len(instances)) | |
48 | } else { | |
49 | s.logger.Log("err", err) | |
50 | } | |
51 | ||
52 | s.cache.Update(instances) | |
53 | go s.loop(index) | |
54 | return s | |
55 | } | |
56 | ||
57 | // Endpoints implements the Subscriber interface. | |
58 | func (s *Subscriber) Endpoints() ([]endpoint.Endpoint, error) { | |
59 | return s.cache.Endpoints(), nil | |
60 | } | |
61 | ||
62 | // Stop terminates the subscriber. | |
63 | func (s *Subscriber) Stop() { | |
64 | close(s.quitc) | |
65 | } | |
66 | ||
67 | func (s *Subscriber) loop(lastIndex uint64) { | |
68 | var ( | |
69 | instances []string | |
70 | err error | |
71 | ) | |
72 | for { | |
73 | instances, lastIndex, err = s.getInstances(lastIndex, s.quitc) | |
74 | switch { | |
75 | case err == io.EOF: | |
76 | return // stopped via quitc | |
77 | case err != nil: | |
78 | s.logger.Log("err", err) | |
79 | default: | |
80 | s.cache.Update(instances) | |
81 | } | |
82 | } | |
83 | } | |
84 | ||
// getInstances performs one query against Consul for the service (blocking
// server-side when lastIndex is non-zero), filters secondary tags client-side,
// and returns the instance strings plus the next WaitIndex. If interruptc is
// closed before the query completes it returns io.EOF, signaling a clean stop.
// A nil interruptc (as passed from NewSubscriber) never fires.
func (s *Subscriber) getInstances(lastIndex uint64, interruptc chan struct{}) ([]string, uint64, error) {
	// Consul's query API accepts at most one tag, so query on the first.
	tag := ""
	if len(s.tags) > 0 {
		tag = s.tags[0]
	}

	// Consul doesn't support more than one tag in its service query method.
	// https://github.com/hashicorp/consul/issues/294
	// Hashi suggest prepared queries, but they don't support blocking.
	// https://www.consul.io/docs/agent/http/query.html#execute
	// If we want blocking for efficiency, we must filter tags manually.

	type response struct {
		instances []string
		index     uint64
	}

	// Both channels are buffered so the query goroutine can always complete
	// its send and exit, even when we return early via interruptc.
	var (
		errc = make(chan error, 1)
		resc = make(chan response, 1)
	)

	go func() {
		// WaitIndex makes this a blocking query: it returns when the service
		// changes past lastIndex, or when the server-side wait times out.
		entries, meta, err := s.client.Service(s.service, tag, s.passingOnly, &consul.QueryOptions{
			WaitIndex: lastIndex,
		})
		if err != nil {
			errc <- err
			return
		}
		// Any tags beyond the first are filtered here; see comment above.
		if len(s.tags) > 1 {
			entries = filterEntries(entries, s.tags[1:]...)
		}
		resc <- response{
			instances: makeInstances(entries),
			index:     meta.LastIndex,
		}
	}()

	select {
	case err := <-errc:
		return nil, 0, err
	case res := <-resc:
		return res.instances, res.index, nil
	case <-interruptc:
		return nil, 0, io.EOF // interpreted by loop as "stopped"
	}
}
133 | ||
134 | func filterEntries(entries []*consul.ServiceEntry, tags ...string) []*consul.ServiceEntry { | |
135 | var es []*consul.ServiceEntry | |
136 | ||
137 | ENTRIES: | |
138 | for _, entry := range entries { | |
139 | ts := make(map[string]struct{}, len(entry.Service.Tags)) | |
140 | for _, tag := range entry.Service.Tags { | |
141 | ts[tag] = struct{}{} | |
142 | } | |
143 | ||
144 | for _, tag := range tags { | |
145 | if _, ok := ts[tag]; !ok { | |
146 | continue ENTRIES | |
147 | } | |
148 | } | |
149 | es = append(es, entry) | |
150 | } | |
151 | ||
152 | return es | |
153 | } | |
154 | ||
155 | func makeInstances(entries []*consul.ServiceEntry) []string { | |
156 | instances := make([]string, len(entries)) | |
157 | for i, entry := range entries { | |
158 | addr := entry.Node.Address | |
159 | if entry.Service.Address != "" { | |
160 | addr = entry.Service.Address | |
161 | } | |
162 | instances[i] = fmt.Sprintf("%s:%d", addr, entry.Service.Port) | |
163 | } | |
164 | return instances | |
165 | } |
0 | package consul | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | ||
5 | consul "github.com/hashicorp/consul/api" | |
6 | "golang.org/x/net/context" | |
7 | ||
8 | "github.com/go-kit/kit/log" | |
9 | ) | |
10 | ||
// consulState is a fixture of three registered instances of the "search"
// service: two API instances (tags api/v1 and api/v2) and one DB instance
// whose service-level address overrides its node address.
var consulState = []*consul.ServiceEntry{
	{
		Node: &consul.Node{
			Address: "10.0.0.0",
			Node:    "app00.local",
		},
		Service: &consul.AgentService{
			ID:      "search-api-0",
			Port:    8000,
			Service: "search",
			Tags: []string{
				"api",
				"v1",
			},
		},
	},
	{
		Node: &consul.Node{
			Address: "10.0.0.1",
			Node:    "app01.local",
		},
		Service: &consul.AgentService{
			ID:      "search-api-1",
			Port:    8001,
			Service: "search",
			Tags: []string{
				"api",
				"v2",
			},
		},
	},
	{
		Node: &consul.Node{
			Address: "10.0.0.1",
			Node:    "app01.local",
		},
		Service: &consul.AgentService{
			Address: "10.0.0.10",
			ID:      "search-db-0",
			Port:    9000,
			Service: "search",
			Tags: []string{
				"db",
			},
		},
	},
}
58 | ||
59 | func TestSubscriber(t *testing.T) { | |
60 | var ( | |
61 | logger = log.NewNopLogger() | |
62 | client = newTestClient(consulState) | |
63 | ) | |
64 | ||
65 | s := NewSubscriber(client, testFactory, logger, "search", []string{"api"}, true) | |
66 | defer s.Stop() | |
67 | ||
68 | endpoints, err := s.Endpoints() | |
69 | if err != nil { | |
70 | t.Fatal(err) | |
71 | } | |
72 | ||
73 | if want, have := 2, len(endpoints); want != have { | |
74 | t.Errorf("want %d, have %d", want, have) | |
75 | } | |
76 | } | |
77 | ||
78 | func TestSubscriberNoService(t *testing.T) { | |
79 | var ( | |
80 | logger = log.NewNopLogger() | |
81 | client = newTestClient(consulState) | |
82 | ) | |
83 | ||
84 | s := NewSubscriber(client, testFactory, logger, "feed", []string{}, true) | |
85 | defer s.Stop() | |
86 | ||
87 | endpoints, err := s.Endpoints() | |
88 | if err != nil { | |
89 | t.Fatal(err) | |
90 | } | |
91 | ||
92 | if want, have := 0, len(endpoints); want != have { | |
93 | t.Fatalf("want %d, have %d", want, have) | |
94 | } | |
95 | } | |
96 | ||
97 | func TestSubscriberWithTags(t *testing.T) { | |
98 | var ( | |
99 | logger = log.NewNopLogger() | |
100 | client = newTestClient(consulState) | |
101 | ) | |
102 | ||
103 | s := NewSubscriber(client, testFactory, logger, "search", []string{"api", "v2"}, true) | |
104 | defer s.Stop() | |
105 | ||
106 | endpoints, err := s.Endpoints() | |
107 | if err != nil { | |
108 | t.Fatal(err) | |
109 | } | |
110 | ||
111 | if want, have := 1, len(endpoints); want != have { | |
112 | t.Fatalf("want %d, have %d", want, have) | |
113 | } | |
114 | } | |
115 | ||
116 | func TestSubscriberAddressOverride(t *testing.T) { | |
117 | s := NewSubscriber(newTestClient(consulState), testFactory, log.NewNopLogger(), "search", []string{"db"}, true) | |
118 | defer s.Stop() | |
119 | ||
120 | endpoints, err := s.Endpoints() | |
121 | if err != nil { | |
122 | t.Fatal(err) | |
123 | } | |
124 | ||
125 | if want, have := 1, len(endpoints); want != have { | |
126 | t.Fatalf("want %d, have %d", want, have) | |
127 | } | |
128 | ||
129 | response, err := endpoints[0](context.Background(), struct{}{}) | |
130 | if err != nil { | |
131 | t.Fatal(err) | |
132 | } | |
133 | ||
134 | if want, have := "10.0.0.10:9000", response.(string); want != have { | |
135 | t.Errorf("want %q, have %q", want, have) | |
136 | } | |
137 | } |
0 | package dnssrv | |
1 | ||
2 | import "net" | |
3 | ||
// Lookup is a function that resolves a DNS SRV record to multiple addresses.
// It has the same signature as net.LookupSRV. This package invokes it with
// empty service and proto arguments, so name is expected to be a fully-formed
// SRV name (see the net.LookupSRV documentation for that convention).
type Lookup func(service, proto, name string) (cname string, addrs []*net.SRV, err error)
0 | package dnssrv | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "net" | |
5 | "time" | |
6 | ||
7 | "github.com/go-kit/kit/endpoint" | |
8 | "github.com/go-kit/kit/log" | |
9 | "github.com/go-kit/kit/sd" | |
10 | "github.com/go-kit/kit/sd/cache" | |
11 | ) | |
12 | ||
// Subscriber yields endpoints taken from the named DNS SRV record. The name is
// resolved on a fixed schedule. Priorities and weights are ignored.
type Subscriber struct {
	name   string        // the SRV name that is periodically resolved
	cache  *cache.Cache  // maps instance strings to live endpoints
	logger log.Logger
	quit   chan struct{} // closed by Stop to end the refresh loop
}
21 | ||
22 | // NewSubscriber returns a DNS SRV subscriber. | |
23 | func NewSubscriber( | |
24 | name string, | |
25 | ttl time.Duration, | |
26 | factory sd.Factory, | |
27 | logger log.Logger, | |
28 | ) *Subscriber { | |
29 | return NewSubscriberDetailed(name, time.NewTicker(ttl), net.LookupSRV, factory, logger) | |
30 | } | |
31 | ||
32 | // NewSubscriberDetailed is the same as NewSubscriber, but allows users to | |
33 | // provide an explicit lookup refresh ticker instead of a TTL, and specify the | |
34 | // lookup function instead of using net.LookupSRV. | |
35 | func NewSubscriberDetailed( | |
36 | name string, | |
37 | refresh *time.Ticker, | |
38 | lookup Lookup, | |
39 | factory sd.Factory, | |
40 | logger log.Logger, | |
41 | ) *Subscriber { | |
42 | p := &Subscriber{ | |
43 | name: name, | |
44 | cache: cache.New(factory, logger), | |
45 | logger: logger, | |
46 | quit: make(chan struct{}), | |
47 | } | |
48 | ||
49 | instances, err := p.resolve(lookup) | |
50 | if err == nil { | |
51 | logger.Log("name", name, "instances", len(instances)) | |
52 | } else { | |
53 | logger.Log("name", name, "err", err) | |
54 | } | |
55 | p.cache.Update(instances) | |
56 | ||
57 | go p.loop(refresh, lookup) | |
58 | return p | |
59 | } | |
60 | ||
61 | // Stop terminates the Subscriber. | |
62 | func (p *Subscriber) Stop() { | |
63 | close(p.quit) | |
64 | } | |
65 | ||
66 | func (p *Subscriber) loop(t *time.Ticker, lookup Lookup) { | |
67 | defer t.Stop() | |
68 | for { | |
69 | select { | |
70 | case <-t.C: | |
71 | instances, err := p.resolve(lookup) | |
72 | if err != nil { | |
73 | p.logger.Log("name", p.name, "err", err) | |
74 | continue // don't replace potentially-good with bad | |
75 | } | |
76 | p.cache.Update(instances) | |
77 | ||
78 | case <-p.quit: | |
79 | return | |
80 | } | |
81 | } | |
82 | } | |
83 | ||
84 | // Endpoints implements the Subscriber interface. | |
85 | func (p *Subscriber) Endpoints() ([]endpoint.Endpoint, error) { | |
86 | return p.cache.Endpoints(), nil | |
87 | } | |
88 | ||
89 | func (p *Subscriber) resolve(lookup Lookup) ([]string, error) { | |
90 | _, addrs, err := lookup("", "", p.name) | |
91 | if err != nil { | |
92 | return []string{}, err | |
93 | } | |
94 | instances := make([]string, len(addrs)) | |
95 | for i, addr := range addrs { | |
96 | instances[i] = net.JoinHostPort(addr.Target, fmt.Sprint(addr.Port)) | |
97 | } | |
98 | return instances, nil | |
99 | } |
0 | package dnssrv | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | "net" | |
5 | "sync/atomic" | |
6 | "testing" | |
7 | "time" | |
8 | ||
9 | "github.com/go-kit/kit/endpoint" | |
10 | "github.com/go-kit/kit/log" | |
11 | ) | |
12 | ||
13 | func TestRefresh(t *testing.T) { | |
14 | name := "some.service.internal" | |
15 | ||
16 | ticker := time.NewTicker(time.Second) | |
17 | ticker.Stop() | |
18 | tickc := make(chan time.Time) | |
19 | ticker.C = tickc | |
20 | ||
21 | var lookups uint64 | |
22 | records := []*net.SRV{} | |
23 | lookup := func(service, proto, name string) (string, []*net.SRV, error) { | |
24 | t.Logf("lookup(%q, %q, %q)", service, proto, name) | |
25 | atomic.AddUint64(&lookups, 1) | |
26 | return "cname", records, nil | |
27 | } | |
28 | ||
29 | var generates uint64 | |
30 | factory := func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
31 | t.Logf("factory(%q)", instance) | |
32 | atomic.AddUint64(&generates, 1) | |
33 | return endpoint.Nop, nopCloser{}, nil | |
34 | } | |
35 | ||
36 | subscriber := NewSubscriberDetailed(name, ticker, lookup, factory, log.NewNopLogger()) | |
37 | defer subscriber.Stop() | |
38 | ||
39 | // First lookup, empty | |
40 | endpoints, err := subscriber.Endpoints() | |
41 | if err != nil { | |
42 | t.Error(err) | |
43 | } | |
44 | if want, have := 0, len(endpoints); want != have { | |
45 | t.Errorf("want %d, have %d", want, have) | |
46 | } | |
47 | if want, have := uint64(1), atomic.LoadUint64(&lookups); want != have { | |
48 | t.Errorf("want %d, have %d", want, have) | |
49 | } | |
50 | if want, have := uint64(0), atomic.LoadUint64(&generates); want != have { | |
51 | t.Errorf("want %d, have %d", want, have) | |
52 | } | |
53 | ||
54 | // Load some records and lookup again | |
55 | records = []*net.SRV{ | |
56 | &net.SRV{Target: "1.0.0.1", Port: 1001}, | |
57 | &net.SRV{Target: "1.0.0.2", Port: 1002}, | |
58 | &net.SRV{Target: "1.0.0.3", Port: 1003}, | |
59 | } | |
60 | tickc <- time.Now() | |
61 | ||
62 | // There is a race condition where the subscriber.Endpoints call below | |
63 | // invokes the cache before it is updated by the tick above. | |
64 | // TODO(pb): solve by running the read through the loop goroutine. | |
65 | time.Sleep(100 * time.Millisecond) | |
66 | ||
67 | endpoints, err = subscriber.Endpoints() | |
68 | if err != nil { | |
69 | t.Error(err) | |
70 | } | |
71 | if want, have := 3, len(endpoints); want != have { | |
72 | t.Errorf("want %d, have %d", want, have) | |
73 | } | |
74 | if want, have := uint64(2), atomic.LoadUint64(&lookups); want != have { | |
75 | t.Errorf("want %d, have %d", want, have) | |
76 | } | |
77 | if want, have := uint64(len(records)), atomic.LoadUint64(&generates); want != have { | |
78 | t.Errorf("want %d, have %d", want, have) | |
79 | } | |
80 | } | |
81 | ||
// nopCloser satisfies io.Closer with a no-op Close, so the test factory can
// hand back a non-nil closer.
type nopCloser struct{}

func (nopCloser) Close() error { return nil }
0 | // Package sd provides utilities related to service discovery. That includes | |
1 | // subscribing to service discovery systems in order to reach remote instances, | |
2 | // and publishing to service discovery systems to make an instance available. | |
3 | // Implementations are provided for most common systems. | |
4 | package sd |
0 | package etcd | |
1 | ||
2 | import ( | |
3 | "crypto/tls" | |
4 | "crypto/x509" | |
5 | "io/ioutil" | |
6 | "net" | |
7 | "net/http" | |
8 | "time" | |
9 | ||
10 | etcd "github.com/coreos/etcd/client" | |
11 | "golang.org/x/net/context" | |
12 | ) | |
13 | ||
// Client is a wrapper around the etcd client.
type Client interface {
	// GetEntries queries the given prefix in etcd and returns a set of entries.
	GetEntries(prefix string) ([]string, error)

	// WatchPrefix starts watching every change for the given prefix in etcd.
	// When a change is detected, it sends the *etcd.Response on responseChan.
	WatchPrefix(prefix string, responseChan chan *etcd.Response)
}
23 | ||
// client bundles the etcd KeysAPI with the context used for all of its calls.
// NOTE(review): storing a context in a struct is discouraged by the context
// package guidelines; confirm callers rely on this lifetime before changing.
type client struct {
	keysAPI etcd.KeysAPI
	ctx     context.Context
}
28 | ||
// ClientOptions defines options for the etcd client. TLS is enabled when both
// Cert and Key are set; CaCert is then read to verify the server.
type ClientOptions struct {
	Cert                    string // path to a PEM client certificate
	Key                     string // path to the matching PEM private key
	CaCert                  string // path to the CA certificate; read whenever Cert and Key are set
	DialTimeout             time.Duration
	DialKeepAline           time.Duration // NOTE(review): name is a typo for "KeepAlive", kept for API compatibility
	HeaderTimeoutPerRequest time.Duration
}
38 | ||
39 | // NewClient returns an *etcd.Client with a connection to the named machines. | |
40 | // It will return an error if a connection to the cluster cannot be made. | |
41 | // The parameter machines needs to be a full URL with schemas. | |
42 | // e.g. "http://localhost:2379" will work, but "localhost:2379" will not. | |
43 | func NewClient(ctx context.Context, machines []string, options ClientOptions) (Client, error) { | |
44 | var ( | |
45 | c etcd.KeysAPI | |
46 | err error | |
47 | caCertCt []byte | |
48 | tlsCert tls.Certificate | |
49 | ) | |
50 | ||
51 | if options.Cert != "" && options.Key != "" { | |
52 | tlsCert, err = tls.LoadX509KeyPair(options.Cert, options.Key) | |
53 | if err != nil { | |
54 | return nil, err | |
55 | } | |
56 | ||
57 | caCertCt, err = ioutil.ReadFile(options.CaCert) | |
58 | if err != nil { | |
59 | return nil, err | |
60 | } | |
61 | caCertPool := x509.NewCertPool() | |
62 | caCertPool.AppendCertsFromPEM(caCertCt) | |
63 | ||
64 | tlsConfig := &tls.Config{ | |
65 | Certificates: []tls.Certificate{tlsCert}, | |
66 | RootCAs: caCertPool, | |
67 | } | |
68 | ||
69 | transport := &http.Transport{ | |
70 | TLSClientConfig: tlsConfig, | |
71 | Dial: func(network, addr string) (net.Conn, error) { | |
72 | dial := &net.Dialer{ | |
73 | Timeout: options.DialTimeout, | |
74 | KeepAlive: options.DialKeepAline, | |
75 | } | |
76 | return dial.Dial(network, addr) | |
77 | }, | |
78 | } | |
79 | ||
80 | cfg := etcd.Config{ | |
81 | Endpoints: machines, | |
82 | Transport: transport, | |
83 | HeaderTimeoutPerRequest: options.HeaderTimeoutPerRequest, | |
84 | } | |
85 | ce, err := etcd.New(cfg) | |
86 | if err != nil { | |
87 | return nil, err | |
88 | } | |
89 | c = etcd.NewKeysAPI(ce) | |
90 | } else { | |
91 | cfg := etcd.Config{ | |
92 | Endpoints: machines, | |
93 | Transport: etcd.DefaultTransport, | |
94 | HeaderTimeoutPerRequest: options.HeaderTimeoutPerRequest, | |
95 | } | |
96 | ce, err := etcd.New(cfg) | |
97 | if err != nil { | |
98 | return nil, err | |
99 | } | |
100 | c = etcd.NewKeysAPI(ce) | |
101 | } | |
102 | ||
103 | return &client{c, ctx}, nil | |
104 | } | |
105 | ||
106 | // GetEntries implements the etcd Client interface. | |
107 | func (c *client) GetEntries(key string) ([]string, error) { | |
108 | resp, err := c.keysAPI.Get(c.ctx, key, &etcd.GetOptions{Recursive: true}) | |
109 | if err != nil { | |
110 | return nil, err | |
111 | } | |
112 | ||
113 | entries := make([]string, len(resp.Node.Nodes)) | |
114 | for i, node := range resp.Node.Nodes { | |
115 | entries[i] = node.Value | |
116 | } | |
117 | return entries, nil | |
118 | } | |
119 | ||
120 | // WatchPrefix implements the etcd Client interface. | |
121 | func (c *client) WatchPrefix(prefix string, responseChan chan *etcd.Response) { | |
122 | watch := c.keysAPI.Watcher(prefix, &etcd.WatcherOptions{AfterIndex: 0, Recursive: true}) | |
123 | for { | |
124 | res, err := watch.Next(c.ctx) | |
125 | if err != nil { | |
126 | return | |
127 | } | |
128 | responseChan <- res | |
129 | } | |
130 | } |
0 | package etcd | |
1 | ||
2 | import ( | |
3 | etcd "github.com/coreos/etcd/client" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | "github.com/go-kit/kit/log" | |
7 | "github.com/go-kit/kit/sd" | |
8 | "github.com/go-kit/kit/sd/cache" | |
9 | ) | |
10 | ||
// Subscriber yields endpoints stored in a certain etcd keyspace. Any kind of
// change in that keyspace is watched and will update the Subscriber endpoints.
type Subscriber struct {
	client Client        // etcd API wrapper
	prefix string        // keyspace prefix being watched
	cache  *cache.Cache  // maps instance strings to live endpoints
	logger log.Logger
	quitc  chan struct{} // closed by Stop to end the watch loop
}

var _ sd.Subscriber = &Subscriber{}
22 | ||
23 | // NewSubscriber returns an etcd subscriber. It will start watching the given | |
24 | // prefix for changes, and update the endpoints. | |
25 | func NewSubscriber(c Client, prefix string, factory sd.Factory, logger log.Logger) (*Subscriber, error) { | |
26 | s := &Subscriber{ | |
27 | client: c, | |
28 | prefix: prefix, | |
29 | cache: cache.New(factory, logger), | |
30 | logger: logger, | |
31 | quitc: make(chan struct{}), | |
32 | } | |
33 | ||
34 | instances, err := s.client.GetEntries(s.prefix) | |
35 | if err == nil { | |
36 | logger.Log("prefix", s.prefix, "instances", len(instances)) | |
37 | } else { | |
38 | logger.Log("prefix", s.prefix, "err", err) | |
39 | } | |
40 | s.cache.Update(instances) | |
41 | ||
42 | go s.loop() | |
43 | return s, nil | |
44 | } | |
45 | ||
46 | func (s *Subscriber) loop() { | |
47 | responseChan := make(chan *etcd.Response) | |
48 | go s.client.WatchPrefix(s.prefix, responseChan) | |
49 | for { | |
50 | select { | |
51 | case <-responseChan: | |
52 | instances, err := s.client.GetEntries(s.prefix) | |
53 | if err != nil { | |
54 | s.logger.Log("msg", "failed to retrieve entries", "err", err) | |
55 | continue | |
56 | } | |
57 | s.cache.Update(instances) | |
58 | ||
59 | case <-s.quitc: | |
60 | return | |
61 | } | |
62 | } | |
63 | } | |
64 | ||
65 | // Endpoints implements the Subscriber interface. | |
66 | func (s *Subscriber) Endpoints() ([]endpoint.Endpoint, error) { | |
67 | return s.cache.Endpoints(), nil | |
68 | } | |
69 | ||
70 | // Stop terminates the Subscriber. | |
71 | func (s *Subscriber) Stop() { | |
72 | close(s.quitc) | |
73 | } |
0 | package etcd | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "io" | |
5 | "testing" | |
6 | ||
7 | stdetcd "github.com/coreos/etcd/client" | |
8 | ||
9 | "github.com/go-kit/kit/endpoint" | |
10 | "github.com/go-kit/kit/log" | |
11 | ) | |
12 | ||
var (
	// node is a two-leaf etcd directory fixture rooted at /foo.
	node = &stdetcd.Node{
		Key: "/foo",
		Nodes: []*stdetcd.Node{
			{Key: "/foo/1", Value: "1:1"},
			{Key: "/foo/2", Value: "1:2"},
		},
	}
	// fakeResponse is what the fake client serves for the /foo prefix.
	fakeResponse = &stdetcd.Response{
		Node: node,
	}
)
25 | ||
26 | func TestSubscriber(t *testing.T) { | |
27 | factory := func(string) (endpoint.Endpoint, io.Closer, error) { | |
28 | return endpoint.Nop, nil, nil | |
29 | } | |
30 | ||
31 | client := &fakeClient{ | |
32 | responses: map[string]*stdetcd.Response{"/foo": fakeResponse}, | |
33 | } | |
34 | ||
35 | s, err := NewSubscriber(client, "/foo", factory, log.NewNopLogger()) | |
36 | if err != nil { | |
37 | t.Fatal(err) | |
38 | } | |
39 | defer s.Stop() | |
40 | ||
41 | if _, err := s.Endpoints(); err != nil { | |
42 | t.Fatal(err) | |
43 | } | |
44 | } | |
45 | ||
46 | func TestBadFactory(t *testing.T) { | |
47 | factory := func(string) (endpoint.Endpoint, io.Closer, error) { | |
48 | return nil, nil, errors.New("kaboom") | |
49 | } | |
50 | ||
51 | client := &fakeClient{ | |
52 | responses: map[string]*stdetcd.Response{"/foo": fakeResponse}, | |
53 | } | |
54 | ||
55 | s, err := NewSubscriber(client, "/foo", factory, log.NewNopLogger()) | |
56 | if err != nil { | |
57 | t.Fatal(err) | |
58 | } | |
59 | defer s.Stop() | |
60 | ||
61 | endpoints, err := s.Endpoints() | |
62 | if err != nil { | |
63 | t.Fatal(err) | |
64 | } | |
65 | ||
66 | if want, have := 0, len(endpoints); want != have { | |
67 | t.Errorf("want %d, have %d", want, have) | |
68 | } | |
69 | } | |
70 | ||
// fakeClient is a canned-response Client: GetEntries serves from responses,
// and WatchPrefix never reports a change.
type fakeClient struct {
	responses map[string]*stdetcd.Response
}
74 | ||
75 | func (c *fakeClient) GetEntries(prefix string) ([]string, error) { | |
76 | response, ok := c.responses[prefix] | |
77 | if !ok { | |
78 | return nil, errors.New("key not exist") | |
79 | } | |
80 | ||
81 | entries := make([]string, len(response.Node.Nodes)) | |
82 | for i, node := range response.Node.Nodes { | |
83 | entries[i] = node.Value | |
84 | } | |
85 | return entries, nil | |
86 | } | |
87 | ||
88 | func (c *fakeClient) WatchPrefix(prefix string, responseChan chan *stdetcd.Response) {} |
0 | package sd | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | ) | |
7 | ||
// Factory is a function that converts an instance string (e.g. host:port) to a
// specific endpoint. Instances that provide multiple endpoints require multiple
// factories. A factory also returns an io.Closer that's invoked when the
// instance goes away and needs to be cleaned up. Factories may return nil
// closers.
//
// Users are expected to provide their own factory functions that assume
// specific transports, or can deduce transports by parsing the instance string.
//
// Within this repository, factories are passed to cache.New by the subscriber
// implementations (consul, dnssrv, etcd), which invoke them for each
// discovered instance.
type Factory func(instance string) (endpoint.Endpoint, io.Closer, error)
0 | package sd | |
1 | ||
2 | import "github.com/go-kit/kit/endpoint" | |
3 | ||
// FixedSubscriber yields a fixed set of endpoints. Useful as a static
// Subscriber implementation and in tests.
type FixedSubscriber []endpoint.Endpoint

// Endpoints implements Subscriber.
func (s FixedSubscriber) Endpoints() ([]endpoint.Endpoint, error) { return s, nil }
0 | package lb | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | ) | |
7 | ||
// Balancer yields endpoints according to some heuristic.
type Balancer interface {
	// Endpoint returns the next endpoint to use, or an error — for example,
	// ErrNoEndpoints when the underlying subscriber is empty.
	Endpoint() (endpoint.Endpoint, error)
}

// ErrNoEndpoints is returned when no qualifying endpoints are available.
var ErrNoEndpoints = errors.New("no endpoints available")
0 | // Package lb deals with client-side load balancing across multiple identical | |
1 | // instances of services and endpoints. When combined with a service discovery | |
2 | // system of record, it enables a more decentralized architecture, removing the | |
3 | // need for separate load balancers like HAProxy. | |
4 | package lb |
0 | package lb | |
1 | ||
2 | import ( | |
3 | "math/rand" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | "github.com/go-kit/kit/sd" | |
7 | ) | |
8 | ||
9 | // NewRandom returns a load balancer that selects services randomly. | |
10 | func NewRandom(s sd.Subscriber, seed int64) Balancer { | |
11 | return &random{ | |
12 | s: s, | |
13 | r: rand.New(rand.NewSource(seed)), | |
14 | } | |
15 | } | |
16 | ||
// random selects one of the subscriber's endpoints uniformly at random.
type random struct {
	s sd.Subscriber // source of the current endpoint set
	r *rand.Rand    // private PRNG, seeded at construction
}
21 | ||
22 | func (r *random) Endpoint() (endpoint.Endpoint, error) { | |
23 | endpoints, err := r.s.Endpoints() | |
24 | if err != nil { | |
25 | return nil, err | |
26 | } | |
27 | if len(endpoints) <= 0 { | |
28 | return nil, ErrNoEndpoints | |
29 | } | |
30 | return endpoints[r.r.Intn(len(endpoints))], nil | |
31 | } |
0 | package lb | |
1 | ||
2 | import ( | |
3 | "math" | |
4 | "testing" | |
5 | ||
6 | "github.com/go-kit/kit/endpoint" | |
7 | "github.com/go-kit/kit/sd" | |
8 | "golang.org/x/net/context" | |
9 | ) | |
10 | ||
11 | func TestRandom(t *testing.T) { | |
12 | var ( | |
13 | n = 7 | |
14 | endpoints = make([]endpoint.Endpoint, n) | |
15 | counts = make([]int, n) | |
16 | seed = int64(12345) | |
17 | iterations = 1000000 | |
18 | want = iterations / n | |
19 | tolerance = want / 100 // 1% | |
20 | ) | |
21 | ||
22 | for i := 0; i < n; i++ { | |
23 | i0 := i | |
24 | endpoints[i] = func(context.Context, interface{}) (interface{}, error) { counts[i0]++; return struct{}{}, nil } | |
25 | } | |
26 | ||
27 | subscriber := sd.FixedSubscriber(endpoints) | |
28 | balancer := NewRandom(subscriber, seed) | |
29 | ||
30 | for i := 0; i < iterations; i++ { | |
31 | endpoint, _ := balancer.Endpoint() | |
32 | endpoint(context.Background(), struct{}{}) | |
33 | } | |
34 | ||
35 | for i, have := range counts { | |
36 | delta := int(math.Abs(float64(want - have))) | |
37 | if delta > tolerance { | |
38 | t.Errorf("%d: want %d, have %d, delta %d > %d tolerance", i, want, have, delta, tolerance) | |
39 | } | |
40 | } | |
41 | } | |
42 | ||
43 | func TestRandomNoEndpoints(t *testing.T) { | |
44 | subscriber := sd.FixedSubscriber{} | |
45 | balancer := NewRandom(subscriber, 1415926) | |
46 | _, err := balancer.Endpoint() | |
47 | if want, have := ErrNoEndpoints, err; want != have { | |
48 | t.Errorf("want %v, have %v", want, have) | |
49 | } | |
50 | ||
51 | } |
0 | package lb | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "strings" | |
5 | "time" | |
6 | ||
7 | "golang.org/x/net/context" | |
8 | ||
9 | "github.com/go-kit/kit/endpoint" | |
10 | ) | |
11 | ||
// Retry wraps a service load balancer and returns an endpoint oriented load
// balancer for the specified service method.
// Requests to the endpoint will be automatically load balanced via the load
// balancer. Requests that return errors will be retried until they succeed,
// up to max times, or until the timeout is elapsed, whichever comes first.
func Retry(max int, timeout time.Duration, b Balancer) endpoint.Endpoint {
	if b == nil {
		// Programmer error: a balancer is required, so fail loudly at
		// construction time rather than on first request.
		panic("nil Balancer")
	}
	return func(ctx context.Context, request interface{}) (response interface{}, err error) {
		var (
			newctx, cancel = context.WithTimeout(ctx, timeout)
			responses      = make(chan interface{}, 1)
			errs           = make(chan error, 1)
			a              = []string{} // accumulated error strings, one per failed attempt
		)
		defer cancel()
		for i := 1; i <= max; i++ {
			// Each attempt runs in its own goroutine so a slow endpoint cannot
			// delay observation of the deadline. The result channels are
			// buffered (capacity 1), so an attempt that finishes after we have
			// returned can still deliver its result and exit without leaking.
			go func() {
				e, err := b.Endpoint()
				if err != nil {
					errs <- err
					return
				}
				response, err := e(newctx, request)
				if err != nil {
					errs <- err
					return
				}
				responses <- response
			}()

			select {
			case <-newctx.Done():
				// Timeout or upstream cancellation trumps further retries.
				return nil, newctx.Err()
			case response := <-responses:
				return response, nil
			case err := <-errs:
				// Record the failure and fall through to the next attempt.
				a = append(a, err.Error())
				continue
			}
		}
		// All max attempts failed; report every error we collected.
		return nil, fmt.Errorf("retry attempts exceeded (%s)", strings.Join(a, "; "))
	}
}
0 | package lb_test | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "testing" | |
5 | "time" | |
6 | ||
7 | "golang.org/x/net/context" | |
8 | ||
9 | "github.com/go-kit/kit/endpoint" | |
10 | "github.com/go-kit/kit/sd" | |
11 | loadbalancer "github.com/go-kit/kit/sd/lb" | |
12 | ) | |
13 | ||
14 | func TestRetryMaxTotalFail(t *testing.T) { | |
15 | var ( | |
16 | endpoints = sd.FixedSubscriber{} // no endpoints | |
17 | lb = loadbalancer.NewRoundRobin(endpoints) | |
18 | retry = loadbalancer.Retry(999, time.Second, lb) // lots of retries | |
19 | ctx = context.Background() | |
20 | ) | |
21 | if _, err := retry(ctx, struct{}{}); err == nil { | |
22 | t.Errorf("expected error, got none") // should fail | |
23 | } | |
24 | } | |
25 | ||
26 | func TestRetryMaxPartialFail(t *testing.T) { | |
27 | var ( | |
28 | endpoints = []endpoint.Endpoint{ | |
29 | func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error one") }, | |
30 | func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error two") }, | |
31 | func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil /* OK */ }, | |
32 | } | |
33 | subscriber = sd.FixedSubscriber{ | |
34 | 0: endpoints[0], | |
35 | 1: endpoints[1], | |
36 | 2: endpoints[2], | |
37 | } | |
38 | retries = len(endpoints) - 1 // not quite enough retries | |
39 | lb = loadbalancer.NewRoundRobin(subscriber) | |
40 | ctx = context.Background() | |
41 | ) | |
42 | if _, err := loadbalancer.Retry(retries, time.Second, lb)(ctx, struct{}{}); err == nil { | |
43 | t.Errorf("expected error, got none") | |
44 | } | |
45 | } | |
46 | ||
47 | func TestRetryMaxSuccess(t *testing.T) { | |
48 | var ( | |
49 | endpoints = []endpoint.Endpoint{ | |
50 | func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error one") }, | |
51 | func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error two") }, | |
52 | func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil /* OK */ }, | |
53 | } | |
54 | subscriber = sd.FixedSubscriber{ | |
55 | 0: endpoints[0], | |
56 | 1: endpoints[1], | |
57 | 2: endpoints[2], | |
58 | } | |
59 | retries = len(endpoints) // exactly enough retries | |
60 | lb = loadbalancer.NewRoundRobin(subscriber) | |
61 | ctx = context.Background() | |
62 | ) | |
63 | if _, err := loadbalancer.Retry(retries, time.Second, lb)(ctx, struct{}{}); err != nil { | |
64 | t.Error(err) | |
65 | } | |
66 | } | |
67 | ||
// TestRetryTimeout exercises the deadline path of Retry: the endpoint blocks
// until "flushed" via the step channel, so a prompt flush succeeds and a
// flush delayed past the timeout yields context.DeadlineExceeded.
func TestRetryTimeout(t *testing.T) {
	var (
		step    = make(chan struct{}) // controls when the endpoint returns
		e       = func(context.Context, interface{}) (interface{}, error) { <-step; return struct{}{}, nil }
		timeout = time.Millisecond
		retry   = loadbalancer.Retry(999, timeout, loadbalancer.NewRoundRobin(sd.FixedSubscriber{0: e}))
		errs    = make(chan error, 1)
		invoke  = func() { _, err := retry(context.Background(), struct{}{}); errs <- err }
	)

	go func() { step <- struct{}{} }() // queue up a flush of the endpoint
	invoke()                           // invoke the endpoint and trigger the flush
	if err := <-errs; err != nil {     // that should succeed
		t.Error(err)
	}

	go func() { time.Sleep(10 * timeout); step <- struct{}{} }() // a delayed flush
	invoke()                                                     // invoke the endpoint
	if err := <-errs; err != context.DeadlineExceeded {          // that should not succeed
		t.Errorf("wanted %v, got none", context.DeadlineExceeded)
	}
}
0 | package lb | |
1 | ||
2 | import ( | |
3 | "sync/atomic" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | "github.com/go-kit/kit/sd" | |
7 | ) | |
8 | ||
9 | // NewRoundRobin returns a load balancer that returns services in sequence. | |
10 | func NewRoundRobin(s sd.Subscriber) Balancer { | |
11 | return &roundRobin{ | |
12 | s: s, | |
13 | c: 0, | |
14 | } | |
15 | } | |
16 | ||
// roundRobin cycles through the subscriber's endpoints in order, using an
// atomically incremented counter so Endpoint is safe for concurrent use.
type roundRobin struct {
	s sd.Subscriber
	c uint64 // monotonically increasing ticket; index = (c-1) mod len(endpoints)
}
21 | ||
22 | func (rr *roundRobin) Endpoint() (endpoint.Endpoint, error) { | |
23 | endpoints, err := rr.s.Endpoints() | |
24 | if err != nil { | |
25 | return nil, err | |
26 | } | |
27 | if len(endpoints) <= 0 { | |
28 | return nil, ErrNoEndpoints | |
29 | } | |
30 | old := atomic.AddUint64(&rr.c, 1) - 1 | |
31 | idx := old % uint64(len(endpoints)) | |
32 | return endpoints[idx], nil | |
33 | } |
0 | package lb | |
1 | ||
2 | import ( | |
3 | "reflect" | |
4 | "sync" | |
5 | "sync/atomic" | |
6 | "testing" | |
7 | "time" | |
8 | ||
9 | "golang.org/x/net/context" | |
10 | ||
11 | "github.com/go-kit/kit/endpoint" | |
12 | "github.com/go-kit/kit/sd" | |
13 | ) | |
14 | ||
15 | func TestRoundRobin(t *testing.T) { | |
16 | var ( | |
17 | counts = []int{0, 0, 0} | |
18 | endpoints = []endpoint.Endpoint{ | |
19 | func(context.Context, interface{}) (interface{}, error) { counts[0]++; return struct{}{}, nil }, | |
20 | func(context.Context, interface{}) (interface{}, error) { counts[1]++; return struct{}{}, nil }, | |
21 | func(context.Context, interface{}) (interface{}, error) { counts[2]++; return struct{}{}, nil }, | |
22 | } | |
23 | ) | |
24 | ||
25 | subscriber := sd.FixedSubscriber(endpoints) | |
26 | balancer := NewRoundRobin(subscriber) | |
27 | ||
28 | for i, want := range [][]int{ | |
29 | {1, 0, 0}, | |
30 | {1, 1, 0}, | |
31 | {1, 1, 1}, | |
32 | {2, 1, 1}, | |
33 | {2, 2, 1}, | |
34 | {2, 2, 2}, | |
35 | {3, 2, 2}, | |
36 | } { | |
37 | endpoint, err := balancer.Endpoint() | |
38 | if err != nil { | |
39 | t.Fatal(err) | |
40 | } | |
41 | endpoint(context.Background(), struct{}{}) | |
42 | if have := counts; !reflect.DeepEqual(want, have) { | |
43 | t.Fatalf("%d: want %v, have %v", i, want, have) | |
44 | } | |
45 | } | |
46 | } | |
47 | ||
48 | func TestRoundRobinNoEndpoints(t *testing.T) { | |
49 | subscriber := sd.FixedSubscriber{} | |
50 | balancer := NewRoundRobin(subscriber) | |
51 | _, err := balancer.Endpoint() | |
52 | if want, have := ErrNoEndpoints, err; want != have { | |
53 | t.Errorf("want %v, have %v", want, have) | |
54 | } | |
55 | } | |
56 | ||
57 | func TestRoundRobinNoRace(t *testing.T) { | |
58 | balancer := NewRoundRobin(sd.FixedSubscriber([]endpoint.Endpoint{ | |
59 | endpoint.Nop, | |
60 | endpoint.Nop, | |
61 | endpoint.Nop, | |
62 | endpoint.Nop, | |
63 | endpoint.Nop, | |
64 | })) | |
65 | ||
66 | var ( | |
67 | n = 100 | |
68 | done = make(chan struct{}) | |
69 | wg sync.WaitGroup | |
70 | count uint64 | |
71 | ) | |
72 | ||
73 | wg.Add(n) | |
74 | ||
75 | for i := 0; i < n; i++ { | |
76 | go func() { | |
77 | defer wg.Done() | |
78 | for { | |
79 | select { | |
80 | case <-done: | |
81 | return | |
82 | default: | |
83 | _, _ = balancer.Endpoint() | |
84 | atomic.AddUint64(&count, 1) | |
85 | } | |
86 | } | |
87 | }() | |
88 | } | |
89 | ||
90 | time.Sleep(time.Second) | |
91 | close(done) | |
92 | wg.Wait() | |
93 | ||
94 | t.Logf("made %d calls", atomic.LoadUint64(&count)) | |
95 | } |
0 | package sd | |
1 | ||
// Registrar registers instance information to a service discovery system when
// an instance becomes alive and healthy, and deregisters that information when
// the service becomes unhealthy or goes away.
//
// Registrar implementations exist for various service discovery systems. Note
// that identifying instance information (e.g. host:port) must be given via the
// concrete constructor; this interface merely signals lifecycle changes.
type Registrar interface {
	// Register makes the instance visible in the discovery system.
	Register()
	// Deregister removes the instance from the discovery system.
	Deregister()
}
0 | package sd | |
1 | ||
2 | import "github.com/go-kit/kit/endpoint" | |
3 | ||
// Subscriber listens to a service discovery system and yields a set of
// identical endpoints on demand. An error indicates a problem with connectivity
// to the service discovery system, or within the system itself; a subscriber
// may yield no endpoints without error.
type Subscriber interface {
	// Endpoints returns the current set of endpoints; an empty set with a
	// nil error is a valid result.
	Endpoints() ([]endpoint.Endpoint, error)
}
0 | package zk | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "net" | |
5 | "strings" | |
6 | "time" | |
7 | ||
8 | "github.com/samuel/go-zookeeper/zk" | |
9 | ||
10 | "github.com/go-kit/kit/log" | |
11 | ) | |
12 | ||
var (
	// DefaultACL is the default ACL to use for creating znodes.
	DefaultACL = zk.WorldACL(zk.PermAll)
	// ErrInvalidCredentials is returned by the Credentials option when the
	// user/password combination is incomplete.
	ErrInvalidCredentials = errors.New("invalid credentials provided")
	// ErrClientClosed is returned when an operation is attempted on a client
	// that has been stopped.
	ErrClientClosed = errors.New("client service closed")
)

const (
	// DefaultConnectTimeout is the default timeout to establish a connection to
	// a ZooKeeper node.
	DefaultConnectTimeout = 2 * time.Second
	// DefaultSessionTimeout is the default timeout to keep the current
	// ZooKeeper session alive during a temporary disconnect.
	DefaultSessionTimeout = 5 * time.Second
)
28 | ||
// Client is a wrapper around a lower level ZooKeeper client implementation.
type Client interface {
	// GetEntries should query the provided path in ZooKeeper, place a watch on
	// it and retrieve data from its current child nodes.
	GetEntries(path string) ([]string, <-chan zk.Event, error)
	// CreateParentNodes should try to create the path in case it does not exist
	// yet on ZooKeeper.
	CreateParentNodes(path string) error
	// Stop should properly shut down the client implementation.
	Stop()
}
40 | ||
// clientConfig collects the tunable settings applied by Option functions
// before a connection is established.
type clientConfig struct {
	logger          log.Logger
	acl             []zk.ACL // ACL applied to znodes created by CreateParentNodes
	credentials     []byte   // "user:pass" digest credentials; empty when unauthenticated
	connectTimeout  time.Duration
	sessionTimeout  time.Duration
	rootNodePayload [][]byte // optional payloads for successive parent znodes
	eventHandler    func(zk.Event)
}

// Option functions enable friendly APIs.
type Option func(*clientConfig) error

// client implements Client on top of an embedded zk.Conn plus the resolved
// clientConfig.
type client struct {
	*zk.Conn
	clientConfig
	active bool          // NOTE(review): read/written without synchronization (see Stop) — confirm callers don't race Stop against other methods
	quit   chan struct{} // closed by Stop to end the event-forwarding goroutine
}
60 | ||
61 | // ACL returns an Option specifying a non-default ACL for creating parent nodes. | |
62 | func ACL(acl []zk.ACL) Option { | |
63 | return func(c *clientConfig) error { | |
64 | c.acl = acl | |
65 | return nil | |
66 | } | |
67 | } | |
68 | ||
69 | // Credentials returns an Option specifying a user/password combination which | |
70 | // the client will use to authenticate itself with. | |
71 | func Credentials(user, pass string) Option { | |
72 | return func(c *clientConfig) error { | |
73 | if user == "" || pass == "" { | |
74 | return ErrInvalidCredentials | |
75 | } | |
76 | c.credentials = []byte(user + ":" + pass) | |
77 | return nil | |
78 | } | |
79 | } | |
80 | ||
81 | // ConnectTimeout returns an Option specifying a non-default connection timeout | |
82 | // when we try to establish a connection to a ZooKeeper server. | |
83 | func ConnectTimeout(t time.Duration) Option { | |
84 | return func(c *clientConfig) error { | |
85 | if t.Seconds() < 1 { | |
86 | return errors.New("invalid connect timeout (minimum value is 1 second)") | |
87 | } | |
88 | c.connectTimeout = t | |
89 | return nil | |
90 | } | |
91 | } | |
92 | ||
93 | // SessionTimeout returns an Option specifying a non-default session timeout. | |
94 | func SessionTimeout(t time.Duration) Option { | |
95 | return func(c *clientConfig) error { | |
96 | if t.Seconds() < 1 { | |
97 | return errors.New("invalid session timeout (minimum value is 1 second)") | |
98 | } | |
99 | c.sessionTimeout = t | |
100 | return nil | |
101 | } | |
102 | } | |
103 | ||
104 | // Payload returns an Option specifying non-default data values for each znode | |
105 | // created by CreateParentNodes. | |
106 | func Payload(payload [][]byte) Option { | |
107 | return func(c *clientConfig) error { | |
108 | c.rootNodePayload = payload | |
109 | return nil | |
110 | } | |
111 | } | |
112 | ||
113 | // EventHandler returns an Option specifying a callback function to handle | |
114 | // incoming zk.Event payloads (ZooKeeper connection events). | |
115 | func EventHandler(handler func(zk.Event)) Option { | |
116 | return func(c *clientConfig) error { | |
117 | c.eventHandler = handler | |
118 | return nil | |
119 | } | |
120 | } | |
121 | ||
122 | // NewClient returns a ZooKeeper client with a connection to the server cluster. | |
123 | // It will return an error if the server cluster cannot be resolved. | |
124 | func NewClient(servers []string, logger log.Logger, options ...Option) (Client, error) { | |
125 | defaultEventHandler := func(event zk.Event) { | |
126 | logger.Log("eventtype", event.Type.String(), "server", event.Server, "state", event.State.String(), "err", event.Err) | |
127 | } | |
128 | config := clientConfig{ | |
129 | acl: DefaultACL, | |
130 | connectTimeout: DefaultConnectTimeout, | |
131 | sessionTimeout: DefaultSessionTimeout, | |
132 | eventHandler: defaultEventHandler, | |
133 | logger: logger, | |
134 | } | |
135 | for _, option := range options { | |
136 | if err := option(&config); err != nil { | |
137 | return nil, err | |
138 | } | |
139 | } | |
140 | // dialer overrides the default ZooKeeper library Dialer so we can configure | |
141 | // the connectTimeout. The current library has a hardcoded value of 1 second | |
142 | // and there are reports of race conditions, due to slow DNS resolvers and | |
143 | // other network latency issues. | |
144 | dialer := func(network, address string, _ time.Duration) (net.Conn, error) { | |
145 | return net.DialTimeout(network, address, config.connectTimeout) | |
146 | } | |
147 | conn, eventc, err := zk.Connect(servers, config.sessionTimeout, withLogger(logger), zk.WithDialer(dialer)) | |
148 | ||
149 | if err != nil { | |
150 | return nil, err | |
151 | } | |
152 | ||
153 | if len(config.credentials) > 0 { | |
154 | err = conn.AddAuth("digest", config.credentials) | |
155 | if err != nil { | |
156 | return nil, err | |
157 | } | |
158 | } | |
159 | ||
160 | c := &client{conn, config, true, make(chan struct{})} | |
161 | ||
162 | // Start listening for incoming Event payloads and callback the set | |
163 | // eventHandler. | |
164 | go func() { | |
165 | for { | |
166 | select { | |
167 | case event := <-eventc: | |
168 | config.eventHandler(event) | |
169 | case <-c.quit: | |
170 | return | |
171 | } | |
172 | } | |
173 | }() | |
174 | return c, nil | |
175 | } | |
176 | ||
177 | // CreateParentNodes implements the ZooKeeper Client interface. | |
178 | func (c *client) CreateParentNodes(path string) error { | |
179 | if !c.active { | |
180 | return ErrClientClosed | |
181 | } | |
182 | if path[0] != '/' { | |
183 | return zk.ErrInvalidPath | |
184 | } | |
185 | payload := []byte("") | |
186 | pathString := "" | |
187 | pathNodes := strings.Split(path, "/") | |
188 | for i := 1; i < len(pathNodes); i++ { | |
189 | if i <= len(c.rootNodePayload) { | |
190 | payload = c.rootNodePayload[i-1] | |
191 | } else { | |
192 | payload = []byte("") | |
193 | } | |
194 | pathString += "/" + pathNodes[i] | |
195 | _, err := c.Create(pathString, payload, 0, c.acl) | |
196 | // not being able to create the node because it exists or not having | |
197 | // sufficient rights is not an issue. It is ok for the node to already | |
198 | // exist and/or us to only have read rights | |
199 | if err != nil && err != zk.ErrNodeExists && err != zk.ErrNoAuth { | |
200 | return err | |
201 | } | |
202 | } | |
203 | return nil | |
204 | } | |
205 | ||
206 | // GetEntries implements the ZooKeeper Client interface. | |
207 | func (c *client) GetEntries(path string) ([]string, <-chan zk.Event, error) { | |
208 | // retrieve list of child nodes for given path and add watch to path | |
209 | znodes, _, eventc, err := c.ChildrenW(path) | |
210 | ||
211 | if err != nil { | |
212 | return nil, eventc, err | |
213 | } | |
214 | ||
215 | var resp []string | |
216 | for _, znode := range znodes { | |
217 | // retrieve payload for child znode and add to response array | |
218 | if data, _, err := c.Get(path + "/" + znode); err == nil { | |
219 | resp = append(resp, string(data)) | |
220 | } | |
221 | } | |
222 | return resp, eventc, nil | |
223 | } | |
224 | ||
// Stop implements the ZooKeeper Client interface. It marks the client
// inactive, signals the event-forwarding goroutine to exit, and closes the
// underlying connection.
func (c *client) Stop() {
	c.active = false // NOTE(review): unsynchronized write, read by CreateParentNodes — confirm callers serialize Stop with other calls
	close(c.quit)
	c.Close()
}
0 | package zk | |
1 | ||
2 | import ( | |
3 | "bytes" | |
4 | "testing" | |
5 | "time" | |
6 | ||
7 | stdzk "github.com/samuel/go-zookeeper/zk" | |
8 | ||
9 | "github.com/go-kit/kit/log" | |
10 | ) | |
11 | ||
12 | func TestNewClient(t *testing.T) { | |
13 | var ( | |
14 | acl = stdzk.WorldACL(stdzk.PermRead) | |
15 | connectTimeout = 3 * time.Second | |
16 | sessionTimeout = 20 * time.Second | |
17 | payload = [][]byte{[]byte("Payload"), []byte("Test")} | |
18 | ) | |
19 | ||
20 | c, err := NewClient( | |
21 | []string{"FailThisInvalidHost!!!"}, | |
22 | log.NewNopLogger(), | |
23 | ) | |
24 | if err == nil { | |
25 | t.Errorf("expected error, got nil") | |
26 | } | |
27 | ||
28 | hasFired := false | |
29 | calledEventHandler := make(chan struct{}) | |
30 | eventHandler := func(event stdzk.Event) { | |
31 | if !hasFired { | |
32 | // test is successful if this function has fired at least once | |
33 | hasFired = true | |
34 | close(calledEventHandler) | |
35 | } | |
36 | } | |
37 | ||
38 | c, err = NewClient( | |
39 | []string{"localhost"}, | |
40 | log.NewNopLogger(), | |
41 | ACL(acl), | |
42 | ConnectTimeout(connectTimeout), | |
43 | SessionTimeout(sessionTimeout), | |
44 | Payload(payload), | |
45 | EventHandler(eventHandler), | |
46 | ) | |
47 | if err != nil { | |
48 | t.Fatal(err) | |
49 | } | |
50 | defer c.Stop() | |
51 | ||
52 | clientImpl, ok := c.(*client) | |
53 | if !ok { | |
54 | t.Fatal("retrieved incorrect Client implementation") | |
55 | } | |
56 | if want, have := acl, clientImpl.acl; want[0] != have[0] { | |
57 | t.Errorf("want %+v, have %+v", want, have) | |
58 | } | |
59 | if want, have := connectTimeout, clientImpl.connectTimeout; want != have { | |
60 | t.Errorf("want %d, have %d", want, have) | |
61 | } | |
62 | if want, have := sessionTimeout, clientImpl.sessionTimeout; want != have { | |
63 | t.Errorf("want %d, have %d", want, have) | |
64 | } | |
65 | if want, have := payload, clientImpl.rootNodePayload; bytes.Compare(want[0], have[0]) != 0 || bytes.Compare(want[1], have[1]) != 0 { | |
66 | t.Errorf("want %s, have %s", want, have) | |
67 | } | |
68 | ||
69 | select { | |
70 | case <-calledEventHandler: | |
71 | case <-time.After(100 * time.Millisecond): | |
72 | t.Errorf("event handler never called") | |
73 | } | |
74 | } | |
75 | ||
76 | func TestOptions(t *testing.T) { | |
77 | _, err := NewClient([]string{"localhost"}, log.NewNopLogger(), Credentials("valid", "credentials")) | |
78 | if err != nil && err != stdzk.ErrNoServer { | |
79 | t.Errorf("unexpected error: %v", err) | |
80 | } | |
81 | ||
82 | _, err = NewClient([]string{"localhost"}, log.NewNopLogger(), Credentials("nopass", "")) | |
83 | if want, have := err, ErrInvalidCredentials; want != have { | |
84 | t.Errorf("want %v, have %v", want, have) | |
85 | } | |
86 | ||
87 | _, err = NewClient([]string{"localhost"}, log.NewNopLogger(), ConnectTimeout(0)) | |
88 | if err == nil { | |
89 | t.Errorf("expected connect timeout error") | |
90 | } | |
91 | ||
92 | _, err = NewClient([]string{"localhost"}, log.NewNopLogger(), SessionTimeout(0)) | |
93 | if err == nil { | |
94 | t.Errorf("expected connect timeout error") | |
95 | } | |
96 | } | |
97 | ||
// TestCreateParentNodes walks the client/subscriber error paths against an
// unreachable server: construction succeeds, but any operation that touches
// the wire fails with ErrNoServer, invalid paths fail locally, and a stopped
// client reports ErrClientClosed.
func TestCreateParentNodes(t *testing.T) {
	payload := [][]byte{[]byte("Payload"), []byte("Test")}

	// NOTE(review): port 65500 is assumed to have no listener, so connects
	// fail with ErrNoServer — confirm on constrained CI environments.
	c, err := NewClient([]string{"localhost:65500"}, log.NewNopLogger())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if c == nil {
		t.Fatal("expected new Client, got nil")
	}

	// Valid path, no server: the subscriber cannot be built.
	s, err := NewSubscriber(c, "/validpath", newFactory(""), log.NewNopLogger())
	if err != stdzk.ErrNoServer {
		t.Errorf("unexpected error: %v", err)
	}
	if s != nil {
		t.Error("expected failed new Subscriber")
	}

	// A relative path is rejected locally, before contacting the server.
	s, err = NewSubscriber(c, "invalidpath", newFactory(""), log.NewNopLogger())
	if err != stdzk.ErrInvalidPath {
		t.Errorf("unexpected error: %v", err)
	}
	_, _, err = c.GetEntries("/validpath")
	if err != stdzk.ErrNoServer {
		t.Errorf("unexpected error: %v", err)
	}

	c.Stop()

	// After Stop, every operation reports ErrClientClosed.
	err = c.CreateParentNodes("/validpath")
	if err != ErrClientClosed {
		t.Errorf("unexpected error: %v", err)
	}

	s, err = NewSubscriber(c, "/validpath", newFactory(""), log.NewNopLogger())
	if err != ErrClientClosed {
		t.Errorf("unexpected error: %v", err)
	}
	if s != nil {
		t.Error("expected failed new Subscriber")
	}

	// Same dance with configured payloads; behavior is unchanged.
	c, err = NewClient([]string{"localhost:65500"}, log.NewNopLogger(), Payload(payload))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if c == nil {
		t.Fatal("expected new Client, got nil")
	}

	s, err = NewSubscriber(c, "/validpath", newFactory(""), log.NewNopLogger())
	if err != stdzk.ErrNoServer {
		t.Errorf("unexpected error: %v", err)
	}
	if s != nil {
		t.Error("expected failed new Subscriber")
	}
}
0 | // +build integration | |
1 | ||
2 | package zk | |
3 | ||
4 | import ( | |
5 | "bytes" | |
6 | "flag" | |
7 | "fmt" | |
8 | "os" | |
9 | "testing" | |
10 | "time" | |
11 | ||
12 | stdzk "github.com/samuel/go-zookeeper/zk" | |
13 | ) | |
14 | ||
var (
	// host holds the "localhost:port" address of the test cluster started in
	// TestMain, shared by all integration tests in this file.
	host []string
)
18 | ||
// TestMain starts a single-node ZooKeeper test cluster, records its address
// in the package-level host variable, runs the integration tests, and stops
// the cluster. Stop is called explicitly before os.Exit because os.Exit
// skips deferred calls.
func TestMain(m *testing.M) {
	flag.Parse()

	fmt.Println("Starting ZooKeeper server...")

	ts, err := stdzk.StartTestCluster(1, nil, nil)
	if err != nil {
		fmt.Printf("ZooKeeper server error: %v\n", err)
		os.Exit(1)
	}

	host = []string{fmt.Sprintf("localhost:%d", ts.Servers[0].Port)}
	code := m.Run()

	ts.Stop()
	os.Exit(code)
}
36 | ||
// TestCreateParentNodesOnServer verifies that NewSubscriber creates the
// watched path using the client's configured payloads, and that the payload
// on the deepest node is visible to a second, independent client.
func TestCreateParentNodesOnServer(t *testing.T) {
	payload := [][]byte{[]byte("Payload"), []byte("Test")}
	c1, err := NewClient(host, logger, Payload(payload))
	if err != nil {
		t.Fatalf("Connect returned error: %v", err)
	}
	if c1 == nil {
		t.Fatal("Expected pointer to client, got nil")
	}
	defer c1.Stop()

	s, err := NewSubscriber(c1, path, newFactory(""), logger)
	if err != nil {
		t.Fatalf("Unable to create Subscriber: %v", err)
	}
	defer s.Stop()

	services, err := s.Services()
	if err != nil {
		t.Fatal(err)
	}
	// The path was just created, so no instance znodes exist yet.
	if want, have := 0, len(services); want != have {
		t.Errorf("want %d, have %d", want, have)
	}

	c2, err := NewClient(host, logger)
	if err != nil {
		t.Fatalf("Connect returned error: %v", err)
	}
	defer c2.Stop()
	data, _, err := c2.(*client).Get(path)
	if err != nil {
		t.Fatal(err)
	}
	// test Client implementation of CreateParentNodes. It should have created
	// our payload
	if bytes.Compare(data, payload[1]) != 0 {
		t.Errorf("want %s, have %s", payload[1], data)
	}

}
78 | ||
79 | func TestCreateBadParentNodesOnServer(t *testing.T) { | |
80 | c, _ := NewClient(host, logger) | |
81 | defer c.Stop() | |
82 | ||
83 | _, err := NewSubscriber(c, "invalid/path", newFactory(""), logger) | |
84 | ||
85 | if want, have := stdzk.ErrInvalidPath, err; want != have { | |
86 | t.Errorf("want %v, have %v", want, have) | |
87 | } | |
88 | } | |
89 | ||
90 | func TestCredentials1(t *testing.T) { | |
91 | acl := stdzk.DigestACL(stdzk.PermAll, "user", "secret") | |
92 | c, _ := NewClient(host, logger, ACL(acl), Credentials("user", "secret")) | |
93 | defer c.Stop() | |
94 | ||
95 | _, err := NewSubscriber(c, "/acl-issue-test", newFactory(""), logger) | |
96 | ||
97 | if err != nil { | |
98 | t.Fatal(err) | |
99 | } | |
100 | } | |
101 | ||
102 | func TestCredentials2(t *testing.T) { | |
103 | acl := stdzk.DigestACL(stdzk.PermAll, "user", "secret") | |
104 | c, _ := NewClient(host, logger, ACL(acl)) | |
105 | defer c.Stop() | |
106 | ||
107 | _, err := NewSubscriber(c, "/acl-issue-test", newFactory(""), logger) | |
108 | ||
109 | if err != stdzk.ErrNoAuth { | |
110 | t.Errorf("want %v, have %v", stdzk.ErrNoAuth, err) | |
111 | } | |
112 | } | |
113 | ||
114 | func TestConnection(t *testing.T) { | |
115 | c, _ := NewClient(host, logger) | |
116 | c.Stop() | |
117 | ||
118 | _, err := NewSubscriber(c, "/acl-issue-test", newFactory(""), logger) | |
119 | ||
120 | if err != ErrClientClosed { | |
121 | t.Errorf("want %v, have %v", ErrClientClosed, err) | |
122 | } | |
123 | } | |
124 | ||
125 | func TestGetEntriesOnServer(t *testing.T) { | |
126 | var instancePayload = "protocol://hostname:port/routing" | |
127 | ||
128 | c1, err := NewClient(host, logger) | |
129 | if err != nil { | |
130 | t.Fatalf("Connect returned error: %v", err) | |
131 | } | |
132 | ||
133 | defer c1.Stop() | |
134 | ||
135 | c2, err := NewClient(host, logger) | |
136 | s, err := NewSubscriber(c2, path, newFactory(""), logger) | |
137 | if err != nil { | |
138 | t.Fatal(err) | |
139 | } | |
140 | defer c2.Stop() | |
141 | ||
142 | c2impl, _ := c2.(*client) | |
143 | _, err = c2impl.Create( | |
144 | path+"/instance1", | |
145 | []byte(instancePayload), | |
146 | stdzk.FlagEphemeral|stdzk.FlagSequence, | |
147 | stdzk.WorldACL(stdzk.PermAll), | |
148 | ) | |
149 | if err != nil { | |
150 | t.Fatalf("Unable to create test ephemeral znode 1: %v", err) | |
151 | } | |
152 | _, err = c2impl.Create( | |
153 | path+"/instance2", | |
154 | []byte(instancePayload+"2"), | |
155 | stdzk.FlagEphemeral|stdzk.FlagSequence, | |
156 | stdzk.WorldACL(stdzk.PermAll), | |
157 | ) | |
158 | if err != nil { | |
159 | t.Fatalf("Unable to create test ephemeral znode 2: %v", err) | |
160 | } | |
161 | ||
162 | time.Sleep(50 * time.Millisecond) | |
163 | ||
164 | services, err := s.Services() | |
165 | if err != nil { | |
166 | t.Fatal(err) | |
167 | } | |
168 | if want, have := 2, len(services); want != have { | |
169 | t.Errorf("want %d, have %d", want, have) | |
170 | } | |
171 | } | |
172 | ||
173 | func TestGetEntriesPayloadOnServer(t *testing.T) { | |
174 | c, err := NewClient(host, logger) | |
175 | if err != nil { | |
176 | t.Fatalf("Connect returned error: %v", err) | |
177 | } | |
178 | _, eventc, err := c.GetEntries(path) | |
179 | if err != nil { | |
180 | t.Fatal(err) | |
181 | } | |
182 | _, err = c.(*client).Create( | |
183 | path+"/instance3", | |
184 | []byte("just some payload"), | |
185 | stdzk.FlagEphemeral|stdzk.FlagSequence, | |
186 | stdzk.WorldACL(stdzk.PermAll), | |
187 | ) | |
188 | if err != nil { | |
189 | t.Fatalf("Unable to create test ephemeral znode: %v", err) | |
190 | } | |
191 | select { | |
192 | case event := <-eventc: | |
193 | if want, have := stdzk.EventNodeChildrenChanged.String(), event.Type.String(); want != have { | |
194 | t.Errorf("want %s, have %s", want, have) | |
195 | } | |
196 | case <-time.After(20 * time.Millisecond): | |
197 | t.Errorf("expected incoming watch event, timeout occurred") | |
198 | } | |
199 | ||
200 | } |
0 | package zk | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | ||
5 | "github.com/samuel/go-zookeeper/zk" | |
6 | ||
7 | "github.com/go-kit/kit/log" | |
8 | ) | |
9 | ||
// wrapLogger wraps a Go kit logger so we can use it as the logging service for
// the ZooKeeper library, which expects a Printf method to be available.
type wrapLogger struct {
	log.Logger
}

// Printf adapts the ZooKeeper library's printf-style calls onto the
// structured Go kit logger. Any error from Log is deliberately discarded;
// logging here is best-effort.
func (logger wrapLogger) Printf(format string, args ...interface{}) {
	logger.Log("msg", fmt.Sprintf(format, args...))
}

// withLogger replaces the ZooKeeper library's default logging service with our
// own Go kit logger.
func withLogger(logger log.Logger) func(c *zk.Conn) {
	return func(c *zk.Conn) {
		c.SetLogger(wrapLogger{logger})
	}
}
0 | package zk | |
1 | ||
2 | import ( | |
3 | "github.com/samuel/go-zookeeper/zk" | |
4 | ||
5 | "github.com/go-kit/kit/endpoint" | |
6 | "github.com/go-kit/kit/log" | |
7 | "github.com/go-kit/kit/sd" | |
8 | "github.com/go-kit/kit/sd/cache" | |
9 | ) | |
10 | ||
// Subscriber yields endpoints stored in a certain ZooKeeper path. Any kind of
// change in that path is watched and will update the Subscriber endpoints.
type Subscriber struct {
	client Client        // ZooKeeper client used to read and watch the path
	path   string        // znode path whose children represent instances
	cache  *cache.Cache  // endpoint cache, refreshed on every watch event
	logger log.Logger
	quitc  chan struct{} // closed by Stop to terminate the watch loop
}

// Compile-time check that Subscriber implements the sd.Subscriber interface.
var _ sd.Subscriber = &Subscriber{}
22 | ||
23 | // NewSubscriber returns a ZooKeeper subscriber. ZooKeeper will start watching | |
24 | // the given path for changes and update the Subscriber endpoints. | |
25 | func NewSubscriber(c Client, path string, factory sd.Factory, logger log.Logger) (*Subscriber, error) { | |
26 | s := &Subscriber{ | |
27 | client: c, | |
28 | path: path, | |
29 | cache: cache.New(factory, logger), | |
30 | logger: logger, | |
31 | quitc: make(chan struct{}), | |
32 | } | |
33 | ||
34 | err := s.client.CreateParentNodes(s.path) | |
35 | if err != nil { | |
36 | return nil, err | |
37 | } | |
38 | ||
39 | instances, eventc, err := s.client.GetEntries(s.path) | |
40 | if err != nil { | |
41 | logger.Log("path", s.path, "msg", "failed to retrieve entries", "err", err) | |
42 | return nil, err | |
43 | } | |
44 | logger.Log("path", s.path, "instances", len(instances)) | |
45 | s.cache.Update(instances) | |
46 | ||
47 | go s.loop(eventc) | |
48 | ||
49 | return s, nil | |
50 | } | |
51 | ||
52 | func (s *Subscriber) loop(eventc <-chan zk.Event) { | |
53 | var ( | |
54 | instances []string | |
55 | err error | |
56 | ) | |
57 | for { | |
58 | select { | |
59 | case <-eventc: | |
60 | // We received a path update notification. Call GetEntries to | |
61 | // retrieve child node data, and set a new watch, as ZK watches are | |
62 | // one-time triggers. | |
63 | instances, eventc, err = s.client.GetEntries(s.path) | |
64 | if err != nil { | |
65 | s.logger.Log("path", s.path, "msg", "failed to retrieve entries", "err", err) | |
66 | continue | |
67 | } | |
68 | s.logger.Log("path", s.path, "instances", len(instances)) | |
69 | s.cache.Update(instances) | |
70 | ||
71 | case <-s.quitc: | |
72 | return | |
73 | } | |
74 | } | |
75 | } | |
76 | ||
// Endpoints implements the Subscriber interface. It serves endpoints from the
// local cache maintained by the watch loop; the returned error is always nil.
func (s *Subscriber) Endpoints() ([]endpoint.Endpoint, error) {
	return s.cache.Endpoints(), nil
}
81 | ||
// Stop terminates the Subscriber by closing the quit channel, which stops the
// watch loop. It must be called at most once: a second call would close an
// already-closed channel and panic.
func (s *Subscriber) Stop() {
	close(s.quitc)
}
0 | package zk | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | "time" | |
5 | ) | |
6 | ||
7 | func TestSubscriber(t *testing.T) { | |
8 | client := newFakeClient() | |
9 | ||
10 | s, err := NewSubscriber(client, path, newFactory(""), logger) | |
11 | if err != nil { | |
12 | t.Fatalf("failed to create new Subscriber: %v", err) | |
13 | } | |
14 | defer s.Stop() | |
15 | ||
16 | if _, err := s.Endpoints(); err != nil { | |
17 | t.Fatal(err) | |
18 | } | |
19 | } | |
20 | ||
21 | func TestBadFactory(t *testing.T) { | |
22 | client := newFakeClient() | |
23 | ||
24 | s, err := NewSubscriber(client, path, newFactory("kaboom"), logger) | |
25 | if err != nil { | |
26 | t.Fatalf("failed to create new Subscriber: %v", err) | |
27 | } | |
28 | defer s.Stop() | |
29 | ||
30 | // instance1 came online | |
31 | client.AddService(path+"/instance1", "kaboom") | |
32 | ||
33 | // instance2 came online | |
34 | client.AddService(path+"/instance2", "zookeeper_node_data") | |
35 | ||
36 | if err = asyncTest(100*time.Millisecond, 1, s); err != nil { | |
37 | t.Error(err) | |
38 | } | |
39 | } | |
40 | ||
41 | func TestServiceUpdate(t *testing.T) { | |
42 | client := newFakeClient() | |
43 | ||
44 | s, err := NewSubscriber(client, path, newFactory(""), logger) | |
45 | if err != nil { | |
46 | t.Fatalf("failed to create new Subscriber: %v", err) | |
47 | } | |
48 | defer s.Stop() | |
49 | ||
50 | endpoints, err := s.Endpoints() | |
51 | if err != nil { | |
52 | t.Fatal(err) | |
53 | } | |
54 | if want, have := 0, len(endpoints); want != have { | |
55 | t.Errorf("want %d, have %d", want, have) | |
56 | } | |
57 | ||
58 | // instance1 came online | |
59 | client.AddService(path+"/instance1", "zookeeper_node_data1") | |
60 | ||
61 | // instance2 came online | |
62 | client.AddService(path+"/instance2", "zookeeper_node_data2") | |
63 | ||
64 | // we should have 2 instances | |
65 | if err = asyncTest(100*time.Millisecond, 2, s); err != nil { | |
66 | t.Error(err) | |
67 | } | |
68 | ||
69 | // TODO(pb): this bit is flaky | |
70 | // | |
71 | //// watch triggers an error... | |
72 | //client.SendErrorOnWatch() | |
73 | // | |
74 | //// test if error was consumed | |
75 | //if err = client.ErrorIsConsumedWithin(100 * time.Millisecond); err != nil { | |
76 | // t.Error(err) | |
77 | //} | |
78 | ||
79 | // instance3 came online | |
80 | client.AddService(path+"/instance3", "zookeeper_node_data3") | |
81 | ||
82 | // we should have 3 instances | |
83 | if err = asyncTest(100*time.Millisecond, 3, s); err != nil { | |
84 | t.Error(err) | |
85 | } | |
86 | ||
87 | // instance1 goes offline | |
88 | client.RemoveService(path + "/instance1") | |
89 | ||
90 | // instance2 goes offline | |
91 | client.RemoveService(path + "/instance2") | |
92 | ||
93 | // we should have 1 instance | |
94 | if err = asyncTest(100*time.Millisecond, 1, s); err != nil { | |
95 | t.Error(err) | |
96 | } | |
97 | } | |
98 | ||
99 | func TestBadSubscriberCreate(t *testing.T) { | |
100 | client := newFakeClient() | |
101 | client.SendErrorOnWatch() | |
102 | s, err := NewSubscriber(client, path, newFactory(""), logger) | |
103 | if err == nil { | |
104 | t.Error("expected error on new Subscriber") | |
105 | } | |
106 | if s != nil { | |
107 | t.Error("expected Subscriber not to be created") | |
108 | } | |
109 | s, err = NewSubscriber(client, "BadPath", newFactory(""), logger) | |
110 | if err == nil { | |
111 | t.Error("expected error on new Subscriber") | |
112 | } | |
113 | if s != nil { | |
114 | t.Error("expected Subscriber not to be created") | |
115 | } | |
116 | } |
0 | package zk | |
1 | ||
2 | import ( | |
3 | "errors" | |
4 | "fmt" | |
5 | "io" | |
6 | "sync" | |
7 | "time" | |
8 | ||
9 | "github.com/samuel/go-zookeeper/zk" | |
10 | "golang.org/x/net/context" | |
11 | ||
12 | "github.com/go-kit/kit/endpoint" | |
13 | "github.com/go-kit/kit/log" | |
14 | "github.com/go-kit/kit/sd" | |
15 | ) | |
16 | ||
// Shared fixtures for the tests in this package.
var (
	path   = "/gokit.test/service.name" // znode path used by all tests
	e      = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } // no-op endpoint
	logger = log.NewNopLogger()
)
22 | ||
// fakeClient is an in-memory Client implementation for tests. It records
// registered services and emits a zk.Event on ch whenever the set changes.
type fakeClient struct {
	mtx       sync.Mutex        // guards responses and result
	ch        chan zk.Event     // watch-event channel (buffered, capacity 1)
	responses map[string]string // node path -> payload
	result    bool              // false arms a one-shot error for the next GetEntries
}
29 | ||
30 | func newFakeClient() *fakeClient { | |
31 | return &fakeClient{ | |
32 | ch: make(chan zk.Event, 1), | |
33 | responses: make(map[string]string), | |
34 | result: true, | |
35 | } | |
36 | } | |
37 | ||
38 | func (c *fakeClient) CreateParentNodes(path string) error { | |
39 | if path == "BadPath" { | |
40 | return errors.New("dummy error") | |
41 | } | |
42 | return nil | |
43 | } | |
44 | ||
45 | func (c *fakeClient) GetEntries(path string) ([]string, <-chan zk.Event, error) { | |
46 | c.mtx.Lock() | |
47 | defer c.mtx.Unlock() | |
48 | if c.result == false { | |
49 | c.result = true | |
50 | return []string{}, c.ch, errors.New("dummy error") | |
51 | } | |
52 | responses := []string{} | |
53 | for _, data := range c.responses { | |
54 | responses = append(responses, data) | |
55 | } | |
56 | return responses, c.ch, nil | |
57 | } | |
58 | ||
// AddService registers a service instance and fires a watch event, as a real
// ZooKeeper would when a child znode is created.
// NOTE(review): ch has capacity 1; a second Add/Remove before the previous
// event is consumed will block on the send while holding the lock.
func (c *fakeClient) AddService(node, data string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.responses[node] = data
	c.ch <- zk.Event{}
}
65 | ||
// RemoveService deregisters a service instance and fires a watch event, as a
// real ZooKeeper would when a child znode is deleted. Deleting an unknown
// node is a no-op apart from the event.
func (c *fakeClient) RemoveService(node string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	delete(c.responses, node)
	c.ch <- zk.Event{}
}
72 | ||
// SendErrorOnWatch arms a one-shot error for the next GetEntries call and
// fires a watch event so a listening subscriber re-reads the path and hits
// that error.
func (c *fakeClient) SendErrorOnWatch() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.result = false
	c.ch <- zk.Event{}
}
79 | ||
80 | func (c *fakeClient) ErrorIsConsumedWithin(timeout time.Duration) error { | |
81 | t := time.After(timeout) | |
82 | for { | |
83 | select { | |
84 | case <-t: | |
85 | return fmt.Errorf("expected error not consumed after timeout %s", timeout) | |
86 | default: | |
87 | c.mtx.Lock() | |
88 | if c.result == false { | |
89 | c.mtx.Unlock() | |
90 | return nil | |
91 | } | |
92 | c.mtx.Unlock() | |
93 | } | |
94 | } | |
95 | } | |
96 | ||
// Stop implements the Client interface; the fake holds no resources, so it is a no-op.
func (c *fakeClient) Stop() {}
98 | ||
99 | func newFactory(fakeError string) sd.Factory { | |
100 | return func(instance string) (endpoint.Endpoint, io.Closer, error) { | |
101 | if fakeError == instance { | |
102 | return nil, nil, errors.New(fakeError) | |
103 | } | |
104 | return endpoint.Nop, nil, nil | |
105 | } | |
106 | } | |
107 | ||
108 | func asyncTest(timeout time.Duration, want int, s *Subscriber) (err error) { | |
109 | var endpoints []endpoint.Endpoint | |
110 | have := -1 // want can never be <0 | |
111 | t := time.After(timeout) | |
112 | for { | |
113 | select { | |
114 | case <-t: | |
115 | return fmt.Errorf("want %d, have %d (timeout %s)", want, have, timeout.String()) | |
116 | default: | |
117 | endpoints, err = s.Endpoints() | |
118 | have = len(endpoints) | |
119 | if err != nil || want == have { | |
120 | return | |
121 | } | |
122 | time.Sleep(timeout / 10) | |
123 | } | |
124 | } | |
125 | } |
24 | 24 | } |
25 | 25 | |
26 | 26 | // NewClient constructs a usable Client for a single remote endpoint. |
27 | // Pass an zero-value Protobuf message of the RPC response type as | |
27 | // Pass a zero-value protobuf message of the RPC response type as | 
28 | 28 | // the grpcReply argument. |
29 | 29 | func NewClient( |
30 | 30 | cc *grpc.ClientConn, |
8 | 8 | ) |
9 | 9 | |
10 | 10 | // Handler which should be called from the grpc binding of the service |
11 | // implementation. | |
11 | // implementation. The incoming request parameter, and returned response | |
12 | // parameter, are both gRPC types, not user-domain. | |
12 | 13 | type Handler interface { |
13 | ServeGRPC(context.Context, interface{}) (context.Context, interface{}, error) | |
14 | ServeGRPC(ctx context.Context, request interface{}) (context.Context, interface{}, error) | |
14 | 15 | } |
15 | 16 | |
16 | 17 | // Server wraps an endpoint and implements grpc.Handler. |
24 | 25 | logger log.Logger |
25 | 26 | } |
26 | 27 | |
27 | // NewServer constructs a new server, which implements grpc.Server and wraps | |
28 | // the provided endpoint. | |
28 | // NewServer constructs a new server, which wraps the provided | 
29 | // endpoint and implements the Handler interface. Consumers should write | |
30 | // bindings that adapt the concrete gRPC methods from their compiled protobuf | |
31 | // definitions to individual handlers. Request and response objects are from the | |
32 | // caller's business domain, not gRPC request and reply types. | 
29 | 33 | func NewServer( |
30 | 34 | ctx context.Context, |
31 | 35 | e endpoint.Endpoint, |
67 | 71 | return func(s *Server) { s.logger = logger } |
68 | 72 | } |
69 | 73 | |
70 | // ServeGRPC implements grpc.Handler | |
71 | func (s Server) ServeGRPC(grpcCtx context.Context, r interface{}) (context.Context, interface{}, error) { | |
74 | // ServeGRPC implements the Handler interface. | |
75 | func (s Server) ServeGRPC(grpcCtx context.Context, req interface{}) (context.Context, interface{}, error) { | |
72 | 76 | ctx, cancel := context.WithCancel(s.ctx) |
73 | 77 | defer cancel() |
74 | 78 | |
75 | // retrieve gRPC metadata | |
79 | // Retrieve gRPC metadata. | |
76 | 80 | md, ok := metadata.FromContext(grpcCtx) |
77 | 81 | if !ok { |
78 | 82 | md = metadata.MD{} |
82 | 86 | ctx = f(ctx, &md) |
83 | 87 | } |
84 | 88 | |
85 | // store potentially updated metadata in the gRPC context | |
89 | // Store potentially updated metadata in the gRPC context. | |
86 | 90 | grpcCtx = metadata.NewContext(grpcCtx, md) |
87 | 91 | |
88 | request, err := s.dec(grpcCtx, r) | |
92 | request, err := s.dec(grpcCtx, req) | |
89 | 93 | if err != nil { |
90 | 94 | s.logger.Log("err", err) |
91 | 95 | return grpcCtx, nil, BadRequestError{err} |
101 | 105 | f(ctx, &md) |
102 | 106 | } |
103 | 107 | |
104 | // store potentially updated metadata in the gRPC context | |
108 | // Store potentially updated metadata in the gRPC context. | |
105 | 109 | grpcCtx = metadata.NewContext(grpcCtx, md) |
106 | 110 | |
107 | 111 | grpcResp, err := s.enc(grpcCtx, response) |
109 | 113 | s.logger.Log("err", err) |
110 | 114 | return grpcCtx, nil, err |
111 | 115 | } |
116 | ||
112 | 117 | return grpcCtx, grpcResp, nil |
113 | 118 | } |
114 | 119 |
20 | 20 | bufferedStream bool |
21 | 21 | } |
22 | 22 | |
23 | // NewClient constructs a usable Client for a single remote endpoint. | |
23 | // NewClient constructs a usable Client for a single remote method. | |
24 | 24 | func NewClient( |
25 | 25 | method string, |
26 | 26 | tgt *url.URL, |
64 | 64 | return func(c *Client) { c.bufferedStream = buffered } |
65 | 65 | } |
66 | 66 | |
67 | // Endpoint returns a usable endpoint that will invoke the RPC specified by | |
68 | // the client. | |
67 | // Endpoint returns a usable endpoint that invokes the remote endpoint. | |
69 | 68 | func (c Client) Endpoint() endpoint.Endpoint { |
70 | 69 | return func(ctx context.Context, request interface{}) (interface{}, error) { |
71 | 70 | ctx, cancel := context.WithCancel(ctx) |