Imported Upstream version 0.9.18
SVN-Git Migration
8 years ago
0 | 0 | Metadata-Version: 1.0 |
1 | 1 | Name: lazr.restfulclient |
2 | Version: 0.9.14 | |
2 | Version: 0.9.18 | |
3 | 3 | Summary: This is a template for your lazr package. To start your own lazr package, |
4 | 4 | Home-page: https://launchpad.net/lazr.restfulclient |
5 | 5 | Author: LAZR Developers |
55 | 55 | NEWS for lazr.restfulclient |
56 | 56 | =========================== |
57 | 57 | |
58 | 0.9.18 (2010-06-16) | |
59 | =================== | |
60 | ||
61 | - Made it possible to avoid fetching a representation of every | |
62 | single object looked up from a CollectionWithKeyBasedLookup (by | |
63 | defining .contains_resource_type on the class), potentially | |
64 | improving script performance. | |
65 | ||
66 | 0.9.17 (2010-05-10) | |
67 | =================== | |
68 | ||
69 | - Switched back to asking for compression using the standard | |
70 | Accept-Encoding header. Using the TE header has never worked in a | |
71 | real situation due to HTTP intermediaries. | |
72 | ||
73 | 0.9.16 (2010-05-03) | |
74 | =================== | |
75 | ||
76 | - If a server returns a 502 or 503 error code, lazr.restfulclient | |
77 | will retry its request a configurable number of times in hopes that | |
78 | the error is transient. | |
79 | ||
80 | - It's now possible to invoke lazr.restful destructor methods, with | |
81 | the lp_delete() method. | |
82 | ||
83 | 0.9.15 (2010-04-27) | |
84 | ==================== | |
85 | ||
86 | - Clients will no longer fetch a representation of a collection | |
87 | before invoking a named operation on the collection. | |
88 | ||
58 | 89 | 0.9.14 (2010-04-15) |
59 | 90 | =================== |
60 | 91 |
0 | 0 | =========================== |
1 | 1 | NEWS for lazr.restfulclient |
2 | 2 | =========================== |
3 | ||
4 | 0.9.18 (2010-06-16) | |
5 | =================== | |
6 | ||
7 | - Made it possible to avoid fetching a representation of every | |
8 | single object looked up from a CollectionWithKeyBasedLookup (by | |
9 | defining .contains_resource_type on the class), potentially | |
10 | improving script performance. | |
11 | ||
12 | 0.9.17 (2010-05-10) | |
13 | =================== | |
14 | ||
15 | - Switched back to asking for compression using the standard | |
16 | Accept-Encoding header. Using the TE header has never worked in a | |
17 | real situation due to HTTP intermediaries. | |
18 | ||
19 | 0.9.16 (2010-05-03) | |
20 | =================== | |
21 | ||
22 | - If a server returns a 502 or 503 error code, lazr.restfulclient | |
23 | will retry its request a configurable number of times in hopes that | |
24 | the error is transient. | |
25 | ||
26 | - It's now possible to invoke lazr.restful destructor methods, with | |
27 | the lp_delete() method. | |
28 | ||
29 | 0.9.15 (2010-04-27) | |
30 | ==================== | |
31 | ||
32 | - Clients will no longer fetch a representation of a collection | |
33 | before invoking a named operation on the collection. | |
3 | 34 | |
4 | 35 | 0.9.14 (2010-04-15) |
5 | 36 | =================== |
33 | 33 | import gzip |
34 | 34 | import shutil |
35 | 35 | import tempfile |
36 | # Import sleep directly into the module so we can monkey-patch it | |
37 | # during a test. | |
38 | from time import sleep | |
36 | 39 | from httplib2 import ( |
37 | 40 | FailedToDecompressContent, FileCache, Http, urlnorm) |
38 | 41 | import simplejson |
44 | 47 | from lazr.uri import URI |
45 | 48 | from errors import HTTPError |
46 | 49 | from _json import DatetimeJSONEncoder |
47 | ||
48 | # A drop-in replacement for httplib2's _decompressContent, which looks | |
49 | # in the Transfer-Encoding header instead of in Content-Encoding. | |
50 | def _decompressContent(response, new_content): | |
51 | content = new_content | |
52 | try: | |
53 | encoding = response.get('transfer-encoding', None) | |
54 | if encoding in ['gzip', 'deflate']: | |
55 | if encoding == 'gzip': | |
56 | content = gzip.GzipFile( | |
57 | fileobj=StringIO.StringIO(new_content)).read() | |
58 | if encoding == 'deflate': | |
59 | content = zlib.decompress(content) | |
60 | response['content-length'] = str(len(content)) | |
61 | del response['transfer-encoding'] | |
62 | except IOError: | |
63 | content = "" | |
64 | raise FailedToDecompressContent( | |
65 | ("Content purported to be compressed with %s but failed " | |
66 | "to decompress." % response.get('transfer-encoding')), | |
67 | response, content) | |
68 | return content | |
69 | 50 | |
70 | 51 | # A drop-in replacement for httplib2's safename. |
71 | 52 | from httplib2 import _md5, re_url_scheme, re_slash |
131 | 112 | |
132 | 113 | def _request(self, conn, host, absolute_uri, request_uri, method, body, |
133 | 114 | headers, redirections, cachekey): |
134 | """Manipulate Transfer-Encoding header before sending the request. | |
135 | ||
136 | httplib2 asks for compressed representations in | |
137 | Accept-Encoding. But a different content-encoding means a | |
138 | different ETag, which can cause problems later when we make | |
139 | a conditional request. We don't want to treat a | |
140 | representation differently based on whether or not we asked | |
141 | for a compressed version of it. | |
142 | ||
143 | So we move the compression request from Accept-Encoding to | |
144 | TE. Transfer-encoding compression can be handled | |
145 | transparently, without affecting the ETag. | |
146 | """ | |
147 | if 'accept-encoding' in headers: | |
148 | headers['te'] = 'deflate, gzip' | |
149 | del headers['accept-encoding'] | |
115 | """Use the authorizer to authorize an outgoing request.""" | |
150 | 116 | if headers.has_key('authorization'): |
151 | 117 | # There's an authorization header left over from a |
152 | # previous request that resulted in a redirect. Remove it | |
153 | # and start again. | |
118 | # previous request that resulted in a redirect. Resources | |
119 | # protected by OAuth or HTTP Digest must send a distinct | |
120 | # Authorization header with each request, to prevent | |
121 | # playback attacks. Remove the Authorization header and | |
122 | # start again. | |
154 | 123 | del headers['authorization'] |
155 | 124 | if self.authorizer is not None: |
156 | 125 | self.authorizer.authorizeRequest( |
158 | 127 | return super(RestfulHttp, self)._request( |
159 | 128 | conn, host, absolute_uri, request_uri, method, body, headers, |
160 | 129 | redirections, cachekey) |
161 | ||
162 | def _conn_request(self, conn, request_uri, method, body, headers): | |
163 | """Decompress content using our version of _decompressContent.""" | |
164 | response, content = super(RestfulHttp, self)._conn_request( | |
165 | conn, request_uri, method, body, headers) | |
166 | # Decompress the response, if it was compressed. | |
167 | if method != "HEAD": | |
168 | content = _decompressContent(response, content) | |
169 | return (response, content) | |
170 | 130 | |
171 | 131 | def _getCachedHeader(self, uri, header): |
172 | 132 | """Retrieve a cached value for an HTTP header.""" |
224 | 184 | """A class for making calls to lazr.restful web services.""" |
225 | 185 | |
226 | 186 | NOT_MODIFIED = object() |
187 | MAX_RETRIES = 6 | |
227 | 188 | |
228 | 189 | def __init__(self, service_root, credentials, cache=None, timeout=None, |
229 | proxy_info=None, user_agent=None): | |
190 | proxy_info=None, user_agent=None, max_retries=MAX_RETRIES): | |
230 | 191 | """Initialize, possibly creating a cache. |
231 | 192 | |
232 | 193 | If no cache is provided, a temporary directory will be used as |
241 | 202 | self._connection = service_root.httpFactory( |
242 | 203 | credentials, cache, timeout, proxy_info) |
243 | 204 | self.user_agent = user_agent |
205 | self.max_retries = max_retries | |
206 | ||
207 | def _request_and_retry(self, url, method, body, headers): | |
208 | for retry_count in range(0, self.max_retries+1): | |
209 | response, content = self._connection.request( | |
210 | url, method=method, body=body, headers=headers) | |
211 | if (response.status in [502, 503] | |
212 | and retry_count < self.max_retries): | |
213 | # The server returned a 502 or 503. Sleep for 0, 1, 2, | |
214 | # 4, 8, 16, ... seconds and try again. | |
215 | sleep_for = int(2**(retry_count-1)) | |
216 | sleep(sleep_for) | |
217 | else: | |
218 | break | |
219 | # Either the request succeeded or we gave up. | |
220 | return response, content | |
244 | 221 | |
245 | 222 | def _request(self, url, data=None, method='GET', |
246 | 223 | media_type='application/json', extra_headers=None): |
260 | 237 | if extra_headers is not None: |
261 | 238 | headers.update(extra_headers) |
262 | 239 | # Make the request. |
263 | response, content = self._connection.request( | |
240 | response, content = self._request_and_retry( | |
264 | 241 | str(url), method=method, body=data, headers=headers) |
265 | 242 | if response.status == 304: |
266 | 243 | # The resource didn't change. |
334 | 311 | def delete(self, url): |
335 | 312 | """DELETE the resource at the given URL.""" |
336 | 313 | self._request(url, method='DELETE') |
314 | return None | |
337 | 315 | |
338 | 316 | def patch(self, url, representation, headers=None): |
339 | 317 | """PATCH the object at url with the updated representation.""" |
1 | 1 | Caching |
2 | 2 | ******* |
3 | 3 | |
4 | lazr.restfulclient automatically decompresses the documents it | |
5 | receives, and caches the responses in a temporary directory. | |
4 | lazr.restfulclient automatically caches the responses to its requests | |
5 | in a temporary directory. | |
6 | 6 | |
7 | 7 | >>> import httplib2 |
8 | 8 | >>> httplib2.debuglevel = 1 |
12 | 12 | send: 'GET /1.0/ ... |
13 | 13 | reply: ...200... |
14 | 14 | ... |
15 | header: Transfer-Encoding: deflate | |
16 | ... | |
17 | 15 | header: Content-Type: application/vnd.sun.wadl+xml |
18 | 16 | send: 'GET /1.0/ ... |
19 | 17 | reply: ...200... |
20 | ... | |
21 | header: Transfer-Encoding: deflate | |
22 | 18 | ... |
23 | 19 | header: Content-Type: application/json |
24 | 20 |
109 | 109 | Traceback (most recent call last): |
110 | 110 | ... |
111 | 111 | IndexError: list index out of range |
112 | ||
113 | When are representations fetched? | |
114 | ================================= | |
115 | ||
116 | To avoid unnecessary HTTP requests, a representation of a collection | |
117 | is fetched at the last possible moment. Let's see what that means. | |
118 | ||
119 | >>> import httplib2 | |
120 | >>> httplib2.debuglevel = 1 | |
121 | ||
122 | >>> service = CookbookWebServiceClient() | |
123 | send: ... | |
124 | ... | |
125 | ||
126 | Just accessing a top-level collection doesn't trigger an HTTP request. | |
127 | ||
128 | >>> recipes = service.recipes | |
129 | >>> dishes = service.dishes | |
130 | >>> cookbooks = service.cookbooks | |
131 | ||
132 | Getting the length of the collection, or any entry from the | |
133 | collection, triggers an HTTP request. | |
134 | ||
135 | >>> len(recipes) | |
136 | send: 'GET /1.0/recipes... | |
137 | ... | |
138 | ||
139 | >>> dish = dishes[1] | |
140 | send: 'GET /1.0/dishes... | |
141 | ... | |
142 | ||
143 | Invoking a named operation will also trigger an HTTP request. | |
144 | ||
145 | >>> cookbooks.find_recipes(search="foo") | |
146 | send: ... | |
147 | ... | |
148 | ||
149 | Scoped collections work the same way: just getting a reference to the | |
150 | collection doesn't trigger an HTTP request. | |
151 | ||
152 | >>> recipes = dish.recipes | |
153 | ||
154 | But getting any information about the collection triggers an HTTP request. | |
155 | ||
156 | >>> len(recipes) | |
157 | send: 'GET /1.0/dishes/.../recipes ... | |
158 | ... | |
159 | ||
160 | Cleanup. | |
161 | ||
162 | >>> httplib2.debuglevel = None |
121 | 121 | Refreshing data |
122 | 122 | --------------- |
123 | 123 | |
124 | Here are two objects representing recipe #1. A representation of an | |
125 | entry object is not fetched until the data is needed. We'll fetch a | |
126 | representation for the first object right away... | |
127 | ||
124 | 128 | >>> recipe_copy = service.recipes[1] |
129 | >>> print recipe_copy.instructions | |
130 | Different instructions | |
131 | ||
132 | ...but leave the second object alone. | |
133 | ||
134 | >>> recipe_copy_2 = service.recipes[1] | |
125 | 135 | |
126 | 136 | An entry is automatically refreshed after saving. |
127 | 137 | |
130 | 140 | >>> print recipe.instructions |
131 | 141 | Even newer instructions |
132 | 142 | |
133 | Any other version of that resource will still have the old data. | |
143 | If an old object representing that entry already has a representation, | |
144 | it will still show the old data. | |
134 | 145 | |
135 | 146 | >>> print recipe_copy.instructions |
136 | 147 | Different instructions |
137 | 148 | |
138 | But you can also refresh a resource object manually. | |
149 | If an old object representing that entry doesn't have a representation | |
150 | yet, it will show the new data. | |
151 | ||
152 | >>> print recipe_copy_2.instructions | |
153 | Even newer instructions | |
154 | ||
155 | You can also refresh a resource object manually. | |
139 | 156 | |
140 | 157 | >>> recipe_copy.lp_refresh() |
141 | 158 | >>> print recipe_copy.instructions |
142 | 159 | Even newer instructions |
143 | ||
144 | 160 | |
145 | 161 | Bookmarking an entry |
146 | 162 | -------------------- |
468 | 484 | ... |
469 | 485 | ValueError: You tried to access a resource that you don't have the |
470 | 486 | server-side permission to see. |
487 | ||
488 | Deleting an entry | |
489 | ================= | |
490 | ||
491 | Some entries can be deleted with the lp_delete method. | |
492 | ||
493 | >>> recipe = service.recipes[6] | |
494 | >>> print recipe.lp_delete() | |
495 | None | |
496 | ||
497 | A deleted entry no longer exists. | |
498 | ||
499 | >>> recipe.lp_refresh() | |
500 | Traceback (most recent call last): | |
501 | ... | |
502 | HTTPError: HTTP Error 404: Not Found | |
503 | ... | |
504 | ||
505 | Some entries can't be deleted. | |
506 | ||
507 | >>> cookbook.lp_delete() | |
508 | Traceback (most recent call last): | |
509 | ... | |
510 | HTTPError: HTTP Error 405: Method Not Allowed | |
511 | ... | |
512 | ||
513 | When are representations fetched? | |
514 | ================================= | |
515 | ||
516 | To avoid unnecessary HTTP requests, a representation of an entry is | |
517 | fetched at the last possible moment. Let's see what that means. | |
518 | ||
519 | >>> import httplib2 | |
520 | >>> httplib2.debuglevel = 1 | |
521 | ||
522 | >>> service = CookbookWebServiceClient() | |
523 | send: ... | |
524 | ... | |
525 | ||
526 | Here's an entry we got from a lookup operation on a top-level | |
527 | collection. Just doing the lookup operation doesn't trigger an HTTP | |
528 | request, because CookbookWebServiceClient happens to know that the | |
529 | 'recipes' collection contains recipe objects. | |
530 | ||
531 | >>> recipe1 = service.recipes[1] | |
532 | ||
533 | Here's the dish associated with that original entry. Traversing from | |
534 | one entry to another causes an HTTP request for the first | |
535 | entry. Without this HTTP request, there's no way to know the URL of | |
536 | the second entry. | |
537 | ||
538 | >>> dish = recipe1.dish | |
539 | send: 'GET /1.0/recipes/1 ...' | |
540 | ... | |
541 | ||
542 | Note that this request is a request for the _recipe_, not the dish. We | |
543 | don't need to know anything about the dish yet. And now that we have a | |
544 | representation of the recipe, we can traverse from the recipe to its | |
545 | cookbook without making another request. | |
546 | ||
547 | >>> cookbook = recipe1.cookbook | |
548 | ||
549 | Accessing any information about an entry we've traversed to _will_ | |
550 | cause an HTTP request. | |
551 | ||
552 | >>> print dish.name | |
553 | send: 'GET /1.0/dishes/Roast%20chicken ...' | |
554 | ... | |
555 | Roast chicken | |
556 | ||
557 | Invoking a named operation also causes one (and only one) HTTP | |
558 | request. | |
559 | ||
560 | >>> recipes = cookbook.find_recipes(search="foo") | |
561 | send: 'get /1.0/cookbooks/...ws.op=find_recipes...' | |
562 | ... | |
563 | ||
564 | Even dereferencing an entry from another entry and then invoking a | |
565 | named operation causes only one HTTP request. | |
566 | ||
567 | >>> recipes = recipe1.cookbook.find_recipes(search="bar") | |
568 | send: 'get /1.0/cookbooks/...ws.op=find_recipes...' | |
569 | ... | |
570 | ||
571 | In all cases we are able to delay HTTP requests until the moment we | |
572 | need data that can only be found by making those HTTP requests. If it | |
573 | turns out we never need that data, we've eliminated a request | |
574 | entirely. | |
575 | ||
576 | If CookbookWebServiceClient didn't know that the 'recipes' collection | |
577 | contained recipe objects, then doing a lookup on that collection *would* | |
578 | trigger an HTTP request. There'd simply be no other way to know what | |
579 | kind of object was at the other end of the URL. | |
580 | ||
581 | >>> from lazr.restfulclient.tests.example import RecipeSet | |
582 | >>> old_collection_of = RecipeSet.collection_of | |
583 | >>> RecipeSet.collection_of = None | |
584 | ||
585 | >>> recipe1 = service.recipes[1] | |
586 | send: 'GET /1.0/recipes/1 ...' | |
587 | ... | |
588 | ||
589 | On the plus side, at least accessing this object's properties doesn't | |
590 | require _another_ HTTP request. | |
591 | ||
592 | >>> print recipe1.instructions | |
593 | Modified again. | |
594 | ||
595 | Cleanup. | |
596 | ||
597 | >>> RecipeSet.collection_of = old_collection_of | |
598 | >>> httplib2.debuglevel = 0 |
103 | 103 | ... except Exception, e: |
104 | 104 | ... print e.content |
105 | 105 | price: got 'unicode', expected float, int: u'1.23' |
106 | ||
107 | Named operations on collections don't fetch the collections | |
108 | ----------------------------------------------------------- | |
109 | ||
110 | If you invoke a named operation on a collection, the only HTTP request | |
111 | made is the one for the named operation. You don't have to get a | |
112 | representation of the collection to invoke the operation. | |
113 | ||
114 | >>> import httplib2 | |
115 | >>> httplib2.debuglevel = 1 | |
116 | >>> service = CookbookWebServiceClient() | |
117 | send: ... | |
118 | ... | |
119 | ||
120 | >>> print service.cookbooks.find_recipes( | |
121 | ... search="Chicken", vegetarian=True) | |
122 | send: 'get /1.0/cookbooks?vegetarian=true...' | |
123 | ... | |
124 | ||
125 | Cleanup. | |
126 | ||
127 | >>> httplib2.debuglevel = None |
33 | 33 | import simplejson |
34 | 34 | from StringIO import StringIO |
35 | 35 | import urllib |
36 | from urlparse import urlparse | |
36 | from urlparse import urlparse, urljoin | |
37 | 37 | |
38 | 38 | from lazr.uri import URI |
39 | 39 | from wadllib.application import Resource as WadlResource |
297 | 297 | def __getattr__(self, attr): |
298 | 298 | """Try to retrieve a named operation or parameter of the given name."""
299 | 299 | try: |
300 | return self.lp_get_parameter(attr) | |
300 | return self.lp_get_named_operation(attr) | |
301 | 301 | except KeyError: |
302 | 302 | pass |
303 | 303 | try: |
304 | return self.lp_get_named_operation(attr) | |
304 | return self.lp_get_parameter(attr) | |
305 | 305 | except KeyError: |
306 | 306 | raise AttributeError("'%s' object has no attribute '%s'" |
307 | 307 | % (self.__class__.__name__, attr)) |
379 | 379 | |
380 | 380 | def __init__(self, authorizer, service_root, cache=None, |
381 | 381 | timeout=None, proxy_info=None, version=None, |
382 | base_client_name=''): | |
382 | base_client_name='', max_retries=Browser.MAX_RETRIES): | |
383 | 383 | """Root access to a lazr.restful API. |
384 | 384 | |
385 | 385 | :param credentials: The credentials used to access the service. |
400 | 400 | # Get the WADL definition. |
401 | 401 | self.credentials = authorizer |
402 | 402 | self._browser = Browser( |
403 | self, authorizer, cache, timeout, proxy_info, self._user_agent) | |
403 | self, authorizer, cache, timeout, proxy_info, self._user_agent, | |
404 | max_retries) | |
404 | 405 | self._wadl = self._browser.get_wadl_application(self._root_uri) |
405 | 406 | |
406 | 407 | # Get the root resource. |
607 | 608 | return '<%s at %s>' % ( |
608 | 609 | URI(self.resource_type_link).fragment, self.self_link) |
609 | 610 | |
611 | def lp_delete(self): | |
612 | """Delete the resource.""" | |
613 | return self._root._browser.delete(URI(self.self_link)) | |
614 | ||
610 | 615 | def __str__(self): |
611 | 616 | """Return the URL to the resource.""" |
612 | 617 | return self.self_link |
871 | 876 | if url is None: |
872 | 877 | raise KeyError(key) |
873 | 878 | |
874 | # We don't know what kind of resource this is. Even the | |
875 | # subclass doesn't necessarily know, because some resources | |
876 | # (the person list) are gateways to more than one kind of | |
877 | # resource (people, and teams). The only way to know for sure | |
878 | # is to retrieve a representation of the resource and see how | |
879 | # the resource describes itself. | |
880 | try: | |
881 | representation = simplejson.loads( | |
882 | unicode(self._root._browser.get(url))) | |
883 | except HTTPError, error: | |
884 | # There's no resource corresponding to the given ID. | |
885 | if error.response.status == 404: | |
886 | raise KeyError(key) | |
887 | raise | |
888 | # We know that every lazr.restful resource has a | |
889 | # 'resource_type_link' in its representation. | |
890 | resource_type_link = representation['resource_type_link'] | |
879 | if self.collection_of is not None: | |
880 | # We know what kind of resource is at the other end of the | |
881 | # URL. There's no need to actually fetch that URL until | |
882 | # the user demands it. If the user is invoking a named | |
883 | # operation on this object rather than fetching its data, | |
884 | # this will save us one round trip. | |
885 | representation = None | |
886 | resource_type_link = urljoin( | |
887 | self._root._wadl.markup_url, '#' + self.collection_of) | |
888 | else: | |
889 | # We don't know what kind of resource this is. Either the | |
890 | # subclass wasn't programmed with this knowledge, or | |
891 | # there's simply no way to tell without going to the | |
892 | # server, because the collection contains more than one | |
893 | # kind of resource. The only way to know for sure is to | |
894 | # retrieve a representation of the resource and see how | |
895 | # the resource describes itself. | |
896 | try: | |
897 | representation = simplejson.loads( | |
898 | unicode(self._root._browser.get(url))) | |
899 | except HTTPError, error: | |
900 | # There's no resource corresponding to the given ID. | |
901 | if error.response.status == 404: | |
902 | raise KeyError(key) | |
903 | raise | |
904 | # We know that every lazr.restful resource has a | |
905 | # 'resource_type_link' in its representation. | |
906 | resource_type_link = representation['resource_type_link'] | |
907 | ||
891 | 908 | resource = WadlResource(self._root._wadl, url, resource_type_link) |
892 | 909 | return self._create_bound_resource( |
893 | 910 | self._root, resource, representation=representation, |
894 | 911 | representation_needs_processing=False) |
895 | 912 | |
913 | # If provided, this should be a string designating the ID of a | |
914 | # resource_type from a specific service's WADL file. | |
915 | collection_of = None | |
896 | 916 | |
897 | 917 | def _get_url_from_id(self, key): |
898 | 918 | """Transform the unique ID of an object into its URL.""" |
33 | 33 | return (str(self._root._root_uri.ensureSlash()) |
34 | 34 | + 'cookbooks/' + quote(str(id))) |
35 | 35 | |
36 | collection_of = "cookbook" | |
37 | ||
36 | 38 | |
37 | 39 | class RecipeSet(CollectionWithKeyBasedLookup): |
38 | 40 | """A custom subclass capable of recipe lookup by recipe ID.""" |
40 | 42 | def _get_url_from_id(self, id): |
41 | 43 | """Transform a recipe ID into the URL to a recipe resource.""" |
42 | 44 | return str(self._root._root_uri.ensureSlash()) + 'recipes/' + str(id) |
45 | ||
46 | collection_of = "recipe" | |
43 | 47 | |
44 | 48 | |
45 | 49 | class CookbookWebServiceClient(ServiceRoot): |