HTTP response caching, with TTL and LRU logic (#11342)
This commit is contained in:
parent 10d76ca614
commit dc50a7e369
@ -106,6 +106,8 @@
range.][11135]
- [Added `format` parameter to `Decimal.parse`.][11205]
- [Added `format` parameter to `Float.parse`.][11229]
- [Implemented a cache for HTTP data requests, as well as a per-file response
size limit.][11342]

[10614]: https://github.com/enso-org/enso/pull/10614
[10660]: https://github.com/enso-org/enso/pull/10660
@ -121,6 +123,7 @@
[11135]: https://github.com/enso-org/enso/pull/11135
[11205]: https://github.com/enso-org/enso/pull/11205
[11229]: https://github.com/enso-org/enso/pull/11229
[11342]: https://github.com/enso-org/enso/pull/11342

#### Enso Language & Runtime

@ -12,6 +12,7 @@ import project.Errors.Illegal_Argument.Illegal_Argument
import project.Errors.Problem_Behavior.Problem_Behavior
import project.Internal.Data_Read_Helpers
import project.Meta
import project.Network.HTTP.Cache_Policy.Cache_Policy
import project.Network.HTTP.Header.Header
import project.Network.HTTP.HTTP
import project.Network.HTTP.HTTP_Error.HTTP_Error
@ -51,6 +52,19 @@ from project.System.File_Format import Auto_Detect, File_Format
If set to `Report_Error`, the operation fails with a dataflow error.
If set to `Ignore`, the operation proceeds without errors or warnings.

! Request Caching

Responses to HTTP data requests are cached, and additional requests for the
same resources will use the cache, saving a round-trip call to the remote
server. Two resources are considered the same if the URIs and request
headers are the same. Header order does not affect sameness.

The cache respects the "max-age" and "Age" response headers; see
`Data.fetch` for more details.

The cached values are retained as long as the project remains open. Closing
a project will clear the cache.

> Example
Read the first sheet of an XLSX from disk and convert it into a table.

@ -72,7 +86,7 @@ read : Text | URI | File -> File_Format -> Problem_Behavior -> Any ! File_Error
read path=(Missing_Argument.throw "path") format=Auto_Detect (on_problems : Problem_Behavior = ..Report_Warning) = case path of
_ : Text -> if Data_Read_Helpers.looks_like_uri path then Data_Read_Helpers.fetch_following_data_links path format=format else
read (File.new path) format on_problems
uri : URI -> Data_Read_Helpers.fetch_following_data_links uri format=format
uri : URI -> fetch uri format=format
_ ->
file_obj = File.new path
if file_obj.is_directory then Error.throw (Illegal_Argument.Error "Cannot `read` a directory, use `Data.list`.") else
@ -183,6 +197,32 @@ list (directory:(Text | File)=enso_project.root) (name_filter:Text="") recursive
Defaults to `Auto_Detect`. If `Raw_Response` is selected or if the format
cannot be determined automatically, a raw HTTP `Response` will be returned.

! Request Caching

Responses to HTTP data requests are cached, and additional requests for the
same resources will use the cache, saving a round-trip call to the remote
server. Two resources are considered the same if the URIs and request
headers are the same. Header order does not affect sameness.

The cached values are retained as long as the project remains open. Closing
a project will clear the cache.

The cache respects the "max-age" and "Age" response headers received from
remote servers. These headers are used to determine if the cached value is
fresh or stale. If it is stale, the cached value is removed and a request
is made again to the remote servers.

The following limits are imposed on values stored in the cache:
- Single file limit: a single file can be no more than 10M.
- Total cache size limit: the entire cache can be no more than 10G.

For data responses over the single file limit, you can use `Data.download`
to download the file locally. Download sizes are not constrained by either
limit.

If the entire cache goes over the total cache size limit, the
least-recently-used entries are removed.

> Example
Read from an HTTP endpoint.

@ -198,9 +238,9 @@ list (directory:(Text | File)=enso_project.root) (name_filter:Text="") recursive
@uri (Text_Input display=..Always)
@format Data_Read_Helpers.format_widget_with_raw_response
@headers Header.default_widget
fetch : (URI | Text) -> HTTP_Method -> Vector (Header | Pair Text Text) -> File_Format -> Any ! Request_Error | HTTP_Error
fetch (uri:(URI | Text)=(Missing_Argument.throw "uri")) (method:HTTP_Method=..Get) (headers:(Vector (Header | Pair Text Text))=[]) (format = Auto_Detect) =
Data_Read_Helpers.fetch_following_data_links uri method headers (Data_Read_Helpers.handle_legacy_format "fetch" "format" format)
fetch : (URI | Text) -> HTTP_Method -> Vector (Header | Pair Text Text) -> File_Format -> Cache_Policy -> Any ! Request_Error | HTTP_Error
fetch (uri:(URI | Text)=(Missing_Argument.throw "uri")) (method:HTTP_Method=..Get) (headers:(Vector (Header | Pair Text Text))=[]) (format = Auto_Detect) (cache_policy:Cache_Policy = ..Default) =
Data_Read_Helpers.fetch_following_data_links uri method headers (Data_Read_Helpers.handle_legacy_format "fetch" "format" format) cache_policy=cache_policy

## ALIAS http post, upload
GROUP Output
@ -347,7 +387,7 @@ post (uri:(URI | Text)=(Missing_Argument.throw "uri")) (body:Request_Body=..Empt
download : (URI | Text) -> Writable_File -> HTTP_Method -> Vector (Header | Pair Text Text) -> File ! Request_Error | HTTP_Error
download (uri:(URI | Text)=(Missing_Argument.throw "uri")) file:Writable_File (method:HTTP_Method=..Get) (headers:(Vector (Header | Pair Text Text))=[]) =
Context.Output.if_enabled disabled_message="As writing is disabled, cannot download to a file. Press the Write button ▶ to perform the operation." panic=False <|
response = HTTP.fetch uri method headers
response = HTTP.fetch uri method headers cache_policy=Cache_Policy.No_Cache
case Data_Link.is_data_link response.body.metadata of
True ->
# If the resource was a data link, we follow it, download the target data and try to write it to a file.

@ -12,6 +12,7 @@ import project.Enso_Cloud.Errors.Not_Logged_In
import project.Enso_Cloud.Internal.Authentication
import project.Error.Error
import project.Function.Function
import project.Network.HTTP.Cache_Policy.Cache_Policy
import project.Network.HTTP.Header.Header
import project.Network.HTTP.HTTP
import project.Network.HTTP.HTTP_Error.HTTP_Error
@ -95,7 +96,7 @@ http_request (method : HTTP_Method) (url : URI) (body : Request_Body = ..Empty)
all_headers = [authorization_header] + additional_headers
as_connection_error err = Error.throw (Enso_Cloud_Error.Connection_Error err)

response = HTTP.new.request (Request.new method url headers=all_headers body=body) error_on_failure_code=False
response = HTTP.new.request (Request.new method url headers=all_headers body=body) cache_policy=..No_Cache error_on_failure_code=False
. catch HTTP_Error as_connection_error
. catch Request_Error as_connection_error
if response.is_error && (retries > 0) then http_request method url body additional_headers error_handlers (retries - 1) else

@ -15,6 +15,7 @@ polyglot java import java.lang.ArithmeticException
polyglot java import java.lang.ClassCastException
polyglot java import java.lang.OutOfMemoryError
polyglot java import org.enso.base.CompareException
polyglot java import org.enso.base.cache.ResponseTooLargeException

## An error indicating that no value was found.
type Not_Found
@ -553,3 +554,25 @@ type Out_Of_Range
to_text self =
extra = if self.message.is_nothing then "" else ": "+self.message.to_text
"(Out_Of_Range (value = "+self.value.to_text+")" + extra + ")"

## Indicates that the response from a remote endpoint is over the size limit.
type Response_Too_Large
## PRIVATE
Error limit:Integer

## PRIVATE
Create a human-readable version of the error.
to_display_text : Text
to_display_text self =
suggestion = " Use `Data.fetch` with `cache_policy=No_Cache`, or use `Data.download` to fetch the data to a local file, and `Data.read` to read the file."
"Response too large: response size is over the limit ("+self.limit.to_text+")" + suggestion

## PRIVATE
to_text : Text
to_text self =
"(Response_Too_Large (limit = "+self.limit.to_text+")" + ")"

## PRIVATE
Convert the Java exception to an Enso dataflow error.
handle_java_exception ~action =
Panic.catch ResponseTooLargeException action (cause-> Error.throw (Response_Too_Large.Error cause.payload.getLimit))

@ -10,10 +10,12 @@ import project.Errors.Deprecated.Deprecated
import project.Errors.Problem_Behavior.Problem_Behavior
import project.Metadata.Display
import project.Metadata.Widget
import project.Network.HTTP.Cache_Policy.Cache_Policy
import project.Network.HTTP.HTTP
import project.Network.HTTP.HTTP_Error.HTTP_Error
import project.Network.HTTP.HTTP_Method.HTTP_Method
import project.Network.URI.URI
import project.Nothing.Nothing
import project.Warning.Warning
from project.Data import Raw_Response
from project.Data.Boolean import Boolean, False, True
@ -31,9 +33,9 @@ looks_like_uri path:Text -> Boolean =
## PRIVATE
A common implementation for fetching a resource and decoding it,
following encountered data links.
fetch_following_data_links (uri:URI) (method:HTTP_Method = HTTP_Method.Get) (headers:Vector = []) format =
fetch_following_data_links (uri:URI) (method:HTTP_Method = ..Get) (headers:Vector = []) format (cache_policy:Cache_Policy = ..Default) =
fetch_and_decode =
response = HTTP.fetch uri method headers
response = HTTP.fetch uri method headers cache_policy=cache_policy
decode_http_response_following_data_links response format

error_handler attempt =

@ -1,19 +1,24 @@
import project.Any.Any
import project.Data.Dictionary.Dictionary
import project.Data.Hashset.Hashset
import project.Data.Numbers.Integer
import project.Data.Pair.Pair
import project.Data.Sort_Direction.Sort_Direction
import project.Data.Text.Encoding.Encoding
import project.Data.Text.Text
import project.Data.Time.Date_Time.Date_Time
import project.Data.Time.Duration.Duration
import project.Data.Vector.No_Wrap
import project.Data.Vector.Vector
import project.Enso_Cloud.Enso_Secret.Enso_Secret
import project.Error.Error
import project.Errors.Common.Forbidden_Operation
import project.Errors.Common.Response_Too_Large
import project.Errors.Illegal_Argument.Illegal_Argument
import project.Errors.Unimplemented.Unimplemented
import project.Function.Function
import project.Meta
import project.Network.HTTP.Cache_Policy.Cache_Policy
import project.Network.HTTP.Header.Header
import project.Network.HTTP.HTTP_Error.HTTP_Error
import project.Network.HTTP.HTTP_Method.HTTP_Method
@ -44,6 +49,7 @@ polyglot java import java.net.http.HttpRequest.Builder
polyglot java import java.net.InetSocketAddress
polyglot java import java.net.ProxySelector
polyglot java import javax.net.ssl.SSLContext
polyglot java import org.enso.base.enso_cloud.EnsoHTTPResponseCache
polyglot java import org.enso.base.enso_cloud.EnsoSecretHelper
polyglot java import org.enso.base.file_system.File_Utils
polyglot java import org.enso.base.net.http.MultipartBodyBuilder
@ -52,10 +58,14 @@ polyglot java import org.enso.base.net.http.UrlencodedBodyBuilder
type HTTP
## PRIVATE
Static helper for get-like methods
fetch : (URI | Text) -> HTTP_Method -> Vector (Header | Pair Text Text) -> Response ! Request_Error | HTTP_Error
fetch (uri:(URI | Text)) (method:HTTP_Method=..Get) (headers:(Vector (Header | Pair Text Text))=[]) = if_fetch_method method <|

! Response caching

See `Data.fetch` for information about response caching.
fetch : (URI | Text) -> HTTP_Method -> Vector (Header | Pair Text Text) -> Cache_Policy -> Response ! Request_Error | HTTP_Error
fetch (uri:(URI | Text)) (method:HTTP_Method=..Get) (headers:(Vector (Header | Pair Text Text))=[]) (cache_policy:Cache_Policy = ..Default) = if_fetch_method method <|
request = Request.new method uri (Header.unify_vector headers) Request_Body.Empty
HTTP.new.request request
HTTP.new.request request cache_policy=cache_policy

## PRIVATE
Static helper for post-like methods
@ -117,23 +127,33 @@ type HTTP
Please note, this must be closed after use (either directly or via the
helpers on Response_Body).

! Response caching

See `Data.fetch` for information about response caching.

Arguments:
- req: The HTTP request to send using `self` HTTP client.
- error_on_failure_code: Whether or not to throw an error if the response
code is not a success code.
request : Request -> Boolean -> Response ! Request_Error | HTTP_Error | Illegal_Argument
request self req error_on_failure_code=True =
request : Request -> Boolean -> Cache_Policy -> Response ! Request_Error | HTTP_Error | Illegal_Argument | Response_Too_Large
request self req error_on_failure_code=True (cache_policy:Cache_Policy = ..Default) =
# Prevent request if the method is a write-like method and output context is disabled.
check_output_context ~action =
if (if_fetch_method req.method True if_not=Context.Output.is_enabled) then action else
Error.throw (Forbidden_Operation.Error ("As writing is disabled, " + req.method.to_text + " request not sent. Press the Write button ▶ to send it."))
# The cache can only be explicitly requested for GET requests.
check_cache_policy ~action =
cache_policy_value_ok = req.method == HTTP_Method.Get || cache_policy != Cache_Policy.Use_Cache
if cache_policy_value_ok then action else
Error.throw (Illegal_Argument.Error "Cannot specify cache policy for a "+req.method.to_text+" request")

handle_request_error =
handler caught_panic =
exception = caught_panic.payload
Error.throw (Request_Error.Error (Meta.type_of exception . to_text) exception.getMessage)
Panic.catch IllegalArgumentException handler=handler <| Panic.catch IOException handler=handler

handle_request_error <| Illegal_Argument.handle_java_exception <| check_output_context <|
handle_request_error <| Illegal_Argument.handle_java_exception <| check_output_context <| check_cache_policy <| Response_Too_Large.handle_java_exception <|
headers = _resolve_headers req
headers.if_not_error <|
resolved_body = _resolve_body req.body self.hash_method
@ -147,12 +167,25 @@ type HTTP
all_headers = headers + boundary_header_list
mapped_headers = all_headers.map on_problems=No_Wrap .to_java_pair

response = Response.Value (EnsoSecretHelper.makeRequest (self.make_client self resolved_body.hash) builder req.uri.to_java_representation mapped_headers)
response = Response.Value (EnsoSecretHelper.makeRequest (self.make_client self resolved_body.hash) builder req.uri.to_java_representation mapped_headers (cache_policy.should_use_cache req))
if error_on_failure_code.not || response.code.is_success then response else
body = response.body.decode_as_text.catch Any _->""
message = if body.is_empty then Nothing else body
Error.throw (HTTP_Error.Status_Error response.code message response.uri)

## ALIAS flush
ICON temp
Clear the HTTP response cache.

> Example
Clear the HTTP response cache.

import Standard.Base.Network.HTTP

HTTP.clear_response_cache
clear_response_cache : Nothing
clear_response_cache -> Nothing = EnsoHTTPResponseCache.clear

## PRIVATE
ADVANCED
Create a copy of the HTTP client with a custom SSL context.

@ -0,0 +1,19 @@
import project.Network.HTTP.HTTP_Method.HTTP_Method
import project.Network.HTTP.Request.Request
from project.Data.Boolean import Boolean, False, True

type Cache_Policy
## Use the default policy for the HTTP method of the request.
Default

## Use the response cache.
Use_Cache

## Don't use the response cache.
No_Cache

# Default to using the cache for GET requests, unless explicitly disabled
should_use_cache self request:Request -> Boolean = case self of
Cache_Policy.Default -> request.method == HTTP_Method.Get
Cache_Policy.Use_Cache -> True
Cache_Policy.No_Cache -> False
@ -160,4 +160,28 @@ public final class Stream_Utils {
outputStreamLike.write(b, off, len);
}
}

/**
* Copies the contents of the input stream to the output stream. If the number of bytes copied is
* greater than maxLength, abort the copy and return false; otherwise return true.
*/
public static boolean limitedCopy(
InputStream inputStream, OutputStream outputStream, long maxLength) throws IOException {
byte[] buffer = new byte[4096];
long numBytesRead = 0;
while (true) {
int n = inputStream.read(buffer);
if (n <= 0) {
break;
}
if (numBytesRead + n <= maxLength) {
outputStream.write(buffer, 0, n);
}
numBytesRead += n;
if (numBytesRead > maxLength) {
return false;
}
}
return true;
}
}
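
As a quick illustration of the boundary behaviour of `limitedCopy`, here is a minimal, self-contained sketch; the byte counts and in-memory streams are invented for illustration, and only `Stream_Utils.limitedCopy` itself comes from the change above.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.enso.base.Stream_Utils;

class LimitedCopyDemo {
  public static void main(String[] args) throws IOException {
    byte[] payload = new byte[5000];

    // Within the limit: all 5000 bytes are copied and the call reports success.
    var okSink = new ByteArrayOutputStream();
    boolean ok = Stream_Utils.limitedCopy(new ByteArrayInputStream(payload), okSink, 10_000);
    System.out.println(ok + " " + okSink.size()); // prints: true 5000

    // Over the limit: writing stops once the limit is hit and the call reports
    // failure, which LRUCache translates into a ResponseTooLargeException.
    var truncatedSink = new ByteArrayOutputStream();
    boolean fits = Stream_Utils.limitedCopy(new ByteArrayInputStream(payload), truncatedSink, 4096);
    System.out.println(fits); // prints: false
  }
}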
std-bits/base/src/main/java/org/enso/base/cache/LRUCache.java (vendored, new file, 353 lines)
@ -0,0 +1,353 @@
package org.enso.base.cache;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.function.Predicate;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import org.enso.base.Stream_Utils;

/**
* LRUCache is a cache for data presented via InputStreams. Files are deleted on JVM exit.
*
* <p>It puts limits on the size of files that can be requested, and on the total cache size,
* deleting entries to make space for new ones. All cache files are set to be deleted automatically
* on JVM exit.
*
* @param <M> Additional metadata to associate with the data.
*/
public class LRUCache<M> {
private static final Logger logger = Logger.getLogger(LRUCache.class.getName());

private final long maxFileSize;
private final long maxTotalCacheSize;

private final CacheTestParameters cacheTestParameters = new CacheTestParameters();

private final Map<String, CacheEntry<M>> cache = new HashMap<>();
private final Map<String, ZonedDateTime> lastUsed = new HashMap<>();

public LRUCache(long maxFileSize, long maxTotalCacheSize) {
this.maxFileSize = maxFileSize;
this.maxTotalCacheSize = maxTotalCacheSize;
}

public CacheResult<M> getResult(ItemBuilder<M> itemBuilder)
throws IOException, InterruptedException, ResponseTooLargeException {
String cacheKey = itemBuilder.makeCacheKey();
if (cache.containsKey(cacheKey)) {
return getResultForCacheEntry(cacheKey);
} else {
return makeRequestAndCache(cacheKey, itemBuilder);
}
}

/**
* IOExceptions thrown by the HTTP request are propagated; IOExceptions thrown while storing the
* data in the cache are caught, and the request is re-issued without caching.
*/
private CacheResult<M> makeRequestAndCache(String cacheKey, ItemBuilder<M> itemBuilder)
throws IOException, InterruptedException, ResponseTooLargeException {
assert !cache.containsKey(cacheKey);

Item<M> item = itemBuilder.buildItem();

if (!item.shouldCache()) {
return new CacheResult<>(item.stream(), item.metadata());
}

if (item.sizeMaybe().isPresent()) {
long size = item.sizeMaybe().get();
if (size > getMaxFileSize()) {
throw new ResponseTooLargeException(getMaxFileSize());
}
makeRoomFor(size);
}

try {
// Download the response data.
File responseData = downloadResponseData(cacheKey, item);
M metadata = item.metadata();
long size = responseData.length();
ZonedDateTime expiry = getNow().plus(Duration.ofSeconds(item.ttl().get()));

// Create a cache entry.
var cacheEntry = new CacheEntry<>(responseData, metadata, size, expiry);
cache.put(cacheKey, cacheEntry);
markCacheEntryUsed(cacheKey);

// Clear out old entries to satisfy the total cache size limit. This might
// be necessary here if we didn't receive a correct content size value.
removeFilesToSatisfyLimit();

return getResultForCacheEntry(cacheKey);
} catch (IOException e) {
logger.log(
Level.WARNING, "Failure storing cache entry; will re-execute without caching: {0}", e);
// Re-issue the request since we don't know if we've consumed any of the response.
Item<M> rerequested = itemBuilder.buildItem();
return new CacheResult<>(rerequested.stream(), rerequested.metadata());
}
}

/** Mark cache entry used and return a stream reading from the cache file. */
private CacheResult<M> getResultForCacheEntry(String cacheKey) throws IOException {
markCacheEntryUsed(cacheKey);
return new CacheResult<>(
new FileInputStream(cache.get(cacheKey).responseData()), cache.get(cacheKey).metadata());
}

/**
* Read the response data from the remote server into the cache file. If the downloaded data is
* over the file size limit, throw a ResponseTooLargeException.
*/
private File downloadResponseData(String cacheKey, Item<M> item)
throws IOException, ResponseTooLargeException {
File temp = File.createTempFile("LRUCache-" + cacheKey, "");
temp.deleteOnExit();
var inputStream = item.stream();
var outputStream = new FileOutputStream(temp);
boolean successful = false;
try {
// Limit the download to getMaxFileSize().
boolean sizeOK = Stream_Utils.limitedCopy(inputStream, outputStream, getMaxFileSize());

if (sizeOK) {
successful = true;
return temp;
} else {
throw new ResponseTooLargeException(getMaxFileSize());
}
} finally {
outputStream.close();
if (!successful) {
if (!temp.delete()) {
logger.log(Level.WARNING, "Unable to delete cache file (key {0})", cacheKey);
}
}
}
}

/** Mark the entry with the current time, to maintain LRU data. */
private void markCacheEntryUsed(String cacheKey) {
lastUsed.put(cacheKey, getNow());
}

/** Remove all cache entries (and their files) that have passed their TTL. */
private void removeStaleEntries() {
var now = getNow();
removeCacheEntriesByPredicate(e -> e.expiry().isBefore(now));
}

/** Remove all cache entries (and their files). */
public void clear() {
removeCacheEntriesByPredicate(e -> true);
}

/** Remove all cache entries (and their cache files) that match the predicate. */
private void removeCacheEntriesByPredicate(Predicate<CacheEntry<M>> predicate) {
List<Map.Entry<String, CacheEntry<M>>> toRemove =
cache.entrySet().stream()
.filter(me -> predicate.test(me.getValue()))
.collect(Collectors.toList());
removeCacheEntries(toRemove);
}

/** Remove a set of cache entries. */
private void removeCacheEntries(List<Map.Entry<String, CacheEntry<M>>> toRemove) {
for (var entry : toRemove) {
removeCacheEntry(entry);
}
}

/** Remove a cache entry: from `cache`, `lastUsed`, and the filesystem. */
private void removeCacheEntry(Map.Entry<String, CacheEntry<M>> toRemove) {
var key = toRemove.getKey();
var value = toRemove.getValue();
cache.remove(key);
lastUsed.remove(key);
removeCacheFile(key, value);
}

/** Remove a cache file. */
private void removeCacheFile(String key, CacheEntry<M> cacheEntry) {
boolean removed = cacheEntry.responseData().delete();
if (!removed) {
logger.log(Level.WARNING, "Unable to delete cache file for key {0}", key);
}
}

/** Remove least-recently used entries until there is enough room for a new file. */
private void makeRoomFor(long newFileSize) {
removeStaleEntries();

long totalSize = getTotalCacheSize() + newFileSize;
long maxTotalCacheSize = getMaxTotalCacheSize();
if (totalSize <= maxTotalCacheSize) {
return;
}

// Remove least-recently used entries first.
var sortedEntries = getSortedEntries();
var toRemove = new ArrayList<Map.Entry<String, CacheEntry<M>>>();
for (var mapEntry : sortedEntries) {
if (totalSize <= maxTotalCacheSize) {
break;
}
toRemove.add(mapEntry);
totalSize -= mapEntry.getValue().size();
}
assert totalSize <= maxTotalCacheSize;
removeCacheEntries(toRemove);
}

private SortedSet<Map.Entry<String, CacheEntry<M>>> getSortedEntries() {
var sortedEntries = new TreeSet<Map.Entry<String, CacheEntry<M>>>(cacheEntryLRUComparator);
sortedEntries.addAll(cache.entrySet());
return sortedEntries;
}

/** Remove least-recently used entries until the total cache size is under the limit. */
private void removeFilesToSatisfyLimit() {
makeRoomFor(0L);
}

private long getTotalCacheSize() {
return cache.values().stream().collect(Collectors.summingLong(e -> e.size()));
}

private long getMaxFileSize() {
return cacheTestParameters.getMaxFileSizeOverrideTestOnly().orElse(maxFileSize);
}

private long getMaxTotalCacheSize() {
return cacheTestParameters.getMaxTotalCacheSizeOverrideTestOnly().orElse(maxTotalCacheSize);
}

public int getNumEntries() {
return cache.size();
}

public List<Long> getFileSizesTestOnly() {
return new ArrayList<>(
cache.values().stream().map(CacheEntry::size).collect(Collectors.toList()));
}

private ZonedDateTime getNow() {
return cacheTestParameters.getNowOverrideTestOnly().orElse(ZonedDateTime.now());
}

/** Return a set of parameters that can be used to modify settings for testing purposes. */
public CacheTestParameters getCacheTestParameters() {
return cacheTestParameters;
}

private record CacheEntry<M>(File responseData, M metadata, long size, ZonedDateTime expiry) {}

/**
* A record to define the contents and properties of something to be cached.
*
* @param stream The InputStream providing the contents of the thing to be cached.
* @param sizeMaybe (Optional) The size of the data provided by the InputStream
* @param ttl (Optional) The time for which the data is fresh. If the returned Item has no TTL,
* the item will not be cached at all.
*/
public record Item<M>(
InputStream stream, M metadata, Optional<Long> sizeMaybe, Optional<Integer> ttl) {

public boolean shouldCache() {
return ttl.isPresent();
}
}

public record CacheResult<M>(InputStream inputStream, M metadata) {}

/** Wraps code that creates an Item to be cached. */
public interface ItemBuilder<M> {
/** Generate a unique key for the Item */
String makeCacheKey();

/**
* Creates the Item to be cached. Returning an Item with no TTL indicates that the data should
* not be cached. This is only called when the Item is not already present in the cache.
*/
Item<M> buildItem() throws IOException, InterruptedException;
}

private final Comparator<Map.Entry<String, CacheEntry<M>>> cacheEntryLRUComparator =
Comparator.comparing(me -> lastUsed.get(me.getKey()));

/** A set of parameters that can be used to modify cache settings for testing purposes. */
public class CacheTestParameters {
/** This value is used for the current time when testing TTL expiration logic. */
private Optional<ZonedDateTime> nowOverrideTestOnly = Optional.empty();

/**
* Used for testing file and cache size limits. These cannot be set to values larger than the
* real limits.
*/
private Optional<Long> maxFileSizeOverrideTestOnly = Optional.empty();

private Optional<Long> maxTotalCacheSizeOverrideTestOnly = Optional.empty();

public Optional<ZonedDateTime> getNowOverrideTestOnly() {
return nowOverrideTestOnly;
}

public void setNowOverrideTestOnly(ZonedDateTime nowOverride) {
nowOverrideTestOnly = Optional.of(nowOverride);
}

public void clearNowOverrideTestOnly() {
nowOverrideTestOnly = Optional.empty();
}

public Optional<Long> getMaxFileSizeOverrideTestOnly() {
return maxFileSizeOverrideTestOnly;
}

public void setMaxFileSizeOverrideTestOnly(long maxFileSizeOverrideTestOnly_) {
if (maxFileSizeOverrideTestOnly_ > maxFileSize) {
throw new IllegalArgumentException(
"Cannot set the (test-only) maximum file size to more than the allowed limit of "
+ maxFileSize);
}
maxFileSizeOverrideTestOnly = Optional.of(maxFileSizeOverrideTestOnly_);
}

public void clearMaxFileSizeOverrideTestOnly() {
maxFileSizeOverrideTestOnly = Optional.empty();
}

public Optional<Long> getMaxTotalCacheSizeOverrideTestOnly() {
return maxTotalCacheSizeOverrideTestOnly;
}

public void setMaxTotalCacheSizeOverrideTestOnly(long maxTotalCacheSizeOverrideTestOnly_) {
if (maxTotalCacheSizeOverrideTestOnly_ > maxTotalCacheSize) {
throw new IllegalArgumentException(
"Cannot set the (test-only) total cache size to more than the allowed limit of "
+ maxTotalCacheSize);
}
maxTotalCacheSizeOverrideTestOnly = Optional.of(maxTotalCacheSizeOverrideTestOnly_);
}

public void clearMaxTotalCacheSizeOverrideTestOnly() {
maxTotalCacheSizeOverrideTestOnly = Optional.empty();
}
}
}
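
To make the `ItemBuilder` contract concrete, a minimal, hypothetical usage sketch follows; the key, metadata value, payload and TTL are invented for illustration. `getResult` replays the cached file when the key is already present and calls `buildItem` otherwise.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.Optional;
import org.enso.base.cache.LRUCache;

class LRUCacheDemo {
  static InputStream fetchCached(LRUCache<String> cache) throws Exception {
    var result = cache.getResult(new LRUCache.ItemBuilder<String>() {
      @Override
      public String makeCacheKey() {
        return "example-key"; // must uniquely identify the item
      }

      @Override
      public LRUCache.Item<String> buildItem() {
        // An empty TTL here would mean "do not cache this item".
        byte[] bytes = "hello".getBytes();
        return new LRUCache.Item<>(
            new ByteArrayInputStream(bytes), "some-metadata",
            Optional.of((long) bytes.length), Optional.of(60));
      }
    });
    return result.inputStream(); // a second call with the same key reads the cache file
  }
}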
std-bits/base/src/main/java/org/enso/base/cache/ResponseTooLargeException.java (vendored, new file, 15 lines)
@ -0,0 +1,15 @@
package org.enso.base.cache;

public class ResponseTooLargeException extends Exception {
private final long limit;

public ResponseTooLargeException(long limit) {
super("Response too large: response size is over the limit (" + limit + ")");

this.limit = limit;
}

public long getLimit() {
return limit;
}
}
@ -0,0 +1,163 @@
package org.enso.base.enso_cloud;

import java.io.IOException;
import java.io.InputStream;
import java.net.http.HttpHeaders;
import java.util.List;
import java.util.Optional;
import org.enso.base.cache.LRUCache;
import org.enso.base.cache.ResponseTooLargeException;

/**
* EnsoHTTPResponseCache is a cache for EnsoHttpResponse values that respects the cache control HTTP
* headers received in the original response to a request.
*
* <p>It uses LRUCache, so it also puts limits on the size of files that can be requested, and on
* the total cache size, deleting entries to make space for new ones. All cache files are set to be
* deleted automatically on JVM exit.
*
* <p>Without caching, EnsoHttpResponse contains an InputStream providing the response data. When
* there is a cache hit, this stream reads from the local file storing the cached data. When there
* is no cache hit, the InputStream is connected directly to the remote server.
*/
public class EnsoHTTPResponseCache {
// 1 year.
private static final int DEFAULT_TTL_SECONDS = 31536000;
private static final long MAX_FILE_SIZE = 2L * 1024 * 1024 * 1024;
private static final long MAX_TOTAL_CACHE_SIZE = 20L * 1024 * 1024 * 1024;

private static final LRUCache<Metadata> lruCache =
new LRUCache<>(MAX_FILE_SIZE, MAX_TOTAL_CACHE_SIZE);

public static EnsoHttpResponse makeRequest(RequestMaker requestMaker)
throws IOException, InterruptedException, ResponseTooLargeException {
var itemBuilder = new ItemBuilder(requestMaker);

LRUCache.CacheResult<Metadata> cacheResult = lruCache.getResult(itemBuilder);

return requestMaker.reconstructResponseFromCachedStream(
cacheResult.inputStream(), cacheResult.metadata());
}

public static class ItemBuilder implements LRUCache.ItemBuilder<Metadata> {
private final RequestMaker requestMaker;

ItemBuilder(RequestMaker requestMaker) {
this.requestMaker = requestMaker;
}

@Override
public String makeCacheKey() {
return requestMaker.hashKey();
}

/** Only HTTP 200 responses are cached; all others are returned uncached. */
@Override
public LRUCache.Item<Metadata> buildItem() throws IOException, InterruptedException {
var response = requestMaker.makeRequest();

if (response.statusCode() != 200) {
// Don't cache non-200 responses.
return new LRUCache.Item<>(
response.body(),
new Metadata(response.headers(), response.statusCode()),
Optional.empty(),
Optional.empty());
} else {
InputStream inputStream = response.body();
var metadata = new Metadata(response.headers(), response.statusCode());
var sizeMaybe = getResponseDataSize(response.headers());
int ttl = calculateTTL(response.headers());
return new LRUCache.Item<>(inputStream, metadata, sizeMaybe, Optional.of(ttl));
}
}
}

/** Get the size of the response data, if available. */
private static Optional<Long> getResponseDataSize(HttpHeaders headers) {
return headers.firstValue("content-length").map(Long::parseLong);
}

/**
* We define the TTL as the amount of time that the response should be considered fresh.
*
* <p>Define t0 as the time at which the content was generated on the origin server.
*
* <p>Define t1 as the time at which the current request was handled, either by the origin server
* or an intervening cache.
*
* <p>The 'Age' header, if present, is (t1 - t0).
*
* <p>The 'max-age' value in the 'Cache-Control' header, if present, is the origin server's
* definition of how long the response should be considered fresh.
*
* <p>If 'max-age' and 'Age' are both present, we set TTL = max-age - Age. If only 'max-age' is
* present, we set TTL = max-age. If neither is present, we use a default.
*/
private static int calculateTTL(HttpHeaders headers) {
Integer maxAge = getMaxAge(headers);
if (maxAge == null) {
return DEFAULT_TTL_SECONDS;
} else {
int age = headers.firstValue("age").map(Integer::parseInt).orElse(0);
return maxAge - age;
}
}

private static Integer getMaxAge(HttpHeaders headers) {
var cacheControlMaybe = headers.firstValue("cache-control");
Integer maxAge = null;
if (cacheControlMaybe.isPresent()) {
var cacheControl = cacheControlMaybe.get();
var cacheControlEntries = cacheControl.split(",");
for (var entry : cacheControlEntries) {
if (entry.trim().toLowerCase().startsWith("max-age")) {
var maxAgeBinding = entry.split("=");
if (maxAgeBinding.length > 1) {
maxAge = Integer.valueOf(maxAgeBinding[1]);
}
break;
}
}
}
return maxAge;
}
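
// Worked example of the TTL rule above (illustrative values): a response
// carrying "Cache-Control: max-age=120" and "Age: 50" was already 50 seconds
// old when it arrived, so calculateTTL returns 120 - 50 = 70 and the entry
// stays fresh for 70 more seconds. With no max-age directive at all,
// getMaxAge returns null and DEFAULT_TTL_SECONDS (one year) is used.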

public static void clear() {
lruCache.clear();
}

public static int getNumEntries() {
return lruCache.getNumEntries();
}

public static List<Long> getFileSizesTestOnly() {
return lruCache.getFileSizesTestOnly();
}

/** Return a set of parameters that can be used to modify settings for testing purposes. */
public static LRUCache.CacheTestParameters getCacheTestParameters() {
return lruCache.getCacheTestParameters();
}

public interface RequestMaker {
/** Executes the HTTP request and returns the response. */
EnsoHttpResponse makeRequest() throws IOException, InterruptedException;

/**
* Returns a hash key that can be used to uniquely identify this request. This will be used to
* decide if the `makeRequest` method should be executed, or if a cached response will be
* returned. The hash key should not be reversible.
*/
String hashKey();

/**
* When a cached response is returned, instead of executing `makeRequest`, this method is used
* to construct the response.
*/
EnsoHttpResponse reconstructResponseFromCachedStream(
InputStream inputStream, Metadata metadata);
}

public record Metadata(HttpHeaders headers, int statusCode) {}
}
@ -1,6 +1,7 @@
package org.enso.base.enso_cloud;

import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.http.HttpClient;
@ -9,8 +10,12 @@ import java.net.http.HttpResponse;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Properties;
import org.enso.base.cache.ResponseTooLargeException;
import org.enso.base.net.URISchematic;
import org.enso.base.net.URIWithSecrets;
import org.graalvm.collections.Pair;
@ -58,44 +63,119 @@ public final class EnsoSecretHelper extends SecretValueResolver {
HttpClient client,
Builder builder,
URIWithSecrets uri,
List<Pair<String, HideableValue>> headers)
throws IllegalArgumentException, IOException, InterruptedException {
List<Pair<String, HideableValue>> headers,
boolean useCache)
throws IllegalArgumentException,
IOException,
InterruptedException,
ResponseTooLargeException {

// Build a new URI with the query arguments.
URI resolvedURI = resolveURI(uri);
URI renderedURI = uri.render();

boolean hasSecrets =
uri.containsSecrets() || headers.stream().anyMatch(p -> p.getRight().containsSecrets());
if (hasSecrets) {
if (resolvedURI.getScheme() == null) {
throw new IllegalArgumentException("The URI must have a scheme.");
}
List<Pair<String, String>> resolvedHeaders =
headers.stream()
.map(
pair -> {
return Pair.create(pair.getLeft(), resolveValue(pair.getRight()));
})
.toList();

if (!resolvedURI.getScheme().equalsIgnoreCase("https")) {
throw new IllegalArgumentException(
"Secrets are not allowed in HTTP connections, use HTTPS instead.");
}
var requestMaker =
new RequestMaker(client, builder, uri, resolvedURI, headers, resolvedHeaders);

if (!useCache) {
return requestMaker.makeRequest();
} else {
return EnsoHTTPResponseCache.makeRequest(requestMaker);
}

builder.uri(resolvedURI);

// Resolve the header arguments.
for (Pair<String, HideableValue> header : headers) {
builder.header(header.getLeft(), resolveValue(header.getRight()));
}

// Build and Send the request.
var httpRequest = builder.build();
var bodyHandler = HttpResponse.BodyHandlers.ofInputStream();
var javaResponse = client.send(httpRequest, bodyHandler);

// Extract parts of the response
return new EnsoHttpResponse(
renderedURI, javaResponse.headers(), javaResponse.body(), javaResponse.statusCode());
}

public static void deleteSecretFromCache(String secretId) {
EnsoSecretReader.removeFromCache(secretId);
}

private static class RequestMaker implements EnsoHTTPResponseCache.RequestMaker {
private final HttpClient client;
private final Builder builder;
private final URIWithSecrets uri;
private final URI resolvedURI;
private final List<Pair<String, HideableValue>> headers;
private final List<Pair<String, String>> resolvedHeaders;

RequestMaker(
HttpClient client,
Builder builder,
URIWithSecrets uri,
URI resolvedURI,
List<Pair<String, HideableValue>> headers,
List<Pair<String, String>> resolvedHeaders) {
this.client = client;
this.builder = builder;
this.uri = uri;
this.resolvedURI = resolvedURI;
this.headers = headers;
this.resolvedHeaders = resolvedHeaders;
}

@Override
public EnsoHttpResponse makeRequest() throws IOException, InterruptedException {
boolean hasSecrets =
uri.containsSecrets() || headers.stream().anyMatch(p -> p.getRight().containsSecrets());
if (hasSecrets) {
if (resolvedURI.getScheme() == null) {
throw new IllegalArgumentException("The URI must have a scheme.");
}

if (!resolvedURI.getScheme().equalsIgnoreCase("https")) {
throw new IllegalArgumentException(
"Secrets are not allowed in HTTP connections, use HTTPS instead.");
}
}

builder.uri(resolvedURI);

for (Pair<String, String> resolvedHeader : resolvedHeaders) {
builder.header(resolvedHeader.getLeft(), resolvedHeader.getRight());
}

// Build and Send the request.
var httpRequest = builder.build();
var bodyHandler = HttpResponse.BodyHandlers.ofInputStream();
var javaResponse = client.send(httpRequest, bodyHandler);

URI renderedURI = uri.render();

return new EnsoHttpResponse(
renderedURI, javaResponse.headers(), javaResponse.body(), javaResponse.statusCode());
}

/** Sorts the headers by header name and value. */
@Override
public String hashKey() {
var sortedHeaders = resolvedHeaders.stream().sorted(headerNameComparator).toList();
List<String> keyStrings = new ArrayList<>(sortedHeaders.size() + 1);
keyStrings.add(resolvedURI.toString());

for (Pair<String, String> resolvedHeader : sortedHeaders) {
keyStrings.add(resolvedHeader.getLeft());
keyStrings.add(resolvedHeader.getRight());
}

return Integer.toString(Arrays.deepHashCode(keyStrings.toArray()));
}
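
// Illustration (assumed values): because the pairs are sorted before hashing,
// the header lists [("B", "2"), ("A", "1")] and [("A", "1"), ("B", "2")] both
// flatten to ["<uri>", "A", "1", "B", "2"] and therefore produce the same
// key; this is why header order does not affect cache identity.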

@Override
public EnsoHttpResponse reconstructResponseFromCachedStream(
InputStream inputStream, EnsoHTTPResponseCache.Metadata metadata) {
URI renderedURI = uri.render();

return new EnsoHttpResponse(
renderedURI, metadata.headers(), inputStream, metadata.statusCode());
}
}

private static final Comparator<Pair<String, String>> headerNameComparator =
Comparator.comparing((Pair<String, String> pair) -> pair.getLeft())
.thenComparing(Comparator.comparing(pair -> pair.getRight()));
}

@ -3,6 +3,7 @@ from Standard.Base import all
import Standard.Base.Errors.Common.Forbidden_Operation
import Standard.Base.Errors.Common.Syntax_Error
import Standard.Base.Errors.Illegal_Argument.Illegal_Argument
import Standard.Base.Network.HTTP.Cache_Policy.Cache_Policy
import Standard.Base.Network.HTTP.HTTP_Error.HTTP_Error
import Standard.Base.Network.HTTP.Request.Request
import Standard.Base.Network.HTTP.Response.Response
@ -184,11 +185,11 @@ add_specs suite_builder =

group_builder.specify "can select the version" <| Test.with_retries <|
req = Request.get url_get
r2 = HTTP.new version=HTTP_Version.HTTP_2 . request req . decode_as_json
r2 = HTTP.new version=HTTP_Version.HTTP_2 . request req cache_policy=..No_Cache . decode_as_json
r2.at "headers" . at "Connection" . should_equal "Upgrade, HTTP2-Settings"
r2.at "headers" . at "Http2-Settings" . should_contain "AA"

r1 = HTTP.new version=HTTP_Version.HTTP_1_1 . request req . decode_as_json
r1 = HTTP.new version=HTTP_Version.HTTP_1_1 . request req cache_policy=..No_Cache . decode_as_json
header_names = r1.at "headers" . field_names . map (s-> s.to_case Case.Lower)
header_names.should_not_contain "connection"
header_names.should_not_contain "http2-settings"
@ -606,7 +607,7 @@ add_specs suite_builder =
r1.should_be_a Response

group_builder.specify "should be able to handle server crash that closes stream abruptly" pending=pending_has_url <|
err = Data.fetch (base_url_with_slash+"crash?type=stream")
err = Data.fetch (base_url_with_slash+"crash?type=stream") cache_policy=..No_Cache
err.should_fail_with HTTP_Error
err.catch.message . should_equal "An IO error has occurred: java.io.IOException: closed"

@ -1,18 +1,28 @@
|
||||
from Standard.Base import all
|
||||
import Standard.Base.Data.Base_64.Base_64
|
||||
import Standard.Base.Errors.Common.Response_Too_Large
|
||||
import Standard.Base.Errors.File_Error.File_Error
|
||||
import Standard.Base.Errors.Illegal_Argument.Illegal_Argument
|
||||
import Standard.Base.Network.HTTP.Cache_Policy.Cache_Policy
|
||||
import Standard.Base.Network.HTTP.Request.Request
|
||||
import Standard.Base.Network.HTTP.Request_Body.Request_Body
|
||||
import Standard.Base.Network.HTTP.Response.Response
|
||||
import Standard.Base.Runtime.Context
|
||||
import Standard.Base.Runtime.Ref.Ref
|
||||
|
||||
from Standard.Table import all
|
||||
import Standard.Table.Errors.Invalid_JSON_Format
|
||||
|
||||
from Standard.Test import all
|
||||
|
||||
import enso_dev.Base_Tests.Network.Enso_Cloud.Cloud_Tests_Setup.Cloud_Tests_Setup
|
||||
from enso_dev.Base_Tests.Network.Http.Http_Test_Setup import base_url_with_slash, pending_has_url
|
||||
|
||||
import project.Util
|
||||
|
||||
polyglot java import java.lang.IllegalArgumentException
|
||||
polyglot java import org.enso.base.enso_cloud.EnsoHTTPResponseCache
|
||||
|
||||
main filter=Nothing =
|
||||
suite = Test.build suite_builder->
|
||||
add_specs suite_builder
|
||||
@ -94,3 +104,334 @@ add_specs suite_builder =
|
||||
# Reinterpreting as TSV:
|
||||
r4 = (uri.add_query_argument "Content-Type" "text/tab-separated-values").fetch
|
||||
r4.should_equal (Table.from_rows ["Column 1"] [["A,B"], ["1,x"], ["3,y"]])
|
||||
|
||||
suite_builder.group "Response caching" pending=pending_has_url group_builder->
|
||||
get_num_response_cache_entries =
|
||||
EnsoHTTPResponseCache.getNumEntries
|
||||
with_counts ~action =
|
||||
before_count = get_num_response_cache_entries
|
||||
action
|
||||
after_count = get_num_response_cache_entries
|
||||
[before_count, after_count]
|
||||
|
||||
reset_size_limits =
|
||||
EnsoHTTPResponseCache.getCacheTestParameters.clearMaxFileSizeOverrideTestOnly
|
||||
EnsoHTTPResponseCache.getCacheTestParameters.clearMaxTotalCacheSizeOverrideTestOnly
|
||||
|
||||
expect_counts expected_counts ~action =
|
||||
counts = with_counts action
|
||||
counts . should_equal expected_counts frames_to_skip=1
|
||||
|
||||
get_cache_file_sizes : Vector Integer
|
||||
get_cache_file_sizes -> Vector Integer =
|
||||
Vector.from_polyglot_array EnsoHTTPResponseCache.getFileSizesTestOnly . sort Sort_Direction.Ascending
|
||||
|
||||
url0 = base_url_with_slash+'test_download?max-age=16&length=10'
|
||||
url1 = base_url_with_slash+'test_download?max-age=16&length=20'
|
||||
url_post = base_url_with_slash + "post"
|
||||
headers0 = [Header.new "A-Header" "a-header-value", Header.new "A-Header" "a-header-value"]
|
||||
headers1 = [Header.new "A-Header" "a-different-header-value", Header.new "A-Header" "a-header-value"]
|
||||
|
||||
# Run the request(s) twice and confirm the results are the same
|
||||
check_same_results ~action =
|
||||
results = 0.up_to 2 . map (_-> action)
|
||||
results.distinct.length . should_equal 1
|
||||
|
||||
group_builder.specify "Cache should return the same repsonse" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
|
||||
check_same_results <|
|
||||
HTTP.fetch url0 . decode_as_text
|
||||
get_num_response_cache_entries . should_equal 1
|
||||
check_same_results <|
|
||||
HTTP.fetch url1 . decode_as_text
|
||||
get_num_response_cache_entries . should_equal 2
|
||||
|
||||
HTTP.clear_response_cache
|
||||
|
||||
HTTP.fetch url0 cache_policy=Cache_Policy.Use_Cache . decode_as_text
|
||||
HTTP.fetch url0 cache_policy=Cache_Policy.Use_Cache . decode_as_text
|
||||
url1_body_1 = HTTP.fetch url1 cache_policy=Cache_Policy.Use_Cache . decode_as_text
|
||||
HTTP.fetch url1 cache_policy=Cache_Policy.Use_Cache . decode_as_text . should_equal url1_body_1
|
||||
get_num_response_cache_entries . should_equal 2
|
||||
|
||||
HTTP.clear_response_cache
|
||||
|
||||
url0_body_2 = HTTP.fetch url0 cache_policy=Cache_Policy.No_Cache . decode_as_text
|
||||
HTTP.fetch url0 cache_policy=Cache_Policy.No_Cache . decode_as_text . should_not_equal url0_body_2
|
||||
url1_body_2 = HTTP.fetch url1 cache_policy=Cache_Policy.No_Cache . decode_as_text
|
||||
HTTP.fetch url1 cache_policy=Cache_Policy.No_Cache . decode_as_text . should_not_equal url1_body_2
|
||||
get_num_response_cache_entries . should_equal 0
|
||||
|
||||
group_builder.specify "Cache should handle many entries" pending=pending_has_url <| Test.with_retries <|
|
||||
count = 20
|
||||
|
||||
HTTP.clear_response_cache
|
||||
check_same_results <|
|
||||
0.up_to count . map i->
|
||||
HTTP.fetch base_url_with_slash+"test_download?length="+i.to_text . decode_as_text
|
||||
get_num_response_cache_entries . should_equal count
|
||||
|
||||
HTTP.clear_response_cache
|
||||
check_same_results <|
|
||||
0.up_to count . each i->
|
||||
headers = [Header.new "A-Header" "a-header-value-"+i.to_text]
|
||||
HTTP.fetch base_url_with_slash+"test_download?length=8" headers=headers . decode_as_text
|
||||
get_num_response_cache_entries . should_equal count
|
||||
|
||||
group_builder.specify "Cache policy should work for HTTP.fetch" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
expect_counts [0, 0] <|
|
||||
HTTP.fetch url0 cache_policy=Cache_Policy.No_Cache
|
||||
HTTP.fetch url1 cache_policy=Cache_Policy.No_Cache
|
||||
expect_counts [0, 2] <|
|
||||
HTTP.fetch url0 cache_policy=Cache_Policy.Use_Cache
|
||||
HTTP.fetch url1 cache_policy=Cache_Policy.Use_Cache
|
||||
HTTP.clear_response_cache
|
||||
expect_counts [0, 2] <|
|
||||
HTTP.fetch url0
|
||||
HTTP.fetch url1
|
||||
|
||||
group_builder.specify "Cache policy should work for Data.fetch" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
expect_counts [0, 0] <|
|
||||
Data.fetch url0 cache_policy=Cache_Policy.No_Cache
|
||||
Data.fetch url1 cache_policy=Cache_Policy.No_Cache
|
||||
expect_counts [0, 2] <|
|
||||
Data.fetch url0 cache_policy=Cache_Policy.Use_Cache
|
||||
Data.fetch url1 cache_policy=Cache_Policy.Use_Cache
|
||||
HTTP.clear_response_cache
|
||||
expect_counts [0, 2] <|
|
||||
Data.fetch url0
|
||||
Data.fetch url1
|
||||
|
||||
group_builder.specify "Should not cache Data.download" pending=pending_has_url <| Test.with_retries <|
|
||||
target_file = enso_project.data / "transient" / "my_download0.txt"
|
||||
|
||||
HTTP.clear_response_cache
|
||||
target_file.delete_if_exists
|
||||
|
||||
Data.download url0 target_file
|
||||
get_num_response_cache_entries . should_equal 0
|
||||
|
||||
target_file.delete_if_exists
|
||||
|
||||
group_builder.specify "Data.download is not affected by caching limits" pending=pending_has_url <| Test.with_retries <|
|
||||
target_file = enso_project.data / "transient" / "my_download0.txt"
|
||||
Panic.with_finalizer reset_size_limits <|
|
||||
EnsoHTTPResponseCache.getCacheTestParameters.setMaxTotalCacheSizeOverrideTestOnly 120
|
||||
EnsoHTTPResponseCache.getCacheTestParameters.setMaxFileSizeOverrideTestOnly 100
|
||||
Data.download base_url_with_slash+"test_download?length=200" target_file
|
||||
target_file.read.length . should_equal 200
|
||||
target_file.delete_if_exists
|
||||
|
||||
group_builder.specify "Should not cache for methods other than GET" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
|
||||
expect_counts [0, 0] <|
|
||||
Data.post url_post (Request_Body.Text "hello world")
|
||||
|
||||
group_builder.specify "HTTP request with a non-GET method should reject a cache_policy=Use_Cache argument" pending=pending_has_url <| Test.with_retries <|
|
||||
request = Request.new HTTP_Method.Post url_post [] Request_Body.Empty
|
||||
HTTP.new.request request cache_policy=Cache_Policy.Use_Cache . should_fail_with Illegal_Argument
|
||||
|
||||
group_builder.specify "HTTP request with a non-GET method should not reject a cache_policy=No_Cache argument" pending=pending_has_url <| Test.with_retries <|
|
||||
request = Request.new HTTP_Method.Post url_post [] Request_Body.Empty
|
||||
HTTP.new.request request cache_policy=Cache_Policy.No_Cache . should_succeed
|
||||
|
||||
group_builder.specify "Should be able to clear caches" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
Data.fetch url0
|
||||
get_num_response_cache_entries . should_equal 1
|
||||
HTTP.clear_response_cache
|
||||
get_num_response_cache_entries . should_equal 0
|
||||
|
||||
group_builder.specify "Cache key should depend on the headers" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
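    # Six fetches with only two distinct header sets should add just two entries.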
    expect_counts [0, 2] <|
        Data.fetch url0 headers=headers0
        Data.fetch url0 headers=headers1
        Data.fetch url0 headers=headers1
        Data.fetch url0 headers=headers0
        Data.fetch url0 headers=headers0
        Data.fetch url0 headers=headers1

group_builder.specify "Cache key should not depend on header order" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
header0 = Header.new "Abc" "eef"
|
||||
header1 = Header.new "Abc" "def"
|
||||
header2 = Header.new "Ghi" "jkl"
|
||||
orders = [[header0, header1, header2], [header1, header2, header0], [header2, header1, header0]]
|
||||
responses = orders.map headers->
|
||||
Data.fetch url0 headers=headers . decode_as_text
|
||||
get_num_response_cache_entries . should_equal 1
|
||||
responses.distinct.length . should_equal 1
|
||||
|
||||
## Fetching the trigger uri causes stale entries to be removed, since the
   uri is always different and so the caching and cleanup logic is run.
fake_now = Date_Time.now
trigger_uri_serial = Ref.new 0
make_trigger_uri =
    serial = trigger_uri_serial.get
    trigger_uri_serial.modify (_ + 1)
    base_url_with_slash+'test_download?max-age=10000&length=50&abc='+serial.to_text
set_time_and_get_count advance_secs =
    EnsoHTTPResponseCache.getCacheTestParameters.setNowOverrideTestOnly (fake_now + (Duration.new seconds=advance_secs))
    trigger_uri = make_trigger_uri
    Data.fetch trigger_uri
    get_num_response_cache_entries
fake_time_resetter =
    EnsoHTTPResponseCache.getCacheTestParameters.clearNowOverrideTestOnly

group_builder.specify "The cache should expire stale entries" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
|
||||
set_time_and_get_count 0 # Initialize fake now.
|
||||
|
||||
    Data.fetch base_url_with_slash+'test_download?max-age=100&length=50'
    Data.fetch base_url_with_slash+'test_download?max-age=200&length=50'
    Data.fetch base_url_with_slash+'test_download?max-age=200&length=51'
    Data.fetch base_url_with_slash+'test_download?max-age=300&length=50'

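    # These four entries expire at t=100, t=200 (twice) and t=300. Each
    # set_time_and_get_count call below also adds one long-lived trigger entry.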
    Panic.with_finalizer fake_time_resetter <|
        ## The count will increase by 1 each time, but decrease by the
           number of entries removed.
        set_time_and_get_count 0 . should_equal 6
        set_time_and_get_count 90 . should_equal 7
        set_time_and_get_count 110 . should_equal 7
        set_time_and_get_count 190 . should_equal 8
        set_time_and_get_count 202 . should_equal 7
        set_time_and_get_count 292 . should_equal 8
        set_time_and_get_count 301 . should_equal 8

group_builder.specify "The cache should use the Age response header" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
|
||||
set_time_and_get_count 0 # Initialize fake now.
|
||||
|
||||
    Data.fetch base_url_with_slash+'test_download?max-age=100&age=50&length=50' # ttl 50
    Data.fetch base_url_with_slash+'test_download?max-age=100&age=30&length=50' # ttl 70
    Data.fetch base_url_with_slash+'test_download?max-age=120&age=50&length=50' # ttl 70
    Data.fetch base_url_with_slash+'test_download?max-age=70&length=50' # ttl 70
    Data.fetch base_url_with_slash+'test_download?max-age=160&age=70&length=50' # ttl 90

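    # The effective TTL is max-age minus Age, so these entries expire at
    # t=50, 70, 70, 70 and 90 respectively.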
    Panic.with_finalizer fake_time_resetter <|
        ## The count will increase by 1 each time, but decrease by the
           number of entries removed.
        set_time_and_get_count 0 . should_equal 7
        set_time_and_get_count 40 . should_equal 8
        set_time_and_get_count 51 . should_equal 8
        set_time_and_get_count 68 . should_equal 9
        set_time_and_get_count 72 . should_equal 7
        set_time_and_get_count 88 . should_equal 8
        set_time_and_get_count 93 . should_equal 8

download size =
    Data.fetch base_url_with_slash+'test_download?length='+size.to_text

group_builder.specify "Will remove old cache files to keep the total cache size under the total cache size limit" pending=pending_has_url <| Test.with_retries <|
|
||||
Panic.with_finalizer reset_size_limits <|
|
||||
reset_size_limits
|
||||
EnsoHTTPResponseCache.getCacheTestParameters.setMaxTotalCacheSizeOverrideTestOnly 100
|
||||
|
||||
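        # With a 100-byte total cap, any download that would push the total over
        # the limit first evicts the least-recently-used entries until it fits.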
        download 30
        download 50
        download 10
        get_cache_file_sizes . should_equal_ignoring_order [10, 30, 50]
        download 20
        get_cache_file_sizes . should_equal_ignoring_order [10, 20, 50]
        download 40
        get_cache_file_sizes . should_equal_ignoring_order [10, 20, 40]
        download 35
        get_cache_file_sizes . should_equal_ignoring_order [20, 35, 40]

group_builder.specify "Will remove old cache files based on how recently they were used" pending=pending_has_url <| Test.with_retries <|
|
||||
Panic.with_finalizer reset_size_limits <|
|
||||
reset_size_limits
|
||||
EnsoHTTPResponseCache.getCacheTestParameters.setMaxTotalCacheSizeOverrideTestOnly 100
|
||||
|
||||
download 30
|
||||
download 50
|
||||
download 10
|
||||
get_cache_file_sizes . should_equal_ignoring_order [10, 30, 50]
|
||||
# Use 30 again so it's considered more recently used.
|
||||
download 30
|
||||
get_cache_file_sizes . should_equal_ignoring_order [10, 30, 50]
|
||||
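        # 50 is now the least recently used entry, so it is evicted first.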
        download 20
        get_cache_file_sizes . should_equal_ignoring_order [10, 20, 30]
        download 45
        get_cache_file_sizes . should_equal_ignoring_order [20, 30, 45]

group_builder.specify "Will not cache a file with a content length greater than the single file limit" pending=pending_has_url <| Test.with_retries <|
|
||||
Panic.with_finalizer reset_size_limits <|
|
||||
reset_size_limits
|
||||
EnsoHTTPResponseCache.getCacheTestParameters.setMaxFileSizeOverrideTestOnly 100
|
||||
download 110 . should_fail_with (Response_Too_Large.Error 100)
|
||||
|
||||
|
||||
group_builder.specify "Will not cache a file without a content length, but which is greater than the single file limit" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
Panic.with_finalizer reset_size_limits <|
|
||||
reset_size_limits
|
||||
EnsoHTTPResponseCache.getCacheTestParameters.setMaxFileSizeOverrideTestOnly 100
|
||||
url = base_url_with_slash+'test_download?omit-content-length=1&length=110'
|
||||
Data.fetch url . should_fail_with (Response_Too_Large.Error 100)
|
||||
|
||||
group_builder.specify "Should not cache if the request fails" pending=pending_has_url <| Test.with_retries <|
|
||||
HTTP.clear_response_cache
|
||||
|
||||
HTTP.fetch url0
|
||||
get_num_response_cache_entries . should_equal 1
|
||||
HTTP.fetch base_url_with_slash+'crash'
|
||||
get_num_response_cache_entries . should_equal 1
|
||||
HTTP.fetch base_url_with_slash+'nonexistent_endpoint'
|
||||
get_num_response_cache_entries . should_equal 1
|
||||
|
||||
cloud_setup = Cloud_Tests_Setup.prepare

group_builder.specify "Should work with secrets in the URI" pending=pending_has_url <| Test.with_retries <|
|
||||
cloud_setup.with_prepared_environment <|
|
||||
secret1 = Enso_Secret.create "http-cache-secret-1-"+Random.uuid "My Value"
|
||||
secret2 = Enso_Secret.create "http-cache-secret-2-"+Random.uuid "Some Value"
|
||||
cleanup =
|
||||
secret1.delete
|
||||
secret2.delete
|
||||
Panic.with_finalizer cleanup <|
|
||||
# Requests differ only in secrets in URI.
|
||||
url1 = URI.from 'https://httpbin.org/bytes/50'
|
||||
. add_query_argument "arg1" secret1
|
||||
. add_query_argument "arg2" "plain value"
|
||||
uri2 = URI.from 'https://httpbin.org/bytes/50'
|
||||
. add_query_argument "arg1" secret2
|
||||
. add_query_argument "arg2" "plain value"
|
||||
|
||||
HTTP.clear_response_cache
|
||||
HTTP.fetch url1
|
||||
get_num_response_cache_entries . should_equal 1
|
||||
HTTP.fetch uri2
|
||||
get_num_response_cache_entries . should_equal 2
|
||||
|
||||
group_builder.specify "Should work with secrets in the headers" pending=pending_has_url <| Test.with_retries <|
|
||||
cloud_setup.with_prepared_environment <|
|
||||
secret1 = Enso_Secret.create "http-cache-secret-1-"+Random.uuid "My Value"
|
||||
secret2 = Enso_Secret.create "http-cache-secret-2-"+Random.uuid "Some Value"
|
||||
cleanup =
|
||||
secret1.delete
|
||||
secret2.delete
|
||||
Panic.with_finalizer cleanup <|
|
||||
# Requests differ only in secrets in headers.
|
||||
uri = URI.from 'https://httpbin.org/bytes/50'
|
||||
headers1 = [Header.new "A-Header" secret1]
|
||||
headers2 = [Header.new "A-Header" secret2]
|
||||
|
||||
HTTP.clear_response_cache
|
||||
HTTP.fetch headers=headers1 uri
|
||||
get_num_response_cache_entries . should_equal 1
|
||||
HTTP.fetch headers=headers2 uri
|
||||
get_num_response_cache_entries . should_equal 2
|
||||
|
||||
group_builder.specify "Should not be able to set the cache limits higher than the real limits" pending=pending_has_url <| Test.with_retries <|
|
||||
Test.expect_panic IllegalArgumentException <|
|
||||
EnsoHTTPResponseCache.getCacheTestParameters.setMaxFileSizeOverrideTestOnly (2 * 1024 * 1024 * 1024 + 1) . should_fail_with Illegal_Argument
|
||||
Test.expect_panic IllegalArgumentException <|
|
||||
EnsoHTTPResponseCache.getCacheTestParameters.setMaxTotalCacheSizeOverrideTestOnly (20 * 1024 * 1024 * 1024 + 1) . should_fail_with Illegal_Argument
|
||||
|
@ -98,6 +98,7 @@ public class HTTPTestHelperServer {
    server.addHandler("/test_basic_auth", new BasicAuthTestHandler());
    server.addHandler("/crash", new CrashingTestHandler());
    server.addHandler("/test_redirect", new RedirectTestHandler("/testfiles/js.txt"));
    server.addHandler("/test_download", new DownloadTestHandler());

    // Cloud mock
    if (cloudMockSetup != null) {

@ -0,0 +1,58 @@
package org.enso.shttp.test_helpers;

import com.sun.net.httpserver.HttpExchange;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.util.Random;
import org.apache.http.client.utils.URIBuilder;
import org.enso.shttp.SimpleHttpHandler;

/**
 * A handler that generates a data response, with optional max-age and Age headers. The data
 * response consists of a string of random letters of the requested length.
 */
public class DownloadTestHandler extends SimpleHttpHandler {
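  // A fixed seed keeps the generated payload deterministic across test runs.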
  private Random random = new Random(42);

  @Override
  protected void doHandle(HttpExchange exchange) throws IOException {
    URI uri = exchange.getRequestURI();
    URIBuilder builder = new URIBuilder(uri);

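    // Query parameters select the payload length and the cache-related response headers.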
    int length = 10;
    String maxAge = null;
    String age = null;
    boolean omitContentLength = false;
    for (var queryPair : builder.getQueryParams()) {
      switch (queryPair.getName()) {
        case "length" -> length = Integer.parseInt(queryPair.getValue());
        case "max-age" -> maxAge = queryPair.getValue();
        case "age" -> age = queryPair.getValue();
        case "omit-content-length" -> omitContentLength = true;
        default -> {}
      }
    }

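    // Fill the payload with random lowercase ASCII letters (97 is 'a').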
    byte[] responseData = new byte[length];
    for (int i = 0; i < length; ++i) {
      responseData[i] = (byte) (97 + random.nextInt(26));
    }

    if (maxAge != null) {
      exchange.getResponseHeaders().add("Cache-Control", "max-age=" + maxAge);
    }

    if (age != null) {
      exchange.getResponseHeaders().add("Age", age);
    }

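    // Passing 0 to sendResponseHeaders signals an unknown length, so the server
    // falls back to chunked transfer encoding and omits the Content-Length header.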
    long contentLength = omitContentLength ? 0 : responseData.length;
    exchange.sendResponseHeaders(200, contentLength);

    try (OutputStream os = exchange.getResponseBody()) {
      os.write(responseData);
    }
    exchange.close();
  }
}