Accessing and modifying description and labels of Enso Cloud assets (#11255)
- Closes #11227.
- Additionally, it should fix #11278 by ensuring that every scheduled message goes to its intended endpoint, by splitting each batch by endpoint.
parent 468b643aad
commit 3458fe4fe1
@@ -11,8 +11,11 @@
- [The `enso://~` path now resolves to the user's home directory in the
  cloud.][11235]
- [The user may set the description and labels of an Enso Cloud asset
  programmatically.][11255]

[11235]: https://github.com/enso-org/enso/pull/11235
[11255]: https://github.com/enso-org/enso/pull/11255

# Enso 2024.4
@@ -0,0 +1,6 @@
import project.Data.Numbers.Integer

## Represents a color.
type Color
    ## A color represented in the HCL color space.
    HCL hue:Integer chroma:Integer lightness:Integer
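
    # A hedged construction sketch; the field order is hue, chroma, lightness,
    # and the values below are arbitrary:
    #
    #     example_color = Color.HCL 250 66 50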

@@ -1,4 +1,5 @@
import project.Any.Any
import project.Data.Color.Color
import project.Data.Json.JS_Object
import project.Data.Numbers.Integer
import project.Data.Text.Encoding.Encoding
@@ -138,12 +139,126 @@ type Enso_File
       ICON metadata
       Gets the last modified time of a file.
    last_modified_time : Date_Time
    last_modified_time self =
        asset = Existing_Enso_Asset.get_asset_reference_for self
        if asset.is_regular_file.not then Error.throw (Illegal_Argument.Error "`last_modified_time` can only be queried for files.") else
            metadata = asset.get_file_description |> get_required_field "metadata"
            Date_Time.parse (get_required_field "modifiedAt" metadata expected_type=Text) Date_Time_Formatter.iso_offset_date_time
                . catch Time_Error error-> Error.throw (Enso_Cloud_Error.Invalid_Response_Payload error)
    last_modified_time self -> Date_Time =
        asset = Existing_Enso_Asset.get_asset_reference_for self want_metadata=True
        asset.metadata.modified_at
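
    # A hedged usage sketch; the cloud path is hypothetical:
    #
    #     Enso_File.new "enso://Users/me/report.txt" . last_modified_time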

    ## GROUP Metadata
       ICON metadata
       Gets the description associated with the file in Enso Cloud.

       By default, the description is empty. It can be set in the Dashboard, or
       using the `set_description` method.
    description : Text
    description self -> Text =
        asset = Existing_Enso_Asset.get_asset_reference_for self want_metadata=True
        asset.metadata.description.if_nothing ""

    ## GROUP Metadata
       ICON data_output
       Sets the description associated with the file in Enso Cloud.

       Arguments:
       - description: The new description to set.

       Returns:
       - The file on which it was called.
    set_description : Text -> Enso_File
    set_description self (description : Text) -> Enso_File =
        Context.Output.if_enabled disabled_message="Cannot set a description when writing is disabled. Press the Write button ▶ to perform the operation." panic=False <|
            update_asset_description self description . if_not_error self
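
    # A hedged usage sketch; the cloud path is hypothetical. `set_description`
    # returns the file itself, so calls can be chained:
    #
    #     file = Enso_File.new "enso://Users/me/report.txt"
    #     file.set_description "Quarterly report data"
    #     file.description # returns "Quarterly report data"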

    ## GROUP Metadata
       ICON metadata
       Gets the labels associated with the file in Enso Cloud.
    labels : Vector Text
    labels self -> Vector Text =
        asset = Existing_Enso_Asset.get_asset_reference_for self want_metadata=True
        asset.metadata.labels

    ## GROUP Metadata
       ICON data_output
       Adds a label to the file in Enso Cloud.

       If a label with the given name did not exist before, it is created with
       a random color. If you want to set a specific color, use
       `Enso_File.create_label` before calling this method.

       Arguments:
       - label: The name of the label to add to the file.

       Returns:
       - The file on which it was called.

       ? Race Conditions

         Adding labels is not atomic. If two processes are modifying labels of
         the same asset at the same time, some changes may be lost.
    add_label : Text -> Enso_File
    add_label self (label : Text) -> Enso_File =
        labels = self.labels
        if labels.contains label then self else
            Context.Output.if_enabled disabled_message="Cannot add a label when writing is disabled. Press the Write button ▶ to perform the operation." panic=False <|
                create_tags_if_not_exist [label] . if_not_error <|
                    new_labels = labels+[label]
                    update_asset_labels self new_labels . if_not_error self
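
    # A hedged usage sketch; `file` is a hypothetical `Enso_File`. If the label
    # "reviewed" does not exist yet, it is created with a random color:
    #
    #     file.add_label "reviewed"
    #     file.labels . contains "reviewed" # returns True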

    ## GROUP Metadata
       ICON data_output
       Removes a label from the file in Enso Cloud.

       Arguments:
       - label: The name of the label to remove from the file.

       Returns:
       - A Boolean indicating whether the file had the label before the operation.

       ? Race Conditions

         Removing labels is not atomic. If two processes are modifying labels of
         the same asset at the same time, some changes may be lost.
    remove_label : Text -> Boolean
    remove_label self (label : Text) -> Boolean =
        labels = self.labels
        if labels.contains label . not then False else
            Context.Output.if_enabled disabled_message="Cannot remove a label when writing is disabled. Press the Write button ▶ to perform the operation." panic=False <|
                new_labels = labels.filter l-> l != label
                update_asset_labels self new_labels . if_not_error True
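
    # A hedged usage sketch; the Boolean result distinguishes an actual removal
    # from a no-op:
    #
    #     file.remove_label "reviewed" # True if the label was present
    #     file.remove_label "reviewed" # False on the second call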

    ## GROUP Metadata
       ICON data_output
       Sets the labels associated with the file in Enso Cloud.
       Any labels other than the ones provided are removed from the file.

       Any provided label that did not exist before is created with a random
       color. If you want to set a specific color, use `Enso_File.create_label`
       before calling this method.

       Arguments:
       - labels: The new set of labels to associate with the file.

       Returns:
       - The file on which it was called.
    set_labels : Vector Text -> Enso_File
    set_labels self (labels : Vector Text) -> Enso_File =
        Context.Output.if_enabled disabled_message="Cannot update labels when writing is disabled. Press the Write button ▶ to perform the operation." panic=False <|
            create_tags_if_not_exist labels . if_not_error <|
                update_asset_labels self labels . if_not_error self
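
    # A hedged usage sketch; this replaces whatever labels were set before:
    #
    #     file.set_labels ["draft", "q3"]
    #     file.labels # contains exactly "draft" and "q3", in some order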

    ## GROUP Metadata
       ICON data_output
       Creates a new label with the given name and color.

       Arguments:
       - name: The name of the label to create.
       - color: The color of the label to create.
    create_label : Text -> Color -> Nothing
    create_label (name : Text) (color : Color) -> Nothing =
        # TODO: once the cloud also checks for tag existence, we could skip this check when the Write context is enabled, as the cloud will do it anyway - that would mean one request instead of two each time.
        # Cloud ticket: https://github.com/enso-org/cloud-v2/issues/1544
        if does_tag_exist name then Error.throw (Illegal_Argument.Error "A label with name "+name+" already exists.") else
            Context.Output.if_enabled disabled_message="Cannot create a label when writing is disabled. Press the Write button ▶ to perform the operation." panic=False <|
                create_tag name color . if_not_error Nothing
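
    # A hedged sketch of creating a label with an explicit color before
    # attaching it; the name and color values are illustrative:
    #
    #     Enso_File.create_label "urgent" (Color.HCL 0 66 50)
    #     file.add_label "urgent"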

    ## GROUP Metadata
       ICON metadata
@@ -1,10 +1,13 @@
private

import project.Any.Any
import project.Data.Color.Color
import project.Data.Dictionary.Dictionary
import project.Data.Hashset.Hashset
import project.Data.Json.Invalid_JSON
import project.Data.Json.JS_Object
import project.Data.Text.Text
import project.Data.Vector.Vector
import project.Enso_Cloud.Enso_File.Enso_Asset_Type
import project.Enso_Cloud.Enso_File.Enso_File
import project.Enso_Cloud.Errors.Enso_Cloud_Error
@@ -19,6 +22,7 @@ import project.Network.HTTP.HTTP_Method.HTTP_Method
import project.Network.HTTP.Request_Error
import project.Network.URI.URI
import project.Nothing.Nothing
import project.Random.Random
import project.System.File.File
import project.System.Output_Stream.Output_Stream
from project.Data.Boolean import Boolean, False, True
@@ -87,7 +91,7 @@ perform_upload (destination : Enso_File) (allow_existing : Boolean) (~generate_r
        response = Utils.http_request_as_json HTTP_Method.Post full_uri pair.first error_handlers=error_handlers
        response.if_not_error <|
            id = get_required_field "id" response expected_type=Text
            Asset_Cache.update destination (Existing_Enso_Asset.from_id_and_title id file_name) . if_not_error <|
            Asset_Cache.update destination (Existing_Enso_Asset.new id file_name) . if_not_error <|
                pair.second

## PRIVATE
@@ -103,7 +107,9 @@ create_directory_with_parents (target : Enso_File) -> Existing_Enso_Asset =
    body = JS_Object.from_pairs [["title", target.name], ["parentId", parent_asset.id]]
    Asset_Cache.invalidate target
    response = Utils.http_request_as_json HTTP_Method.Post Utils.directory_api body
    created_asset = Existing_Enso_Asset.from_json response
    id = get_required_field "id" response expected_type=Text
    title = get_required_field "title" response expected_type=Text
    created_asset = Existing_Enso_Asset.new id title
    created_asset.if_not_error <|
        Asset_Cache.update target created_asset
        created_asset

@@ -129,5 +135,64 @@ create_datalink_from_stream_action (destination : Enso_File) (allow_existing : B
    response = Utils.http_request_as_json HTTP_Method.Post Utils.datalinks_api payload error_handlers=error_handlers
    response.if_not_error <|
        id = get_required_field "id" response expected_type=Text
        Asset_Cache.update destination (Existing_Enso_Asset.from_id_and_title id title) . if_not_error <|
        Asset_Cache.update destination (Existing_Enso_Asset.new id title) . if_not_error <|
            action_result

## PRIVATE
update_asset_description (asset : Enso_File) (description : Text) =
    existing_asset = Existing_Enso_Asset.get_asset_reference_for asset
    payload = JS_Object.from_pairs [["description", description]]
    Asset_Cache.invalidate asset
    Utils.http_request HTTP_Method.Patch existing_asset.asset_uri payload

## PRIVATE
update_asset_labels (asset : Enso_File) (new_labels : Vector Text) =
    existing_asset = Existing_Enso_Asset.get_asset_reference_for asset
    payload = JS_Object.from_pairs [["labels", new_labels]]
    Asset_Cache.invalidate asset
    Utils.http_request HTTP_Method.Patch existing_asset.asset_uri+"/labels" payload

## PRIVATE
create_tag (name : Text) (color : Color) =
    color_as_json = JS_Object.from_pairs <| case color of
        Color.HCL h c l -> [["lightness", l], ["chroma", c], ["hue", h]]
        _ -> Error.throw (Illegal_Argument.Error "Only colors expressed in the HCL color space are supported when creating labels.")
    payload = JS_Object.from_pairs <|
        [["value", name], ["color", color_as_json]]
    Utils.http_request_as_json HTTP_Method.Post Utils.tags_api payload

## PRIVATE
   Returns a list of known tags.
list_tags -> Vector Tag =
    response = Utils.http_request_as_json HTTP_Method.Get Utils.tags_api
    tags = get_required_field "tags" response expected_type=Vector
    tags.map tag_json->
        id = get_required_field "id" tag_json expected_type=Text
        value = get_required_field "value" tag_json expected_type=Text
        Tag.Value value id

## PRIVATE
type Tag
    ## PRIVATE
       We are not including the color, as we are not using it yet.
       Once needed, it should be added.
    Value value:Text id:Text

## PRIVATE
random_tag_color -> Color =
    lightness = 50
    chroma = 66
    hue = Random.integer 0 360
    Color.HCL hue chroma lightness

## PRIVATE
does_tag_exist (name : Text) -> Boolean =
    existing_tags = Hashset.from_vector <| list_tags.map .value
    existing_tags.contains name

## PRIVATE
create_tags_if_not_exist (names : Vector Text) =
    existing_tags = Hashset.from_vector <| list_tags.map .value
    tags_to_create = (Hashset.from_vector names).difference existing_tags . to_vector
    tags_to_create.each name->
        create_tag name random_tag_color
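
# A hedged illustration of the intended semantics: only the names that are
# missing trigger a creation request, each with a random color. Assuming a
# label "alpha" already exists and "beta" does not:
#
#     create_tags_if_not_exist ["alpha", "beta"] # creates only "beta"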
@@ -10,19 +10,22 @@ import project.Enso_Cloud.Cloud_Caching_Settings
import project.Enso_Cloud.Enso_File.Enso_Asset_Type
import project.Enso_Cloud.Enso_File.Enso_File
import project.Enso_Cloud.Enso_User.Enso_User
import project.Enso_Cloud.Errors.Enso_Cloud_Error
import project.Enso_Cloud.Internal.Utils
import project.Error.Error
import project.Errors.Common.Not_Found
import project.Errors.File_Error.File_Error
import project.Errors.Illegal_Argument.Illegal_Argument
import project.Errors.Time_Error.Time_Error
import project.Network.HTTP.HTTP_Method.HTTP_Method
import project.Network.URI.URI
import project.Nothing.Nothing
import project.Panic.Panic
import project.Runtime.Context
from project.Data.Boolean import Boolean, False, True
from project.Data.Text.Extensions import all
from project.Enso_Cloud.Data_Link_Helpers import data_link_extension
from project.Enso_Cloud.Public_Utils import get_required_field
from project.Enso_Cloud.Public_Utils import get_required_field, get_optional_field

## PRIVATE
   Currently, most of the Cloud API relies on asset IDs and not paths.
@@ -32,7 +35,11 @@ from project.Enso_Cloud.Public_Utils import get_required_field
type Existing_Enso_Asset
    ## PRIVATE
       Represents an existing asset within the Enso cloud.
    private Value title:Text id:Text asset_type:Enso_Asset_Type

       The metadata represents additional information about the asset.
       It is stored if it was available in the response. It may not be present for
       entries saved in the cache after an upload operation that does not return metadata.
    private Value title:Text id:Text asset_type:Enso_Asset_Type metadata:Asset_Metadata|Nothing

    ## PRIVATE
       Target URI for the API.
@@ -69,8 +76,14 @@
    ## PRIVATE
       Fetches the basic information about an existing file from the Cloud.
       It will fail if the file does not exist.
    get_asset_reference_for (file : Enso_File) -> Existing_Enso_Asset ! File_Error =
        fetch_asset_reference file

       Arguments:
       - file: The file to fetch the information for.
       - want_metadata: Whether to require metadata for the file.
         If set to True, a cached entry without metadata will not be considered
         and a full fetch will be performed.
    get_asset_reference_for (file : Enso_File) (want_metadata : Boolean = False) -> Existing_Enso_Asset ! File_Error =
        fetch_asset_reference file want_metadata

    ## PRIVATE
       Resolves a path to an existing asset in the cloud.
@@ -100,36 +113,67 @@
    from_json json -> Existing_Enso_Asset =
        title = get_required_field "title" json expected_type=Text
        id = get_required_field "id" json expected_type=Text
        Existing_Enso_Asset.from_id_and_title id title
        metadata = Asset_Metadata.from_json json
        Existing_Enso_Asset.new id title metadata

    ## PRIVATE
    from_id_and_title id:Text title:Text -> Existing_Enso_Asset =
        asset_type = Enso_Asset_Type.from (id.take (..Before "-"))
        Existing_Enso_Asset.Value title id asset_type
    new id:Text title:Text (metadata : Asset_Metadata | Nothing = Nothing) -> Existing_Enso_Asset =
        Existing_Enso_Asset.Value title id (asset_type_from_id id) metadata

## PRIVATE
asset_type_from_id id:Text -> Enso_Asset_Type =
    Enso_Asset_Type.from (id.take (..Before "-"))
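
# A hedged sketch with a made-up id; the type is taken from the prefix before
# the first dash, so this presumably resolves to the directory asset type:
#
#     asset_type_from_id "directory-12345"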

## PRIVATE
   The asset metadata that is currently automatically included as part of the
   path resolution response.
type Asset_Metadata
    ## PRIVATE
    Value modified_at:Date_Time description:Text|Nothing labels:Vector

    ## PRIVATE
    from_json json -> Asset_Metadata =
        modified_at_text = get_required_field "modifiedAt" json expected_type=Text
        modified_at = Date_Time.parse modified_at_text Date_Time_Formatter.iso_offset_date_time
            . catch Time_Error error-> Error.throw (Enso_Cloud_Error.Invalid_Response_Payload error)
        description = get_optional_field "description" json expected_type=Text
        labels = get_optional_field "labels" json expected_type=Vector if_missing=[]
        Asset_Metadata.Value modified_at description labels
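
    # A hedged sketch of the payload shape this parser expects; the field
    # values are illustrative:
    #
    #     example = Json.parse '{"modifiedAt": "2024-10-08T12:34:56+00:00", "description": "notes", "labels": ["draft"]}'
    #     Asset_Metadata.from_json example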

## PRIVATE
type Asset_Cache
    ## PRIVATE
    asset_prefix = "asset:"

    ## PRIVATE
    asset_key (file : Enso_File) -> Text = Asset_Cache.asset_prefix+file.enso_path.to_text

    ## PRIVATE
    invalidate (file : Enso_File) =
        Utils.invalidate_cache (Asset_Cache.asset_key file)

    ## PRIVATE
    invalidate_subtree (file : Enso_File) =
        Utils.invalidate_caches_with_prefix (Asset_Cache.asset_key file)

    ## PRIVATE
    invalidate_all =
        Utils.invalidate_caches_with_prefix Asset_Cache.asset_prefix

    ## PRIVATE
    update (file : Enso_File) (asset : Existing_Enso_Asset) =
        Utils.set_cached (Asset_Cache.asset_key file) asset cache_duration=Cloud_Caching_Settings.get_file_cache_ttl

## PRIVATE
   Returns the cached reference or fetches it from the cloud.
fetch_asset_reference (file : Enso_File) -> Existing_Enso_Asset ! File_Error = file.if_not_error <|
fetch_asset_reference (file : Enso_File) (want_metadata : Boolean) -> Existing_Enso_Asset ! File_Error = file.if_not_error <|
    path = file.enso_path.to_text
    Utils.get_cached (Asset_Cache.asset_key file) cache_duration=Cloud_Caching_Settings.get_file_cache_ttl <|
        Existing_Enso_Asset.resolve_path path if_not_found=(Error.throw (File_Error.Not_Found file))
    on_not_found =
        Error.throw (File_Error.Not_Found file)
    r = Utils.get_cached (Asset_Cache.asset_key file) cache_duration=Cloud_Caching_Settings.get_file_cache_ttl <|
        Existing_Enso_Asset.resolve_path path if_not_found=on_not_found

    needs_refetch = want_metadata && r.metadata.is_nothing
    if needs_refetch.not then r else
        with_metadata = Existing_Enso_Asset.resolve_path path if_not_found=on_not_found
        Asset_Cache.update file with_metadata
        with_metadata
@@ -63,6 +63,10 @@ secrets_api = cloud_root_uri + "secrets"
   Root address for DataLinks API.
datalinks_api = cloud_root_uri + "datalinks"

## PRIVATE
   Root address for managing labels.
tags_api = cloud_root_uri + "tags"

## PRIVATE
   Flushes all cloud caches, including the authentication data
   (so the next request will re-read the credentials file).

@@ -168,10 +168,13 @@ type Test
        next_clue = case prev_clue of
            Clue.Value prev_add_clue -> (x -> prev_add_clue (add_clue x))
            _ -> add_clue

        # Set the new clue
        State.put Clue (Clue.Value next_clue)
        result = behavior
        State.put Clue prev_clue
        result

        # Run the behaviour, and restore the previous clue afterwards, even if the behaviour panics.
        Panic.with_finalizer (State.put Clue prev_clue) <|
            behavior

    ## A helper method that retries the action a few times if it panics.
       It allows making flaky tests more robust.
@@ -6,6 +6,9 @@ import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
@@ -90,20 +93,52 @@ class AuditLogApiAccess {
      return;
    }

    try {
      // We use the request config from the first message.
      // The request config may only change in test scenarios, which just must take this into
      // account.
      var requestConfig = pendingMessages.get(0).requestConfig();
      var request = buildRequest(requestConfig, pendingMessages);
      sendLogRequest(request, MAX_RETRIES);
      notifyJobsAboutSuccess(pendingMessages);
    } catch (RequestFailureException e) {
      notifyJobsAboutFailure(pendingMessages, e);
    var batchesByConfig = splitMessagesByConfig(pendingMessages);
    for (var batch : batchesByConfig) {
      sendBatch(batch);
    }
  }

  /**
   * Sends a batch of log messages.
   *
   * <p>The batch must not be empty and all messages must share the same request config.
   */
  private void sendBatch(List<LogJob> batch) {
    assert !batch.isEmpty() : "The batch must not be empty.";
    // We use the request config from the first message - all messages in the batch should have the
    // same request config.
    var requestConfig = batch.get(0).requestConfig();
    assert requestConfig != null
        : "The request configuration must be set before building a request.";
    assert batch.stream().allMatch(job -> job.requestConfig().equals(requestConfig))
        : "All messages in a batch must have the same request configuration.";

    try {
      var request = buildRequest(requestConfig, batch);
      sendLogRequest(request, MAX_RETRIES);
      notifyJobsAboutSuccess(batch);
    } catch (RequestFailureException e) {
      notifyJobsAboutFailure(batch, e);
    }
  }

  /**
   * Only during testing is it possible to encounter pending messages with different request
   * configs (when the config changes between tests). To send each message where it is intended, we
   * split up the batch by the config.
   */
  Collection<List<LogJob>> splitMessagesByConfig(List<LogJob> messages) {
    HashMap<RequestConfig, List<LogJob>> hashMap = new HashMap<>();
    for (var message : messages) {
      var list = hashMap.computeIfAbsent(message.requestConfig(), k -> new ArrayList<>());
      list.add(message);
    }

    return hashMap.values();
  }

  private void notifyJobsAboutSuccess(List<LogJob> jobs) {
    for (var job : jobs) {
      if (job.completionNotification() != null) {
@@ -261,6 +261,78 @@ add_specs suite_builder setup:Cloud_Tests_Setup = suite_builder.group "Enso Clou
        (Enso_File.root / "test-directory" / "non-existent") . is_descendant_of (Enso_File.root / "test-directory") . should_be_true
        (Enso_File.root / "test-directory") . is_descendant_of (Enso_File.root / "test-directory" / "non-existent") . should_be_false

    group_builder.specify "supports partial metadata for directories" <|
        dir = test_root.get / "test-directory"
        dir.last_modified_time.should_be_a Date_Time

        # TODO this test should be 'reversed' and merged with the one above once the metadata is implemented
        dir.creation_time.should_fail_with Illegal_Argument

    group_builder.specify "should be able to read and modify asset description" <|
        nested_file = test_root.get / "test-directory" / "another.txt"
        nested_file.description . should_equal ""

        my_description = "This is a test file."
        nested_file.set_description my_description . should_equal nested_file
        nested_file.description . should_equal my_description

        # We can also set the description back to empty.
        nested_file.set_description "" . should_equal nested_file
        nested_file.description . should_equal ""

        Context.Output.with_disabled <|
            nested_file.set_description "ABC" . should_fail_with Forbidden_Operation

    group_builder.specify "should be able to get and modify labels on assets" <|
        f = test_root.get / "test_file.json"
        f.labels . should_equal []

        f.set_labels ["A", "B", "C", "D"] . should_equal f
        f.labels . should_equal_ignoring_order ["A", "B", "C", "D"]

        f.add_label "E" . should_equal f
        f.add_label "A" . should_equal f
        f.labels . should_equal_ignoring_order ["A", "B", "C", "D", "E"]

        f.remove_label "B" . should_equal True
        f.remove_label "C" . should_equal True
        f.labels . should_equal_ignoring_order ["A", "D", "E"]

        f.remove_label "non-existent" . should_equal False
        # B is also already removed.
        f.remove_label "B" . should_equal False
        f.labels . should_equal_ignoring_order ["A", "D", "E"]

        Context.Output.with_disabled <|
            # Adding a new label with the disabled context fails.
            f.add_label "new" . should_fail_with Forbidden_Operation

            # Removing a label that is there also fails.
            f.remove_label "A" . should_fail_with Forbidden_Operation

            f.labels . should_equal_ignoring_order ["A", "D", "E"]

            # But adding a label that is already there, or removing one that is not (so no effective change), succeeds:
            f.add_label "A" . should_equal f
            f.remove_label "non-existent" . should_equal False

    group_builder.specify "can get and set metadata on just-created asset" <|
        ## This is a bit of a white-box test - we know that just-created assets are cached without metadata,
           so we want to test this case to ensure nothing breaks here.
        f = (test_root.get / "my-directory-for-metadata").create_directory
        f.should_succeed

        f.description . should_equal ""
        f.labels . should_equal []

        f.set_description "ABC" . should_succeed
        f.description . should_equal "ABC"

        # Also test setting metadata before querying it:
        g = (test_root.get / "my-directory-for-metadata-2").create_directory
        g.set_labels ["A"] . should_succeed
        g.labels . should_equal ["A"]

    group_builder.specify "allows / as well as .. and . in resolve" <|
        (Enso_File.root / "a/b/c") . should_equal (Enso_File.root / "a" / "b" / "c")
        (Enso_File.root / "a///b/c") . should_equal (Enso_File.root / "a" / "b" / "c")
@@ -285,21 +357,6 @@ add_specs suite_builder setup:Cloud_Tests_Setup = suite_builder.group "Enso Clou
        # If the `~` is not at the beginning, it is not resolved.
        Enso_File.new "enso://a/b/c/~" . should_not_equal Enso_File.home

    group_builder.specify "currently does not support metadata for directories" <|
        # TODO this test should be 'reversed' and merged with the one above once the metadata is implemented
        dir = test_root.get / "test-directory"
        dir.creation_time.should_fail_with Illegal_Argument
        dir.last_modified_time.should_fail_with Illegal_Argument

    group_builder.specify "should be able to read other file metadata" pending="TODO needs further design" <|
        nested_file = test_root.get / "test-directory" / "another.txt"

        nested_file.is_absolute.should_be_true
        nested_file.absolute . should_equal nested_file
        nested_file.normalize . should_equal nested_file
        nested_file.posix_permissions . should_be_a File_Permissions
        nested_file.is_writable . should_be_a Boolean

    Local_File_Spec.add_create_and_delete_directory_specs group_builder test_root.get
    Local_File_Spec.add_copy_edge_cases_specs group_builder test_root.get
@@ -1,4 +1,5 @@
from Standard.Base import all
import Standard.Base.Enso_Cloud.Internal.Audit_Log.Audit_Log

from Standard.Table import Table

@@ -36,29 +37,32 @@ add_specs suite_builder prefix ~datalink_to_connection database_pending =

        # Retrying is needed, as there may be some delay before the background thread finishes processing the logs.
        Test.with_retries <|
            Thread.sleep 1000
            # We send a synchronous log to block the main thread until all pending async logs have been processed.
            Audit_Log.report_event "TestEvent" "test message to sync" async=False . should_succeed

            all_events = get_audit_log_events
            relevant_events = all_events.filter e-> e.message.contains table_name
            Test.with_clue ((relevant_events.map .to_text).join '\n' 'Found relevant events are:\n' '\n') <|

                create = relevant_events.find (e-> e.message.contains "CREATE")
                create.should_succeed
                create.user_email . should_equal Enso_User.current.email
                create.metadata.get "connectionUri" . should_contain "jdbc:"
                create.metadata.get "projectName" . should_equal enso_project.namespace+"."+enso_project.name

                insert = relevant_events.find (e-> e.message.contains "INSERT INTO")
                insert.should_succeed
                # The insert query should not contain column cell data - only column names / metadata.
                insert.message.should_not_contain "my_payload"

                create_sequence_number = create.metadata.get "sequenceNumber"
                insert_sequence_number = insert.metadata.get "sequenceNumber"
                create_sequence_number.should_be_a Integer
                insert_sequence_number.should_be_a Integer
                (create_sequence_number < insert_sequence_number) . should_be_true

                relevant_events.find (e-> e.message.contains "SELECT") . should_succeed
                relevant_events.find (e-> e.message.contains "DROP") . should_succeed

    group_builder.specify "should see Database operations performed manually" <| cloud_setup.with_prepared_environment <|
        audited_connection = datalink_to_connection.read
@@ -1,5 +1,8 @@
package org.enso.shttp.cloud_mock;

import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Arrays;
import java.util.Deque;
import java.util.LinkedList;
@@ -15,11 +18,22 @@ public class AssetStore {
  final Directory homeDirectory;

  public AssetStore() {
    rootDirectory = new Directory(ROOT_DIRECTORY_ID, "", null, new LinkedList<>());
    rootDirectory =
        new Directory(ROOT_DIRECTORY_ID, "", null, currentTimeAsString(), new LinkedList<>());
    Directory usersDirectory =
        new Directory(USERS_DIRECTORY_ID, "Users", rootDirectory.id, new LinkedList<>());
        new Directory(
            USERS_DIRECTORY_ID,
            "Users",
            rootDirectory.id,
            currentTimeAsString(),
            new LinkedList<>());
    homeDirectory =
        new Directory(HOME_DIRECTORY_ID, "My test User 1", usersDirectory.id, new LinkedList<>());
        new Directory(
            HOME_DIRECTORY_ID,
            "My test User 1",
            usersDirectory.id,
            currentTimeAsString(),
            new LinkedList<>());

    rootDirectory.children.add(usersDirectory);
    usersDirectory.children.add(homeDirectory);
@@ -37,7 +51,7 @@ public class AssetStore {
    }

    String id = "secret-" + java.util.UUID.randomUUID().toString();
    secrets.add(new Secret(id, title, value, parentDirectoryId));
    secrets.add(new Secret(id, title, value, parentDirectoryId, currentTimeAsString()));
    return id;
  }

@@ -65,10 +79,15 @@ public class AssetStore {
    return List.copyOf(secrets);
  }

  record Secret(String id, String title, String value, String parentDirectoryId) {
    Asset asAsset() {
      return new Asset(id, title, parentDirectoryId);
    }
  /**
   * We do not store the creation time as ZonedDateTime, because that causes trouble with
   * serialization to JSON - we'd need to add more dependencies to handle it. Instead, since this
   * is just a mock, we store it as a raw string.
   */
  private String currentTimeAsString() {
    return ZonedDateTime.now()
        .withZoneSameInstant(ZoneId.of("UTC"))
        .format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
  }

  Asset resolvePath(String[] path) {
@@ -117,11 +136,19 @@ public class AssetStore {
    return currentAsset;
  }

  public record Asset(String id, String title, String parentId) {}

  record Directory(String id, String title, String parentId, LinkedList<Directory> children) {
  record Secret(
      String id, String title, String value, String parentDirectoryId, String modifiedAt) {
    Asset asAsset() {
      return new Asset(id, title, parentId);
      return new Asset(id, title, parentDirectoryId, modifiedAt);
    }
  }

  public record Asset(String id, String title, String parentId, String modifiedAt) {}

  record Directory(
      String id, String title, String parentId, String modifiedAt, LinkedList<Directory> children) {
    Asset asAsset() {
      return new Asset(id, title, parentId, modifiedAt);
    }
  }
}
@@ -47,13 +47,6 @@ public class PostLogHandler implements CloudHandler {
        return;
      }

      try {
        // Delay recording the event to simulate network conditions
        Thread.sleep(100);
      } catch (InterruptedException e) {
        // ignore the interruption
      }

      JsonNode root = jsonMapper.readTree(exchange.decodeBodyAsText());
      var incomingEvents = decodeLogEvents(root);
      if (batchingTestModeEnabled) {