diff --git a/cmd/apigear-streams/main.go b/cmd/apigear-streams/main.go new file mode 100644 index 00000000..85ec725c --- /dev/null +++ b/cmd/apigear-streams/main.go @@ -0,0 +1,7 @@ +package main + +import "github.com/apigear-io/cli/pkg/streams/cli" + +func main() { + cli.Execute() +} diff --git a/data/mon/sample.ndjson b/data/mon/sample.ndjson index 694c4cca..e1b94d29 100644 --- a/data/mon/sample.ndjson +++ b/data/mon/sample.ndjson @@ -1,4 +1,10 @@ { "type": "call", "symbol": "demo.Counter/increment" } { "type": "state", "symbol": "demo.Counter", "data": { "count": 1 } } { "type": "call", "symbol": "demo.Counter/increment" } -{ "type": "state", "symbol": "demo.Counter", "data": { "count": 2 } } \ No newline at end of file +{ "type": "state", "symbol": "demo.Counter", "data": { "count": 2 } } +{ "type": "call", "symbol": "demo.Counter/increment" } +{ "type": "state", "symbol": "demo.Counter", "data": { "count": 3 } } +{ "type": "call", "symbol": "demo.Counter/increment" } +{ "type": "state", "symbol": "demo.Counter", "data": { "count": 4 } } +{ "type": "call", "symbol": "demo.Counter/increment" } +{ "type": "state", "symbol": "demo.Counter", "data": { "count": 5 } } \ No newline at end of file diff --git a/docs/apigear.md b/docs/apigear.md deleted file mode 100644 index ff93cb07..00000000 --- a/docs/apigear.md +++ /dev/null @@ -1,29 +0,0 @@ -## apigear - -apigear creates instrumented SDKs from an API description - -### Synopsis - -ApiGear allows you to describe interfaces and generate instrumented SDKs out of the descriptions. - -### Options - -``` - -h, --help help for apigear -``` - -### SEE ALSO - -* [apigear completion](apigear_completion.md) - Generate the autocompletion script for the specified shell -* [apigear config](apigear_config.md) - Display the config vars -* [apigear generate](apigear_generate.md) - Generate code from APIs -* [apigear monitor](apigear_monitor.md) - Display monitor API calls -* [apigear project](apigear_project.md) - Manage apigear projects -* [apigear simulate](apigear_simulate.md) - Simulate API calls -* [apigear spec](apigear_spec.md) - Load and validate files -* [apigear template](apigear_template.md) - manage sdk templates -* [apigear update](apigear_update.md) - update the program -* [apigear version](apigear_version.md) - display version information -* [apigear x](apigear_x.md) - Experimental commands - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_completion.md b/docs/apigear_completion.md deleted file mode 100644 index 799a11ef..00000000 --- a/docs/apigear_completion.md +++ /dev/null @@ -1,25 +0,0 @@ -## apigear completion - -Generate the autocompletion script for the specified shell - -### Synopsis - -Generate the autocompletion script for apigear for the specified shell. -See each sub-command's help for details on how to use the generated script. 
- - -### Options - -``` - -h, --help help for completion -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description -* [apigear completion bash](apigear_completion_bash.md) - Generate the autocompletion script for bash -* [apigear completion fish](apigear_completion_fish.md) - Generate the autocompletion script for fish -* [apigear completion powershell](apigear_completion_powershell.md) - Generate the autocompletion script for powershell -* [apigear completion zsh](apigear_completion_zsh.md) - Generate the autocompletion script for zsh - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_completion_bash.md b/docs/apigear_completion_bash.md deleted file mode 100644 index 0ce3841a..00000000 --- a/docs/apigear_completion_bash.md +++ /dev/null @@ -1,44 +0,0 @@ -## apigear completion bash - -Generate the autocompletion script for bash - -### Synopsis - -Generate the autocompletion script for the bash shell. - -This script depends on the 'bash-completion' package. -If it is not installed already, you can install it via your OS's package manager. - -To load completions in your current shell session: - - source <(apigear completion bash) - -To load completions for every new session, execute once: - -#### Linux: - - apigear completion bash > /etc/bash_completion.d/apigear - -#### macOS: - - apigear completion bash > $(brew --prefix)/etc/bash_completion.d/apigear - -You will need to start a new shell for this setup to take effect. - - -``` -apigear completion bash -``` - -### Options - -``` - -h, --help help for bash - --no-descriptions disable completion descriptions -``` - -### SEE ALSO - -* [apigear completion](apigear_completion.md) - Generate the autocompletion script for the specified shell - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_completion_fish.md b/docs/apigear_completion_fish.md deleted file mode 100644 index e4863cc2..00000000 --- a/docs/apigear_completion_fish.md +++ /dev/null @@ -1,35 +0,0 @@ -## apigear completion fish - -Generate the autocompletion script for fish - -### Synopsis - -Generate the autocompletion script for the fish shell. - -To load completions in your current shell session: - - apigear completion fish | source - -To load completions for every new session, execute once: - - apigear completion fish > ~/.config/fish/completions/apigear.fish - -You will need to start a new shell for this setup to take effect. - - -``` -apigear completion fish [flags] -``` - -### Options - -``` - -h, --help help for fish - --no-descriptions disable completion descriptions -``` - -### SEE ALSO - -* [apigear completion](apigear_completion.md) - Generate the autocompletion script for the specified shell - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_completion_powershell.md b/docs/apigear_completion_powershell.md deleted file mode 100644 index 863aa6d3..00000000 --- a/docs/apigear_completion_powershell.md +++ /dev/null @@ -1,32 +0,0 @@ -## apigear completion powershell - -Generate the autocompletion script for powershell - -### Synopsis - -Generate the autocompletion script for powershell. - -To load completions in your current shell session: - - apigear completion powershell | Out-String | Invoke-Expression - -To load completions for every new session, add the output of the above command -to your powershell profile. 
- - -``` -apigear completion powershell [flags] -``` - -### Options - -``` - -h, --help help for powershell - --no-descriptions disable completion descriptions -``` - -### SEE ALSO - -* [apigear completion](apigear_completion.md) - Generate the autocompletion script for the specified shell - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_completion_zsh.md b/docs/apigear_completion_zsh.md deleted file mode 100644 index 0b5fe650..00000000 --- a/docs/apigear_completion_zsh.md +++ /dev/null @@ -1,46 +0,0 @@ -## apigear completion zsh - -Generate the autocompletion script for zsh - -### Synopsis - -Generate the autocompletion script for the zsh shell. - -If shell completion is not already enabled in your environment you will need -to enable it. You can execute the following once: - - echo "autoload -U compinit; compinit" >> ~/.zshrc - -To load completions in your current shell session: - - source <(apigear completion zsh); compdef _apigear apigear - -To load completions for every new session, execute once: - -#### Linux: - - apigear completion zsh > "${fpath[1]}/_apigear" - -#### macOS: - - apigear completion zsh > $(brew --prefix)/share/zsh/site-functions/_apigear - -You will need to start a new shell for this setup to take effect. - - -``` -apigear completion zsh [flags] -``` - -### Options - -``` - -h, --help help for zsh - --no-descriptions disable completion descriptions -``` - -### SEE ALSO - -* [apigear completion](apigear_completion.md) - Generate the autocompletion script for the specified shell - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_config.md b/docs/apigear_config.md deleted file mode 100644 index 513ee8e9..00000000 --- a/docs/apigear_config.md +++ /dev/null @@ -1,21 +0,0 @@ -## apigear config - -Display the config vars - -### Synopsis - -Display and edit the configuration variables - -### Options - -``` - -h, --help help for config -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description -* [apigear config get](apigear_config_get.md) - Display configuration values -* [apigear config info](apigear_config_info.md) - Display the config information - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_config_get.md b/docs/apigear_config_get.md deleted file mode 100644 index 8a41b88f..00000000 --- a/docs/apigear_config_get.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear config get - -Display configuration values - -### Synopsis - -Display the value of a configuration variable - -``` -apigear config get [flags] -``` - -### Options - -``` - -h, --help help for get -``` - -### SEE ALSO - -* [apigear config](apigear_config.md) - Display the config vars - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_config_info.md b/docs/apigear_config_info.md deleted file mode 100644 index 07eb5e7a..00000000 --- a/docs/apigear_config_info.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear config info - -Display the config information - -### Synopsis - -Display the config information and the location of the config file - -``` -apigear config info [flags] -``` - -### Options - -``` - -h, --help help for info -``` - -### SEE ALSO - -* [apigear config](apigear_config.md) - Display the config vars - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_generate.md b/docs/apigear_generate.md deleted file mode 100644 index 91b9cd74..00000000 --- a/docs/apigear_generate.md +++ /dev/null @@ -1,21 +0,0 @@ -## apigear 
generate - -Generate code from APIs - -### Synopsis - -generate API SDKs from API descriptions using templates - -### Options - -``` - -h, --help help for generate -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description -* [apigear generate expert](apigear_generate_expert.md) - Generate code using expert mode -* [apigear generate solution](apigear_generate_solution.md) - Generate SDK using a solution document - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_generate_expert.md b/docs/apigear_generate_expert.md deleted file mode 100644 index 633c18c2..00000000 --- a/docs/apigear_generate_expert.md +++ /dev/null @@ -1,29 +0,0 @@ -## apigear generate expert - -Generate code using expert mode - -### Synopsis - -in expert mode you can individually set your generator options. This is helpful when you do not have a solution document. - -``` -apigear generate expert [flags] -``` - -### Options - -``` - -f, --features strings features to enable (default [all]) - --force force overwrite - -h, --help help for expert - -i, --input strings input files (default [apigear]) - -o, --output string output directory (default "out") - -t, --template string template directory (default "tpl") - --watch watch for changes -``` - -### SEE ALSO - -* [apigear generate](apigear_generate.md) - Generate code from APIs - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_generate_solution.md b/docs/apigear_generate_solution.md deleted file mode 100644 index af56b23e..00000000 --- a/docs/apigear_generate_solution.md +++ /dev/null @@ -1,27 +0,0 @@ -## apigear generate solution - -Generate SDK using a solution document - -### Synopsis - -A solution is a yaml document which describes different layers. -Each layer defines the input module files, output directory and the features to enable, -as also the other options. To create a demo module or solution use the 'project create' command. - -``` -apigear generate solution [solution-file] [flags] -``` - -### Options - -``` - --exec string execute a command after generation - -h, --help help for solution - --watch watch solution file for changes -``` - -### SEE ALSO - -* [apigear generate](apigear_generate.md) - Generate code from APIs - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_monitor.md b/docs/apigear_monitor.md deleted file mode 100644 index f82bf666..00000000 --- a/docs/apigear_monitor.md +++ /dev/null @@ -1,21 +0,0 @@ -## apigear monitor - -Display monitor API calls - -### Synopsis - -Display monitored API calls using a monitoring server. SDKs typically create trace points and forward all API traffic to this monitoring service if configured. - -### Options - -``` - -h, --help help for monitor -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description -* [apigear monitor feed](apigear_monitor_feed.md) - Feed a script to a monitor -* [apigear monitor run](apigear_monitor_run.md) - Run the monitor server - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_monitor_feed.md b/docs/apigear_monitor_feed.md deleted file mode 100644 index d5088278..00000000 --- a/docs/apigear_monitor_feed.md +++ /dev/null @@ -1,26 +0,0 @@ -## apigear monitor feed - -Feed a script to a monitor - -### Synopsis - -Feeds API calls from various sources to the monitor to be displayed. This is mainly to playback recorded API calls. 
- -``` -apigear monitor feed [flags] -``` - -### Options - -``` - -h, --help help for feed - --repeat int number of times to repeat the script (default 1) - --sleep duration sleep between each event - --url string monitor server address (default "http://127.0.0.1:5555/monitor/123") -``` - -### SEE ALSO - -* [apigear monitor](apigear_monitor.md) - Display monitor API calls - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_monitor_run.md b/docs/apigear_monitor_run.md deleted file mode 100644 index d35bcf39..00000000 --- a/docs/apigear_monitor_run.md +++ /dev/null @@ -1,24 +0,0 @@ -## apigear monitor run - -Run the monitor server - -### Synopsis - -The monitor server runs on a HTTP port and listens for API calls. - -``` -apigear monitor run [flags] -``` - -### Options - -``` - -a, --addr string address to listen on (default "127.0.0.1:5555") - -h, --help help for run -``` - -### SEE ALSO - -* [apigear monitor](apigear_monitor.md) - Display monitor API calls - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_project.md b/docs/apigear_project.md deleted file mode 100644 index 98d9289f..00000000 --- a/docs/apigear_project.md +++ /dev/null @@ -1,28 +0,0 @@ -## apigear project - -Manage apigear projects - -### Synopsis - -Projects consist of API descriptions, SDK configuration, simulation documents and other files - -### Options - -``` - -h, --help help for project -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description -* [apigear project create](apigear_project_create.md) - Create a new document inside current project -* [apigear project edit](apigear_project_edit.md) - Edit a project in the default editor (vscode) -* [apigear project import](apigear_project_import.md) - Import a remote project -* [apigear project info](apigear_project_info.md) - Display project information -* [apigear project init](apigear_project_init.md) - Initialize a new project -* [apigear project open](apigear_project_open.md) - Open a project in studio -* [apigear project pack](apigear_project_pack.md) - Pack a project -* [apigear project recent](apigear_project_recent.md) - Display recent projects -* [apigear project share](apigear_project_share.md) - Share a project with your team - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_project_create.md b/docs/apigear_project_create.md deleted file mode 100644 index c25652ca..00000000 --- a/docs/apigear_project_create.md +++ /dev/null @@ -1,24 +0,0 @@ -## apigear project create - -Create a new document inside current project - -### Synopsis - -Create a new document inside current project from a template. - -``` -apigear project create doc-type doc-name [flags] -``` - -### Options - -``` - -h, --help help for create - -p, --project string project directory (default ".") -``` - -### SEE ALSO - -* [apigear project](apigear_project.md) - Manage apigear projects - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_project_edit.md b/docs/apigear_project_edit.md deleted file mode 100644 index 8f2fbb24..00000000 --- a/docs/apigear_project_edit.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear project edit - -Edit a project in the default editor (vscode) - -### Synopsis - -Edit a project in the default editor (e.g.Visual Studio Code). 
- -``` -apigear project edit [flags] -``` - -### Options - -``` - -h, --help help for edit -``` - -### SEE ALSO - -* [apigear project](apigear_project.md) - Manage apigear projects - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_project_import.md b/docs/apigear_project_import.md deleted file mode 100644 index 378a1834..00000000 --- a/docs/apigear_project_import.md +++ /dev/null @@ -1,24 +0,0 @@ -## apigear project import - -Import a remote project - -### Synopsis - -Import a remote project from a repository to the local file system - -``` -apigear project import source --target target [flags] -``` - -### Options - -``` - -h, --help help for import - -t, --target string target directory -``` - -### SEE ALSO - -* [apigear project](apigear_project.md) - Manage apigear projects - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_project_info.md b/docs/apigear_project_info.md deleted file mode 100644 index 377eeb4b..00000000 --- a/docs/apigear_project_info.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear project info - -Display project information - -### Synopsis - -Display detailed project information - -``` -apigear project info [flags] -``` - -### Options - -``` - -h, --help help for info -``` - -### SEE ALSO - -* [apigear project](apigear_project.md) - Manage apigear projects - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_project_init.md b/docs/apigear_project_init.md deleted file mode 100644 index ac26b524..00000000 --- a/docs/apigear_project_init.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear project init - -Initialize a new project - -### Synopsis - -Initialize a project with a default project files - -``` -apigear project init [flags] -``` - -### Options - -``` - -h, --help help for init -``` - -### SEE ALSO - -* [apigear project](apigear_project.md) - Manage apigear projects - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_project_open.md b/docs/apigear_project_open.md deleted file mode 100644 index 97ec3916..00000000 --- a/docs/apigear_project_open.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear project open - -Open a project in studio - -### Synopsis - -Open the given project in the desktop studio, if installed - -``` -apigear project open project-path [flags] -``` - -### Options - -``` - -h, --help help for open -``` - -### SEE ALSO - -* [apigear project](apigear_project.md) - Manage apigear projects - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_project_pack.md b/docs/apigear_project_pack.md deleted file mode 100644 index 6649f69f..00000000 --- a/docs/apigear_project_pack.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear project pack - -Pack a project - -### Synopsis - -Pack the project and all files into a archive file - -``` -apigear project pack [flags] -``` - -### Options - -``` - -h, --help help for pack -``` - -### SEE ALSO - -* [apigear project](apigear_project.md) - Manage apigear projects - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_project_recent.md b/docs/apigear_project_recent.md deleted file mode 100644 index bab0aea3..00000000 --- a/docs/apigear_project_recent.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear project recent - -Display recent projects - -### Synopsis - -Display recently used projects and their locations - -``` -apigear project recent [flags] -``` - -### Options - -``` - -h, --help help for recent -``` - -### SEE ALSO - -* [apigear project](apigear_project.md) - Manage 
apigear projects - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_project_share.md b/docs/apigear_project_share.md deleted file mode 100644 index a7b379b5..00000000 --- a/docs/apigear_project_share.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear project share - -Share a project with your team - -### Synopsis - -Share a project and all files with your team to work together - -``` -apigear project share [flags] -``` - -### Options - -``` - -h, --help help for share -``` - -### SEE ALSO - -* [apigear project](apigear_project.md) - Manage apigear projects - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_simulate.md b/docs/apigear_simulate.md deleted file mode 100644 index ea4b5460..00000000 --- a/docs/apigear_simulate.md +++ /dev/null @@ -1,21 +0,0 @@ -## apigear simulate - -Simulate API calls - -### Synopsis - -Simulate api calls using either a dynamic JS script - -### Options - -``` - -h, --help help for simulate -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description -* [apigear simulate feed](apigear_simulate_feed.md) - Feed simulation from command line -* [apigear simulate run](apigear_simulate_run.md) - Run simulation server using an optional scenario file - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_simulate_feed.md b/docs/apigear_simulate_feed.md deleted file mode 100644 index 1d6776ee..00000000 --- a/docs/apigear_simulate_feed.md +++ /dev/null @@ -1,26 +0,0 @@ -## apigear simulate feed - -Feed simulation from command line - -### Synopsis - -Feed simulation calls using JSON documents from command line - -``` -apigear simulate feed [flags] -``` - -### Options - -``` - --addr string address of the simulation server (default "ws://127.0.0.1:4333/ws") - -h, --help help for feed - --repeat int number of times to repeat the script (default 1) - --sleep duration sleep duration between messages (default 100ns) -``` - -### SEE ALSO - -* [apigear simulate](apigear_simulate.md) - Simulate API calls - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_simulate_run.md b/docs/apigear_simulate_run.md deleted file mode 100644 index ac8310ec..00000000 --- a/docs/apigear_simulate_run.md +++ /dev/null @@ -1,26 +0,0 @@ -## apigear simulate run - -Run simulation server using an optional scenario file - -### Synopsis - -Simulation server simulates the API backend. -In its simplest form it just answers every call and all properties are set to default values. -Using a scenario you can define additional static and scripted data and behavior. 
- -``` -apigear simulate run [scenario to run] [flags] -``` - -### Options - -``` - -a, --addr string address to listen on (default "127.0.0.1:4333") - -h, --help help for run -``` - -### SEE ALSO - -* [apigear simulate](apigear_simulate.md) - Simulate API calls - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_spec.md b/docs/apigear_spec.md deleted file mode 100644 index 175a5237..00000000 --- a/docs/apigear_spec.md +++ /dev/null @@ -1,20 +0,0 @@ -## apigear spec - -Load and validate files - -### Synopsis - -Specification defines the file formats used inside apigear - -### Options - -``` - -h, --help help for spec -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description -* [apigear spec check](apigear_spec_check.md) - Check document - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_spec_check.md b/docs/apigear_spec_check.md deleted file mode 100644 index 61452413..00000000 --- a/docs/apigear_spec_check.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear spec check - -Check document - -### Synopsis - -Check documents and report errors - -``` -apigear spec check [flags] -``` - -### Options - -``` - -h, --help help for check -``` - -### SEE ALSO - -* [apigear spec](apigear_spec.md) - Load and validate files - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_template.md b/docs/apigear_template.md deleted file mode 100644 index 7e2919ed..00000000 --- a/docs/apigear_template.md +++ /dev/null @@ -1,31 +0,0 @@ -## apigear template - -manage sdk templates - -### Synopsis - -sdk templates are git repositories that contain a sdk template. - -``` -apigear template [flags] -``` - -### Options - -``` - -h, --help help for template -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description -* [apigear template import](apigear_template_import.md) - import template -* [apigear template info](apigear_template_info.md) - display template information -* [apigear template install](apigear_template_install.md) - install template -* [apigear template list](apigear_template_list.md) - list templates -* [apigear template remove](apigear_template_remove.md) - remove installed template -* [apigear template search](apigear_template_search.md) - search templates -* [apigear template update](apigear_template_update.md) - update template registry -* [apigear template upgrade](apigear_template_upgrade.md) - upgrade installed template - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_template_import.md b/docs/apigear_template_import.md deleted file mode 100644 index 3b9ce4a4..00000000 --- a/docs/apigear_template_import.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear template import - -import template - -### Synopsis - -import template from a git-url - -``` -apigear template import [git-url] [flags] -``` - -### Options - -``` - -h, --help help for import -``` - -### SEE ALSO - -* [apigear template](apigear_template.md) - manage sdk templates - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_template_info.md b/docs/apigear_template_info.md deleted file mode 100644 index 76b7ff73..00000000 --- a/docs/apigear_template_info.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear template info - -display template information - -### Synopsis - -display template information for named templates. I no name is given all templates are listed. 
- -``` -apigear template info [name] [flags] -``` - -### Options - -``` - -h, --help help for info -``` - -### SEE ALSO - -* [apigear template](apigear_template.md) - manage sdk templates - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_template_install.md b/docs/apigear_template_install.md deleted file mode 100644 index 9b35f810..00000000 --- a/docs/apigear_template_install.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear template install - -install template - -### Synopsis - -install template from registry using a name - -``` -apigear template install [name] [flags] -``` - -### Options - -``` - -h, --help help for install -``` - -### SEE ALSO - -* [apigear template](apigear_template.md) - manage sdk templates - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_template_list.md b/docs/apigear_template_list.md deleted file mode 100644 index 7a4a0608..00000000 --- a/docs/apigear_template_list.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear template list - -list templates - -### Synopsis - -list templates. A template can be installed the install command. - -``` -apigear template list [flags] -``` - -### Options - -``` - -h, --help help for list -``` - -### SEE ALSO - -* [apigear template](apigear_template.md) - manage sdk templates - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_template_remove.md b/docs/apigear_template_remove.md deleted file mode 100644 index 46e2e22b..00000000 --- a/docs/apigear_template_remove.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear template remove - -remove installed template - -### Synopsis - -remove installed template by name. - -``` -apigear template remove [name] [flags] -``` - -### Options - -``` - -h, --help help for remove -``` - -### SEE ALSO - -* [apigear template](apigear_template.md) - manage sdk templates - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_template_search.md b/docs/apigear_template_search.md deleted file mode 100644 index 8fc2f742..00000000 --- a/docs/apigear_template_search.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear template search - -search templates - -### Synopsis - -search templates by name. - -``` -apigear template search [flags] -``` - -### Options - -``` - -h, --help help for search -``` - -### SEE ALSO - -* [apigear template](apigear_template.md) - manage sdk templates - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_template_update.md b/docs/apigear_template_update.md deleted file mode 100644 index 4474b7f3..00000000 --- a/docs/apigear_template_update.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear template update - -update template registry - -### Synopsis - -update registry from remote source. - -``` -apigear template update [flags] -``` - -### Options - -``` - -h, --help help for update -``` - -### SEE ALSO - -* [apigear template](apigear_template.md) - manage sdk templates - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_template_upgrade.md b/docs/apigear_template_upgrade.md deleted file mode 100644 index b1ce0030..00000000 --- a/docs/apigear_template_upgrade.md +++ /dev/null @@ -1,24 +0,0 @@ -## apigear template upgrade - -upgrade installed template - -### Synopsis - -upgrade installed template. If name is not specified, all installed templates will be upgraded. 
- -``` -apigear template upgrade [name] [flags] -``` - -### Options - -``` - -a, --all upgrade all installed templates - -h, --help help for upgrade -``` - -### SEE ALSO - -* [apigear template](apigear_template.md) - manage sdk templates - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_update.md b/docs/apigear_update.md deleted file mode 100644 index 4f444f2b..00000000 --- a/docs/apigear_update.md +++ /dev/null @@ -1,24 +0,0 @@ -## apigear update - -update the program - -### Synopsis - -check and update the program to the latest version - -``` -apigear update [flags] -``` - -### Options - -``` - -f, --force force update - -h, --help help for update -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_version.md b/docs/apigear_version.md deleted file mode 100644 index aefa1108..00000000 --- a/docs/apigear_version.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear version - -display version information - -### Synopsis - -display version, commit and build-date information - -``` -apigear version [flags] -``` - -### Options - -``` - -h, --help help for version -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_x.md b/docs/apigear_x.md deleted file mode 100644 index 255e4854..00000000 --- a/docs/apigear_x.md +++ /dev/null @@ -1,22 +0,0 @@ -## apigear x - -Experimental commands - -### Synopsis - -Command which are under development or experimental - -### Options - -``` - -h, --help help for x -``` - -### SEE ALSO - -* [apigear](apigear.md) - apigear creates instrumented SDKs from an API description -* [apigear x doc](apigear_x_doc.md) - exports cli docs as markdown -* [apigear x json2yaml](apigear_x_json2yaml.md) - convert json doc to yaml doc -* [apigear x yaml2json](apigear_x_yaml2json.md) - convert yaml doc to json doc - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_x_doc.md b/docs/apigear_x_doc.md deleted file mode 100644 index c120bdf6..00000000 --- a/docs/apigear_x_doc.md +++ /dev/null @@ -1,24 +0,0 @@ -## apigear x doc - -exports cli docs as markdown - -### Synopsis - -export the cli docs as markdown document into a dir - -``` -apigear x doc [flags] -``` - -### Options - -``` - -f, --force make dir and overwrite existing files - -h, --help help for doc -``` - -### SEE ALSO - -* [apigear x](apigear_x.md) - Experimental commands - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_x_json2yaml.md b/docs/apigear_x_json2yaml.md deleted file mode 100644 index a99a3941..00000000 --- a/docs/apigear_x_json2yaml.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear x json2yaml - -convert json doc to yaml doc - -### Synopsis - -convert one or many json documents to yaml documents - -``` -apigear x json2yaml [flags] -``` - -### Options - -``` - -h, --help help for json2yaml -``` - -### SEE ALSO - -* [apigear x](apigear_x.md) - Experimental commands - -###### Auto generated by spf13/cobra on 15-Mar-2023 diff --git a/docs/apigear_x_yaml2json.md b/docs/apigear_x_yaml2json.md deleted file mode 100644 index 2d8c0dc6..00000000 --- a/docs/apigear_x_yaml2json.md +++ /dev/null @@ -1,23 +0,0 @@ -## apigear x yaml2json - -convert yaml doc to json doc - -### Synopsis - -convert one or many yaml documents to json documents - -``` -apigear x yaml2json [flags] -``` - 
-### Options
-
-```
-  -h, --help   help for yaml2json
-```
-
-### SEE ALSO
-
-* [apigear x](apigear_x.md) - Experimental commands
-
-###### Auto generated by spf13/cobra on 15-Mar-2023
diff --git a/docs/streams.md b/docs/streams.md
new file mode 100644
index 00000000..75de8c82
--- /dev/null
+++ b/docs/streams.md
@@ -0,0 +1,144 @@
+# Streams
+
+
+## Server startup
+
+To start the ApiGear server with streams support, run the following command:
+```sh
+apigear serve
+```
+
+## Recording a device stream
+
+### Using Monitor Data
+
+To simulate monitoring data for testing purposes, you can use the `apigear monitor feed` command with a sample NDJSON file:
+
+```sh
+apigear monitor feed --repeat 1000 --interval 1s data/mon/sample.ndjson
+```
+
+This command publishes the monitoring data from `sample.ndjson` one line per second and repeats the file 1000 times.
+
+### Producing data directly into streams
+
+Alternatively, to simulate a device producing monitoring data, you can publish data from an NDJSON file directly into the streams system using the following command:
+
+```sh
+apigear stream publish --file data/mon/sample.ndjson --device 123 --interval 1s
+```
+
+This sends a new line from `sample.ndjson` to the monitoring system for device `123` every second.
+
+### Starting the recording
+
+To start recording a device stream, use the following command:
+
+```sh
+apigear stream record --device 123
+```
+
+This will output:
+```
+recording started session=<session-id>
+```
+
+You will need this session ID to stop the recording and play it back later.
+
+### Stopping the recording
+
+To stop the recording, use the following command with the session ID you received earlier:
+```sh
+apigear stream stop --session <session-id>
+```
+
+### Viewing recorded sessions
+
+List all recorded sessions:
+```sh
+apigear stream ls
+```
+
+Show detailed information about a specific session:
+```sh
+apigear stream show --session <session-id>
+```
+
+## Playing back a recorded stream
+
+### Olink connection
+
+To play back the recorded stream, first connect to the Olink server and link the desired object:
+```sh
+apigear olink
+> connect
+> link demo.Counter
+```
+
+### Playback command
+
+Then, use the following command to play back the recorded stream using the session ID:
+```sh
+apigear stream play --session <session-id>
+```
+
+You can control playback speed with the `--speed` flag (e.g., `--speed 0.25` for quarter speed, `--speed 2` for double speed).
+
+## Additional commands
+
+### Monitoring live streams
+
+To monitor live data from a device in real time:
+```sh
+apigear stream tail --device 123
+```
+
+### Generating test data
+
+To generate test monitoring data from a template:
+```sh
+apigear stream generate --template template.json --output test-data.ndjson --count 1000
+```
+
+### Managing devices
+
+Set device metadata:
+```sh
+apigear stream device-set --device 123 --desc "Test Device" --location "Lab A" --owner "Team X"
+```
+
+Get device information:
+```sh
+apigear stream device-get --device 123
+```
+
+List all devices:
+```sh
+apigear stream device-ls
+```
+
+### Managing device buffers
+
+Enable buffering for a device (useful with `--pre-roll` during recording):
+```sh
+apigear stream device buffer enable --device 123 --window 5m
+```
+
+Get buffer information:
+```sh
+apigear stream device buffer info --device 123
+```
+
+## Behind the scenes
+
+- When you start the apigear server, you actually start a NATS server with JetStream enabled.
+- When you publish data to the monitoring system using `stream publish`, the data is sent to a NATS subject based on the device ID (e.g., `monitor.123`).
+- To record this data, run `apigear stream record --device 123`, which creates a JetStream consumer subscription to the monitoring subject for the specified device ID.
+- The recording entry and state are stored in a KV store in JetStream. This allows us to watch the state and resume interrupted recordings.
+- Device information is also stored in the KV store, so we know which device each recording belongs to.
+- The recording subscription stores the recorded data in a JetStream stream under a unique session ID.
+- To stop the recording, run `apigear stream stop --session <session-id>`, which stops the subscription and finalizes the recorded data.
+- Before playback, connect to the Olink server and link the desired object (e.g., `demo.Counter`) to receive the playback data.
+- Finally, run `apigear stream play --session <session-id>`, which reads the recorded data from JetStream and publishes it to the linked Olink object using a JetStream consumer.
+- You can control the playback speed using the `--speed` flag (e.g., `--speed 2` for double speed).
+- Device buffers can be enabled with `--pre-roll` during recording to capture data from before the recording started.
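+
+As a rough illustration of the flow described above, here is a minimal sketch that publishes NDJSON lines to the per-device subject and captures them into a per-session JetStream stream, using the `nats.go` and `google/uuid` packages this project already depends on. The stream name, the use of the legacy `nc.JetStream()` API and the simplified error handling are illustrative assumptions; the CLI's actual internals may differ.
+
+```go
+// Hypothetical sketch of the record flow: a device publishes NDJSON lines to
+// monitor.<device-id> while a stream named after the session ID captures that
+// subject. Names such as "REC_<session>" are assumptions, not the real scheme.
+package main
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/nats-io/nats.go"
+)
+
+func main() {
+	nc, err := nats.Connect(nats.DefaultURL)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer nc.Close()
+
+	js, err := nc.JetStream()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Start a "recording": create a stream that captures everything published
+	// on the device subject, named after a freshly generated session ID.
+	session := uuid.NewString()
+	if _, err := js.AddStream(&nats.StreamConfig{
+		Name:     "REC_" + session,        // assumed naming scheme
+		Subjects: []string{"monitor.123"}, // per-device subject, as described above
+		Storage:  nats.FileStorage,
+	}); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("recording started session=" + session)
+
+	// Simulate the device: publish one NDJSON line per second, similar to
+	// `apigear stream publish --interval 1s`.
+	line := []byte(`{ "type": "call", "symbol": "demo.Counter/increment" }`)
+	for i := 0; i < 3; i++ {
+		if _, err := js.Publish("monitor.123", line); err != nil {
+			log.Fatal(err)
+		}
+		time.Sleep(time.Second)
+	}
+}
+```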
diff --git a/go.mod b/go.mod index 25fcff93..a588334d 100644 --- a/go.mod +++ b/go.mod @@ -4,9 +4,9 @@ go 1.25.0 require ( github.com/apigear-io/apigear-by-example v0.1.0 - github.com/spf13/cobra v1.9.1 - github.com/spf13/viper v1.20.1 - github.com/stretchr/testify v1.11.0 + github.com/spf13/cobra v1.10.1 + github.com/spf13/viper v1.21.0 + github.com/stretchr/testify v1.11.1 gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -14,21 +14,22 @@ require ( github.com/Masterminds/semver/v3 v3.4.0 github.com/antlr4-go/antlr/v4 v4.13.1 github.com/apigear-io/objectlink-core-go v0.5.4 - github.com/creativeprojects/go-selfupdate v1.5.0 + github.com/creativeprojects/go-selfupdate v1.5.1 github.com/dop251/goja v0.0.0-20250630131328-58d95d85e994 github.com/dop251/goja_nodejs v0.0.0-20250409162600-f7acab6894b0 github.com/ettle/strcase v0.2.0 github.com/fatih/color v1.18.0 github.com/fsnotify/fsnotify v1.9.0 github.com/gertd/go-pluralize v0.2.1 - github.com/go-chi/chi/v5 v5.2.2 + github.com/go-chi/chi/v5 v5.2.3 github.com/go-git/go-git/v5 v5.16.2 github.com/go-viper/mapstructure/v2 v2.4.0 github.com/gocarina/gocsv v0.0.0-20240520201108-78e41c74b4b1 github.com/goccy/go-yaml v1.18.0 github.com/google/uuid v1.6.0 - github.com/mark3labs/mcp-go v0.38.0 - github.com/nats-io/nats-server/v2 v2.11.8 + github.com/gorilla/websocket v1.5.3 + github.com/mark3labs/mcp-go v0.41.1 + github.com/nats-io/nats-server/v2 v2.12.0 github.com/rs/zerolog v1.34.0 github.com/whilp/git-urls v1.0.0 github.com/xeipuuv/gojsonschema v1.2.0 ) require ( atomicgo.dev/schedule v0.1.0 // indirect - code.gitea.io/sdk/gitea v0.21.0 // indirect + code.gitea.io/sdk/gitea v0.22.0 // indirect dario.cat/mergo v1.0.2 // indirect github.com/42wim/httpsig v1.2.3 // indirect + github.com/antithesishq/antithesis-sdk-go v0.5.0 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/chzyer/readline v1.5.1 // indirect + github.com/clipperhouse/uax29/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.6.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect - github.com/cyphar/filepath-securejoin v0.4.1 // indirect + github.com/cyphar/filepath-securejoin v0.5.0 // indirect
github.com/davidmz/go-pageant v1.0.2 // indirect github.com/go-fed/httpsig v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/go-github/v30 v30.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/go-tpm v0.9.5 // indirect - github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect - github.com/gorilla/websocket v1.5.3 // indirect + github.com/google/go-tpm v0.9.6 // indirect + github.com/google/pprof v0.0.0-20250923004556-9e5a51aed1e8 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/invopop/jsonschema v0.13.0 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect - github.com/mailru/easyjson v0.9.0 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/mailru/easyjson v0.9.1 // indirect github.com/minio/highwayhash v1.0.3 // indirect github.com/nats-io/jwt/v2 v2.8.0 // indirect github.com/nats-io/nkeys v0.4.11 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe // indirect - github.com/pjbgf/sha1cd v0.4.0 // indirect + github.com/pjbgf/sha1cd v0.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.10.0 // indirect + github.com/sagikazarmark/locafero v0.12.0 // indirect github.com/skeema/knownhosts v1.3.1 // indirect - github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/ulikunitz/xz v0.5.13 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/xanzy/go-gitlab v0.115.0 // indirect github.com/yosida95/uritemplate/v3 v3.0.2 // indirect - golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect - golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/time v0.12.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect + golang.org/x/oauth2 v0.31.0 // indirect + golang.org/x/time v0.13.0 // indirect ) require ( @@ -87,6 +88,7 @@ require ( atomicgo.dev/keyboard v0.2.9 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.3.0 // indirect + github.com/brianvoe/gofakeit/v7 v7.7.3 github.com/containerd/console v1.0.5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dlclark/regexp2 v1.11.5 // indirect @@ -95,32 +97,30 @@ require ( github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-sourcemap/sourcemap v2.1.4+incompatible // indirect - github.com/gookit/color v1.5.4 // indirect + github.com/gookit/color v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/kevinburke/ssh_config v1.4.0 // indirect github.com/lithammer/fuzzysearch v1.1.8 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mattn/go-runewidth v0.0.19 // indirect github.com/mitchellh/mapstructure v1.5.0 - github.com/nats-io/nats.go 
v1.45.0 + github.com/nats-io/nats.go v1.46.1 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pterm/pterm v0.12.81 - github.com/rivo/uniseg v0.4.7 // indirect - github.com/sasha-s/go-deadlock v0.3.5 github.com/sergi/go-diff v1.4.0 // indirect - github.com/spf13/afero v1.14.0 // indirect - github.com/spf13/cast v1.9.2 // indirect - github.com/spf13/pflag v1.0.7 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect - golang.org/x/crypto v0.41.0 // indirect - golang.org/x/net v0.43.0 // indirect - golang.org/x/sys v0.35.0 // indirect - golang.org/x/term v0.34.0 // indirect - golang.org/x/text v0.28.0 // indirect + golang.org/x/crypto v0.42.0 // indirect + golang.org/x/net v0.44.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/term v0.35.0 // indirect + golang.org/x/text v0.29.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect ) diff --git a/go.sum b/go.sum index eb1940f0..a39b06b2 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ atomicgo.dev/keyboard v0.2.9 h1:tOsIid3nlPLZ3lwgG8KZMp/SFmr7P0ssEN5JUsm78K8= atomicgo.dev/keyboard v0.2.9/go.mod h1:BC4w9g00XkxH/f1HXhW2sXmJFOCWbKn9xrOunSFtExQ= atomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs= atomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU= -code.gitea.io/sdk/gitea v0.21.0 h1:69n6oz6kEVHRo1+APQQyizkhrZrLsTLXey9142pfkD4= -code.gitea.io/sdk/gitea v0.21.0/go.mod h1:tnBjVhuKJCn8ibdyyhvUyxrR1Ca2KHEoTWoukNhXQPA= +code.gitea.io/sdk/gitea v0.22.0 h1:HCKq7bX/HQ85Nw7c/HAhWgRye+vBp5nQOE8Md1+9Ef0= +code.gitea.io/sdk/gitea v0.22.0/go.mod h1:yyF5+GhljqvA30sRDreoyHILruNiy4ASufugzYg0VHM= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= github.com/42wim/httpsig v1.2.3 h1:xb0YyWhkYj57SPtfSttIobJUPJZB9as1nsfo7KWVcEs= @@ -30,8 +30,8 @@ github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBi github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op h1:+OSa/t11TFhqfrX0EOSqQBDJ0YlpmK0rDSiB19dg9M0= -github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= +github.com/antithesishq/antithesis-sdk-go v0.5.0 h1:cudCFF83pDDANcXFzkQPUHHedfnnIbUO3JMr9fqwFJs= +github.com/antithesishq/antithesis-sdk-go v0.5.0/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/apigear-io/apigear-by-example v0.1.0 h1:DLvoafzSx4R0q+Rw+KZU3aHysuyG5fbSK8neMJJsg9M= @@ -43,6 +43,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/atomicgo/cursor v0.0.1/go.mod 
h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= +github.com/brianvoe/gofakeit/v7 v7.7.3 h1:RWOATEGpJ5EVg2nN8nlaEyaV/aB4d6c3GqYrbqQekss= +github.com/brianvoe/gofakeit/v7 v7.7.3/go.mod h1:QXuPeBw164PJCzCUZVmgpgHJ3Llj49jSLVkKPMtxtxA= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= @@ -51,6 +53,8 @@ github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY= +github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= @@ -60,10 +64,10 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creativeprojects/go-selfupdate v1.5.0 h1:4zuFafc/qGpymx7umexxth2y2lJXoBR49c3uI0Hr+zU= -github.com/creativeprojects/go-selfupdate v1.5.0/go.mod h1:Pewm8hY7Xe1ne7P8irVBAFnXjTkRuxbbkMlBeTdumNQ= -github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= -github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/creativeprojects/go-selfupdate v1.5.1 h1:fuyEGFFfqcC8SxDGolcEPYPLXGQ9Mcrc5uRyRG2Mqnk= +github.com/creativeprojects/go-selfupdate v1.5.1/go.mod h1:2uY75rP8z/D/PBuDn6mlBnzu+ysEmwOJfcgF8np0JIM= +github.com/cyphar/filepath-securejoin v0.5.0 h1:hIAhkRBMQ8nIeuVwcAoymp7MY4oherZdAxD+m0u9zaw= +github.com/cyphar/filepath-securejoin v0.5.0/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -94,8 +98,8 @@ github.com/gitsight/go-vcsurl v1.0.1 h1:wkijKsbVg9R2IBP97U7wOANeIW9WJJKkBwS9Xqll github.com/gitsight/go-vcsurl v1.0.1/go.mod h1:qRFdKDa/0Lh9MT0xE+qQBYZ/01+mY1H40rZUHR24X9U= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= -github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618= -github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= 
+github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI= github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -126,16 +130,18 @@ github.com/google/go-github/v30 v30.1.0/go.mod h1:n8jBpHl45a/rlBUtRJMOG4GhNADUQF github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/go-tpm v0.9.5 h1:ocUmnDebX54dnW+MQWGQRbdaAcJELsa6PqZhJ48KwVU= -github.com/google/go-tpm v0.9.5/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= -github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= -github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= +github.com/google/go-tpm v0.9.6 h1:Ku42PT4LmjDu1H5C5ISWLlpI1mj+Zq7sPGKoRw2XROA= +github.com/google/go-tpm v0.9.6/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= +github.com/google/pprof v0.0.0-20250923004556-9e5a51aed1e8 h1:ZI8gCoCjGzPsum4L21jHdQs8shFBIQih1TM9Rd/c+EQ= +github.com/google/pprof v0.0.0-20250923004556-9e5a51aed1e8/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gookit/assert v0.1.1 h1:lh3GcawXe/p+cU7ESTZ5Ui3Sm/x8JWpIis4/1aF0mY0= +github.com/gookit/assert v0.1.1/go.mod h1:jS5bmIVQZTIwk42uXl4lyj4iaaxx32tqH16CFj0VX2E= github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo= -github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= -github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= +github.com/gookit/color v1.6.0 h1:JjJXBTk1ETNyqyilJhkTXJYYigHG24TM9Xa2M1xAhRA= +github.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -159,8 +165,8 @@ github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYW github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -170,10 +176,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4= github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4= -github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= -github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/mark3labs/mcp-go v0.38.0 h1:E5tmJiIXkhwlV0pLAwAT0O5ZjUZSISE/2Jxg+6vpq4I= -github.com/mark3labs/mcp-go v0.38.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= +github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8= +github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mark3labs/mcp-go v0.41.1 h1:w78eWfiQam2i8ICL7AL0WFiq7KHNJQ6UB53ZVtH4KGA= +github.com/mark3labs/mcp-go v0.41.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= @@ -182,18 +188,18 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= +github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g= github.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA= -github.com/nats-io/nats-server/v2 v2.11.8 h1:7T1wwwd/SKTDWW47KGguENE7Wa8CpHxLD1imet1iW7c= -github.com/nats-io/nats-server/v2 v2.11.8/go.mod h1:C2zlzMA8PpiMMxeXSz7FkU3V+J+H15kiqrkvgtn2kS8= -github.com/nats-io/nats.go v1.45.0 h1:/wGPbnYXDM0pLKFjZTX+2JOw9TQPoIgTFrUaH97giwA= -github.com/nats-io/nats.go v1.45.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= +github.com/nats-io/nats-server/v2 v2.12.0 h1:OIwe8jZUqJFrh+hhiyKu8snNib66qsx806OslqJuo74= +github.com/nats-io/nats-server/v2 v2.12.0/go.mod h1:nr8dhzqkP5E/lDwmn+A2CvQPMd1yDKXQI7iGg3lAvww= +github.com/nats-io/nats.go v1.46.1 h1:bqQ2ZcxVd2lpYI97xYASeRTY3I5boe/IVmuUDPitHfo= +github.com/nats-io/nats.go v1.46.1/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0= github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= @@ -202,11 +208,8 @@ github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= -github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe h1:vHpqOnPlnkba8iSxU4j/CvDSS9J4+F4473esQsYLGoE= -github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= -github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY= -github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= +github.com/pjbgf/sha1cd v0.5.0 h1:a+UkboSi1znleCDUNT3M5YxjOnN1fz2FhN48FlwCxs0= +github.com/pjbgf/sha1cd v0.5.0/go.mod h1:lhpGlyHLpQZoxMv8HcgXvZEhcGs0PG/vsZnEJ7H0iCM= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -222,8 +225,6 @@ github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkG github.com/pterm/pterm v0.12.81 h1:ju+j5I2++FO1jBKMmscgh5h5DPFDFMB7epEjSoKehKA= github.com/pterm/pterm v0.12.81/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= @@ -231,29 +232,25 @@ github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.10.0 h1:FM8Cv6j2KqIhM2ZK7HZjm4mpj9NBktLgowT1aN9q5Cc= -github.com/sagikazarmark/locafero v0.10.0/go.mod h1:Ieo3EUsjifvQu4NZwV5sPd4dwvu0OCgEQV7vjc9yDjw= -github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= -github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= +github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4= +github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= -github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= -github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= -github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= -github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= -github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= -github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -261,12 +258,12 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= -github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/ulikunitz/xz v0.5.13 h1:ar98gWrjf4H1ev05fYP/o29PDZw9DrI3niHtnEqyuXA= -github.com/ulikunitz/xz v0.5.13/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY=
+github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/whilp/git-urls v1.0.0 h1:95f6UMWN5FKW71ECsXRUd3FVYiXdrE7aX4NZKcPmIjU= github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgwk/qCfE= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -336,25 +335,25 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/pkg/app/connect.go b/pkg/app/connect.go new file mode 100644 index 00000000..d425b7a9 --- /dev/null +++ b/pkg/app/connect.go @@ -0,0 +1,82 @@ +package app + +import ( + "context" + "errors" + + "github.com/apigear-io/cli/pkg/log" + "github.com/apigear-io/cli/pkg/server" + "github.com/apigear-io/cli/pkg/sim" + 
"github.com/apigear-io/cli/pkg/streams/natsutil" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" +) + +func WithNATS(ctx context.Context, addr string, fn func(*nats.Conn) error) error { + nc, err := natsutil.ConnectNATS(addr) + if err != nil { + log.Info().Msg("NATS server not available, starting temporary server") + err = WithServer(ctx, server.Options{ + NatsHost: "localhost", + NatsPort: 4222, + HttpAddr: "localhost:5555", + }, func(s *server.Server) error { + nc, err = s.NetworkManager().NatsConnection() + return err + }) + } + if err != nil { + return err + } + defer func() { + if drainErr := nc.Drain(); drainErr != nil { + log.Error().Err(drainErr).Msg("failed to drain nats connection") + } + }() + log.Info().Msg("NATS server available") + return fn(nc) +} + +func WithJetstream(server string, fn func(js jetstream.JetStream) error, opt ...nats.Option) error { + js, err := natsutil.ConnectJetStream(server, opt...) + if err != nil { + return err + } + if callErr := fn(js); callErr != nil { + if drainErr := js.Conn().Drain(); drainErr != nil { + return errors.Join(callErr, drainErr) + } + return callErr + } + return js.Conn().Drain() +} + +func WithServer(ctx context.Context, opts server.Options, fn func(*server.Server) error) error { + server := server.New(opts) + err := server.Start(ctx) + if err != nil { + return err + } + defer func() { + if stopErr := server.Stop(); stopErr != nil { + log.Error().Err(stopErr).Msg("failed to stop server") + } + }() + return fn(server) +} + +func WithSimuClient(ctx context.Context, natsServer string, action func(ctx context.Context, client *sim.Client) error) error { + nc, err := nats.Connect(natsServer) + if err != nil { + log.Error().Err(err).Msg("failed to connect to nats server") + return err + } + defer func() { + if drainErr := nc.Drain(); drainErr != nil { + log.Error().Err(drainErr).Msg("failed to drain nats connection") + } + nc.Close() + }() + client := sim.NewClient(nc) + return action(ctx, client) +} diff --git a/pkg/cmd/cfg/env.go b/pkg/cmd/cfg/env.go index 821102e6..ca3b2e86 100644 --- a/pkg/cmd/cfg/env.go +++ b/pkg/cmd/cfg/env.go @@ -19,7 +19,7 @@ func jsonIdent(v any) string { func NewEnvCommand() *cobra.Command { cmd := &cobra.Command{ Use: "env", - Short: "Env prints apigear environment variables", + Short: "env prints apigear environment variables", Long: `Env prints apigear environment variables`, Run: func(cmd *cobra.Command, args []string) { settings := cfg.AllSettings() diff --git a/pkg/cmd/cfg/get.go b/pkg/cmd/cfg/get.go index d74ed56a..2db94f0d 100644 --- a/pkg/cmd/cfg/get.go +++ b/pkg/cmd/cfg/get.go @@ -9,7 +9,7 @@ func NewGetCmd() *cobra.Command { cmd := &cobra.Command{ Use: "get", Aliases: []string{"g"}, - Short: "Display configuration values", + Short: "display configuration values", Long: `Display the value of a configuration variable`, Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { diff --git a/pkg/cmd/cfg/info.go b/pkg/cmd/cfg/info.go index b8825d61..5496c48c 100644 --- a/pkg/cmd/cfg/info.go +++ b/pkg/cmd/cfg/info.go @@ -10,7 +10,7 @@ func NewInfoCmd() *cobra.Command { cmd := &cobra.Command{ Use: "info", Aliases: []string{"i"}, - Short: "Display the config information", + Short: "display the config information", Long: `Display the config information and the location of the config file`, Run: func(cmd *cobra.Command, _ []string) { cmd.Println("info:") diff --git a/pkg/cmd/cfg/root.go b/pkg/cmd/cfg/root.go index e5ae3072..35a73068 100644 --- a/pkg/cmd/cfg/root.go 
+++ b/pkg/cmd/cfg/root.go @@ -9,7 +9,7 @@ func NewRootCommand() *cobra.Command { cmd := &cobra.Command{ Use: "config", Aliases: []string{"cfg", "c"}, - Short: "Display the config vars", + Short: "display the config vars", Long: `Display and edit the configuration variables`, } cmd.AddCommand(NewInfoCmd()) diff --git a/pkg/cmd/gen/expert.go b/pkg/cmd/gen/expert.go index f21c5da2..601963db 100644 --- a/pkg/cmd/gen/expert.go +++ b/pkg/cmd/gen/expert.go @@ -34,7 +34,7 @@ func NewExpertCommand() *cobra.Command { cmd := &cobra.Command{ Use: "expert", Aliases: []string{"x"}, - Short: "Generate code using expert mode", + Short: "generate code using expert mode", Long: `in expert mode you can individually set your generator options. This is helpful when you do not have a solution document.`, RunE: func(cmd *cobra.Command, args []string) error { doc := MakeSolution(options) diff --git a/pkg/cmd/gen/root.go b/pkg/cmd/gen/root.go index ebc324ba..dee445e9 100644 --- a/pkg/cmd/gen/root.go +++ b/pkg/cmd/gen/root.go @@ -9,7 +9,7 @@ func NewRootCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "generate", Aliases: []string{"gen", "g"}, - Short: "Generate code from APIs", + Short: "generate code from APIs", Long: `generate API SDKs from API descriptions using templates`, } cmd.AddCommand(NewExpertCommand(), NewSolutionCommand()) diff --git a/pkg/cmd/gen/sol.go b/pkg/cmd/gen/sol.go index 3c8db3c3..c23e4150 100644 --- a/pkg/cmd/gen/sol.go +++ b/pkg/cmd/gen/sol.go @@ -18,7 +18,7 @@ func NewSolutionCommand() *cobra.Command { var force bool var cmd = &cobra.Command{ Use: "solution [solution-file]", - Short: "Generate SDK using a solution document", + Short: "generate SDK using a solution document", Aliases: []string{"sol", "s"}, Args: cobra.ExactArgs(1), Long: `A solution is a yaml document which describes different layers. diff --git a/pkg/cmd/mcp.go b/pkg/cmd/mcp.go index 8e410864..505e3933 100644 --- a/pkg/cmd/mcp.go +++ b/pkg/cmd/mcp.go @@ -8,7 +8,7 @@ import ( func NewMCPCommand() *cobra.Command { cmd := &cobra.Command{ Use: "mcp", - Short: "Start MCP server exposing apigear CLI commands", + Short: "start MCP server exposing apigear CLI commands", Long: `Start a Model Context Protocol (MCP) server that exposes selected apigear CLI commands as tools for AI assistants.`, RunE: func(cmd *cobra.Command, args []string) error { return mcp.RunMCPServer() diff --git a/pkg/cmd/mon/feed.go b/pkg/cmd/mon/feed.go index 55f746ef..de942c0a 100644 --- a/pkg/cmd/mon/feed.go +++ b/pkg/cmd/mon/feed.go @@ -2,6 +2,7 @@ package mon import ( "fmt" + "strings" "time" "github.com/apigear-io/cli/pkg/helper" @@ -11,17 +12,19 @@ import ( "github.com/spf13/cobra" ) -func NewClientCommand() *cobra.Command { +func NewFeedCommand() *cobra.Command { type ClientOptions struct { - url string // monitor server url - script string // script to run - repeat int // -1 for infinite - sleep time.Duration // sleep between each event + url string // monitor server url + script string // script to run + repeat int // -1 for infinite + interval time.Duration // sleep between each event + deviceId string // device id to use + batch int // number of events to send in a batch } var options = &ClientOptions{} var cmd = &cobra.Command{ Use: "feed", - Short: "Feed a script to a monitor", + Short: "feed a script to a monitor", Long: `Feeds API calls from various sources to the monitor to be displayed. 
This is mainly to playback recorded API calls.`, Args: cobra.ExactArgs(1), RunE: func(_ *cobra.Command, args []string) error { @@ -31,7 +34,7 @@ func NewClientCommand() *cobra.Command { var err error switch helper.Ext(options.script) { case ".json", ".ndjson": - events, err = mon.ReadJsonEvents(options.script) + events, err = helper.ReadNDJSONFile[mon.Event](options.script) log.Debug().Msgf("read %d events", len(events)) if err != nil { return fmt.Errorf("error reading events: %w", err) @@ -53,28 +56,49 @@ func NewClientCommand() *cobra.Command { if len(events) == 0 { return fmt.Errorf("no events to send") } - sender := helper.NewHTTPSender(options.url) - ctrl := helper.NewSenderControl[mon.Event](options.repeat, options.sleep) + url := strings.Join([]string{strings.TrimRight(options.url, "/"), "monitor", options.deviceId}, "/") + log.Info().Msgf("sending %d events to %s", len(events), url) + sender := helper.NewHTTPSender(url) + ctrl := helper.NewSenderControl[mon.Event](options.repeat, options.interval, options.batch) + + // Counter to track sent messages + var sentCount int + err = ctrl.Run(events, func(event mon.Event) error { - if event.Source == "" { - event.Source = "123" + if event.Device == "" { + event.Device = options.deviceId } // send as an array of events - payload := [1]mon.Event{event} - return sender.SendValue(payload) + payload := [1]mon.Event{event} + log.Info().Msgf("send event %s %s %s", event.Device, event.Type.String(), event.Symbol) + err := sender.SendValue(payload) + if err == nil { + sentCount++ + } + return err }) if err != nil { log.Warn().Msgf("error sending events: %s", err) } + + // Report total sent + totalExpected := len(events) * options.repeat + log.Info().Msgf("feed completed: sent %d/%d events (%.1f%%)", sentCount, totalExpected, float64(sentCount)/float64(totalExpected)*100) + fmt.Printf("Feed completed: sent %d/%d events\n", sentCount, totalExpected) + return nil }, } - cmd.Flags().StringVar(&options.url, "url", "http://localhost:5555/monitor/123", "monitor server address") + cmd.Flags().StringVar(&options.url, "url", "http://localhost:5555", "monitor server address") // repeat is -1 for infinite cmd.Flags().IntVar(&options.repeat, "repeat", 1, "number of times to repeat the script") // sleep is in milliseconds - cmd.Flags().DurationVar(&options.sleep, "sleep", 0, "sleep between each event") + cmd.Flags().DurationVar(&options.interval, "interval", 100*time.Millisecond, "interval between each event") + // deviceId to use + cmd.Flags().StringVar(&options.deviceId, "device", "123", "device id to use") + // batch size + cmd.Flags().IntVar(&options.batch, "batch", 1, "number of events to send in a batch") return cmd } diff --git a/pkg/cmd/mon/root.go b/pkg/cmd/mon/root.go index efeb78b6..312f4fff 100644 --- a/pkg/cmd/mon/root.go +++ b/pkg/cmd/mon/root.go @@ -9,10 +9,10 @@ func NewRootCommand() *cobra.Command { cmd := &cobra.Command{ Use: "monitor", Aliases: []string{"mon", "m"}, - Short: "Display monitor API calls", + Short: "display monitor API calls", Long: `Display monitored API calls using a monitoring server. 
SDKs typically create trace points and forward all API traffic to this monitoring service if configured.`, } - cmd.AddCommand(NewClientCommand()) - cmd.AddCommand(NewServerCommand()) + cmd.AddCommand(NewFeedCommand()) + cmd.AddCommand(NewRunCommand()) return cmd } diff --git a/pkg/cmd/mon/run.go b/pkg/cmd/mon/run.go index 950ecf92..351a63bf 100644 --- a/pkg/cmd/mon/run.go +++ b/pkg/cmd/mon/run.go @@ -1,37 +1,43 @@ package mon import ( - "github.com/apigear-io/cli/pkg/log" - "github.com/apigear-io/cli/pkg/mon" - "github.com/apigear-io/cli/pkg/net" + "github.com/apigear-io/cli/pkg/app" + "github.com/apigear-io/cli/pkg/streams/msgio" + "github.com/nats-io/nats.go" "github.com/spf13/cobra" ) -func NewServerCommand() *cobra.Command { - var addr string +func NewRunCommand() *cobra.Command { + var natsURL string + var verbose bool + var deviceID string + var pretty bool + var headers bool var cmd = &cobra.Command{ Use: "run", Aliases: []string{"r", "start"}, - Short: "Run the monitor server", + Short: "run the monitor server", Long: `The monitor server runs on a HTTP port and listens for API calls.`, RunE: func(cmd *cobra.Command, _ []string) error { - netman := net.NewManager() - opts := net.Options{ - HttpAddr: addr, - } - err := netman.Start(&opts) - if err != nil { - return err + + opts := msgio.TailOptions{ + Verbose: verbose, + Pretty: pretty, + Headers: headers, + DeviceID: deviceID, } - netman.MonitorEmitter().AddHook(func(e *mon.Event) { - log.Info().Msgf("event: %s %s %v", e.Type.String(), e.Source, e.Data) - }) - netman.OnMonitorEvent(func(event *mon.Event) { - log.Info().Str("source", event.Source).Str("type", event.Type.String()).Str("symbol", event.Symbol).Any("data", event.Data).Msg("received monitor event") + + return app.WithNATS(cmd.Context(), natsURL, func(nc *nats.Conn) error { + tailer := msgio.NewTailer(nc, opts) + return tailer.Run(cmd.Context()) }) - return netman.Wait(cmd.Context()) }, } - cmd.Flags().StringVarP(&addr, "addr", "a", "127.0.0.1:5555", "address to listen on") + + cmd.Flags().StringVarP(&natsURL, "nats-url", "n", nats.DefaultURL, "NATS server URL") + cmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "enable verbose logging") + cmd.Flags().StringVarP(&deviceID, "device-id", "d", "", "device ID to monitor") + cmd.Flags().BoolVarP(&pretty, "pretty", "p", false, "pretty print JSON output") + cmd.Flags().BoolVarP(&headers, "headers", "H", false, "include headers in output") return cmd } diff --git a/pkg/cmd/olink/root.go b/pkg/cmd/olink/root.go index f6452300..dd13f216 100644 --- a/pkg/cmd/olink/root.go +++ b/pkg/cmd/olink/root.go @@ -10,7 +10,7 @@ func NewRootCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "olink", Aliases: []string{"ol"}, - Short: "Start an ObjectLink REPL to test the olink protocol", + Short: "start an ObjectLink REPL to test the olink protocol", Long: `The olink command starts an interactive REPL (Read-Eval-Print Loop) for testing the ObjectLink protocol. 
It provides commands to connect to servers, link to objects, invoke methods, set properties, and observe signals.`, diff --git a/pkg/cmd/prj/open.go b/pkg/cmd/prj/open.go index ed3eda45..7b9cee5a 100644 --- a/pkg/cmd/prj/open.go +++ b/pkg/cmd/prj/open.go @@ -10,7 +10,7 @@ import ( func NewOpenCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "open project-path", - Short: "Open a project in studio", + Short: "open a project in studio", Long: `Open the given project in the desktop studio, if installed`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { diff --git a/pkg/cmd/prj/root.go b/pkg/cmd/prj/root.go index cb725046..43de9c1b 100644 --- a/pkg/cmd/prj/root.go +++ b/pkg/cmd/prj/root.go @@ -9,7 +9,7 @@ func NewRootCommand() *cobra.Command { cmd := &cobra.Command{ Use: "project", Aliases: []string{"prj"}, - Short: "Manage apigear projects", + Short: "manage apigear projects", Long: `Projects consist of API descriptions, SDK configuration, simulation documents and other files`, } cmd.AddCommand(NewAddCommand()) diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index 3646ddf9..62746795 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -13,6 +13,7 @@ import ( "github.com/apigear-io/cli/pkg/cmd/stim" "github.com/apigear-io/cli/pkg/cmd/tpl" "github.com/apigear-io/cli/pkg/cmd/x" + "github.com/apigear-io/cli/pkg/streams/cli" "github.com/spf13/cobra" ) @@ -42,5 +43,6 @@ func NewRootCommand() *cobra.Command { cmd.AddCommand(tpl.NewRootCommand()) cmd.AddCommand(olink.NewRootCommand()) cmd.AddCommand(NewMCPCommand()) + cmd.AddCommand(cli.NewStreamCmd()) return cmd } diff --git a/pkg/cmd/serve.go b/pkg/cmd/serve.go index e8e4bc53..99354c12 100644 --- a/pkg/cmd/serve.go +++ b/pkg/cmd/serve.go @@ -1,10 +1,10 @@ package cmd import ( + "github.com/apigear-io/cli/pkg/app" + "github.com/apigear-io/cli/pkg/helper" "github.com/apigear-io/cli/pkg/log" - "github.com/apigear-io/cli/pkg/mon" - "github.com/apigear-io/cli/pkg/net" - "github.com/apigear-io/cli/pkg/sim" + "github.com/apigear-io/cli/pkg/server" "github.com/spf13/cobra" ) @@ -16,22 +16,21 @@ func NewServeCommand() *cobra.Command { Use: "serve", Short: "starts apigear server for monitoring and simulation", RunE: func(cmd *cobra.Command, args []string) error { - netman := net.NewManager() - server := sim.NewOlinkServer() - sim.NewManager(sim.ManagerOptions{ - Server: server, - }) - if err := netman.Start(&net.Options{ + log.Info().Msg("starting streams") + opts := server.Options{ NatsHost: natsHost, NatsPort: natsPort, HttpAddr: httpAddr, - }); err != nil { - return err } - netman.OnMonitorEvent(func(event *mon.Event) { - log.Info().Str("source", event.Source).Str("type", event.Type.String()).Str("symbol", event.Symbol).Any("data", event.Data).Msg("received monitor event") + err := app.WithServer(cmd.Context(), opts, func(s *server.Server) error { + log.Info().Msgf("nats server running at %s:%d", opts.NatsHost, opts.NatsPort) + // s.NetworkManager().OnMonitorEvent(func(event *mon.Event) { + // log.Debug().Str("source", event.Device).Str("type", event.Type.String()).Str("symbol", event.Symbol).Any("data", event.Data).Msg("received monitor event") + // }) + return helper.Wait(cmd.Context(), nil) }) - return netman.Wait(cmd.Context()) + log.Info().Msg("server is running. 
Press Ctrl+C to stop.") + return err }, } diff --git a/pkg/cmd/sim/feed.go b/pkg/cmd/sim/feed.go index 4f62533c..f7ffcc1a 100644 --- a/pkg/cmd/sim/feed.go +++ b/pkg/cmd/sim/feed.go @@ -1,115 +1,54 @@ package sim import ( - "context" - "encoding/json" - "fmt" "path/filepath" "time" "github.com/apigear-io/cli/pkg/helper" "github.com/apigear-io/cli/pkg/log" - "github.com/apigear-io/objectlink-core-go/olink/client" - "github.com/apigear-io/objectlink-core-go/olink/core" - "github.com/apigear-io/objectlink-core-go/olink/ws" + "github.com/apigear-io/cli/pkg/olnk" "github.com/spf13/cobra" ) -// client messages supported for feed -// - ["link", "demo.Calc"] -// - ["set", "demo.Calc/total", 20] -// - ["invoke", 1, "demo.Calc/add", [1]] -// - ["unlink", "demo.Calc"] -// server messages not supported for feed -// - ["init", "demo.Calc", { "total": 10 }] -// - ["change", "demo.Calc/total", 20] -// - ["reply", 1, "demo.Calc/add", 21] -// - ["signal", "demo.Calc/clearDone", []] -// - ["error", "init", 0, "init error"] - -type ObjectSink struct { - objectId string -} - -func (s *ObjectSink) ObjectId() string { - return s.objectId -} - -func (s *ObjectSink) HandleSignal(signalId string, args core.Args) { - log.Info().Msgf("<- signal %s(%v)", signalId, args) -} -func (s *ObjectSink) HandlePropertyChange(propertyId string, value core.Any) { - log.Info().Msgf("<- property %s = %v", propertyId, value) -} -func (s *ObjectSink) HandleInit(objectId string, props core.KWArgs, node *client.Node) { - s.objectId = objectId - log.Info().Msgf("<- init %s with %v", objectId, props) -} -func (s *ObjectSink) HandleRelease() { - log.Info().Msgf("<- release %s", s.objectId) - s.objectId = "" -} - -var _ client.IObjectSink = &ObjectSink{} - func NewClientCommand() *cobra.Command { type ClientOptions struct { addr string script string sleep time.Duration repeat int + batch int } var options = &ClientOptions{} // cmd represents the simCli command var cmd = &cobra.Command{ Use: "feed", Aliases: []string{"f"}, - Short: "Feed simulation from command line", + Short: "feed simulation from command line", Long: `Feed simulation calls using JSON documents from command line`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { options.script = args[0] log.Info().Str("script", options.script).Str("addr", options.addr).Int("repeat", options.repeat).Dur("sleep", options.sleep).Msg("feed simulation") - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - registry := client.NewRegistry() - registry.SetSinkFactory(func(objectId string) client.IObjectSink { - return &ObjectSink{objectId: objectId} - }) - log.Debug().Msgf("run script %s", options.script) - conn, err := ws.Dial(ctx, options.addr) + feeder := olnk.NewFeeder() + err := feeder.Connect(cmd.Context(), options.addr) if err != nil { return err } defer func() { - if err := conn.Close(); err != nil { - log.Error().Err(err).Msg("failed to close connection") + if closeErr := feeder.Close(); closeErr != nil { + log.Error().Err(closeErr).Msg("failed to close feeder") } }() - node := client.NewNode(registry) - conn.SetOutput(node) - node.SetOutput(conn) - registry.AttachClientNode(node) switch filepath.Ext(options.script) { case ".ndjson": items, err := helper.ScanFile(options.script) if err != nil { return err } - ctrl := helper.NewSenderControl[[]byte](options.repeat, options.sleep) - err = ctrl.Run(items, func(data []byte) error { - log.Debug().Msgf("send -> %s", data) - err := handleNodeData(node, data) - if err != nil { - return err - } 
- return nil - }) - if err != nil { - log.Warn().Err(err).Msg("send error") - } + ctrl := helper.NewSenderControl[[]byte](options.repeat, options.sleep, options.batch) + return ctrl.Run(items, feeder.Feed) } - <-ctx.Done() + <-cmd.Context().Done() log.Info().Msg("done") return nil }, @@ -117,40 +56,6 @@ func NewClientCommand() *cobra.Command { cmd.Flags().DurationVarP(&options.sleep, "sleep", "", 100, "sleep duration between messages") cmd.Flags().StringVarP(&options.addr, "addr", "", "ws://127.0.0.1:4333/ws", "address of the simulation server") cmd.Flags().IntVarP(&options.repeat, "repeat", "", 1, "number of times to repeat the script") + cmd.Flags().IntVarP(&options.batch, "batch", "", 1, "number of messages to send in a batch") return cmd } - -func handleNodeData(node *client.Node, data []byte) error { - var m core.Message - err := json.Unmarshal(data, &m) - if err != nil { - log.Error().Err(err).Msgf("invalid message: %s", data) - return err - } - s, ok := m[0].(string) - if !ok { - log.Error().Msgf("invalid message type, expected string: %v", m) - return fmt.Errorf("invalid message type, expected string: %v", m) - } - m[0] = core.MsgTypeFromString(s) - switch m[0] { - case core.MsgLink: - objectId := m.AsLink() - node.LinkRemoteNode(objectId) - case core.MsgUnlink: - objectId := m.AsLink() - node.UnlinkRemoteNode(objectId) - case core.MsgSetProperty: - propertyId, value := m.AsSetProperty() - node.SetRemoteProperty(propertyId, value) - case core.MsgInvoke: - _, methodId, args := m.AsInvoke() - node.InvokeRemote(methodId, args, func(arg client.InvokeReplyArg) { - log.Info().Msgf("<- reply %s : %v", arg.Identifier, arg.Value) - }) - default: - log.Info().Msgf("not supported message type: %v", m) - return fmt.Errorf("not supported message type: %v", m) - } - return nil -} diff --git a/pkg/cmd/sim/root.go b/pkg/cmd/sim/root.go index b4860318..18240a0f 100644 --- a/pkg/cmd/sim/root.go +++ b/pkg/cmd/sim/root.go @@ -9,7 +9,7 @@ func NewRootCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "simulate", Aliases: []string{"sim", "s", "simu"}, - Short: "Simulate API calls", + Short: "simulate API calls", Long: `Simulate api calls using either a dynamic JS script or a static YAML document`, } cmd.AddCommand(NewClientCommand()) diff --git a/pkg/cmd/sim/run.go b/pkg/cmd/sim/run.go index 5420a346..334485a6 100644 --- a/pkg/cmd/sim/run.go +++ b/pkg/cmd/sim/run.go @@ -2,22 +2,21 @@ package sim import ( "context" - "os" "path/filepath" + "github.com/apigear-io/cli/pkg/app" + "github.com/apigear-io/cli/pkg/helper" "github.com/apigear-io/cli/pkg/log" - "github.com/apigear-io/cli/pkg/mon" - "github.com/apigear-io/cli/pkg/net" "github.com/apigear-io/cli/pkg/sim" "github.com/apigear-io/cli/pkg/tasks" + "github.com/nats-io/nats.go" "github.com/spf13/cobra" ) func NewRunCommand() *cobra.Command { var fn string - var addr string - var noServe bool + var natsServer string var watch bool // cmd represents the simSvr command @@ -25,94 +24,59 @@ func NewRunCommand() *cobra.Command { Use: "run", Aliases: []string{"r"}, Args: cobra.ExactArgs(1), - Short: "Run simulation server using an optional scenario file", - Long: `Simulation server simulates the API backend. -In its simplest form it just answers every call and all properties are set to default values. + Short: "run simulation server using an optional scenario file", + Long: `Simulation server simulates the API backend. +In its simplest form it just answers every call and all properties are set to default values. 
Using a scenario you can define additional static and scripted data and behavior.`, RunE: func(cmd *cobra.Command, args []string) error { - netman := net.NewManager() - if err := netman.Start(&net.Options{ - NatsListen: false, - HttpAddr: addr, - HttpDisabled: noServe, - }); err != nil { - return err - } - netman.OnMonitorEvent(func(event *mon.Event) { - log.Info().Str("source", event.Source).Str("type", event.Type.String()).Str("symbol", event.Symbol).Any("data", event.Data).Msg("received monitor event") - }) - var simman *sim.Manager - if !noServe { - simman = sim.NewManager(sim.ManagerOptions{}) - simman.Start(netman) - } else { - simman = sim.NewManager(sim.ManagerOptions{}) - } - - scriptFile := args[0] - cwd, err := os.Getwd() + absFile, err := filepath.Abs(args[0]) if err != nil { - log.Error().Err(err).Msg("failed to get current working directory") return err } + return app.WithSimuClient(cmd.Context(), natsServer, func(ctx context.Context, client *sim.Client) error { + taskManager := tasks.NewTaskManager() + taskName := "sim-script" - absFile := filepath.Clean(filepath.Join(cwd, scriptFile)) - - // Create task manager and register sim task - taskManager := tasks.NewTaskManager() - taskName := "sim-script" - - // Create task function that runs the script - taskFunc := func(ctx context.Context) error { - return runScript(ctx, simman, netman, absFile, fn) - } - - // Register the task - taskManager.Register(taskName, map[string]interface{}{ - "script_file": absFile, - "function": fn, - }, taskFunc) + // Create task function that runs the script + taskFunc := func(ctx context.Context) error { + resp, err := client.RunScript(absFile) + if err != nil { + log.Error().Err(err).Msg("failed to run script") + return err + } + if resp.Error != "" { + log.Error().Err(err).Str("error", resp.Error).Msg("failed to run script") + return err + } + log.Info().Str("file", absFile).Msg("script executed") + return nil + } - ctx := cmd.Context() + // Register the task + taskManager.Register(taskName, map[string]interface{}{ + "script_file": absFile, + "function": fn, + }, taskFunc) - if watch { - log.Info().Str("file", absFile).Msg("watching script file") - // Use task manager's watch functionality - if err := taskManager.Watch(ctx, taskName, absFile); err != nil { - return err - } - return netman.Wait(ctx) - } else { - // Run once without watching - if err := taskManager.Run(ctx, taskName); err != nil { - return err + if watch { + log.Info().Str("file", absFile).Msg("watching script file") + // Use task manager's watch functionality + if err := taskManager.Watch(ctx, taskName, absFile); err != nil { + return err + } + } else { + // Run once without watching + if err := taskManager.Run(ctx, taskName); err != nil { + return err + } } - return netman.Wait(ctx) - } + return helper.Wait(ctx, nil) + }) }, } cmd.Flags().StringVar(&fn, "fn", "main", "function to run") - cmd.Flags().StringVar(&addr, "addr", "localhost:5555", "protocol server address") - cmd.Flags().BoolVar(&noServe, "no-serve", false, "disable protocol server") + cmd.Flags().StringVar(&natsServer, "nats-server", nats.DefaultURL, "nats server url") cmd.Flags().BoolVar(&watch, "watch", false, "watch for changes in the script file") return cmd } - -func runScript(ctx context.Context, sm *sim.Manager, nm *net.NetworkManager, absFile string, fn string) error { - log.Info().Str("script", absFile).Msg("load script file into simulation") - content, err := os.ReadFile(absFile) - if err != nil { - log.Error().Err(err).Msg("failed to read script file") - 
return err - } - script := sim.NewScript(absFile, string(content)) - sm.ScriptRun(script) - if fn != "" { - log.Info().Str("function", fn).Msg("run world function") - sm.FunctionRun(fn, nil) - } - // Return immediately after running the script - // Don't block here - the TaskManager will handle the lifecycle - return nil -} diff --git a/pkg/cmd/spec/check.go b/pkg/cmd/spec/check.go index 6d807aed..3965ca10 100644 --- a/pkg/cmd/spec/check.go +++ b/pkg/cmd/spec/check.go @@ -13,7 +13,7 @@ func NewCheckCommand() *cobra.Command { cmd := &cobra.Command{ Use: "check", Aliases: []string{"c", "lint"}, - Short: "Check document", + Short: "check document", Long: `Check documents and report errors`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { diff --git a/pkg/cmd/spec/root.go b/pkg/cmd/spec/root.go index 1a07659f..ed5d6c08 100644 --- a/pkg/cmd/spec/root.go +++ b/pkg/cmd/spec/root.go @@ -9,7 +9,7 @@ func NewRootCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "spec", Aliases: []string{"s"}, - Short: "Load and validate files", + Short: "load and validate files", Long: `Specification defines the file formats used inside apigear`, } cmd.AddCommand(NewCheckCommand()) diff --git a/pkg/cmd/spec/show.go b/pkg/cmd/spec/show.go index 1c3f071d..ac111afb 100644 --- a/pkg/cmd/spec/show.go +++ b/pkg/cmd/spec/show.go @@ -13,7 +13,7 @@ func NewShowCommand() *cobra.Command { cmd := &cobra.Command{ Use: "schema", Aliases: []string{"s", "show", "view"}, - Short: "Show schema for module, solution, rules documents", + Short: "show schema for module, solution, rules documents", Long: `Show the schema for module, solutions, rules documents in either yaml or json form`, RunE: func(cmd *cobra.Command, args []string) error { docType, _ := cmd.Flags().GetString("type") diff --git a/pkg/cmd/stim/root.go b/pkg/cmd/stim/root.go index 5c79aa23..99e0b7b0 100644 --- a/pkg/cmd/stim/root.go +++ b/pkg/cmd/stim/root.go @@ -9,7 +9,7 @@ func NewRootCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "stimulate", Aliases: []string{"stim"}, - Short: "Stimulate API calls to services", + Short: "stimulate API calls to services", Long: `Stimulate API calls using either a dynamic JS script to services`, } cmd.AddCommand(NewRunCommand()) diff --git a/pkg/cmd/stim/run.go b/pkg/cmd/stim/run.go index da22987f..5c1c9d7a 100644 --- a/pkg/cmd/stim/run.go +++ b/pkg/cmd/stim/run.go @@ -21,7 +21,7 @@ func NewRunCommand() *cobra.Command { Use: "run", Aliases: []string{"r"}, Args: cobra.ExactArgs(1), - Short: "Run stimulation script using an optional scenario file", + Short: "run stimulation script using an optional scenario file", Long: `Stimulation script runs scripted calls to a service backend.`, RunE: func(cmd *cobra.Command, args []string) error { simman := sim.NewManager(sim.ManagerOptions{}) diff --git a/pkg/cmd/x/idl2yaml.go b/pkg/cmd/x/idl2yaml.go index e53f9900..d1e0a3ce 100644 --- a/pkg/cmd/x/idl2yaml.go +++ b/pkg/cmd/x/idl2yaml.go @@ -57,7 +57,7 @@ func idl2yaml(input string) error { func NewIdl2YamlCommand() *cobra.Command { cmd := &cobra.Command{ Use: "idl2yaml [file]", - Short: "Convert IDL file to YAML", + Short: "convert IDL file to YAML", Long: `Convert an IDL file to a YAML representation.`, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/cmd/x/root.go b/pkg/cmd/x/root.go index 7feabba0..fa2a82f1 100644 --- a/pkg/cmd/x/root.go +++ b/pkg/cmd/x/root.go @@ -8,7 +8,7 @@ func NewRootCommand() *cobra.Command { cmd := &cobra.Command{ Use: "x", Aliases: 
[]string{"experimental"}, - Short: "Experimental commands", + Short: "experimental commands", Long: `Command which are under development or experimental`, } cmd.AddCommand(NewDocsCommand()) @@ -16,6 +16,8 @@ func NewRootCommand() *cobra.Command { cmd.AddCommand(NewYaml2JsonCommand()) cmd.AddCommand(NewYaml2IdlCommand()) cmd.AddCommand(NewIdl2YamlCommand()) + cmd.AddCommand(NewWSEchoCommand()) + cmd.AddCommand(NewWSCatCommand()) return cmd } diff --git a/pkg/cmd/x/wscat.go b/pkg/cmd/x/wscat.go new file mode 100644 index 00000000..2bf98257 --- /dev/null +++ b/pkg/cmd/x/wscat.go @@ -0,0 +1,28 @@ +package x + +import ( + "time" + + "github.com/apigear-io/cli/pkg/net" + "github.com/spf13/cobra" +) + +func NewWSCatCommand() *cobra.Command { + var opts net.WSClientOptions + var cmd = &cobra.Command{ + Use: "wscat", + Aliases: []string{"ws", "websocket"}, + Short: "run the WebSocket cat client", + Long: `The WebSocket cat client connects to the WebSocket proxy and allows sending and receiving messages.`, + RunE: func(cmd *cobra.Command, _ []string) error { + return net.RunWSClient(cmd.Context(), opts) + }, + } + + cmd.Flags().StringVarP(&opts.URL, "url", "u", "", "WebSocket server URL") + cmd.Flags().DurationVarP(&opts.Interval, "interval", "i", 100*time.Millisecond, "Interval between messages") + cmd.Flags().IntVarP(&opts.Repeat, "repeat", "r", 1, "Number of times to repeat the messages") + cmd.Flags().BoolVarP(&opts.DecodeJSON, "decode-json", "d", false, "Decode JSON messages") + + return cmd +} diff --git a/pkg/cmd/x/wsecho.go b/pkg/cmd/x/wsecho.go new file mode 100644 index 00000000..6c469b9a --- /dev/null +++ b/pkg/cmd/x/wsecho.go @@ -0,0 +1,23 @@ +package x + +import ( + "github.com/apigear-io/cli/pkg/net" + "github.com/spf13/cobra" +) + +func NewWSEchoCommand() *cobra.Command { + var opts net.WSEchoOptions + var cmd = &cobra.Command{ + Use: "wsecho", + Aliases: []string{"wse", "websocket-echo"}, + Short: "run the WebSocket echo server", + Long: `The WebSocket echo server echoes back any message it receives from clients.`, + RunE: func(cmd *cobra.Command, _ []string) error { + return net.RunWSEcho(cmd.Context(), opts) + }, + } + + cmd.Flags().StringVarP(&opts.Addr, "address", "a", ":8080", "WebSocket server address") + + return cmd +} diff --git a/pkg/gen/generator.go b/pkg/gen/generator.go index 6e237a18..75496ecf 100644 --- a/pkg/gen/generator.go +++ b/pkg/gen/generator.go @@ -175,12 +175,15 @@ func (g *generator) processFeature(f *spec.FeatureRule) error { } scopes := f.FindScopesByMatch(spec.ScopeSystem) for _, scope := range scopes { + log.Debug().Msgf("processing system scope %s", scope.Match) err := g.processScope(scope, ctx) if err != nil { + log.Warn().Msgf("An error occured") return err } } for _, module := range g.opts.System.Modules { + log.Debug().Msgf("processing module %s", module.Name) // process module scopes := f.FindScopesByMatch(spec.ScopeModule) ctx := model.ModuleScope{ @@ -190,12 +193,15 @@ func (g *generator) processFeature(f *spec.FeatureRule) error { Meta: g.opts.Meta, } for _, scope := range scopes { + log.Debug().Msgf("processing module scope %s", scope.Match) err := g.processScope(scope, ctx) if err != nil { + log.Warn().Msgf("An error occured") return err } } for _, iface := range module.Interfaces { + log.Debug().Msgf("processing interface %s", iface.Name) // process interface ctx := model.InterfaceScope{ System: g.opts.System, @@ -206,13 +212,16 @@ func (g *generator) processFeature(f *spec.FeatureRule) error { } scopes := 
f.FindScopesByMatch(spec.ScopeInterface) for _, scope := range scopes { + log.Debug().Msgf("processing interface scope %s", scope.Match) err := g.processScope(scope, ctx) if err != nil { + log.Warn().Msgf("An error occured") return err } } } for _, struct_ := range module.Structs { + log.Debug().Msgf("processing struct %s", struct_.Name) // process struct ctx := model.StructScope{ System: g.opts.System, @@ -223,13 +232,16 @@ func (g *generator) processFeature(f *spec.FeatureRule) error { } scopes := f.FindScopesByMatch(spec.ScopeStruct) for _, scope := range scopes { + log.Debug().Msgf("processing struct scope %s", scope.Match) err := g.processScope(scope, ctx) if err != nil { + log.Warn().Msgf("An error occured") return err } } } for _, enum := range module.Enums { + log.Debug().Msgf("processing enum %s", enum.Name) // process enum ctx := model.EnumScope{ System: g.opts.System, @@ -240,13 +252,16 @@ func (g *generator) processFeature(f *spec.FeatureRule) error { } scopes := f.FindScopesByMatch(spec.ScopeEnum) for _, scope := range scopes { + log.Debug().Msgf("processing enum scope %s", scope.Match) err := g.processScope(scope, ctx) if err != nil { + log.Warn().Msgf("An error occured") return err } } } for _, extern := range module.Externs { + log.Debug().Msgf("processing extern %s", extern.Name) ctx := model.ExternScope{ System: g.opts.System, Module: module, @@ -256,6 +271,7 @@ func (g *generator) processFeature(f *spec.FeatureRule) error { } scopes := f.FindScopesByMatch(spec.ScopeExtern) for _, scope := range scopes { + log.Debug().Msgf("processing extern scope %s", scope.Match) err := g.processScope(scope, ctx) if err != nil { log.Warn().Msgf("An error occured") @@ -269,6 +285,7 @@ func (g *generator) processFeature(f *spec.FeatureRule) error { // processScope processes a scope rule (e.g. system, modules, ...) with the given context func (g *generator) processScope(scope *spec.ScopeRule, ctx any) error { + log.Debug().Msgf("processing scope %s", scope.Match) prefix := scope.Prefix for _, doc := range scope.Documents { // clean doc target diff --git a/pkg/helper/ndjson.go b/pkg/helper/ndjson.go index ad039f69..89b9778a 100644 --- a/pkg/helper/ndjson.go +++ b/pkg/helper/ndjson.go @@ -3,38 +3,111 @@ package helper import ( "bufio" "encoding/json" + "errors" "io" - "log" "os" + "time" ) -// Scan scans a reader line by line and writes to the writer. -func ScanNDJSON[T any](r io.Reader) ([]T, error) { - var items []T - scanner := bufio.NewScanner(r) - for scanner.Scan() { +// NDJSONScanner streams NDJSON content line by line to a callback. +type NDJSONScanner struct { + Sleep time.Duration + Repeat int +} + +// NewNDJSONScanner creates a new NDJSON scanner. +func NewNDJSONScanner(sleep time.Duration, repeat int) *NDJSONScanner { + return &NDJSONScanner{Sleep: sleep, Repeat: repeat} +} + +// OnLineFunc is invoked for each NDJSON line. Returning io.EOF stops the scan gracefully. +type OnLineFunc func(line []byte) error + +// Scan streams lines from the reader to the callback. +func (s *NDJSONScanner) Scan(r io.Reader, fn OnLineFunc) error { + if fn == nil { + return errors.New("ndjson: callback cannot be nil") + } + + repeat := s.Repeat + if repeat == 0 { + repeat = 1 + } + + run := func(reader io.Reader) error { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Bytes() + dup := append([]byte(nil), line...) 
+ if err := fn(dup); err != nil { + if errors.Is(err, io.EOF) { + return nil + } + return err + } + if s.Sleep > 0 { + time.Sleep(s.Sleep) + } + } + return scanner.Err() + } + + seeker, seekable := r.(io.Seeker) + + for pass := 0; repeat < 0 || pass < repeat; pass++ { + if pass > 0 { + if !seekable { + return errors.New("ndjson: repeat requires seekable reader") + } + if _, err := seeker.Seek(0, io.SeekStart); err != nil { + return err + } + } + if err := run(r); err != nil { + return err + } + } + return nil +} + +// ScanFile streams a file's NDJSON content to the callback. +func (s *NDJSONScanner) ScanFile(path string, fn OnLineFunc) error { + f, err := os.Open(path) + if err != nil { + return err + } + defer func() { + _ = f.Close() + }() + return s.Scan(f, fn) +} + +// ReadNDJSON reads all NDJSON entries from the reader into a slice. +func ReadNDJSON[T any](r io.Reader) ([]T, error) { + var out []T + scanner := NewNDJSONScanner(0, 1) + err := scanner.Scan(r, func(line []byte) error { var item T - line := scanner.Bytes() - err := json.Unmarshal(line, &item) - if err != nil { - return nil, err + if err := json.Unmarshal(line, &item); err != nil { + return err } - items = append(items, item) + out = append(out, item) + return nil + }) + if err != nil && !errors.Is(err, io.EOF) { + return nil, err } - return items, scanner.Err() + return out, nil } -// ScanFile scans a file line by line and writes to the writer. -func ScanNDJSONFile[T any](path string) ([]T, error) { +// ReadNDJSONFile reads all NDJSON entries from the file into a slice. +func ReadNDJSONFile[T any](path string) ([]T, error) { f, err := os.Open(path) if err != nil { return nil, err } defer func() { - if err := f.Close(); err != nil { - log.Printf("error closing file %s: %v", path, err) - _ = err - } + _ = f.Close() }() - return ScanNDJSON[T](f) + return ReadNDJSON[T](f) } diff --git a/pkg/helper/ndjson_test.go b/pkg/helper/ndjson_test.go new file mode 100644 index 00000000..d1764f41 --- /dev/null +++ b/pkg/helper/ndjson_test.go @@ -0,0 +1,22 @@ +package helper + +import ( + "path/filepath" + "testing" +) + +type testEvent struct { + Device string `json:"device"` + Type string `json:"type"` +} + +func TestReadNDJSONFile(t *testing.T) { + path := filepath.Join("..", "mon", "testdata", "events.ndjson") + events, err := ReadNDJSONFile[testEvent](path) + if err != nil { + t.Fatalf("ReadNDJSONFile: %v", err) + } + if len(events) != 4 { + t.Fatalf("expected 4 events, got %d", len(events)) + } +} diff --git a/pkg/helper/sender.go b/pkg/helper/sender.go index 48f6c9b0..912b50d2 100644 --- a/pkg/helper/sender.go +++ b/pkg/helper/sender.go @@ -5,29 +5,44 @@ import ( ) type SenderControl[T any] struct { - sleep time.Duration - repeat int + interval time.Duration + repeat int + batch int } -func NewSenderControl[T any](repeat int, sleep time.Duration) *SenderControl[T] { +func NewSenderControl[T any](repeat int, interval time.Duration, batch int) *SenderControl[T] { + if repeat < 0 { + repeat = 1 + } + if batch < 0 { + batch = 1 + } + if interval < 0 { + interval = 100 * time.Millisecond + } return &SenderControl[T]{ - sleep: sleep, - repeat: repeat, + interval: interval, + repeat: repeat, + batch: batch, } } func (t *SenderControl[T]) Run(items []T, send func(T) error) error { - if t.repeat == 0 { - t.repeat = 1 - } for i := 0; i < t.repeat; i++ { - for _, item := range items { - err := send(item) - if err != nil { - return err + for j := 0; j < len(items); j += t.batch { + end := j + t.batch + if end > len(items) { + end = len(items) 
+ } + batch := items[j:end] + for _, item := range batch { + err := send(item) + if err != nil { + return err + } } - if t.sleep > 0 { - time.Sleep(t.sleep) + if t.interval > 0 { + time.Sleep(t.interval) } } } diff --git a/pkg/helper/wait.go b/pkg/helper/wait.go new file mode 100644 index 00000000..1c4c7418 --- /dev/null +++ b/pkg/helper/wait.go @@ -0,0 +1,22 @@ +package helper + +import ( + "context" + "os" + "os/signal" + "syscall" +) + +func Wait(ctx context.Context, cleanup func()) error { + sig := make(chan os.Signal, 1) + signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) + if cleanup != nil { + defer cleanup() + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-sig: + return nil + } +} diff --git a/pkg/model/module.go b/pkg/model/module.go index dd897249..b7b60d83 100644 --- a/pkg/model/module.go +++ b/pkg/model/module.go @@ -50,6 +50,7 @@ func (v Version) Patch() int { type Import struct { NamedNode `json:",inline" yaml:",inline"` + // TODO: add resolved module reference } func NewImport(name string, version string) *Import { diff --git a/pkg/mon/event.go b/pkg/mon/event.go index 33dbff03..97a637ee 100644 --- a/pkg/mon/event.go +++ b/pkg/mon/event.go @@ -7,10 +7,6 @@ import ( "github.com/google/uuid" ) -const ( - MonitorSubject = "mon" -) - // EventType is the type of event. type EventType string @@ -33,7 +29,7 @@ const ( // Event represents an API event. type Event struct { Id string `json:"id" yaml:"id" csv:"id"` - Source string `json:"source" yaml:"source" csv:"source"` + Device string `json:"device" yaml:"device" csv:"device"` Type EventType `json:"type" yaml:"type" csv:"type"` Timestamp time.Time `json:"timestamp" yaml:"timestamp" csv:"timestamp"` Symbol string `json:"symbol" yaml:"symbol" csv:"symbol"` @@ -41,7 +37,7 @@ type Event struct { } func (e *Event) Subject() string { - return "mon." + e.Source + return "mon." + e.Device } // EventFactory is used to create events. @@ -64,7 +60,7 @@ func (f EventFactory) MakeEvent(kind EventType, symbol string, data Payload) *Ev Id: id, Type: kind, Timestamp: time.Now(), - Source: f.Source, + Device: f.Source, Symbol: symbol, Data: data, } @@ -87,8 +83,8 @@ func (f EventFactory) MakeState(symbol string, data Payload) *Event { // Sanitize ensures events are valid and fills in missing fields.
func (f EventFactory) Sanitize(event *Event) *Event { - if event.Source == "" { - event.Source = f.Source + if event.Device == "" { + event.Device = f.Source } if event.Id == "" { event.Id = uuid.New().String() diff --git a/pkg/mon/ndjson.go b/pkg/mon/ndjson.go index 7cd999f2..9c7d8f97 100644 --- a/pkg/mon/ndjson.go +++ b/pkg/mon/ndjson.go @@ -1,39 +1,26 @@ package mon import ( - "bufio" "encoding/json" - "os" -) + "io" -// TODO: there is already a ndjon scanner in helper package + "github.com/apigear-io/cli/pkg/helper" +) // ReadJsonEvents reads monitor events from a json stream file func ReadJsonEvents(fn string) ([]Event, error) { + scanner := helper.NewNDJSONScanner(0, 1) var events []Event - // read file line by line using scanner - file, err := os.Open(fn) - if err != nil { - return nil, err - } - defer func() { - if err := file.Close(); err != nil { - log.Error().Err(err).Msgf("failed to close file %s", fn) - } - }() - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := scanner.Text() - // decode each line into an event using json + + err := scanner.ScanFile(fn, func(line []byte) error { var event Event - err := json.Unmarshal([]byte(line), &event) - if err != nil { - return nil, err + if err := json.Unmarshal(line, &event); err != nil { + return err } events = append(events, event) - } - err = scanner.Err() - if err != nil { + return nil + }) + if err != nil && err != io.EOF { return nil, err } return events, nil diff --git a/pkg/mon/ndjson_test.go b/pkg/mon/ndjson_test.go deleted file mode 100644 index 09c0b709..00000000 --- a/pkg/mon/ndjson_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package mon - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestJsonReader(t *testing.T) { - // create a channel to receive events - // create a reader - events, err := ReadJsonEvents("testdata/events.ndjson") - assert.NoError(t, err) - assert.Equal(t, 4, len(events)) -} diff --git a/pkg/mon/script.go b/pkg/mon/script.go index a2353f8d..2c6be85d 100644 --- a/pkg/mon/script.go +++ b/pkg/mon/script.go @@ -71,7 +71,7 @@ func (s *EventScript) jsCall(symbol string, data Payload) { Id: uuid.New().String(), Type: TypeCall, Timestamp: time.Now(), - Source: "345", + Device: "345", Symbol: symbol, Data: data, } @@ -84,7 +84,7 @@ func (s *EventScript) jsSignal(symbol string, data Payload) { Id: uuid.New().String(), Type: TypeSignal, Timestamp: time.Now(), - Source: "345", + Device: "345", Symbol: symbol, Data: data, } @@ -97,7 +97,7 @@ func (s *EventScript) jsSet(symbol string, data Payload) { Id: uuid.New().String(), Type: TypeState, Timestamp: time.Now(), - Source: "345", + Device: "345", Symbol: symbol, Data: data, } diff --git a/pkg/net/http.monitor.go b/pkg/net/http.monitor.go index f1e92a99..c153fe4a 100644 --- a/pkg/net/http.monitor.go +++ b/pkg/net/http.monitor.go @@ -1,15 +1,21 @@ package net import ( + "context" "encoding/json" "net/http" "strconv" + "sync" "sync/atomic" "time" "github.com/apigear-io/cli/pkg/log" "github.com/apigear-io/cli/pkg/mon" + "github.com/apigear-io/cli/pkg/streams" + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/controller" "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" "github.com/go-chi/chi/v5" "github.com/google/uuid" @@ -17,15 +23,37 @@ import ( var counter = atomic.Uint64{} -func MonitorRequestHandler(nc *nats.Conn) http.HandlerFunc { +// deviceTracker tracks seen device IDs and triggers on-demand recording for new devices. 
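+//
+// Illustrative sketch (not part of this change) of how the handler below uses it,
+// assuming nc is the handler's NATS connection:
+//
+//	tracker := &deviceTracker{}
+//	if tracker.isNewDevice("dev-1") {
+//		// first time this device is seen: kick off recording in the background
+//		go autoStartRecording(nc, "dev-1")
+//	}
+//	_ = tracker.isNewDevice("dev-1") // false on every later call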
+type deviceTracker struct { + devices sync.Map // deviceId (string) -> true (bool) +} + +// isNewDevice checks if a device is new and marks it as seen atomically. +// Returns true if the device was newly added, false if already seen. +func (dt *deviceTracker) isNewDevice(deviceId string) bool { + _, loaded := dt.devices.LoadOrStore(deviceId, true) + return !loaded // true if newly stored, false if already existed +} + +func MonitorRequestHandler(nc *nats.Conn, doLog bool) http.HandlerFunc { + // Create device tracker for auto on-demand recording + tracker := &deviceTracker{} + return func(w http.ResponseWriter, r *http.Request) { - source := chi.URLParam(r, "source") - log.Debug().Msgf("handle monitor request %s", source) - if source == "" { + deviceId := chi.URLParam(r, "source") + log.Debug().Msgf("handle monitor request %s", deviceId) + if deviceId == "" { log.Error().Msg("source id is required") http.Error(w, "source id is required", http.StatusBadRequest) return } + + // Check if this is a new device and auto-start recording if needed + if tracker.isNewDevice(deviceId) { + log.Info().Msgf("new device detected: %s, starting recording", deviceId) + go autoStartRecording(nc, deviceId) + } + var events []*mon.Event err := json.NewDecoder(r.Body).Decode(&events) if err != nil { @@ -33,30 +61,79 @@ func MonitorRequestHandler(nc *nats.Conn) http.HandlerFunc { http.Error(w, err.Error(), http.StatusBadRequest) return } + // Prepare all events (set metadata, fire hooks) for _, event := range events { - event.Source = source + event.Device = deviceId if event.Id == "" { event.Id = strconv.FormatUint(counter.Add(1), 10) } if event.Timestamp.IsZero() { event.Timestamp = time.Now() } - data, err := json.Marshal(event) - if err != nil { - log.Error().Msgf("marshal event: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return + if doLog { + log.Info().Msgf("event: %+v", event) } mon.Emitter.FireHook(event) - subject := event.Subject() - err = nc.Publish(subject, data) - if err != nil { - log.Error().Msgf("publish event: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return + } + + // Bulk publish all events with single flush + err = streams.PublishMonitorMessageBulk(nc, events) + if err != nil { + log.Error().Msgf("bulk publish events: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } +} + +// autoStartRecording sends an RPC command to start recording for a device. +// This runs in a background goroutine to avoid blocking the HTTP response. 
+func autoStartRecording(nc *nats.Conn, deviceId string) { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + // Check if a recording is already active for this device + js, err := jetstream.New(nc) + if err != nil { + log.Warn().Msgf("auto-start recording: failed to get jetstream for device %s: %v", deviceId, err) + // Continue anyway - controller will reject if recording exists + } else { + states, err := controller.ListStates(js, config.StateBucket) + if err != nil { + log.Debug().Msgf("auto-start recording: failed to list states for device %s: %v", deviceId, err) + // Continue anyway - controller will reject if recording exists + } else { + // Check if any active recording exists for this device + for _, state := range states { + if state.DeviceID == deviceId && state.Status == "running" { + log.Debug().Msgf("auto-start recording skipped: device %s already has active session %s", deviceId, state.SessionID) + return + } } } } + + request := controller.RpcRequest{ + Action: controller.ActionStart, + Subject: config.MonitorSubject, + DeviceID: deviceId, + // SessionID will be auto-generated + // Retention, PreRoll, etc. use defaults + } + + resp, err := controller.SendCommand(ctx, nc, config.RecordRpcSubject, request) + if err != nil { + log.Warn().Msgf("auto-start recording failed for device %s: %v", deviceId, err) + return + } + + if !resp.OK { + // Controller rejected (e.g., recording already exists) + log.Debug().Msgf("auto-start recording response for device %s: %s", deviceId, resp.Message) + return + } + + log.Info().Msgf("auto-started recording for device %s, session=%s", deviceId, resp.SessionID) } // HandleMonitorRequest handles the monitor http request. @@ -79,7 +156,7 @@ func HandleMonitorRequest(w http.ResponseWriter, r *http.Request) { } // set source and id for each event for _, event := range events { - event.Source = source + event.Device = source event.Id = uuid.New().String() if event.Timestamp.IsZero() { event.Timestamp = time.Now() diff --git a/pkg/net/manager.go b/pkg/net/manager.go index 357aa41b..9cd501fc 100644 --- a/pkg/net/manager.go +++ b/pkg/net/manager.go @@ -3,96 +3,128 @@ package net import ( "context" "encoding/json" + "errors" "fmt" "os" "os/signal" "syscall" + "time" "github.com/apigear-io/cli/pkg/helper" "github.com/apigear-io/cli/pkg/log" "github.com/apigear-io/cli/pkg/mon" + "github.com/apigear-io/cli/pkg/streams/config" "github.com/nats-io/nats.go" ) type Options struct { - NatsHost string `json:"nats_host"` - NatsPort int `json:"nats_port"` - NatsDisabled bool `json:"nats_disabled"` - NatsListen bool `json:"nats_inprocess_only"` - NatsLeafURL string `json:"nats_leaf_url"` - NatsCredentials string `json:"nats_credentials"` - HttpAddr string `json:"http_addr"` - HttpDisabled bool `json:"http_disabled"` - MonitorDisabled bool `json:"monitor_disabled"` - ObjectAPIDisabled bool `json:"object_api_disabled"` - Logging bool `json:"logging"` -} - -var DefaultOptions = &Options{ - NatsHost: "localhost", - NatsPort: 4222, - NatsDisabled: false, - NatsListen: false, - NatsLeafURL: "", - NatsCredentials: "", - HttpAddr: "localhost:5555", - HttpDisabled: false, - MonitorDisabled: false, - ObjectAPIDisabled: false, - Logging: false, + NatsServerURL string `json:"nats_server_url"` + HttpAddr string `json:"http_addr"` + Logging bool `json:"logging"` + WSProxy *WSProxyConfig `json:"ws_proxy,omitempty"` +} + +type WSProxyConfig struct { + Enabled bool `json:"enabled"` + BasePath string `json:"base_path"` + Routes 
[]RouteConfig `json:"routes"` + ReconnectAttempts int `json:"reconnect_attempts"` + ReconnectBackoff time.Duration `json:"reconnect_backoff"` +} + +func (o *Options) Validate() error { + if o.NatsServerURL == "" { + o.NatsServerURL = nats.DefaultURL + log.Info().Msgf("nats server URL not set, using default: %s", o.NatsServerURL) + } + if o.HttpAddr == "" { + o.HttpAddr = "127.0.0.1:5555" + log.Info().Msgf("http address not set, using default: %s", o.HttpAddr) + } + if o.WSProxy != nil { + if o.WSProxy.BasePath == "" { + o.WSProxy.BasePath = "/ws" + } + if o.WSProxy.ReconnectAttempts <= 0 { + o.WSProxy.ReconnectAttempts = 3 + } + if o.WSProxy.ReconnectBackoff <= 0 { + o.WSProxy.ReconnectBackoff = 500 * time.Millisecond + } + } + return nil } type NetworkManager struct { - opts *Options - natsServer *NatsServer + opts Options httpServer *HTTPServer nc *nats.Conn + wsProxy *WSProxy + olnkServer *OlinkServer + olnkRelay *ReplayOlinkRelay } -func NewManager() *NetworkManager { +func NewManager(opts Options) *NetworkManager { log.Debug().Msg("net.NewManager") - return &NetworkManager{} + if err := opts.Validate(); err != nil { + log.Error().Err(err).Msg("invalid network manager options") + } + return &NetworkManager{ + opts: opts, + olnkServer: NewOlinkServer(), + } } -func (s *NetworkManager) Start(opts *Options) error { - s.opts = opts +func (m *NetworkManager) NatsConnection() (*nats.Conn, error) { + if m.nc != nil && !m.nc.IsClosed() { + return m.nc, nil + } + if m.opts.NatsServerURL == "" { + return nil, fmt.Errorf("nats server URL not set") + } + nc, err := nats.Connect(m.opts.NatsServerURL) + if err != nil { + return nil, err + } + m.nc = nc + return m.nc, nil +} + +func (m *NetworkManager) Start(ctx context.Context) error { log.Debug().Msg("start network manager") - if !s.opts.HttpDisabled { - err := s.StartHTTP(s.opts.HttpAddr) - if err != nil { - log.Error().Err(err).Msg("failed to start http server") - return err - } + err := m.StartHTTP(m.opts.HttpAddr) + if err != nil { + log.Error().Err(err).Msg("failed to start http server") + return err } - if !s.opts.NatsDisabled { - err := s.StartNATS(&NatsServerOptions{ - Host: s.opts.NatsHost, - Port: s.opts.NatsPort, - NatsListen: s.opts.NatsListen, - LeafURL: s.opts.NatsLeafURL, - Credentials: s.opts.NatsCredentials, - }) - if err != nil { - log.Error().Err(err).Msg("failed to start nats server") - return err - } + err = m.EnableMonitor(true) + if err != nil { + log.Error().Err(err).Msg("failed to enable monitor") + return err } - if !s.opts.MonitorDisabled { - err := s.EnableMonitor() - if err != nil { - log.Error().Err(err).Msg("failed to enable monitor") - return err - } + if err := m.EnableWSProxy(); err != nil { + log.Error().Err(err).Msg("failed to enable ws proxy") + return err + } + err = m.enableOlinkServer() + if err != nil { + log.Error().Err(err).Msg("failed to enable olink server") + return err + } + err = m.enableReplayRelay() + if err != nil { + log.Error().Err(err).Msg("failed to enable replay relay") + return err } return nil } -func (s *NetworkManager) Wait(ctx context.Context) error { +func (m *NetworkManager) Wait(ctx context.Context) error { log.Info().Msg("services running...") sig := make(chan os.Signal, 1) signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) defer func() { - err := s.Stop() + err := m.Stop() if err != nil { log.Error().Err(err).Msg("failed to stop services") } @@ -106,67 +138,30 @@ func (s *NetworkManager) Wait(ctx context.Context) error { } } -func (s *NetworkManager) Stop() error { +func (m 
*NetworkManager) Stop() error { log.Info().Msg("stop network manager") - err := s.StopHTTP() - if err != nil { - return err - } - err = s.StopNATS() - if err != nil { - return err - } - return nil -} - -func (s *NetworkManager) StartNATS(opts *NatsServerOptions) error { - if s.natsServer != nil { - return fmt.Errorf("nats server already started") - } - server, err := NewNatsServer(opts) + err := m.StopHTTP() if err != nil { return err } - s.natsServer = server - return s.natsServer.Start() -} - -func (s *NetworkManager) StopNATS() error { - log.Info().Msg("stop nats server") - if s.nc != nil { - err := s.nc.Drain() + if m.olnkRelay != nil { + log.Info().Msg("stop olink replay relay") + err = m.olnkRelay.Stop() if err != nil { - return err + log.Error().Err(err).Msg("failed to stop olink replay relay") } } - if s.natsServer != nil { - return s.natsServer.Shutdown() - } return nil } -func (s *NetworkManager) NatsClientURL() string { - if s.natsServer != nil { - return s.natsServer.ClientURL() - } - return "" -} - -func (s *NetworkManager) NatsConnection() (*nats.Conn, error) { - if s.natsServer == nil { - return nil, fmt.Errorf("nats server not started") - } - return s.natsServer.Connection() -} - -func (s *NetworkManager) StartHTTP(addr string) error { - if s.httpServer != nil { +func (m *NetworkManager) StartHTTP(addr string) error { + if m.httpServer != nil { log.Info().Msg("stop running http server") - s.httpServer.Stop() + m.httpServer.Stop() } log.Info().Msg("start http server") - s.httpServer = NewHTTPServer(&HttpServerOptions{Addr: addr}) - err := s.httpServer.Start() + m.httpServer = NewHTTPServer(&HttpServerOptions{Addr: addr}) + err := m.httpServer.Start() if err != nil { log.Error().Err(err).Msg("failed to start http server") } @@ -174,70 +169,165 @@ func (s *NetworkManager) StartHTTP(addr string) error { return err } -func (s *NetworkManager) StopHTTP() error { +func (m *NetworkManager) StopHTTP() error { log.Info().Msg("stop http server") - if s.httpServer != nil { - s.httpServer.Stop() + if m.httpServer != nil { + m.httpServer.Stop() } return nil } -func (s *NetworkManager) HttpServer() *HTTPServer { - return s.httpServer +func (m *NetworkManager) HttpServer() *HTTPServer { + return m.httpServer } -func (s *NetworkManager) EnableMonitor() error { - if s.httpServer == nil { +func (m *NetworkManager) EnableMonitor(doLog bool) error { + log.Info().Msg("enable monitor endpoint") + if m.httpServer == nil { log.Error().Msg("http server not started") return fmt.Errorf("http server not started") } - nc, err := s.NatsConnection() + nc, err := m.NatsConnection() if err != nil { - log.Error().Msgf("nats connection: %v", err) + log.Error().Err(err).Msg("nats connection") + return err } - s.httpServer.Router().HandleFunc("/monitor/{source}", MonitorRequestHandler(nc)) - log.Info().Msgf("start http monitor endpoint on http://%s/monitor/{source}", s.httpServer.Address()) + m.httpServer.Router().HandleFunc("/monitor/{source}", MonitorRequestHandler(nc, doLog)) + log.Info().Msgf("start http monitor endpoint on http://%s/monitor/{source}", m.httpServer.Address()) return nil } -func (s *NetworkManager) GetMonitorAddress() (string, error) { +func (m *NetworkManager) EnableWSProxy() error { + cfg := m.opts.WSProxy + if cfg == nil || !cfg.Enabled { + return nil + } + if m.httpServer == nil { + return fmt.Errorf("http server not started") + } + + opts := ProxyOptions{ + BasePath: cfg.BasePath, + Routes: cfg.Routes, + ReconnectAttempts: cfg.ReconnectAttempts, + ReconnectBackoff: cfg.ReconnectBackoff, 
+ OnConnect: func(ctx context.Context, info *ConnectionInfo) error { + log.Info().Str("target", info.TargetURL).Str("path", info.Route.Path).Msg("ws proxy connection accepted") + return nil + }, + OnDisconnect: func(ctx context.Context, info *ConnectionInfo, err error) { + event := log.Info() + if err != nil && !errors.Is(err, context.Canceled) { + event = log.Warn().Err(err) + } + event.Str("target", info.TargetURL).Str("path", info.Route.Path).Msg("ws proxy connection closed") + }, + } + + proxy, err := NewWSProxy(opts) + if err != nil { + return fmt.Errorf("ws proxy init: %w", err) + } + m.wsProxy = proxy + + m.httpServer.Router().Mount("/", proxy) + log.Info().Msgf("ws proxy enabled at %s", cfg.BasePath) + return nil +} + +func (m *NetworkManager) GetMonitorAddress() (string, error) { log.Info().Msg("get monitor address") - if s.httpServer == nil { + if m.httpServer == nil { return "", fmt.Errorf("http server not started") } - return fmt.Sprintf("http://%s/monitor/${source}", s.httpServer.Address()), nil + return fmt.Sprintf("http://%s/monitor/${source}", m.httpServer.Address()), nil } -func (s *NetworkManager) GetSimulationAddress() (string, error) { +func (m *NetworkManager) GetSimulationAddress() (string, error) { log.Info().Msg("get simulation address") - if s.httpServer == nil { + if m.httpServer == nil { return "", fmt.Errorf("http server not started") } - return fmt.Sprintf("ws://%s/ws", s.httpServer.Address()), nil + return fmt.Sprintf("ws://%s/ws", m.httpServer.Address()), nil } // MonitorEmitter return the monitor event emitter. -func (s *NetworkManager) MonitorEmitter() *helper.Hook[mon.Event] { +func (m *NetworkManager) MonitorEmitter() *helper.Hook[mon.Event] { return &mon.Emitter } -func (s *NetworkManager) OnMonitorEvent(fn func(event *mon.Event)) { - nc, err := s.NatsConnection() +func (m *NetworkManager) OnMonitorEvent(fn func(event *mon.Event)) { + nc, err := m.NatsConnection() if err != nil { log.Error().Msgf("nats connection: %v", err) return } - log.Debug().Msg("subscribe to monitor events") - _, err = nc.Subscribe(mon.MonitorSubject+".>", func(msg *nats.Msg) { + log.Info().Msg("subscribe to monitor events") + _, err = nc.Subscribe(config.MonitorSubject+".>", func(msg *nats.Msg) { var event mon.Event - err := json.Unmarshal(msg.Data, &event) - if err != nil { - log.Error().Msgf("unmarshal event: %v", err) - return + + // Try to read metadata from NATS headers first (optimized path) + if msg.Header != nil && msg.Header.Get("X-Monitor-Type") != "" { + // Headers available - reconstruct event from headers + data payload + event.Type = mon.ParseEventType(msg.Header.Get("X-Monitor-Type")) + event.Symbol = msg.Header.Get("X-Monitor-Symbol") + event.Device = msg.Header.Get("X-Monitor-Device") + event.Id = msg.Header.Get("X-Monitor-Id") + // Parse timestamp if available + if tsStr := msg.Header.Get("X-Monitor-Timestamp"); tsStr != "" { + if ts, err := time.Parse("2006-01-02T15:04:05.999999999Z07:00", tsStr); err == nil { + event.Timestamp = ts + } + } + + // Unmarshal only the Data payload (not full event) + var payload mon.Payload + if err := json.Unmarshal(msg.Data, &payload); err != nil { + log.Error().Msgf("unmarshal data payload: %v", err) + return + } + event.Data = payload + } else { + // Fallback: full event decode (backward compatibility with old messages) + if err := json.Unmarshal(msg.Data, &event); err != nil { + log.Error().Msgf("unmarshal event: %v", err) + return + } } + fn(&event) }) if err != nil { log.Error().Err(err).Msg("failed to subscribe to monitor 
events") } } + +func (m *NetworkManager) enableOlinkServer() error { + if m.httpServer == nil { + return fmt.Errorf("http server not started") + } + addr := m.HttpServer().Address() + log.Info().Msgf("starting Olink server at ws://%s/ws", addr) + m.HttpServer().Router().Handle("/ws", m.olnkServer) + return nil +} + +func (m *NetworkManager) OlinkServer() *OlinkServer { + return m.olnkServer +} + +func (m *NetworkManager) enableReplayRelay() error { + log.Info().Msg("enable olink replay relay") + nc, err := m.NatsConnection() + if err != nil { + log.Error().Err(err).Msg("failed to get nats connection for replay relay") + return err + } + relay := NewReplayOlinkRelay(nc, config.PlaybackSubject, m.OlinkServer()) + if err := relay.Start(context.Background()); err != nil { + log.Error().Err(err).Msg("failed to start playback relay") + return err + } + m.olnkRelay = relay + return nil +} diff --git a/pkg/net/nats.server.go b/pkg/net/nats.server.go deleted file mode 100644 index 035ab1a7..00000000 --- a/pkg/net/nats.server.go +++ /dev/null @@ -1,110 +0,0 @@ -package net - -import ( - "fmt" - "net/url" - "time" - - "github.com/apigear-io/cli/pkg/cfg" - "github.com/apigear-io/cli/pkg/log" - "github.com/nats-io/nats-server/v2/server" - "github.com/nats-io/nats.go" -) - -// Create an embedded NATS server - -const ( - NatsTimeout = 30 * time.Second -) - -type NatsServerOptions struct { - Host string - Port int - NatsListen bool - LeafURL string - Credentials string - Logging bool -} - -type NatsServer struct { - opts *NatsServerOptions - ns *server.Server - nc *nats.Conn -} - -func NewNatsServer(opts *NatsServerOptions) (*NatsServer, error) { - if opts.Host == "" { - opts.Host = "localhost" - } - if opts.Port == 0 { - opts.Port = 4222 - } - sopts := &server.Options{ - ServerName: "apigear_server", - Host: opts.Host, - Port: opts.Port, - DontListen: !opts.NatsListen, - JetStream: true, - JetStreamDomain: "apigear", - StoreDir: cfg.ConfigDir() + "/nats", - } - if opts.LeafURL != "" { - leafURL, err := url.Parse(opts.LeafURL) - if err != nil { - return nil, err - } - sopts.LeafNode = server.LeafNodeOpts{ - Remotes: []*server.RemoteLeafOpts{ - { - URLs: []*url.URL{leafURL}, - Credentials: opts.Credentials, - }, - }, - } - } - server, err := server.NewServer(sopts) - if err != nil { - log.Error().Err(err).Msg("failed to create nats server") - return nil, err - } - if opts.Logging { - server.ConfigureLogger() - } - - return &NatsServer{opts: opts, ns: server}, nil -} - -func (ns *NatsServer) Start() error { - log.Info().Msg("start nats server") - ns.ns.Start() - log.Info().Msg("wait for nats server to be ready") - if !ns.ns.ReadyForConnections(NatsTimeout) { - return fmt.Errorf("nats server not ready") - } - log.Info().Msgf("start nats server listen at %s", ns.ns.ClientURL()) - return nil -} - -func (ns *NatsServer) Shutdown() error { - ns.ns.Shutdown() - return nil -} - -func (ns *NatsServer) ClientURL() string { - return ns.ns.ClientURL() -} - -func (ns *NatsServer) Connection() (*nats.Conn, error) { - if ns.nc == nil { - copts := []nats.Option{} - if ns.opts.NatsListen { - copts = append(copts, nats.InProcessServer(ns.ns)) - } - nc, err := nats.Connect(ns.ns.ClientURL(), copts...) 
- if err != nil { - return nil, err - } - ns.nc = nc - } - return ns.nc, nil -} diff --git a/pkg/net/ndjson.go b/pkg/net/ndjson.go deleted file mode 100644 index 3d3fbe4e..00000000 --- a/pkg/net/ndjson.go +++ /dev/null @@ -1,59 +0,0 @@ -package net - -import ( - "bufio" - "io" - "os" - "time" - - "github.com/apigear-io/cli/pkg/log" -) - -// TODO: there is already a ndjon scanner in helper package - -// NDJSONScanner scans a reader line by line and writes to the writer. -type NDJSONScanner struct { - sleep time.Duration - repeat int -} - -// NewNDJSONScanner creates a new NDJSON scanner. -func NewNDJSONScanner(sleep time.Duration, repeat int) *NDJSONScanner { - return &NDJSONScanner{ - sleep: sleep, - repeat: repeat, - } -} - -// Scan scans a reader line by line and writes to the writer. -func (s *NDJSONScanner) Scan(r io.Reader, w io.Writer) error { - scanner := bufio.NewScanner(r) - for i := 0; i < s.repeat; i++ { - for scanner.Scan() { - line := scanner.Bytes() - log.Debug().Msgf("write: %s", line) - _, err := w.Write(line) - if err != nil { - return err - } - if s.sleep > 0 { - time.Sleep(s.sleep) - } - } - } - return scanner.Err() -} - -// ScanFile scans a file line by line and writes to the writer. -func (s *NDJSONScanner) ScanFile(path string, w io.Writer) error { - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { - if err := f.Close(); err != nil { - log.Error().Err(err).Msgf("failed to close file %s", path) - } - }() - return s.Scan(f, w) -} diff --git a/pkg/sim/olink_server.go b/pkg/net/olink_server.go similarity index 83% rename from pkg/sim/olink_server.go rename to pkg/net/olink_server.go index 48d28397..e94ebaa8 100644 --- a/pkg/sim/olink_server.go +++ b/pkg/net/olink_server.go @@ -1,9 +1,10 @@ -package sim +package net import ( "context" "net/http" + "github.com/apigear-io/cli/pkg/log" "github.com/apigear-io/objectlink-core-go/olink/remote" "github.com/apigear-io/objectlink-core-go/olink/ws" ) @@ -11,6 +12,7 @@ import ( type IOlinkServer interface { RegisterSource(source remote.IObjectSource) UnregisterSource(source remote.IObjectSource) + SetSourceFactory(factory remote.SourceFactory) } type OlinkServer struct { @@ -48,3 +50,7 @@ func (s *OlinkServer) RegisterSource(source remote.IObjectSource) { func (s *OlinkServer) UnregisterSource(source remote.IObjectSource) { s.registry.RemoveObjectSource(source) } + +func (s *OlinkServer) SetSourceFactory(factory remote.SourceFactory) { + s.registry.SetSourceFactory(factory) +} diff --git a/pkg/net/path.go b/pkg/net/path.go new file mode 100644 index 00000000..95d08292 --- /dev/null +++ b/pkg/net/path.go @@ -0,0 +1,21 @@ +package net + +import "strings" + +// NormalizePath ensures a path starts with a leading slash and removes any trailing slash (except for root). 
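+//
+// Expected mappings (illustrative, derived from the rules above):
+//
+//	NormalizePath("")         == "/"
+//	NormalizePath("ws")       == "/ws"
+//	NormalizePath("/ws/")     == "/ws"
+//	NormalizePath(" /a/b/ ")  == "/a/b"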
+func NormalizePath(path string) string { + p := strings.TrimSpace(path) + if p == "" || p == "/" { + return "/" + } + if !strings.HasPrefix(p, "/") { + p = "/" + p + } + if len(p) > 1 && strings.HasSuffix(p, "/") { + p = strings.TrimRight(p, "/") + if p == "" { + return "/" + } + } + return p +} diff --git a/pkg/net/replay_relay.go b/pkg/net/replay_relay.go new file mode 100644 index 00000000..3c769dea --- /dev/null +++ b/pkg/net/replay_relay.go @@ -0,0 +1,119 @@ +package net + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/apigear-io/cli/pkg/log" + "github.com/apigear-io/cli/pkg/mon" + "github.com/apigear-io/objectlink-core-go/olink/core" + "github.com/nats-io/nats.go" +) + +type ReplayOlinkRelay struct { + nc *nats.Conn + sub *nats.Subscription + subject string + factory *PlaybackSourceFactory + conv *core.MessageConverter +} + +func NewReplayOlinkRelay(nc *nats.Conn, subject string, server IOlinkServer) *ReplayOlinkRelay { + factory := NewPlaybackSourceFactory() + server.SetSourceFactory(factory.SourceFactoryFunc()) + return &ReplayOlinkRelay{ + nc: nc, + subject: subject, + factory: factory, + conv: core.NewConverter(core.FormatJson), + } +} + +func (r *ReplayOlinkRelay) Start(ctx context.Context) error { + sub, err := r.nc.Subscribe(r.subject, r.handleMsg) + if err != nil { + return err + } + r.sub = sub + + go func() { + <-ctx.Done() + _ = r.Stop() + }() + + log.Info().Str("subject", r.subject).Msg("playback relay subscribed") + return nil +} + +func (r *ReplayOlinkRelay) Stop() error { + if r.sub != nil { + _ = r.sub.Unsubscribe() + r.sub = nil + } + return nil +} + +func (r *ReplayOlinkRelay) handleMsg(msg *nats.Msg) { + if msg == nil { + return + } + log.Debug().Str("subject", msg.Subject).RawJSON("data", msg.Data).Msg("playback relay: message received") + + // Try to read metadata from NATS headers first (optimized path) + var event mon.Event + if msg.Header != nil && msg.Header.Get("X-Monitor-Type") != "" { + // Headers available - reconstruct event from headers + data payload + event.Type = mon.ParseEventType(msg.Header.Get("X-Monitor-Type")) + event.Symbol = msg.Header.Get("X-Monitor-Symbol") + event.Device = msg.Header.Get("X-Monitor-Device") + event.Id = msg.Header.Get("X-Monitor-Id") + // Timestamp parsing optional for routing + + // Unmarshal only the Data payload (not full event) + var payload mon.Payload + if err := json.Unmarshal(msg.Data, &payload); err != nil { + log.Error().Err(err).Msg("playback relay: unmarshal data payload failed") + return + } + event.Data = payload + } else { + // Fallback: full event decode (backward compatibility with old messages) + if err := json.Unmarshal(msg.Data, &event); err != nil { + log.Error().Err(err).Msg("playback relay: unmarshal event failed") + return + } + } + + // one event can trigger multiple OLink frames (e.g., state with multiple fields) + frames, err := convertEventToOlinkMessages(&event) + if err != nil { + log.Error().Err(err).Msg("playback relay: convert event failed") + return + } + r.factory.Dispatch(frames) +} + +// convertEventToOlinkMessages converts a monitor event to one or more OLink messages +func convertEventToOlinkMessages(event *mon.Event) ([]core.Message, error) { + switch event.Type { + case mon.TypeCall: + return []core.Message{core.MakeInvokeMessage(0, event.Symbol, core.AsArgs(nil))}, nil + case mon.TypeSignal: + return []core.Message{core.MakeSignalMessage(event.Symbol, core.AsArgs(event.Data))}, nil + case mon.TypeState: + // State can have multiple fields, each becomes a separate 
PropertyChange message + messages := make([]core.Message, 0, len(event.Data)) + module, object := core.SymbolIdToParts(event.Symbol) + for field, value := range event.Data { + // Build the property symbol: objectId/member + objectId := fmt.Sprintf("%s.%s", module, object) + propertySymbol := core.MakeSymbolId(objectId, field) + log.Debug().Str("propertySymbol", propertySymbol).Interface("value", value).Msg("playback relay: state field") + messages = append(messages, core.MakePropertyChangeMessage(propertySymbol, value)) + } + return messages, nil + default: + return nil, fmt.Errorf("unknown event type: %s", event.Type) + } +} diff --git a/pkg/net/replay_source.go b/pkg/net/replay_source.go new file mode 100644 index 00000000..efc6e0e1 --- /dev/null +++ b/pkg/net/replay_source.go @@ -0,0 +1,192 @@ +package net + +import ( + "fmt" + "sync" + + "github.com/apigear-io/cli/pkg/log" + "github.com/apigear-io/objectlink-core-go/olink/core" + "github.com/apigear-io/objectlink-core-go/olink/remote" +) + +// PlaybackSourceFactory manages per-object playback sources and dispatches +// recorded ObjectLink messages to the registered nodes. +type PlaybackSourceFactory struct { + mu sync.RWMutex + sources map[string]*PlaybackSource +} + +func NewPlaybackSourceFactory() *PlaybackSourceFactory { + return &PlaybackSourceFactory{sources: make(map[string]*PlaybackSource)} +} + +// SourceFactoryFunc returns a remote.SourceFactory compatible function so the +// registry can lazily create playback sources when a node links to an object. +func (f *PlaybackSourceFactory) SourceFactoryFunc() remote.SourceFactory { + return func(objectID string) remote.IObjectSource { + return f.getOrCreate(objectID) + } +} + +// Dispatch routes decoded ObjectLink messages to the appropriate playback sources. +func (f *PlaybackSourceFactory) Dispatch(messages []core.Message) { + for _, msg := range messages { + log.Info().Msgf("playback: dispatching message %v", msg) + objectID := resolveObjectID(msg) + if objectID == "" { + log.Warn().Msg("playback: unable to resolve object id from message") + continue + } + src := f.getOrCreate(objectID) + src.HandleMessage(msg) + } +} + +func (f *PlaybackSourceFactory) getOrCreate(objectID string) *PlaybackSource { + f.mu.Lock() + defer f.mu.Unlock() + if src, ok := f.sources[objectID]; ok { + return src + } + src := NewPlaybackSource(objectID) + f.sources[objectID] = src + return src +} + +// resolveObjectID extracts the object identifier from a generic ObjectLink message. +func resolveObjectID(msg core.Message) string { + switch msg.Type() { + case core.MsgLink, core.MsgInit, core.MsgUnlink: + return core.AsString(msg[1]) + case core.MsgSetProperty, core.MsgPropertyChange: + propertyID := core.AsString(msg[1]) + objectID, _ := core.SymbolIdToParts(propertyID) + return objectID + case core.MsgInvoke: + _, methodID, _ := msg.AsInvoke() + objectID, _ := core.SymbolIdToParts(methodID) + return objectID + case core.MsgInvokeReply: + _, methodID, _ := msg.AsInvokeReply() + objectID, _ := core.SymbolIdToParts(methodID) + return objectID + case core.MsgSignal: + signalID, _ := msg.AsSignal() + objectID, _ := core.SymbolIdToParts(signalID) + return objectID + default: + return "" + } +} + +// PlaybackSource implements remote.IObjectSource and replays messages to linked nodes. 
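+//
+// Rough flow (illustrative, not part of this change): the factory creates one source
+// per object id and the relay pushes decoded frames into it; server is assumed to be
+// any IOlinkServer:
+//
+//	factory := NewPlaybackSourceFactory()
+//	server.SetSourceFactory(factory.SourceFactoryFunc()) // registry creates sources lazily on link
+//	factory.Dispatch([]core.Message{
+//		core.MakePropertyChangeMessage("demo.Counter/count", 3),
+//	})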
+type PlaybackSource struct { + objectID string + mu sync.RWMutex + nodes map[*remote.Node]struct{} + props core.KWArgs + initMsg core.Message +} + +func NewPlaybackSource(objectID string) *PlaybackSource { + return &PlaybackSource{ + objectID: objectID, + nodes: make(map[*remote.Node]struct{}), + props: core.KWArgs{}, + } +} + +func (s *PlaybackSource) ObjectId() string { + return s.objectID +} + +func (s *PlaybackSource) Invoke(methodId string, args core.Args) (core.Any, error) { + return nil, fmt.Errorf("playback source %s: invoke not supported", s.objectID) +} + +func (s *PlaybackSource) SetProperty(propertyId string, value core.Any) error { + return fmt.Errorf("playback source %s: set property not supported", s.objectID) +} + +func (s *PlaybackSource) Linked(objectId string, node *remote.Node) error { + s.mu.Lock() + s.nodes[node] = struct{}{} + init := s.initMsg + s.mu.Unlock() + + if init != nil { + node.SendMessage(init) + } + return nil +} + +func (s *PlaybackSource) CollectProperties() (core.KWArgs, error) { + s.mu.RLock() + defer s.mu.RUnlock() + return cloneKWArgs(s.props), nil +} + +// HandleMessage updates internal state and broadcasts the message to linked nodes. +func (s *PlaybackSource) HandleMessage(msg core.Message) { + s.mu.Lock() + s.updateStateLocked(msg) + nodes := make([]*remote.Node, 0, len(s.nodes)) + for node := range s.nodes { + nodes = append(nodes, node) + } + outgoing := cloneMessage(msg) + s.mu.Unlock() + + for _, node := range nodes { + node.SendMessage(outgoing) + } +} + +func (s *PlaybackSource) updateStateLocked(msg core.Message) { + switch msg.Type() { + case core.MsgInit: + _, props := msg.AsInit() + s.props = cloneKWArgs(props) + s.initMsg = core.MakeInitMessage(s.objectID, cloneKWArgs(props)) + case core.MsgPropertyChange: + propertyID, value := msg.AsPropertyChange() + objectID, name := core.SymbolIdToParts(propertyID) + if objectID == s.objectID && name != "" { + if s.props == nil { + s.props = core.KWArgs{} + } + s.props[name] = value + } + case core.MsgSetProperty: + propertyID, value := msg.AsSetProperty() + objectID, name := core.SymbolIdToParts(propertyID) + if objectID == s.objectID && name != "" { + if s.props == nil { + s.props = core.KWArgs{} + } + s.props[name] = value + } + } +} + +func cloneKWArgs(in core.KWArgs) core.KWArgs { + if in == nil { + return nil + } + out := make(core.KWArgs, len(in)) + for k, v := range in { + out[k] = v + } + return out +} + +func cloneMessage(msg core.Message) core.Message { + if msg == nil { + return nil + } + copy := make(core.Message, len(msg)) + for i, v := range msg { + copy[i] = v + } + return copy +} diff --git a/pkg/net/wscat.go b/pkg/net/wscat.go new file mode 100644 index 00000000..5a545bf2 --- /dev/null +++ b/pkg/net/wscat.go @@ -0,0 +1,174 @@ +package net + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/apigear-io/cli/pkg/helper" + "github.com/gorilla/websocket" +) + +// WSClientOptions configure the wscat client behaviour. +type WSClientOptions struct { + URL string + Headers http.Header + Dialer *websocket.Dialer + File string + DecodeJSON bool + Interval time.Duration + Repeat int + OnMessage func(messageType int, payload []byte) + OnClose func(error) + OnSend func([]byte) +} + +// RunWSClient connects to the WebSocket endpoint and either reads from stdin or the configured file. 
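+//
+// Minimal sketch (illustrative, not part of this change) that replays a local
+// NDJSON file once and prints whatever the server sends back; URL and file name
+// are placeholders:
+//
+//	err := RunWSClient(ctx, WSClientOptions{
+//		URL:      "ws://localhost:5555/ws",
+//		File:     "events.ndjson",
+//		Interval: 100 * time.Millisecond,
+//		Repeat:   1,
+//		OnMessage: func(_ int, payload []byte) { fmt.Printf("<- %s\n", payload) },
+//	})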
+func RunWSClient(ctx context.Context, opts WSClientOptions) error { + if opts.URL == "" { + return fmt.Errorf("wscat: url cannot be empty") + } + + dialer := opts.Dialer + if dialer == nil { + dialer = websocket.DefaultDialer + } + + conn, _, err := dialer.DialContext(ctx, opts.URL, opts.Headers) + if err != nil { + return err + } + defer func() { + _ = conn.Close() + }() + + recvErr := make(chan error, 1) + go func() { + for { + typ, payload, err := conn.ReadMessage() + if err != nil { + recvErr <- err + return + } + if opts.OnMessage != nil { + opts.OnMessage(typ, payload) + } + } + }() + + sendErr := make(chan error, 1) + go func() { + var err error + if opts.File != "" { + err = sendFile(ctx, conn, opts.File, opts.Interval, opts.Repeat, opts.DecodeJSON, opts.OnSend) + } else { + err = sendInteractive(ctx, conn, opts.DecodeJSON, opts.OnSend) + } + sendErr <- err + }() + + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-recvErr: + if opts.OnClose != nil { + opts.OnClose(err) + } + return err + case err := <-sendErr: + if opts.OnClose != nil { + opts.OnClose(err) + } + return err + } +} + +func sendInteractive(ctx context.Context, conn *websocket.Conn, decodeJSON bool, onSend func([]byte)) error { + reader := bufio.NewReader(os.Stdin) + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + line, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF { + return nil + } + return err + } + payload := append([]byte{}, processPayload(line, decodeJSON)...) + if onSend != nil { + onSend(payload) + } + if err := conn.WriteMessage(websocket.TextMessage, payload); err != nil { + return err + } + } +} + +func sendFile(ctx context.Context, conn *websocket.Conn, path string, interval time.Duration, repeat int, decodeJSON bool, onSend func([]byte)) error { + repeatCount := repeat + if repeatCount == 0 { + repeatCount = 1 + } + + for pass := 0; repeatCount < 0 || pass < repeatCount; pass++ { + f, err := os.Open(path) + if err != nil { + return err + } + + scanErr := func() error { + defer func() { + _ = f.Close() + }() + scanner := helper.NewNDJSONScanner(interval, 1) + return scanner.Scan(f, func(line []byte) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + payload := append([]byte{}, processPayload(line, decodeJSON)...) + if onSend != nil { + onSend(payload) + } + return conn.WriteMessage(websocket.TextMessage, payload) + }) + }() + if scanErr != nil { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if errors.Is(scanErr, context.Canceled) { + return ctx.Err() + } + return scanErr + } + } + return nil +} + +func processPayload(input []byte, decode bool) []byte { + if !decode { + return input + } + var anyVal any + if err := json.Unmarshal(input, &anyVal); err != nil { + return input + } + if normalized, err := json.Marshal(anyVal); err == nil { + return normalized + } + return input +} diff --git a/pkg/net/wsecho.go b/pkg/net/wsecho.go new file mode 100644 index 00000000..75bdcc62 --- /dev/null +++ b/pkg/net/wsecho.go @@ -0,0 +1,83 @@ +package net + +import ( + "context" + "log" + "net/http" + "time" + + "github.com/gorilla/websocket" +) + +// WSEchoOptions configure the echo server behaviour. +type WSEchoOptions struct { + Addr string + Path string + Headers http.Header + Upgrader *websocket.Upgrader +} + +// RunWSEcho starts a WebSocket echo server until the context is cancelled. 
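+//
+// Minimal sketch (illustrative, not part of this change); address and path are placeholders:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	// blocks until ctx is cancelled or the listener fails
+//	if err := RunWSEcho(ctx, WSEchoOptions{Addr: "127.0.0.1:8080", Path: "/ws"}); err != nil {
+//		// context cancelled or listener failed
+//	}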
+func RunWSEcho(ctx context.Context, opts WSEchoOptions) error { + path := NormalizePath(opts.Path) + if path == "" || path == "/" { + path = "/ws" + } + + upgrader := opts.Upgrader + if upgrader == nil { + upgrader = &websocket.Upgrader{ + CheckOrigin: func(*http.Request) bool { return true }, + } + } + + mux := http.NewServeMux() + mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + conn, err := upgrader.Upgrade(w, r, opts.Headers) + if err != nil { + log.Printf("wsecho: upgrade error: %v", err) + return + } + defer func() { + if err := conn.Close(); err != nil { + log.Printf("wsecho: close error: %v", err) + } + }() + + log.Printf("wsecho: client connected %s", r.RemoteAddr) + for { + typ, payload, err := conn.ReadMessage() + if err != nil { + log.Printf("wsecho: read error: %v", err) + return + } + if err := conn.WriteMessage(typ, payload); err != nil { + log.Printf("wsecho: write error: %v", err) + return + } + } + }) + + server := &http.Server{ + Addr: opts.Addr, + Handler: mux, + } + + done := make(chan struct{}) + go func() { + <-ctx.Done() + shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := server.Shutdown(shutdownCtx); err != nil { + log.Printf("wsecho: shutdown error: %v", err) + } + close(done) + }() + + log.Printf("wsecho: listening on %s%s", opts.Addr, path) + if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + return err + } + <-done + return nil +} diff --git a/pkg/net/wsproxy.go b/pkg/net/wsproxy.go new file mode 100644 index 00000000..8ce8584a --- /dev/null +++ b/pkg/net/wsproxy.go @@ -0,0 +1,458 @@ +package net + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/go-chi/chi/v5" + "github.com/google/uuid" + "github.com/gorilla/websocket" + "github.com/rs/zerolog/log" +) + +// MessageMode defines whether a proxy route operates on text or binary frames. +type MessageMode int + +const ( + // MessageModeText indicates that proxy messages are plain text frames. + MessageModeText MessageMode = iota + // MessageModeBinary indicates that proxy messages are binary frames. + MessageModeBinary +) + +// MessageDirection describes the movement of a proxied message. +type MessageDirection int + +const ( + // DirectionClientToUpstream covers messages flowing from connected clients to the upstream service. + DirectionClientToUpstream MessageDirection = iota + // DirectionUpstreamToClient covers messages flowing from the upstream service back to the client. + DirectionUpstreamToClient +) + +// RouteConfig configures how requests for a specific path should be proxied. +type RouteConfig struct { + // Path describes the HTTP route to match, supporting colon-style parameters (e.g. /ws/:id). + Path string + // Param identifies the named parameter that maps to Targets keys. + Param string + // Targets maps parameter values to upstream WebSocket URLs. + Targets map[string]string + // Mode controls whether the route expects text or binary messages. + Mode MessageMode +} + +// MiddlewareFunc allows consumers to inspect and mutate proxied WebSocket messages. +type MiddlewareFunc func(ctx context.Context, msg *ProxyMessage) error + +// ProxyMessage carries metadata and payload for middleware inspection. +type ProxyMessage struct { + Connection *ConnectionInfo + Direction MessageDirection + Type MessageMode + Data []byte + Drop bool +} + +// ConnectionInfo holds details about a proxied WebSocket session. 
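+//
+// A middleware can use it to decide per-connection behaviour; illustrative sketch
+// (not part of this change), assuming a *WSProxy named proxy and a hypothetical
+// target id "debug":
+//
+//	proxy.Use(func(ctx context.Context, msg *ProxyMessage) error {
+//		if msg.Direction == DirectionClientToUpstream && msg.Connection.TargetID == "debug" {
+//			msg.Drop = true // silently swallow client frames for the "debug" target
+//		}
+//		return nil
+//	})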
+type ConnectionInfo struct { + ID string + Route *RouteConfig + TargetID string + TargetURL string + Request *http.Request +} + +// ProxyOptions encapsulates WSProxy configuration. +type ProxyOptions struct { + BasePath string + Routes []RouteConfig + Dialer *websocket.Dialer + Upgrader *websocket.Upgrader + ReconnectAttempts int + ReconnectBackoff time.Duration + Middlewares []MiddlewareFunc + OnConnect func(ctx context.Context, info *ConnectionInfo) error + OnDisconnect func(ctx context.Context, info *ConnectionInfo, err error) +} + +// WSProxy upgrades incoming HTTP requests to WebSockets and bridges them to upstream targets. +type WSProxy struct { + opts ProxyOptions + router chi.Router + + mu sync.RWMutex + // routes are tracked in opts.Routes +} + +// ErrTargetNotConfigured indicates the target value is missing for a given parameter. +var ErrTargetNotConfigured = errors.New("wsproxy: target not configured") + +// ErrUnexpectedMessageType indicates a frame type that does not align with the configured MessageMode. +var ErrUnexpectedMessageType = errors.New("wsproxy: unexpected websocket message type") + +// NewWSProxy validates the provided options and returns a ready-to-use proxy. +func NewWSProxy(opts ProxyOptions) (*WSProxy, error) { + if opts.Upgrader == nil { + opts.Upgrader = &websocket.Upgrader{ + CheckOrigin: func(*http.Request) bool { return true }, + } + } + if opts.Dialer == nil { + opts.Dialer = websocket.DefaultDialer + } + if opts.ReconnectAttempts < 1 { + opts.ReconnectAttempts = 1 + } + if opts.ReconnectBackoff <= 0 { + opts.ReconnectBackoff = 500 * time.Millisecond + } + opts.BasePath = normalizeBasePath(opts.BasePath) + + existingRoutes := opts.Routes + existingMiddleware := opts.Middlewares + opts.Routes = nil + opts.Middlewares = nil + + proxy := &WSProxy{ + opts: opts, + router: chi.NewRouter(), + } + + for idx := range existingRoutes { + if err := proxy.AddRoute(existingRoutes[idx]); err != nil { + return nil, fmt.Errorf("wsproxy: route %d invalid: %w", idx, err) + } + } + + for _, mw := range existingMiddleware { + proxy.Use(mw) + } + + return proxy, nil +} + +// Use appends middleware handlers that can inspect or drop proxied messages. +func (p *WSProxy) Use(mw MiddlewareFunc) { + if mw == nil { + return + } + p.mu.Lock() + p.opts.Middlewares = append(p.opts.Middlewares, mw) + p.mu.Unlock() +} + +// AddRoute registers an additional proxy route at runtime. +func (p *WSProxy) AddRoute(route RouteConfig) error { + if strings.TrimSpace(route.Path) == "" && strings.TrimSpace(p.opts.BasePath) == "" { + return errors.New("wsproxy: route path cannot be empty when base path is empty") + } + if len(route.Targets) == 0 { + return fmt.Errorf("wsproxy: route %s must define at least one target", route.Path) + } + if route.Param == "" && strings.Contains(route.Path, ":") { + return fmt.Errorf("wsproxy: route %s requires Param to select target", route.Path) + } + if route.Mode != MessageModeText && route.Mode != MessageModeBinary { + route.Mode = MessageModeText + } + routeCopy := route + p.mu.Lock() + p.opts.Routes = append(p.opts.Routes, routeCopy) + cfg := &p.opts.Routes[len(p.opts.Routes)-1] + path := buildRoutePath(p.opts.BasePath, cfg.Path) + p.router.Handle(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + p.serveRoute(w, r, cfg) + })) + p.mu.Unlock() + return nil +} + +// ServeHTTP routes the request to the configured WebSocket route handlers. 
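+//
+// The proxy is an http.Handler, so it can be mounted directly; illustrative sketch
+// (not part of this change) with a hypothetical upstream URL and a chi router:
+//
+//	proxy, err := NewWSProxy(ProxyOptions{
+//		BasePath: "/ws",
+//		Routes: []RouteConfig{{
+//			Path:    "/:device",
+//			Param:   "device",
+//			Targets: map[string]string{"sim": "ws://localhost:9000/ws"},
+//			Mode:    MessageModeText,
+//		}},
+//	})
+//	if err == nil {
+//		router.Mount("/", proxy) // as EnableWSProxy does above
+//	}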
+func (p *WSProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { + p.router.ServeHTTP(w, r) +} + +func (p *WSProxy) serveRoute(w http.ResponseWriter, r *http.Request, route *RouteConfig) { + targetID := "" + if route.Param != "" { + targetID = chi.URLParam(r, route.Param) + } + + targetID, targetURL, err := resolveTarget(route, targetID) + if err != nil { + http.Error(w, ErrTargetNotConfigured.Error(), http.StatusNotFound) + return + } + + conn, err := p.opts.Upgrader.Upgrade(w, r, nil) + if err != nil { + log.Warn().Err(err).Msg("websocket upgrade failed") + return + } + defer func() { + if err := conn.Close(); err != nil { + log.Warn().Err(err).Msg("failed to close client websocket") + } + }() + + upstream, err := p.dialUpstream(r.Context(), targetURL) + if err != nil { + log.Warn().Err(err).Str("target", targetURL).Msg("websocket upstream dial failed") + _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseTryAgainLater, "upstream unavailable")) + return + } + defer func() { + if err := upstream.Close(); err != nil { + log.Warn().Err(err).Str("target", targetURL).Msg("failed to close upstream websocket") + } + }() + + connectionID := uuid.NewString() + info := &ConnectionInfo{ + ID: connectionID, + Route: route, + TargetID: targetID, + TargetURL: targetURL, + Request: r, + } + + if p.opts.OnConnect != nil { + if err := p.opts.OnConnect(r.Context(), info); err != nil { + log.Warn().Err(err).Str("connection", connectionID).Msg("wsproxy connect hook rejected client") + _ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.ClosePolicyViolation, "connection rejected")) + return + } + } + + sessionCtx, cancel := context.WithCancel(r.Context()) + defer cancel() + + var ( + wg sync.WaitGroup + resultErr error + errOnce sync.Once + ) + recordError := func(err error) { + if err == nil { + return + } + errOnce.Do(func() { + resultErr = err + cancel() + }) + } + + switch route.Mode { + case MessageModeText: + wg.Add(2) + go forward(sessionCtx, &wg, recordError, p.middlewareRunner, info, conn, upstream, DirectionClientToUpstream, websocket.TextMessage) + go forward(sessionCtx, &wg, recordError, p.middlewareRunner, info, upstream, conn, DirectionUpstreamToClient, websocket.TextMessage) + case MessageModeBinary: + wg.Add(2) + go forward(sessionCtx, &wg, recordError, p.middlewareRunner, info, conn, upstream, DirectionClientToUpstream, websocket.BinaryMessage) + go forward(sessionCtx, &wg, recordError, p.middlewareRunner, info, upstream, conn, DirectionUpstreamToClient, websocket.BinaryMessage) + default: + recordError(fmt.Errorf("wsproxy: unsupported message mode %d", route.Mode)) + } + + wg.Wait() + + if p.opts.OnDisconnect != nil { + p.opts.OnDisconnect(r.Context(), info, resultErr) + } +} + +type middlewareRunner func(ctx context.Context, msg *ProxyMessage) error + +func (p *WSProxy) middlewareRunner(ctx context.Context, msg *ProxyMessage) error { + p.mu.RLock() + middlewares := append([]MiddlewareFunc(nil), p.opts.Middlewares...) 
+ p.mu.RUnlock() + + for _, mw := range middlewares { + if mw == nil { + continue + } + if err := mw(ctx, msg); err != nil { + return err + } + if msg.Drop { + return nil + } + } + return nil +} + +func forward(ctx context.Context, wg *sync.WaitGroup, recordErr func(error), run middlewareRunner, info *ConnectionInfo, reader *websocket.Conn, writer *websocket.Conn, direction MessageDirection, expectedType int) { + defer wg.Done() + for { + select { + case <-ctx.Done(): + return + default: + } + + frameType, payload, err := reader.ReadMessage() + if err != nil { + recordErr(err) + return + } + + if frameType != expectedType { + switch frameType { + case websocket.TextMessage, websocket.BinaryMessage: + recordErr(fmt.Errorf("%w: got %d expected %d", ErrUnexpectedMessageType, frameType, expectedType)) + default: + // Ignore control frames; gorilla handles ping/pong automatically. + } + continue + } + + msg := &ProxyMessage{ + Connection: info, + Direction: direction, + Type: modeFromFrame(frameType), + Data: payload, + } + + if run != nil { + if err := run(ctx, msg); err != nil { + recordErr(err) + return + } + if msg.Drop { + continue + } + } + + if err := writer.WriteMessage(frameType, msg.Data); err != nil { + recordErr(err) + return + } + } +} + +func (p *WSProxy) dialUpstream(ctx context.Context, target string) (*websocket.Conn, error) { + var lastErr error + for attempt := 0; attempt < p.opts.ReconnectAttempts; attempt++ { + conn, _, err := p.opts.Dialer.DialContext(ctx, target, nil) + if err == nil { + return conn, nil + } + lastErr = err + log.Warn().Err(err).Str("target", target).Int("attempt", attempt+1).Msg("wsproxy upstream dial failed") + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(p.opts.ReconnectBackoff): + } + } + return nil, lastErr +} + +func modeFromFrame(frameType int) MessageMode { + switch frameType { + case websocket.BinaryMessage: + return MessageModeBinary + default: + return MessageModeText + } +} + +func resolveTarget(route *RouteConfig, requestedID string) (string, string, error) { + if route.Param != "" { + if requestedID == "" { + return "", "", ErrTargetNotConfigured + } + if url, ok := route.Targets[requestedID]; ok { + return requestedID, url, nil + } + return "", "", ErrTargetNotConfigured + } + + if requestedID != "" { + if url, ok := route.Targets[requestedID]; ok { + return requestedID, url, nil + } + } + + if url, ok := route.Targets[""]; ok { + return "", url, nil + } + + if len(route.Targets) == 1 { + for id, url := range route.Targets { + return id, url, nil + } + } + + return "", "", ErrTargetNotConfigured +} + +func buildRoutePath(base, path string) string { + base = normalizeBasePath(base) + path = strings.TrimSpace(path) + + switch { + case base == "" && (path == "" || path == "/"): + return convertColonParams("/") + case base == "": + return convertColonParams("/" + strings.TrimPrefix(path, "/")) + case path == "" || path == "/": + if base == "" { + return convertColonParams("/") + } + return convertColonParams(base) + default: + return convertColonParams(strings.TrimRight(base, "/") + "/" + strings.TrimPrefix(path, "/")) + } +} + +func normalizeBasePath(base string) string { + base = strings.TrimSpace(base) + if base == "" { + return "" + } + if !strings.HasPrefix(base, "/") { + base = "/" + base + } + if len(base) > 1 { + base = strings.TrimRight(base, "/") + if base == "" { + base = "/" + } + } + return base +} + +func convertColonParams(path string) string { + var b strings.Builder + b.Grow(len(path) + 4) + for i 
:= 0; i < len(path); i++ { + if path[i] == ':' { + j := i + 1 + for j < len(path) && path[j] != '/' { + j++ + } + if j > i+1 { + b.WriteByte('{') + b.WriteString(path[i+1 : j]) + b.WriteByte('}') + i = j - 1 + continue + } + } + b.WriteByte(path[i]) + } + result := b.String() + if result == "" { + return "/" + } + return result +} diff --git a/pkg/net/wsproxy_test.go b/pkg/net/wsproxy_test.go new file mode 100644 index 00000000..fd4228ec --- /dev/null +++ b/pkg/net/wsproxy_test.go @@ -0,0 +1,153 @@ +package net + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/gorilla/websocket" +) + +// echoServer upgrades to WebSocket and echoes what it receives. +func echoServer(w http.ResponseWriter, r *http.Request) { + upgrader := websocket.Upgrader{ + CheckOrigin: func(*http.Request) bool { return true }, + } + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + return + } + defer func() { + _ = conn.Close() + }() + for { + messageType, payload, err := conn.ReadMessage() + if err != nil { + return + } + if err := conn.WriteMessage(messageType, payload); err != nil { + return + } + } +} + +func TestWSProxy_BasicTextRoundTrip(t *testing.T) { + upstream := httptest.NewServer(http.HandlerFunc(echoServer)) + t.Cleanup(upstream.Close) + + upstreamWSURL := strings.Replace(upstream.URL, "http", "ws", 1) + + proxy, err := NewWSProxy(ProxyOptions{ + BasePath: "/ws", + Routes: []RouteConfig{{ + Path: "/:id", + Param: "id", + Targets: map[string]string{"abc": upstreamWSURL}, + Mode: MessageModeText, + }}, + }) + if err != nil { + t.Fatalf("create proxy: %v", err) + } + + server := httptest.NewServer(proxy) + t.Cleanup(server.Close) + + dialer := websocket.Dialer{} + proxyURL := "ws" + strings.TrimPrefix(server.URL, "http") + "/ws/abc" + clientConn, _, err := dialer.Dial(proxyURL, nil) + if err != nil { + t.Fatalf("dial proxy: %v", err) + } + t.Cleanup(func() { + if err := clientConn.Close(); err != nil { + t.Errorf("close client connection: %v", err) + } + }) + + payload := map[string]string{"hello": "world"} + data, err := json.Marshal(payload) + if err != nil { + t.Fatalf("marshal payload: %v", err) + } + if err := clientConn.WriteMessage(websocket.TextMessage, data); err != nil { + t.Fatalf("write message: %v", err) + } + + messageType, resp, err := clientConn.ReadMessage() + if err != nil { + t.Fatalf("read message: %v", err) + } + if messageType != websocket.TextMessage { + t.Fatalf("unexpected message type: got %d", messageType) + } + if string(resp) != string(data) { + t.Fatalf("unexpected payload: got %s want %s", resp, data) + } +} + +func TestWSProxy_MiddlewareDrop(t *testing.T) { + upstream := httptest.NewServer(http.HandlerFunc(echoServer)) + t.Cleanup(upstream.Close) + + var received atomic.Bool + + upstreamWSURL := strings.Replace(upstream.URL, "http", "ws", 1) + + proxy, err := NewWSProxy(ProxyOptions{ + BasePath: "/ws", + Routes: []RouteConfig{{ + Path: "", + Targets: map[string]string{"": upstreamWSURL}, + Mode: MessageModeBinary, + }}, + }) + if err != nil { + t.Fatalf("create proxy: %v", err) + } + + proxy.Use(func(ctx context.Context, msg *ProxyMessage) error { + if msg.Direction == DirectionClientToUpstream { + msg.Drop = true + } + return nil + }) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + received.Store(true) + proxy.ServeHTTP(w, r) + })) + t.Cleanup(server.Close) + + dialer := websocket.Dialer{} + proxyURL := "ws" + 
strings.TrimPrefix(server.URL, "http") + "/ws" + clientConn, _, err := dialer.Dial(proxyURL, nil) + if err != nil { + t.Fatalf("dial proxy: %v", err) + } + t.Cleanup(func() { + if err := clientConn.Close(); err != nil { + t.Errorf("close client connection: %v", err) + } + }) + + if err := clientConn.WriteMessage(websocket.BinaryMessage, []byte("ignored")); err != nil { + t.Fatalf("write message: %v", err) + } + + if err := clientConn.SetReadDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatalf("set read deadline: %v", err) + } + _, _, err = clientConn.ReadMessage() + if err == nil { + t.Fatalf("expected read error due to dropped message") + } + if !received.Load() { + t.Fatalf("expected HTTP handler to be invoked") + } +} diff --git a/pkg/olnk/feeder.go b/pkg/olnk/feeder.go new file mode 100644 index 00000000..3a30f88b --- /dev/null +++ b/pkg/olnk/feeder.go @@ -0,0 +1,91 @@ +package olnk + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/apigear-io/cli/pkg/log" + "github.com/apigear-io/objectlink-core-go/olink/client" + "github.com/apigear-io/objectlink-core-go/olink/core" + "github.com/apigear-io/objectlink-core-go/olink/ws" +) + +type Feeder struct { + registry *client.Registry + conn *ws.Connection + node *client.Node +} + +func NewFeeder() *Feeder { + registry := client.NewRegistry() + registry.SetSinkFactory(func(objectId string) client.IObjectSink { + return &ObjectSink{objectId: objectId} + }) + node := client.NewNode(registry) + registry.AttachClientNode(node) + + return &Feeder{ + registry: registry, + node: node, + } +} + +func (f *Feeder) Connect(ctx context.Context, addr string) error { + conn, err := ws.Dial(ctx, addr) + if err != nil { + return err + } + f.conn = conn + conn.SetOutput(f.node) + f.node.SetOutput(conn) + return nil +} + +func (f *Feeder) Close() error { + if f.conn == nil { + return nil + } + err := f.conn.Close() + if err != nil { + log.Error().Err(err).Msg("failed to close connection") + return err + } + f.conn = nil + return nil +} + +func (f *Feeder) Feed(data []byte) error { + var m core.Message + err := json.Unmarshal(data, &m) + if err != nil { + log.Error().Err(err).Msgf("invalid message: %s", data) + return err + } + s, ok := m[0].(string) + if !ok { + log.Error().Msgf("invalid message type, expected string: %v", m) + return fmt.Errorf("invalid message type, expected string: %v", m) + } + m[0] = core.MsgTypeFromString(s) + switch m[0] { + case core.MsgLink: + objectId := m.AsLink() + f.node.LinkRemoteNode(objectId) + case core.MsgUnlink: + objectId := m.AsLink() + f.node.UnlinkRemoteNode(objectId) + case core.MsgSetProperty: + propertyId, value := m.AsSetProperty() + f.node.SetRemoteProperty(propertyId, value) + case core.MsgInvoke: + _, methodId, args := m.AsInvoke() + f.node.InvokeRemote(methodId, args, func(arg client.InvokeReplyArg) { + log.Info().Msgf("<- reply %s : %v", arg.Identifier, arg.Value) + }) + default: + log.Info().Msgf("not supported message type: %v", m) + return fmt.Errorf("not supported message type: %v", m) + } + return nil +} diff --git a/pkg/olnk/sink.go b/pkg/olnk/sink.go new file mode 100644 index 00000000..57d02f14 --- /dev/null +++ b/pkg/olnk/sink.go @@ -0,0 +1,42 @@ +package olnk + +import ( + "github.com/apigear-io/cli/pkg/log" + "github.com/apigear-io/objectlink-core-go/olink/client" + "github.com/apigear-io/objectlink-core-go/olink/core" +) + +// client messages supported for feed +// - ["link", "demo.Calc"] +// - ["set", "demo.Calc/total", 20] +// - ["invoke", 1, "demo.Calc/add", [1]] 
+// - ["unlink", "demo.Calc"] +// server messages not supported for feed +// - ["init", "demo.Calc", { "total": 10 }] +// - ["change", "demo.Calc/total", 20] +// - ["reply", 1, "demo.Calc/add", 21] +// - ["signal", "demo.Calc/clearDone", []] +// - ["error", "init", 0, "init error"] + +type ObjectSink struct { + objectId string +} + +func (s *ObjectSink) ObjectId() string { + return s.objectId +} + +func (s *ObjectSink) HandleSignal(signalId string, args core.Args) { + log.Info().Msgf("<- signal %s(%v)", signalId, args) +} +func (s *ObjectSink) HandlePropertyChange(propertyId string, value core.Any) { + log.Info().Msgf("<- property %s = %v", propertyId, value) +} +func (s *ObjectSink) HandleInit(objectId string, props core.KWArgs, node *client.Node) { + s.objectId = objectId + log.Info().Msgf("<- init %s with %v", objectId, props) +} +func (s *ObjectSink) HandleRelease() { + log.Info().Msgf("<- release %s", s.objectId) + s.objectId = "" +} diff --git a/pkg/server/server.go b/pkg/server/server.go new file mode 100644 index 00000000..4af2ea7d --- /dev/null +++ b/pkg/server/server.go @@ -0,0 +1,91 @@ +package server + +import ( + "context" + + "github.com/apigear-io/cli/pkg/cfg" + "github.com/apigear-io/cli/pkg/log" + "github.com/apigear-io/cli/pkg/net" + "github.com/apigear-io/cli/pkg/sim" + "github.com/apigear-io/cli/pkg/streams" +) + +type Options struct { + NatsHost string + NatsPort int + HttpAddr string + Logging bool +} + +type Server struct { + opts Options + strman *streams.Manager + siman *sim.Manager + netman *net.NetworkManager +} + +func New(opts Options) *Server { + if opts.NatsPort == 0 { + opts.NatsPort = 4222 + } + if opts.HttpAddr == "" { + opts.HttpAddr = "localhost:5555" + } + if opts.NatsHost == "" { + opts.NatsHost = "localhost" + } + return &Server{ + opts: opts, + strman: streams.NewManager(streams.ManagerOptions{ + NatsPort: opts.NatsPort, + AppDir: cfg.ConfigDir(), + Logging: opts.Logging, + }), + siman: sim.NewManager(sim.ManagerOptions{}), + netman: net.NewManager(net.Options{ + NatsServerURL: opts.NatsHost, + HttpAddr: opts.HttpAddr, + Logging: opts.Logging, + }), + } +} + +func (s *Server) Start(ctx context.Context) error { + // start http server + // start nats server + // start stream server + log.Info().Msg("starting stream manager") + err := s.strman.Start(ctx) + if err != nil { + return err + } + // network services + log.Info().Msg("starting network manager") + err = s.netman.Start(ctx) + if err != nil { + return err + } + // simulation server + log.Info().Msg("starting simulation manager") + err = s.siman.Start(ctx, s.netman) + if err != nil { + return err + } + log.Info().Msg("server started") + return nil +} + +func (s *Server) Stop() error { + log.Info().Msg("stopping server") + return nil +} + +func (s *Server) NetworkManager() *net.NetworkManager { + return s.netman +} +func (s *Server) StreamManager() *streams.Manager { + return s.strman +} +func (s *Server) SimulationManager() *sim.Manager { + return s.siman +} diff --git a/pkg/sim/controller.go b/pkg/sim/controller.go new file mode 100644 index 00000000..120550fe --- /dev/null +++ b/pkg/sim/controller.go @@ -0,0 +1,201 @@ +package sim + +import ( + "encoding/json" + "errors" + "os" + "path/filepath" + + "github.com/nats-io/nats.go" +) + +const ( + CmdScriptStart = "script.start" + CmdScriptStop = "script.stop" + CmdFunctionRun = "function.run" + ControllerSubject = "sim.controller" +) + +type RpcRequest struct { + Action string `json:"action"` + World string `json:"world,omitempty"` + Script Script 
`json:"script,omitempty"` + Function string `json:"function,omitempty"` + FunctionArgs []any `json:"function_args,omitempty"` +} + +type RpcResponse struct { + Status string `json:"status"` + Error string `json:"error,omitempty"` + Data []byte `json:"data,omitempty"` +} + +type Controller struct { + nc *nats.Conn + sub *nats.Subscription + m *Manager +} + +func NewController(nc *nats.Conn, m *Manager) (*Controller, error) { + c := &Controller{ + nc: nc, + m: m, + } + sub, err := nc.Subscribe(ControllerSubject, c.handleMsg) + if err != nil { + return nil, err + } + c.sub = sub + return c, nil +} + +func (c *Controller) Close() error { + if c.sub != nil { + var joinedErr error + if err := c.nc.Drain(); err != nil { + joinedErr = err + } + if err := c.sub.Unsubscribe(); err != nil { + if joinedErr == nil { + joinedErr = err + } else { + joinedErr = errors.Join(joinedErr, err) + } + } + return joinedErr + } + return nil +} + +func (c *Controller) handleMsg(msg *nats.Msg) { + var req RpcRequest + err := json.Unmarshal(msg.Data, &req) + if err != nil { + c.replyError(msg, "invalid request") + return + } + switch req.Action { + case CmdScriptStart: + resp := c.handleStart(req) + c.respond(msg, resp) + case CmdScriptStop: + resp := c.handleStop(req) + c.respond(msg, resp) + case CmdFunctionRun: + resp := c.handleRunFunction(req) + c.respond(msg, resp) + default: + c.replyError(msg, "unknown action") + } + +} + +func (c *Controller) replyError(msg *nats.Msg, errMsg string) { + reply := msg.Reply + if reply == "" { + return + } + resp := RpcResponse{ + Status: "error", + Error: errMsg, + } + data, err := json.Marshal(resp) + if err != nil { + log.Error().Err(err).Msg("failed to marshal error response") + return + } + if err := msg.Respond(data); err != nil { + log.Error().Err(err).Msg("failed to respond to message") + } +} + +func (c *Controller) handleStart(req RpcRequest) RpcResponse { + c.m.ScriptRun(req.Script) + // Implement start logic here + return RpcResponse{Status: "started"} +} + +func (c *Controller) handleStop(req RpcRequest) RpcResponse { + if err := c.m.ScriptStop(req.World); err != nil { + return RpcResponse{Status: "error", Error: err.Error()} + } + return RpcResponse{Status: "stopped"} +} + +func (c *Controller) handleRunFunction(req RpcRequest) RpcResponse { + c.m.FunctionRun(req.Function, req.FunctionArgs) + // Implement function run logic here + return RpcResponse{Status: "function run"} +} + +func (c *Controller) respond(msg *nats.Msg, resp RpcResponse) { + if msg.Reply == "" { + return + } + data, err := json.Marshal(resp) + if err != nil { + c.replyError(msg, "failed to marshal response") + return + } + if err := msg.Respond(data); err != nil { + log.Error().Err(err).Msg("failed to respond to message") + } +} + +type Client struct { + nc *nats.Conn +} + +func NewClient(nc *nats.Conn) *Client { + return &Client{nc: nc} +} + +func (c *Client) SendCommand(req RpcRequest) (RpcResponse, error) { + data, err := json.Marshal(req) + if err != nil { + return RpcResponse{}, err + } + msg, err := c.nc.Request(ControllerSubject, data, nats.DefaultTimeout) + if err != nil { + return RpcResponse{}, err + } + var resp RpcResponse + err = json.Unmarshal(msg.Data, &resp) + if err != nil { + return RpcResponse{}, err + } + return resp, nil +} + +func (c *Client) RunScript(fname string) (RpcResponse, error) { + absName, error := filepath.Abs(fname) + if error != nil { + return RpcResponse{}, error + } + content, err := os.ReadFile(absName) + if err != nil { + return RpcResponse{}, err + } + + 
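+ // Wrap the file content in a script and ask the controller to start it (descriptive note, not in the original change).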
script := NewScript(absName, string(content)) + req := RpcRequest{ + Action: CmdScriptStart, + Script: script, + } + return c.SendCommand(req) +} +func (c *Client) StopScript(world string) (RpcResponse, error) { + req := RpcRequest{ + Action: CmdScriptStop, + World: world, + } + return c.SendCommand(req) +} +func (c *Client) RunFunction(function string, args []any) (RpcResponse, error) { + req := RpcRequest{ + Action: CmdFunctionRun, + Function: function, + FunctionArgs: args, + } + return c.SendCommand(req) +} diff --git a/pkg/sim/engine.go b/pkg/sim/engine.go index 26e58530..5fc6b903 100644 --- a/pkg/sim/engine.go +++ b/pkg/sim/engine.go @@ -5,6 +5,7 @@ import ( "path/filepath" "sync" + "github.com/apigear-io/cli/pkg/net" "github.com/apigear-io/objectlink-core-go/olink/remote" "github.com/dop251/goja" "github.com/dop251/goja_nodejs/console" @@ -42,7 +43,7 @@ func createPathResolver(workDir string) require.PathResolver { type EngineOptions struct { WorkDir string - Server IOlinkServer + Server net.IOlinkServer Connector IOlinkConnector } type Engine struct { @@ -50,7 +51,7 @@ type Engine struct { world *World loop *eventloop.EventLoop workDir string - server IOlinkServer + server net.IOlinkServer connector IOlinkConnector rt *goja.Runtime registry *require.Registry @@ -62,7 +63,7 @@ func NewEngine(opts EngineOptions) *Engine { opts.WorkDir = "." } if opts.Server == nil { - opts.Server = NewOlinkServer() + opts.Server = net.NewOlinkServer() } if opts.Connector == nil { opts.Connector = NewOlinkConnector() @@ -84,23 +85,23 @@ func NewEngine(opts EngineOptions) *Engine { } e.world = NewWorld(e) e.loop.Start() - + // Initial setup - wait for initialization to complete before returning // This ensures e.rt is set and the engine is fully ready done := make(chan bool) e.loop.RunOnLoop(func(rt *goja.Runtime) { - e.rt = rt // Set the runtime once during initialization + e.rt = rt // Set the runtime once during initialization rt.SetFieldNameMapper(goja.UncapFieldNameMapper()) e.world.register(rt) registry.Enable(rt) done <- true }) - <-done // Wait for initialization to complete - + <-done // Wait for initialization to complete + return e } -func (e *Engine) SetOlinkServer(server IOlinkServer) { +func (e *Engine) SetOlinkServer(server net.IOlinkServer) { e.rw.Lock() defer e.rw.Unlock() e.server = server diff --git a/pkg/sim/manager.go b/pkg/sim/manager.go index 211dff5c..b6b14a3a 100644 --- a/pkg/sim/manager.go +++ b/pkg/sim/manager.go @@ -1,32 +1,35 @@ package sim import ( + "context" + "github.com/apigear-io/cli/pkg/net" ) type ManagerOptions struct { - Server IOlinkServer } type Manager struct { engine *Engine - server IOlinkServer + netman *net.NetworkManager + opts ManagerOptions } func NewManager(opts ManagerOptions) *Manager { m := &Manager{ engine: nil, - server: opts.Server, + opts: opts, } return m } -func (m *Manager) Start(netman *net.NetworkManager) { - server := NewOlinkServer() - addr := netman.HttpServer().Address() - log.Info().Msgf("starting Olink server at ws://%s/ws", addr) - netman.HttpServer().Router().Handle("/ws", server) - m.server = server +func (m *Manager) Start(ctx context.Context, netman *net.NetworkManager) error { + m.netman = netman + return nil +} + +func (m *Manager) OlinkServer() *net.OlinkServer { + return m.netman.OlinkServer() } func (m *Manager) Stop() { @@ -40,7 +43,7 @@ func (m *Manager) ScriptRun(script Script) string { if m.engine != nil { m.engine.Close() } - m.engine = NewEngine(EngineOptions{Server: m.server, WorkDir: script.Dir}) + m.engine = 
NewEngine(EngineOptions{Server: m.OlinkServer(), WorkDir: script.Dir}) m.engine.RunScript(script.Name, script.Content) log.Info().Msgf("manager running script %s", script.Name) return script.Name diff --git a/pkg/sim/olink_server_test.go b/pkg/sim/mock_engine_test.go similarity index 75% rename from pkg/sim/olink_server_test.go rename to pkg/sim/mock_engine_test.go index 27aecff8..0295761b 100644 --- a/pkg/sim/olink_server_test.go +++ b/pkg/sim/mock_engine_test.go @@ -3,6 +3,7 @@ package sim import ( "slices" + "github.com/apigear-io/cli/pkg/net" "github.com/apigear-io/objectlink-core-go/olink/remote" ) @@ -10,7 +11,7 @@ type MockEngineServer struct { sources []remote.IObjectSource } -var _ IOlinkServer = (*MockEngineServer)(nil) +var _ net.IOlinkServer = (*MockEngineServer)(nil) func (m *MockEngineServer) RegisterSource(source remote.IObjectSource) { m.sources = append(m.sources, source) @@ -23,3 +24,5 @@ func (m *MockEngineServer) UnregisterSource(source remote.IObjectSource) { } } } + +func (m *MockEngineServer) SetSourceFactory(remote.SourceFactory) {} diff --git a/pkg/sim/null.go b/pkg/sim/null.go index 4103f378..2458627b 100644 --- a/pkg/sim/null.go +++ b/pkg/sim/null.go @@ -2,7 +2,6 @@ package sim import ( "github.com/apigear-io/objectlink-core-go/olink/client" - "github.com/apigear-io/objectlink-core-go/olink/remote" ) type NullConnector struct { @@ -36,16 +35,3 @@ func (c *NullConnector) Node(url string) *client.Node { type NullServer struct { } - -var _ IOlinkServer = (*NullServer)(nil) - -func NewNullServer() *NullServer { - return &NullServer{} -} - -func (c *NullServer) RegisterSource(sink remote.IObjectSource) { - log.Info().Msg("Register source") -} -func (c *NullServer) UnregisterSource(sink remote.IObjectSource) { - log.Info().Msg("Unregister source") -} diff --git a/pkg/spec/rules.go b/pkg/spec/rules.go index b82edc63..d75e06e9 100644 --- a/pkg/spec/rules.go +++ b/pkg/spec/rules.go @@ -201,12 +201,14 @@ func (r *FeatureRule) Validate() error { // FindScopeByMatch returns the first scope that matches the given match. func (s *FeatureRule) FindScopesByMatch(match ScopeType) []*ScopeRule { + log.Debug().Msgf("finding scopes by match: %s", match) var scopes []*ScopeRule for _, scope := range s.Scopes { if scope.Match == match { scopes = append(scopes, scope) } } + log.Debug().Msgf("found %d scopes by match: %s", len(scopes), match) return scopes } diff --git a/pkg/streams/README.md b/pkg/streams/README.md new file mode 100644 index 00000000..df5d41b3 --- /dev/null +++ b/pkg/streams/README.md @@ -0,0 +1,54 @@ +# ApiGear Streams + +## Concepts + +This package provides the streaming services for ApiGear, built on top of NATS and NATS JetStream. + +- A stream is a sequence of messages, and a subject is a string that identifies a stream. +- Messages are published to subjects, and subscribers can receive messages from subjects. +- A store is a KV store that persists state for a specific domain. +- A buffer is a temporary storage for messages that are not yet processed by subscribers. +- A consumer is an entity that subscribes to a subject and processes messages from that subject. +- A server is a NATS server instance that manages the streams, subjects, and messages. +- A monitor is a http endpoint that ingests messages from devices and forwards them to the appropriate subjects. + +``` +http -> monitor -> subject (deviceID) -> buffer (5min) -> store (recordingID) -> consumer (replay) +``` + +### Server + +Initially we launch a NATS server with JetStream enabled. 
The server manages the streams, subjects, and messages. The server has its own disk storage for persistent streams and an in-memory storage for temporary streams. +The server also loads the controller service and the buffer service, which are used to manage the streams and buffers. + +### Stream Monitor + +We then attach an HTTP monitor to the server, which listens for incoming messages from devices. The monitor forwards the messages to the appropriate subjects in the NATS server, based on the device ID. These messages are not persisted, but can be processed by subscribers in real-time. Device information is stored in the device KV store. + +### Buffer Window + +To allow recording messages from the past at a later time, we can attach a buffer to the device ID subject. The buffer stores messages temporarily until they are processed by subscribers or deleted by the retention policy (e.g. 5min). This allows us to record messages from the past, even if the subscribers were not connected at the time the messages were published. The buffer information is attached to the device KV store. + +### Recording + +We can start recording a device's monitoring data, which creates a persistent stream for the device ID subject. The stream stores all messages published to the subject and allows subscribers to receive messages from the past. Recording state is tracked in the session KV store. The stream can be configured with a retention policy (e.g. 24h), which deletes messages older than the specified duration. A recording has a session ID, which is used to identify the recording session for later retrieval. + +### Replay + +We can then replay the recorded messages from a specific recording session. This creates a consumer that subscribes to the device ID subject and processes messages from the persistent stream. The consumer can be configured with a start time, which allows replaying messages from a specific point in time, and with a speed factor, which replays messages faster or slower than real time. The consumer processes messages in the order they were published, and can be stopped and restarted as needed. + +## CLI Usage + +``` +# start a NATS server with JetStream enabled +apigear streams serve + +# generate sample messages for a device +apigear streams data generate -c 1000 -o data.jsonl -t examples/orders.tmpl + +# send sample messages to the monitor stream endpoint +apigear streams data publish --device 1234 --file data.jsonl --interval 1s + +# display the monitor stream messages in real-time +apigear streams data tail --device 1234 +``` diff --git a/pkg/streams/buffer/buffer.go b/pkg/streams/buffer/buffer.go new file mode 100644 index 00000000..bd078103 --- /dev/null +++ b/pkg/streams/buffer/buffer.go @@ -0,0 +1,176 @@ +package buffer + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/natsutil" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" +) + +// EnsureStream creates or updates the buffer stream for a device.
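+// A minimal usage sketch (not part of the original change; assumes a connected jetstream.JetStream handle named js and an illustrative device ID):
+//
+//	stream, subject, err := EnsureStream(js, "device-a", config.BufferWindow)
+//	if err != nil {
+//		return err
+//	}
+//	// stream is "STREAMS_BUFFER_DEVICE_A" and subject is "streams.buffer.DEVICE_A",
+//	// derived via config.BufferStreamName and config.BufferSubjectName.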
+func EnsureStream(js jetstream.JetStream, deviceID string, window time.Duration) (string, string, error) { + if window <= 0 { + return "", "", fmt.Errorf("buffer window must be positive") + } + streamName := config.BufferStreamName(deviceID) + subject := config.BufferSubjectName(deviceID) + + cfg := jetstream.StreamConfig{ + Name: streamName, + Subjects: []string{subject}, + Retention: jetstream.LimitsPolicy, + MaxAge: window, + Storage: jetstream.FileStorage, + } + + _, err := js.CreateOrUpdateStream(context.Background(), cfg) + if err != nil { + return "", "", err + } + return streamName, subject, nil +} + +// Append stores a monitor message in the device buffer. +func Append(ctx context.Context, js jetstream.JetStream, deviceID string, window time.Duration, msg *nats.Msg) error { + if window <= 0 { + return nil + } + _, subject, err := EnsureStream(js, deviceID, window) + if err != nil { + return err + } + + buffered := &nats.Msg{ + Subject: subject, + Header: natsutil.CloneHeader(msg.Header), + Data: append([]byte(nil), msg.Data...), + } + if buffered.Header == nil { + buffered.Header = nats.Header{} + } + buffered.Header.Set(config.HeaderBufferedAt, time.Now().UTC().Format(time.RFC3339Nano)) + + if deadline, ok := ctx.Deadline(); ok { + buffered.Header.Set(config.HeaderDeadline, deadline.Format(time.RFC3339Nano)) + } + + _, err = js.PublishMsg(ctx, buffered) + return err +} + +// Replay streams buffered messages in the given window into the provided publisher function. +func Replay(ctx context.Context, js jetstream.JetStream, deviceID string, since time.Time, until time.Time, publish func(*nats.Msg, time.Time) error) (int, time.Time, error) { + return replay(ctx, js, deviceID, since, until, publish) +} + +func replay(ctx context.Context, js jetstream.JetStream, deviceID string, since, until time.Time, publish func(*nats.Msg, time.Time) error) (int, time.Time, error) { + stream := config.BufferStreamName(deviceID) + subject := config.BufferSubjectName(deviceID) + + durable := config.BufferReplayConsumerName(deviceID) + consumer, err := js.CreateOrUpdateConsumer(context.Background(), stream, jetstream.ConsumerConfig{ + Durable: durable, + AckPolicy: jetstream.AckExplicitPolicy, + DeliverPolicy: jetstream.DeliverAllPolicy, + FilterSubject: subject, + }) + if err != nil { + if errors.Is(err, jetstream.ErrStreamNotFound) { + return 0, time.Time{}, nil + } + return 0, time.Time{}, err + } + defer func() { + _ = js.DeleteConsumer(context.Background(), stream, durable) + }() + + count := 0 + var last time.Time + + for { + err := ctx.Err() + if err != nil { + return count, last, err + } + + batch, err := consumer.Fetch(64, jetstream.FetchMaxWait(250*time.Millisecond)) + if err != nil { + if errors.Is(err, jetstream.ErrNoMessages) || errors.Is(err, nats.ErrTimeout) { + break + } + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return count, last, err + } + return count, last, err + } + + processed := 0 + for msg := range batch.Messages() { + if msg == nil { + continue + } + processed++ + + headers := natsutil.CloneHeader(msg.Headers()) + ts := parseBufferedAt(headers.Get(config.HeaderBufferedAt)) + if !ts.IsZero() { + if ts.Before(since) || ts.After(until) { + _ = msg.Ack() + continue + } + } + + buffered := &nats.Msg{ + Subject: subject, + Header: headers, + Data: append([]byte(nil), msg.Data()...), + } + + err := publish(buffered, ts) + if err != nil { + return count, last, err + } + count++ + if !ts.IsZero() { + last = ts + } + err = msg.Ack() + if err 
!= nil { + return count, last, err + } + } + + err = batch.Error() + if err != nil { + if errors.Is(err, jetstream.ErrNoMessages) || errors.Is(err, nats.ErrTimeout) { + break + } + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return count, last, err + } + return count, last, err + } + + if processed == 0 { + break + } + } + + return count, last, nil +} + +func parseBufferedAt(v string) time.Time { + if v == "" { + return time.Time{} + } + ts, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return time.Time{} + } + return ts +} diff --git a/pkg/streams/buffer/buffer_service.go b/pkg/streams/buffer/buffer_service.go new file mode 100644 index 00000000..d0ea6d0d --- /dev/null +++ b/pkg/streams/buffer/buffer_service.go @@ -0,0 +1,70 @@ +package buffer + +import ( + "context" + "strings" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/rs/zerolog/log" +) + +// BufferOptions controls the buffer runner behaviour. +type BufferOptions struct { + MonitorSubject string +} + +// RunBuffer mirrors all monitor traffic into device buffers using the global buffer window. +func RunBuffer(ctx context.Context, js jetstream.JetStream, opts BufferOptions) error { + if js == nil { + return nil + } + + subjectPrefix := opts.MonitorSubject + if subjectPrefix == "" { + subjectPrefix = config.MonitorSubject + } + + subjectPattern := subjectPrefix + ".>" + sub, err := js.Conn().Subscribe(subjectPattern, func(msg *nats.Msg) { + deviceID := extractDeviceID(subjectPrefix, msg.Subject) + if deviceID == "" { + return + } + // Always buffer with the global window + err := Append(ctx, js, deviceID, config.BufferWindow, msg) + if err != nil { + log.Error().Err(err).Str("device", deviceID).Msg("buffer append failed") + } + }) + if err != nil { + return err + } + defer func() { + if err := sub.Unsubscribe(); err != nil { + log.Warn().Err(err).Msg("buffer: unsubscribe failed") + } + }() + + log.Info(). + Str("subject", subjectPrefix). + Dur("window", config.BufferWindow). 
+ Msg("buffer runner started (all devices)") + + <-ctx.Done() + log.Info().Msg("buffer runner stopped") + return ctx.Err() +} + +func extractDeviceID(prefix, subject string) string { + if !strings.HasPrefix(subject, prefix+".") { + return "" + } + trimmed := strings.TrimPrefix(subject, prefix+".") + if trimmed == "" { + return "" + } + parts := strings.Split(trimmed, ".") + return parts[0] +} diff --git a/pkg/streams/buffer/buffer_service_test.go b/pkg/streams/buffer/buffer_service_test.go new file mode 100644 index 00000000..a10d1cd8 --- /dev/null +++ b/pkg/streams/buffer/buffer_service_test.go @@ -0,0 +1,69 @@ +package buffer_test + +import ( + "context" + "testing" + "time" + + "github.com/apigear-io/cli/pkg/streams/buffer" + "github.com/apigear-io/cli/pkg/streams/natsutil" + "github.com/nats-io/nats-server/v2/server" + "github.com/nats-io/nats.go" + "github.com/stretchr/testify/require" +) + +func TestRunBufferMirrorsMessages(t *testing.T) { + srv, err := natsutil.StartServer(natsutil.ServerConfig{Options: &server.Options{JetStream: true, StoreDir: t.TempDir()}}) + require.NoError(t, err) + t.Cleanup(srv.Shutdown) + + js, err := natsutil.ConnectJetStream(srv.ClientURL()) + require.NoError(t, err) + t.Cleanup(js.Conn().Close) + + // No need to create device metadata - buffering is now always on + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + done := make(chan error, 1) + go func() { + done <- buffer.RunBuffer(ctx, js, buffer.BufferOptions{MonitorSubject: "monitor"}) + }() + + // Brief delay to let buffer service start + time.Sleep(50 * time.Millisecond) + + pub, err := nats.Connect(srv.ClientURL()) + require.NoError(t, err) + t.Cleanup(pub.Close) + + require.NoError(t, pub.Publish("monitor.device-a", []byte(`{"temp":21}`))) + require.NoError(t, pub.Flush()) + + // Wait for append + require.Eventually(t, func() bool { + _, _, err := buffer.EnsureStream(js, "device-a", 2*time.Minute) + if err != nil { + return false + } + stream, err := js.Stream(context.Background(), "STREAMS_BUFFER_DEVICE_A") + if err != nil { + return false + } + info, err := stream.Info(context.Background()) + if err != nil { + return false + } + return info.State.Msgs > 0 + }, 2*time.Second, 100*time.Millisecond) + + cancel() + require.Eventually(t, func() bool { + select { + case err := <-done: + return err == context.Canceled || err == nil + default: + return false + } + }, time.Second, 50*time.Millisecond) +} diff --git a/pkg/streams/cli/data_generate.go b/pkg/streams/cli/data_generate.go new file mode 100644 index 00000000..ad7a0df0 --- /dev/null +++ b/pkg/streams/cli/data_generate.go @@ -0,0 +1,36 @@ +package cli + +import ( + "time" + + "github.com/apigear-io/cli/pkg/streams/msgio" + "github.com/spf13/cobra" +) + +func newStreamGenerateCmd() *cobra.Command { + opts := &msgio.GenerateOptions{ + Count: 1000, + Seed: time.Now().UnixNano(), + } + + cmd := &cobra.Command{ + Use: "generate", + Short: "generate JSONL monitor data from a template", + Long: "Render a Go template repeatedly with faker-backed helpers to build large JSONL files for testing.", + Aliases: []string{"gen"}, + GroupID: "data", + RunE: func(cmd *cobra.Command, _ []string) error { + return msgio.Generate(*opts) + }, + } + + cmd.Flags().StringVarP(&opts.TemplatePath, "template", "t", "", "Template file describing a single JSON object") + cmd.Flags().StringVarP(&opts.OutputPath, "output", "o", "", "Destination JSONL file (defaults to stdout)") + cmd.Flags().IntVarP(&opts.Count, "count", "c", opts.Count, "Number of 
JSON objects to generate") + cmd.Flags().Int64Var(&opts.Seed, "seed", opts.Seed, "Random seed for faker data") + if err := cmd.MarkFlagRequired("template"); err != nil { + cobra.CheckErr(err) + } + + return cmd +} diff --git a/pkg/streams/cli/data_publish.go b/pkg/streams/cli/data_publish.go new file mode 100644 index 00000000..f56988a9 --- /dev/null +++ b/pkg/streams/cli/data_publish.go @@ -0,0 +1,54 @@ +package cli + +import ( + "context" + "os" + "os/signal" + "syscall" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/msgio" + "github.com/spf13/cobra" +) + +func newStreamPublishCmd() *cobra.Command { + opts := &msgio.PublishOptions{ + Subject: config.MonitorSubject, + MaxLine: 8 * 1024 * 1024, + Validate: true, + Headers: map[string]string{}, + } + + cmd := &cobra.Command{ + Use: "publish", + Short: "publish JSONL messages to a NATS monitor subject", + Aliases: []string{"send", "pub"}, + GroupID: "data", + RunE: func(cmd *cobra.Command, _ []string) error { + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer cancel() + + opts.ServerURL = rootOpts.server + opts.Verbose = rootOpts.verbose + return msgio.PublishFromFile(ctx, *opts) + }, + } + + cmd.Flags().StringVarP(&opts.FilePath, "file", "f", "", "Path to JSONL file to publish") + cmd.Flags().StringVar(&opts.Subject, "subject", opts.Subject, "Base monitor subject name") + cmd.Flags().StringVar(&opts.DeviceID, "device", "", "Device identifier used to segment streams") + cmd.Flags().DurationVar(&opts.Interval, "interval", opts.Interval, "Optional delay between published messages") + cmd.Flags().IntVar(&opts.MaxLine, "max-line-bytes", opts.MaxLine, "Maximum size of a single JSON line in bytes") + cmd.Flags().BoolVar(&opts.Validate, "validate", opts.Validate, "Validate that each line contains valid JSON before publishing") + cmd.Flags().StringToStringVar(&opts.Headers, "header", opts.Headers, "Additional NATS headers to include in each message") + cmd.Flags().BoolVar(&opts.Echo, "echo", false, "Print each published message to stdout") + + if err := cmd.MarkFlagRequired("file"); err != nil { + cobra.CheckErr(err) + } + if err := cmd.MarkFlagRequired("device"); err != nil { + cobra.CheckErr(err) + } + + return cmd +} diff --git a/pkg/streams/cli/helpers.go b/pkg/streams/cli/helpers.go new file mode 100644 index 00000000..9e08c8e2 --- /dev/null +++ b/pkg/streams/cli/helpers.go @@ -0,0 +1,59 @@ +package cli + +import ( + "context" + "errors" + "os" + "os/signal" + "syscall" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/natsutil" + "github.com/apigear-io/cli/pkg/streams/session" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" +) + +func withSignalContext(parent context.Context, fn func(context.Context) error) error { + ctx, cancel := signal.NotifyContext(parent, os.Interrupt, syscall.SIGTERM) + defer cancel() + return fn(ctx) +} + +func withJetStream(ctx context.Context, fn func(jetstream.JetStream) error) error { + js, err := natsutil.ConnectJetStream(rootOpts.server) + if err != nil { + return err + } + retErr := fn(js) + if drainErr := js.Conn().Drain(); drainErr != nil { + if retErr == nil { + retErr = drainErr + } else { + retErr = errors.Join(retErr, drainErr) + } + } + return retErr +} + +func withSessionManager(ctx context.Context, bucket string, fn func(*session.SessionStore) error) error { + if bucket == "" { + bucket = config.SessionBucket + } + return withJetStream(ctx, func(js 
jetstream.JetStream) error { + mgr, err := session.NewSessionStore(js, bucket) + if err != nil { + return err + } + return fn(mgr) + }) +} + +func withNATS(_ context.Context, fn func(*nats.Conn) error) error { + nc, err := natsutil.ConnectNATS(rootOpts.server) + if err != nil { + return err + } + defer nc.Drain() + return fn(nc) +} diff --git a/pkg/streams/cli/rec_export.go b/pkg/streams/cli/rec_export.go new file mode 100644 index 00000000..0fbc3294 --- /dev/null +++ b/pkg/streams/cli/rec_export.go @@ -0,0 +1,127 @@ +package cli + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/session" + "github.com/spf13/cobra" +) + +func newStreamExportCmd() *cobra.Command { + opts := &session.ExportOptions{ + Bucket: config.SessionBucket, + } + var deviceID string + + cmd := &cobra.Command{ + Use: "export", + Short: "export a recorded stream session", + GroupID: "data", + RunE: func(cmd *cobra.Command, _ []string) error { + opts.ServerURL = rootOpts.server + opts.Verbose = rootOpts.verbose + + // Validate that either --session or --device is provided + if opts.SessionID == "" && deviceID == "" { + return fmt.Errorf("either --session or --device must be specified") + } + if opts.SessionID != "" && deviceID != "" { + return fmt.Errorf("cannot specify both --session and --device") + } + + // If device is specified, find the latest session for that device + if deviceID != "" { + var foundSession *session.Metadata + if err := withSessionManager(cmd.Context(), opts.Bucket, func(mgr *session.SessionStore) error { + sessions, err := mgr.List() + if err != nil { + return fmt.Errorf("list sessions: %w", err) + } + + // Find the most recent session for this device + var latestSession *session.Metadata + for i := range sessions { + if sessions[i].DeviceID == deviceID { + if latestSession == nil || sessions[i].Start.After(latestSession.Start) { + latestSession = &sessions[i] + } + } + } + + if latestSession == nil { + return fmt.Errorf("no sessions found for device %s", deviceID) + } + + foundSession = latestSession + opts.SessionID = latestSession.SessionID + return nil + }); err != nil { + return err + } + + // Print info about the found session + cmd.Printf("searching latest device session, found: %s, recorded at: %s\n", + foundSession.SessionID, + foundSession.Start.Format("2006-01-02 15:04:05")) + } + + file, err := resolveExportWriter(opts.OutputPath) + if err != nil { + return err + } + opts.Writer = file + defer file.Close() + + // Get message count before export using withSessionManager + var messageCount int + if err := withSessionManager(cmd.Context(), opts.Bucket, func(mgr *session.SessionStore) error { + meta, err := mgr.Info(opts.SessionID) + if err != nil { + return err + } + messageCount = meta.MessageCount + return nil + }); err != nil { + return err + } + + if err := session.Export(cmd.Context(), *opts); err != nil { + return err + } + + cmd.Printf("session %s exported to %s (%d messages)\n", opts.SessionID, opts.OutputPath, messageCount) + return nil + }, + } + + cmd.Flags().StringVar(&opts.SessionID, "session", "", "Session identifier to export") + cmd.Flags().StringVar(&deviceID, "device", "", "Device identifier (exports latest session)") + cmd.Flags().StringVar(&opts.OutputPath, "output", "", "Destination JSONL file") + cmd.MarkFlagsMutuallyExclusive("session", "device") + if err := cmd.MarkFlagRequired("output"); err != nil { + cobra.CheckErr(err) + } + + return cmd +} + +func resolveExportWriter(path 
string) (*os.File, error) { + if path == "" { + return nil, fmt.Errorf("output path cannot be empty") + } + + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0o755); err != nil { + return nil, fmt.Errorf("create export dir: %w", err) + } + + file, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("create export file: %w", err) + } + + return file, nil +} diff --git a/pkg/streams/cli/rec_import.go b/pkg/streams/cli/rec_import.go new file mode 100644 index 00000000..40cd49c4 --- /dev/null +++ b/pkg/streams/cli/rec_import.go @@ -0,0 +1,82 @@ +package cli + +import ( + "errors" + "fmt" + "os" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/session" + "github.com/spf13/cobra" +) + +func newStreamImportCmd() *cobra.Command { + opts := &session.ImportOptions{ + SessionBucket: config.SessionBucket, + } + + cmd := &cobra.Command{ + Use: "import", + Short: "import a recorded stream session from JSONL", + GroupID: "data", + RunE: func(cmd *cobra.Command, _ []string) error { + opts.ServerURL = rootOpts.server + opts.Verbose = rootOpts.verbose + + file, err := resolveImportReader(opts.InputPath) + if err != nil { + return err + } + var closeFn func() error + if file != nil { + opts.Reader = file + closeFn = file.Close + } else { + opts.Reader = os.Stdin + } + + if err := session.Import(cmd.Context(), *opts); err != nil { + if closeFn != nil { + if closeErr := closeFn(); closeErr != nil { + return errors.Join(err, closeErr) + } + } + return err + } + + if closeFn != nil { + if err := closeFn(); err != nil { + return err + } + } + + if file != nil { + cmd.Printf("session imported from %s\n", opts.InputPath) + } else { + cmd.Println("session imported from stdin") + } + return nil + }, + } + + opts.InputPath = "-" + opts.DeviceID = "123" + + cmd.Flags().StringVar(&opts.InputPath, "input", opts.InputPath, "Source JSONL file (use '-' for stdin)") + cmd.Flags().StringVar(&opts.DeviceID, "device", opts.DeviceID, "Device identifier (auto-created if doesn't exist)") + + return cmd +} + +func resolveImportReader(path string) (*os.File, error) { + if path == "" || path == "-" { + return nil, nil + } + + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("open import file: %w", err) + } + + return file, nil +} diff --git a/pkg/streams/cli/rec_list.go b/pkg/streams/cli/rec_list.go new file mode 100644 index 00000000..224e269d --- /dev/null +++ b/pkg/streams/cli/rec_list.go @@ -0,0 +1,72 @@ +package cli + +import ( + "fmt" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/session" + "github.com/spf13/cobra" +) + +func newStreamListCmd() *cobra.Command { + var deviceID string + + cmd := &cobra.Command{ + Use: "ls", + Short: "list recorded stream sessions", + Aliases: []string{"list"}, + GroupID: "session", + RunE: func(cmd *cobra.Command, _ []string) error { + return withSessionManager(cmd.Context(), config.SessionBucket, func(mgr *session.SessionStore) error { + metas, err := mgr.List() + if err != nil { + return err + } + + // Filter by device if specified + if deviceID != "" { + filtered := make([]session.Metadata, 0) + for _, meta := range metas { + if meta.DeviceID == deviceID { + filtered = append(filtered, meta) + } + } + metas = filtered + } + + if len(metas) == 0 { + if deviceID != "" { + cmd.Printf("no sessions found for device %s\n", deviceID) + } else { + cmd.Println("no sessions found") + } + return nil + } + + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "%-36s %-12s 
%-25s %-25s %-9s %s\n", + "SESSION", "DEVICE", "START", "END", "DURATION", "MESSAGES"); err != nil { + return err + } + for _, meta := range metas { + duration := meta.End.Sub(meta.Start) + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "%-36s %-12s %-25s %-25s %-9s %d\n", + meta.SessionID, + meta.DeviceID, + meta.Start.Format(time.RFC3339), + meta.End.Format(time.RFC3339), + duration.Round(time.Millisecond), + meta.MessageCount, + ); err != nil { + return err + } + } + return nil + }) + }, + } + + cmd.Flags().StringVar(&deviceID, "device", "", "Filter sessions by device identifier") + + return cmd +} diff --git a/pkg/streams/cli/rec_play.go b/pkg/streams/cli/rec_play.go new file mode 100644 index 00000000..26f5bbec --- /dev/null +++ b/pkg/streams/cli/rec_play.go @@ -0,0 +1,43 @@ +package cli + +import ( + "context" + "os" + "os/signal" + "syscall" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/session" + "github.com/spf13/cobra" +) + +func newStreamPlayCmd() *cobra.Command { + opts := &session.PlaybackOptions{ + Bucket: config.SessionBucket, + Speed: 1, + } + + cmd := &cobra.Command{ + Use: "play", + Short: "play back a recorded stream session", + Aliases: []string{"replay"}, + GroupID: "record", + RunE: func(cmd *cobra.Command, _ []string) error { + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer cancel() + + opts.ServerURL = rootOpts.server + opts.Verbose = rootOpts.verbose + return session.Playback(ctx, *opts) + }, + } + + cmd.Flags().StringVar(&opts.SessionID, "session", "", "Session identifier to replay") + cmd.Flags().StringVar(&opts.TargetSubject, "target-subject", "", "Optional override subject to publish during playback (default: "+config.PlaybackSubject+")") + cmd.Flags().Float64Var(&opts.Speed, "speed", opts.Speed, "Playback speed multiplier (e.g. 
0.25, 1, 5)") + if err := cmd.MarkFlagRequired("session"); err != nil { + cobra.CheckErr(err) + } + + return cmd +} diff --git a/pkg/streams/cli/rec_remove.go b/pkg/streams/cli/rec_remove.go new file mode 100644 index 00000000..46c0aecd --- /dev/null +++ b/pkg/streams/cli/rec_remove.go @@ -0,0 +1,71 @@ +package cli + +import ( + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/session" + "github.com/spf13/cobra" +) + +func newStreamRemoveCmd() *cobra.Command { + var sessionID string + var purgeAll bool + + cmd := &cobra.Command{ + Use: "rm", + Short: "remove a recorded stream session", + Aliases: []string{"rm"}, + GroupID: "session", + RunE: func(cmd *cobra.Command, _ []string) error { + return withSessionManager(cmd.Context(), config.SessionBucket, func(mgr *session.SessionStore) error { + if purgeAll { + // Delete all sessions + sessions, err := mgr.List() + if err != nil { + return err + } + if len(sessions) == 0 { + cmd.Println("no sessions to delete") + return nil + } + + deletedCount := 0 + failedCount := 0 + for _, meta := range sessions { + err := mgr.Delete(meta.SessionID) + if err != nil { + cmd.Printf("failed to delete session %s: %v\n", meta.SessionID, err) + failedCount++ + continue + } + cmd.Printf("deleted session %s\n", meta.SessionID) + deletedCount++ + } + + cmd.Printf("\ndeleted %d session(s)", deletedCount) + if failedCount > 0 { + cmd.Printf(", %d failed", failedCount) + } + cmd.Println() + return nil + } + + // Delete single session + err := mgr.Delete(sessionID) + if err != nil { + return err + } + cmd.Printf("session %s deleted\n", sessionID) + return nil + }) + }, + } + + cmd.Flags().StringVar(&sessionID, "session", "", "Session identifier") + cmd.Flags().BoolVar(&purgeAll, "purge-all", false, "Delete all sessions") + + // Make session flag required only when purge-all is not set + cmd.MarkFlagsOneRequired("session", "purge-all") + cmd.MarkFlagsMutuallyExclusive("session", "purge-all") + + return cmd +} diff --git a/pkg/streams/cli/rec_show.go b/pkg/streams/cli/rec_show.go new file mode 100644 index 00000000..de02f346 --- /dev/null +++ b/pkg/streams/cli/rec_show.go @@ -0,0 +1,73 @@ +package cli + +import ( + "fmt" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/session" + "github.com/spf13/cobra" +) + +func newStreamShowCmd() *cobra.Command { + var sessionID string + + cmd := &cobra.Command{ + Use: "show", + Short: "show stream session details", + Aliases: []string{"info"}, + GroupID: "session", + RunE: func(cmd *cobra.Command, _ []string) error { + return withSessionManager(cmd.Context(), config.SessionBucket, func(mgr *session.SessionStore) error { + meta, err := mgr.Info(sessionID) + if err != nil { + return err + } + + duration := meta.End.Sub(meta.Start) + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "session: %s\n", meta.SessionID); err != nil { + return err + } + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "device: %s\n", meta.DeviceID); err != nil { + return err + } + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "stream: %s\n", meta.Stream); err != nil { + return err + } + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "subject: %s\n", meta.SourceSubject); err != nil { + return err + } + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "start: %s\n", meta.Start.Format(time.RFC3339)); err != nil { + return err + } + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "end: %s\n", meta.End.Format(time.RFC3339)); err != nil { + return err + } + if _, err := fmt.Fprintf(cmd.OutOrStdout(), 
"duration: %s\n", duration.Round(time.Millisecond)); err != nil { + return err + } + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "messages: %d\n", meta.MessageCount); err != nil { + return err + } + if meta.Retention != "" { + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "retention: %s\n", meta.Retention); err != nil { + return err + } + } + if meta.Note != "" { + if _, err := fmt.Fprintf(cmd.OutOrStdout(), "note: %s\n", meta.Note); err != nil { + return err + } + } + return nil + }) + }, + } + + cmd.Flags().StringVar(&sessionID, "session", "", "Session identifier") + if err := cmd.MarkFlagRequired("session"); err != nil { + cobra.CheckErr(err) + } + + return cmd +} diff --git a/pkg/streams/cli/rec_start.go b/pkg/streams/cli/rec_start.go new file mode 100644 index 00000000..a4c7cbd9 --- /dev/null +++ b/pkg/streams/cli/rec_start.go @@ -0,0 +1,96 @@ +package cli + +import ( + "context" + "errors" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/controller" + "github.com/nats-io/nats.go" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" +) + +type recordStartOptions struct { + Subject string + DeviceID string + SessionID string + Retention time.Duration + Note string + PreRoll time.Duration +} + +func newStreamRecordCmd() *cobra.Command { + opts := &recordStartOptions{ + Subject: config.MonitorSubject, + } + + cmd := &cobra.Command{ + Use: "record", + Short: "record a stream for a device (defaults to device 123)", + Aliases: []string{"begin"}, + GroupID: "record", + RunE: func(cmd *cobra.Command, _ []string) error { + return withSignalContext(cmd.Context(), func(ctx context.Context) error { + return runRecordingStart(ctx, cmd, opts) + }) + }, + } + + cmd.Flags().StringVar(&opts.DeviceID, "device", "123", "Device identifier to record") + cmd.Flags().DurationVar(&opts.Retention, "retention", 0, "Optional JetStream retention (e.g. 24h)") + cmd.Flags().StringVar(&opts.Note, "note", "", "Optional note/description for this recording session") + cmd.Flags().DurationVar(&opts.PreRoll, "pre-roll", 0, "Optional buffer window to include before start (e.g. 
5m)") + + return cmd +} + +func runRecordingStart(ctx context.Context, cmd *cobra.Command, opts *recordStartOptions) error { + retention := "" + if opts.Retention > 0 { + retention = opts.Retention.String() + } + preRoll := "" + if opts.PreRoll > 0 { + preRoll = opts.PreRoll.String() + } + + request := controller.RpcRequest{ + Action: controller.ActionStart, + Subject: opts.Subject, + DeviceID: opts.DeviceID, + SessionID: opts.SessionID, + Retention: retention, + SessionBucket: config.SessionBucket, + Note: opts.Note, + PreRoll: preRoll, + Verbose: rootOpts.verbose, + } + + return withNATS(ctx, func(nc *nats.Conn) error { + log.Info().Str("device", opts.DeviceID).Str("subject", opts.Subject).Msg("record start request") + + resp, err := controller.SendCommand(ctx, nc, config.RecordRpcSubject, request) + if err != nil { + return err + } + if !resp.OK { + if resp.Message == "" { + return errors.New("record command failed") + } + return errors.New(resp.Message) + } + + log.Info().Str("session", resp.SessionID).Str("device", opts.DeviceID).Msg("recording started") + cmd.Printf("recording started session=%s device=%s\n", resp.SessionID, opts.DeviceID) + if rootOpts.verbose && resp.State != nil { + cmd.Printf("state: %s (subject=%s device=%s messages=%d)\n", + resp.State.Status, resp.State.Subject, resp.State.DeviceID, resp.State.MessageCount) + if !resp.State.StartedAt.IsZero() { + cmd.Printf("started: %s\n", resp.State.StartedAt.Format(time.RFC3339)) + } + } + return nil + }) +} diff --git a/pkg/streams/cli/rec_state.go b/pkg/streams/cli/rec_state.go new file mode 100644 index 00000000..39089bc2 --- /dev/null +++ b/pkg/streams/cli/rec_state.go @@ -0,0 +1,60 @@ +package cli + +import ( + "context" + "errors" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/controller" + "github.com/nats-io/nats.go/jetstream" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" +) + +func newStreamStateCmd() *cobra.Command { + var sessionID string + + cmd := &cobra.Command{ + Use: "state", + Short: "show the latest recording state", + Aliases: []string{"status"}, + GroupID: "record", + RunE: func(cmd *cobra.Command, _ []string) error { + if sessionID == "" { + return errors.New("session cannot be empty") + } + return withSignalContext(cmd.Context(), func(ctx context.Context) error { + return withJetStream(ctx, func(js jetstream.JetStream) error { + state, err := controller.FetchState(js, config.StateBucket, sessionID) + if err != nil { + return err + } + + log.Debug().Str("session", state.SessionID).Str("status", state.Status).Int("messages", state.MessageCount).Msg("record status") + + cmd.Printf("session: %s\n", state.SessionID) + cmd.Printf("status: %s\n", state.Status) + cmd.Printf("device: %s\n", state.DeviceID) + cmd.Printf("subject: %s\n", state.Subject) + cmd.Printf("messages:%d\n", state.MessageCount) + if !state.StartedAt.IsZero() { + cmd.Printf("started: %s\n", state.StartedAt.Format(time.RFC3339)) + } + if !state.LastMessageAt.IsZero() { + cmd.Printf("last-message: %s\n", state.LastMessageAt.Format(time.RFC3339)) + } + if state.LastError != "" { + cmd.Printf("error: %s\n", state.LastError) + } + cmd.Printf("updated: %s\n", state.UpdatedAt.Format(time.RFC3339)) + return nil + }) + }) + }, + } + + cmd.Flags().StringVar(&sessionID, "session", "", "Session identifier") + cmd.MarkFlagRequired("session") + return cmd +} diff --git a/pkg/streams/cli/rec_stop.go b/pkg/streams/cli/rec_stop.go new file mode 100644 index 00000000..b8673df6 --- /dev/null +++ 
b/pkg/streams/cli/rec_stop.go @@ -0,0 +1,138 @@ +package cli + +import ( + "context" + "errors" + "fmt" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/controller" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" +) + +func newStreamStopCmd() *cobra.Command { + var sessionID string + var deviceID string + + cmd := &cobra.Command{ + Use: "stop", + Short: "stop active stream recording(s)", + Long: "Stop one or more active recordings. Use --session to stop a specific session, --device to stop all sessions for a device, or omit both to stop all active recordings.", + Aliases: []string{"end"}, + GroupID: "record", + RunE: func(cmd *cobra.Command, _ []string) error { + return withSignalContext(cmd.Context(), func(ctx context.Context) error { + // Case 1: Stop specific session + if sessionID != "" { + return stopSession(ctx, cmd, sessionID) + } + + // Case 2 & 3: Stop by device or all sessions + return withJetStream(ctx, func(js jetstream.JetStream) error { + nc := js.Conn() + + // Get all active sessions + states, err := controller.ListStates(js, config.StateBucket) + if err != nil { + return fmt.Errorf("list states: %w", err) + } + + var sessionsToStop []string + + if deviceID != "" { + // Stop all sessions for a specific device + for _, state := range states { + if state.Status == "running" && state.DeviceID == deviceID { + sessionsToStop = append(sessionsToStop, state.SessionID) + } + } + if len(sessionsToStop) == 0 { + cmd.Printf("no active recordings found for device %s\n", deviceID) + return nil + } + cmd.Printf("searching for sessions for device %s\n", deviceID) + cmd.Printf("found %d active session(s)\n", len(sessionsToStop)) + } else { + // Stop all running sessions + for _, state := range states { + if state.Status == "running" { + sessionsToStop = append(sessionsToStop, state.SessionID) + } + } + if len(sessionsToStop) == 0 { + cmd.Println("no active recordings found") + return nil + } + cmd.Printf("found %d active session(s)\n", len(sessionsToStop)) + } + + // Stop each session + stoppedCount := 0 + failedCount := 0 + for _, sessionID := range sessionsToStop { + request := controller.RpcRequest{ + Action: controller.ActionStop, + SessionID: sessionID, + } + log.Debug().Str("session", sessionID).Msg("record stop request") + + resp, err := controller.SendCommand(ctx, nc, config.RecordRpcSubject, request) + if err != nil { + log.Error().Err(err).Str("session", sessionID).Msg("stop command failed") + failedCount++ + continue + } + if !resp.OK { + log.Error().Str("session", sessionID).Str("message", resp.Message).Msg("stop command failed") + failedCount++ + continue + } + + log.Debug().Str("session", resp.SessionID).Msg("recording stopped") + cmd.Printf("stopped session=%s\n", resp.SessionID) + stoppedCount++ + } + + cmd.Printf("\nstopped %d session(s)", stoppedCount) + if failedCount > 0 { + cmd.Printf(", %d failed", failedCount) + } + cmd.Println() + return nil + }) + }) + }, + } + + cmd.Flags().StringVar(&sessionID, "session", "", "Session identifier to stop") + cmd.Flags().StringVar(&deviceID, "device", "", "Stop all sessions for this device") + return cmd +} + +func stopSession(ctx context.Context, cmd *cobra.Command, sessionID string) error { + return withNATS(ctx, func(nc *nats.Conn) error { + request := controller.RpcRequest{ + Action: controller.ActionStop, + SessionID: sessionID, + } + log.Debug().Str("session", sessionID).Msg("record stop request") + + 
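+ // Send the stop request to the record controller and wait for its reply (descriptive note, not in the original change).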
resp, err := controller.SendCommand(ctx, nc, config.RecordRpcSubject, request) + if err != nil { + return err + } + if !resp.OK { + if resp.Message == "" { + return errors.New("stop command failed") + } + return errors.New(resp.Message) + } + + log.Debug().Str("session", resp.SessionID).Msg("recording stopped") + cmd.Printf("recording stopped session=%s\n", resp.SessionID) + return nil + }) +} diff --git a/pkg/streams/cli/stream.go b/pkg/streams/cli/stream.go new file mode 100644 index 00000000..33934de7 --- /dev/null +++ b/pkg/streams/cli/stream.go @@ -0,0 +1,55 @@ +package cli + +import ( + "fmt" + "os" + + "github.com/nats-io/nats.go" + "github.com/spf13/cobra" +) + +var rootOpts = struct { + server string + verbose bool +}{ + server: nats.DefaultURL, + verbose: false, +} + +func Execute() { + cmd := NewStreamCmd() + err := cmd.Execute() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func NewStreamCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "stream", + Short: "manage message streams", + Long: "manage device streams, captured live data , manages device metadata, and replays recorded sessions.", + } + cmd.PersistentFlags().StringVar(&rootOpts.server, "server", nats.DefaultURL, "NATS server URL") + cmd.PersistentFlags().BoolVarP(&rootOpts.verbose, "verbose", "v", false, "Enable verbose output") + cmd.AddGroup(&cobra.Group{ID: "record", Title: "stream recording"}) + cmd.AddGroup(&cobra.Group{ID: "session", Title: "recording sessions"}) + cmd.AddGroup(&cobra.Group{ID: "data", Title: "data"}) + cmd.AddCommand( + newStreamRecordCmd(), + newStreamStateCmd(), + newStreamPlayCmd(), + newStreamStopCmd(), + newStreamListCmd(), + newStreamShowCmd(), + newStreamRemoveCmd(), + newStreamExportCmd(), + newStreamImportCmd(), + newStreamTailCmd(), + newStreamPublishCmd(), + newStreamGenerateCmd(), + ) + + return cmd +} diff --git a/pkg/streams/cli/stream_tail.go b/pkg/streams/cli/stream_tail.go new file mode 100644 index 00000000..d39289c9 --- /dev/null +++ b/pkg/streams/cli/stream_tail.go @@ -0,0 +1,39 @@ +package cli + +import ( + "context" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/msgio" + "github.com/nats-io/nats.go" + "github.com/spf13/cobra" +) + +func newStreamTailCmd() *cobra.Command { + opts := msgio.TailOptions{ + Subject: config.MonitorSubject, + } + + cmd := &cobra.Command{ + Use: "tail", + Short: "tail a live stream from a device", + Aliases: []string{"follow", "watch"}, + GroupID: "record", + RunE: func(cmd *cobra.Command, _ []string) error { + return withSignalContext(cmd.Context(), func(ctx context.Context) error { + opts.Verbose = rootOpts.verbose + return withNATS(ctx, func(nc *nats.Conn) error { + tailer := msgio.NewTailer(nc, opts) + return tailer.Run(ctx) + }) + }) + }, + } + + cmd.Flags().StringVar(&opts.Subject, "subject", opts.Subject, "Base monitor subject name") + cmd.Flags().StringVar(&opts.DeviceID, "device", "", "Device identifier to subscribe to. If empty, subscribes to all devices.") + cmd.Flags().BoolVar(&opts.Pretty, "pretty", false, "Pretty print JSON payloads") + cmd.Flags().BoolVar(&opts.Headers, "headers", false, "Print message headers") + + return cmd +} diff --git a/pkg/streams/config/config.go b/pkg/streams/config/config.go new file mode 100644 index 00000000..b86c2182 --- /dev/null +++ b/pkg/streams/config/config.go @@ -0,0 +1,84 @@ +package config + +import ( + "fmt" + "strings" + "time" +) + +const ( + // Default JetStream bucket names. 
+ SessionBucket = "streams_session" + StateBucket = "streams_record_state" + + // Default subjects and prefixes. + RecordRpcSubject = "streams.record.rpc" + SessionSubjectPrefix = "streams.session" + BufferSubjectPrefix = "streams.buffer" + MonitorSubject = "monitor" + RecordControllerQueueGroup = "streams-record-controller" + PlaybackSubject = "streams.playback" + + // Header keys used across publishing, recording, and buffering flows. + HeaderDevice = "X-Streams-Device" + HeaderSession = "X-Streams-Session" + HeaderFile = "X-Streams-File" + HeaderRecordedAt = "X-Streams-Recorded-At" + HeaderReplayedAt = "X-Streams-Replayed-At" + HeaderBufferedAt = "X-Streams-Buffered-At" + HeaderDeadline = "X-Streams-Deadline" + HeaderPreRoll = "X-Streams-PreRoll" + + // Default buffer window for all devices (used for pre-roll) + BufferWindow = 5 * time.Minute + BufferRefresh = 15 * time.Second +) + +// SessionSubject returns the fully qualified JetStream subject used to persist +// recorded session messages for the given session identifier. +func SessionSubject(sessionID string) string { + if sessionID == "" { + return SessionSubjectPrefix + } + return fmt.Sprintf("%s.%s", SessionSubjectPrefix, sessionID) +} + +// DeviceSubject returns a device-scoped subject by concatenating the base +// subject prefix and the provided device identifier. +func DeviceSubject(base, deviceID string) string { + if base == "" || deviceID == "" { + return base + } + return fmt.Sprintf("%s.%s", base, deviceID) +} + +func SanitizeId(id string) string { + cleaned := strings.ToUpper(id) + cleaned = strings.ReplaceAll(cleaned, "-", "_") + cleaned = strings.ReplaceAll(cleaned, ".", "_") + return cleaned +} + +func BufferSubjectName(deviceID string) string { + return fmt.Sprintf("%s.%s", BufferSubjectPrefix, SanitizeId(deviceID)) +} + +func BufferStreamName(deviceID string) string { + return "STREAMS_BUFFER_" + SanitizeId(deviceID) +} + +func SubjectJoin(s ...string) string { + return strings.Join(s, ".") +} + +func ExportConsumerName(sessionID string) string { + return fmt.Sprintf("EXP_%s", SanitizeId(sessionID)) +} + +func PlaybackConsumerName(sessionID string) string { + return fmt.Sprintf("PB_%s_%d", SanitizeId(sessionID), time.Now().UnixNano()) +} + +func BufferReplayConsumerName(deviceID string) string { + return fmt.Sprintf("BUFREP_%s_%d", SanitizeId(deviceID), time.Now().UnixNano()) +} diff --git a/pkg/streams/controller/client.go b/pkg/streams/controller/client.go new file mode 100644 index 00000000..f7e2dfb9 --- /dev/null +++ b/pkg/streams/controller/client.go @@ -0,0 +1,100 @@ +package controller + +import ( + "context" + "encoding/json" + "errors" + + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" +) + +// SendCommand issues a controller command over NATS request/reply. +func SendCommand(ctx context.Context, nc *nats.Conn, subject string, cmd RpcRequest) (RpcResponse, error) { + if nc == nil { + return RpcResponse{}, errors.New("nats connection is nil") + } + if subject == "" { + subject = DefaultCommandSubject + } + + data, err := json.Marshal(cmd) + if err != nil { + return RpcResponse{}, err + } + + msg, err := nc.RequestWithContext(ctx, subject, data) + if err != nil { + return RpcResponse{}, err + } + + var resp RpcResponse + err = json.Unmarshal(msg.Data, &resp) + if err != nil { + return RpcResponse{}, err + } + return resp, nil +} + +// FetchState retrieves a session state snapshot from the controller KV bucket. 
+func FetchState(js jetstream.JetStream, bucket, sessionID string) (StateSnapshot, error) { + snap := StateSnapshot{SessionID: sessionID} + if js == nil { + return snap, errors.New("jetstream context is nil") + } + if bucket == "" { + bucket = DefaultStateBucket + } + kv, err := js.KeyValue(context.Background(), bucket) + if err != nil { + return snap, err + } + entry, err := kv.Get(context.Background(), sessionID) + if err != nil { + return snap, err + } + err = json.Unmarshal(entry.Value(), &snap) + if err != nil { + return snap, err + } + return snap, nil +} + +// ListStates retrieves all session state snapshots from the controller KV bucket. +func ListStates(js jetstream.JetStream, bucket string) ([]StateSnapshot, error) { + if js == nil { + return nil, errors.New("jetstream context is nil") + } + if bucket == "" { + bucket = DefaultStateBucket + } + kv, err := js.KeyValue(context.Background(), bucket) + if err != nil { + return nil, err + } + + keys, err := kv.Keys(context.Background()) + if err != nil { + if errors.Is(err, jetstream.ErrNoKeysFound) { + return []StateSnapshot{}, nil + } + return nil, err + } + + states := make([]StateSnapshot, 0, len(keys)) + for _, key := range keys { + entry, err := kv.Get(context.Background(), key) + if err != nil { + continue + } + var snap StateSnapshot + if err := json.Unmarshal(entry.Value(), &snap); err != nil { + continue + } + if snap.SessionID == "" { + snap.SessionID = key + } + states = append(states, snap) + } + return states, nil +} diff --git a/pkg/streams/controller/command.go b/pkg/streams/controller/command.go new file mode 100644 index 00000000..10ef5314 --- /dev/null +++ b/pkg/streams/controller/command.go @@ -0,0 +1,68 @@ +package controller + +import ( + "fmt" + "strings" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/google/uuid" +) + +// startCommand captures a validated controller start request with parsed fields. 
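+// A start request arriving on the RPC subject typically carries JSON like the
+// following (illustrative values; session_id and session_bucket may be omitted, in
+// which case a fresh UUID and config.SessionBucket are used):
+//
+//	{
+//	  "action": "start",
+//	  "subject": "monitor",
+//	  "device_id": "device-1",
+//	  "retention": "24h",
+//	  "pre_roll": "2m"
+//	}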
+type startCommand struct { + Subject string + DeviceID string + SessionID string + Retention time.Duration + SessionBucket string + Note string + PreRoll time.Duration + Verbose bool +} + +func (cmd RpcRequest) normalizeStart() (startCommand, error) { + var out startCommand + + subject := strings.TrimSpace(cmd.Subject) + if subject == "" { + return out, fmt.Errorf("subject cannot be empty") + } + out.Subject = subject + + deviceID := strings.TrimSpace(cmd.DeviceID) + out.DeviceID = deviceID + + sessionID := strings.TrimSpace(cmd.SessionID) + if sessionID == "" { + sessionID = uuid.NewString() + } + out.SessionID = sessionID + + retention, err := parseRetention(cmd.Retention) + if err != nil { + return out, err + } + out.Retention = retention + + sessionBucket := strings.TrimSpace(cmd.SessionBucket) + if sessionBucket == "" { + sessionBucket = config.SessionBucket + } + out.SessionBucket = sessionBucket + + out.Note = strings.TrimSpace(cmd.Note) + + preRoll := strings.TrimSpace(cmd.PreRoll) + if preRoll != "" { + dur, err := time.ParseDuration(preRoll) + if err != nil { + return out, fmt.Errorf("invalid pre-roll: %v", err) + } + out.PreRoll = dur + } + + out.Verbose = cmd.Verbose + + return out, nil +} diff --git a/pkg/streams/controller/service.go b/pkg/streams/controller/service.go new file mode 100644 index 00000000..cd0459ca --- /dev/null +++ b/pkg/streams/controller/service.go @@ -0,0 +1,432 @@ +package controller + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/natsutil" + "github.com/apigear-io/cli/pkg/streams/session" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/rs/zerolog/log" +) + +const ( + DefaultCommandSubject = config.RecordRpcSubject + DefaultStateBucket = config.StateBucket +) + +const ( + ActionStart = "start" + ActionStop = "stop" +) + +// RpcRequest represents an RPC request sent to the controller. +type RpcRequest struct { + Action string `json:"action"` + Subject string `json:"subject,omitempty"` + DeviceID string `json:"device_id,omitempty"` + SessionID string `json:"session_id,omitempty"` + Retention string `json:"retention,omitempty"` + SessionBucket string `json:"session_bucket,omitempty"` + Note string `json:"note,omitempty"` + PreRoll string `json:"pre_roll,omitempty"` + Verbose bool `json:"verbose,omitempty"` +} + +// RpcResponse communicates the outcome of a controller command. +type RpcResponse struct { + OK bool `json:"ok"` + Message string `json:"message,omitempty"` + SessionID string `json:"session_id,omitempty"` + State *StateSnapshot `json:"state,omitempty"` +} + +// StateSnapshot is persisted in the KV state bucket. +type StateSnapshot struct { + SessionID string `json:"session_id"` + DeviceID string `json:"device_id"` + Subject string `json:"subject"` + Status string `json:"status"` + MessageCount int `json:"message_count"` + LastError string `json:"last_error,omitempty"` + StartedAt time.Time `json:"started_at,omitempty"` + LastMessageAt time.Time `json:"last_message_at,omitempty"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Options configure the controller runtime. +type Options struct { + ServerURL string + RecordRpcSubject string + StateBucket string +} + +// NewController creates a new controller instance with the provided options. 
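+// Example wiring (illustrative sketch, assuming a connected jetstream.JetStream js
+// and the URL of the NATS server it talks to):
+//
+//	ctrl, err := NewController(js, Options{ServerURL: serverURL})
+//	if err != nil {
+//		return err
+//	}
+//	if err := ctrl.Start(); err != nil {
+//		return err
+//	}
+//	defer ctrl.Close()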
+func NewController(js jetstream.JetStream, opts Options) (*Controller, error) { + if js == nil { + return nil, errors.New("jetstream context is nil") + } + if opts.RecordRpcSubject == "" { + opts.RecordRpcSubject = config.RecordRpcSubject + } + if opts.StateBucket == "" { + opts.StateBucket = config.StateBucket + } + if opts.ServerURL == "" { + return nil, errors.New("server URL is required") + } + + ctx := context.Background() + kv, err := natsutil.EnsureKeyValue(ctx, js, opts.StateBucket) + if err != nil { + return nil, fmt.Errorf("state bucket %s: %w", opts.StateBucket, err) + } + + return &Controller{ + js: js, + opts: opts, + stateKV: kv, + jobs: map[string]*recordJob{}, + }, nil +} + +type Controller struct { + js jetstream.JetStream + opts Options + stateKV jetstream.KeyValue + + mu sync.Mutex + jobs map[string]*recordJob + sub *nats.Subscription +} + +type recordJob struct { + cancel context.CancelFunc + done chan struct{} +} + +// Start begins listening for RPC commands on the configured subject. +func (c *Controller) Start() error { + log.Info(). + Str("subject", c.opts.RecordRpcSubject). + Str("queue", config.RecordControllerQueueGroup). + Msg("starting record controller") + sub, err := c.js.Conn().QueueSubscribe(c.opts.RecordRpcSubject, config.RecordControllerQueueGroup, c.handleMsg) + if err != nil { + return fmt.Errorf("subscribe %s: %w", c.opts.RecordRpcSubject, err) + } + + c.mu.Lock() + c.sub = sub + c.mu.Unlock() + + log.Info(). + Str("subject", c.opts.RecordRpcSubject). + Str("queue", config.RecordControllerQueueGroup). + Msg("record controller started") + return nil +} + +// Close gracefully shuts down the controller by unsubscribing and stopping all jobs. +func (c *Controller) Close() { + c.mu.Lock() + if c.sub != nil { + if err := c.sub.Drain(); err != nil { + log.Warn().Err(err).Str("subject", c.opts.RecordRpcSubject).Msg("failed to drain subscription") + } + c.sub = nil + } + c.mu.Unlock() + + c.stopAll() +} + +func (c *Controller) stopAll() { + c.mu.Lock() + jobs := make([]*recordJob, 0, len(c.jobs)) + for sessionID, job := range c.jobs { + jobs = append(jobs, job) + delete(c.jobs, sessionID) + } + c.mu.Unlock() + + for _, job := range jobs { + job.cancel() + <-job.done + } +} + +func (c *Controller) handleMsg(msg *nats.Msg) { + var req RpcRequest + err := json.Unmarshal(msg.Data, &req) + if err != nil { + log.Error().Err(err).Msg("invalid command payload") + c.respondError(msg, "invalid command payload: %v", err) + return + } + + switch strings.ToLower(req.Action) { + case ActionStart: + log.Debug().Str("session", req.SessionID).Str("device", req.DeviceID).Msg("handling start command") + resp := c.handleStart(req) + c.respond(msg, resp) + case ActionStop: + log.Debug().Str("session", req.SessionID).Msg("handling stop command") + resp := c.handleStop(req) + c.respond(msg, resp) + default: + log.Warn().Str("action", req.Action).Msg("unknown controller action") + c.respondError(msg, "unknown action %q", req.Action) + } +} + +func (c *Controller) handleStart(req RpcRequest) RpcResponse { + start, err := req.normalizeStart() + if err != nil { + log.Warn().Err(err).Str("action", req.Action).Msg("start command invalid") + resp := RpcResponse{Message: err.Error()} + if start.SessionID != "" { + resp.SessionID = start.SessionID + } + return resp + } + + if start.PreRoll > 0 { + if start.PreRoll > config.BufferWindow { + return RpcResponse{Message: fmt.Sprintf("pre-roll %s exceeds buffer window %s", start.PreRoll, config.BufferWindow), SessionID: start.SessionID} + } + } + 
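+	// Pre-roll is served from the device's rolling buffer, which only retains
+	// config.BufferWindow of history, so larger pre-roll values are rejected above.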
+ // Device ID is required for recording + if start.DeviceID == "" { + return RpcResponse{Message: "device-id is required for recording", SessionID: start.SessionID} + } + + job := &recordJob{done: make(chan struct{})} + + c.mu.Lock() + if _, exists := c.jobs[start.SessionID]; exists { + c.mu.Unlock() + log.Warn().Str("session", start.SessionID).Msg("start command rejected: already running") + return RpcResponse{Message: fmt.Sprintf("session %s already running", start.SessionID), SessionID: start.SessionID} + } + + // Check if device is already being recorded + for existingSessionID, existingJob := range c.jobs { + if existingJob == nil { + continue + } + // Load existing session state to check device ID + existingState, err := c.loadState(existingSessionID) + if err == nil && existingState.DeviceID == start.DeviceID && existingState.Status == "running" { + c.mu.Unlock() + log.Warn().Str("device", start.DeviceID).Str("existing_session", existingSessionID).Msg("start command rejected: device already being recorded") + return RpcResponse{Message: fmt.Sprintf("device %s already being recorded by session %s", start.DeviceID, existingSessionID), SessionID: start.SessionID} + } + } + + ctx, cancel := context.WithCancel(context.Background()) + job.cancel = cancel + c.jobs[start.SessionID] = job + c.mu.Unlock() + + started := time.Now().UTC() + state := StateSnapshot{ + SessionID: start.SessionID, + DeviceID: start.DeviceID, + Subject: start.Subject, + Status: "running", + MessageCount: 0, + StartedAt: started, + } + _ = c.writeState(state) + + go c.runRecord(ctx, job, start, started) + + log.Info().Str("session", start.SessionID).Str("device", start.DeviceID).Msg("recording job launched") + return RpcResponse{OK: true, Message: "recording started", SessionID: start.SessionID, State: &state} +} + +func (c *Controller) runRecord(ctx context.Context, job *recordJob, start startCommand, started time.Time) { + log.Info().Str("session", start.SessionID).Str("device", start.DeviceID).Str("server", c.opts.ServerURL).Msg("recording job started") + defer func() { + close(job.done) + c.mu.Lock() + delete(c.jobs, start.SessionID) + c.mu.Unlock() + }() + + opts := session.RecordOptions{ + ServerURL: c.opts.ServerURL, + Subject: start.Subject, + DeviceID: start.DeviceID, + SessionID: start.SessionID, + Retention: start.Retention, + SessionBucket: start.SessionBucket, + Note: start.Note, + Verbose: start.Verbose, + PreRoll: start.PreRoll, + } + + opts.Progress = func(meta session.Metadata) { + snap := StateSnapshot{ + SessionID: meta.SessionID, + DeviceID: meta.DeviceID, + Subject: meta.SourceSubject, + Status: "running", + MessageCount: meta.MessageCount, + StartedAt: started, + LastMessageAt: meta.End, + } + err := c.writeState(snap) + if err != nil { + log.Error().Err(err).Str("session", meta.SessionID).Msg("update state failed") + } + } + + meta, err := session.Record(ctx, opts) + + state := StateSnapshot{ + SessionID: start.SessionID, + DeviceID: start.DeviceID, + Subject: start.Subject, + StartedAt: started, + LastMessageAt: time.Now().UTC(), + } + + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + state.Status = "stopped" + } else { + state.Status = "error" + state.LastError = err.Error() + } + } else { + state.Status = "stopped" + if meta != nil { + state.MessageCount = meta.MessageCount + state.DeviceID = meta.DeviceID + state.Subject = meta.SourceSubject + state.LastMessageAt = meta.End + } + } + + _ = c.writeState(state) +} + +func (c *Controller) 
handleStop(req RpcRequest) RpcResponse { + sessionID := strings.TrimSpace(req.SessionID) + if sessionID == "" { + return RpcResponse{Message: "session-id cannot be empty"} + } + + c.mu.Lock() + job, jobExists := c.jobs[sessionID] + c.mu.Unlock() + + if !jobExists { + // nothing running, but update state to stopped + snap, err := c.loadState(sessionID) + if err != nil { + log.Error().Err(err).Str("session", sessionID).Msg("load state failed") + return RpcResponse{Message: fmt.Sprintf("load state: %v", err), SessionID: sessionID} + } + snap.Status = "stopped" + snap.LastError = "" + if snap.StartedAt.IsZero() { + snap.StartedAt = time.Now().UTC() + } + _ = c.writeState(snap) + return RpcResponse{OK: true, SessionID: sessionID, Message: "no active recording"} + } + + job.cancel() + <-job.done + + log.Info().Str("session", sessionID).Msg("recording job signaled to stop") + return RpcResponse{OK: true, SessionID: sessionID, Message: "recording stopped"} +} + +func (c *Controller) respond(msg *nats.Msg, resp RpcResponse) { + if !resp.OK && resp.Message == "" { + resp.Message = "command failed" + } + data, _ := json.Marshal(resp) + log.Debug().Str("session", resp.SessionID).Bool("ok", resp.OK).Msg("command response") + _ = msg.Respond(data) +} + +func (c *Controller) respondError(msg *nats.Msg, format string, args ...any) { + resp := RpcResponse{OK: false, Message: fmt.Sprintf(format, args...)} + data, _ := json.Marshal(resp) + log.Error().Msgf(format, args...) + _ = msg.Respond(data) +} + +func (c *Controller) writeState(state StateSnapshot) error { + if state.SessionID == "" { + return errors.New("state missing session id") + } + if state.Subject == "" || state.DeviceID == "" { + prev, err := c.loadState(state.SessionID) + if err == nil { + if state.Subject == "" { + state.Subject = prev.Subject + } + if state.DeviceID == "" { + state.DeviceID = prev.DeviceID + } + if state.MessageCount == 0 { + state.MessageCount = prev.MessageCount + } + if state.StartedAt.IsZero() { + state.StartedAt = prev.StartedAt + } + if state.LastMessageAt.IsZero() { + state.LastMessageAt = prev.LastMessageAt + } + } + } + state.UpdatedAt = time.Now().UTC() + data, err := json.Marshal(state) + if err != nil { + return err + } + _, err = c.stateKV.Put(context.Background(), state.SessionID, data) + return err +} + +func (c *Controller) loadState(sessionID string) (StateSnapshot, error) { + value := StateSnapshot{SessionID: sessionID} + entry, err := c.stateKV.Get(context.Background(), sessionID) + if err != nil { + if errors.Is(err, jetstream.ErrKeyNotFound) { + return value, nil + } + return value, err + } + err = json.Unmarshal(entry.Value(), &value) + if err != nil { + return value, err + } + return value, nil +} + +func parseRetention(value string) (time.Duration, error) { + value = strings.TrimSpace(value) + if value == "" { + return 0, nil + } + d, err := time.ParseDuration(value) + if err != nil { + return 0, fmt.Errorf("invalid retention duration: %w", err) + } + return d, nil +} diff --git a/pkg/streams/controller/service_test.go b/pkg/streams/controller/service_test.go new file mode 100644 index 00000000..d0a1a26d --- /dev/null +++ b/pkg/streams/controller/service_test.go @@ -0,0 +1,253 @@ +package controller_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/apigear-io/cli/pkg/streams/buffer" + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/controller" + "github.com/apigear-io/cli/pkg/streams/natsutil" + 
"github.com/apigear-io/cli/pkg/streams/session" + "github.com/nats-io/nats-server/v2/server" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/stretchr/testify/require" +) + +func TestControllerStartStop(t *testing.T) { + h := newControllerHarness(t) + t.Cleanup(h.Close) + + client := h.NewClientConn() + defer client.Close() + + sessionID := "test-session" + subject := "monitor" + deviceID := "device-1" + + startResp := sendStart(t, client, controller.RpcRequest{ + Action: controller.ActionStart, + Subject: subject, + DeviceID: deviceID, + SessionID: sessionID, + SessionBucket: session.DefaultBucket, + }) + require.True(t, startResp.OK, "start response should succeed: %s", startResp.Message) + + // Allow recorder goroutine to establish subscriptions. + time.Sleep(100 * time.Millisecond) + + publisher := h.NewClientConn() + defer publisher.Close() + + for i := 0; i < 3; i++ { + msg := fmt.Sprintf("{\"i\":%d}", i) + require.NoError(t, publisher.Publish(subject+"."+deviceID, []byte(msg))) + } + require.NoError(t, publisher.Flush()) + + require.Eventually(t, func() bool { + snap, err := controller.FetchState(h.ctrlJS, controller.DefaultStateBucket, sessionID) + if err != nil { + return false + } + return snap.Status == "running" && snap.MessageCount == 3 + }, 3*time.Second, 100*time.Millisecond, "controller did not capture messages") + + stopResp := sendStop(t, client, sessionID) + require.True(t, stopResp.OK, "stop response should succeed: %s", stopResp.Message) + + require.Eventually(t, func() bool { + snap, err := controller.FetchState(h.ctrlJS, controller.DefaultStateBucket, sessionID) + if err != nil { + return false + } + return snap.Status == "stopped" && snap.MessageCount == 3 + }, 3*time.Second, 100*time.Millisecond, "controller state not updated after stop") + + mgr, err := session.NewSessionStore(h.ctrlJS, session.DefaultBucket) + require.NoError(t, err) + meta, _, err := mgr.Load(sessionID) + require.NoError(t, err) + require.Equal(t, 3, meta.MessageCount) +} + +func TestControllerDuplicateStart(t *testing.T) { + h := newControllerHarness(t) + t.Cleanup(h.Close) + + client := h.NewClientConn() + defer client.Close() + + cmd := controller.RpcRequest{ + Action: controller.ActionStart, + Subject: "monitor", + DeviceID: "device-1", + SessionID: "dup-session", + SessionBucket: session.DefaultBucket, + } + + resp := sendStart(t, client, cmd) + require.True(t, resp.OK) + + dup := sendStart(t, client, cmd) + require.False(t, dup.OK) + require.Contains(t, dup.Message, "already running") +} + +func TestControllerPreRoll(t *testing.T) { + h := newControllerHarness(t) + t.Cleanup(h.Close) + + // Create buffer stream (buffering is now always on) + _, subject, err := buffer.EnsureStream(h.ctrlJS, "preroll-device", 5*time.Minute) + require.NoError(t, err) + + recordedAt := time.Now().Add(-30 * time.Second).UTC() + msg := &nats.Msg{ + Subject: subject, + Header: nats.Header{}, + Data: []byte(`{"preroll":true}`), + } + msg.Header.Set(config.HeaderBufferedAt, recordedAt.Format(time.RFC3339Nano)) + _, err = h.ctrlJS.PublishMsg(context.Background(), msg) + require.NoError(t, err) + + client := h.NewClientConn() + defer client.Close() + + resp := sendStart(t, client, controller.RpcRequest{ + Action: controller.ActionStart, + Subject: "monitor", + DeviceID: "preroll-device", + SessionID: "preroll-session", + SessionBucket: session.DefaultBucket, + PreRoll: "2m", + }) + require.True(t, resp.OK, resp.Message) + + require.Eventually(t, func() bool { + snap, err := 
controller.FetchState(h.ctrlJS, controller.DefaultStateBucket, "preroll-session") + return err == nil && snap.Status == "running" && snap.MessageCount >= 1 + }, 2*time.Second, 100*time.Millisecond, "pre-roll data not observed") + + stop := sendStop(t, client, "preroll-session") + require.True(t, stop.OK) + + mgr, err := session.NewSessionStore(h.ctrlJS, session.DefaultBucket) + require.NoError(t, err) + meta, _, err := mgr.Load("preroll-session") + require.NoError(t, err) + require.GreaterOrEqual(t, meta.MessageCount, 1) + + require.Eventually(t, func() bool { + snap, err := controller.FetchState(h.ctrlJS, controller.DefaultStateBucket, "preroll-session") + return err == nil && snap.Status == "stopped" && snap.MessageCount >= 1 + }, 2*time.Second, 100*time.Millisecond) +} + +func TestControllerStopWithoutStart(t *testing.T) { + h := newControllerHarness(t) + t.Cleanup(h.Close) + + client := h.NewClientConn() + defer client.Close() + + resp := sendStop(t, client, "missing-session") + require.True(t, resp.OK) + require.Equal(t, "no active recording", resp.Message) +} + +func TestControllerInvalidAction(t *testing.T) { + h := newControllerHarness(t) + t.Cleanup(h.Close) + + client := h.NewClientConn() + defer client.Close() + + resp, err := controller.SendCommand(context.Background(), client, controller.DefaultCommandSubject, controller.RpcRequest{Action: "bogus"}) + require.NoError(t, err) + require.False(t, resp.OK) + require.Contains(t, resp.Message, "unknown action") +} + +type controllerHarness struct { + t *testing.T + srv *natsutil.ServerHandle + ctrl *controller.Controller + ctrlJS jetstream.JetStream + serverURL string +} + +func newControllerHarness(t *testing.T) *controllerHarness { + t.Helper() + + srv, err := natsutil.StartServer(natsutil.ServerConfig{ + Options: &server.Options{ + JetStream: true, + StoreDir: t.TempDir(), + }, + }) + require.NoError(t, err) + + js, err := natsutil.ConnectJetStream(srv.ClientURL()) + require.NoError(t, err) + + ctrl, err := controller.NewController(js, controller.Options{ServerURL: srv.ClientURL()}) + require.NoError(t, err) + + err = ctrl.Start() + require.NoError(t, err) + + harness := &controllerHarness{ + t: t, + srv: srv, + ctrl: ctrl, + ctrlJS: js, + serverURL: srv.ClientURL(), + } + + // Give the subscription a moment to be registered. 
+ time.Sleep(50 * time.Millisecond) + + return harness +} + +func (h *controllerHarness) NewClientConn() *nats.Conn { + h.t.Helper() + conn, err := nats.Connect(h.serverURL) + require.NoError(h.t, err) + h.t.Cleanup(func() { + _ = conn.Drain() + }) + return conn +} + +func (h *controllerHarness) Close() { + h.t.Helper() + h.ctrl.Close() + if err := h.ctrlJS.Conn().Drain(); err != nil { + h.t.Errorf("drain controller jetstream connection: %v", err) + } + h.srv.Shutdown() +} + +func sendStart(t *testing.T, nc *nats.Conn, cmd controller.RpcRequest) controller.RpcResponse { + t.Helper() + resp, err := controller.SendCommand(context.Background(), nc, controller.DefaultCommandSubject, cmd) + require.NoError(t, err) + return resp +} + +func sendStop(t *testing.T, nc *nats.Conn, sessionID string) controller.RpcResponse { + t.Helper() + resp, err := controller.SendCommand(context.Background(), nc, controller.DefaultCommandSubject, controller.RpcRequest{ + Action: controller.ActionStop, + SessionID: sessionID, + }) + require.NoError(t, err) + return resp +} diff --git a/pkg/streams/logging/logging.go b/pkg/streams/logging/logging.go new file mode 100644 index 00000000..ffd9844d --- /dev/null +++ b/pkg/streams/logging/logging.go @@ -0,0 +1,22 @@ +package logging + +import ( + "os" + "time" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +// Configure sets up zerolog for the application. +func Configure(verbose bool) { + level := zerolog.InfoLevel + if verbose { + level = zerolog.DebugLevel + } + + zerolog.TimeFieldFormat = time.RFC3339 + output := zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.TimeOnly} + logger := zerolog.New(output).With().Timestamp().Logger().Level(level) + log.Logger = logger +} diff --git a/pkg/streams/manager.go b/pkg/streams/manager.go new file mode 100644 index 00000000..448f6286 --- /dev/null +++ b/pkg/streams/manager.go @@ -0,0 +1,156 @@ +package streams + +import ( + "context" + "errors" + "path" + "time" + + "github.com/apigear-io/cli/pkg/log" + "github.com/apigear-io/cli/pkg/streams/buffer" + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/controller" + "github.com/nats-io/nats-server/v2/server" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" +) + +const ( + NatsTimeout = 10 * time.Second +) + +type ManagerOptions struct { + NatsPort int + AppDir string + Logging bool +} + +type Manager struct { + js jetstream.JetStream + srv *server.Server + nc *nats.Conn + opts ManagerOptions + controller *controller.Controller + serverURL string +} + +func NewManager(opts ManagerOptions) *Manager { + return &Manager{ + opts: opts, + } +} + +func (m *Manager) Start(ctx context.Context) error { + log.Info().Msg("starting streams manager") + err := m.runServer() + if err != nil { + return err + } + err = m.runServices(ctx) + if err != nil { + return err + } + return nil +} + +func (m *Manager) runServer() error { + if m.srv != nil && m.srv.ReadyForConnections(0) { + return nil + } + m.srv = server.New(&server.Options{ + Port: m.opts.NatsPort, + JetStream: true, + StoreDir: path.Join(m.opts.AppDir, "nats"), + }) + if m.opts.Logging { + m.srv.ConfigureLogger() + } + m.srv.Start() + // wait for server to be ready + if !m.srv.ReadyForConnections(NatsTimeout) { + m.srv.Shutdown() + return errors.New("nats server not ready in time") + } + m.serverURL = m.srv.ClientURL() + log.Info().Str("url", m.serverURL).Msg("NATS server started") + log.Debug().Str("url", m.serverURL).Msg("connecting to NATS without 
in-process option") + // connect to server using TCP so downstream clients observe a routable address + nc, err := nats.Connect(m.serverURL) + if err != nil { + m.srv.Shutdown() + return err + } + m.nc = nc + // create jetstream context + js, err := jetstream.New(m.nc) + if err != nil { + m.nc.Close() + m.srv.Shutdown() + return err + } + m.js = js + log.Info().Str("connected", js.Conn().ConnectedUrl()).Msg("JetStream connected") + return nil +} + +func (m *Manager) runServices(ctx context.Context) error { + // Create and start controller + ctrl, err := controller.NewController(m.js, controller.Options{ + ServerURL: m.serverURL, + RecordRpcSubject: config.RecordRpcSubject, + StateBucket: config.StateBucket, + }) + if err != nil { + return err + } + if err := ctrl.Start(); err != nil { + return err + } + m.controller = ctrl + + // Start buffer service in background + go func() { + err := buffer.RunBuffer(ctx, m.js, buffer.BufferOptions{ + MonitorSubject: config.MonitorSubject, + }) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error().Err(err).Msg("buffer service error") + } + }() + + return nil +} + +func (m *Manager) ClientURL() string { + if m.serverURL != "" { + return m.serverURL + } + return nats.DefaultURL +} + +func (m *Manager) JetStream() jetstream.JetStream { + return m.js +} + +func (m *Manager) Shutdown() error { + if m.controller != nil { + m.controller.Close() + } + if m.nc != nil && !m.nc.IsClosed() { + m.nc.Close() + } + if m.srv != nil { + m.srv.Shutdown() + } + return nil +} + +func (m *Manager) Connection() (*nats.Conn, error) { + if m.nc == nil || m.nc.IsClosed() { + return nil, errors.New("nats server not started") + } + if m.srv == nil || !m.srv.ReadyForConnections(0) { + return nil, errors.New("nats server not ready") + } + return m.nc, nil +} diff --git a/pkg/streams/monitor.go b/pkg/streams/monitor.go new file mode 100644 index 00000000..3fbad24a --- /dev/null +++ b/pkg/streams/monitor.go @@ -0,0 +1,72 @@ +package streams + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/apigear-io/cli/pkg/log" + "github.com/apigear-io/cli/pkg/mon" + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/nats-io/nats.go" +) + +func PublishMonitorMessage(nc *nats.Conn, deviceId string, data []byte) error { + log.Debug().Msgf("publish monitor message from device %s", deviceId) + return nc.Publish(config.DeviceSubject(config.MonitorSubject, deviceId), data) +} + +// PublishMonitorMessageBulk publishes multiple monitor events efficiently using NATS headers +// and bulk publishing with a single flush operation. 
+func PublishMonitorMessageBulk(nc *nats.Conn, events []*mon.Event) error { + if nc == nil { + return fmt.Errorf("nats connection is nil") + } + if len(events) == 0 { + return nil + } + + log.Debug().Msgf("bulk publish %d monitor messages", len(events)) + + // Publish all messages (client buffers automatically) + for _, event := range events { + // Marshal only the Data payload (not the full event) + data, err := json.Marshal(event.Data) + if err != nil { + return fmt.Errorf("marshal event data: %w", err) + } + + // Create message with headers for metadata + subject := config.DeviceSubject(config.MonitorSubject, event.Device) + msg := &nats.Msg{ + Subject: subject, + Header: nats.Header{}, + Data: data, + } + + // Add metadata as NATS headers + msg.Header.Set("X-Monitor-Device", event.Device) + msg.Header.Set("X-Monitor-Id", event.Id) + msg.Header.Set("X-Monitor-Type", string(event.Type)) + msg.Header.Set("X-Monitor-Timestamp", event.Timestamp.Format("2006-01-02T15:04:05.999999999Z07:00")) + msg.Header.Set("X-Monitor-Symbol", event.Symbol) + + // Publish (buffered by client) + if err := nc.PublishMsg(msg); err != nil { + return fmt.Errorf("publish event %s: %w", event.Id, err) + } + } + + // FlushTimeout ensures all buffered messages are sent AND confirmed by server + // This waits for a PING/PONG roundtrip, guaranteeing the server has received all messages + if err := nc.FlushTimeout(5 * time.Second); err != nil { + return fmt.Errorf("flush timeout: %w", err) + } + + // Check for any async publish errors + if err := nc.LastError(); err != nil { + return fmt.Errorf("nats error: %w", err) + } + + return nil +} diff --git a/pkg/streams/msgio/generate.go b/pkg/streams/msgio/generate.go new file mode 100644 index 00000000..dcd53143 --- /dev/null +++ b/pkg/streams/msgio/generate.go @@ -0,0 +1,327 @@ +package msgio + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + "time" + + "github.com/brianvoe/gofakeit/v7" + "github.com/google/uuid" + "github.com/rs/zerolog/log" +) + +// GenerateOptions controls how JSONL data is synthesized from a template. +type GenerateOptions struct { + TemplatePath string + OutputPath string + Count int + Seed int64 +} + +// Generate renders the template Count times, writing JSONL output to the destination. +func Generate(opts GenerateOptions) error { + if err := validateOptions(opts); err != nil { + return err + } + + tpl, err := parseTemplate(opts) + if err != nil { + return err + } + + writer, closeFn, err := openOutput(opts.OutputPath) + if err != nil { + return err + } + if closeFn != nil { + defer func() { + if closeErr := closeFn(); closeErr != nil { + log.Error().Err(closeErr).Msg("failed to close output writer") + } + }() + } + + return renderRecords(opts.Count, tpl, writer) +} + +// validateOptions checks that the provided options are valid. +func validateOptions(opts GenerateOptions) error { + if opts.Count <= 0 { + return errors.New("count must be positive") + } + if opts.TemplatePath == "" { + return errors.New("template path cannot be empty") + } + return nil +} + +// parseTemplate reads and parses the template file specified in opts. 
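+// A minimal template that parses and renders cleanly could look like the following
+// (illustrative; it uses only functions registered in newTemplateFuncMap below):
+//
+//	{ "id": "{{uuid}}", "seq": {{seq}}, "ts": "{{timestamp}}",
+//	  "name": "{{faker "person.name"}}", "value": {{randInt 0 100}} }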
+func parseTemplate(opts GenerateOptions) (*template.Template, error) { + tplData, err := os.ReadFile(opts.TemplatePath) + if err != nil { + return nil, fmt.Errorf("read template: %w", err) + } + + tpl, err := template.New(filepath.Base(opts.TemplatePath)).Funcs(newTemplateFuncMap(opts)).Parse(string(tplData)) + if err != nil { + return nil, fmt.Errorf("parse template: %w", err) + } + return tpl, nil +} + +func openOutput(path string) (io.Writer, func() error, error) { + if path == "" || path == "-" { + return os.Stdout, nil, nil + } + + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + return nil, nil, fmt.Errorf("create output dir: %w", err) + } + + f, err := os.Create(path) + if err != nil { + return nil, nil, fmt.Errorf("create output: %w", err) + } + return f, f.Close, nil +} + +// renderRecords renders count records using the provided template and writes them to the writer. +func renderRecords(count int, tpl *template.Template, writer io.Writer) error { + buf := bytes.NewBuffer(nil) + for i := 0; i < count; i++ { + line, err := renderRecord(tpl, buf, i) + if err != nil { + return err + } + if line == "" { + continue + } + if _, err := writer.Write([]byte(line)); err != nil { + return fmt.Errorf("write record %d: %w", i+1, err) + } + } + return nil +} + +// renderRecord renders a single record using the provided template and buffer. +func renderRecord(tpl *template.Template, buf *bytes.Buffer, index int) (string, error) { + buf.Reset() + if err := tpl.Execute(buf, nil); err != nil { + return "", fmt.Errorf("render record %d: %w", index+1, err) + } + line := strings.TrimSpace(buf.String()) + if line == "" { + return "", nil + } + var compact bytes.Buffer + if err := json.Compact(&compact, []byte(line)); err != nil { + return "", fmt.Errorf("record %d: invalid JSON output: %w", index+1, err) + } + output := compact.String() + if !strings.HasSuffix(output, "\n") { + output += "\n" + } + return output, nil +} + +// newTemplateFuncMap returns a map of functions to be used in templates. +func newTemplateFuncMap(opts GenerateOptions) template.FuncMap { + seed := uint64(opts.Seed) + if opts.Seed < 0 { + seed = uint64(math.Abs(float64(opts.Seed))) + } + faker := gofakeit.New(seed) + + seq := 0 + + return template.FuncMap{ + "faker": func(path string) (string, error) { + return fakerValue(faker, path) + }, + "seq": func() int { + seq++ + return seq + }, + "uuid": func() string { + return uuid.NewString() + }, + "timestamp": func(layout ...string) string { + format := time.RFC3339Nano + if len(layout) > 0 && layout[0] != "" { + format = layout[0] + } + return time.Now().UTC().Format(format) + }, + "randInt": func(min, max int) (int, error) { + if min > max { + return 0, fmt.Errorf("randInt min greater than max") + } + return faker.Number(min, max), nil + }, + "randFloat": func(min, max float64) (float64, error) { + if min > max { + return 0, fmt.Errorf("randFloat min greater than max") + } + return faker.Float64Range(min, max), nil + }, + } +} + +// fakerValue generates a fake value based on the provided path. 
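+// Supported path prefixes are person, address, internet, company, lorem, uuid,
+// boolean, date and number, each with the optional sub-fields handled below, for
+// example "person.email", "address.city", "lorem.words.5" or "number.10.99".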
+func fakerValue(faker *gofakeit.Faker, path string) (string, error) { + parts := strings.Split(strings.ToLower(path), ".") + if len(parts) == 0 { + return "", errors.New("empty faker path") + } + + switch parts[0] { + case "person": + if len(parts) == 1 || parts[1] == "name" { + return faker.Name(), nil + } + switch parts[1] { + case "first_name": + return faker.FirstName(), nil + case "last_name": + return faker.LastName(), nil + case "ssn": + return faker.SSN(), nil + case "phone": + return faker.Phone(), nil + case "email": + return faker.Email(), nil + case "job": + return faker.JobTitle(), nil + } + case "address": + addr := faker.Address() + if len(parts) == 1 { + return fmt.Sprintf("%s, %s, %s %s", addr.Street, addr.City, addr.State, addr.Zip), nil + } + switch parts[1] { + case "street": + return addr.Street, nil + case "city": + return addr.City, nil + case "state": + return addr.State, nil + case "zip": + return addr.Zip, nil + case "country": + return addr.Country, nil + case "latitude": + return fmt.Sprintf("%.6f", addr.Latitude), nil + case "longitude": + return fmt.Sprintf("%.6f", addr.Longitude), nil + } + case "internet": + if len(parts) == 1 { + return faker.DomainName(), nil + } + switch parts[1] { + case "email": + return faker.Email(), nil + case "user": + return faker.Username(), nil + case "domain": + return faker.DomainName(), nil + case "ipv4": + return faker.IPv4Address(), nil + case "ipv6": + return faker.IPv6Address(), nil + case "url": + return faker.URL(), nil + } + case "company": + if len(parts) == 1 { + return faker.Company(), nil + } + switch parts[1] { + case "name": + return faker.Company(), nil + case "bs": + return faker.BS(), nil + case "buzzword": + return faker.BuzzWord(), nil + case "slogan": + return faker.Slogan(), nil + } + case "lorem": + if len(parts) == 1 { + return strings.Join(generateWords(faker, 3), " "), nil + } + switch parts[1] { + case "word": + return faker.Word(), nil + case "words": + count := 3 + if len(parts) > 2 { + if n, err := strconv.Atoi(parts[2]); err == nil { + count = n + } + } + return strings.Join(generateWords(faker, count), " "), nil + case "sentence": + n := 12 + if len(parts) > 2 { + if v, err := strconv.Atoi(parts[2]); err == nil { + n = v + } + } + return faker.Sentence(n), nil + case "paragraph": + n := 3 + if len(parts) > 2 { + if v, err := strconv.Atoi(parts[2]); err == nil { + n = v + } + } + return faker.Paragraph(n, 3, 12, " "), nil + } + case "uuid": + return uuid.NewString(), nil + case "boolean": + return fmt.Sprintf("%t", faker.Bool()), nil + case "date": + return faker.Date().Format(time.RFC3339Nano), nil + case "number": + min, max := 0, 100 + if len(parts) > 1 { + if v, err := strconv.Atoi(parts[1]); err == nil { + min = v + } + } + if len(parts) > 2 { + if v, err := strconv.Atoi(parts[2]); err == nil { + max = v + } + } + if min > max { + return "", fmt.Errorf("number: min greater than max") + } + return fmt.Sprintf("%d", faker.Number(min, max)), nil + } + + return "", fmt.Errorf("unsupported faker path %q", path) +} + +// generateWords generates a slice of fake words of the specified count. 
+func generateWords(faker *gofakeit.Faker, count int) []string { + if count <= 0 { + return []string{} + } + words := make([]string, count) + for i := range words { + words[i] = faker.Word() + } + return words +} diff --git a/pkg/streams/msgio/publish.go b/pkg/streams/msgio/publish.go new file mode 100644 index 00000000..a0237d1c --- /dev/null +++ b/pkg/streams/msgio/publish.go @@ -0,0 +1,174 @@ +package msgio + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/nats-io/nats.go" + "github.com/rs/zerolog/log" +) + +// PublishOptions controls how a JSONL file is streamed to NATS. +type PublishOptions struct { + ServerURL string + Subject string + DeviceID string + FilePath string + Interval time.Duration + MaxLine int + Validate bool + Headers map[string]string + Verbose bool + Echo bool +} + +// PublishFromFile reads a JSONL file and publishes each line to the derived subject. +func PublishFromFile(ctx context.Context, opts PublishOptions) error { + if opts.FilePath == "" { + return errors.New("file path cannot be empty") + } + + baseSubject := strings.TrimSpace(opts.Subject) + if baseSubject == "" { + return errors.New("subject cannot be empty") + } + + deviceID := strings.TrimSpace(opts.DeviceID) + if deviceID == "" { + return errors.New("device-id cannot be empty") + } + + if opts.ServerURL == "" { + return errors.New("server URL cannot be empty") + } + + fullSubject := config.DeviceSubject(baseSubject, deviceID) + + nc, err := nats.Connect(opts.ServerURL) + if err != nil { + return fmt.Errorf("connect to NATS: %w", err) + } + defer func() { + if drainErr := nc.Drain(); drainErr != nil { + log.Error().Err(drainErr).Msg("failed to drain NATS connection after publish") + } + }() + + file, err := os.Open(opts.FilePath) + if err != nil { + return fmt.Errorf("open file: %w", err) + } + defer func() { + if closeErr := file.Close(); closeErr != nil { + log.Error().Err(closeErr).Str("file", opts.FilePath).Msg("failed to close publish file") + } + }() + + info, err := file.Stat() + if err != nil { + return fmt.Errorf("stat file: %w", err) + } + if info.IsDir() { + return fmt.Errorf("%s is a directory", opts.FilePath) + } + + scanner := bufio.NewScanner(file) + buf := make([]byte, 1024*1024) + maxLine := opts.MaxLine + if maxLine <= 0 { + maxLine = 8 * 1024 * 1024 + } + scanner.Buffer(buf, maxLine) + + lineNumber := 0 + for scanner.Scan() { + select { + case <-ctx.Done(): + if opts.Verbose { + log.Warn().Int("lines", lineNumber).Msg("publish interrupted") + } + return ctx.Err() + default: + } + + rawLine := strings.TrimSpace(scanner.Text()) + if rawLine == "" { + continue + } + lineNumber++ + + if opts.Validate { + var jsRaw json.RawMessage + err := json.Unmarshal([]byte(rawLine), &jsRaw) + if err != nil { + return fmt.Errorf("line %d: invalid JSON: %w", lineNumber, err) + } + } + + msg := &nats.Msg{ + Subject: fullSubject, + Header: nats.Header{}, + Data: []byte(rawLine), + } + + msg.Header.Set("Content-Type", "application/json") + msg.Header.Set(config.HeaderDevice, deviceID) + msg.Header.Set(config.HeaderFile, filepath.Base(opts.FilePath)) + for k, v := range opts.Headers { + msg.Header.Set(k, v) + } + + if err := nc.PublishMsg(msg); err != nil { + return fmt.Errorf("publish line %d: %w", lineNumber, err) + } + + if opts.Verbose { + log.Info().Int("line", lineNumber).Str("subject", fullSubject).Msg("published line") + } + + if opts.Echo { + if _, err := 
fmt.Fprintln(os.Stdout, rawLine); err != nil { + return fmt.Errorf("echo line %d: %w", lineNumber, err) + } + } + + if opts.Interval > 0 { + select { + case <-ctx.Done(): + if opts.Verbose { + log.Warn().Int("line", lineNumber).Msg("publish interrupted during interval") + } + return ctx.Err() + case <-time.After(opts.Interval): + } + } + } + + err = scanner.Err() + if err != nil { + return fmt.Errorf("scan file: %w", err) + } + + err = nc.Flush() + if err != nil { + return fmt.Errorf("flush connection: %w", err) + } + err = nc.LastError() + if err != nil { + return fmt.Errorf("nats error: %w", err) + } + + if opts.Verbose { + log.Info().Int("messages", lineNumber).Str("subject", fullSubject).Msg("completed publishing") + } + + return nil +} diff --git a/pkg/streams/msgio/tail.go b/pkg/streams/msgio/tail.go new file mode 100644 index 00000000..dc2b2e3e --- /dev/null +++ b/pkg/streams/msgio/tail.go @@ -0,0 +1,122 @@ +package msgio + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/nats-io/nats.go" + "github.com/rs/zerolog/log" +) + +// TailOptions controls how a monitoring subscription behaves. +type TailOptions struct { + Subject string + DeviceID string + Pretty bool + Headers bool + Verbose bool + Writer io.Writer +} + +// Tailer handles streaming monitor messages from NATS. +type Tailer struct { + opts TailOptions + deviceID string + fullSubject string + nc *nats.Conn +} + +// NewTailer prepares a Tailer instance for the provided context and options. +func NewTailer(nc *nats.Conn, opts TailOptions) *Tailer { + opts.Subject = strings.TrimSpace(opts.Subject) + opts.DeviceID = strings.TrimSpace(opts.DeviceID) + + if opts.DeviceID == "" { + opts.DeviceID = ">" + } + if opts.Writer == nil { + opts.Writer = os.Stdout + } + if opts.Subject == "" { + opts.Subject = config.MonitorSubject + } + + t := &Tailer{ + opts: opts, + deviceID: opts.DeviceID, + fullSubject: config.SubjectJoin(opts.Subject, opts.DeviceID), + nc: nc, + } + + return t +} + +// Run subscribes to the specified device stream and processes incoming messages. 
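+// Example (illustrative sketch, assuming a connected *nats.Conn nc and a cancellable
+// ctx, e.g. one tied to SIGINT):
+//
+//	tailer := NewTailer(nc, TailOptions{DeviceID: "device-1", Pretty: true})
+//	if err := tailer.Run(ctx); err != nil {
+//		return err
+//	}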
+func (t *Tailer) Run(ctx context.Context) error { + _, cancel := context.WithCancel(ctx) + defer cancel() + + sub, err := t.nc.Subscribe(t.fullSubject, func(msg *nats.Msg) { + t.renderMessage(msg) + }) + if err != nil { + return fmt.Errorf("subscribe: %w", err) + } + defer func() { + if drainErr := sub.Drain(); drainErr != nil && !errors.Is(drainErr, nats.ErrConnectionClosed) { + log.Warn().Err(drainErr).Msg("drain subscription error") + } + }() + err = t.nc.Flush() + if err != nil { + return fmt.Errorf("flush: %w", err) + } + if t.opts.Verbose { + log.Info().Str("subject", t.fullSubject).Msg("monitoring") + } + <-ctx.Done() + return nil +} + +func (t *Tailer) renderMessage(msg *nats.Msg) { + if t.opts.Headers && len(msg.Header) > 0 { + keys := make([]string, 0, len(msg.Header)) + for key := range msg.Header { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + for _, value := range msg.Header.Values(key) { + _, err := fmt.Fprintf(t.opts.Writer, "# header %s=%s\n", key, value) + if err != nil { + log.Error().Err(err).Msg("failed to write header") + } + } + } + } + + body := msg.Data + if t.opts.Pretty { + var buf bytes.Buffer + err := json.Indent(&buf, body, "", " ") + if err == nil { + body = buf.Bytes() + } else if t.opts.Verbose { + log.Warn().Err(err).Msg("pretty print failed") + } + } + + line := strings.TrimRight(string(body), "\n") + _, err := fmt.Fprintln(os.Stdout, line) + if err != nil { + log.Error().Err(err).Msg("failed to write message") + } +} diff --git a/pkg/streams/natsutil/connect.go b/pkg/streams/natsutil/connect.go new file mode 100644 index 00000000..e331610d --- /dev/null +++ b/pkg/streams/natsutil/connect.go @@ -0,0 +1,38 @@ +package natsutil + +import ( + "fmt" + + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/rs/zerolog/log" +) + +// ConnectJetStream establishes a NATS connection and JetStream context. +func ConnectJetStream(server string, opt ...nats.Option) (jetstream.JetStream, error) { + nc, err := nats.Connect(server, opt...) + if err != nil { + return nil, fmt.Errorf("connect to NATS: %w", err) + } + js, err := jetstream.New(nc) + if err != nil { + if drainErr := nc.Drain(); drainErr != nil { + log.Warn().Err(drainErr).Msg("failed to drain NATS connection after jetstream error") + } + return nil, fmt.Errorf("jetstream context: %w", err) + } + inProcess := nc.ConnectedAddr() == "pipe" + log.Debug().Str("url", nc.ConnectedUrl()).Str("addr", nc.ConnectedAddr()).Bool("in_process", inProcess).Msg("JetStream connection established") + return js, nil +} + +func ConnectNATS(server string, opt ...nats.Option) (*nats.Conn, error) { + nc, err := nats.Connect(server, opt...) 
+ if err != nil { + log.Error().Err(err).Str("server", server).Msg("failed to connect to NATS server") + return nil, fmt.Errorf("connect to NATS: %w", err) + } + inProcess := nc.ConnectedAddr() == "pipe" + log.Debug().Str("url", nc.ConnectedUrl()).Str("addr", nc.ConnectedAddr()).Bool("in_process", inProcess).Msg("NATS connection established") + return nc, nil +} diff --git a/pkg/streams/natsutil/embedded.go b/pkg/streams/natsutil/embedded.go new file mode 100644 index 00000000..986a63e9 --- /dev/null +++ b/pkg/streams/natsutil/embedded.go @@ -0,0 +1,86 @@ +package natsutil + +import ( + "errors" + "os" + "time" + + "github.com/nats-io/nats-server/v2/server" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/rs/zerolog/log" +) + +type EmbeddedServer struct { + srv *server.Server + nc *nats.Conn + storeDir string + js jetstream.JetStream +} + +func NewEmbeddedServer() (*EmbeddedServer, error) { + tmpDir, err := os.MkdirTemp("", "nats-server-") + if err != nil { + return nil, err + } + srv, err := server.NewServer(&server.Options{ + JetStream: true, + DontListen: true, + StoreDir: tmpDir, + }) + if err != nil { + return nil, err + } + srv.Start() + if !srv.ReadyForConnections(5 * time.Second) { + srv.Shutdown() + return nil, errors.New("nats server not ready in time") + } + nc, err := nats.Connect(srv.ClientURL(), nats.InProcessServer(srv)) + if err != nil { + srv.Shutdown() + return nil, err + } + return &EmbeddedServer{ + srv: srv, + nc: nc, + storeDir: tmpDir, + }, nil +} + +func (e *EmbeddedServer) Close() { + if e.nc != nil && !e.nc.IsClosed() { + e.nc.Close() + } + if err := os.RemoveAll(e.storeDir); err != nil { + log.Warn().Err(err).Str("dir", e.storeDir).Msg("failed to remove embedded store directory") + } + e.srv.Shutdown() +} + +func (e *EmbeddedServer) NatsConn() *nats.Conn { + return e.nc +} + +func (e *EmbeddedServer) Server() *server.Server { + return e.srv +} + +func (e *EmbeddedServer) StoreDir() string { + return e.storeDir +} + +func (e *EmbeddedServer) JetStream() (jetstream.JetStream, error) { + if e.js == nil { + js, err := jetstream.New(e.nc) + if err != nil { + return nil, err + } + e.js = js + } + return e.js, nil +} + +func (e *EmbeddedServer) ClientURL() string { + return e.srv.ClientURL() +} diff --git a/pkg/streams/natsutil/embedded_test.go b/pkg/streams/natsutil/embedded_test.go new file mode 100644 index 00000000..4b009542 --- /dev/null +++ b/pkg/streams/natsutil/embedded_test.go @@ -0,0 +1,26 @@ +package natsutil + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestEmbeddedServer(t *testing.T) { + es, err := NewEmbeddedServer() + assert.NoError(t, err) + defer es.Close() + assert.DirExists(t, es.storeDir) + + nc := es.NatsConn() + assert.NotNil(t, nc) + assert.False(t, nc.IsClosed()) + + srv := es.Server() + assert.NotNil(t, srv) + assert.True(t, srv.ReadyForConnections(5*time.Second)) + + es.Close() + assert.NoDirExists(t, es.storeDir) +} diff --git a/pkg/streams/natsutil/header.go b/pkg/streams/natsutil/header.go new file mode 100644 index 00000000..f557f475 --- /dev/null +++ b/pkg/streams/natsutil/header.go @@ -0,0 +1,17 @@ +package natsutil + +import "github.com/nats-io/nats.go" + +// CloneHeader returns a deep copy of a NATS header map. 
+func CloneHeader(h nats.Header) nats.Header { + if len(h) == 0 { + return nats.Header{} + } + out := nats.Header{} + for key, values := range h { + for _, value := range values { + out.Add(key, value) + } + } + return out +} diff --git a/pkg/streams/natsutil/kv.go b/pkg/streams/natsutil/kv.go new file mode 100644 index 00000000..bba1b1f6 --- /dev/null +++ b/pkg/streams/natsutil/kv.go @@ -0,0 +1,17 @@ +package natsutil + +import ( + "context" + "errors" + + "github.com/nats-io/nats.go/jetstream" +) + +// EnsureKeyValue returns a key-value bucket, creating it if missing. +func EnsureKeyValue(ctx context.Context, js jetstream.JetStream, bucket string) (jetstream.KeyValue, error) { + kv, err := js.KeyValue(ctx, bucket) + if errors.Is(err, jetstream.ErrBucketNotFound) { + kv, err = js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: bucket}) + } + return kv, err +} diff --git a/pkg/streams/natsutil/server.go b/pkg/streams/natsutil/server.go new file mode 100644 index 00000000..37e80b37 --- /dev/null +++ b/pkg/streams/natsutil/server.go @@ -0,0 +1,128 @@ +package natsutil + +import ( + "errors" + "os" + "time" + + "github.com/nats-io/nats-server/v2/server" + "github.com/nats-io/nats.go" + "github.com/rs/zerolog/log" +) + +// ServerConfig controls how a test or embedded NATS server should be started. +type ServerConfig struct { + Options *server.Options + Embedded bool + TempDir string +} + +// ServerHandle wraps a running NATS server instance. +type ServerHandle struct { + srv *server.Server + storeDir string + embedded bool +} + +// StartServer boots a NATS server suitable for tests or local tooling. +func StartServer(cfg ServerConfig) (*ServerHandle, error) { + opts := cloneOptions(cfg.Options) + if opts.StoreDir == "" { + dir := cfg.TempDir + if dir == "" { + tmp, err := os.MkdirTemp("", "streams-nats-") + if err != nil { + return nil, err + } + dir = tmp + } + opts.StoreDir = dir + } + if !opts.JetStream { + opts.JetStream = true + } + if opts.Host == "" { + opts.Host = "127.0.0.1" + } + if cfg.Embedded { + // When running embedded, callers should connect via nats.InProcessServer. + opts.Port = 0 + } else if opts.Port == 0 { + opts.Port = -1 // auto-select free port + } + opts.NoSigs = true + opts.NoLog = true + log.Debug().Str("host", opts.Host).Int("port", opts.Port).Bool("embedded", cfg.Embedded).Str("store", opts.StoreDir).Msg("starting embedded NATS server") + + srv, err := server.NewServer(opts) + if err != nil { + return nil, err + } + srv.Start() + if !srv.ReadyForConnections(5 * time.Second) { + srv.Shutdown() + return nil, errors.New("nats server not ready in time") + } + log.Debug().Str("url", srv.ClientURL()).Bool("embedded", cfg.Embedded).Msg("embedded NATS server ready") + return &ServerHandle{srv: srv, storeDir: opts.StoreDir, embedded: cfg.Embedded}, nil +} + +// Shutdown stops the server and cleans up temporary resources. +func (h *ServerHandle) Shutdown() { + if h == nil { + return + } + if h.srv != nil { + log.Debug().Str("url", h.srv.ClientURL()).Msg("shutting down embedded NATS server") + h.srv.Shutdown() + } + if h.storeDir != "" { + _ = os.RemoveAll(h.storeDir) + } +} + +// ClientURL returns the URL clients can use to connect (only valid when not embedded). 
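+// Example (illustrative sketch for tests or local tooling):
+//
+//	h, err := StartServer(ServerConfig{Options: &server.Options{JetStream: true}})
+//	if err != nil {
+//		return err
+//	}
+//	defer h.Shutdown()
+//	nc, err := nats.Connect(h.ClientURL())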
+func (h *ServerHandle) ClientURL() string { + if h == nil || h.srv == nil { + return "" + } + return h.srv.ClientURL() +} + +func (h *ServerHandle) NatsConn() (*nats.Conn, error) { + if h == nil || h.srv == nil { + return nil, errors.New("server not running") + } + return nats.Connect(h.srv.ClientURL()) +} + +// InProcessOption returns a connection option for embedded servers. +func (h *ServerHandle) InProcessOption() nats.Option { + if h == nil || h.srv == nil { + log.Debug().Msg("in-process option requested without running server") + return nil + } + log.Debug().Str("url", h.srv.ClientURL()).Msg("providing in-process NATS option") + inner := nats.InProcessServer(h.srv) + return func(o *nats.Options) error { + log.Debug().Msg("applying in-process NATS option") + return inner(o) + } +} + +func cloneOptions(opts *server.Options) *server.Options { + if opts == nil { + return &server.Options{} + } + out := *opts + return &out +} + +func WithServer(cfg ServerConfig, fn func(h *ServerHandle) error) error { + srv, err := StartServer(cfg) + if err != nil { + return err + } + defer srv.Shutdown() + return fn(srv) +} diff --git a/pkg/streams/session/export.go b/pkg/streams/session/export.go new file mode 100644 index 00000000..e92ab9c7 --- /dev/null +++ b/pkg/streams/session/export.go @@ -0,0 +1,193 @@ +package session + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/rs/zerolog/log" +) + +// ExportOptions controls exporting a recorded session to JSONL. +type ExportOptions struct { + ServerURL string + SessionID string + Bucket string + Writer io.Writer + OutputPath string // optional destination path for messaging purposes + Verbose bool +} + +// Envelope wraps a message with its headers for export/import. +type Envelope struct { + Headers map[string]string `json:"headers"` + Data json.RawMessage `json:"data"` +} + +// MetadataLine is the first line in an exported JSONL file containing session metadata. +type MetadataLine struct { + Metadata Metadata `json:"metadata"` +} + +// Export writes the messages of a recorded session to the provided writer as JSONL. 
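+// Example (illustrative sketch; exports a known session to a local JSONL file):
+//
+//	f, err := os.Create("session.jsonl")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	err = Export(ctx, ExportOptions{
+//		ServerURL: nats.DefaultURL,
+//		SessionID: "my-session",
+//		Bucket:    DefaultBucket,
+//		Writer:    f,
+//	})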
+func Export(ctx context.Context, opts ExportOptions) error { + if opts.ServerURL == "" { + return errors.New("server URL cannot be empty") + } + if strings.TrimSpace(opts.SessionID) == "" { + return errors.New("session-id cannot be empty") + } + if opts.Writer == nil { + return errors.New("writer cannot be nil") + } + + nc, err := nats.Connect(opts.ServerURL) + if err != nil { + return fmt.Errorf("connect to NATS: %w", err) + } + defer func() { + if drainErr := nc.Drain(); drainErr != nil { + log.Error().Err(drainErr).Msg("failed to drain NATS connection after export") + } + }() + + js, err := jetstream.New(nc) + if err != nil { + return fmt.Errorf("jetstream context: %w", err) + } + + sessMgr, err := NewSessionStore(js, opts.Bucket) + if err != nil { + return err + } + + meta, err := sessMgr.Info(opts.SessionID) + if err != nil { + return fmt.Errorf("load metadata: %w", err) + } + + // Write metadata as first line + metaLine := MetadataLine{Metadata: *meta} + metaJSON, err := json.Marshal(metaLine) + if err != nil { + return fmt.Errorf("marshal metadata: %w", err) + } + if _, err := opts.Writer.Write(metaJSON); err != nil { + return fmt.Errorf("write metadata: %w", err) + } + if _, err := opts.Writer.Write([]byte("\n")); err != nil { + return fmt.Errorf("write metadata: %w", err) + } + + durable := config.ExportConsumerName(meta.SessionID) + consumer, err := js.CreateOrUpdateConsumer(context.Background(), meta.Stream, jetstream.ConsumerConfig{ + Durable: durable, + AckPolicy: jetstream.AckExplicitPolicy, + DeliverPolicy: jetstream.DeliverAllPolicy, + FilterSubject: meta.SessionSubject, + }) + if err != nil { + return fmt.Errorf("create consumer: %w", err) + } + defer func() { + _ = js.DeleteConsumer(context.Background(), meta.Stream, durable) + }() + + written := 0 + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + batch, err := consumer.Fetch(128, jetstream.FetchMaxWait(1*time.Second)) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return err + } + if errors.Is(err, jetstream.ErrNoMessages) { + break + } + return fmt.Errorf("fetch: %w", err) + } + + received := 0 + for msg := range batch.Messages() { + if msg == nil { + continue + } + received++ + + // Extract headers from NATS message + headers := make(map[string]string) + for key, values := range msg.Headers() { + if len(values) > 0 { + headers[key] = values[0] + } + } + + // Create envelope with headers and data + envelope := Envelope{ + Headers: headers, + Data: json.RawMessage(msg.Data()), + } + + envelopeJSON, err := json.Marshal(envelope) + if err != nil { + return fmt.Errorf("marshal envelope: %w", err) + } + + if _, err := opts.Writer.Write(envelopeJSON); err != nil { + return fmt.Errorf("write message: %w", err) + } + if _, err := opts.Writer.Write([]byte("\n")); err != nil { + return fmt.Errorf("write message: %w", err) + } + written++ + _ = msg.Ack() + + // Progress reporting + if opts.Verbose && written%100 == 0 { + if meta.MessageCount > 0 { + log.Info().Int("count", written).Int("total", meta.MessageCount).Msg("exporting messages") + } else { + log.Info().Int("count", written).Msg("exporting messages") + } + } + } + + if batchErr := batch.Error(); batchErr != nil { + if errors.Is(batchErr, context.Canceled) || errors.Is(batchErr, context.DeadlineExceeded) { + return batchErr + } + if !errors.Is(batchErr, jetstream.ErrNoMessages) { + return fmt.Errorf("fetch: %w", batchErr) + } + } + + if received == 0 { + break + } + + if meta.MessageCount 
> 0 && written >= meta.MessageCount { + break + } + } + + if opts.Verbose { + log.Info(). + Str("session_id", meta.SessionID). + Int("messages", written). + Msg("export complete") + } + + return nil +} diff --git a/pkg/streams/session/import.go b/pkg/streams/session/import.go new file mode 100644 index 00000000..e67ef580 --- /dev/null +++ b/pkg/streams/session/import.go @@ -0,0 +1,186 @@ +package session + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/rs/zerolog/log" +) + +// ImportOptions controls importing a recorded session from JSONL. +type ImportOptions struct { + ServerURL string + Reader io.Reader + InputPath string // optional source path for messaging purposes + DeviceID string // defaults to "123" if not specified + SessionBucket string + Verbose bool +} + +// Import reads messages from a JSONL file and recreates the session in JetStream. +func Import(ctx context.Context, opts ImportOptions) error { + if opts.ServerURL == "" { + return errors.New("server URL cannot be empty") + } + if opts.Reader == nil { + return errors.New("reader cannot be nil") + } + + // Default device ID to "123" if not specified + if strings.TrimSpace(opts.DeviceID) == "" { + opts.DeviceID = "123" + } + + // Default session bucket + if opts.SessionBucket == "" { + opts.SessionBucket = config.SessionBucket + } + + nc, err := nats.Connect(opts.ServerURL) + if err != nil { + return fmt.Errorf("connect to NATS: %w", err) + } + defer func() { + if drainErr := nc.Drain(); drainErr != nil { + log.Error().Err(drainErr).Msg("failed to drain NATS connection after import") + } + }() + + js, err := jetstream.New(nc) + if err != nil { + return fmt.Errorf("jetstream context: %w", err) + } + + scanner := bufio.NewScanner(opts.Reader) + + // Read first line (metadata) + if !scanner.Scan() { + if err := scanner.Err(); err != nil { + return fmt.Errorf("read metadata line: %w", err) + } + return errors.New("empty input file") + } + + var metaLine MetadataLine + if err := json.Unmarshal(scanner.Bytes(), &metaLine); err != nil { + return fmt.Errorf("parse metadata: %w", err) + } + + meta := metaLine.Metadata + + // Override device ID if provided + if opts.DeviceID != "" { + meta.DeviceID = opts.DeviceID + } + + // Create session manager + sessMgr, err := NewSessionStore(js, opts.SessionBucket) + if err != nil { + return fmt.Errorf("create session store: %w", err) + } + + // Create session stream + streamName := StreamName(meta.SessionID) + stream, err := js.CreateStream(ctx, jetstream.StreamConfig{ + Name: streamName, + Subjects: []string{meta.SessionSubject}, + Retention: jetstream.LimitsPolicy, + Storage: jetstream.FileStorage, + MaxAge: 72 * time.Hour, // Default retention + }) + if err != nil { + return fmt.Errorf("create stream: %w", err) + } + + // Reset metadata counters for import + meta.Start = time.Now() + meta.MessageCount = 0 + meta.Stream = streamName + + // Store initial metadata + if _, err := sessMgr.Put(&meta, 0); err != nil { + return fmt.Errorf("store metadata: %w", err) + } + + // Process messages + messageCount := 0 + for scanner.Scan() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var envelope Envelope + if err := json.Unmarshal(scanner.Bytes(), &envelope); err != nil { + return fmt.Errorf("parse envelope at line %d: %w", messageCount+2, err) + } + + // Create NATS message with headers + msg := 
nats.NewMsg(meta.SessionSubject) + msg.Data = envelope.Data + + // Add headers from envelope + for key, value := range envelope.Headers { + msg.Header.Add(key, value) + } + + // Override device and session IDs if they differ + msg.Header.Set(config.HeaderDevice, meta.DeviceID) + msg.Header.Set(config.HeaderSession, meta.SessionID) + + // Publish to stream + if _, err := js.PublishMsg(ctx, msg); err != nil { + return fmt.Errorf("publish message %d: %w", messageCount+1, err) + } + + messageCount++ + + if opts.Verbose && messageCount%100 == 0 { + log.Info().Int("count", messageCount).Msg("imported messages") + } + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("read input: %w", err) + } + + // Update metadata with final counts + meta.End = time.Now() + meta.MessageCount = messageCount + + // Load current revision and update + _, rev, err := sessMgr.Load(meta.SessionID) + if err != nil { + return fmt.Errorf("load metadata for update: %w", err) + } + if _, err := sessMgr.Put(&meta, rev); err != nil { + return fmt.Errorf("update metadata: %w", err) + } + + // Verify stream info + info, err := stream.Info(ctx) + if err != nil { + return fmt.Errorf("verify stream: %w", err) + } + + if opts.Verbose { + log.Info(). + Str("session_id", meta.SessionID). + Str("device_id", meta.DeviceID). + Int("messages", messageCount). + Uint64("stream_messages", info.State.Msgs). + Msg("import complete") + } + + return nil +} diff --git a/pkg/streams/session/manager.go b/pkg/streams/session/manager.go new file mode 100644 index 00000000..7ee1aada --- /dev/null +++ b/pkg/streams/session/manager.go @@ -0,0 +1,160 @@ +package session + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/natsutil" + "github.com/nats-io/nats.go/jetstream" +) + +const DefaultBucket = config.SessionBucket + +// Metadata captures information about a recorded session. +type Metadata struct { + SessionID string `json:"session_id"` + DeviceID string `json:"device_id"` + SourceSubject string `json:"source_subject"` + SessionSubject string `json:"session_subject"` + Stream string `json:"stream"` + Bucket string `json:"bucket"` + Start time.Time `json:"start"` + End time.Time `json:"end"` + MessageCount int `json:"message_count"` + Retention string `json:"retention,omitempty"` + Note string `json:"note,omitempty"` +} + +// SessionStore provides helper methods around session metadata backed by JetStream KV. +type SessionStore struct { + js jetstream.JetStream + bucket string + kv jetstream.KeyValue +} + +// NewSessionStore returns a Manager ensuring the session bucket exists. +func NewSessionStore(js jetstream.JetStream, bucket string) (*SessionStore, error) { + if bucket == "" { + bucket = config.SessionBucket + } + ctx := context.Background() + kv, err := natsutil.EnsureKeyValue(ctx, js, bucket) + if err != nil { + return nil, fmt.Errorf("sessions bucket %s: %w", bucket, err) + } + return &SessionStore{js: js, bucket: bucket, kv: kv}, nil +} + +// Bucket returns the configured bucket name. +func (m *SessionStore) Bucket() string { + return m.bucket +} + +// Put stores or updates session metadata in the bucket. 
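+//
+// Pass revision 0 to create a new entry; pass the last loaded revision to
+// update an existing one. Sketch, where store is a *SessionStore (the session
+// ID is illustrative):
+//
+//	meta := &Metadata{SessionID: "my-session"}
+//	rev, _ := store.Put(meta, 0)  // create
+//	meta.MessageCount++
+//	rev, _ = store.Put(meta, rev) // update with optimistic concurrency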
+func (m *SessionStore) Put(meta *Metadata, revision uint64) (uint64, error) { + if meta == nil { + return revision, errors.New("metadata is nil") + } + data, err := json.Marshal(meta) + if err != nil { + return revision, err + } + ctx := context.Background() + if revision == 0 { + rev, err := m.kv.Create(ctx, meta.SessionID, data) + if err != nil { + return revision, err + } + return rev, nil + } + rev, err := m.kv.Update(ctx, meta.SessionID, data, revision) + if err != nil { + return revision, err + } + return rev, nil +} + +// Load retrieves metadata for a session ID along with its revision. +func (m *SessionStore) Load(sessionID string) (*Metadata, uint64, error) { + entry, err := m.kv.Get(context.Background(), sessionID) + if err != nil { + return nil, 0, err + } + var meta Metadata + err = json.Unmarshal(entry.Value(), &meta) + if err != nil { + return nil, 0, err + } + if meta.SessionID == "" { + meta.SessionID = sessionID + } + if meta.Bucket == "" { + meta.Bucket = m.bucket + } + return &meta, entry.Revision(), nil +} + +// Info retrieves metadata for a session without revision details. +func (m *SessionStore) Info(sessionID string) (*Metadata, error) { + meta, _, err := m.Load(sessionID) + return meta, err +} + +// List returns all metadata entries in the bucket. +func (m *SessionStore) List() ([]Metadata, error) { + keys, err := m.kv.Keys(context.Background()) + if err != nil { + if errors.Is(err, jetstream.ErrNoKeysFound) { + return nil, nil + } + return nil, err + } + + sessions := make([]Metadata, 0, len(keys)) + for _, key := range keys { + entry, err := m.kv.Get(context.Background(), key) + if err != nil { + continue + } + var meta Metadata + err = json.Unmarshal(entry.Value(), &meta) + if err != nil { + continue + } + if meta.SessionID == "" { + meta.SessionID = key + } + if meta.Bucket == "" { + meta.Bucket = m.bucket + } + sessions = append(sessions, meta) + } + return sessions, nil +} + +// Delete removes a session's metadata and JetStream stream. +func (m *SessionStore) Delete(sessionID string) error { + meta, _, err := m.Load(sessionID) + if err != nil { + return err + } + ctx := context.Background() + err = m.js.DeleteStream(ctx, meta.Stream) + if err != nil && !errors.Is(err, jetstream.ErrStreamNotFound) { + return fmt.Errorf("delete stream: %w", err) + } + return m.kv.Delete(ctx, sessionID) +} + +// StreamName derives a sanitized stream name for a session identifier. +func StreamName(sessionID string) string { + upper := strings.ToUpper(sessionID) + upper = strings.ReplaceAll(upper, "-", "_") + upper = strings.ReplaceAll(upper, ".", "_") + return "STREAMS_" + upper +} diff --git a/pkg/streams/session/playback.go b/pkg/streams/session/playback.go new file mode 100644 index 00000000..eb8f3aa1 --- /dev/null +++ b/pkg/streams/session/playback.go @@ -0,0 +1,200 @@ +package session + +import ( + "context" + "errors" + "fmt" + "math" + "strings" + "time" + + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/natsutil" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/rs/zerolog/log" +) + +// PlaybackOptions controls replay of a recorded session. +type PlaybackOptions struct { + ServerURL string + SessionID string + Bucket string + TargetSubject string + Speed float64 + Verbose bool +} + +// Playback replays a recorded session at the requested speed. 
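+//
+// Inter-message delays from the recording are divided by Speed; an empty
+// TargetSubject falls back to config.PlaybackSubject. Sketch (session ID is
+// illustrative):
+//
+//	err := Playback(ctx, PlaybackOptions{
+//		ServerURL: nats.DefaultURL,
+//		SessionID: "my-session",
+//		Speed:     2, // replay twice as fast as recorded
+//	})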
+func Playback(ctx context.Context, opts PlaybackOptions) error { + log.Info().Str("session_id", opts.SessionID).Msg("starting session playback") + if opts.ServerURL == "" { + return errors.New("server URL cannot be empty") + } + if strings.TrimSpace(opts.SessionID) == "" { + return errors.New("session-id cannot be empty") + } + if opts.Speed == 0 { + opts.Speed = 1 + } + if opts.Speed <= 0 || math.IsNaN(opts.Speed) || math.IsInf(opts.Speed, 0) { + return fmt.Errorf("invalid speed %v", opts.Speed) + } + + nc, err := nats.Connect(opts.ServerURL) + if err != nil { + return fmt.Errorf("connect to NATS: %w", err) + } + defer func() { + if drainErr := nc.Drain(); drainErr != nil { + log.Error().Err(drainErr).Msg("failed to drain NATS connection after playback") + } + }() + + js, err := jetstream.New(nc) + if err != nil { + return fmt.Errorf("jetstream context: %w", err) + } + + sessMgr, err := NewSessionStore(js, opts.Bucket) + if err != nil { + return err + } + + meta, err := sessMgr.Info(opts.SessionID) + if err != nil { + return fmt.Errorf("load metadata: %w", err) + } + + targetSubject := strings.TrimSpace(opts.TargetSubject) + if targetSubject == "" { + targetSubject = config.PlaybackSubject + } + + log.Info().Str("session_id", meta.SessionID).Str("from_stream", meta.Stream). + Str("from_subject", meta.SessionSubject). + Str("to_subject", targetSubject). + Int("message_count", meta.MessageCount). + Msg("beginning session playback") + durable := config.PlaybackConsumerName(meta.SessionID) + consumer, err := js.CreateOrUpdateConsumer(context.Background(), meta.Stream, jetstream.ConsumerConfig{ + Durable: durable, + AckPolicy: jetstream.AckExplicitPolicy, + DeliverPolicy: jetstream.DeliverAllPolicy, + FilterSubject: meta.SessionSubject, + }) + if err != nil { + return fmt.Errorf("create consumer: %w", err) + } + defer func() { + _ = js.DeleteConsumer(context.Background(), meta.Stream, durable) + }() + + var ( + prevTime time.Time + played int + ) + + for { + log.Debug().Str("session_id", meta.SessionID).Msg("fetching next batch of messages for playback") + err := ctx.Err() + if err != nil { + return err + } + + batch, err := consumer.Fetch(50, jetstream.FetchContext(ctx)) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return err + } + if errors.Is(err, jetstream.ErrNoMessages) { + break + } + return fmt.Errorf("fetch: %w", err) + } + + received := 0 + for msg := range batch.Messages() { + if msg == nil { + continue + } + received++ + log.Debug().Str("session_id", meta.SessionID).Int("received", received).Msg("message fetched for playback") + err := ctx.Err() + if err != nil { + return err + } + + headers := natsutil.CloneHeader(msg.Headers()) + recordedAt := parseRecordedAt(headers.Get(config.HeaderRecordedAt)) + if !prevTime.IsZero() { + delay := recordedAt.Sub(prevTime) + if delay < 0 { + delay = 0 + } + scaled := time.Duration(float64(delay) / opts.Speed) + if scaled > 0 { + select { + case <-time.After(scaled): + case <-ctx.Done(): + return ctx.Err() + } + } + } + + publishMsg := &nats.Msg{ + Subject: targetSubject, + Header: headers, + Data: append([]byte(nil), msg.Data()...), + } + publishMsg.Header.Set(config.HeaderReplayedAt, time.Now().UTC().Format(time.RFC3339Nano)) + + log.Info().Str("to", targetSubject). + Int("received", received). + Str("session_id", meta.SessionID). 
+ Msg("replaying message") + err = nc.PublishMsg(publishMsg) + if err != nil { + return fmt.Errorf("publish replay: %w", err) + } + + err = msg.Ack() + if err != nil { + return fmt.Errorf("ack: %w", err) + } + + prevTime = recordedAt + played++ + } + + if batchErr := batch.Error(); batchErr != nil { + if errors.Is(batchErr, context.Canceled) || errors.Is(batchErr, context.DeadlineExceeded) { + return batchErr + } + if !errors.Is(batchErr, jetstream.ErrNoMessages) { + return fmt.Errorf("fetch: %w", batchErr) + } + } + + if received == 0 { + break + } + + if meta.MessageCount > 0 && played >= meta.MessageCount { + break + } + } + + return nil +} + +func parseRecordedAt(value string) time.Time { + if value == "" { + return time.Now().UTC() + } + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return time.Now().UTC() + } + return t +} diff --git a/pkg/streams/session/record.go b/pkg/streams/session/record.go new file mode 100644 index 00000000..6091aa3a --- /dev/null +++ b/pkg/streams/session/record.go @@ -0,0 +1,290 @@ +package session + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/apigear-io/cli/pkg/streams/buffer" + "github.com/apigear-io/cli/pkg/streams/config" + "github.com/apigear-io/cli/pkg/streams/natsutil" + "github.com/google/uuid" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/rs/zerolog/log" +) + +// RecordOptions controls how a live device stream is captured into JetStream. +type RecordOptions struct { + ServerURL string + Subject string + DeviceID string + SessionID string + Retention time.Duration + SessionBucket string + Note string + Verbose bool + Progress func(Metadata) + PreRoll time.Duration +} + +// Record subscribes to subject.deviceID and persists messages into a dedicated JetStream stream, tracking metadata in KV. 
+func Record(ctx context.Context, opts RecordOptions) (*Metadata, error) { + if opts.ServerURL == "" { + return nil, errors.New("server URL cannot be empty") + } + baseSubject := strings.TrimSpace(opts.Subject) + if baseSubject == "" { + return nil, errors.New("subject cannot be empty") + } + opts.DeviceID = strings.TrimSpace(opts.DeviceID) + if opts.DeviceID == "" { + return nil, errors.New("device-id cannot be empty") + } + sessionID := strings.TrimSpace(opts.SessionID) + if sessionID == "" { + sessionID = uuid.NewString() + } + sessionBucket := strings.TrimSpace(opts.SessionBucket) + if sessionBucket == "" { + sessionBucket = config.SessionBucket + } + + nc, err := nats.Connect(opts.ServerURL) + if err != nil { + return nil, fmt.Errorf("connect to NATS: %w", err) + } + defer func() { + if drainErr := nc.Drain(); drainErr != nil { + log.Error().Err(drainErr).Str("session", sessionID).Msg("failed to drain NATS connection after record") + } + }() + + js, err := jetstream.New(nc) + if err != nil { + return nil, fmt.Errorf("jetstream context: %w", err) + } + + sessMgr, err := NewSessionStore(js, sessionBucket) + if err != nil { + return nil, err + } + _, _, err = sessMgr.Load(sessionID) + if err == nil { + log.Warn().Str("session", sessionID).Msg("session already exists") + return nil, fmt.Errorf("session %s already exists", sessionID) + } else if !errors.Is(err, jetstream.ErrKeyNotFound) { + return nil, err + } + + sourceSubject := config.DeviceSubject(baseSubject, opts.DeviceID) + sessionSubject := config.SessionSubject(sessionID) + streamName := StreamName(sessionID) + + streamCfg := jetstream.StreamConfig{ + Name: streamName, + Subjects: []string{sessionSubject}, + Retention: jetstream.LimitsPolicy, + Storage: jetstream.FileStorage, + } + if opts.Retention > 0 { + streamCfg.MaxAge = opts.Retention + } + + _, err = js.CreateStream(ctx, streamCfg) + if err != nil { + return nil, fmt.Errorf("add stream: %w", err) + } + + log.Info().Str("session", sessionID).Str("device", opts.DeviceID).Msg("record stream created") + + metadata := &Metadata{ + SessionID: sessionID, + DeviceID: opts.DeviceID, + SourceSubject: sourceSubject, + SessionSubject: sessionSubject, + Stream: streamName, + Bucket: sessionBucket, + Start: time.Now().UTC(), + End: time.Now().UTC(), + Note: opts.Note, + } + if opts.Retention > 0 { + metadata.Retention = opts.Retention.String() + } + + if opts.PreRoll > 0 { + replayCtx, cancelReplay := context.WithTimeout(context.Background(), opts.PreRoll+time.Second) + defer cancelReplay() + since := time.Now().Add(-opts.PreRoll) + until := time.Now() + count, last, err := buffer.Replay(replayCtx, js, opts.DeviceID, since, until, func(bufMsg *nats.Msg, bufferedAt time.Time) error { + recordedAt := bufferedAt + if recordedAt.IsZero() { + recordedAt = time.Now().UTC() + } + replayed := &nats.Msg{ + Subject: sessionSubject, + Header: nats.Header{}, + Data: append([]byte(nil), bufMsg.Data...), + } + replayed.Header.Set("Content-Type", "application/json") + replayed.Header.Set(config.HeaderDevice, opts.DeviceID) + replayed.Header.Set(config.HeaderSession, sessionID) + replayed.Header.Set(config.HeaderRecordedAt, recordedAt.Format(time.RFC3339Nano)) + replayed.Header.Set(config.HeaderPreRoll, "true") + return publishToStream(replayCtx, js, replayed) + }) + if err != nil { + log.Error().Err(err).Str("session", sessionID).Msg("pre-roll replay failed") + } else if count > 0 { + metadata.MessageCount = count + if !last.IsZero() { + metadata.End = last + } + } + } + + revision, err := 
sessMgr.Put(metadata, 0) + if err != nil { + return nil, err + } + if opts.Progress != nil { + opts.Progress(*metadata) + } + + // Capture the time BEFORE subscription to catch any missed messages from buffer + subscriptionStartTime := time.Now().UTC() + + msgCh := make(chan *nats.Msg, 1024) + sub, err := nc.ChanSubscribe(sourceSubject, msgCh) + if err != nil { + return nil, fmt.Errorf("subscribe source: %w", err) + } + defer func() { + if err := sub.Drain(); err != nil { + log.Warn().Err(err).Str("subject", sourceSubject).Msg("failed to drain subscription") + } + }() + + // Ensure subscription is fully established on NATS server before proceeding + // This prevents race condition where messages arrive before subscription is ready + if err := nc.FlushTimeout(2 * time.Second); err != nil { + return nil, fmt.Errorf("flush subscription: %w", err) + } + + // Replay messages from buffer that arrived during subscription setup + // This ensures we don't miss any messages due to timing + subscriptionReadyTime := time.Now().UTC() + replayCtx, cancelReplay := context.WithTimeout(context.Background(), 5*time.Second) + count, last, err := buffer.Replay(replayCtx, js, opts.DeviceID, subscriptionStartTime, subscriptionReadyTime, func(bufMsg *nats.Msg, bufferedAt time.Time) error { + recordedAt := bufferedAt + if recordedAt.IsZero() { + recordedAt = time.Now().UTC() + } + replayed := &nats.Msg{ + Subject: sessionSubject, + Header: nats.Header{}, + Data: append([]byte(nil), bufMsg.Data...), + } + replayed.Header.Set("Content-Type", "application/json") + replayed.Header.Set(config.HeaderDevice, opts.DeviceID) + replayed.Header.Set(config.HeaderSession, sessionID) + replayed.Header.Set(config.HeaderRecordedAt, recordedAt.Format(time.RFC3339Nano)) + // Copy original headers from buffered message if they exist + if bufMsg.Header != nil { + for k, v := range bufMsg.Header { + if k != config.HeaderBufferedAt && k != config.HeaderDeadline { + replayed.Header[k] = v + } + } + } + return publishToStream(replayCtx, js, replayed) + }) + cancelReplay() + if err != nil { + log.Warn().Err(err).Str("session", sessionID).Msg("failed to replay messages from buffer during setup") + } else if count > 0 { + log.Info().Str("session", sessionID).Int("count", count).Msg("replayed messages that arrived during subscription setup") + metadata.MessageCount += count + if !last.IsZero() { + metadata.End = last + } + } + + var mu sync.Mutex + + updateMeta := func(update func(*Metadata)) error { + mu.Lock() + defer mu.Unlock() + update(metadata) + rev, err := sessMgr.Put(metadata, revision) + if err != nil { + return err + } + revision = rev + if opts.Progress != nil { + copy := *metadata + opts.Progress(copy) + } + return nil + } + + for { + select { + case <-ctx.Done(): + err := ctx.Err() + _ = updateMeta(func(m *Metadata) { + m.End = time.Now().UTC() + }) + if errors.Is(err, context.Canceled) { + log.Info().Str("session", sessionID).Msg("record context canceled") + return metadata, nil + } + return metadata, err + case msg, ok := <-msgCh: + if !ok { + log.Info().Str("session", sessionID).Msg("record channel closed") + return metadata, nil + } + + recordedAt := time.Now().UTC() + stored := &nats.Msg{ + Subject: sessionSubject, + Header: natsutil.CloneHeader(msg.Header), + Data: append([]byte(nil), msg.Data...), + } + stored.Header.Set("Content-Type", "application/json") + stored.Header.Set(config.HeaderDevice, opts.DeviceID) + stored.Header.Set(config.HeaderSession, sessionID) + stored.Header.Set(config.HeaderRecordedAt, 
recordedAt.Format(time.RFC3339Nano)) + + err := publishToStream(ctx, js, stored) + if err != nil { + log.Error().Err(err).Str("session", sessionID).Msg("publish to stream failed") + return metadata, err + } + + err = updateMeta(func(m *Metadata) { + m.MessageCount++ + m.End = recordedAt + }) + if err != nil { + log.Error().Err(err).Str("session", sessionID).Msg("update metadata failed") + return metadata, err + } + } + } +} + +func publishToStream(ctx context.Context, js jetstream.JetStream, msg *nats.Msg) error { + err := ctx.Err() + if err != nil { + return err + } + _, err = js.PublishMsg(ctx, msg) + return err +} diff --git a/pkg/streams/session/record_test.go b/pkg/streams/session/record_test.go new file mode 100644 index 00000000..3978c431 --- /dev/null +++ b/pkg/streams/session/record_test.go @@ -0,0 +1,85 @@ +package session_test + +import ( + "context" + "testing" + "time" + + "github.com/apigear-io/cli/pkg/streams/natsutil" + "github.com/apigear-io/cli/pkg/streams/session" + "github.com/nats-io/nats-server/v2/server" + "github.com/nats-io/nats.go" + "github.com/stretchr/testify/require" +) + +func TestRecordProgressCallback(t *testing.T) { + srv, err := natsutil.StartServer(natsutil.ServerConfig{ + Options: &server.Options{ + JetStream: true, + StoreDir: t.TempDir(), + }, + }) + require.NoError(t, err) + t.Cleanup(srv.Shutdown) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + progressCh := make(chan session.Metadata, 4) + opts := session.RecordOptions{ + ServerURL: srv.ClientURL(), + Subject: "monitor", + DeviceID: "device-progress", + SessionBucket: session.DefaultBucket, + Progress: func(meta session.Metadata) { + progressCh <- meta + }, + } + + metaCh := make(chan *session.Metadata, 1) + errCh := make(chan error, 1) + + go func() { + meta, err := session.Record(ctx, opts) + metaCh <- meta + errCh <- err + }() + + time.Sleep(100 * time.Millisecond) + + publisher, err := nats.Connect(srv.ClientURL()) + require.NoError(t, err) + t.Cleanup(publisher.Close) + + require.NoError(t, publisher.Publish("monitor.device-progress", []byte(`{"hello":true}`))) + require.NoError(t, publisher.Flush()) + + var update session.Metadata + require.Eventually(t, func() bool { + select { + case update = <-progressCh: + return update.MessageCount >= 1 + default: + return false + } + }, 2*time.Second, 50*time.Millisecond, "expected progress update") + require.GreaterOrEqual(t, update.MessageCount, 1) + require.Equal(t, "device-progress", update.DeviceID) + + cancel() + + select { + case err := <-errCh: + require.NoError(t, err) + case <-time.After(2 * time.Second): + t.Fatal("record did not stop") + } + + select { + case meta := <-metaCh: + require.NotNil(t, meta) + require.GreaterOrEqual(t, meta.MessageCount, 1) + default: + t.Fatal("expected metadata result") + } +} diff --git a/pkg/tasks/event.go b/pkg/tasks/event.go index 43400349..21297c09 100644 --- a/pkg/tasks/event.go +++ b/pkg/tasks/event.go @@ -34,11 +34,12 @@ func (e *TaskEvent) String() string { return fmt.Sprintf("task %s: %s -> %v", e.Name, e.State, e.Meta) } -func NewTaskEvent(item *TaskItem, state TaskState) *TaskEvent { +// NewTaskEvent creates a new task event +func NewTaskEvent(name string, state TaskState) *TaskEvent { return &TaskEvent{ - Name: item.name, + Name: name, State: state, - Meta: item.meta, + Meta: map[string]interface{}{}, } } diff --git a/pkg/tasks/manager.go b/pkg/tasks/manager.go index e4491209..f7dee131 100644 --- a/pkg/tasks/manager.go +++ b/pkg/tasks/manager.go @@ -3,149 +3,163 @@ 
package tasks import ( "context" "errors" - - "github.com/apigear-io/cli/pkg/helper" - "github.com/sasha-s/go-deadlock" + "sync" ) // ErrTaskNotFound is returned when a task is not found var ErrTaskNotFound = errors.New("task not found") -// TaskManager allows you to create tasks and run them +// TaskManager provides a simple registry for managing multiple tasks type TaskManager struct { - deadlock.RWMutex - helper.Hook[TaskEvent] - tasks map[string]*TaskItem + mu sync.RWMutex + tasks map[string]*taskEntry + hooks []func(*TaskEvent) + hooksMu sync.RWMutex +} + +type taskEntry struct { + task *Task + fn TaskFunc } // NewTaskManager creates a new task manager func NewTaskManager() *TaskManager { return &TaskManager{ - tasks: make(map[string]*TaskItem), - Hook: helper.Hook[TaskEvent]{}, + tasks: make(map[string]*taskEntry), + hooks: make([]func(*TaskEvent), 0), } } -// Register creates a new task -func (tm *TaskManager) Register(name string, meta map[string]interface{}, tf TaskFunc) *TaskItem { - if tm.Has(name) { - err := tm.RmTask(name) - if err != nil { - log.Warn().Err(err).Msg("error removing task") - } - } - task := NewTaskItem(name, meta, tf) - tm.AddTask(task) - return task +// AddHook adds an event hook function +func (tm *TaskManager) AddHook(fn func(*TaskEvent)) { + tm.hooksMu.Lock() + defer tm.hooksMu.Unlock() + tm.hooks = append(tm.hooks, fn) } -// AddTask adds a task to the task manager -func (tm *TaskManager) AddTask(task *TaskItem) { - if task == nil { - return - } - if tm.Has(task.name) { - return +// fireHook fires event hooks (if any exist) +func (tm *TaskManager) fireHook(name string, state TaskState) { + tm.hooksMu.RLock() + hooks := make([]func(*TaskEvent), len(tm.hooks)) + copy(hooks, tm.hooks) + tm.hooksMu.RUnlock() + + if len(hooks) > 0 { + event := &TaskEvent{ + Name: name, + State: state, + Meta: map[string]interface{}{}, + } + for _, hook := range hooks { + hook(event) + } } - tm.Lock() - defer tm.Unlock() - tm.tasks[task.name] = task - tm.FireHook(NewTaskEvent(task, TaskStateAdded)) } -// RmTask removes a task from the task manager -func (tm *TaskManager) RmTask(name string) error { - task := tm.Get(name) - if task == nil { - return ErrTaskNotFound +// Register creates and registers a task (meta is ignored for simplicity) +func (tm *TaskManager) Register(name string, meta map[string]interface{}, fn TaskFunc) *Task { + tm.mu.Lock() + defer tm.mu.Unlock() + + // Remove existing task if present + if entry, exists := tm.tasks[name]; exists { + entry.task.Cancel() + entry.task.CancelWatch() + tm.fireHook(name, TaskStateRemoved) } - task.Cancel() - tm.Lock() - defer tm.Unlock() - delete(tm.tasks, name) - tm.FireHook(NewTaskEvent(task, TaskStateRemoved)) - return nil -} -// Get returns a task -func (tm *TaskManager) Get(name string) *TaskItem { - tm.RLock() - defer tm.RUnlock() - task, ok := tm.tasks[name] - if !ok { - return nil + task := NewTask() + tm.tasks[name] = &taskEntry{ + task: task, + fn: fn, } + tm.fireHook(name, TaskStateAdded) return task } -// Run runs a task +// Run runs a registered task once func (tm *TaskManager) Run(ctx context.Context, name string) error { - task := tm.Get(name) - if task == nil { + tm.mu.RLock() + entry, exists := tm.tasks[name] + tm.mu.RUnlock() + + if !exists { return ErrTaskNotFound } - tm.FireHook(NewTaskEvent(task, TaskStateRunning)) - err := task.Run(ctx) + + tm.fireHook(name, TaskStateRunning) + err := entry.task.Run(ctx, entry.fn) if err != nil { - log.Error().Err(err).Str("task", name).Msg("failed to run task") - 
tm.FireHook(NewTaskEvent(task, TaskStateFailed)) + tm.fireHook(name, TaskStateFailed) return err } - tm.FireHook(NewTaskEvent(task, TaskStateFinished)) + tm.fireHook(name, TaskStateFinished) return nil } -// Watch watches a task +// Watch runs a registered task and watches files for changes func (tm *TaskManager) Watch(ctx context.Context, name string, dependencies ...string) error { - task := tm.Get(name) - if task == nil { + tm.mu.RLock() + entry, exists := tm.tasks[name] + tm.mu.RUnlock() + + if !exists { return ErrTaskNotFound } - err := task.Run(ctx) - if err != nil { - log.Error().Err(err).Str("task", name).Msg("failed to run task") + + tm.fireHook(name, TaskStateWatching) + go func() { + if err := entry.task.Watch(ctx, entry.fn, dependencies...); err != nil { + log.Error().Err(err).Str("task", name).Msg("watch failed") + tm.fireHook(name, TaskStateFailed) + } + }() + return nil +} + +// Cancel cancels a registered task +func (tm *TaskManager) Cancel(name string) error { + tm.mu.RLock() + entry, exists := tm.tasks[name] + tm.mu.RUnlock() + + if !exists { + return ErrTaskNotFound } - go task.Watch(ctx, dependencies...) - tm.FireHook(NewTaskEvent(task, TaskStateWatching)) + + entry.task.Cancel() + entry.task.CancelWatch() + tm.fireHook(name, TaskStateStopped) return nil } -// Names returns the names of all the tasks -func (tm *TaskManager) Names() []string { - tm.RLock() - defer tm.RUnlock() - var names []string - for name := range tm.tasks { - names = append(names, name) +// CancelAll cancels all registered tasks +func (tm *TaskManager) CancelAll() { + tm.mu.RLock() + defer tm.mu.RUnlock() + + for _, entry := range tm.tasks { + entry.task.Cancel() + entry.task.CancelWatch() } - return names } // Has returns true if the task exists func (tm *TaskManager) Has(name string) bool { - tm.RLock() - defer tm.RUnlock() - _, ok := tm.tasks[name] - return ok + tm.mu.RLock() + defer tm.mu.RUnlock() + _, exists := tm.tasks[name] + return exists } -// Cancel cancels a task -func (tm *TaskManager) Cancel(name string) error { - task := tm.Get(name) - if task == nil { - return ErrTaskNotFound - } - task.CancelWatch() - task.Cancel() - tm.FireHook(NewTaskEvent(task, TaskStateStopped)) - return nil -} +// Names returns the names of all registered tasks +func (tm *TaskManager) Names() []string { + tm.mu.RLock() + defer tm.mu.RUnlock() -// CancelAll cancels all the tasks -func (tm *TaskManager) CancelAll() { - tm.RLock() - defer tm.RUnlock() - for _, task := range tm.tasks { - task.Cancel() + names := make([]string, 0, len(tm.tasks)) + for name := range tm.tasks { + names = append(names, name) } + return names } diff --git a/pkg/tasks/manager_test.go b/pkg/tasks/manager_test.go new file mode 100644 index 00000000..f3ec8e1a --- /dev/null +++ b/pkg/tasks/manager_test.go @@ -0,0 +1,469 @@ +package tasks + +import ( + "context" + "errors" + "sync" + "testing" + "time" +) + +func TestNewTaskManager(t *testing.T) { + tm := NewTaskManager() + if tm == nil { + t.Fatal("NewTaskManager returned nil") + } + if tm.tasks == nil { + t.Error("tasks map not initialized") + } + if tm.hooks == nil { + t.Error("hooks slice not initialized") + } +} + +func TestTaskManager_Register(t *testing.T) { + tm := NewTaskManager() + + executed := false + fn := func(ctx context.Context) error { + executed = true + return nil + } + + task := tm.Register("test-task", nil, fn) + if task == nil { + t.Fatal("Register returned nil task") + } + + if !tm.Has("test-task") { + t.Error("task not registered") + } + + // Run the task to verify it 
was stored correctly + err := tm.Run(context.Background(), "test-task") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if !executed { + t.Error("task function was not executed") + } +} + +func TestTaskManager_RegisterReplaceExisting(t *testing.T) { + tm := NewTaskManager() + + firstExecuted := false + first := func(ctx context.Context) error { + firstExecuted = true + return nil + } + + secondExecuted := false + second := func(ctx context.Context) error { + secondExecuted = true + return nil + } + + // Register first task + tm.Register("task", nil, first) + + // Register second task with same name + tm.Register("task", nil, second) + + // Run should execute the second task + err := tm.Run(context.Background(), "task") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if firstExecuted { + t.Error("first task should not have been executed") + } + if !secondExecuted { + t.Error("second task should have been executed") + } +} + +func TestTaskManager_Run(t *testing.T) { + tm := NewTaskManager() + + executed := false + fn := func(ctx context.Context) error { + executed = true + return nil + } + + tm.Register("test", nil, fn) + + err := tm.Run(context.Background(), "test") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if !executed { + t.Error("task was not executed") + } +} + +func TestTaskManager_RunNotFound(t *testing.T) { + tm := NewTaskManager() + + err := tm.Run(context.Background(), "nonexistent") + if err != ErrTaskNotFound { + t.Errorf("expected ErrTaskNotFound, got %v", err) + } +} + +func TestTaskManager_RunWithError(t *testing.T) { + tm := NewTaskManager() + + expectedErr := errors.New("task error") + fn := func(ctx context.Context) error { + return expectedErr + } + + tm.Register("error-task", nil, fn) + + err := tm.Run(context.Background(), "error-task") + if err != expectedErr { + t.Errorf("expected error %v, got %v", expectedErr, err) + } +} + +func TestTaskManager_Watch(t *testing.T) { + tm := NewTaskManager() + + execCount := 0 + var mu sync.Mutex + + fn := func(ctx context.Context) error { + mu.Lock() + execCount++ + mu.Unlock() + return nil + } + + tm.Register("watch-task", nil, fn) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start watching (this runs in background) + err := tm.Watch(ctx, "watch-task") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + // Give it a moment to execute + time.Sleep(100 * time.Millisecond) + + mu.Lock() + count := execCount + mu.Unlock() + + if count < 1 { + t.Error("watch task should have executed at least once") + } + + cancel() +} + +func TestTaskManager_WatchNotFound(t *testing.T) { + tm := NewTaskManager() + + err := tm.Watch(context.Background(), "nonexistent") + if err != ErrTaskNotFound { + t.Errorf("expected ErrTaskNotFound, got %v", err) + } +} + +func TestTaskManager_Cancel(t *testing.T) { + tm := NewTaskManager() + + blocked := make(chan struct{}) + fn := func(ctx context.Context) error { + <-ctx.Done() + close(blocked) + return ctx.Err() + } + + tm.Register("blocking", nil, fn) + + // Start task in background + errCh := make(chan error, 1) + go func() { + errCh <- tm.Run(context.Background(), "blocking") + }() + + // Give it time to start + time.Sleep(50 * time.Millisecond) + + // Cancel the task + err := tm.Cancel("blocking") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + // Wait for task to finish + select { + case <-blocked: + // Success - task was cancelled + case <-time.After(1 * time.Second): + 
t.Error("task was not cancelled in time") + } + + select { + case runErr := <-errCh: + if runErr != nil && !errors.Is(runErr, context.Canceled) { + t.Errorf("unexpected run error: %v", runErr) + } + case <-time.After(1 * time.Second): + t.Error("run did not return after cancellation") + } +} + +func TestTaskManager_CancelNotFound(t *testing.T) { + tm := NewTaskManager() + + err := tm.Cancel("nonexistent") + if err != ErrTaskNotFound { + t.Errorf("expected ErrTaskNotFound, got %v", err) + } +} + +func TestTaskManager_CancelAll(t *testing.T) { + tm := NewTaskManager() + + count := 3 + blocked := make([]chan struct{}, count) + + for i := 0; i < count; i++ { + blocked[i] = make(chan struct{}) + ch := blocked[i] // capture for closure + fn := func(ctx context.Context) error { + <-ctx.Done() + close(ch) + return ctx.Err() + } + tm.Register("task-"+string(rune('A'+i)), nil, fn) + } + + // Start all tasks + errs := make(chan error, count) + for i := 0; i < count; i++ { + name := "task-" + string(rune('A'+i)) + go func(taskName string) { + errs <- tm.Run(context.Background(), taskName) + }(name) + } + + time.Sleep(50 * time.Millisecond) + + // Cancel all + tm.CancelAll() + + // Wait for all to finish + for i := 0; i < count; i++ { + select { + case <-blocked[i]: + // Success + case <-time.After(1 * time.Second): + t.Errorf("task %d was not cancelled", i) + } + } + + for i := 0; i < count; i++ { + select { + case runErr := <-errs: + if runErr != nil && !errors.Is(runErr, context.Canceled) { + t.Errorf("unexpected run error: %v", runErr) + } + case <-time.After(1 * time.Second): + t.Errorf("run %d did not return after CancelAll", i) + } + } +} + +func TestTaskManager_Has(t *testing.T) { + tm := NewTaskManager() + + if tm.Has("test") { + t.Error("should not have task before registration") + } + + tm.Register("test", nil, func(ctx context.Context) error { return nil }) + + if !tm.Has("test") { + t.Error("should have task after registration") + } +} + +func TestTaskManager_Names(t *testing.T) { + tm := NewTaskManager() + + names := tm.Names() + if len(names) != 0 { + t.Error("expected empty names initially") + } + + tm.Register("task1", nil, func(ctx context.Context) error { return nil }) + tm.Register("task2", nil, func(ctx context.Context) error { return nil }) + tm.Register("task3", nil, func(ctx context.Context) error { return nil }) + + names = tm.Names() + if len(names) != 3 { + t.Errorf("expected 3 names, got %d", len(names)) + } + + // Check all names are present + nameSet := make(map[string]bool) + for _, name := range names { + nameSet[name] = true + } + + for _, expected := range []string{"task1", "task2", "task3"} { + if !nameSet[expected] { + t.Errorf("missing task name: %s", expected) + } + } +} + +func TestTaskManager_AddHook(t *testing.T) { + tm := NewTaskManager() + + events := make([]TaskEvent, 0) + var mu sync.Mutex + + tm.AddHook(func(evt *TaskEvent) { + mu.Lock() + events = append(events, *evt) + mu.Unlock() + }) + + fn := func(ctx context.Context) error { return nil } + tm.Register("hooked", nil, fn) + + err := tm.Run(context.Background(), "hooked") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + // Give hooks time to fire + time.Sleep(50 * time.Millisecond) + + mu.Lock() + eventCount := len(events) + mu.Unlock() + + // Should have: Added, Running, Finished + if eventCount < 3 { + t.Errorf("expected at least 3 events, got %d", eventCount) + } + + // Check event states + mu.Lock() + hasAdded := false + hasRunning := false + hasFinished := false + for _, evt := 
range events { + switch evt.State { + case TaskStateAdded: + hasAdded = true + case TaskStateRunning: + hasRunning = true + case TaskStateFinished: + hasFinished = true + } + } + mu.Unlock() + + if !hasAdded { + t.Error("missing TaskStateAdded event") + } + if !hasRunning { + t.Error("missing TaskStateRunning event") + } + if !hasFinished { + t.Error("missing TaskStateFinished event") + } +} + +func TestTaskManager_HooksOnError(t *testing.T) { + tm := NewTaskManager() + + var lastState TaskState + var mu sync.Mutex + + tm.AddHook(func(evt *TaskEvent) { + mu.Lock() + lastState = evt.State + mu.Unlock() + }) + + fn := func(ctx context.Context) error { + return errors.New("fail") + } + + tm.Register("failing", nil, fn) + if err := tm.Run(context.Background(), "failing"); err == nil { + t.Fatal("expected run to fail") + } + + time.Sleep(50 * time.Millisecond) + + mu.Lock() + state := lastState + mu.Unlock() + + if state != TaskStateFailed { + t.Errorf("expected TaskStateFailed, got %v", state) + } +} + +func TestTaskManager_Concurrent(t *testing.T) { + tm := NewTaskManager() + + var wg sync.WaitGroup + count := 10 + + // Concurrent registrations + for i := 0; i < count; i++ { + wg.Add(1) + go func(n int) { + defer wg.Done() + name := "concurrent-" + string(rune('0'+n)) + tm.Register(name, nil, func(ctx context.Context) error { + time.Sleep(10 * time.Millisecond) + return nil + }) + }(i) + } + + wg.Wait() + + // Concurrent runs + errCh := make(chan error, count) + for i := 0; i < count; i++ { + wg.Add(1) + go func(n int) { + defer wg.Done() + name := "concurrent-" + string(rune('0'+n)) + errCh <- tm.Run(context.Background(), name) + }(i) + } + + wg.Wait() + close(errCh) + + for err := range errCh { + if err != nil { + t.Errorf("concurrent run failed: %v", err) + } + } + + // Verify all registered + if len(tm.Names()) != count { + t.Errorf("expected %d tasks, got %d", count, len(tm.Names())) + } +} diff --git a/pkg/tasks/task.go b/pkg/tasks/task.go index b34afd55..a68fe584 100644 --- a/pkg/tasks/task.go +++ b/pkg/tasks/task.go @@ -4,7 +4,6 @@ import ( "context" "os" "path/filepath" - "sync" "github.com/apigear-io/cli/pkg/helper" "github.com/fsnotify/fsnotify" @@ -13,47 +12,33 @@ import ( // TaskFunc is the function type of the task to run type TaskFunc func(ctx context.Context) error -// TaskItem is the task item stored in the TaskManager -type TaskItem struct { - sync.RWMutex - name string - meta map[string]interface{} - taskFunc TaskFunc +// Task represents a simple runnable task with optional file watching +type Task struct { cancel context.CancelFunc watchCancel context.CancelFunc } -// NewTaskItem creates a new task item -func NewTaskItem(name string, meta map[string]interface{}, tf TaskFunc) *TaskItem { - return &TaskItem{ - name: name, - meta: meta, - taskFunc: tf, - } +// NewTask creates a new task +func NewTask() *Task { + return &Task{} } -// Run runs the task once -func (t *TaskItem) Run(ctx context.Context) error { - log.Debug().Msgf("run task: %s", t.name) +// Run runs the task function once +func (t *Task) Run(ctx context.Context, fn TaskFunc) error { if t.cancel != nil { - // cancel the previous task t.cancel() } ctx, t.cancel = context.WithCancel(ctx) - err := t.taskFunc(ctx) - // handle the error - if err != nil { - t.UpdateMeta(map[string]interface{}{ - "error": err.Error(), - }) - return err - } - return nil + return fn(ctx) } -// Watch watches all the dependencies of the task and runs the task -// it uses fsnotify to watch the files -func (t *TaskItem) Watch(ctx 
context.Context, dependencies ...string) { +// Watch watches files and re-runs the task function when they change +func (t *Task) Watch(ctx context.Context, fn TaskFunc, dependencies ...string) error { + // Run once initially + if err := t.Run(ctx, fn); err != nil { + log.Error().Err(err).Msg("initial task run failed") + } + if t.watchCancel != nil { t.watchCancel() } @@ -61,11 +46,8 @@ func (t *TaskItem) Watch(ctx context.Context, dependencies ...string) { watcher, err := fsnotify.NewWatcher() if err != nil { - log.Error().Msgf("error creating watcher: %s", err) - if err := watcher.Close(); err != nil { - log.Error().Err(err).Msg("failed to close watcher") - } - return + log.Error().Err(err).Msg("error creating watcher") + return err } defer func() { if err := watcher.Close(); err != nil { @@ -76,31 +58,31 @@ func (t *TaskItem) Watch(ctx context.Context, dependencies ...string) { for _, dep := range dependencies { // check if the file exists if _, err := os.Stat(dep); os.IsNotExist(err) { - log.Debug().Msgf("file %s does not exist", dep) + log.Debug().Str("file", dep).Msg("file does not exist") continue } - log.Info().Msgf("watching file %s", dep) + log.Info().Str("file", dep).Msg("watching file") err := watcher.Add(dep) if err != nil { - log.Debug().Msgf("error watching file %s: %s", dep, err) + log.Debug().Err(err).Str("file", dep).Msg("error watching file") } // check if the dependency is a directory if helper.IsDir(dep) { err = filepath.WalkDir(dep, func(path string, d os.DirEntry, err error) error { if err != nil { - log.Error().Err(err).Msgf("error walking directory %s", dep) + log.Error().Err(err).Str("dir", dep).Msg("error walking directory") return err } if d.IsDir() { err = watcher.Add(path) if err != nil { - log.Warn().Err(err).Msgf("error watching directory %s", path) + log.Warn().Err(err).Str("path", path).Msg("error watching directory") } } return nil }) if err != nil { - log.Warn().Err(err).Msgf("error walking directory %s", dep) + log.Warn().Err(err).Str("dir", dep).Msg("error walking directory") } } } @@ -108,41 +90,30 @@ func (t *TaskItem) Watch(ctx context.Context, dependencies ...string) { for { select { case <-ctx.Done(): - return + return ctx.Err() case event := <-watcher.Events: if event.Op&fsnotify.Write == fsnotify.Write { - log.Debug().Msgf("modified file: %s", event.Name) - err := t.Run(ctx) - if err != nil { - log.Error().Err(err).Msgf("failed to run task %s", t.name) + log.Debug().Str("file", event.Name).Msg("file modified") + if err := t.Run(ctx, fn); err != nil { + log.Error().Err(err).Msg("task run failed") } } case err := <-watcher.Errors: - log.Error().Msgf("error watching file: %s", err) + log.Error().Err(err).Msg("watcher error") } } } -// Cancel cancels the task -func (t *TaskItem) Cancel() { - if t.cancel == nil { - return - } - t.cancel() -} - -func (t *TaskItem) CancelWatch() { - if t.watchCancel == nil { - return +// Cancel cancels the running task +func (t *Task) Cancel() { + if t.cancel != nil { + t.cancel() } - t.watchCancel() } -// UpdateMeta updates the meta data of the task -func (t *TaskItem) UpdateMeta(meta map[string]interface{}) { - t.Lock() - defer t.Unlock() - for k, v := range meta { - t.meta[k] = v +// CancelWatch cancels the watch operation +func (t *Task) CancelWatch() { + if t.watchCancel != nil { + t.watchCancel() } } diff --git a/pkg/tasks/task_test.go b/pkg/tasks/task_test.go new file mode 100644 index 00000000..457b293e --- /dev/null +++ b/pkg/tasks/task_test.go @@ -0,0 +1,390 @@ +package tasks + +import ( + "context" + 
"errors" + "os" + "path/filepath" + "testing" + "time" +) + +func TestNewTask(t *testing.T) { + task := NewTask() + if task == nil { + t.Fatal("NewTask returned nil") + } +} + +func TestTask_Run(t *testing.T) { + task := NewTask() + + executed := false + fn := func(ctx context.Context) error { + executed = true + return nil + } + + err := task.Run(context.Background(), fn) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if !executed { + t.Error("task function was not executed") + } +} + +func TestTask_RunWithError(t *testing.T) { + task := NewTask() + + expectedErr := errors.New("task failed") + fn := func(ctx context.Context) error { + return expectedErr + } + + err := task.Run(context.Background(), fn) + if err != expectedErr { + t.Errorf("expected error %v, got %v", expectedErr, err) + } +} + +func TestTask_RunCancellation(t *testing.T) { + task := NewTask() + + started := make(chan struct{}) + finished := make(chan struct{}) + + fn := func(ctx context.Context) error { + close(started) + <-ctx.Done() + close(finished) + return ctx.Err() + } + + errCh := make(chan error, 1) + go func() { + errCh <- task.Run(context.Background(), fn) + }() + + // Wait for task to start + <-started + + // Cancel the task + task.Cancel() + + // Wait for task to finish + select { + case <-finished: + // Success + case <-time.After(1 * time.Second): + t.Error("task did not finish after cancellation") + } + + select { + case runErr := <-errCh: + if runErr != nil && !errors.Is(runErr, context.Canceled) { + t.Errorf("unexpected run error: %v", runErr) + } + case <-time.After(1 * time.Second): + t.Error("run did not return after cancellation") + } +} + +func TestTask_RunReplacePrevious(t *testing.T) { + task := NewTask() + + first := make(chan struct{}) + second := make(chan struct{}) + + fn1 := func(ctx context.Context) error { + <-ctx.Done() + close(first) + return ctx.Err() + } + + fn2 := func(ctx context.Context) error { + close(second) + return nil + } + + // Start first task + firstErr := make(chan error, 1) + go func() { + firstErr <- task.Run(context.Background(), fn1) + }() + time.Sleep(50 * time.Millisecond) + + // Start second task (should cancel first) + err := task.Run(context.Background(), fn2) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + // First should be cancelled + select { + case <-first: + // Success - first was cancelled + case <-time.After(1 * time.Second): + t.Error("first task was not cancelled") + } + + select { + case err := <-firstErr: + if !errors.Is(err, context.Canceled) { + t.Errorf("expected context canceled from first task, got %v", err) + } + case <-time.After(1 * time.Second): + t.Error("first task run did not return") + } + + // Second should complete + select { + case <-second: + // Success + case <-time.After(100 * time.Millisecond): + t.Error("second task did not complete") + } +} + +func TestTask_Cancel(t *testing.T) { + task := NewTask() + + // Cancel without running should not panic + task.Cancel() + + // Cancel multiple times should not panic + task.Cancel() + task.Cancel() +} + +func TestTask_CancelWatch(t *testing.T) { + task := NewTask() + + // CancelWatch without watching should not panic + task.CancelWatch() + + // Multiple calls should not panic + task.CancelWatch() + task.CancelWatch() +} + +func TestTask_Watch(t *testing.T) { + task := NewTask() + + // Create a temporary file to watch + tmpDir := t.TempDir() + testFile := filepath.Join(tmpDir, "test.txt") + + err := os.WriteFile(testFile, []byte("initial"), 0644) + if err != 
nil { + t.Fatalf("failed to create test file: %v", err) + } + + execCount := 0 + fn := func(ctx context.Context) error { + execCount++ + return nil + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + done := make(chan error, 1) + go func() { + done <- task.Watch(ctx, fn, testFile) + }() + + // Wait for initial execution + time.Sleep(100 * time.Millisecond) + + if execCount < 1 { + t.Error("task should have executed at least once initially") + } + + initialCount := execCount + + // Modify the file + err = os.WriteFile(testFile, []byte("modified"), 0644) + if err != nil { + t.Fatalf("failed to modify test file: %v", err) + } + + // Wait for watch to trigger + time.Sleep(200 * time.Millisecond) + + if execCount <= initialCount { + t.Error("task should have executed again after file modification") + } + + // Cancel and wait for completion + cancel() + + select { + case err := <-done: + if err != context.Canceled { + t.Errorf("expected context.Canceled, got %v", err) + } + case <-time.After(1 * time.Second): + t.Error("watch did not stop after cancellation") + } +} + +func TestTask_WatchDirectory(t *testing.T) { + task := NewTask() + + // Create a temporary directory to watch + tmpDir := t.TempDir() + + execCount := 0 + fn := func(ctx context.Context) error { + execCount++ + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + defer cancel() + + done := make(chan error, 1) + go func() { + done <- task.Watch(ctx, fn, tmpDir) + }() + + // Wait for initial execution + time.Sleep(100 * time.Millisecond) + + if execCount < 1 { + t.Error("task should have executed at least once initially") + } + + // Wait for timeout + <-done +} + +func TestTask_WatchNonexistentFile(t *testing.T) { + task := NewTask() + + execCount := 0 + fn := func(ctx context.Context) error { + execCount++ + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + + // Watch a file that doesn't exist + err := task.Watch(ctx, fn, "/nonexistent/file.txt") + + // Should still execute once (initial run) + if execCount < 1 { + t.Error("task should have executed at least once initially") + } + + // Should complete without error (just no watching) + if err != context.DeadlineExceeded { + t.Logf("watch completed with: %v", err) + } +} + +func TestTask_WatchCancellation(t *testing.T) { + task := NewTask() + + tmpDir := t.TempDir() + testFile := filepath.Join(tmpDir, "test.txt") + + err := os.WriteFile(testFile, []byte("test"), 0644) + if err != nil { + t.Fatalf("failed to create test file: %v", err) + } + + fn := func(ctx context.Context) error { + time.Sleep(10 * time.Millisecond) + return nil + } + + ctx, cancel := context.WithCancel(context.Background()) + + done := make(chan error, 1) + go func() { + done <- task.Watch(ctx, fn, testFile) + }() + + // Let it run briefly + time.Sleep(100 * time.Millisecond) + + // Cancel via context + cancel() + + select { + case <-done: + // Success + case <-time.After(1 * time.Second): + t.Error("watch did not stop after context cancellation") + } +} + +func TestTask_WatchMultipleFiles(t *testing.T) { + task := NewTask() + + tmpDir := t.TempDir() + file1 := filepath.Join(tmpDir, "file1.txt") + file2 := filepath.Join(tmpDir, "file2.txt") + + err := os.WriteFile(file1, []byte("file1"), 0644) + if err != nil { + t.Fatalf("failed to create file1: %v", err) + } + + err = os.WriteFile(file2, []byte("file2"), 0644) + if err != nil { + t.Fatalf("failed to create file2: %v", 
err) + } + + execCount := 0 + fn := func(ctx context.Context) error { + execCount++ + return nil + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + done := make(chan error, 1) + go func() { + done <- task.Watch(ctx, fn, file1, file2) + }() + + // Wait for initial execution + time.Sleep(100 * time.Millisecond) + + if execCount < 1 { + t.Error("task should have executed at least once initially") + } + + initialCount := execCount + + // Modify one file + err = os.WriteFile(file1, []byte("modified"), 0644) + if err != nil { + t.Fatalf("failed to modify file1: %v", err) + } + + // Wait for watch to trigger + time.Sleep(200 * time.Millisecond) + + if execCount <= initialCount { + t.Error("task should have executed after file modification") + } + + cancel() + + select { + case err := <-done: + if err != nil && !errors.Is(err, context.Canceled) { + t.Errorf("unexpected watch error: %v", err) + } + case <-time.After(1 * time.Second): + t.Error("watch did not return after cancellation") + } +}